index (int64) | package (string) | name (string) | docstring (string) | code (string) | signature (string) |
---|---|---|---|---|---|
22,154 | paypalrestsdk.invoices | record_refund | null | def record_refund(self, attributes):
return self.post('record-refund', attributes, self)
| (self, attributes) |
22,155 | paypalrestsdk.invoices | remind | null | def remind(self, attributes):
return self.post('remind', attributes, self)
| (self, attributes) |
22,156 | paypalrestsdk.invoices | send | null | def send(self, refresh_token=None):
return self.post('send', {}, self, refresh_token=refresh_token)
| (self, refresh_token=None) |
22,159 | paypalrestsdk.resource | update | null | def update(self, attributes=None, refresh_token=None):
attributes = attributes or self.to_dict()
url = util.join_url(self.path, str(self['id']))
new_attributes = self.api.put(url, attributes, self.http_headers(), refresh_token)
self.error = None
self.merge(new_attributes)
return self.success()
| (self, attributes=None, refresh_token=None) |
22,160 | paypalrestsdk.invoice_templates | InvoiceTemplate | InvoiceTemplate class wrapping the REST v1/invoicing/templates endpoint
Usage::
>>> templates = InvoiceTemplate.all()
>>> invoice_template = InvoiceTemplate.new({})
>>> invoice_template.create() # return True or False
| class InvoiceTemplate(List, Find, Create, Delete, Update):
"""InvoiceTemplate class wrapping the REST v1/invoicing/templates endpoint"
Usage::
>>> templates = InvoiceTemplate.all()
>>> invoice_template = InvoiceTemplate.new({})
>>> invoice_template.create() # return True or False
"""
path = "v1/invoicing/templates"
def __getitem__(self, key):
if key == 'id':
return self.__data__['template_id']
else:
            return super(InvoiceTemplate, self).__getitem__(key)
| (attributes=None, api=None) |
22,163 | paypalrestsdk.invoice_templates | __getitem__ | null | def __getitem__(self, key):
if key == 'id':
return self.__data__['template_id']
else:
        return super(InvoiceTemplate, self).__getitem__(key)
| (self, key) |
22,178 | paypalrestsdk.exceptions | MissingConfig | null | class MissingConfig(Exception):
pass
| null |
22,179 | paypalrestsdk.payments | Order | Enables looking up, voiding, authorizing and capturing a paypal order which
is a payment created with intent order. This indicates buyer consent for a purchase
and does not place funds on hold.
Usage::
>>> order = Order.find("<ORDER_ID>")
>>> order.void()
| class Order(Find, Post):
"""Enables looking up, voiding, authorizing and capturing a paypal order which
is a payment created with intent order. This indicates buyer consent for a purchase
and does not place funds on hold.
Usage::
>>> order = Order.find("<ORDER_ID>")
>>> order.void()
"""
path = "v1/payments/orders"
def capture(self, attributes):
return self.post('capture', attributes, Order)
def void(self):
return self.post('do-void', {}, self)
def authorize(self, attributes):
return self.post('authorize', attributes, self)
| (attributes=None, api=None) |
22,188 | paypalrestsdk.payments | authorize | null | def authorize(self, attributes):
return self.post('authorize', attributes, self)
| (self, attributes) |
22,189 | paypalrestsdk.payments | capture | null | def capture(self, attributes):
return self.post('capture', attributes, Order)
| (self, attributes) |
22,197 | paypalrestsdk.payments | void | null | def void(self):
return self.post('do-void', {}, self)
| (self) |
22,198 | paypalrestsdk.payments | Payment | Payment class wrapping the REST v1/payments/payment endpoint
Usage::
>>> payment_history = Payment.all({"count": 5})
>>> payment = Payment.find("<PAYMENT_ID>")
>>> payment = Payment.new({"intent": "sale"})
>>> payment.create() # return True or False
>>> payment.execute({"payer_id": 1234}) # return True or False
| class Payment(List, Find, Create, Post, Replace):
"""Payment class wrapping the REST v1/payments/payment endpoint
Usage::
>>> payment_history = Payment.all({"count": 5})
>>> payment = Payment.find("<PAYMENT_ID>")
>>> payment = Payment.new({"intent": "sale"})
>>> payment.create() # return True or False
>>> payment.execute({"payer_id": 1234}) # return True or False
"""
path = "v1/payments/payment"
def execute(self, attributes):
return self.post('execute', attributes, self)
| (attributes=None, api=None) |
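A minimal end-to-end sketch of the `create`/`execute` flow above; the credentials, URLs, and amounts are placeholders for real sandbox values:

```python
import paypalrestsdk
from paypalrestsdk import Payment

# Placeholder sandbox credentials -- substitute your own.
paypalrestsdk.configure({
    "mode": "sandbox",
    "client_id": "<CLIENT_ID>",
    "client_secret": "<CLIENT_SECRET>",
})

payment = Payment({
    "intent": "sale",
    "payer": {"payment_method": "paypal"},
    "redirect_urls": {
        "return_url": "https://example.com/return",   # placeholder URLs
        "cancel_url": "https://example.com/cancel",
    },
    "transactions": [{"amount": {"total": "1.00", "currency": "USD"}}],
})

if payment.create():          # POSTs to v1/payments/payment, returns True/False
    print(payment.id)
else:
    print(payment.error)      # error details are stored on the resource
```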
22,209 | paypalrestsdk.payments | execute | null | def execute(self, attributes):
return self.post('execute', attributes, self)
| (self, attributes) |
22,217 | paypalrestsdk.payments | Payout |
Usage::
>>> payout = Payout.find("<PAYOUT_ID>")
| class Payout(Create, Find):
"""
Usage::
>>> payout = Payout.find("<PAYOUT_ID>")
"""
path = '/v1/payments/payouts/'
def create(self, sync_mode=False, **kwargs):
"""Creates a payout resource
"""
if sync_mode:
self.path = "/v1/payments/payouts?sync_mode=true"
return super(Payout, self).create(**kwargs)
| (attributes=None, api=None) |
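The `sync_mode` flag above simply rewrites `self.path` to append `?sync_mode=true`, so the create call blocks until the batch is processed. A hedged sketch, assuming an API has already been configured as in the `Payment` example; the batch id and receiver are placeholders:

```python
from paypalrestsdk import Payout

payout = Payout({
    "sender_batch_header": {
        "sender_batch_id": "batch_0001",              # placeholder, must be unique
        "email_subject": "You have a payment",
    },
    "items": [{
        "recipient_type": "EMAIL",
        "amount": {"value": "1.00", "currency": "USD"},
        "receiver": "receiver@example.com",           # placeholder receiver
    }],
})

# sync_mode=True swaps the path for "/v1/payments/payouts?sync_mode=true".
if payout.create(sync_mode=True):
    print(payout.batch_header.payout_batch_id)
else:
    print(payout.error)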
22,227 | paypalrestsdk.payments | create | Creates a payout resource
| def create(self, sync_mode=False, **kwargs):
"""Creates a payout resource
"""
if sync_mode:
self.path = "/v1/payments/payouts?sync_mode=true"
return super(Payout, self).create(**kwargs)
| (self, sync_mode=False, **kwargs) |
22,233 | paypalrestsdk.payments | PayoutItem |
Usage::
>>> payout_item = PayoutItem.find("<PAYOUT_ITEM_ID>")
| class PayoutItem(Find, Post):
"""
Usage::
>>> payout_item = PayoutItem.find("<PAYOUT_ITEM_ID>")
"""
path = '/v1/payments/payouts-item/'
def cancel(self):
return self.post('cancel', {}, self, fieldname='payout_item_id')
| (attributes=None, api=None) |
22,242 | paypalrestsdk.payments | cancel | null | def cancel(self):
return self.post('cancel', {}, self, fieldname='payout_item_id')
| (self) |
22,250 | paypalrestsdk.payments | Refund | Get details for a refund on direct or captured payment
Usage::
>>> refund = Refund.find("<REFUND_ID>")
| class Refund(Find):
"""Get details for a refund on direct or captured payment
Usage::
>>> refund = Refund.find("<REFUND_ID>")
"""
path = "v1/payments/refund"
| (attributes=None, api=None) |
22,265 | paypalrestsdk.exceptions | ResourceNotFound | 404 Not Found
| class ResourceNotFound(ClientError):
"""404 Not Found
"""
pass
| (response, content=None, message=None) |
22,266 | paypalrestsdk.exceptions | __init__ | null | def __init__(self, response, content=None, message=None):
self.response = response
self.content = content
self.message = message
| (self, response, content=None, message=None) |
22,267 | paypalrestsdk.exceptions | __str__ | null | def __str__(self):
message = "Failed."
if hasattr(self.response, 'status_code'):
message += " Response status: %s." % (self.response.status_code)
if hasattr(self.response, 'reason'):
message += " Response message: %s." % (self.response.reason)
if self.content is not None:
message += " Error message: " + str(self.content)
return message
| (self) |
22,268 | paypalrestsdk.payments | Sale | Sale class wrapping the REST v1/payments/sale endpoint
Usage::
>>> sale = Sale.find("<SALE_ID>")
>>> refund = sale.refund({"amount": {"total": "1.00", "currency": "USD"}})
>>> refund.success() # return True or False
| class Sale(Find, Post):
"""Sale class wrapping the REST v1/payments/sale endpoint
Usage::
>>> sale = Sale.find("<SALE_ID>")
>>> refund = sale.refund({"amount": {"total": "1.00", "currency": "USD"}})
>>> refund.success() # return True or False
"""
path = "v1/payments/sale"
def refund(self, attributes):
return self.post('refund', attributes, Refund)
| (attributes=None, api=None) |
22,285 | paypalrestsdk.openid_connect | Tokeninfo | Token service for Log In with PayPal, API docs at
https://developer.paypal.com/docs/api/#identity
| class Tokeninfo(Base):
"""Token service for Log In with PayPal, API docs at
https://developer.paypal.com/docs/api/#identity
"""
path = "v1/identity/openidconnect/tokenservice"
@classmethod
def create(cls, options=None, api=None):
options = options or {}
api = api or default_api()
if isinstance(options, string_types):
options = {'code': options}
options = util.merge_dict({
'grant_type': 'authorization_code',
'client_id': client_id(api),
'client_secret': client_secret(api)
}, options)
return cls.post(cls.path, options, api=api)
@classmethod
def create_with_refresh_token(cls, options=None, api=None):
options = options or {}
api = api or default_api()
if isinstance(options, string_types):
options = {'refresh_token': options}
options = util.merge_dict({
'grant_type': 'refresh_token',
'client_id': client_id(api),
'client_secret': client_secret(api)
}, options)
return cls.post(cls.path, options, api=api)
@classmethod
def authorize_url(cls, options=None, api=None):
return authorize_url(options or {}, api=api)
def logout_url(self, options=None, api=None):
return logout_url(util.merge_dict({'id_token': self.id_token}, options or {}), api=api)
def refresh(self, options=None, api=None):
options = util.merge_dict({'refresh_token': self.refresh_token}, options or {})
tokeninfo = self.__class__.create_with_refresh_token(options, api=api)
self.merge(tokeninfo.to_dict())
return self
def userinfo(self, options=None, api=None):
return Userinfo.get(util.merge_dict({'access_token': self.access_token}, options or {}), api=api)
| (attributes=None, api=None) |
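The classmethods above map onto the usual OAuth exchange: trade an authorization code for tokens, refresh, then fetch the profile. A sketch assuming Log In with PayPal credentials are already configured; the authorization code is a placeholder, and which profile fields come back depends on the granted scopes:

```python
from paypalrestsdk.openid_connect import Tokeninfo

tokeninfo = Tokeninfo.create("<AUTHORIZATION_CODE>")   # grant_type=authorization_code
print(tokeninfo.access_token)

tokeninfo = tokeninfo.refresh()       # re-issues tokens via the stored refresh_token
userinfo = tokeninfo.userinfo()       # fetches profile attributes via Userinfo.get
print(userinfo.user_id)
```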
22,297 | paypalrestsdk.openid_connect | logout_url | null | def logout_url(self, options=None, api=None):
return logout_url(util.merge_dict({'id_token': self.id_token}, options or {}), api=api)
| (self, options=None, api=None) |
22,299 | paypalrestsdk.openid_connect | refresh | null | def refresh(self, options=None, api=None):
options = util.merge_dict({'refresh_token': self.refresh_token}, options or {})
tokeninfo = self.__class__.create_with_refresh_token(options, api=api)
self.merge(tokeninfo.to_dict())
return self
| (self, options=None, api=None) |
22,302 | paypalrestsdk.openid_connect | userinfo | null | def userinfo(self, options=None, api=None):
return Userinfo.get(util.merge_dict({'access_token': self.access_token}, options or {}), api=api)
| (self, options=None, api=None) |
22,303 | paypalrestsdk.exceptions | UnauthorizedAccess | 401 Unauthorized
| class UnauthorizedAccess(ClientError):
"""401 Unauthorized
"""
pass
| (response, content=None, message=None) |
22,306 | paypalrestsdk.openid_connect | Userinfo | Retrieve user profile attributes for Log In with PayPal
| class Userinfo(Base):
"""Retrive user profile attributes for Log In with PayPal
"""
path = "v1/identity/openidconnect/userinfo"
@classmethod
def get(cls, options=None, api=None):
options = options or {}
if isinstance(options, string_types):
options = {'access_token': options}
options = util.merge_dict({'schema': 'openid'}, options)
api = api or default_api()
return cls.post(cls.path, options, api=api)
| (attributes=None, api=None) |
22,321 | paypalrestsdk.payment_experience | WebProfile | The payment experience api allows merchants to provide a
customized experience to consumers from the merchant's website
to the PayPal payment. API docs at
https://developer.paypal.com/docs/api/#payment-experience
Usage::
>>> web_profile = WebProfile.find("XP-3NWU-L5YK-X5EC-6KJM")
| class WebProfile(Create, Find, List, Delete, Update, Replace):
"""The payment experience api allows merchants to provide a
customized experience to consumers from the merchant's website
to the PayPal payment. API docs at
https://developer.paypal.com/docs/api/#payment-experience
Usage::
>>> web_profile = WebProfile.find("XP-3NWU-L5YK-X5EC-6KJM")
"""
path = "/v1/payment-experience/web-profiles"
| (attributes=None, api=None) |
22,340 | paypalrestsdk.notifications | Webhook | Exposes REST endpoints for creating and managing webhooks
Usage::
>>> webhook = Webhook.find("<WEBHOOK_ID>")
| class Webhook(Create, Find, List, Delete, Replace):
"""Exposes REST endpoints for creating and managing webhooks
Usage::
>>> webhook = Webhook.find("<WEBHOOK_ID>")
"""
path = "/v1/notifications/webhooks/"
def get_event_types(self, api=None):
"""Get the list of events types that are subscribed to a webhook
"""
api = api or default_api()
url = util.join_url(self.path, str(self['id']), 'event-types')
return Resource(self.api.get(url), api=api)
| (attributes=None, api=None) |
22,353 | paypalrestsdk.notifications | get_event_types | Get the list of event types that are subscribed to a webhook
| def get_event_types(self, api=None):
"""Get the list of events types that are subscribed to a webhook
"""
api = api or default_api()
url = util.join_url(self.path, str(self['id']), 'event-types')
return Resource(self.api.get(url), api=api)
| (self, api=None) |
22,359 | paypalrestsdk.notifications | WebhookEvent | Exposes REST endpoints for working with subscribed webhooks events
| class WebhookEvent(Find, List, Post):
"""Exposes REST endpoints for working with subscribed webhooks events
"""
path = "/v1/notifications/webhooks-events/"
__root_cert_path = "data/DigiCertHighAssuranceEVRootCA.crt.pem"
__intermediate_cert_path = "data/DigiCertSHA2ExtendedValidationServerCA.crt.pem"
__cert_chain_path = [__root_cert_path, __intermediate_cert_path]
def resend(self):
"""Specify a received webhook event-id to resend the event notification
"""
return self.post('resend', {}, self)
def get_resource(self):
"""Get the resource sent via the webhook event, e.g. Authorization, conveniently
wrapped in the corresponding paypalrestsdk class
"""
webhook_resource_type = self.resource_type
klass = util.get_member(webhook_resource_type)
resource = klass(self.resource.__dict__)
return resource
@staticmethod
def _get_expected_sig(transmission_id, timestamp, webhook_id, event_body):
"""Get the input string to generate the HMAC signature
"""
if sys.version_info[0] == 2:
data = str(binascii.crc32(event_body.decode('utf-8').encode('utf-8')) & 0xffffffff)
else:
data = str(binascii.crc32(event_body.encode('utf-8')) & 0xffffffff)
expected_sig = transmission_id + "|" + timestamp + "|" + webhook_id + "|" + data
return expected_sig
@staticmethod
def _is_common_name_valid(cert):
"""Check that the common name in the certificate refers to paypal"""
from OpenSSL import crypto
if cert.get_subject().commonName.lower().endswith(".paypal.com"):
return True
else:
return False
@classmethod
def _get_certificate_store(cls):
"""Returns a certificate store with the trust chain loaded
"""
from OpenSSL import crypto
store = crypto.X509Store()
try:
for cert_path in cls.__cert_chain_path:
cert_str = open(os.path.join(os.path.dirname(__file__), cert_path)).read()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)
store.add_cert(cert)
return store
except Exception as e:
print(e)
@classmethod
def _verify_certificate_chain(cls, cert):
"""Verify certificate using chain of trust shipped with sdk
"""
from OpenSSL import crypto
store = cls._get_certificate_store()
try:
store_ctx = crypto.X509StoreContext(store, cert)
store_ctx.verify_certificate()
return True
except Exception as e:
print(e)
return False
@classmethod
def _verify_certificate(cls, cert):
"""Verify that certificate is unexpired, has valid common name and is trustworthy
"""
if cls._verify_certificate_chain(cert) and cls._is_common_name_valid(cert) and not cert.has_expired():
return True
else:
return False
@staticmethod
def _get_cert(cert_url):
"""Fetches the paypal certificate used to sign the webhook event payload
"""
from OpenSSL import crypto
try:
r = requests.get(cert_url)
cert = crypto.load_certificate(crypto.FILETYPE_PEM, str(r.text))
return cert
except requests.exceptions.RequestException as e:
print("Error retrieving PayPal certificate with url " + cert_url)
print(e)
@classmethod
def _verify_signature(cls, transmission_id, timestamp, webhook_id, event_body, cert, actual_sig, auth_algo):
"""Verify that the webhook payload received is from PayPal,
unaltered and targeted towards correct recipient
"""
from OpenSSL import crypto
expected_sig = WebhookEvent._get_expected_sig(transmission_id, timestamp, webhook_id, event_body)
try:
crypto.verify(cert, b64decode(actual_sig), expected_sig.encode('utf-8'), auth_algo)
return True
except Exception as e:
print(e)
return False
@classmethod
def verify(cls, transmission_id, timestamp, webhook_id, event_body, cert_url, actual_sig, auth_algo='sha256'):
"""Verify certificate and payload
"""
__auth_algo_map = {
'SHA256withRSA': 'sha256WithRSAEncryption',
'SHA1withRSA': 'sha1WithRSAEncryption'
}
try:
if auth_algo != 'sha256' and auth_algo not in __auth_algo_map.values():
auth_algo = __auth_algo_map[auth_algo]
except KeyError as e:
print('Authorization algorithm mapping not found in verify method.')
return False
cert = WebhookEvent._get_cert(cert_url)
return WebhookEvent._verify_certificate(cert) and WebhookEvent._verify_signature(transmission_id, timestamp, webhook_id, event_body, cert, actual_sig, auth_algo)
| (attributes=None, api=None) |
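In a webhook handler, every argument to `verify` except `webhook_id` comes straight from the incoming HTTP request. A hedged sketch; the header values and ids below are placeholders standing in for a real request:

```python
from paypalrestsdk.notifications import WebhookEvent

headers = {   # placeholders: copy these from the incoming request's headers
    "PAYPAL-TRANSMISSION-ID": "<TRANSMISSION_ID>",
    "PAYPAL-TRANSMISSION-TIME": "<TIMESTAMP>",
    "PAYPAL-CERT-URL": "<PAYPAL_HOSTED_CERT_URL>",
    "PAYPAL-TRANSMISSION-SIG": "<BASE64_SIGNATURE>",
    "PAYPAL-AUTH-ALGO": "SHA256withRSA",   # mapped to 'sha256WithRSAEncryption'
}
event_body = '<RAW_REQUEST_BODY>'     # must be the unparsed body string
webhook_id = "<WEBHOOK_ID>"           # id returned when the webhook was created

valid = WebhookEvent.verify(
    headers["PAYPAL-TRANSMISSION-ID"], headers["PAYPAL-TRANSMISSION-TIME"],
    webhook_id, event_body, headers["PAYPAL-CERT-URL"],
    headers["PAYPAL-TRANSMISSION-SIG"], headers["PAYPAL-AUTH-ALGO"])
print("verified" if valid else "rejected")
```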
22,368 | paypalrestsdk.notifications | _get_cert | Fetches the paypal certificate used to sign the webhook event payload
| @staticmethod
def _get_cert(cert_url):
"""Fetches the paypal certificate used to sign the webhook event payload
"""
from OpenSSL import crypto
try:
r = requests.get(cert_url)
cert = crypto.load_certificate(crypto.FILETYPE_PEM, str(r.text))
return cert
except requests.exceptions.RequestException as e:
print("Error retrieving PayPal certificate with url " + cert_url)
print(e)
| (cert_url) |
22,369 | paypalrestsdk.notifications | _get_expected_sig | Get the input string to generate the HMAC signature
| @staticmethod
def _get_expected_sig(transmission_id, timestamp, webhook_id, event_body):
"""Get the input string to generate the HMAC signature
"""
if sys.version_info[0] == 2:
data = str(binascii.crc32(event_body.decode('utf-8').encode('utf-8')) & 0xffffffff)
else:
data = str(binascii.crc32(event_body.encode('utf-8')) & 0xffffffff)
expected_sig = transmission_id + "|" + timestamp + "|" + webhook_id + "|" + data
return expected_sig
| (transmission_id, timestamp, webhook_id, event_body) |
22,370 | paypalrestsdk.notifications | _is_common_name_valid | Check that the common name in the certificate refers to paypal | @staticmethod
def _is_common_name_valid(cert):
"""Check that the common name in the certificate refers to paypal"""
from OpenSSL import crypto
if cert.get_subject().commonName.lower().endswith(".paypal.com"):
return True
else:
return False
| (cert) |
22,373 | paypalrestsdk.notifications | get_resource | Get the resource sent via the webhook event, e.g. Authorization, conveniently
wrapped in the corresponding paypalrestsdk class
| def get_resource(self):
"""Get the resource sent via the webhook event, e.g. Authorization, conveniently
wrapped in the corresponding paypalrestsdk class
"""
webhook_resource_type = self.resource_type
klass = util.get_member(webhook_resource_type)
resource = klass(self.resource.__dict__)
return resource
| (self) |
22,377 | paypalrestsdk.notifications | resend | Specify a received webhook event-id to resend the event notification
| def resend(self):
"""Specify a received webhook event-id to resend the event notification
"""
return self.post('resend', {}, self)
| (self) |
22,380 | paypalrestsdk.notifications | WebhookEventType | Exposes REST endpoint for listing available event types for webhooks
| class WebhookEventType(List):
"""Exposes REST endpoint for listing available event types for webhooks
"""
path = "/v1/notifications/webhooks-event-types/"
| (attributes=None, api=None) |
22,397 | paypalrestsdk.api | set_config | Create new default api object with given configuration
| def set_config(options=None, **config):
"""Create new default api object with given configuration
"""
global __api__
__api__ = Api(options or {}, **config)
return __api__
| (options=None, **config) |
22,409 | copulas | NotFittedError | NotFittedError class. | class NotFittedError(Exception):
"""NotFittedError class."""
| null |
22,410 | copulas | _find_addons | Find and load all copulas add-ons. | def _find_addons():
"""Find and load all copulas add-ons."""
group = 'copulas_modules'
try:
eps = entry_points(group=group)
except TypeError:
# Load-time selection requires Python >= 3.10 or importlib_metadata >= 3.6
eps = entry_points().get(group, [])
for entry_point in eps:
try:
addon = entry_point.load()
except Exception: # pylint: disable=broad-exception-caught
msg = f'Failed to load "{entry_point.name}" from "{entry_point.module_name}".'
warnings.warn(msg)
continue
try:
addon_target, addon_name = _get_addon_target(entry_point.name)
except AttributeError as error:
msg = f"Failed to set '{entry_point.name}': {error}."
warnings.warn(msg)
continue
setattr(addon_target, addon_name, addon)
| () |
22,411 | copulas | _get_addon_target | Find the target object for the add-on.
Args:
addon_path_name (str):
The add-on's name. The add-on's name should be the full path of valid Python
identifiers (i.e. importable.module:object.attr).
Returns:
tuple:
* object:
The base module or object the add-on should be added to.
* str:
The name the add-on should be added to under the module or object.
| def _get_addon_target(addon_path_name):
"""Find the target object for the add-on.
Args:
addon_path_name (str):
The add-on's name. The add-on's name should be the full path of valid Python
identifiers (i.e. importable.module:object.attr).
Returns:
tuple:
* object:
The base module or object the add-on should be added to.
* str:
The name the add-on should be added to under the module or object.
"""
module_path, _, object_path = addon_path_name.partition(':')
module_path = module_path.split('.')
if module_path[0] != __name__:
msg = f"expected base module to be '{__name__}', found '{module_path[0]}'"
raise AttributeError(msg)
target_base = sys.modules[__name__]
for submodule in module_path[1:-1]:
target_base = getattr(target_base, submodule)
addon_name = module_path[-1]
if object_path:
if len(module_path) > 1 and not hasattr(target_base, module_path[-1]):
msg = f"cannot add '{object_path}' to unknown submodule '{'.'.join(module_path)}'"
raise AttributeError(msg)
if len(module_path) > 1:
target_base = getattr(target_base, module_path[-1])
split_object = object_path.split('.')
addon_name = split_object[-1]
if len(split_object) > 1:
target_base = attrgetter('.'.join(split_object[:-1]))(target_base)
return target_base, addon_name
| (addon_path_name) |
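Concretely, an entry point registered under the `copulas_modules` group with a name like `copulas.univariate:GaussianUnivariate.my_method` splits into a module path and an object path. A hedged sketch of the resolution; the names are hypothetical, and `_get_addon_target` is private and only resolves paths rooted at the `copulas` package:

```python
# Hypothetical add-on name, as it would appear in an entry point:
addon_path_name = 'copulas.univariate:GaussianUnivariate.my_method'

# Inside the copulas package this resolves to
#   target_base = copulas.univariate.GaussianUnivariate
#   addon_name  = 'my_method'
target_base, addon_name = _get_addon_target(addon_path_name)

# _find_addons() then attaches the loaded entry point:
#   setattr(target_base, addon_name, addon)
```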
22,412 | operator | attrgetter | attrgetter(attr, ...) --> attrgetter object
Return a callable object that fetches the given attribute(s) from its operand.
After f = attrgetter('name'), the call f(r) returns r.name.
After g = attrgetter('name', 'date'), the call g(r) returns (r.name, r.date).
After h = attrgetter('name.first', 'name.last'), the call h(r) returns
(r.name.first, r.name.last). | class attrgetter:
"""
Return a callable object that fetches the given attribute(s) from its operand.
After f = attrgetter('name'), the call f(r) returns r.name.
After g = attrgetter('name', 'date'), the call g(r) returns (r.name, r.date).
After h = attrgetter('name.first', 'name.last'), the call h(r) returns
(r.name.first, r.name.last).
"""
__slots__ = ('_attrs', '_call')
def __init__(self, attr, *attrs):
if not attrs:
if not isinstance(attr, str):
raise TypeError('attribute name must be a string')
self._attrs = (attr,)
names = attr.split('.')
def func(obj):
for name in names:
obj = getattr(obj, name)
return obj
self._call = func
else:
self._attrs = (attr,) + attrs
getters = tuple(map(attrgetter, self._attrs))
def func(obj):
return tuple(getter(obj) for getter in getters)
self._call = func
def __call__(self, obj):
return self._call(obj)
def __repr__(self):
return '%s.%s(%s)' % (self.__class__.__module__,
self.__class__.__qualname__,
', '.join(map(repr, self._attrs)))
def __reduce__(self):
return self.__class__, self._attrs
| null |
22,413 | copulas | check_valid_values | Raise an exception if the given values are not supported.
Args:
function(callable): Method whose unique argument is a numpy.array-like object.
Returns:
callable: Decorated function
Raises:
ValueError: If there are missing or invalid values or if the dataset is empty.
| def check_valid_values(function):
"""Raise an exception if the given values are not supported.
Args:
function(callable): Method whose unique argument is a numpy.array-like object.
Returns:
callable: Decorated function
Raises:
ValueError: If there are missing or invalid values or if the dataset is empty.
"""
def decorated(self, X, *args, **kwargs):
if isinstance(X, pd.DataFrame):
W = X.to_numpy()
else:
W = X
if not len(W):
raise ValueError('Your dataset is empty.')
if not (np.issubdtype(W.dtype, np.floating) or np.issubdtype(W.dtype, np.integer)):
raise ValueError('There are non-numerical values in your data.')
if np.isnan(W).any().any():
raise ValueError('There are nan values in your data.')
return function(self, X, *args, **kwargs)
return decorated
| (function) |
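For instance, decorating a `fit` method guards it against empty, non-numeric, or NaN-containing input before the body ever runs. A minimal sketch; the `Univariate` class is hypothetical:

```python
import numpy as np
from copulas import check_valid_values

class Univariate:
    @check_valid_values
    def fit(self, X):
        self._mean = X.mean()

model = Univariate()
model.fit(np.array([[1.0, 2.0], [3.0, 4.0]]))   # passes validation
try:
    model.fit(np.array([[1.0, np.nan]]))
except ValueError as error:
    print(error)                                # "There are nan values in your data."
```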
22,416 | importlib.metadata | entry_points | Return EntryPoint objects for all installed packages.
Pass selection parameters (group or name) to filter the
result to entry points matching those properties (see
EntryPoints.select()).
For compatibility, returns ``SelectableGroups`` object unless
selection parameters are supplied. In the future, this function
will return ``EntryPoints`` instead of ``SelectableGroups``
even when no selection parameters are supplied.
For maximum future compatibility, pass selection parameters
or invoke ``.select`` with parameters on the result.
:return: EntryPoints or SelectableGroups for all installed packages.
| def entry_points(**params) -> Union[EntryPoints, SelectableGroups]:
"""Return EntryPoint objects for all installed packages.
Pass selection parameters (group or name) to filter the
result to entry points matching those properties (see
EntryPoints.select()).
For compatibility, returns ``SelectableGroups`` object unless
selection parameters are supplied. In the future, this function
will return ``EntryPoints`` instead of ``SelectableGroups``
even when no selection parameters are supplied.
For maximum future compatibility, pass selection parameters
or invoke ``.select`` with parameters on the result.
:return: EntryPoints or SelectableGroups for all installed packages.
"""
norm_name = operator.attrgetter('_normalized_name')
unique = functools.partial(unique_everseen, key=norm_name)
eps = itertools.chain.from_iterable(
dist.entry_points for dist in unique(distributions())
)
return SelectableGroups.load(eps).select(**params)
| (**params) -> Union[importlib.metadata.EntryPoints, importlib.metadata.SelectableGroups] |
22,417 | copulas | get_instance | Create new instance of the ``obj`` argument.
Args:
obj (str, type, instance):
| def get_instance(obj, **kwargs):
"""Create new instance of the ``obj`` argument.
Args:
obj (str, type, instance):
"""
instance = None
if isinstance(obj, str):
package, name = obj.rsplit('.', 1)
instance = getattr(importlib.import_module(package), name)(**kwargs)
elif isinstance(obj, type):
instance = obj(**kwargs)
else:
if kwargs:
instance = obj.__class__(**kwargs)
else:
args = getattr(obj, '__args__', ())
kwargs = getattr(obj, '__kwargs__', {})
instance = obj.__class__(*args, **kwargs)
return instance
| (obj, **kwargs) |
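All three accepted forms yield a fresh, unfitted object; the instance branch falls back to the `__args__`/`__kwargs__` saved by `store_args` (below). A sketch, using `GaussianMultivariate` purely as an illustrative target:

```python
from copulas import get_instance
from copulas.multivariate import GaussianMultivariate

new = get_instance('copulas.multivariate.GaussianMultivariate')  # from a FQN string
new = get_instance(GaussianMultivariate)                         # from a class
new = get_instance(GaussianMultivariate())                       # from an instance
```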
22,418 | copulas | get_qualified_name | Return the Fully Qualified Name from an instance or class. | def get_qualified_name(_object):
"""Return the Fully Qualified Name from an instance or class."""
module = _object.__module__
if hasattr(_object, '__name__'):
_class = _object.__name__
else:
_class = _object.__class__.__name__
return module + '.' + _class
| (_object) |
22,422 | copulas | random_state | Set the random state before calling the function.
Args:
function (Callable):
The function to wrap around.
| def random_state(function):
"""Set the random state before calling the function.
Args:
function (Callable):
The function to wrap around.
"""
def wrapper(self, *args, **kwargs):
if self.random_state is None:
return function(self, *args, **kwargs)
else:
with set_random_state(self.random_state, self.set_random_state):
return function(self, *args, **kwargs)
return wrapper
| (function) |
22,423 | copulas | scalarize | Allow methods that only accept 1-d vectors to work with scalars.
Args:
function(callable): Function that accepts and returns vectors.
Returns:
callable: Decorated function that accepts and returns scalars.
| def scalarize(function):
"""Allow methods that only accepts 1-d vectors to work with scalars.
Args:
function(callable): Function that accepts and returns vectors.
Returns:
callable: Decorated function that accepts and returns scalars.
"""
def decorated(self, X, *args, **kwargs):
scalar = not isinstance(X, np.ndarray)
if scalar:
X = np.array([X])
result = function(self, X, *args, **kwargs)
if scalar:
result = result[0]
return result
decorated.__doc__ = function.__doc__
return decorated
| (function) |
22,424 | copulas | set_random_state | Context manager for managing the random state.
Args:
random_state (int or np.random.RandomState):
The random seed or RandomState.
set_model_random_state (function):
Function to set the random state on the model.
| @contextmanager
def set_random_state(random_state, set_model_random_state):
    """Context manager for managing the random state.
    Args:
        random_state (int or np.random.RandomState):
            The random seed or RandomState.
        set_model_random_state (function):
            Function to set the random state on the model.
    """
    original_state = np.random.get_state()
    np.random.set_state(random_state.get_state())
    try:
        yield
    finally:
        current_random_state = np.random.RandomState()
        current_random_state.set_state(np.random.get_state())
        set_model_random_state(current_random_state)
        np.random.set_state(original_state)
| (random_state, set_model_random_state) |
22,425 | copulas | store_args | Save ``*args`` and ``**kwargs`` used in the ``__init__`` of a copula.
Args:
__init__(callable): ``__init__`` function whose arguments should be stored.
Returns:
callable: Decorated ``__init__`` function.
| def store_args(__init__):
"""Save ``*args`` and ``**kwargs`` used in the ``__init__`` of a copula.
Args:
        __init__(callable): ``__init__`` function whose arguments should be stored.
Returns:
callable: Decorated ``__init__`` function.
"""
def new__init__(self, *args, **kwargs):
args_copy = deepcopy(args)
kwargs_copy = deepcopy(kwargs)
__init__(self, *args, **kwargs)
self.__args__ = args_copy
self.__kwargs__ = kwargs_copy
return new__init__
| (__init__) |
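The stored copies are what `get_instance` reads back when it re-creates a model from an existing instance. A minimal sketch; the `Copula` class is hypothetical:

```python
from copulas import store_args

class Copula:
    @store_args
    def __init__(self, random_state=None):
        self.random_state = random_state

copula = Copula(random_state=42)
print(copula.__kwargs__)   # {'random_state': 42}, deep-copied at construction time
```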
22,427 | copulas | validate_random_state | Validate random state argument.
Args:
random_state (int, numpy.random.RandomState, tuple, or None):
Seed or RandomState for the random generator.
Output:
numpy.random.RandomState
| def validate_random_state(random_state):
"""Validate random state argument.
Args:
random_state (int, numpy.random.RandomState, tuple, or None):
Seed or RandomState for the random generator.
Output:
numpy.random.RandomState
"""
if random_state is None:
return None
if isinstance(random_state, int):
return np.random.RandomState(seed=random_state)
elif isinstance(random_state, np.random.RandomState):
return random_state
else:
raise TypeError(
f'`random_state` {random_state} expected to be an int '
'or `np.random.RandomState` object.')
| (random_state) |
22,428 | copulas | vectorize | Allow a method that only accepts scalars to accept vectors too.
This decorator has two different behaviors depending on the dimensionality of the
array passed as an argument:
**1-d array**
It will work under the assumption that the `function` argument is a callable
with signature::
function(self, X, *args, **kwargs)
where X is a scalar magnitude.
In this case the arguments of the input array will be given one at a time, and
both the input and output of the decorated function will have shape (n,).
**2-d array**
It will work under the assumption that the `function` argument is a callable with signature::
function(self, X0, ..., Xj, *args, **kwargs)
where `Xi` are scalar magnitudes.
It will pass the contents of each row unpacked on each call. The input is expected to have
shape (n, j), the output shape (n,).
It will return a function that is guaranteed to return a `numpy.array`.
Args:
function(callable): Function that only accepts and returns scalars.
Returns:
callable: Decorated function that can accept and return :attr:`numpy.array`.
| def vectorize(function):
"""Allow a method that only accepts scalars to accept vectors too.
This decorator has two different behaviors depending on the dimensionality of the
array passed as an argument:
**1-d array**
It will work under the assumption that the `function` argument is a callable
with signature::
function(self, X, *args, **kwargs)
    where X is a scalar magnitude.
In this case the arguments of the input array will be given one at a time, and
both the input and output of the decorated function will have shape (n,).
**2-d array**
It will work under the assumption that the `function` argument is a callable with signature::
function(self, X0, ..., Xj, *args, **kwargs)
where `Xi` are scalar magnitudes.
    It will pass the contents of each row unpacked on each call. The input is expected to have
    shape (n, j), the output shape (n,).
It will return a function that is guaranteed to return a `numpy.array`.
Args:
        function(callable): Function that only accepts and returns scalars.
Returns:
callable: Decorated function that can accept and return :attr:`numpy.array`.
"""
def decorated(self, X, *args, **kwargs):
if not isinstance(X, np.ndarray):
return function(self, X, *args, **kwargs)
if len(X.shape) == 1:
X = X.reshape([-1, 1])
if len(X.shape) == 2:
return np.fromiter(
(function(self, *x, *args, **kwargs) for x in X),
np.dtype('float64')
)
else:
raise ValueError('Arrays of dimensionality higher than 2 are not supported.')
decorated.__doc__ = function.__doc__
return decorated
| (function) |
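So a bivariate density written for scalars can be evaluated row-by-row over an `(n, 2)` array. A minimal sketch; the `Bivariate` class is hypothetical:

```python
import numpy as np
from copulas import vectorize

class Bivariate:
    @vectorize
    def pdf(self, u, v):
        return u * v        # scalar implementation, called once per row

density = Bivariate().pdf(np.array([[0.1, 0.2], [0.3, 0.4]]))
print(density)              # array([0.02, 0.12]), one value per input row
```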
22,430 | coca_pytorch.coca_pytorch | CoCa | null | class CoCa(nn.Module):
def __init__(
self,
*,
dim,
num_tokens,
unimodal_depth,
multimodal_depth,
dim_latents = None,
image_dim = None,
num_img_queries=256,
dim_head=64,
heads=8,
ff_mult=4,
img_encoder=None,
caption_loss_weight=1.,
contrastive_loss_weight=1.,
pad_id=0
):
super().__init__()
self.dim = dim
self.pad_id = pad_id
self.caption_loss_weight = caption_loss_weight
self.contrastive_loss_weight = contrastive_loss_weight
# token embeddings
self.token_emb = nn.Embedding(num_tokens, dim)
self.text_cls_token = nn.Parameter(torch.randn(dim))
# image encoder
self.img_encoder = img_encoder
# attention pooling for image tokens
self.img_queries = nn.Parameter(torch.randn(num_img_queries + 1, dim)) # num image queries for multimodal, but 1 extra CLS for contrastive learning
self.img_attn_pool = CrossAttention(dim=dim, context_dim=image_dim, dim_head=dim_head, heads=heads, norm_context=True)
self.img_attn_pool_norm = LayerNorm(dim)
self.text_cls_norm = LayerNorm(dim)
# to latents
dim_latents = default(dim_latents, dim)
self.img_to_latents = EmbedToLatents(dim, dim_latents)
self.text_to_latents = EmbedToLatents(dim, dim_latents)
# contrastive learning temperature
self.temperature = nn.Parameter(torch.Tensor([1.]))
# unimodal layers
self.unimodal_layers = nn.ModuleList([])
for ind in range(unimodal_depth):
self.unimodal_layers.append(
Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult)),
)
# multimodal layers
self.multimodal_layers = nn.ModuleList([])
for ind in range(multimodal_depth):
self.multimodal_layers.append(nn.ModuleList([
Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult)),
Residual(CrossAttention(dim=dim, dim_head=dim_head, heads=heads, parallel_ff=True, ff_mult=ff_mult))
]))
# to logits
self.to_logits = nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, num_tokens, bias=False)
)
# they used embedding weight tied projection out to logits, not common, but works
self.to_logits[-1].weight = self.token_emb.weight
nn.init.normal_(self.token_emb.weight, std=0.02)
# whether in data parallel setting
self.is_distributed = dist.is_initialized() and dist.get_world_size() > 1
def embed_text(self, text):
batch, device = text.shape[0], text.device
seq = text.shape[1]
text_tokens = self.token_emb(text)
# append text cls tokens
text_cls_tokens = repeat(self.text_cls_token, 'd -> b 1 d', b=batch)
text_tokens = torch.cat((text_tokens, text_cls_tokens), dim=-2)
# create specific mask for text cls token at the end
# to prevent it from attending to padding
cls_mask = rearrange(text!=self.pad_id, 'b j -> b 1 j')
attn_mask = F.pad(cls_mask, (0, 1, seq, 0), value=True)
# go through unimodal layers
for attn_ff in self.unimodal_layers:
text_tokens = attn_ff(text_tokens, attn_mask=attn_mask)
# get text cls token
text_tokens, text_cls_tokens = text_tokens[:, :-1], text_tokens[:, -1]
text_embeds = self.text_cls_norm(text_cls_tokens)
return text_embeds, text_tokens
def embed_image(self, images=None, image_tokens=None):
# encode images into embeddings
# with the img_encoder passed in at init
# it can also accept precomputed image tokens
assert not (exists(images) and exists(image_tokens))
if exists(images):
assert exists(self.img_encoder), 'img_encoder must be passed in for automatic image encoding'
image_tokens = self.img_encoder(images)
# attention pool image tokens
img_queries = repeat(self.img_queries, 'n d -> b n d', b=image_tokens.shape[0])
img_queries = self.img_attn_pool(img_queries, image_tokens)
img_queries = self.img_attn_pool_norm(img_queries)
return img_queries[:, 0], img_queries[:, 1:]
def forward(
self,
text,
images=None,
image_tokens=None,
labels=None,
return_loss=False,
return_embeddings=False
):
batch, device = text.shape[0], text.device
if return_loss and not exists(labels):
text, labels = text[:, :-1], text[:, 1:]
text_embeds, text_tokens = self.embed_text(text)
image_embeds, image_tokens = self.embed_image(images=images, image_tokens=image_tokens)
# return embeddings if that is what the researcher wants
if return_embeddings:
return text_embeds, image_embeds
# go through multimodal layers
for attn_ff, cross_attn in self.multimodal_layers:
text_tokens = attn_ff(text_tokens)
text_tokens = cross_attn(text_tokens, image_tokens)
logits = self.to_logits(text_tokens)
if not return_loss:
return logits
# shorthand
ce = F.cross_entropy
# calculate caption loss (cross entropy loss)
logits = rearrange(logits, 'b n c -> b c n')
caption_loss = ce(logits, labels, ignore_index=self.pad_id)
caption_loss = caption_loss * self.caption_loss_weight
# embedding to latents
text_latents = self.text_to_latents(text_embeds)
image_latents = self.img_to_latents(image_embeds)
# maybe distributed all gather
if self.is_distributed:
latents = torch.stack((text_latents, image_latents), dim = 1)
latents = all_gather(latents)
text_latents, image_latents = latents.unbind(dim = 1)
# calculate contrastive loss
sim = einsum('i d, j d -> i j', text_latents, image_latents)
sim = sim * self.temperature.exp()
contrastive_labels = torch.arange(batch, device=device)
contrastive_loss = (ce(sim, contrastive_labels) + ce(sim.t(), contrastive_labels)) * 0.5
contrastive_loss = contrastive_loss * self.contrastive_loss_weight
return caption_loss + contrastive_loss
| (*, dim, num_tokens, unimodal_depth, multimodal_depth, dim_latents=None, image_dim=None, num_img_queries=256, dim_head=64, heads=8, ff_mult=4, img_encoder=None, caption_loss_weight=1.0, contrastive_loss_weight=1.0, pad_id=0) |
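A training-step sketch adapted from the repository's usage pattern; the vocabulary size, sequence length, and precomputed visual tokens below are stand-ins (in practice an `img_encoder` such as a ViT feature extractor returning `(batch, n, image_dim)` tokens would be passed instead):

```python
import torch
from coca_pytorch.coca_pytorch import CoCa

coca = CoCa(
    dim=512,
    num_tokens=20000,          # stand-in vocabulary size
    unimodal_depth=6,
    multimodal_depth=6,
    image_dim=1024,            # dim of the precomputed image tokens below
)

text = torch.randint(0, 20000, (4, 512))    # (batch, seq) token ids
image_tokens = torch.randn(4, 257, 1024)    # stand-in precomputed visual tokens

loss = coca(text=text, image_tokens=image_tokens, return_loss=True)
loss.backward()
```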
22,433 | torch.nn.modules.module | __dir__ | null | def __dir__(self):
module_attrs = dir(self.__class__)
attrs = list(self.__dict__.keys())
parameters = list(self._parameters.keys())
modules = list(self._modules.keys())
buffers = list(self._buffers.keys())
keys = module_attrs + attrs + parameters + modules + buffers
# Eliminate attrs that are not legal Python variable names
keys = [key for key in keys if not key[0].isdigit()]
return sorted(keys)
| (self) |
22,436 | coca_pytorch.coca_pytorch | __init__ | null | def __init__(
self,
*,
dim,
num_tokens,
unimodal_depth,
multimodal_depth,
dim_latents = None,
image_dim = None,
num_img_queries=256,
dim_head=64,
heads=8,
ff_mult=4,
img_encoder=None,
caption_loss_weight=1.,
contrastive_loss_weight=1.,
pad_id=0
):
super().__init__()
self.dim = dim
self.pad_id = pad_id
self.caption_loss_weight = caption_loss_weight
self.contrastive_loss_weight = contrastive_loss_weight
# token embeddings
self.token_emb = nn.Embedding(num_tokens, dim)
self.text_cls_token = nn.Parameter(torch.randn(dim))
# image encoder
self.img_encoder = img_encoder
# attention pooling for image tokens
self.img_queries = nn.Parameter(torch.randn(num_img_queries + 1, dim)) # num image queries for multimodal, but 1 extra CLS for contrastive learning
self.img_attn_pool = CrossAttention(dim=dim, context_dim=image_dim, dim_head=dim_head, heads=heads, norm_context=True)
self.img_attn_pool_norm = LayerNorm(dim)
self.text_cls_norm = LayerNorm(dim)
# to latents
dim_latents = default(dim_latents, dim)
self.img_to_latents = EmbedToLatents(dim, dim_latents)
self.text_to_latents = EmbedToLatents(dim, dim_latents)
# contrastive learning temperature
self.temperature = nn.Parameter(torch.Tensor([1.]))
# unimodal layers
self.unimodal_layers = nn.ModuleList([])
for ind in range(unimodal_depth):
self.unimodal_layers.append(
Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult)),
)
# multimodal layers
self.multimodal_layers = nn.ModuleList([])
for ind in range(multimodal_depth):
self.multimodal_layers.append(nn.ModuleList([
Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult)),
Residual(CrossAttention(dim=dim, dim_head=dim_head, heads=heads, parallel_ff=True, ff_mult=ff_mult))
]))
# to logits
self.to_logits = nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, num_tokens, bias=False)
)
# they used embedding weight tied projection out to logits, not common, but works
self.to_logits[-1].weight = self.token_emb.weight
nn.init.normal_(self.token_emb.weight, std=0.02)
# whether in data parallel setting
self.is_distributed = dist.is_initialized() and dist.get_world_size() > 1
| (self, *, dim, num_tokens, unimodal_depth, multimodal_depth, dim_latents=None, image_dim=None, num_img_queries=256, dim_head=64, heads=8, ff_mult=4, img_encoder=None, caption_loss_weight=1.0, contrastive_loss_weight=1.0, pad_id=0) |
22,463 | coca_pytorch.coca_pytorch | embed_image | null | def embed_image(self, images=None, image_tokens=None):
# encode images into embeddings
# with the img_encoder passed in at init
# it can also accept precomputed image tokens
assert not (exists(images) and exists(image_tokens))
if exists(images):
assert exists(self.img_encoder), 'img_encoder must be passed in for automatic image encoding'
image_tokens = self.img_encoder(images)
# attention pool image tokens
img_queries = repeat(self.img_queries, 'n d -> b n d', b=image_tokens.shape[0])
img_queries = self.img_attn_pool(img_queries, image_tokens)
img_queries = self.img_attn_pool_norm(img_queries)
return img_queries[:, 0], img_queries[:, 1:]
| (self, images=None, image_tokens=None) |
22,464 | coca_pytorch.coca_pytorch | embed_text | null | def embed_text(self, text):
batch, device = text.shape[0], text.device
seq = text.shape[1]
text_tokens = self.token_emb(text)
# append text cls tokens
text_cls_tokens = repeat(self.text_cls_token, 'd -> b 1 d', b=batch)
text_tokens = torch.cat((text_tokens, text_cls_tokens), dim=-2)
# create specific mask for text cls token at the end
# to prevent it from attending to padding
cls_mask = rearrange(text!=self.pad_id, 'b j -> b 1 j')
attn_mask = F.pad(cls_mask, (0, 1, seq, 0), value=True)
# go through unimodal layers
for attn_ff in self.unimodal_layers:
text_tokens = attn_ff(text_tokens, attn_mask=attn_mask)
# get text cls token
text_tokens, text_cls_tokens = text_tokens[:, :-1], text_tokens[:, -1]
text_embeds = self.text_cls_norm(text_cls_tokens)
return text_embeds, text_tokens
| (self, text) |
22,468 | coca_pytorch.coca_pytorch | forward | null | def forward(
self,
text,
images=None,
image_tokens=None,
labels=None,
return_loss=False,
return_embeddings=False
):
batch, device = text.shape[0], text.device
if return_loss and not exists(labels):
text, labels = text[:, :-1], text[:, 1:]
text_embeds, text_tokens = self.embed_text(text)
image_embeds, image_tokens = self.embed_image(images=images, image_tokens=image_tokens)
# return embeddings if that is what the researcher wants
if return_embeddings:
return text_embeds, image_embeds
# go through multimodal layers
for attn_ff, cross_attn in self.multimodal_layers:
text_tokens = attn_ff(text_tokens)
text_tokens = cross_attn(text_tokens, image_tokens)
logits = self.to_logits(text_tokens)
if not return_loss:
return logits
# shorthand
ce = F.cross_entropy
# calculate caption loss (cross entropy loss)
logits = rearrange(logits, 'b n c -> b c n')
caption_loss = ce(logits, labels, ignore_index=self.pad_id)
caption_loss = caption_loss * self.caption_loss_weight
# embedding to latents
text_latents = self.text_to_latents(text_embeds)
image_latents = self.img_to_latents(image_embeds)
# maybe distributed all gather
if self.is_distributed:
latents = torch.stack((text_latents, image_latents), dim = 1)
latents = all_gather(latents)
text_latents, image_latents = latents.unbind(dim = 1)
# calculate contrastive loss
sim = einsum('i d, j d -> i j', text_latents, image_latents)
sim = sim * self.temperature.exp()
contrastive_labels = torch.arange(batch, device=device)
contrastive_loss = (ce(sim, contrastive_labels) + ce(sim.t(), contrastive_labels)) * 0.5
contrastive_loss = contrastive_loss * self.contrastive_loss_weight
return caption_loss + contrastive_loss
| (self, text, images=None, image_tokens=None, labels=None, return_loss=False, return_embeddings=False) |
22,503 | salem.sio | DataArrayAccessor | null | class DataArrayAccessor(_XarrayAccessorBase):
def quick_map(self, ax=None, interp='nearest', **kwargs):
"""Make a plot of the DataArray."""
return self._quick_map(self._obj, ax=ax, interp=interp, **kwargs)
def deacc(self, as_rate=True):
"""De-accumulates the variable (i.e. compute the variable's rate).
The returned variable has one element less over the time dimension.
The default is to return in units per hour.
Parameters
----------
as_rate: bool
set to false if you don't want units per hour,
but units per given data timestep
"""
out = self._obj[{self.t_dim: slice(1, len(self._obj[self.t_dim]))}]
diff = self._obj[{self.t_dim: slice(0, len(self._obj[self.t_dim])-1)}]
out.values = out.values - diff.values
out.attrs['description'] = out.attrs['description'].replace('ACCUMULATED ', '')
if as_rate:
dth = self._obj.time[1].values - self._obj.time[0].values
dth = dth.astype('timedelta64[h]').astype(float)
out.values = out.values / dth
out.attrs['units'] += ' h-1'
else:
out.attrs['units'] += ' step-1'
return out
def interpz(self, zcoord, levels, dim_name='', fill_value=np.NaN,
use_multiprocessing=True):
"""Interpolates the array along the vertical dimension
Parameters
----------
zcoord: DataArray
the z coordinates of the variable. Must be of same dimensions
levels: 1dArray
the levels at which to interpolate
dim_name: str
the name of the new dimension
fill_value : np.NaN or 'extrapolate', optional
how to handle levels below the topography. Default is to mark them
            as invalid, but you might want to have them extrapolated.
use_multiprocessing: bool
set to false if, for some reason, you don't want to use mp
Returns
-------
a new DataArray with the interpolated data
"""
if self.z_dim is None:
            raise RuntimeError('z dimension not recognized')
data = wrftools.interp3d(self._obj.values, zcoord.values,
np.atleast_1d(levels), fill_value=fill_value,
use_multiprocessing=use_multiprocessing)
dims = list(self._obj.dims)
zd = np.nonzero([self.z_dim == d for d in dims])[0][0]
dims[zd] = dim_name
coords = dict(self._obj.coords)
coords.pop(self.z_dim, None)
coords[dim_name] = np.atleast_1d(levels)
out = xr.DataArray(data, name=self._obj.name, dims=dims, coords=coords)
out.attrs['pyproj_srs'] = self.grid.proj.srs
if not np.asarray(levels).shape:
out = out.isel(**{dim_name: 0})
return out
| (xarray_obj) |
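A hedged sketch of the accessor in use; `wrfout_d01.nc` is a hypothetical WRF output file, and the variable names `RAINNC` (accumulated precipitation), `T`, `T2`, and `Z` (geopotential height) are assumptions about its contents:

```python
import salem

ds = salem.open_wrf_dataset('wrfout_d01.nc')    # hypothetical file

# De-accumulate total precipitation into a rate (appends ' h-1' to units).
prcp_rate = ds['RAINNC'].salem.deacc()

# Interpolate temperature onto fixed heights, given a height coordinate.
t_on_z = ds['T'].salem.interpz(ds['Z'], [3000., 5000.], dim_name='z')

# Quick look at a single 2D slice.
ds['T2'].isel(time=0).salem.quick_map()
```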
22,504 | salem.sio | __init__ | null | def __init__(self, xarray_obj):
self._obj = xarray_obj
if isinstance(xarray_obj, xr.DataArray):
xarray_obj = xarray_obj.to_dataset(name='var')
try: # maybe there was already some georef
xarray_obj.attrs['pyproj_srs'] = xarray_obj['var'].pyproj_srs
except:
pass
self.grid = grid_from_dataset(xarray_obj)
if self.grid is None:
raise RuntimeError('dataset Grid not understood.')
dn = xarray_obj.sizes.keys()
self.x_dim = utils.str_in_list(dn, utils.valid_names['x_dim'])[0]
self.y_dim = utils.str_in_list(dn, utils.valid_names['y_dim'])[0]
dim = utils.str_in_list(dn, utils.valid_names['t_dim'])
self.t_dim = dim[0] if dim else None
dim = utils.str_in_list(dn, utils.valid_names['z_dim'])
self.z_dim = dim[0] if dim else None
| (self, xarray_obj) |
22,505 | salem.sio | _apply_transform | Common transform mixin | def _apply_transform(self, transform, grid, other, return_lut=False):
"""Common transform mixin"""
was_dataarray = False
if not isinstance(other, xr.Dataset):
try:
other = other.to_dataset(name=other.name)
was_dataarray = True
except AttributeError:
# must be a ndarray
if return_lut:
rdata, lut = transform(other, grid=grid, return_lut=True)
else:
rdata = transform(other, grid=grid)
# let's guess
sh = rdata.shape
nd = len(sh)
if nd == 2:
dims = (self.y_dim, self.x_dim)
elif nd == 3:
newdim = 'new_dim'
if self.t_dim and sh[0] == self._obj.sizes[self.t_dim]:
newdim = self.t_dim
if self.z_dim and sh[0] == self._obj.sizes[self.z_dim]:
newdim = self.z_dim
dims = (newdim, self.y_dim, self.x_dim)
else:
raise NotImplementedError('more than 3 dims not ok yet.')
coords = {}
for d in dims:
if d in self._obj:
coords[d] = self._obj[d]
out = xr.DataArray(rdata, coords=coords, dims=dims)
out.attrs['pyproj_srs'] = self.grid.proj.srs
if return_lut:
return out, lut
else:
return out
# go
out = xr.Dataset()
for v in other.data_vars:
var = other[v]
if return_lut:
rdata, lut = transform(var, return_lut=True)
else:
rdata = transform(var)
# remove old coords
dims = [d for d in var.dims]
coords = {}
for c in var.coords:
n = utils.str_in_list([c], utils.valid_names['x_dim'])
if n:
dims = [self.x_dim if x in n else x for x in dims]
continue
n = utils.str_in_list([c], utils.valid_names['y_dim'])
if n:
dims = [self.y_dim if x in n else x for x in dims]
continue
coords[c] = var.coords[c]
# add new ones
coords[self.x_dim] = self._obj[self.x_dim]
coords[self.y_dim] = self._obj[self.y_dim]
rdata = xr.DataArray(rdata, coords=coords, attrs=var.attrs,
dims=dims)
rdata.attrs['pyproj_srs'] = self.grid.proj.srs
out[v] = rdata
if was_dataarray:
out = out[v]
else:
out.attrs['pyproj_srs'] = self.grid.proj.srs
if return_lut:
return out, lut
else:
return out
| (self, transform, grid, other, return_lut=False) |
22,506 | salem.sio | _quick_map | Make a plot of a data array. | def _quick_map(self, obj, ax=None, interp='nearest', **kwargs):
"""Make a plot of a data array."""
# some metadata?
title = obj.name or ''
if obj._title_for_slice():
title += ' (' + obj._title_for_slice() + ')'
cb = obj.attrs['units'] if 'units' in obj.attrs else ''
smap = self.get_map(**kwargs)
smap.set_data(obj.values, interp=interp)
smap.visualize(ax=ax, title=title, cbar_title=cb)
return smap
| (self, obj, ax=None, interp='nearest', **kwargs) |
22,507 | salem.sio | cartopy | Get a cartopy.crs.Projection for this dataset. | def cartopy(self):
"""Get a cartopy.crs.Projection for this dataset."""
return proj_to_cartopy(self.grid.proj)
| (self) |
22,508 | salem.sio | deacc | De-accumulates the variable (i.e. computes the variable's rate).
The returned variable has one element less over the time dimension.
The default is to return in units per hour.
Parameters
----------
as_rate: bool
set to false if you don't want units per hour,
but units per given data timestep
| def deacc(self, as_rate=True):
"""De-accumulates the variable (i.e. compute the variable's rate).
The returned variable has one element less over the time dimension.
The default is to return in units per hour.
Parameters
----------
as_rate: bool
set to false if you don't want units per hour,
but units per given data timestep
"""
out = self._obj[{self.t_dim: slice(1, len(self._obj[self.t_dim]))}]
diff = self._obj[{self.t_dim: slice(0, len(self._obj[self.t_dim])-1)}]
out.values = out.values - diff.values
out.attrs['description'] = out.attrs['description'].replace('ACCUMULATED ', '')
if as_rate:
dth = self._obj.time[1].values - self._obj.time[0].values
dth = dth.astype('timedelta64[h]').astype(float)
out.values = out.values / dth
out.attrs['units'] += ' h-1'
else:
out.attrs['units'] += ' step-1'
return out
| (self, as_rate=True) |
22,509 | salem.sio | get_map | Make a salem.Map out of the dataset.
All keywords are passed to :py:class:salem.Map
| def get_map(self, **kwargs):
"""Make a salem.Map out of the dataset.
All keywords are passed to :py:class:salem.Map
"""
from salem.graphics import Map
return Map(self.grid, **kwargs)
| (self, **kwargs) |
22,510 | salem.sio | interpz | Interpolates the array along the vertical dimension
Parameters
----------
zcoord: DataArray
the z coordinates of the variable. Must be of same dimensions
levels: 1dArray
the levels at which to interpolate
dim_name: str
the name of the new dimension
fill_value : np.NaN or 'extrapolate', optional
how to handle levels below the topography. Default is to mark them
as invalid, but you might want to have them extrapolated.
use_multiprocessing: bool
set to false if, for some reason, you don't want to use mp
Returns
-------
a new DataArray with the interpolated data
| def interpz(self, zcoord, levels, dim_name='', fill_value=np.NaN,
use_multiprocessing=True):
"""Interpolates the array along the vertical dimension
Parameters
----------
zcoord: DataArray
the z coordinates of the variable. Must be of same dimensions
levels: 1dArray
the levels at which to interpolate
dim_name: str
the name of the new dimension
fill_value : np.NaN or 'extrapolate', optional
how to handle levels below the topography. Default is to mark them
        as invalid, but you might want to have them extrapolated.
use_multiprocessing: bool
set to false if, for some reason, you don't want to use mp
Returns
-------
a new DataArray with the interpolated data
"""
if self.z_dim is None:
        raise RuntimeError('z dimension not recognized')
data = wrftools.interp3d(self._obj.values, zcoord.values,
np.atleast_1d(levels), fill_value=fill_value,
use_multiprocessing=use_multiprocessing)
dims = list(self._obj.dims)
zd = np.nonzero([self.z_dim == d for d in dims])[0][0]
dims[zd] = dim_name
coords = dict(self._obj.coords)
coords.pop(self.z_dim, None)
coords[dim_name] = np.atleast_1d(levels)
out = xr.DataArray(data, name=self._obj.name, dims=dims, coords=coords)
out.attrs['pyproj_srs'] = self.grid.proj.srs
if not np.asarray(levels).shape:
out = out.isel(**{dim_name: 0})
return out
| (self, zcoord, levels, dim_name='', fill_value=nan, use_multiprocessing=True) |
22,511 | salem.sio | lookup_transform | Reprojects another Dataset or DataArray onto self using the
forward transform lookup.
See : :py:meth:`Grid.lookup_transform`
Parameters
----------
other: Dataset, DataArray or ndarray
the data to project onto self
grid: salem.Grid
in case the input dataset does not carry georef info
method : function, default: np.mean
the aggregation method. Possibilities: np.std, np.median, np.sum,
and more. Use ``len`` to count the number of grid points!
lut : ndarray, optional
computing the lookup table can be expensive. If you have several
operations to do with the same grid, set ``lut`` to an existing
table obtained from a previous call to :py:meth:`Grid.grid_lookup`
return_lut : bool, optional
set to True if you want to return the lookup table for later use.
in this case, returns a tuple
Returns
-------
a dataset or a dataarray
If ``return_lut==True``, also return the lookup table
| def lookup_transform(self, other, grid=None, method=np.mean, lut=None,
return_lut=False):
"""Reprojects an other Dataset or DataArray onto self using the
forward tranform lookup.
See : :py:meth:`Grid.lookup_transform`
Parameters
----------
other: Dataset, DataArray or ndarray
the data to project onto self
grid: salem.Grid
in case the input dataset does not carry georef info
method : function, default: np.mean
the aggregation method. Possibilities: np.std, np.median, np.sum,
and more. Use ``len`` to count the number of grid points!
lut : ndarray, optional
computing the lookup table can be expensive. If you have several
operations to do with the same grid, set ``lut`` to an existing
table obtained from a previous call to :py:meth:`Grid.grid_lookup`
return_lut : bool, optional
set to True if you want to return the lookup table for later use.
in this case, returns a tuple
Returns
-------
a dataset or a dataarray
If ``return_lut==True``, also return the lookup table
"""
transform = partial(self.grid.lookup_transform, method=method, lut=lut)
return self._apply_transform(transform, grid, other,
return_lut=return_lut)
| (self, other, grid=None, method=np.mean, lut=None, return_lut=False) |
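A sketch of ``lookup_transform`` usage ('coarse.nc' and 'fine.nc' are hypothetical georeferenced files):
import salem
coarse = salem.open_xr_dataset('coarse.nc')
fine = salem.open_xr_dataset('fine.nc')
# aggregate the fine-resolution data onto the coarse grid and keep
# the lookup table, then reuse it to count points per grid cell
mean_vals, lut = coarse.salem.lookup_transform(fine, return_lut=True)
counts = coarse.salem.lookup_transform(fine, method=len, lut=lut)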
22,512 | salem.sio | quick_map | Make a plot of the DataArray. | def quick_map(self, ax=None, interp='nearest', **kwargs):
"""Make a plot of the DataArray."""
return self._quick_map(self._obj, ax=ax, interp=interp, **kwargs)
| (self, ax=None, interp='nearest', **kwargs) |
22,513 | salem.sio | roi | roi(self, shape=None, geometry=None, grid=None, corners=None,
crs=wgs84, roi=None, all_touched=False, other=None)
Make a region of interest (ROI) for the dataset.
All grid points outside the ROI will be masked out.
Parameters
----------
ds : Dataset or DataArray
form the ROI from the extent of the Dataset or DataArray
shape : str
path to a shapefile
geometry : geometry
a shapely geometry (don't forget the crs keyword)
grid : Grid
a Grid object whose extent will form the ROI
corners : tuple
a ((x0, y0), (x1, y1)) tuple of the corners of the square
to subset the dataset to (don't forget the crs keyword)
crs : crs, default wgs84
coordinate reference system of the geometry and corners
roi : ndarray
if you have a mask ready, you can give it here
all_touched : boolean
pass-through argument for rasterio.features.rasterize, indicating
that all grid cells which are clipped by the shapefile defining
the region of interest should be included (default=False)
other : scalar, DataArray or Dataset, optional
Value to use for locations in this object where cond is False. By
default, these locations are filled with NA.
As in http://xarray.pydata.org/en/stable/generated/xarray.DataArray.where.html
| def roi(self, ds=None, **kwargs):
"""roi(self, shape=None, geometry=None, grid=None, corners=None,
crs=wgs84, roi=None, all_touched=False, other=None)
Make a region of interest (ROI) for the dataset.
All grid points outside the ROI will be masked out.
Parameters
----------
ds : Dataset or DataArray
form the ROI from the extent of the Dataset or DataArray
shape : str
path to a shapefile
geometry : geometry
a shapely geometry (don't forget the crs keyword)
grid : Grid
a Grid object whose extent will form the ROI
corners : tuple
a ((x0, y0), (x1, y1)) tuple of the corners of the square
to subset the dataset to (don't forget the crs keyword)
crs : crs, default wgs84
coordinate reference system of the geometry and corners
roi : ndarray
if you have a mask ready, you can give it here
all_touched : boolean
pass-through argument for rasterio.features.rasterize, indicating
that all grid cells which are clipped by the shapefile defining
the region of interest should be included (default=False)
other : scalar, DataArray or Dataset, optional
Value to use for locations in this object where cond is False. By
default, these locations are filled with NA.
As in http://xarray.pydata.org/en/stable/generated/xarray.DataArray.where.html
"""
other = kwargs.pop('other', dtypes.NA)
if ds is not None:
grid = ds.salem.grid
kwargs.setdefault('grid', grid)
mask = self.grid.region_of_interest(**kwargs)
out = self._obj.where(mask, other=other)
# keep attrs and encoding
out.attrs = self._obj.attrs
out.encoding = self._obj.encoding
if isinstance(out, xr.Dataset):
for v in self._obj.variables:
out[v].encoding = self._obj[v].encoding
if isinstance(out, xr.DataArray):
out.name = self._obj.name
# add pyproj string everywhere
out.attrs['pyproj_srs'] = self.grid.proj.srs
if isinstance(out, xr.Dataset):
for v in out.data_vars:
out.variables[v].attrs = self._obj.variables[v].attrs
out.variables[v].attrs['pyproj_srs'] = self.grid.proj.srs
return out
| (self, ds=None, **kwargs) |
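A sketch of ``roi`` usage ('data.nc' and 'basin.shp' are hypothetical files; the shapefile is assumed to be in wgs84):
import salem
ds = salem.open_xr_dataset('data.nc')
# mask out everything outside the shapefile's polygons
masked = ds.salem.roi(shape='basin.shp')
# same, but fill the outside with 0 instead of NA
masked0 = ds.salem.roi(shape='basin.shp', other=0)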
22,514 | salem.sio | subset | subset(self, margin=0, shape=None, geometry=None, grid=None,
corners=None, crs=wgs84, roi=None)
Get a subset of the dataset.
Accepts all keywords of :py:func:`~Grid.roi`
Parameters
----------
ds : Dataset or DataArray
form the ROI from the extent of the Dataset or DataArray
shape : str
path to a shapefile
geometry : geometry
a shapely geometry (don't forget the crs keyword)
grid : Grid
a Grid object whose extent will form the ROI
corners : tuple
a ((x0, y0), (x1, y1)) tuple of the corners of the square
to subset the dataset to (don't forget the crs keyword)
crs : crs, default wgs84
coordinate reference system of the geometry and corners
roi : ndarray
a mask for the region of interest to subset the dataset onto
margin : int
add a margin to the region to subset (can be negative!). Can
be used as a single keyword, too: subset(margin=-5) will remove
five pixels from each boundary of the dataset.
| def subset(self, margin=0, ds=None, **kwargs):
"""subset(self, margin=0, shape=None, geometry=None, grid=None,
corners=None, crs=wgs84, roi=None)
Get a subset of the dataset.
Accepts all keywords of :py:func:`~Grid.roi`
Parameters
----------
ds : Dataset or DataArray
form the ROI from the extent of the Dataset or DataArray
shape : str
path to a shapefile
geometry : geometry
a shapely geometry (don't forget the crs keyword)
grid : Grid
a Grid object whose extent will form the ROI
corners : tuple
a ((x0, y0), (x1, y1)) tuple of the corners of the square
to subset the dataset to (don't forget the crs keyword)
crs : crs, default wgs84
coordinate reference system of the geometry and corners
roi : ndarray
a mask for the region of interest to subset the dataset onto
margin : int
add a margin to the region to subset (can be negative!). Can
be used as a single keyword, too: subset(margin=-5) will remove
five pixels from each boundary of the dataset.
"""
if ds is not None:
grid = ds.salem.grid
kwargs.setdefault('grid', grid)
mask = self.grid.region_of_interest(**kwargs)
if not kwargs:
# user just wants a margin
mask[:] = 1
ids = np.nonzero(mask)
sub_x = [np.min(ids[1]) - margin, np.max(ids[1]) + margin]
sub_y = [np.min(ids[0]) - margin, np.max(ids[0]) + margin]
out_ds = self._obj[{self.x_dim: slice(sub_x[0], sub_x[1]+1),
self.y_dim: slice(sub_y[0], sub_y[1]+1)}
]
return out_ds
| (self, margin=0, ds=None, **kwargs) |
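A sketch of ``subset`` usage ('data.nc' is a hypothetical georeferenced file; the corner coordinates are arbitrary):
import salem
ds = salem.open_xr_dataset('data.nc')
# crop to a lon/lat rectangle (corners in wgs84 by default),
# keeping a 2-pixel margin around it
sub = ds.salem.subset(corners=((10., 45.), (12., 47.)), margin=2)
# or remove 5 pixels from each boundary of the dataset
trimmed = ds.salem.subset(margin=-5)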
22,515 | salem.sio | transform | Reprojects another Dataset or DataArray onto self.
The returned object has the same data structure as ``other`` (i.e.
variables names, attributes), but is defined on the new grid
(``self.grid``).
Parameters
----------
other: Dataset, DataArray or ndarray
the data to project onto self
grid: salem.Grid
in case the input dataset does not carry georef info
interp : str
'nearest' (default), 'linear', or 'spline'
ks : int
Degree of the bivariate spline. Default is 3.
Returns
-------
a dataset or a dataarray
| def transform(self, other, grid=None, interp='nearest', ks=3):
"""Reprojects an other Dataset or DataArray onto self.
The returned object has the same data structure as ``other`` (i.e.
variables names, attributes), but is defined on the new grid
(``self.grid``).
Parameters
----------
other: Dataset, DataArray or ndarray
the data to project onto self
grid: salem.Grid
in case the input dataset does not carry georef info
interp : str
'nearest' (default), 'linear', or 'spline'
ks : int
Degree of the bivariate spline. Default is 3.
Returns
-------
a dataset or a dataarray
"""
transform = partial(self.grid.map_gridded_data, interp=interp, ks=ks)
return self._apply_transform(transform, grid, other)
| (self, other, grid=None, interp='nearest', ks=3) |
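A sketch of ``transform`` usage (both file paths are hypothetical; each file is assumed to carry georeferencing that salem understands):
import salem
target = salem.open_xr_dataset('target.nc')
other = salem.open_xr_dataset('other.nc')
# nearest-neighbour reprojection of `other` onto target's grid
on_target = target.salem.transform(other)
# linear interpolation instead
on_target_lin = target.salem.transform(other, interp='linear')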
22,516 | salem | DataLevels | null | def DataLevels():
raise ImportError('requires matplotlib')
| () |
22,517 | salem.sio | DatasetAccessor | null | class DatasetAccessor(_XarrayAccessorBase):
def quick_map(self, varname, ax=None, interp='nearest', **kwargs):
"""Make a plot of a variable of the DataSet."""
return self._quick_map(self._obj[varname], ax=ax, interp=interp,
**kwargs)
def transform_and_add(self, other, grid=None, interp='nearest', ks=3,
name=None):
"""Reprojects an other Dataset and adds it's content to the current one.
Parameters
----------
other: Dataset, DataArray or ndarray
the data to project onto self
grid: salem.Grid
in case the input dataset does not carry georef info
interp : str
'nearest' (default), 'linear', or 'spline'
ks : int
Degree of the bivariate spline. Default is 3.
name: str or dict-like
how to name the new variables in self. By default the new variables
keep their names (an error is raised in case of
conflict). Set to a str to rename the variable (if unique) or
to a dict mapping the old names to the new names for
datasets.
"""
out = self.transform(other, grid=grid, interp=interp, ks=ks)
if isinstance(out, xr.DataArray):
new_name = name or out.name
if new_name is None:
raise ValueError('You need to set name')
self._obj[new_name] = out
else:
for v in out.data_vars:
try:
new_name = name[v]
except (KeyError, TypeError):
new_name = v
self._obj[new_name] = out[v]
def wrf_zlevel(self, varname, levels=None, fill_value=np.NaN,
use_multiprocessing=True):
"""Interpolates to a specified height above sea level.
Parameters
----------
varname: str
the name of the variable to interpolate
levels: 1d array
levels at which to interpolate (default: some levels I thought of)
fill_value : np.NaN or 'extrapolate', optional
how to handle levels below the topography. Default is to mark them
as invalid, but you might want to have them extrapolated.
use_multiprocessing: bool
set to false if, for some reason, you don't want to use mp
Returns
-------
an interpolated DataArray
"""
if levels is None:
levels = np.array([10, 20, 30, 50, 75, 100, 200, 300, 500, 750,
1000, 2000, 3000, 5000, 7500, 10000])
zcoord = self._obj['Z']
out = self._obj[varname].salem.interpz(zcoord, levels, dim_name='z',
fill_value=fill_value,
use_multiprocessing=use_multiprocessing)
out['z'].attrs['description'] = 'height above sea level'
out['z'].attrs['units'] = 'm'
return out
def wrf_plevel(self, varname, levels=None, fill_value=np.NaN,
use_multiprocessing=True):
"""Interpolates to a specified pressure level (hPa).
Parameters
----------
varname: str
the name of the variable to interpolate
levels: 1d array
levels at which to interpolate (default: some levels I thought of)
fill_value : np.NaN or 'extrapolate', optional
how to handle levels below the topography. Default is to mark them
as invalid, but you might want to have them extrapolated.
use_multiprocessing: bool
set to false if, for some reason, you don't want to use mp
Returns
-------
an interpolated DataArray
"""
if levels is None:
levels = np.array([1000, 975, 950, 925, 900, 850, 800, 750, 700,
650, 600, 550, 500, 450, 400, 300, 200, 100])
zcoord = self._obj['PRESSURE']
out = self._obj[varname].salem.interpz(zcoord, levels, dim_name='p',
fill_value=fill_value,
use_multiprocessing=use_multiprocessing)
out['p'].attrs['description'] = 'pressure'
out['p'].attrs['units'] = 'hPa'
return out
| (xarray_obj) |
22,524 | salem.sio | quick_map | Make a plot of a variable of the Dataset. | def quick_map(self, varname, ax=None, interp='nearest', **kwargs):
"""Make a plot of a variable of the Dataset."""
return self._quick_map(self._obj[varname], ax=ax, interp=interp,
**kwargs)
| (self, varname, ax=None, interp='nearest', **kwargs) |
22,528 | salem.sio | transform_and_add | Reprojects another Dataset and adds its content to the current one.
Parameters
----------
other: Dataset, DataArray or ndarray
the data to project onto self
grid: salem.Grid
in case the input dataset does not carry georef info
interp : str
'nearest' (default), 'linear', or 'spline'
ks : int
Degree of the bivariate spline. Default is 3.
name: str or dict-like
how to name the new variables in self. By default the new variables
keep their names (an error is raised in case of
conflict). Set to a str to rename the variable (if unique) or
to a dict mapping the old names to the new names for
datasets.
| def transform_and_add(self, other, grid=None, interp='nearest', ks=3,
name=None):
"""Reprojects an other Dataset and adds it's content to the current one.
Parameters
----------
other: Dataset, DataArray or ndarray
the data to project onto self
grid: salem.Grid
in case the input dataset does not carry georef info
interp : str
'nearest' (default), 'linear', or 'spline'
ks : int
Degree of the bivariate spline. Default is 3.
name: str or dict-like
how to name the new variables in self. By default the new variables
keep their names (an error is raised in case of
conflict). Set to a str to rename the variable (if unique) or
to a dict mapping the old names to the new names for
datasets.
"""
out = self.transform(other, grid=grid, interp=interp, ks=ks)
if isinstance(out, xr.DataArray):
new_name = name or out.name
if new_name is None:
raise ValueError('You need to set name')
self._obj[new_name] = out
else:
for v in out.data_vars:
try:
new_name = name[v]
except (KeyError, TypeError):
new_name = v
self._obj[new_name] = out[v]
| (self, other, grid=None, interp='nearest', ks=3, name=None) |
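A sketch of ``transform_and_add`` usage ('grid.nc', 'topo.nc' and the variable name 'hgt' are hypothetical):
import salem
ds = salem.open_xr_dataset('grid.nc')
topo = salem.open_xr_dataset('topo.nc')
# reproject the topography and store it under a new name in `ds`
ds.salem.transform_and_add(topo['hgt'], interp='linear', name='topo')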
22,529 | salem.sio | wrf_plevel | Interpolates to a specified pressure level (hPa).
Parameters
----------
varname: str
the name of the variable to interpolate
levels: 1d array
levels at which to interpolate (default: some levels I thought of)
fill_value : np.NaN or 'extrapolate', optional
how to handle levels below the topography. Default is to mark them
as invalid, but you might want to have them extrapolated.
use_multiprocessing: bool
set to false if, for some reason, you don't want to use mp
Returns
-------
an interpolated DataArray
| def wrf_plevel(self, varname, levels=None, fill_value=np.NaN,
use_multiprocessing=True):
"""Interpolates to a specified pressure level (hPa).
Parameters
----------
varname: str
the name of the variable to interpolate
levels: 1d array
levels at which to interpolate (default: some levels I thought of)
fill_value : np.NaN or 'extrapolate', optional
how to handle levels below the topography. Default is to mark them
as invalid, but you might want to have them extrapolated.
use_multiprocessing: bool
set to false if, for some reason, you don't want to use mp
Returns
-------
an interpolated DataArray
"""
if levels is None:
levels = np.array([1000, 975, 950, 925, 900, 850, 800, 750, 700,
650, 600, 550, 500, 450, 400, 300, 200, 100])
zcoord = self._obj['PRESSURE']
out = self._obj[varname].salem.interpz(zcoord, levels, dim_name='p',
fill_value=fill_value,
use_multiprocessing=use_multiprocessing)
out['p'].attrs['description'] = 'pressure'
out['p'].attrs['units'] = 'hPa'
return out
| (self, varname, levels=None, fill_value=nan, use_multiprocessing=True) |
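A sketch of ``wrf_plevel`` usage ('wrfout.nc' is a hypothetical WRF output file; 'TK' is a diagnostic that salem's WRF reader typically provides):
import salem
ds = salem.open_wrf_dataset('wrfout.nc')
# temperature at 850 and 500 hPa, extrapolated below the topography
t_p = ds.salem.wrf_plevel('TK', levels=[850., 500.],
                          fill_value='extrapolate')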
22,530 | salem.sio | wrf_zlevel | Interpolates to a specified height above sea level.
Parameters
----------
varname: str
the name of the variable to interpolate
levels: 1d array
levels at which to interpolate (default: some levels I thought of)
fill_value : np.NaN or 'extrapolate', optional
how to handle levels below the topography. Default is to mark them
as invalid, but you might want to have them extrapolated.
use_multiprocessing: bool
set to false if, for some reason, you don't want to use mp
Returns
-------
an interpolated DataArray
| def wrf_zlevel(self, varname, levels=None, fill_value=np.NaN,
use_multiprocessing=True):
"""Interpolates to a specified height above sea level.
Parameters
----------
varname: str
the name of the variable to interpolate
levels: 1d array
levels at which to interpolate (default: some levels I thought of)
fill_value : np.NaN or 'extrapolate', optional
how to handle levels below the topography. Default is to mark them
as invalid, but you might want to have them extrapolated.
use_multiprocessing: bool
set to false if, for some reason, you don't want to use mp
Returns
-------
an interpolated DataArray
"""
if levels is None:
levels = np.array([10, 20, 30, 50, 75, 100, 200, 300, 500, 750,
1000, 2000, 3000, 5000, 7500, 10000])
zcoord = self._obj['Z']
out = self._obj[varname].salem.interpz(zcoord, levels, dim_name='z',
fill_value=fill_value,
use_multiprocessing=use_multiprocessing)
out['z'].attrs['description'] = 'height above sea level'
out['z'].attrs['units'] = 'm'
return out
| (self, varname, levels=None, fill_value=nan, use_multiprocessing=True) |
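And the height-level counterpart, with the default level set (same assumptions as the ``wrf_plevel`` sketch above; 'WS' is assumed to be an available wind-speed diagnostic):
import salem
ds = salem.open_wrf_dataset('wrfout.nc')
ws_z = ds.salem.wrf_zlevel('WS')  # wind speed on the default heights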
22,531 | salem.datasets | EsriITMIX | Open ITMIX geolocalised Esri ASCII images (needs rasterio). | class EsriITMIX(GeoDataset):
"""Open ITMIX geolocalised Esri ASCII images (needs rasterio)."""
def __init__(self, file):
"""Open the file.
Parameters
----------
file: path to the file
"""
bname = os.path.basename(file).split('.')[0]
pok = bname.find('UTM')
if pok == -1:
raise ValueError(file + ' does not seem to be an ITMIX file.')
zone = int(bname[pok+3:])
south = False
if zone < 0:
south = True
zone = -zone
proj = pyproj.Proj(proj='utm', zone=zone, ellps='WGS84',
south=south)
# brutally efficient
with rasterio.Env():
with rasterio.open(file) as src:
nxny = (src.width, src.height)
ul_corner = (src.bounds.left, src.bounds.top)
dxdy = (src.res[0], -src.res[1])
grid = Grid(x0y0=ul_corner, nxny=nxny, dxdy=dxdy,
pixel_ref='corner', proj=proj)
# done
self.file = file
GeoDataset.__init__(self, grid)
def get_vardata(self, var_id=1):
"""Read the geotiff band.
Parameters
----------
var_id: the variable name (here the band number)
"""
wx = (self.sub_x[0], self.sub_x[1]+1)
wy = (self.sub_y[0], self.sub_y[1]+1)
with rasterio.Env():
with rasterio.open(self.file) as src:
band = src.read(var_id, window=(wy, wx))
return band
| (file) |
22,532 | salem.datasets | __init__ | Open the file.
Parameters
----------
file: path to the file
| def __init__(self, file):
"""Open the file.
Parameters
----------
file: path to the file
"""
bname = os.path.basename(file).split('.')[0]
pok = bname.find('UTM')
if pok == -1:
raise ValueError(file + ' does not seem to be an ITMIX file.')
zone = int(bname[pok+3:])
south = False
if zone < 0:
south = True
zone = -zone
proj = pyproj.Proj(proj='utm', zone=zone, ellps='WGS84',
south=south)
# brutally efficient
with rasterio.Env():
with rasterio.open(file) as src:
nxny = (src.width, src.height)
ul_corner = (src.bounds.left, src.bounds.top)
dxdy = (src.res[0], -src.res[1])
grid = Grid(x0y0=ul_corner, nxny=nxny, dxdy=dxdy,
pixel_ref='corner', proj=proj)
# done
self.file = file
GeoDataset.__init__(self, grid)
| (self, file) |
22,533 | salem.datasets | get_vardata | Read the geotiff band.
Parameters
----------
var_id: the variable name (here the band number)
| def get_vardata(self, var_id=1):
"""Read the geotiff band.
Parameters
----------
var_id: the variable name (here the band number)
"""
wx = (self.sub_x[0], self.sub_x[1]+1)
wy = (self.sub_y[0], self.sub_y[1]+1)
with rasterio.Env():
with rasterio.open(self.file) as src:
band = src.read(var_id, window=(wy, wx))
return band
| (self, var_id=1) |
22,534 | salem.datasets | set_period | Set a period of interest for the dataset.
This will be remembered at later calls to time() or GeoDataset's
getvardata implementations.
Parameters
----------
t0: anything that represents a time. Could be a string (e.g
'2012-01-01'), a DateTime, or an index in the dataset's time
t1: same as t0 (inclusive)
| def set_period(self, t0=0, t1=-1):
"""Set a period of interest for the dataset.
This will be remembered at later calls to time() or GeoDataset's
getvardata implementations.
Parameters
----------
t0: anything that represents a time. Could be a string (e.g
'2012-01-01'), a DateTime, or an index in the dataset's time
t1: same as t0 (inclusive)
"""
if self._time is not None:
self.sub_t = [0, -1]
# we don't check what t0 or t1 is; we let Pandas do the job
try:
self.sub_t[0] = self._time.loc[t0]
except KeyError:
self.sub_t[0] = self._time.iloc[t0]
try:
self.sub_t[1] = self._time.loc[t1]
except KeyError:
self.sub_t[1] = self._time.iloc[t1]
self.t0 = self._time.index[self.sub_t[0]]
self.t1 = self._time.index[self.sub_t[1]]
| (self, t0=0, t1=-1) |
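A sketch of ``set_period`` usage ('data.nc' is a hypothetical time-dependent netCDF file):
from salem import GeoNetcdf
d = GeoNetcdf('data.nc')
# periods can be set with anything pandas can parse as a date ...
d.set_period(t0='2012-01-01', t1='2012-12-31')
# ... or with plain integer indexes into the time vector
d.set_period(t0=0, t1=-1)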
22,535 | salem.datasets | set_roi | Set a region of interest for the dataset.
If set successfully, a ROI is simply a mask of the same size as the
dataset's grid, obtained with the .roi attribute.
I haven't decided yet if the data should be masked out when a ROI
has been set.
Parameters
----------
shape: path to a shapefile
geometry: a shapely geometry
crs: the crs of the geometry
grid: a Grid object
corners: a ((x0, y0), (x1, y1)) tuple of the corners of the square
to subset the dataset to. The coordinates are not expressed in
wgs84, set the crs keyword
noerase: set to true in order to add the new ROI to the previous one
| def set_roi(self, shape=None, geometry=None, crs=wgs84, grid=None,
corners=None, noerase=False):
"""Set a region of interest for the dataset.
If set successfully, a ROI is simply a mask of the same size as the
dataset's grid, obtained with the .roi attribute.
I haven't decided yet if the data should be masked out when a ROI
has been set.
Parameters
----------
shape: path to a shapefile
geometry: a shapely geometry
crs: the crs of the geometry
grid: a Grid object
corners: a ((x0, y0), (x1, y1)) tuple of the corners of the square
to subset the dataset to. The coordinates are not expressed in
wgs84, set the crs keyword
noerase: set to true in order to add the new ROI to the previous one
"""
# The rois are always defined on the original grids, but of course
# we take that into account when a subset is set (see roi
# decorator below)
ogrid = self._ogrid
# Initial mask
if noerase and (self.roi is not None):
mask = self.roi
else:
mask = np.zeros((ogrid.ny, ogrid.nx), dtype=np.int16)
# Several cases
if shape is not None:
if isinstance(shape, pd.DataFrame):
gdf = shape
else:
gdf = sio.read_shapefile(shape)
gis.transform_geopandas(gdf, to_crs=ogrid.corner_grid,
inplace=True)
if rasterio is None:
raise ImportError('This feature needs rasterio')
from rasterio.features import rasterize
with rasterio.Env():
mask = rasterize(gdf.geometry, out=mask)
if geometry is not None:
geom = gis.transform_geometry(geometry, crs=crs,
to_crs=ogrid.corner_grid)
if rasterio is None:
raise ImportError('This feature needs rasterio')
from rasterio.features import rasterize
with rasterio.Env():
mask = rasterize(np.atleast_1d(geom), out=mask)
if grid is not None:
_tmp = np.ones((grid.ny, grid.nx), dtype=np.int16)
mask = ogrid.map_gridded_data(_tmp, grid, out=mask).filled(0)
if corners is not None:
cgrid = self._ogrid.center_grid
xy0, xy1 = corners
x0, y0 = cgrid.transform(*xy0, crs=crs, nearest=True)
x1, y1 = cgrid.transform(*xy1, crs=crs, nearest=True)
mask[np.min([y0, y1]):np.max([y0, y1])+1,
np.min([x0, x1]):np.max([x0, x1])+1] = 1
self.roi = mask
| (self, shape=None, geometry=None, crs=wgs84, grid=None, corners=None, noerase=False) |
22,536 | salem.datasets | set_subset | Set a subset for the dataset.
This will be remembered at later calls to GeoDataset's
getvardata implementations.
Parameters
----------
corners: a ((x0, y0), (x1, y1)) tuple of the corners of the square
to subset the dataset to. The coordinates are not expressed in
wgs84, set the crs keyword
crs: the coordinates of the corner coordinates
toroi: set to true to generate the smallest possible subset around
the region of interest set with set_roi()
margin: when doing the subset, add a margin (can be negative!). Can
be used alone: set_subset(margin=-5) will remove five pixels from
each boundary of the dataset.
TODO: shouldn't we make the toroi stuff easier to use?
| def set_subset(self, corners=None, crs=wgs84, toroi=False, margin=0):
"""Set a subset for the dataset.
This will be remembered at later calls to GeoDataset's
getvardata implementations.
Parameters
----------
corners: a ((x0, y0), (x1, y1)) tuple of the corners of the square
to subset the dataset to. The coordinates are not expressed in
wgs84, set the crs keyword
crs: the coordinates of the corner coordinates
toroi: set to true to generate the smallest possible subset around
the region of interest set with set_roi()
margin: when doing the subset, add a margin (can be negative!). Can
be used alone: set_subset(margin=-5) will remove five pixels from
each boundary of the dataset.
TODO: shouldn't we make the toroi stuff easier to use?
"""
# Useful variables
mx = self._ogrid.nx-1
my = self._ogrid.ny-1
cgrid = self._ogrid.center_grid
# Three possible cases
if toroi:
if self.roi is None or np.max(self.roi) == 0:
raise RuntimeError('roi is empty.')
ids = np.nonzero(self.roi)
sub_x = [np.min(ids[1])-margin, np.max(ids[1])+margin]
sub_y = [np.min(ids[0])-margin, np.max(ids[0])+margin]
elif corners is not None:
xy0, xy1 = corners
x0, y0 = cgrid.transform(*xy0, crs=crs, nearest=True)
x1, y1 = cgrid.transform(*xy1, crs=crs, nearest=True)
sub_x = [np.min([x0, x1])-margin, np.max([x0, x1])+margin]
sub_y = [np.min([y0, y1])-margin, np.max([y0, y1])+margin]
else:
# Reset
sub_x = [0-margin, mx+margin]
sub_y = [0-margin, my+margin]
# Some necessary checks
if (np.max(sub_x) < 0) or (np.min(sub_x) > mx) or \
(np.max(sub_y) < 0) or (np.min(sub_y) > my):
raise RuntimeError('subset not valid')
if (sub_x[0] < 0) or (sub_x[1] > mx):
warnings.warn('x0 out of bounds', RuntimeWarning)
if (sub_y[0] < 0) or (sub_y[1] > my):
warnings.warn('y0 out of bounds', RuntimeWarning)
# Make the new grid
sub_x = np.clip(sub_x, 0, mx)
sub_y = np.clip(sub_y, 0, my)
nxny = (sub_x[1] - sub_x[0] + 1, sub_y[1] - sub_y[0] + 1)
dxdy = (self._ogrid.dx, self._ogrid.dy)
xy0 = (self._ogrid.x0 + sub_x[0] * self._ogrid.dx,
self._ogrid.y0 + sub_y[0] * self._ogrid.dy)
self.grid = Grid(proj=self._ogrid.proj, nxny=nxny, dxdy=dxdy, x0y0=xy0)
# If we arrived here, we can safely set the subset
self.sub_x = sub_x
self.sub_y = sub_y
| (self, corners=None, crs=wgs84, toroi=False, margin=0) |
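A sketch combining ``set_roi`` and ``set_subset`` ('data.nc' and 'basin.shp' are hypothetical files):
from salem import GeoNetcdf
d = GeoNetcdf('data.nc')
# crop to a lon/lat rectangle (corners in wgs84 by default)
d.set_subset(corners=((10., 45.), (12., 47.)))
# or shrink to the smallest rectangle containing a shapefile ROI
d.set_roi(shape='basin.shp')
d.set_subset(toroi=True, margin=2)
d.set_subset()  # reset to the full grid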
22,537 | salem.datasets | GeoDataset | Interface for georeferenced datasets.
A GeoDataset is a formalism for gridded data arrays, which are usually
stored in geotiffs or netcdf files. It provides an interface to realise
subsets, compute regions of interest and more.
A GeoDataset makes more sense if it is subclassed for real files,
such as GeoTiff or GeoNetCDF. In that case, the implementations must make
use of the subset indexes provided in the sub_x, sub_y and sub_t
properties.
| class GeoDataset(object):
"""Interface for georeferenced datasets.
A GeoDataset is a formalism for gridded data arrays, which are usually
stored in geotiffs or netcdf files. It provides an interface to realise
subsets, compute regions of interest and more.
A GeoDataset makes more sense if it is subclassed for real files,
such as GeoTiff or GeoNetCDF. In that case, the implementations must make
use of the subset indexes provided in the sub_x, sub_y and sub_t
properties.
"""
def __init__(self, grid, time=None):
"""Set-up the georeferencing, time is optional.
Parameters:
grid: a salem.Grid object which represents the underlying data
time: if the data has a time dimension
"""
# The original grid, always stored
self._ogrid = grid
# The current grid (changes if set_subset() is called)
self.grid = grid
# Default indexes to get in the underlying data (BOTH inclusive,
# i.e. closed [a, b], not half-open [a, b) as in numpy)
self.sub_x = [0, grid.nx-1]
self.sub_y = [0, grid.ny-1]
# Roi is a ny, nx array if set
self.roi = None
self.set_roi()
# _time is a pd.Series because it's so nice to misuse the series.loc
# flexibility (see set_period)
if time is not None:
if isinstance(time, pd.Series):
time = pd.Series(np.arange(len(time)), index=time.index)
else:
try:
time = pd.Series(np.arange(len(time)), index=time)
except AttributeError:
# https://github.com/pandas-dev/pandas/issues/23419
for t in time:
setattr(t, 'nanosecond', 0)
time = pd.Series(np.arange(len(time)), index=time)
self._time = time
# set_period() will set those
self.t0 = None
self.t1 = None
self.sub_t = None
self.set_period()
@property
def time(self):
"""Time array"""
if self._time is None:
return None
return self._time[self.t0:self.t1].index
def set_period(self, t0=0, t1=-1):
"""Set a period of interest for the dataset.
This will be remembered at later calls to time() or GeoDataset's
getvardata implementations.
Parameters
----------
t0: anything that represents a time. Could be a string (e.g
'2012-01-01'), a DateTime, or an index in the dataset's time
t1: same as t0 (inclusive)
"""
if self._time is not None:
self.sub_t = [0, -1]
# we don't check what t0 or t1 is; we let Pandas do the job
try:
self.sub_t[0] = self._time.loc[t0]
except KeyError:
self.sub_t[0] = self._time.iloc[t0]
try:
self.sub_t[1] = self._time.loc[t1]
except KeyError:
self.sub_t[1] = self._time.iloc[t1]
self.t0 = self._time.index[self.sub_t[0]]
self.t1 = self._time.index[self.sub_t[1]]
def set_subset(self, corners=None, crs=wgs84, toroi=False, margin=0):
"""Set a subset for the dataset.
This will be remembered at later calls to GeoDataset's
getvardata implementations.
Parameters
----------
corners: a ((x0, y0), (x1, y1)) tuple of the corners of the square
to subset the dataset to. The coordinates are not expressed in
wgs84, set the crs keyword
crs: the coordinates of the corner coordinates
toroi: set to true to generate the smallest possible subset around
the region of interest set with set_roi()
margin: when doing the subset, add a margin (can be negative!). Can
be used alone: set_subset(margin=-5) will remove five pixels from
each boundary of the dataset.
TODO: shouldn't we make the toroi stuff easier to use?
"""
# Useful variables
mx = self._ogrid.nx-1
my = self._ogrid.ny-1
cgrid = self._ogrid.center_grid
# Three possible cases
if toroi:
if self.roi is None or np.max(self.roi) == 0:
raise RuntimeError('roi is empty.')
ids = np.nonzero(self.roi)
sub_x = [np.min(ids[1])-margin, np.max(ids[1])+margin]
sub_y = [np.min(ids[0])-margin, np.max(ids[0])+margin]
elif corners is not None:
xy0, xy1 = corners
x0, y0 = cgrid.transform(*xy0, crs=crs, nearest=True)
x1, y1 = cgrid.transform(*xy1, crs=crs, nearest=True)
sub_x = [np.min([x0, x1])-margin, np.max([x0, x1])+margin]
sub_y = [np.min([y0, y1])-margin, np.max([y0, y1])+margin]
else:
# Reset
sub_x = [0-margin, mx+margin]
sub_y = [0-margin, my+margin]
# Some necessary checks
if (np.max(sub_x) < 0) or (np.min(sub_x) > mx) or \
(np.max(sub_y) < 0) or (np.min(sub_y) > my):
raise RuntimeError('subset not valid')
if (sub_x[0] < 0) or (sub_x[1] > mx):
warnings.warn('x0 out of bounds', RuntimeWarning)
if (sub_y[0] < 0) or (sub_y[1] > my):
warnings.warn('y0 out of bounds', RuntimeWarning)
# Make the new grid
sub_x = np.clip(sub_x, 0, mx)
sub_y = np.clip(sub_y, 0, my)
nxny = (sub_x[1] - sub_x[0] + 1, sub_y[1] - sub_y[0] + 1)
dxdy = (self._ogrid.dx, self._ogrid.dy)
xy0 = (self._ogrid.x0 + sub_x[0] * self._ogrid.dx,
self._ogrid.y0 + sub_y[0] * self._ogrid.dy)
self.grid = Grid(proj=self._ogrid.proj, nxny=nxny, dxdy=dxdy, x0y0=xy0)
# If we arrived here, we can safely set the subset
self.sub_x = sub_x
self.sub_y = sub_y
def set_roi(self, shape=None, geometry=None, crs=wgs84, grid=None,
corners=None, noerase=False):
"""Set a region of interest for the dataset.
If set successfully, a ROI is simply a mask of the same size as the
dataset's grid, obtained with the .roi attribute.
I haven't decided yet if the data should be masked out when a ROI
has been set.
Parameters
----------
shape: path to a shapefile
geometry: a shapely geometry
crs: the crs of the geometry
grid: a Grid object
corners: a ((x0, y0), (x1, y1)) tuple of the corners of the square
to subset the dataset to. The coordinates are not expressed in
wgs84, set the crs keyword
noerase: set to true in order to add the new ROI to the previous one
"""
# The rois are always defined on the original grids, but of course
# we take that into account when a subset is set (see roi
# decorator below)
ogrid = self._ogrid
# Initial mask
if noerase and (self.roi is not None):
mask = self.roi
else:
mask = np.zeros((ogrid.ny, ogrid.nx), dtype=np.int16)
# Several cases
if shape is not None:
if isinstance(shape, pd.DataFrame):
gdf = shape
else:
gdf = sio.read_shapefile(shape)
gis.transform_geopandas(gdf, to_crs=ogrid.corner_grid,
inplace=True)
if rasterio is None:
raise ImportError('This feature needs rasterio')
from rasterio.features import rasterize
with rasterio.Env():
mask = rasterize(gdf.geometry, out=mask)
if geometry is not None:
geom = gis.transform_geometry(geometry, crs=crs,
to_crs=ogrid.corner_grid)
if rasterio is None:
raise ImportError('This feature needs rasterio')
from rasterio.features import rasterize
with rasterio.Env():
mask = rasterize(np.atleast_1d(geom), out=mask)
if grid is not None:
_tmp = np.ones((grid.ny, grid.nx), dtype=np.int16)
mask = ogrid.map_gridded_data(_tmp, grid, out=mask).filled(0)
if corners is not None:
cgrid = self._ogrid.center_grid
xy0, xy1 = corners
x0, y0 = cgrid.transform(*xy0, crs=crs, nearest=True)
x1, y1 = cgrid.transform(*xy1, crs=crs, nearest=True)
mask[np.min([y0, y1]):np.max([y0, y1])+1,
np.min([x0, x1]):np.max([x0, x1])+1] = 1
self.roi = mask
@property
def roi(self):
"""Mask of the ROI (same size as subset)."""
return self._roi[self.sub_y[0]:self.sub_y[1]+1,
self.sub_x[0]:self.sub_x[1]+1]
@roi.setter
def roi(self, value):
"""A mask is allways defined on _ogrid"""
self._roi = value
def get_vardata(self, var_id=None):
"""Interface to implement by subclasses, taking sub_x, sub_y and
sub_t into account."""
raise NotImplementedError()
| (grid, time=None) |
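A minimal sketch of a GeoDataset subclass wrapping an in-memory array (the grid definition is arbitrary example data):
import numpy as np
from salem import Grid
from salem.datasets import GeoDataset

class ArrayDataset(GeoDataset):
    def __init__(self, data, grid):
        self.data = data
        GeoDataset.__init__(self, grid)

    def get_vardata(self, var_id=None):
        # honour the spatial subset (sub_x and sub_y are both inclusive)
        return self.data[self.sub_y[0]:self.sub_y[1] + 1,
                         self.sub_x[0]:self.sub_x[1] + 1]

g = Grid(nxny=(4, 3), dxdy=(1, 1), x0y0=(0, 0))
d = ArrayDataset(np.arange(12).reshape((3, 4)), g)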
22,538 | salem.datasets | __init__ | Set-up the georeferencing, time is optional.
Parameters:
grid: a salem.Grid object which represents the underlying data
time: if the data has a time dimension
| def __init__(self, grid, time=None):
"""Set-up the georeferencing, time is optional.
Parameters:
grid: a salem.Grid object which represents the underlying data
time: if the data has a time dimension
"""
# The original grid, for always stored
self._ogrid = grid
# The current grid (changes if set_subset() is called)
self.grid = grid
# Default indexes to get in the underlying data (BOTH inclusive,
# i.e. closed [a, b], not half-open [a, b) as in numpy)
self.sub_x = [0, grid.nx-1]
self.sub_y = [0, grid.ny-1]
# Roi is a ny, nx array if set
self.roi = None
self.set_roi()
# _time is a pd.Series because it's so nice to misuse the series.loc
# flexibility (see set_period)
if time is not None:
if isinstance(time, pd.Series):
time = pd.Series(np.arange(len(time)), index=time.index)
else:
try:
time = pd.Series(np.arange(len(time)), index=time)
except AttributeError:
# https://github.com/pandas-dev/pandas/issues/23419
for t in time:
setattr(t, 'nanosecond', 0)
time = pd.Series(np.arange(len(time)), index=time)
self._time = time
# set_period() will set those
self.t0 = None
self.t1 = None
self.sub_t = None
self.set_period()
| (self, grid, time=None) |
22,539 | salem.datasets | get_vardata | Interface to implement by subclasses, taking sub_x, sub_y and
sub_t into account. | def get_vardata(self, var_id=None):
"""Interface to implement by subclasses, taking sub_x, sub_y and
sub_t into account."""
raise NotImplementedError()
| (self, var_id=None) |
22,543 | salem.datasets | GeoNetcdf | NetCDF files with geolocalisation info.
GeoNetcdf will try hard to understand the geoloc and time of the file,
but if it can't you can still provide the time and grid at instantiation.
| class GeoNetcdf(GeoDataset):
"""NetCDF files with geolocalisation info.
GeoNetcdf will try hard to understand the geoloc and time of the file,
but if it can't you can still provide the time and grid at instantiation.
"""
def __init__(self, file, grid=None, time=None, monthbegin=False):
"""Open the file and try to understand it.
Parameters
----------
file: path to the netcdf file
grid: a Grid object. This will override the normal behavior of
GeoNetcdf, which is to try to understand the grid automatically.
time: a time array. This will override the normal behavior of
GeoNetcdf, which is to try to understand the time automatically.
monthbegin: set to true if you are sure that your data is monthly
and that the data provider decided to tag the date as the center of
the month (stupid)
"""
self._nc = netCDF4.Dataset(file)
self._nc.set_auto_mask(False)
self.variables = self._nc.variables
if grid is None:
grid = sio.grid_from_dataset(self._nc)
if grid is None:
raise RuntimeError('File grid not understood')
if time is None:
time = sio.netcdf_time(self._nc, monthbegin=monthbegin)
dn = self._nc.dimensions.keys()
try:
self.x_dim = utils.str_in_list(dn, utils.valid_names['x_dim'])[0]
self.y_dim = utils.str_in_list(dn, utils.valid_names['y_dim'])[0]
except IndexError:
raise RuntimeError('File coordinates not understood')
dim = utils.str_in_list(dn, utils.valid_names['t_dim'])
self.t_dim = dim[0] if dim else None
dim = utils.str_in_list(dn, utils.valid_names['z_dim'])
self.z_dim = dim[0] if dim else None
GeoDataset.__init__(self, grid, time=time)
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.close()
def close(self):
self._nc.close()
def get_vardata(self, var_id=0, as_xarray=False):
"""Reads the data out of the netCDF file while taking into account
time and spatial subsets.
Parameters
----------
var_id: the name of the variable (must be available in self.variables)
as_xarray: returns a DataArray object
"""
v = self.variables[var_id]
# Make the slices
item = []
for d in v.dimensions:
it = slice(None)
if d == self.t_dim and self.sub_t is not None:
it = slice(self.sub_t[0], self.sub_t[1]+1)
elif d == self.y_dim:
it = slice(self.sub_y[0], self.sub_y[1]+1)
elif d == self.x_dim:
it = slice(self.sub_x[0], self.sub_x[1]+1)
item.append(it)
with np.errstate(invalid='ignore'):
# This is due to some numpy warnings
out = v[tuple(item)]
if as_xarray:
# convert to xarray
dims = v.dimensions
coords = dict()
x, y = self.grid.x_coord, self.grid.y_coord
for d in dims:
if d == self.t_dim:
coords[d] = self.time
elif d == self.y_dim:
coords[d] = y
elif d == self.x_dim:
coords[d] = x
attrs = v.__dict__.copy()
bad_keys = ['scale_factor', 'add_offset',
'_FillValue', 'missing_value', 'ncvars']
_ = [attrs.pop(b, None) for b in bad_keys]
out = xr.DataArray(out, dims=dims, coords=coords, attrs=attrs)
return out
| (file, grid=None, time=None, monthbegin=False) |
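A sketch of GeoNetcdf used as a context manager ('data.nc' is a hypothetical netCDF file containing a variable named 'prcp'):
from salem import GeoNetcdf
with GeoNetcdf('data.nc') as d:
    d.set_period(t0='2012-06-01', t1='2012-06-30')
    prcp = d.get_vardata('prcp', as_xarray=True)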
22,545 | salem.datasets | __exit__ | null | def __exit__(self, exception_type, exception_value, traceback):
self.close()
| (self, exception_type, exception_value, traceback) |
22,546 | salem.datasets | __init__ | Open the file and try to understand it.
Parameters
----------
file: path to the netcdf file
grid: a Grid object. This will override the normal behavior of
GeoNetcdf, which is to try to understand the grid automatically.
time: a time array. This will override the normal behavior of
GeoNetcdf, which is to try to understand the time automatically.
monthbegin: set to true if you are sure that your data is monthly
and that the data provider decided to tag the date as the center of
the month (stupid)
| def __init__(self, file, grid=None, time=None, monthbegin=False):
"""Open the file and try to understand it.
Parameters
----------
file: path to the netcdf file
grid: a Grid object. This will override the normal behavior of
GeoNetcdf, which is to try to understand the grid automatically.
time: a time array. This will override the normal behavior of
GeoNetcdf, which is to try to understand the time automatically.
monthbegin: set to true if you are sure that your data is monthly
and that the data provider decided to tag the date as the center of
the month (stupid)
"""
self._nc = netCDF4.Dataset(file)
self._nc.set_auto_mask(False)
self.variables = self._nc.variables
if grid is None:
grid = sio.grid_from_dataset(self._nc)
if grid is None:
raise RuntimeError('File grid not understood')
if time is None:
time = sio.netcdf_time(self._nc, monthbegin=monthbegin)
dn = self._nc.dimensions.keys()
try:
self.x_dim = utils.str_in_list(dn, utils.valid_names['x_dim'])[0]
self.y_dim = utils.str_in_list(dn, utils.valid_names['y_dim'])[0]
except IndexError:
raise RuntimeError('File coordinates not understood')
dim = utils.str_in_list(dn, utils.valid_names['t_dim'])
self.t_dim = dim[0] if dim else None
dim = utils.str_in_list(dn, utils.valid_names['z_dim'])
self.z_dim = dim[0] if dim else None
GeoDataset.__init__(self, grid, time=time)
| (self, file, grid=None, time=None, monthbegin=False) |
22,547 | salem.datasets | close | null | def close(self):
self._nc.close()
| (self) |
22,548 | salem.datasets | get_vardata | Reads the data out of the netCDF file while taking into account
time and spatial subsets.
Parameters
----------
var_id: the name of the variable (must be available in self.variables)
as_xarray: returns a DataArray object
| def get_vardata(self, var_id=0, as_xarray=False):
"""Reads the data out of the netCDF file while taking into account
time and spatial subsets.
Parameters
----------
var_id: the name of the variable (must be available in self.variables)
as_xarray: returns a DataArray object
"""
v = self.variables[var_id]
# Make the slices
item = []
for d in v.dimensions:
it = slice(None)
if d == self.t_dim and self.sub_t is not None:
it = slice(self.sub_t[0], self.sub_t[1]+1)
elif d == self.y_dim:
it = slice(self.sub_y[0], self.sub_y[1]+1)
elif d == self.x_dim:
it = slice(self.sub_x[0], self.sub_x[1]+1)
item.append(it)
with np.errstate(invalid='ignore'):
# This is due to some numpy warnings
out = v[tuple(item)]
if as_xarray:
# convert to xarray
dims = v.dimensions
coords = dict()
x, y = self.grid.x_coord, self.grid.y_coord
for d in dims:
if d == self.t_dim:
coords[d] = self.time
elif d == self.y_dim:
coords[d] = y
elif d == self.x_dim:
coords[d] = x
attrs = v.__dict__.copy()
bad_keys = ['scale_factor', 'add_offset',
'_FillValue', 'missing_value', 'ncvars']
_ = [attrs.pop(b, None) for b in bad_keys]
out = xr.DataArray(out, dims=dims, coords=coords, attrs=attrs)
return out
| (self, var_id=0, as_xarray=False) |
22,552 | salem.datasets | GeoTiff | Geolocalised tiff images (needs rasterio). | class GeoTiff(GeoDataset):
"""Geolocalised tiff images (needs rasterio)."""
def __init__(self, file):
"""Open the file.
Parameters
----------
file: path to the file
"""
if rasterio is None:
raise ImportError('This feature needs rasterio to be installed')
# brutally efficient
with rasterio.Env():
with rasterio.open(file) as src:
nxny = (src.width, src.height)
ul_corner = (src.bounds.left, src.bounds.top)
proj = pyproj.Proj(src.crs)
dxdy = (src.res[0], -src.res[1])
grid = Grid(x0y0=ul_corner, nxny=nxny, dxdy=dxdy,
pixel_ref='corner', proj=proj)
# done
self.file = file
GeoDataset.__init__(self, grid)
def get_vardata(self, var_id=1):
"""Read the geotiff band.
Parameters
----------
var_id: the variable name (here the band number)
"""
wx = (self.sub_x[0], self.sub_x[1]+1)
wy = (self.sub_y[0], self.sub_y[1]+1)
with rasterio.Env():
with rasterio.open(self.file) as src:
band = src.read(var_id, window=(wy, wx))
return band
| (file) |
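A sketch of GeoTiff usage ('dem.tif' is a hypothetical georeferenced raster; needs rasterio):
from salem import GeoTiff
g = GeoTiff('dem.tif')
# subset to a lon/lat rectangle, then read the first band
g.set_subset(corners=((10., 45.), (12., 47.)))
band = g.get_vardata(1)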
22,553 | salem.datasets | __init__ | Open the file.
Parameters
----------
file: path to the file
| def __init__(self, file):
"""Open the file.
Parameters
----------
file: path to the file
"""
if rasterio is None:
raise ImportError('This feature needs rasterio to be installed')
# brutally efficient
with rasterio.Env():
with rasterio.open(file) as src:
nxny = (src.width, src.height)
ul_corner = (src.bounds.left, src.bounds.top)
proj = pyproj.Proj(src.crs)
dxdy = (src.res[0], -src.res[1])
grid = Grid(x0y0=ul_corner, nxny=nxny, dxdy=dxdy,
pixel_ref='corner', proj=proj)
# done
self.file = file
GeoDataset.__init__(self, grid)
| (self, file) |
22,558 | salem.datasets | GoogleCenterMap | Google static map centered on a point.
Needs motionless.
| class GoogleCenterMap(GeoDataset):
"""Google static map centered on a point.
Needs motionless.
"""
def __init__(self, center_ll=(11.38, 47.26), size_x=640, size_y=640,
scale=1, zoom=12, maptype='satellite', use_cache=True,
**kwargs):
"""Initialize
Parameters
----------
center_ll : tuple
tuple of lon, lat center of the map
size_x : int
image size
size_y : int
image size
scale : int
image scaling factor. 1, 2. 2 is higher resolution but takes
longer to download
zoom : int
google zoom level (https://developers.google.com/maps/documentation/
static-maps/intro#Zoomlevels). 1 (world) - 20 (buildings)
maptype : str, default: 'satellite'
'roadmap', 'satellite', 'hybrid', 'terrain'
use_cache : bool, default: True
store the downloaded image in the cache to avoid future downloads
kwargs : **
any keyword accepted by motionless.CenterMap (e.g. `key` for the API)
"""
global API_KEY
# Google grid
grid = gis.googlestatic_mercator_grid(center_ll=center_ll,
nx=size_x, ny=size_y,
zoom=zoom, scale=scale)
if 'key' not in kwargs:
if API_KEY is None:
with open(utils.get_demo_file('.api_key'), 'r') as f:
API_KEY = f.read().replace('\n', '')
kwargs['key'] = API_KEY
# Motionless
import motionless
googleurl = motionless.CenterMap(lon=center_ll[0], lat=center_ll[1],
size_x=size_x, size_y=size_y,
maptype=maptype, zoom=zoom, scale=scale,
**kwargs)
# done
self.googleurl = googleurl
self.use_cache = use_cache
GeoDataset.__init__(self, grid)
@lazy_property
def _img(self):
"""Download the image."""
if self.use_cache:
return utils.joblib_read_img_url(self.googleurl.generate_url())
else:
from matplotlib.image import imread
fd = urlopen(self.googleurl.generate_url())
return imread(io.BytesIO(fd.read()))
def get_vardata(self, var_id=0):
"""Return and subset the image."""
return self._img[self.sub_y[0]:self.sub_y[1]+1,
self.sub_x[0]:self.sub_x[1]+1, :]
| (center_ll=(11.38, 47.26), size_x=640, size_y=640, scale=1, zoom=12, maptype='satellite', use_cache=True, **kwargs) |
22,559 | salem.datasets | __init__ | Initialize
Parameters
----------
center_ll : tuple
tuple of lon, lat center of the map
size_x : int
image size
size_y : int
image size
scale : int
image scaling factor. 1, 2. 2 is higher resolution but takes
longer to download
zoom : int
google zoom level (https://developers.google.com/maps/documentation/
static-maps/intro#Zoomlevels). 1 (world) - 20 (buildings)
maptype : str, default: 'satellite'
'roadmap', 'satellite', 'hybrid', 'terrain'
use_cache : bool, default: True
store the downloaded image in the cache to avoid future downloads
kwargs : **
any keyword accepted by motionless.CenterMap (e.g. `key` for the API)
| def __init__(self, center_ll=(11.38, 47.26), size_x=640, size_y=640,
scale=1, zoom=12, maptype='satellite', use_cache=True,
**kwargs):
"""Initialize
Parameters
----------
center_ll : tuple
tuple of lon, lat center of the map
size_x : int
image size
size_y : int
image size
scale : int
image scaling factor. 1, 2. 2 is higher resolution but takes
longer to download
zoom : int
google zoom level (https://developers.google.com/maps/documentation/
static-maps/intro#Zoomlevels). 1 (world) - 20 (buildings)
maptype : str, default: 'satellite'
'roadmap', 'satellite', 'hybrid', 'terrain'
use_cache : bool, default: True
store the downloaded image in the cache to avoid future downloads
kwargs : **
any keyword accepted by motionless.CenterMap (e.g. `key` for the API)
"""
global API_KEY
# Google grid
grid = gis.googlestatic_mercator_grid(center_ll=center_ll,
nx=size_x, ny=size_y,
zoom=zoom, scale=scale)
if 'key' not in kwargs:
if API_KEY is None:
with open(utils.get_demo_file('.api_key'), 'r') as f:
API_KEY = f.read().replace('\n', '')
kwargs['key'] = API_KEY
# Motionless
import motionless
googleurl = motionless.CenterMap(lon=center_ll[0], lat=center_ll[1],
size_x=size_x, size_y=size_y,
maptype=maptype, zoom=zoom, scale=scale,
**kwargs)
# done
self.googleurl = googleurl
self.use_cache = use_cache
GeoDataset.__init__(self, grid)
| (self, center_ll=(11.38, 47.26), size_x=640, size_y=640, scale=1, zoom=12, maptype='satellite', use_cache=True, **kwargs) |
22,560 | salem.datasets | get_vardata | Return and subset the image. | def get_vardata(self, var_id=0):
"""Return and subset the image."""
return self._img[self.sub_y[0]:self.sub_y[1]+1,
self.sub_x[0]:self.sub_x[1]+1, :]
| (self, var_id=0) |
22,564 | salem.datasets | GoogleVisibleMap | Google static map automatically sized and zoomed to a selected region.
It's usually more practical to use than GoogleCenterMap.
| class GoogleVisibleMap(GoogleCenterMap):
"""Google static map automatically sized and zoomed to a selected region.
It's usually more practical to use than GoogleCenterMap.
"""
def __init__(self, x, y, crs=wgs84, size_x=640, size_y=640, scale=1,
maptype='satellite', use_cache=True, **kwargs):
"""Initialize
Parameters
----------
x : array
x coordinates of the points to include on the map
y : array
y coordinates of the points to include on the map
crs : proj or Grid
coordinate reference system of x, y
size_x : int
image size
size_y : int
image size
scale : int
image scaling factor. 1, 2. 2 is higher resolution but takes
longer to download
maptype : str, default: 'satellite'
'roadmap', 'satellite', 'hybrid', 'terrain'
use_cache : bool, default: True
store the downloaded image in the cache to avoid future downloads
kwargs : **
any keyword accepted by motionless.CenterMap (e.g. `key` for the API)
Notes
-----
To obtain the exact domain specified in `x` and `y` you may have to
play with the `size_x` and `size_y` kwargs.
"""
global API_KEY
if 'zoom' in kwargs or 'center_ll' in kwargs:
raise ValueError('incompatible kwargs.')
# Transform to lonlat
crs = gis.check_crs(crs)
if isinstance(crs, pyproj.Proj):
lon, lat = gis.transform_proj(crs, wgs84, x, y)
elif isinstance(crs, Grid):
lon, lat = crs.ij_to_crs(x, y, crs=wgs84)
else:
raise NotImplementedError()
# surely not the smartest way to do it, but should be enough for now
mc = (np.mean(lon), np.mean(lat))
zoom = 20
while zoom >= 0:
grid = gis.googlestatic_mercator_grid(center_ll=mc, nx=size_x,
ny=size_y, zoom=zoom,
scale=scale)
dx, dy = grid.transform(lon, lat, maskout=True)
if np.any(dx.mask):
zoom -= 1
else:
break
if 'key' not in kwargs:
if API_KEY is None:
with open(utils.get_demo_file('.api_key'), 'r') as f:
API_KEY = f.read().replace('\n', '')
kwargs['key'] = API_KEY
GoogleCenterMap.__init__(self, center_ll=mc, size_x=size_x,
size_y=size_y, zoom=zoom, scale=scale,
maptype=maptype, use_cache=use_cache, **kwargs)
| (x, y, crs=wgs84, size_x=640, size_y=640, scale=1, maptype='satellite', use_cache=True, **kwargs) |
22,565 | salem.datasets | __init__ | Initialize
Parameters
----------
x : array
x coordinates of the points to include on the map
y : array
y coordinates of the points to include on the map
crs : proj or Grid
coordinate reference system of x, y
size_x : int
image size
size_y : int
image size
scale : int
image scaling factor. 1, 2. 2 is higher resolution but takes
longer to download
maptype : str, default: 'satellite'
'roadmap', 'satellite', 'hybrid', 'terrain'
use_cache : bool, default: True
store the downloaded image in the cache to avoid future downloads
kwargs : **
any keyword accepted by motionless.CenterMap (e.g. `key` for the API)
Notes
-----
To obtain the exact domain specified in `x` and `y` you may have to
play with the `size_x` and `size_y` kwargs.
| def __init__(self, x, y, crs=wgs84, size_x=640, size_y=640, scale=1,
maptype='satellite', use_cache=True, **kwargs):
"""Initialize
Parameters
----------
x : array
x coordinates of the points to include on the map
y : array
y coordinates of the points to include on the map
crs : proj or Grid
coordinate reference system of x, y
size_x : int
image size
size_y : int
image size
scale : int
image scaling factor. 1, 2. 2 is higher resolution but takes
longer to download
maptype : str, default: 'satellite'
'roadmap', 'satellite', 'hybrid', 'terrain'
use_cache : bool, default: True
store the downloaded image in the cache to avoid future downloads
kwargs : **
any keyword accepted by motionless.CenterMap (e.g. `key` for the API)
Notes
-----
To obtain the exact domain specified in `x` and `y` you may have to
play with the `size_x` and `size_y` kwargs.
"""
global API_KEY
if 'zoom' in kwargs or 'center_ll' in kwargs:
raise ValueError('incompatible kwargs.')
# Transform to lonlat
crs = gis.check_crs(crs)
if isinstance(crs, pyproj.Proj):
lon, lat = gis.transform_proj(crs, wgs84, x, y)
elif isinstance(crs, Grid):
lon, lat = crs.ij_to_crs(x, y, crs=wgs84)
else:
raise NotImplementedError()
# surely not the smartest way to do it, but should be enough for now
mc = (np.mean(lon), np.mean(lat))
zoom = 20
while zoom >= 0:
grid = gis.googlestatic_mercator_grid(center_ll=mc, nx=size_x,
ny=size_y, zoom=zoom,
scale=scale)
dx, dy = grid.transform(lon, lat, maskout=True)
if np.any(dx.mask):
zoom -= 1
else:
break
if 'key' not in kwargs:
if API_KEY is None:
with open(utils.get_demo_file('.api_key'), 'r') as f:
API_KEY = f.read().replace('\n', '')
kwargs['key'] = API_KEY
GoogleCenterMap.__init__(self, center_ll=mc, size_x=size_x,
size_y=size_y, zoom=zoom, scale=scale,
maptype=maptype, use_cache=use_cache, **kwargs)
| (self, x, y, crs=wgs84, size_x=640, size_y=640, scale=1, maptype='satellite', use_cache=True, **kwargs) |
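A sketch of GoogleVisibleMap usage (the coordinates are arbitrary points to frame, and 'YOUR_API_KEY' is a placeholder for a valid Google Static Maps key; needs motionless):
from salem import GoogleVisibleMap
g = GoogleVisibleMap(x=[10.5, 11.5], y=[46.5, 47.5],
                     maptype='terrain', key='YOUR_API_KEY')
img = g.get_vardata()  # the RGB image as an array
grid = g.grid          # the matching salem Grid, e.g. for plotting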
22,570 | salem.gis | Grid | A structured grid on a map projection.
Central class in the library, taking over user concerns about the
gridded representation of georeferenced data. It adds a level of
abstraction by defining a new coordinate system.
A grid requires georeferencing information at instantiation and is
immutable *in principle*. I didn't implement barriers: if you want to mess
with it, you can (not recommended). Note that in most cases, users won't
have to define the grid themselves: most georeferenced datasets contain
enough metadata for Salem to determine the data's grid automatically.
A grid is defined by a projection, a reference point in this projection,
a grid spacing and a number of grid points. The grid can be defined in an
"upper left" convention (reference point at the top left corner,
dy negative, always). This is the python convention, but not that of all
datasets (for example, the output of the WRF model follows a lower-left
corner convention). Therefore, grids can also be defined in a "lower left"
corner convention (dy positive). The use of one or the other convention
depends on the data, so the user should take care of what he is doing.
The reference points of the grid points might be located at the corner of
the pixel (upper left corner for example if dy is negative) or the center
of the pixel (most atmospheric datasets follow this convention). The two
concepts are truly equivalent and each grid instance gives access to one
representation or another ("center_grid" and "corner_grid" properties).
Under the hood, Salem uses the representation it needs to do
the job by accessing either one or the other of these parameters. The user
should know which convention he needs for his purposes: some grid
functions and properties are representation dependent (transform,
ll_coordinates, ...) while some are not (extent,
corner_ll_coordinates ...).
Attributes
----------
proj
nx
ny
dx
dy
x0
y0
origin
pixel_ref
x_coord
y_coord
xy_coordinates
ll_coordinates
xstagg_xy_coordinates
ystagg_xy_coordinates
xstagg_ll_coordinates
ystagg_ll_coordinates
center_grid
corner_grid
extent
| class Grid(object):
"""A structured grid on a map projection.
Central class in the library, taking over user concerns about the
gridded representation of georeferenced data. It adds a level of
abstraction by defining a new coordinate system.
A grid requires georeferencing information at instantiation and is
immutable *in principle*. I didn't implement barriers: if you want to mess
with it, you can (not recommended). Note that in most cases, users won't
have to define the grid themselves: most georeferenced datasets contain
enough metadata for Salem to determine the data's grid automatically.
A grid is defined by a projection, a reference point in this projection,
a grid spacing and a number of grid points. The grid can be defined in an
"upper left" convention (reference point at the top left corner,
dy negative, always). This is the python convention, but not that of all
datasets (for example, the output of the WRF model follows a lower-left
corner convention). Therefore, grids can also be defined in a "lower left"
corner convention (dy positive). The use of one or the other convention
depends on the data, so the user should take care of what he is doing.
The reference points of the grid points might be located at the corner of
the pixel (upper left corner for example if dy is negative) or the center
of the pixel (most atmospheric datasets follow this convention). The two
concepts are truly equivalent and each grid instance gives access to one
representation or another ("center_grid" and "corner_grid" properties).
Under the hood, Salem uses the representation it needs to do
the job by accessing either one or the other of these parameters. The user
should know which convention the task at hand requires: some grid
functions and properties are representation dependent (transform,
ll_coordinates, ...) while some are not (extent,
corner_ll_coordinates ...).
Attributes
----------
proj
nx
ny
dx
dy
x0
y0
origin
pixel_ref
x_coord
y_coord
xy_coordinates
ll_coordinates
xstagg_xy_coordinates
ystagg_xy_coordinates
xstagg_ll_coordinates
ystagg_ll_coordinates
center_grid
corner_grid
extent
"""
def __init__(self, proj=wgs84, nxny=None, dxdy=None, x0y0=None,
pixel_ref='center',
corner=None, ul_corner=None, ll_corner=None):
"""
Parameters
----------
proj : pyproj.Proj instance
defines the grid's map projection. Defaults to 'PlateCarree'
(wgs84)
nxny : (int, int)
(nx, ny) number of grid points
dxdy : (float, float)
(dx, dy) grid spacing in proj coordinates. dx must be positive,
while dy can be positive or negative depending on the origin
grid point's location (upper-left or lower-left)
x0y0 : (float, float)
(x0, y0) cartesian coordinates (in proj) of the upper left
or lower left corner, depending on the sign of dy
pixel_ref : str
either 'center' or 'corner' (default: 'center'). Tells
the Grid object where the (x0, y0) is located in the grid point.
If ``pixel_ref`` is set to 'corner' and dy < 0, the ``x0y0``
kwarg specifies the **grid point's upper left** corner
coordinates. Equivalently, if dy > 0, x0y0 specifies the
**grid point's lower left** coordinate.
corner : (float, float)
DEPRECATED in favor of ``x0y0``
(x0, y0) cartesian coordinates (in proj) of the upper left
or lower left corner, depending on the sign of dy
ul_corner : (float, float)
DEPRECATED in favor of ``x0y0``
(x0, y0) cartesian coordinates (in proj) of the upper left corner
ll_corner : (float, float)
DEPRECATED in favor of ``x0y0``
(x0, y0) cartesian coordinates (in proj) of the lower left corner
Examples
--------
>>> g = Grid(nxny=(3, 2), dxdy=(1, 1), x0y0=(0, 0), proj=wgs84)
>>> lon, lat = g.ll_coordinates
>>> lon
array([[ 0., 1., 2.],
[ 0., 1., 2.]])
>>> lat
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> lon, lat = g.corner_grid.ll_coordinates
>>> lon
array([[-0.5, 0.5, 1.5],
[-0.5, 0.5, 1.5]])
>>> lat
array([[-0.5, -0.5, -0.5],
[ 0.5, 0.5, 0.5]])
>>> g.corner_grid == g.center_grid # the two reprs are equivalent
True
"""
# Check for coordinate system
proj = check_crs(proj)
if proj is None:
raise ValueError('proj must be of type pyproj.Proj')
self._proj = proj
# deprecations
if corner is not None:
warnings.warn('The `corner` kwarg is deprecated: '
'use `x0y0` instead.', DeprecationWarning)
x0y0 = corner
if ul_corner is not None:
warnings.warn('The `ul_corner` kwarg is deprecated: '
'use `x0y0` instead.', DeprecationWarning)
if dxdy[1] > 0.:
raise ValueError('dxdy and input params not compatible')
x0y0 = ul_corner
if ll_corner is not None:
warnings.warn('The `ll_corner` kwarg is deprecated: '
'use `x0y0` instead.', DeprecationWarning)
if dxdy[1] < 0.:
raise ValueError('dxdy and input params not compatible')
x0y0 = ll_corner
# Check for shortcut
if dxdy[1] < 0.:
ul_corner = x0y0
else:
ll_corner = x0y0
# Initialise the rest
self._check_input(nxny=nxny, dxdy=dxdy,
ul_corner=ul_corner,
ll_corner=ll_corner,
pixel_ref=pixel_ref)
def _check_input(self, **kwargs):
"""See which parameter combination we have and set everything."""
combi_a = ['nxny', 'dxdy', 'ul_corner']
combi_b = ['nxny', 'dxdy', 'll_corner']
if all(kwargs[k] is not None for k in combi_a):
nx, ny = kwargs['nxny']
dx, dy = kwargs['dxdy']
x0, y0 = kwargs['ul_corner']
if (dx <= 0.) or (dy >= 0.):
raise ValueError('dxdy and input params not compatible')
origin = 'upper-left'
elif all(kwargs[k] is not None for k in combi_b):
nx, ny = kwargs['nxny']
dx, dy = kwargs['dxdy']
x0, y0 = kwargs['ll_corner']
if (dx <= 0.) or (dy <= 0.):
raise ValueError('dxdy and input params not compatible')
origin = 'lower-left'
else:
raise ValueError('Input params not compatible')
self._nx = int(nx)
self._ny = int(ny)
if (self._nx <= 0) or (self._ny <= 0):
raise ValueError('nxny not valid')
self._dx = float(dx)
self._dy = float(dy)
self._x0 = float(x0)
self._y0 = float(y0)
self._origin = origin
# Check for pixel ref
self._pixel_ref = kwargs['pixel_ref'].lower()
if self._pixel_ref not in ['corner', 'center']:
raise ValueError('pixel_ref not recognized')
def __eq__(self, other):
"""Two grids are considered equal when their defining coordinates
and projection are equal.
Note: equality also means floating point equality, with all the
problems that come with it.
(independent of the grid's cornered or centered representation.)
"""
# Attributes defining the instance
ckeys = ['x0', 'y0', 'nx', 'ny', 'dx', 'dy', 'origin']
a = dict((k, getattr(self.corner_grid, k)) for k in ckeys)
b = dict((k, getattr(other.corner_grid, k)) for k in ckeys)
p1 = self.corner_grid.proj
p2 = other.corner_grid.proj
return (a == b) and proj_is_same(p1, p2)
def __repr__(self):
srs = '+'.join(sorted(self.proj.srs.split('+'))).strip()
summary = ['<salem.Grid>']
summary += [' proj: ' + srs]
summary += [' pixel_ref: ' + self.pixel_ref]
summary += [' origin: ' + str(self.origin)]
summary += [' (nx, ny): (' + str(self.nx) + ', ' + str(self.ny) + ')']
summary += [' (dx, dy): (' + str(self.dx) + ', ' + str(self.dy) + ')']
summary += [' (x0, y0): (' + str(self.x0) + ', ' + str(self.y0) + ')']
return '\n'.join(summary) + '\n'
@property
def proj(self):
"""``pyproj.Proj`` instance defining the grid's map projection."""
return self._proj
@property
def nx(self):
"""number of grid points in the x direction."""
return self._nx
@property
def ny(self):
"""number of grid points in the y direction."""
return self._ny
@property
def dx(self):
"""x grid spacing (always positive)."""
return self._dx
@property
def dy(self):
"""y grid spacing (positive if ll_corner, negative if ul_corner)."""
return self._dy
@property
def x0(self):
"""X reference point in projection coordinates."""
return self._x0
@property
def y0(self):
"""Y reference point in projection coordinates."""
return self._y0
@property
def origin(self):
"""``'upper-left'`` or ``'lower-left'``."""
return self._origin
@property
def pixel_ref(self):
"""if coordinates are at the ``'center'`` or ``'corner'`` of the grid.
"""
return self._pixel_ref
@lazy_property
def center_grid(self):
"""``salem.Grid`` instance representing the grid in center coordinates.
"""
if self.pixel_ref == 'center':
return self
else:
# shift the grid
x0y0 = ((self.x0 + self.dx / 2.), (self.y0 + self.dy / 2.))
args = dict(nxny=(self.nx, self.ny), dxdy=(self.dx, self.dy),
proj=self.proj, pixel_ref='center', x0y0=x0y0)
return Grid(**args)
@lazy_property
def corner_grid(self):
"""``salem.Grid`` instance representing the grid in corner coordinates.
"""
if self.pixel_ref == 'corner':
return self
else:
# shift the grid
x0y0 = ((self.x0 - self.dx / 2.), (self.y0 - self.dy / 2.))
args = dict(nxny=(self.nx, self.ny), dxdy=(self.dx, self.dy),
proj=self.proj, pixel_ref='corner', x0y0=x0y0)
return Grid(**args)
@property
def ij_coordinates(self):
"""Tuple of i, j coordinates of the grid points.
(dependent on the grid's cornered or centered representation.)
"""
x = np.arange(self.nx)
y = np.arange(self.ny)
return np.meshgrid(x, y)
@property
def x_coord(self):
"""x coordinates of the grid points (1D, no mesh)"""
return self.x0 + np.arange(self.nx) * self.dx
@property
def y_coord(self):
"""y coordinates of the grid points (1D, no mesh)"""
return self.y0 + np.arange(self.ny) * self.dy
@property
def xy_coordinates(self):
"""Tuple of x, y coordinates of the grid points.
(dependent on the grid's cornered or centered representation.)
"""
return np.meshgrid(self.x_coord, self.y_coord)
@lazy_property
def ll_coordinates(self):
"""Tuple of longitudes, latitudes of the grid points.
(dependent on the grid's cornered or centered representation.)
"""
x, y = self.xy_coordinates
proj_out = check_crs('EPSG:4326')
return transform_proj(self.proj, proj_out, x, y)
@property
def xstagg_xy_coordinates(self):
"""Tuple of x, y coordinates of the X staggered grid.
(independent of the grid's cornered or centered representation.)
"""
x_s = self.corner_grid.x0 + np.arange(self.nx+1) * self.dx
y = self.center_grid.y0 + np.arange(self.ny) * self.dy
return np.meshgrid(x_s, y)
@property
def ystagg_xy_coordinates(self):
"""Tuple of x, y coordinates of the Y staggered grid.
(independent of the grid's cornered or centered representation.)
"""
x = self.center_grid.x0 + np.arange(self.nx) * self.dx
y_s = self.corner_grid.y0 + np.arange(self.ny+1) * self.dy
return np.meshgrid(x, y_s)
@lazy_property
def xstagg_ll_coordinates(self):
"""Tuple of longitudes, latitudes of the X staggered grid.
(independent of the grid's cornered or centered representation.)
"""
x, y = self.xstagg_xy_coordinates
proj_out = check_crs('EPSG:4326')
return transform_proj(self.proj, proj_out, x, y)
@lazy_property
def ystagg_ll_coordinates(self):
"""Tuple of longitudes, latitudes of the Y staggered grid.
(independent of the grid's cornered or centered representation.)
"""
x, y = self.ystagg_xy_coordinates
proj_out = check_crs('EPSG:4326')
return transform_proj(self.proj, proj_out, x, y)
@lazy_property
def pixcorner_ll_coordinates(self):
"""Tuple of longitudes, latitudes (dims: ny+1, nx+1) at the corners of
the grid.
Useful for graphics.Map essentially
(independent of the grid's cornered or centered representation.)
"""
x = self.corner_grid.x0 + np.arange(self.nx+1) * self.dx
y = self.corner_grid.y0 + np.arange(self.ny+1) * self.dy
x, y = np.meshgrid(x, y)
proj_out = check_crs('EPSG:4326')
return transform_proj(self.proj, proj_out, x, y)
@lazy_property
def extent(self):
"""[left, right, bottom, top] boundaries of the grid in the grid's
projection.
The boundaries are the pixels leftmost, rightmost, lowermost and
uppermost corners, meaning that they are independent from the grid's
representation.
"""
x = np.array([0, self.nx]) * self.dx + self.corner_grid.x0
ypoint = [0, self.ny] if self.origin == 'lower-left' else [self.ny, 0]
y = np.array(ypoint) * self.dy + self.corner_grid.y0
return [x[0], x[1], y[0], y[1]]
def almost_equal(self, other, rtol=1e-05, atol=1e-08):
"""A less strict comparison between grids.
Two grids are considered equal when their defining coordinates
and projection are equal.
``grid1 == grid2`` uses floating point equality, which is very strict;
here we use numpy's ``isclose`` instead.
(independent of the grid's cornered or centered representation.)
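Examples
--------
A minimal sketch with a tiny, made-up floating point offset:
>>> g1 = Grid(nxny=(3, 3), dxdy=(1., 1.), x0y0=(0., 0.))
>>> g2 = Grid(nxny=(3, 3), dxdy=(1., 1.), x0y0=(1e-12, 0.))
>>> g1 == g2
False
>>> g1.almost_equal(g2)
True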
"""
# float attributes defining the instance
fkeys = ['x0', 'y0', 'dx', 'dy']
# unambiguous attributes
ckeys = ['nx', 'ny', 'origin']
ok = True
for k in fkeys:
ok = ok and np.isclose(getattr(self.corner_grid, k),
getattr(other.corner_grid, k),
rtol=rtol, atol=atol)
for k in ckeys:
_ok = getattr(self.corner_grid, k) == getattr(other.corner_grid, k)
ok = ok and _ok
p1 = self.corner_grid.proj
p2 = other.corner_grid.proj
return ok and proj_is_same(p1, p2)
def extent_in_crs(self, crs=wgs84):
"""Get the extent of the grid in a desired crs.
Parameters
----------
crs : crs
the target coordinate reference system.
Returns
-------
[left, right, bottom, top] boundaries of the grid.
"""
# this is not so trivial
# for optimisation we will transform the boundaries only
poly = self.extent_as_polygon(crs=crs)
_i, _j = poly.exterior.xy
return [np.min(_i), np.max(_i), np.min(_j), np.max(_j)]
def extent_as_polygon(self, crs=wgs84):
"""Get the extent of the grid in a shapely.Polygon and desired crs.
Parameters
----------
crs : crs
the target coordinate reference system.
Returns
-------
a shapely.geometry.Polygon of the grid boundaries, in the target crs.
"""
from shapely.geometry import Polygon
# this is not so trivial
# for optimisation we will transform the boundaries only
_i = np.hstack([np.arange(self.nx+1),
np.ones(self.ny+1)*self.nx,
np.arange(self.nx+1)[::-1],
np.zeros(self.ny+1)]).flatten()
_j = np.hstack([np.zeros(self.nx+1),
np.arange(self.ny+1),
np.ones(self.nx+1)*self.ny,
np.arange(self.ny+1)[::-1]]).flatten()
_i, _j = self.corner_grid.ij_to_crs(_i, _j, crs=crs)
return Polygon(zip(_i, _j))
def regrid(self, nx=None, ny=None, factor=1):
"""Make a copy of the grid with an updated spatial resolution.
The keyword parameters are mutually exclusive, because the x/y ratio
of the grid has to be preserved.
Parameters
----------
nx : int
the new number of x pixels
ny : int
the new number of y pixels
factor : int
multiplication factor (factor=3 will generate a grid with
a spatial resolution 3 times finer)
Returns
-------
a new Grid object.
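Examples
--------
A minimal sketch with made-up coordinates:
>>> g = Grid(nxny=(4, 3), dxdy=(1., 1.), x0y0=(0., 0.))
>>> g2 = g.regrid(factor=2)  # twice the spatial resolution
>>> (g2.nx, g2.ny, g2.dx)
(8, 6, 0.5)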
"""
if nx is not None:
factor = nx / self.nx
if ny is not None:
factor = ny / self.ny
nx = self.nx * factor
ny = self.ny * factor
dx = self.dx / factor
dy = self.dy / factor
x0 = self.corner_grid.x0
y0 = self.corner_grid.y0
args = dict(nxny=(nx, ny), dxdy=(dx, dy), x0y0=(x0, y0),
proj=self.proj, pixel_ref='corner')
g = Grid(**args)
if self.pixel_ref == 'center':
g = g.center_grid
return g
def ij_to_crs(self, i, j, crs=None, nearest=False):
"""Converts local i, j to cartesian coordinates in a specified crs
Parameters
----------
i : array of floats
the grid coordinates of the point(s) you want to convert
j : array of floats
the grid coordinates of the point(s) you want to convert
crs: crs
the target crs (default: self.proj)
nearest: bool
(for Grid crs only) convert to the nearest grid point
Returns
-------
(x, y) coordinates of the points in the specified crs.
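Examples
--------
A minimal sketch on a trivial lonlat grid:
>>> g = Grid(nxny=(3, 2), dxdy=(1., 1.), x0y0=(0., 0.), proj=wgs84)
>>> lon, lat = g.ij_to_crs([0, 1, 2], [0, 0, 0], crs=wgs84)
>>> float(lon[1])
1.0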
"""
# Default
if crs is None:
crs = self.proj
# Convert i, j to x, y
try:
x = i * self.dx + self.x0
y = j * self.dy + self.y0
except TypeError:
x = np.asarray(i) * self.dx + self.x0
y = np.asarray(j) * self.dy + self.y0
# Convert x, y to crs
_crs = check_crs(crs, raise_on_error=True)
if isinstance(_crs, pyproj.Proj):
ret = transform_proj(self.proj, _crs, x, y)
elif isinstance(_crs, Grid):
ret = _crs.transform(x, y, crs=self.proj, nearest=nearest)
return ret
def transform(self, x, y, z=None, crs=wgs84, nearest=False, maskout=False):
"""Converts any coordinates into the local grid.
Parameters
----------
x : ndarray
the grid coordinates of the point(s) you want to convert
y : ndarray
the grid coordinates of the point(s) you want to convert
z : None
ignored (but necessary since some shapes have a z dimension)
crs : crs
reference system of x, y. Could be a pyproj.Proj instance or a
Grid instance. In the latter case (x, y) are actually (i, j).
(Default: lonlat in wgs84).
nearest : bool
set to True if you wish to return the closest i, j coordinates
instead of subpixel coords.
maskout : bool
set to true if you want to mask out the transformed
coordinates that are not within the grid.
Returns
-------
(i, j) coordinates of the points in the local grid.
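Examples
--------
A minimal sketch with a made-up point:
>>> g = Grid(nxny=(3, 2), dxdy=(1., 1.), x0y0=(0., 0.), proj=wgs84)
>>> i, j = g.transform(1.2, 0.7, crs=wgs84, nearest=True)
>>> int(i), int(j)
(1, 1)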
"""
x, y = np.ma.array(x), np.ma.array(y)
# First to local proj
_crs = check_crs(crs, raise_on_error=True)
if isinstance(_crs, pyproj.Proj):
x, y = transform_proj(_crs, self.proj, x, y)
elif isinstance(_crs, Grid):
x, y = _crs.ij_to_crs(x, y, crs=self.proj)
# Then to local grid
x = (x - self.x0) / self.dx
y = (y - self.y0) / self.dy
# See if we need to round
if nearest:
f = np.rint if self.pixel_ref == 'center' else np.floor
x = f(x).astype(int)
y = f(y).astype(int)
# Mask?
if maskout:
if self.pixel_ref == 'center':
mask = ~((x >= -0.5) & (x < self.nx-0.5) &
(y >= -0.5) & (y < self.ny-0.5))
else:
mask = ~((x >= 0) & (x < self.nx) &
(y >= 0) & (y < self.ny))
x = np.ma.array(x, mask=mask)
y = np.ma.array(y, mask=mask)
return x, y
def grid_lookup(self, other):
"""Performs forward transformation of any other grid into self.
The principle of forward transform is to obtain, for each grid point of
``self`` , all the indices of ``other`` that are located into the
given grid point. This transformation makes sense ONLY if ``other`` has
a higher resolution than the object grid. If ``other`` has a similar
or coarser resolution than ``self`` , choose the more general
(and much faster) :py:meth:`Grid.map_gridded_data` method.
Parameters
----------
other : salem.Grid
the grid that needs to be transformed into self
Returns
-------
a dict: each key (j, i) contains an array of shape (n, 2) where n is
the number of ``other`` 's grid points found within the grid point
(j, i)
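Examples
--------
A minimal sketch, looking up a grid 10 times finer than ``g``:
>>> g = Grid(nxny=(2, 2), dxdy=(1., 1.), x0y0=(0., 0.))
>>> fine = g.regrid(factor=10)
>>> lut = g.grid_lookup(fine)
>>> lut[(0, 0)].shape  # (n, 2) array of fine (j, i) indices
(100, 2)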
"""
# Input checks
other = check_crs(other)
if not isinstance(other, Grid):
raise ValueError('`other` should be a Grid instance')
# Transform the other grid into the local grid (forward transform)
# Work in the center grid because that's what we need
i, j = other.center_grid.ij_coordinates
i, j = i.flatten(), j.flatten()
oi, oj = self.center_grid.transform(i, j, crs=other.center_grid,
nearest=True, maskout=True)
# keep only valid values
oi, oj, i, j = oi[~oi.mask], oj[~oi.mask], i[~oi.mask], j[~oi.mask]
out_inds = oi.flatten() + self.nx * oj.flatten()
# find the links
ris = np.digitize(out_inds, bins=np.arange(self.nx*self.ny+1))
# some optim based on the fact that ris has many duplicates
sort_idx = np.argsort(ris)
unq_items, unq_count = np.unique(ris[sort_idx], return_counts=True)
unq_idx = np.split(sort_idx, np.cumsum(unq_count))
# lets go
out = dict()
for idx, ri in zip(unq_idx, unq_items):
ij = divmod(ri-1, self.nx)
out[ij] = np.stack((j[idx], i[idx]), axis=1)
return out
def lookup_transform(self, data, grid=None, method=np.mean, lut=None,
return_lut=False):
"""Performs the forward transformation of gridded data into self.
This method is suitable when the data grid is of higher resolution
than ``self``. ``lookup_transform`` performs aggregation of data
according to a user given rule (e.g. ``np.mean``, ``len``, ``np.std``),
applied to all grid points found below a grid point in ``self``.
See also :py:meth:`Grid.grid_lookup` and examples in the docs
Parameters
----------
data : ndarray
an ndarray of dimensions 2, 3, or 4, the two last ones being y, x.
grid : Grid
a Grid instance matching the data
method : function, default: np.mean
the aggregation method. Possibilities: np.std, np.median, np.sum,
and more. Use ``len`` to count the number of grid points!
lut : ndarray, optional
computing the lookup table can be expensive. If you have several
operations to do with the same grid, set ``lut`` to an existing
table obtained from a previous call to :py:meth:`Grid.grid_lookup`
return_lut : bool, optional
set to True if you want to return the lookup table for later use.
in this case, returns a tuple
Returns
-------
An aggregated ndarray of the data, in ``self`` coordinates.
If ``return_lut==True``, also return the lookup table
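Examples
--------
A minimal sketch, averaging a finer made-up field onto ``self``:
>>> import numpy as np
>>> g = Grid(nxny=(2, 2), dxdy=(1., 1.), x0y0=(0., 0.))
>>> fine = g.regrid(factor=10)
>>> data = np.random.randn(fine.ny, fine.nx)
>>> out = g.lookup_transform(data, grid=fine, method=np.mean)
>>> out.shape
(2, 2)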
"""
# Input checks
if grid is None:
grid = check_crs(data) # xarray
if not isinstance(grid, Grid):
raise ValueError('grid should be a Grid instance')
if hasattr(data, 'values'):
data = data.values # xarray
# dimensional check
in_shape = data.shape
ndims = len(in_shape)
if (ndims < 2) or (ndims > 4):
raise ValueError('data dimension not accepted')
if (in_shape[-1] != grid.nx) or (in_shape[-2] != grid.ny):
raise ValueError('data dimension not compatible')
if lut is None:
lut = self.grid_lookup(grid)
# Prepare the output
out_shape = list(in_shape)
out_shape[-2:] = [self.ny, self.nx]
if data.dtype.kind == 'i':
out_data = np.zeros(out_shape, dtype=float) * np.NaN
else:
out_data = np.zeros(out_shape, dtype=data.dtype) * np.NaN
def _2d_trafo(ind, outd):
for ji, l in lut.items():
outd[ji] = method(ind[l[:, 0], l[:, 1]])
return outd
if ndims == 2:
_2d_trafo(data, out_data)
if ndims == 3:
for dimi, cdata in enumerate(data):
out_data[dimi, ...] = _2d_trafo(cdata, out_data[dimi, ...])
if ndims == 4:
for dimj, cdata in enumerate(data):
for dimi, ccdata in enumerate(cdata):
tmp = _2d_trafo(ccdata, out_data[dimj, dimi, ...])
out_data[dimj, dimi, ...] = tmp
# prepare output
if method is len:
out_data[~np.isfinite(out_data)] = 0
out_data = out_data.astype(int)
else:
out_data = np.ma.masked_invalid(out_data)
if return_lut:
return out_data, lut
else:
return out_data
def map_gridded_data(self, data, grid=None, interp='nearest',
ks=3, out=None):
"""Reprojects any structured data onto the local grid.
The z and time dimensions of the data (if provided) are conserved, but
the projected data will have the x, y dimensions of the local grid.
Currently, nearest neighbor, linear, and spline interpolation are
available. The dtype of the input data is guaranteed to be conserved,
except for int which will be converted to floats if non nearest
neighbor interpolation is asked.
Parameters
----------
data : ndarray
an ndarray of dimensions 2, 3, or 4, the two last ones being y, x.
grid : Grid
a Grid instance matching the data
interp : str
'nearest' (default), 'linear', or 'spline'
ks : int
Degree of the bivariate spline. Default is 3.
out : ndarray
output array to fill instead of creating a new one (useful for
overwriting things)
Returns
-------
A projected ndarray of the data, in ``self`` coordinates.
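Examples
--------
A minimal sketch, reprojecting made-up data onto a finer grid:
>>> import numpy as np
>>> g_in = Grid(nxny=(4, 4), dxdy=(1., 1.), x0y0=(0., 0.))
>>> g_out = g_in.regrid(factor=2)
>>> data = np.arange(16, dtype=float).reshape((4, 4))
>>> out = g_out.map_gridded_data(data, g_in, interp='linear')
>>> out.shape
(8, 8)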
"""
if grid is None:
try:
grid = data.salem.grid # try xarray
except AttributeError:
pass
# Input checks
if not isinstance(grid, Grid):
raise ValueError('grid should be a Grid instance')
try: # in case someone gave an xarray dataarray
data = data.values
except AttributeError:
pass
try: # in case someone gave a masked array (won't work with scipy)
data = data.filled(np.nan)
except AttributeError:
pass
if data.dtype == np.float32:
# New in scipy - issue with float32
data = data.astype(np.float64)
in_shape = data.shape
ndims = len(in_shape)
if (ndims < 2) or (ndims > 4):
raise ValueError('data dimension not accepted')
if (in_shape[-1] != grid.nx) or (in_shape[-2] != grid.ny):
raise ValueError('data dimension not compatible')
interp = interp.lower()
use_nn = False
if interp == 'nearest':
use_nn = True
# Transform the local grid into the input grid (backwards transform)
# Work in the center grid because that's what we need
# TODO: this stage could be optimized when many variables need transfo
i, j = self.center_grid.ij_coordinates
oi, oj = grid.center_grid.transform(i, j, crs=self.center_grid,
nearest=use_nn, maskout=False)
pv = np.nonzero((oi >= 0) & (oi < grid.nx) &
(oj >= 0) & (oj < grid.ny))
# Prepare the output
if out is not None:
out_data = np.ma.asarray(out)
else:
out_shape = list(in_shape)
out_shape[-2:] = [self.ny, self.nx]
if (data.dtype.kind == 'i') and (interp == 'nearest'):
# We dont do integer arithmetics other than nearest
out_data = np.ma.masked_all(out_shape, dtype=data.dtype)
elif data.dtype.kind == 'i':
out_data = np.ma.masked_all(out_shape, dtype=float)
else:
out_data = np.ma.masked_all(out_shape, dtype=data.dtype)
# Spare us the trouble
if len(pv[0]) == 0:
return out_data
i, j, oi, oj = i[pv], j[pv], oi[pv], oj[pv]
# Interpolate
if interp == 'nearest':
if out is not None:
if ndims > 2:
raise ValueError('Need 2D for now.')
vok = np.isfinite(data[oj, oi])
out_data[j[vok], i[vok]] = data[oj[vok], oi[vok]]
else:
out_data[..., j, i] = data[..., oj, oi]
elif interp == 'linear':
points = (np.arange(grid.ny), np.arange(grid.nx))
if ndims == 2:
f = RegularGridInterpolator(points, data, bounds_error=False)
if out is not None:
tmp = f((oj, oi))
vok = np.isfinite(tmp)
out_data[j[vok], i[vok]] = tmp[vok]
else:
out_data[j, i] = f((oj, oi))
if ndims == 3:
for dimi, cdata in enumerate(data):
f = RegularGridInterpolator(points, cdata,
bounds_error=False)
if out is not None:
tmp = f((oj, oi))
vok = np.isfinite(tmp)
out_data[dimi, j[vok], i[vok]] = tmp[vok]
else:
out_data[dimi, j, i] = f((oj, oi))
if ndims == 4:
for dimj, cdata in enumerate(data):
for dimi, ccdata in enumerate(cdata):
f = RegularGridInterpolator(points, ccdata,
bounds_error=False)
if out is not None:
tmp = f((oj, oi))
vok = np.isfinite(tmp)
out_data[dimj, dimi, j[vok], i[vok]] = tmp[vok]
else:
out_data[dimj, dimi, j, i] = f((oj, oi))
elif interp == 'spline':
px, py = np.arange(grid.ny), np.arange(grid.nx)
if ndims == 2:
f = RectBivariateSpline(px, py, data, kx=ks, ky=ks)
if out is not None:
tmp = f(oj, oi, grid=False)
vok = np.isfinite(tmp)
out_data[j[vok], i[vok]] = tmp[vok]
else:
out_data[j, i] = f(oj, oi, grid=False)
if ndims == 3:
for dimi, cdata in enumerate(data):
f = RectBivariateSpline(px, py, cdata, kx=ks, ky=ks)
if out is not None:
tmp = f(oj, oi, grid=False)
vok = np.isfinite(tmp)
out_data[dimi, j[vok], i[vok]] = tmp[vok]
else:
out_data[dimi, j, i] = f(oj, oi, grid=False)
if ndims == 4:
for dimj, cdata in enumerate(data):
for dimi, ccdata in enumerate(cdata):
f = RectBivariateSpline(px, py, ccdata, kx=ks, ky=ks)
if out is not None:
tmp = f(oj, oi, grid=False)
vok = np.isfinite(tmp)
out_data[dimj, dimi, j[vok], i[vok]] = tmp[vok]
else:
out_data[dimj, dimi, j, i] = f(oj, oi, grid=False)
else:
msg = 'interpolation not understood: {}'.format(interp)
raise ValueError(msg)
# we have to catch a warning for an unexplained reason
with warnings.catch_warnings():
mess = "invalid value encountered in isfinite"
warnings.filterwarnings("ignore", message=mess)
out_data = np.ma.masked_invalid(out_data)
return out_data
def region_of_interest(self, shape=None, geometry=None, grid=None,
corners=None, crs=wgs84, roi=None,
all_touched=False):
"""Computes a region of interest (ROI).
A ROI is simply a mask of the same size as the grid.
Parameters
----------
shape : str
path to a shapefile
geometry : geometry
a shapely geometry (don't forget the crs keyword)
grid : Grid
a Grid object whose extent will form the ROI
corners : tuple
a ((x0, y0), (x1, y1)) tuple of the corners of the square
to subset the dataset to (don't forget the crs keyword)
crs : crs, default wgs84
coordinate reference system of the geometry and corners
roi : ndarray
add the new region_of_interest to a previous one (useful for
multiple geometries for example)
all_touched : boolean
pass-through argument for rasterio.features.rasterize, indicating
that all grid cells which are clipped by the shapefile defining
the region of interest should be included (default=False)
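Examples
--------
A minimal sketch, masking a made-up lon/lat rectangle:
>>> g = Grid(nxny=(10, 10), dxdy=(1., 1.), x0y0=(0., 0.), proj=wgs84)
>>> roi = g.region_of_interest(corners=((2., 2.), (5., 5.)))
>>> int(roi.sum())  # 4 x 4 grid points fall inside
16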
"""
# Initial mask
if roi is not None:
mask = np.array(roi, dtype=np.int16)
else:
mask = np.zeros((self.ny, self.nx), dtype=np.int16)
# Collect keyword arguments, overriding anything the user
# inadvertently added
rasterize_kws = dict(out=mask, all_touched=all_touched)
# Several cases
if shape is not None:
import pandas as pd
inplace = False
if not isinstance(shape, pd.DataFrame):
from salem.sio import read_shapefile
shape = read_shapefile(shape)
inplace = True
# corner grid is needed for rasterio
shape = transform_geopandas(shape, to_crs=self.corner_grid,
inplace=inplace)
import rasterio
from rasterio.features import rasterize
with rasterio.Env():
mask = rasterize(shape.geometry, **rasterize_kws)
if geometry is not None:
import rasterio
from rasterio.features import rasterize
# corner grid is needed for rasterio
geom = transform_geometry(geometry, crs=crs,
to_crs=self.corner_grid)
with rasterio.Env():
mask = rasterize(np.atleast_1d(geom), **rasterize_kws)
if grid is not None:
_tmp = np.ones((grid.ny, grid.nx), dtype=np.int16)
mask = self.map_gridded_data(_tmp, grid, out=mask).filled(0)
if corners is not None:
cgrid = self.center_grid
xy0, xy1 = corners
x0, y0 = cgrid.transform(*xy0, crs=crs, nearest=True)
x1, y1 = cgrid.transform(*xy1, crs=crs, nearest=True)
mask[np.min([y0, y1]):np.max([y0, y1]) + 1,
np.min([x0, x1]):np.max([x0, x1]) + 1] = 1
return mask
def to_dict(self):
"""Serialize this grid to a dictionary.
Returns
-------
a grid dictionary
See Also
--------
from_dict : create a Grid from a dict
"""
return dict(proj=self.proj.srs, x0y0=(self.x0, self.y0),
nxny=(self.nx, self.ny), dxdy=(self.dx, self.dy),
pixel_ref=self.pixel_ref)
@classmethod
def from_dict(cls, d):
"""Create a Grid from a dictionary
Parameters
----------
d : dict, required
the dict with the necessary information
Returns
-------
a salem.Grid instance
See Also
--------
to_dict : create a dict from a Grid
"""
return Grid(**d)
def to_json(self, fpath):
"""Serialize this grid to a json file.
Parameters
----------
fpath : str, required
the path to the file to create
See Also
--------
from_json : read a json file
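Examples
--------
A minimal sketch (file IO, hence skipped at test time):
>>> g = Grid(nxny=(3, 3), dxdy=(1., 1.), x0y0=(0., 0.))
>>> g.to_json('grid.json')  # doctest: +SKIP
>>> Grid.from_json('grid.json') == g  # doctest: +SKIP
True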
"""
import json
with open(fpath, 'w') as fp:
json.dump(self.to_dict(), fp)
@classmethod
def from_json(cls, fpath):
"""Create a Grid from a json file
Parameters
----------
fpath : str, required
the path to the file to open
Returns
-------
a salem.Grid instance
See Also
--------
to_json : create a json file
"""
import json
with open(fpath, 'r') as fp:
d = json.load(fp)
return Grid.from_dict(d)
def to_dataset(self):
"""Creates an empty dataset based on the Grid's geolocalisation.
Returns
-------
An xarray.Dataset object ready to be filled with data
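Examples
--------
A minimal sketch (requires xarray):
>>> g = Grid(nxny=(3, 2), dxdy=(1., 1.), x0y0=(0., 0.))
>>> ds = g.to_dataset()
>>> (ds.x.size, ds.y.size)
(3, 2)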
"""
import xarray as xr
ds = xr.Dataset(coords={'x': (['x', ], self.center_grid.x_coord),
'y': (['y', ], self.center_grid.y_coord)}
)
ds.attrs['pyproj_srs'] = self.proj.srs
return ds
def to_geometry(self, to_crs=None):
"""Makes a geometrical representation of the grid (e.g. for drawing).
This can come also handy when doing shape-to-raster operations.
TODO: currently returns one polygon for each grid point, but this
could do more.
Returns
-------
a geopandas.GeoDataFrame
"""
from geopandas import GeoDataFrame
from shapely.geometry import Polygon
out = GeoDataFrame()
geoms = []
ii = []
jj = []
xx = self.corner_grid.x0 + np.arange(self.nx+1) * self.dx
yy = self.corner_grid.y0 + np.arange(self.ny+1) * self.dy
for j, (y0, y1) in enumerate(zip(yy[:-1], yy[1:])):
for i, (x0, x1) in enumerate(zip(xx[:-1], xx[1:])):
coords = [(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)]
geoms.append(Polygon(coords))
jj.append(j)
ii.append(i)
out['j'] = jj
out['i'] = ii
out.set_geometry(geoms, crs=self.proj.srs, inplace=True)
if check_crs(to_crs):
transform_geopandas(out, to_crs=to_crs, inplace=True)
return out
| (proj=wgs84, nxny=None, dxdy=None, x0y0=None, pixel_ref='center', corner=None, ul_corner=None, ll_corner=None) |