Python
async def delete_txt_record(self, name: str, text: str):
    """Deletes a DNS TXT record.

    :param name: The name of the TXT record to delete.
    :param text: The text of the TXT record to delete.
    """
    logger.debug("Deleting TXT record %s = %s", name, text)

    # Fetch all TXT records of the given name that contain the given text.
    records = await self._loop.run_in_executor(
        None,
        functools.partial(
            objects.TXTRecord.search_all, self._conn, name=name, text=text
        ),
    )

    # De-provision those TXT records
    await asyncio.gather(
        *[self._loop.run_in_executor(None, record.delete) for record in records]
    )

Python
async def query_txt_record(
    self, resolver: dns.asyncresolver.Resolver, name: str
) -> typing.Set[str]:
    """Queries a DNS TXT record.

    :param name: Name of the TXT record to query.
    :return: Set of strings stored in the TXT record.
    """
    txt_records = []

    with contextlib.suppress(
        dns.asyncresolver.NXDOMAIN, dns.asyncresolver.NoAnswer
    ):
        resp = await resolver.resolve(name, "TXT")

        for records in resp.rrset.items.keys():
            txt_records.extend([record.decode() for record in records.strings])

    return set(txt_records)

Python
async def complete_challenge(
    self,
    key: josepy.jwk.JWK,
    identifier: acme.messages.Identifier,
    challenge: acme.messages.ChallengeBody,
):
    """Completes the given DNS-01 challenge.

    This method provisions the TXT record needed to complete the given challenge.
    Then it polls the DNS for up to :attr:`POLLING_TIMEOUT` seconds to ensure that
    the record is visible to the remote CA's DNS.

    :param key: The client's account key.
    :param identifier: The identifier that is associated with the challenge.
    :param challenge: The challenge to be completed.
    :raises: :class:`~acmetk.client.exceptions.CouldNotCompleteChallenge`
        If the challenge completion attempt failed.
    """
    name = challenge.chall.validation_domain_name(identifier.value)
    text = challenge.chall.validation(key)

    try:
        await self.set_txt_record(name, text)
    except Exception as e:
        logger.exception(
            "Could not set TXT record to solve challenge: %s = %s", name, text
        )
        raise CouldNotCompleteChallenge(
            challenge,
            acme.messages.Error(typ="infoblox", title="error", detail=str(e)),
        )

    # Poll the DNS until the correct record is available
    try:
        await asyncio.wait_for(
            self._query_until_completed(name, text), self.POLLING_TIMEOUT
        )
    except asyncio.TimeoutError:
        raise CouldNotCompleteChallenge(
            challenge,
            acme.messages.Error(
                typ="infoblox",
                title="error",
                detail="Could not complete challenge due to a DNS polling timeout",
            ),
        )

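The helper `_query_until_completed` awaited above is not included in this excerpt. A minimal sketch of what such a helper could look like, assuming the solver keeps a list of resolvers in a (hypothetical) `self._resolvers` attribute and reuses `query_txt_record` from the previous snippet:

Python
# Hypothetical helper assumed by complete_challenge(); not part of this excerpt.
async def _query_until_completed(self, name: str, text: str):
    """Polls the configured resolvers until the TXT record is visible everywhere."""
    while True:
        txt_records = await asyncio.gather(
            *[self.query_txt_record(resolver, name) for resolver in self._resolvers]
        )

        # Stop as soon as every resolver reports the expected validation text.
        if all(text in records for records in txt_records):
            return

        await asyncio.sleep(1.0)
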
Python
async def cleanup_challenge(
    self,
    key: josepy.jwk.JWK,
    identifier: acme.messages.Identifier,
    challenge: acme.messages.ChallengeBody,
):
    """Performs cleanup for the given challenge.

    This method de-provisions the TXT record that was created to complete the given challenge.

    :param key: The client's account key.
    :param identifier: The identifier that is associated with the challenge.
    :param challenge: The challenge to clean up after.
    """
    name = challenge.chall.validation_domain_name(identifier.value)
    text = challenge.chall.validation(key)

    await self.delete_txt_record(name, text)

Python
def create_eab(self, public_key: josepy.jwk.JWK, directory: dict) -> dict:
    """Creates an external account binding from the stored credentials.

    :param public_key: The account's public key
    :param directory: The ACME server's directory
    :return: The JWS representing the external account binding
    """
    if self.kid and self.hmac_key:
        return acme.messages.ExternalAccountBinding.from_data(
            public_key, self.kid, self.hmac_key, directory
        )
    else:
        raise ValueError("Must specify both kid and hmac_key")

Python
def eab_credentials(self) -> ExternalAccountBindingCredentials:
    """The client's currently stored external account binding credentials

    Getter:
        Returns the client's currently stored external account binding credentials
        to be used on registration.

    Setter:
        Sets the client's stored external account binding credentials

        :param credentials: The kid and hmac_key
        :raises: :class:`ValueError` If the tuple does not contain exactly the kid and hmac_key.
    """
    return self._eab_credentials

Python
def eab_credentials(self, credentials: typing.Tuple[str]):
    """Sets the client's stored external account binding credentials

    :param credentials: The kid and hmac_key
    :raises: :class:`ValueError` If the tuple does not contain exactly the kid and hmac_key.
    """
    if isinstance(credentials, tuple) and len(credentials) == 2:
        self._eab_credentials = ExternalAccountBindingCredentials(*credentials)
    else:
        raise ValueError("A tuple containing the kid and hmac_key is required")

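A short usage sketch of the setter and `create_eab` together, assuming the getter/setter pair above is exposed as a property on the client; the client and key objects are placeholders, not part of this excerpt:

Python
# Hypothetical usage; `client` and `private_key` are placeholders.
client.eab_credentials = ("kid-issued-by-the-ca", "base64url-encoded-hmac-key")

# During registration, the stored credentials can be turned into an EAB JWS.
eab = client.eab_credentials.create_eab(
    public_key=private_key.public_key(), directory=directory
)
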
Python
async def start(self):
    """Starts the client's session.

    This method must be called after initialization and before making requests to an ACME server, as it
    fetches the ACME directory and registers the private key with the server.

    It is advised to register at least one :class:`ChallengeSolver` using :meth:`register_challenge_solver`
    before starting the client.
    """
    async with self._session.get(
        self._directory_url, ssl=self._ssl_context
    ) as resp:
        self._directory = await resp.json()

    if not self._challenge_solvers.keys():
        logger.warning(
            "There is no challenge solver registered with the client. "
            "Certificate retrieval will likely fail."
        )

    if self._account:
        try:
            await self.account_lookup()
        except acme.messages.Error as e:
            if e.code != "accountDoesNotExist":
                raise
            await self.account_register()
    else:
        await self.account_register()

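A minimal setup sketch for the client, to be run inside an async context. The constructor name and arguments shown here are assumptions based on the attributes used above (`_directory_url`, `_contact`, the private key), not the documented API:

Python
# Hypothetical setup; constructor arguments are assumptions.
client = AcmeClient(
    directory_url="https://acme.example.test/directory",
    private_key="account.key",
    contact={"email": "admin@example.test"},
)
client.register_challenge_solver(solver)  # e.g. a DNS-01 solver instance

await client.start()  # fetches the directory and registers or looks up the account
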
Python
async def account_register(
    self,
    email: str = None,
    phone: str = None,
    kid: str = None,
    hmac_key: str = None,
) -> None:
    """Registers an account with the CA.

    Also sends the given contact information and stores the account internally for subsequent requests.
    If the private key is already registered, then the account is only queried.

    It is usually not necessary to call this method as the account is registered or fetched automatically
    in :meth:`start`.

    :param email: The contact email
    :param phone: The contact phone number
    :param kid: The external account binding's key identifier
    :param hmac_key: The external account binding's symmetric encryption key
    :raises: :class:`acme.messages.Error` If the server rejects any of the contact information,
        the private key, or the external account binding.
    """
    eab_credentials = (
        ExternalAccountBindingCredentials(kid, hmac_key)
        if kid and hmac_key
        else self.eab_credentials
    )

    try:
        external_account_binding = eab_credentials.create_eab(
            self._private_key.public_key(), self._directory
        )
    except ValueError:
        external_account_binding = None

        if self.eab_credentials.kid or self.eab_credentials.hmac_key:
            logger.warning(
                "The external account binding credentials are invalid, "
                "i.e. the kid or the hmac_key was not supplied. Trying without EAB."
            )

    reg = acme.messages.Registration.from_data(
        email=email or self._contact.get("email"),
        phone=phone or self._contact.get("phone"),
        terms_of_service_agreed=True,
        external_account_binding=external_account_binding,
    )

    resp, account_obj = await self._signed_request(
        reg, self._directory["newAccount"]
    )
    account_obj["kid"] = resp.headers["Location"]
    self._account = messages.Account.from_json(account_obj)

Python
async def account_update(self, **kwargs) -> None:
    """Updates the account's contact information.

    :param kwargs: Kwargs that are passed to :class:`acme.messages.Registration`'s constructor.
        May include a :class:`dict` *contact* containing new contact information or *status* set to
        :class:`acme.messages.STATUS_DEACTIVATED` to deactivate the account.
    :raises: :class:`acme.messages.Error` If the server rejects any of the contact info or the
        status update.
    """
    reg = acme.messages.Registration(**kwargs)

    _, account_obj = await self._signed_request(reg, self._account.kid)
    account_obj["kid"] = self._account.kid
    self._account = messages.Account.from_json(account_obj)

Python
async def account_lookup(self) -> None:
    """Looks up an account using the stored private key.

    Also stores the account internally for subsequent requests.

    :raises: :class:`acme.messages.Error` If no account associated with the private key exists.
    """
    reg = acme.messages.Registration.from_data(
        terms_of_service_agreed=True, only_return_existing=True
    )

    self._account = None  # Otherwise the kid is sent instead of the JWK. Results in the request failing.
    resp, account_obj = await self._signed_request(
        reg, self._directory["newAccount"]
    )
    account_obj["kid"] = resp.headers["Location"]
    self._account = messages.Account.from_json(account_obj)

Python
async def order_create(
    self, identifiers: typing.Union[typing.List[dict], typing.List[str]]
) -> messages.Order:
    """Creates a new order with the given identifiers.

    :param identifiers: :class:`list` of identifiers that the order should contain.
        May either be a list of fully qualified domain names or a list of :class:`dict`
        containing the *type* and *name* (both :class:`str`) of each identifier.
    :raises: :class:`acme.messages.Error` If the server is unwilling to create an order
        with the requested identifiers.
    :returns: The new order.
    """
    order = messages.NewOrder.from_data(identifiers=identifiers)

    resp, order_obj = await self._signed_request(order, self._directory["newOrder"])
    order_obj["url"] = resp.headers["Location"]
    return messages.Order.from_json(order_obj)

Python
async def order_finalize(
    self, order: messages.Order, csr: "cryptography.x509.CertificateSigningRequest"
) -> messages.Order:
    """Finalizes the order using the given CSR.

    The caller needs to ensure that this method is called with :py:func:`asyncio.wait_for` and a
    time-out value. Otherwise it may result in an infinite loop if the CA never reports the order's
    status as *ready*.

    :param order: Order that is to be finalized.
    :param csr: The CSR that is submitted to apply for certificate issuance.
    :raises:

        * :class:`acme.messages.Error` If the server is unwilling to finalize the order.
        * :class:`aiohttp.ClientResponseError` If the order does not exist.
    :returns: The finalized order.
    """
    cert_req = messages.CertificateRequest(csr=csr)

    while True:
        try:
            resp, order_obj = await self._signed_request(cert_req, order.finalize)
            break
        except acme.messages.Error as e:
            # Make sure that the order is in state READY before moving on.
            if e.code == "orderNotReady":
                await asyncio.sleep(self.FINALIZE_DELAY)
            else:
                raise e

    finalized = await self._poll_until(
        self.order_get,
        resp.headers["Location"],
        predicate=is_valid,
        negative_predicate=is_invalid,
        delay=5.0,
        max_tries=15,
    )
    return finalized

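`_poll_until`, `is_valid` and `is_invalid` are used here and in later snippets but are not part of this excerpt. A rough sketch of what such a polling helper could look like, assuming the predicates compare the object's *status* against the `acme.messages` status constants and that `PollingException` carries the last fetched object:

Python
# Hypothetical helpers; the real implementations are not included in this excerpt.
def is_valid(obj) -> bool:
    return obj.status == acme.messages.STATUS_VALID


def is_invalid(obj) -> bool:
    return obj.status == acme.messages.STATUS_INVALID


async def _poll_until(
    self, fetch, *args, predicate, negative_predicate, delay: float = 3.0, max_tries: int = 5
):
    """Repeatedly fetches an object until the predicate holds or retries are exhausted."""
    for _ in range(max_tries):
        obj = await fetch(*args)

        if negative_predicate(obj):
            raise PollingException(obj)  # assumed constructor; see challenges_complete below
        if predicate(obj):
            return obj

        await asyncio.sleep(delay)

    raise PollingException(obj)
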
Python
async def order_get(self, order_url: str) -> messages.Order:
    """Fetches an order given its URL.

    :param order_url: The order's URL.
    :raises: :class:`aiohttp.ClientResponseError` If the order does not exist.
    :return: The fetched order.
    """
    resp, order = await self._signed_request(None, order_url)
    order["url"] = order_url
    return messages.Order.from_json(order)

Python
async def orders_get(self) -> typing.List[str]:
    """Fetches the account's orders list.

    :return: List containing the URLs of the account's orders.
    """
    if not self._account.orders:
        return []

    orders = []
    next_url = self._account["orders"]

    while True:
        resp, orders_chunk = await self._signed_request(None, next_url)
        orders.extend(orders_chunk["orders"])

        if link := resp.links.get("next", {}).get("url"):
            next_url = str(link)
        else:
            break

    return orders

Python
async def authorization_get(
    self, authorization_url: str
) -> acme.messages.Authorization:
    """Fetches an authorization given its URL.

    :param authorization_url: The authorization's URL.
    :raises: :class:`aiohttp.ClientResponseError` If the authorization does not exist.
    :return: The fetched authorization.
    """
    resp, authorization = await self._signed_request(None, authorization_url)
    return acme.messages.Authorization.from_json(authorization)

Python
async def authorizations_complete(self, order: acme.messages.Order) -> None:
    """Completes all authorizations associated with the given order.

    Uses one of the registered :class:`ChallengeSolver` to complete one challenge per authorization.

    :param order: Order whose authorizations should be completed.
    :raises: :class:`CouldNotCompleteChallenge` If completion of one of the authorizations'
        challenges failed.
    """
    authorizations = [
        await self.authorization_get(authorization_url)
        for authorization_url in order.authorizations
    ]

    challenge_types = set(
        [
            ChallengeType(challenge.chall.typ)
            for authorization in authorizations
            for challenge in authorization.challenges
        ]
    )
    possible_types = self._challenge_solvers.keys() & challenge_types

    if len(possible_types) == 0:
        raise ValueError(
            f"The server offered the following challenge types but there is no solver "
            f"that is able to complete them: {', '.join(possible_types)}"
        )

    chosen_challenge_type = possible_types.pop()
    solver = self._challenge_solvers[chosen_challenge_type]
    logger.debug(
        "Chosen challenge type: %s, solver: %s",
        chosen_challenge_type,
        type(solver).__name__,
    )

    challenges_to_complete: typing.List[
        typing.Tuple[acme.messages.Identifier, acme.messages.ChallengeBody]
    ] = []

    for authorization in authorizations:
        for challenge in authorization.challenges:
            if ChallengeType(challenge.chall.typ) == chosen_challenge_type:
                challenges_to_complete.append((authorization.identifier, challenge))
                break

    try:
        await self.challenges_complete(solver, challenges_to_complete)
    except Exception:
        await self.challenges_cleanup(solver, challenges_to_complete)
        raise
    else:
        await self.challenges_cleanup(solver, challenges_to_complete)

    # Realistically, polling for the authorizations to become valid should never fail since we have already
    # ensured that one challenge per authorization is valid.
    await asyncio.gather(
        *[
            self._poll_until(
                self.authorization_get,
                authorization_url,
                predicate=is_valid,
                negative_predicate=is_invalid,
            )
            for authorization_url in order.authorizations
        ]
    )

Python
async def challenges_cleanup(
    self,
    solver: ChallengeSolver,
    challenges: typing.List[
        typing.Tuple[acme.messages.Identifier, acme.messages.ChallengeBody]
    ],
):
    """Cleans up after the challenges leveraging the given solver.

    :param solver: The challenge solver to use.
    :param challenges: List of identifier, challenge tuples to clean up after."""
    await asyncio.gather(
        *[
            solver.cleanup_challenge(self._private_key, identifier, challenge)
            for identifier, challenge in challenges
        ]
    )

Python
async def challenges_complete(
    self,
    solver: ChallengeSolver,
    challenges: typing.List[
        typing.Tuple[acme.messages.Identifier, acme.messages.ChallengeBody]
    ],
):
    """Attempts to complete the challenges leveraging the given solver.

    :param solver: The challenge solver to use.
    :param challenges: List of identifier, challenge tuples to complete.
    :raises: :class:`CouldNotCompleteChallenge` If completion of one of the challenges failed.
    """
    # Complete the pending challenges
    await asyncio.gather(
        *[
            solver.complete_challenge(self._private_key, identifier, challenge)
            for (identifier, challenge) in challenges
        ]
    )

    # Tell the server that we are ready for challenge validation
    await asyncio.gather(
        *[self.challenge_validate(challenge.uri) for _, challenge in challenges]
    )

    # Poll until all challenges have become valid
    try:
        await asyncio.gather(
            *[
                self._poll_until(
                    self.challenge_get,
                    challenge.uri,
                    predicate=is_valid,
                    negative_predicate=is_invalid,
                    delay=5.0,
                    max_tries=50,
                )
                for _, challenge in challenges
            ]
        )
    except PollingException as e:
        raise CouldNotCompleteChallenge(e.obj)

Python
async def challenge_get(self, challenge_url: str) -> acme.messages.ChallengeBody:
    """Fetches a challenge given its URL.

    :param challenge_url: The challenge's URL.
    :raises: :class:`aiohttp.ClientResponseError` If the challenge does not exist.
    :return: The fetched challenge.
    """
    _, challenge_obj = await self._signed_request(None, challenge_url)
    return acme.messages.ChallengeBody.from_json(challenge_obj)

Python
async def challenge_validate(self, challenge_url: str) -> None:
    """Initiates the given challenge's validation.

    :param challenge_url: The challenge's URL.
    :raises: :class:`aiohttp.ClientResponseError` If the challenge does not exist.
    """
    await self._signed_request(None, challenge_url, post_as_get=False)

Python
async def certificate_get(self, order: acme.messages.Order) -> str:
    """Downloads the given order's certificate.

    :param order: The order whose certificate to download.
    :raises:

        * :class:`aiohttp.ClientResponseError` If the certificate does not exist.
        * :class:`ValueError` If the order has not been finalized yet, i.e. the certificate \
            property is *None*.
    :return: The order's certificate encoded as PEM.
    """
    if not order.certificate:
        raise ValueError("This order has not been finalized")

    _, pem = await self._signed_request(None, order.certificate)

    return pem

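Taken together, the client methods above suggest the following issuance flow. This is a hedged sketch to be run inside an async context: the CSR construction and the timeout value are illustrative placeholders, not part of this excerpt.

Python
# Hypothetical end-to-end flow built from the methods shown above.
order = await client.order_create(["example.test", "www.example.test"])

# Solve one challenge per authorization using the registered solver.
await client.authorizations_complete(order)

# csr is a cryptography CertificateSigningRequest built elsewhere (placeholder).
finalized = await asyncio.wait_for(client.order_finalize(order, csr), timeout=300)

pem_chain = await client.certificate_get(finalized)
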
Python
def register_challenge_solver(
    self,
    challenge_solver: ChallengeSolver,
):
    """Registers a challenge solver with the client.

    The challenge solver is used to complete authorizations' challenges whose types it supports.

    :param challenge_solver: The challenge solver to register.
    :raises: :class:`ValueError` If a challenge solver is already registered that supports any of
        the challenge types that *challenge_solver* supports.
    """
    for challenge_type in challenge_solver.SUPPORTED_CHALLENGES:
        if self._challenge_solvers.get(challenge_type):
            raise ValueError(
                f"A challenge solver for type {challenge_type} is already registered"
            )
        else:
            self._challenge_solvers[challenge_type] = challenge_solver

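A short usage sketch, assuming an Infoblox-backed DNS-01 solver such as the one whose methods appear at the top of this excerpt; the constructor arguments are placeholders:

Python
# Hypothetical solver setup; constructor arguments are placeholders.
solver = InfobloxClient(host="infoblox.example.test", username="acme", password="secret")

# Registers the solver for every challenge type it supports (e.g. DNS-01).
client.register_challenge_solver(solver)
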
Python
async def begin(self):
    """Creates the database's tables according to the models defined in :mod:`acmetk.models`."""
    async with self.engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)

    async with self.session() as session:
        session.add(models.base.alembic_version(version_num=self.ALEMBIC_REVISION))
        await session.flush()
        await session.commit()

Python
def url(self, request) -> str:
    """Returns the challenge's URL.

    :param request: The client request needed to build the URL.
    :return: The challenge's URL.
    """
    return url_for(request, "challenge", id=str(self.challenge_id))

Python
def create_types(
    cls, types: typing.Iterable[ChallengeType]
) -> typing.List["Challenge"]:
    """Returns new pending challenges of the given types.

    :param types: The types of challenges to be created.
    :return: The created challenges.
    """
    return [cls(type=type_, status=ChallengeStatus.PENDING) for type_ in types]

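A brief usage sketch, assuming `create_types` is exposed as a classmethod on the `Challenge` model; how the challenges get attached to an authorization is a placeholder:

Python
# Hypothetical usage; the authorization handling is a placeholder.
challenges = Challenge.create_types([ChallengeType.DNS_01, ChallengeType.HTTP_01])
authorization.challenges = challenges
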
Python
async def validate(
    self,
    session,
    request,
    validator: "acmetk.server.challenge_validator.ChallengeValidator",
) -> ChallengeStatus:
    """Validates the challenge with the given validator.

    Also, it calls its parent authorization's :func:`~acmetk.models.authorization.Authorization.validate`
    method and finally returns the new status after validation.

    :param session: The open database session.
    :param validator: The challenge validator to perform the validation with.
    :return: The challenge's status after validation.
    """
    try:
        await validator.validate_challenge(self, request=request)
    except acmetk.server.challenge_validator.CouldNotValidateChallenge as e:
        self.error = e.to_acme_error()
        self.status = ChallengeStatus.INVALID

    if self.status in (ChallengeStatus.PENDING, ChallengeStatus.PROCESSING):
        self.status = ChallengeStatus.VALID
        self.validated = datetime.datetime.now(datetime.timezone.utc)

    await self.authorization.validate(session)

    return self.status

Python
def verify(
    self,
    jws: acme.jws.JWS,
) -> bool:
    """Checks the given signature against the EAB's.

    :param jws: The EAB request JWS to be verified.
    :return: True iff the given signature and the EAB's are equal.
    """
    key = josepy.jwk.JWKOct(key=josepy.b64.b64decode(self.hmac_key))
    return jws.verify(key)

Python
def expired(self) -> bool:
    """Returns whether the EAB has expired.

    :return: True iff the EAB has expired.
    """
    return datetime.datetime.now() - self.when > self.EXPIRES_AFTER

Python
def signature(
    self,
    key_json: str,
) -> str:
    """Returns the EAB's signature.

    :param key_json: The ACME account key that the external account is to be bound to.
    """
    return josepy.b64.b64encode(self._eab(key_json).signature.signature).decode()

Python
def create(self, request) -> typing.Tuple[str, str]:
    """Creates an :class:`ExternalAccountBinding` request and stores it internally for verification
    at a later point in time.

    :param request: The request that contains the PEM-encoded x509 client certificate in the
        *X-SSL-CERT* header.
    :return: The resulting pending EAB's :attr:`~ExternalAccountBinding.kid` and
        :attr:`~ExternalAccountBinding.hmac_key`.
    """
    # The client certificate in the PEM format (urlencoded) for an established SSL connection (1.13.5);
    cert = x509.load_pem_x509_certificate(
        urllib.parse.unquote(request.headers["X-SSL-CERT"]).encode()
    )

    if not (
        mail := cert.subject.get_attributes_for_oid(x509.NameOID.EMAIL_ADDRESS)
    ):
        ext = cert.extensions.get_extension_for_oid(
            x509.ExtensionOID.SUBJECT_ALTERNATIVE_NAME
        )
        mails = ext.value.get_values_for_type(x509.RFC822Name)
        if len(mails) != 1:
            raise ValueError(f"{len(mails)} mail addresses in cert, expecting 1")
        mail = mails.pop()

    if not (pending_eab := self._pending.get(mail, None)) or pending_eab.expired():
        pending_eab = self._pending[mail] = ExternalAccountBinding(
            mail, url_for(request, "new-account")
        )

    return pending_eab.kid, pending_eab.hmac_key

Python
def verify(
    self,
    kid: str,
    jws: acme.jws.JWS,
) -> bool:
    """Verifies an external account binding given its ACME account key, kid and signature.

    :param kid: The EAB's kid.
    :param jws: The EAB request JWS.
    :return: True iff verification was successful.
    """
    if not (pending := self._pending.get(kid, None)):
        return False

    if pending.expired():
        return False

    return pending.verify(jws)

Python
def verify_eab(
    self,
    request,
    pub_key: "cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey",
    reg: acme.messages.Registration,
):
    """Verifies an ACME Registration request whose payload contains an external account binding JWS.

    :param pub_key: The public key that is contained in the outer JWS, i.e. the ACME account key.
    :param reg: The registration message.
    :raises:

        * :class:`acme.messages.Error` if any of the following are true:

            * The request does not contain a valid JWS
            * The request JWS does not contain an *externalAccountBinding* field
            * The EAB JWS was signed with an unsupported algorithm (:attr:`SUPPORTED_EAB_JWS_ALGORITHMS`)
            * The EAB JWS' payload does not contain the same public key as the encapsulating JWS
            * The EAB JWS' signature is invalid
    """
    if not reg.external_account_binding:
        raise acme.messages.Error.with_code(
            "externalAccountRequired", detail=f"Visit {url_for(request, 'eab')}"
        )

    try:
        jws = acme.jws.JWS.from_json(dict(reg.external_account_binding))
    except josepy.errors.DeserializationError:
        raise acme.messages.Error.with_code(
            "malformed", detail="The request does not contain a valid JWS."
        )

    if jws.signature.combined.alg not in self.SUPPORTED_EAB_JWS_ALGORITHMS:
        raise acme.messages.Error.with_code(
            "badSignatureAlgorithm",
            detail="The external account binding JWS was signed with an unsupported algorithm. "
            f"Supported algorithms: {', '.join([str(alg) for alg in self.SUPPORTED_EAB_JWS_ALGORITHMS])}",
        )

    sig = jws.signature.combined
    kid = sig.kid

    if sig.url != str(forwarded_url(request)):
        raise acme.messages.Error.with_code("unauthorized")

    if josepy.jwk.JWKRSA.from_json(json.loads(jws.payload)) != josepy.jwk.JWKRSA(
        key=pub_key
    ):
        raise acme.messages.Error.with_code(
            "malformed",
            detail="The external account binding does not contain the same public key as the request JWS.",
        )

    if kid not in reg.contact + reg.emails:
        raise acme.messages.Error.with_code(
            "malformed",
            detail="The contact field must contain the email address from the "
            "SSL client certificate which was used to request the EAB.",
        )

    if not self._eab_store.verify(kid, jws):
        raise acme.messages.Error.with_code(
            "unauthorized", detail="The external account binding is invalid."
        )

Python
async def eab(self, request):
    """Handler that displays the user's external account binding credentials,
    i.e. their *kid* and *hmac_key* after their client certificate has been verified
    and forwarded by the reverse proxy.
    """
    # from unittest.mock import Mock
    # request = Mock(headers={"X-SSL-CERT": urllib.parse.quote(self.data)}, url=request.url)
    if not request.headers.get("X-SSL-CERT"):
        response = aiohttp_jinja2.render_template("eab.jinja2", request, {})
        response.set_status(403)
        response.text = (
            "An External Account Binding may only be created if a valid client certificate "
            "is sent with the request."
        )
        return response

    kid, hmac_key = self._eab_store.create(request)

    return {"kid": kid, "hmac_key": hmac_key}

Python
async def validate_challenge(self, challenge: Challenge, **kwargs):
    """Validates the given challenge.

    This method should attempt to validate the given challenge and
    raise a :class:`CouldNotValidateChallenge` exception if the validation failed.

    :param challenge: The challenge to be validated
    :raises: :class:`CouldNotValidateChallenge` If the validation failed
    """
    pass

Python
async def query_records(self, name: str) -> typing.Set[str]:
    """Queries DNS A and AAAA records.

    :param name: Name of the A/AAAA record to query.
    :return: Set of IPs that the A/AAAA records resolve to.
    """
    resolved_ips = [
        await self._query_record(name, type_) for type_ in ("A", "AAAA")
    ]

    return set(itertools.chain.from_iterable(resolved_ips))

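`_query_record` is used here but not included in this excerpt. A minimal sketch of such a helper, assuming the validator stores a `dns.asyncresolver.Resolver` in a (hypothetical) `self._resolver` attribute and treats missing records as an empty result:

Python
# Hypothetical helper; the real implementation is not part of this excerpt.
async def _query_record(self, name: str, type_: str) -> typing.Set[str]:
    """Resolves a single A/AAAA record type and returns the addresses as strings."""
    records = set()

    with contextlib.suppress(
        dns.asyncresolver.NXDOMAIN, dns.asyncresolver.NoAnswer
    ):
        resp = await self._resolver.resolve(name, type_)
        records.update(rdata.address for rdata in resp)

    return records
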
Python
async def validate_challenge(self, challenge: Challenge, request=None):
    """Validates the given challenge.

    This method takes a challenge of :class:`ChallengeType` *DNS_01* or *HTTP_01*
    and does not actually validate that challenge, but instead checks whether the corresponding
    authorization's identifier resolves to the IP address that the validation request is being made from.

    :param challenge: The challenge to be validated
    :raises: :class:`CouldNotValidateChallenge` If the validation failed
    """
    identifier = challenge.authorization.identifier.value
    logger.debug(
        "Validating challenge %s for identifier %s",
        challenge.challenge_id,
        identifier,
    )

    """Wildcard validation …
    Resolve some names
    """
    if challenge.authorization.wildcard:
        identifier = identifier[2:]
        names = ["www", "mail", "smtp", "gitlab"]
        rnames = [
            "".join([random.choice(string.ascii_lowercase) for j in range(i)])
            for i in range(6)
        ]
        names.extend(rnames)
        resolved = await asyncio.gather(
            *[self.query_records(f"{i}.{identifier}") for i in names]
        )
        resolved_ips = set.intersection(*resolved)
    else:
        resolved_ips = await self.query_records(identifier)

    actual_ip = request["actual_ip"]
    if actual_ip not in resolved_ips:
        logger.debug(
            "Validation of challenge %s failed; %s does not resolve to IP %s. Resolved IPs: %s",
            challenge.challenge_id,
            identifier,
            actual_ip,
            resolved_ips,
        )

        raise CouldNotValidateChallenge(
            detail=f"Identifier '{identifier}' does not resolve to host IP '{actual_ip}'."
        )

Python
async def validate_challenge(self, challenge: Challenge, **kwargs):
    """Does not validate the given challenge.

    Instead, this method only logs the mock validation attempt and pauses execution for one second.

    :param challenge: The challenge to be validated
    """
    identifier = challenge.authorization.identifier.value
    logger.debug(
        f"(not) validating challenge {challenge.challenge_id}, type {challenge.type} identifier {identifier}"
    )
    # await asyncio.sleep(1)

Python
def plugins():
    """Lists the available plugins and their respective config strings."""
    for plugins in [
        ("Server apps", server_app_registry.config_mapping()),
        ("Challenge solvers", challenge_solver_registry.config_mapping()),
        ("Challenge validators", challenge_validator_registry.config_mapping()),
    ]:
        click.echo(
            f"{plugins[0]}: {', '.join([f'{app.__name__} ({config_name})' for config_name, app in plugins[1].items()])}"
        )

Python
def generate_keys(root_key_file):
    """Generates a self-signed root key pair/cert for the CA."""
    click.echo("Generating root key pair/cert")
    # TODO: swap out info
    generate_root_cert(
        Path(root_key_file),
        "DE",
        "Lower Saxony",
        "Hanover",
        "ACME Toolkit",
        "ACMEToolkit",
    )

Python
def generate_account_key(account_key_file, key_type):
    """Generates an account key for the ACME client."""
    click.echo(f"Generating client key of type {key_type} at {account_key_file}.")
    account_key_file = Path(account_key_file)

    if key_type == "rsa":
        generate_rsa_key(account_key_file)
    else:
        generate_ec_key(account_key_file)

Python
def run(config_file, bootstrap_port, path):
    """Starts the app as defined in the config file.

    Starts the app in bootstrap mode if the bootstrap port is set via --bootstrap-port."""
    config = load_config(config_file)

    loop = asyncio.get_event_loop()

    app_config_name = list(config.keys())[0]

    try:
        app_class = server_app_registry.get_plugin(app_config_name)
    except ValueError as e:
        raise click.UsageError(*e.args)

    if bootstrap_port:
        if app_class is AcmeCA:
            raise click.UsageError(
                f"Bootstrapping is not supported for the {app_class} at this moment."
            )

        click.echo(
            f"Starting {app_class.__name__} in bootstrap mode on port {bootstrap_port}"
        )
        app_config = config[app_config_name]

        app_config["port"] = bootstrap_port
        app_config["challenge_validator"] = "dummy"  # Do not validate challenges
        app_config["subnets"] = [
            "127.0.0.1/32",
            "10.110.0.0/24",
        ]  # Only allow localhost and the docker bridge network
        # Bootstrap app does not run behind a reverse proxy:
        app_config["use_forwarded_header"] = False
        app_config["require_eab"] = False
    else:
        click.echo(f"Starting {app_class.__name__}")

    if issubclass(app_class, AcmeRelayBase):
        runner, site = loop.run_until_complete(
            run_relay(config, path, app_class, app_config_name)
        )
    elif app_class is AcmeCA:
        runner, site = loop.run_until_complete(run_ca(config, path))
    else:
        raise ValueError(app_class)

    aiohttp_jinja2.setup(site.app, loader=jinja2.FileSystemLoader("./tpl/"))
    aiohttp_jinja2.get_env(site.app).globals.update({"url_for": _url_for})

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        loop.run_until_complete(runner.cleanup())

Python
def drop(connection_string, password): """Drops the database's tables. Make sure to backup the database before running this command. """ db = Database(connection_string.format(password)) click.echo("Dropping tables...") loop = asyncio.get_event_loop() if click.confirm("Really drop all tables?"): loop.run_until_complete(db.drop()) click.echo("OK.") else: click.echo("Aborting...")
Python
def load_plugins(cls, path: str) -> None: """Loads plugins from all modules under the given path. :param path: The path to load plugins from. """ project_base = "acmetk" path_ = Path(path) try: cls._load_plugins_from_path(path_) except FileNotFoundError: try: cls._load_plugins_from_path( path_.parent.parent / project_base / path_.stem ) except FileNotFoundError: logger.warning( "Could not find the plugins directory in ./%s/%s or ./%s", project_base, path, path, )
Python
def register_plugin(cls, config_name): """Decorator that registers a class as a plugin under the given name. The name is used to refer to the class in config files. :param config_name: The plugin's name in config files :return: The registered plugin class. """ def deco(plugin_cls): # find the parent class in the registry map for registered_parent, registry_ in cls._registry_map.items(): if issubclass(plugin_cls, registered_parent): registry = registry_ break else: registry = cls.get_registry(plugin_cls.__mro__[1]) registry._subclasses[config_name] = plugin_cls return plugin_cls return deco
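To make the decorator above concrete, a registration could look roughly like the sketch below. PluginRegistry, ChallengeSolver and DummySolver are assumed names used only for illustration; only the decorator pattern and config_mapping() mirror the surrounding code.

# Hedged sketch -- class and registry names are assumptions, not from the source.
@PluginRegistry.register_plugin("dummy_solver")
class DummySolver(ChallengeSolver):
    """A solver that does nothing; handy in tests."""

# The config name now resolves to the class through the registry's mapping:
assert challenge_solver_registry.config_mapping()["dummy_solver"] is DummySolver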
Python
def config_mapping(self) -> typing.Dict[str, type]: """Method that maps plugin config names to the actual class object. :return: Mapping from config names to the actual class objects. """ return self._subclasses
Python
def orders_url(self, request) -> str: """Returns the URL of the account's orders list. :param request: The client request needed to build the URL. :return: The URL at which the account's orders list may be requested. """ return url_for(request, "orders", id=str(self.kid))
Python
def orders_list(self, request) -> typing.List[str]: """Returns the account's orders list. :param request: The client request needed to build the list of URLs. :return: A list of URLs of the account's orders. """ return [ order.url(request) for order in self.orders if order.status == OrderStatus.PENDING ]
Python
def authorized_identifiers(self, lower: bool = False) -> typing.Set[str]: """Returns the identifiers that the account holds valid authorizations for. :param lower: True if the list of authorized identifiers should be lowercased. :return: The set of identifiers that the account holds authorizations for. """ # We deliberately don't check whether the identifiers' authorizations have expired, # so that older certs may still be revoked. identifiers = [ identifier for order in self.orders for identifier in order.identifiers if identifier.authorization.is_valid(expired=True) ] return set( identifier.value.lower() if lower else identifier.value for identifier in identifiers )
Python
def validate_cert(self, cert: "cryptography.x509.Certificate") -> bool: """Validates whether the account holds authorizations for all names present in the certificate. :param cert: The certificate to validate. :return: *True* iff the account holds authorizations for all names present in the certificate. """ return names_of(cert, lower=True).issubset( self.authorized_identifiers(lower=True) )
Python
def update(self, upd: "acmetk.models.messages.AccountUpdate"): """Updates the account with new information. Possible updates are currently to the :attr:`contact` field and to the :attr:`status` field. :param upd: The requested updates. """ if contact := upd.contact: self.contact = json.dumps(contact) # the only allowed state transition is VALID -> DEACTIVATED if requested by the client if upd.status == AccountStatus.DEACTIVATED: self.status = AccountStatus.DEACTIVATED elif upd.status: raise ValueError(f"Cannot set an account's status to {upd.status}")
Python
def from_obj( cls, jwk: josepy.jwk.JWK, obj: acme.messages.Registration ) -> "Account": """A factory that constructs a new :class:`Account` from a message object. The *kid* is set to the passed JWK's SHA-256 hex digest and the *status* is set to *valid*. :param jwk: The account's key. :param obj: The registration message object. :return: The constructed account. """ return Account( key=jwk, kid=cls._jwk_kid(jwk), status=AccountStatus.VALID, contact=json.dumps(obj.contact), )
Python
def read_config(self): ''' Get configuration from config file. ''' filename = os.path.join(self._directory, '.config') res = config.read_config(filename) ext = res[0][:-1] if ext[0] == ".": ext = ext[1:] self._ext = ext self._track_folder_name = int(''.join(res[1][:-1])) self._track_file_name = int(''.join(res[2][:-1])) if res[3][:-1] == 't': self._debugMode = True else: self._debugMode = False
Python
def collect_files(self): ''' Goes to the directory defined by the user and collect list of files ''' for dirName, subdirList, fileList in os.walk(self._directory): for fname in fileList: newFile = os.path.join(dirName, fname) self._fileList.append(newFile)
Python
def only_data_files(self): ''' Restrict the files in the directory to those with the data extension ''' newFileList = [] for file in self._fileList: file_ext = file.split(os.extsep) if file_ext[-1] == self._ext: newFileList.append(file) self._fileList = newFileList
Python
def read_files_to_obj(self): ''' Input: List of data files in readable format Calls read_file_to_obj for each ''' file_num = 0 for file in self._fileList: if self._track_folder_name: self._folder = file.split(os.sep)[-2] if self._track_file_name: tmpfile = file.split(os.sep)[-1] self._file = tmpfile.split(os.extsep)[0] self.read_file_to_obj(file, file_num) file_num += 1
Python
def read_file_to_obj(self, file_name, file_num): ''' Inputs: file name of data to import, file number to act as data id Pulls data into nested list. Each file becomes a first-level item, each line in the file becomes a list of the items on that line. ''' full_file_name = os.path.join(self._directory, file_name) dsname = self.sample_name() if dsname == '': dsname = file_num ds_dict = self._data_samples.get_data_samples() if (dsname in ds_dict): dsamp = self._data_samples.newDataSample(dsname) else: dsamp = self._data_samples.initDataSamples(dsname) f = open(full_file_name) # create an "instance" for the file di = dsamp.add_instance(self.instance_name()) for line in f: line_data = [] if self._ext == 'csv': # split by comma line_data = line.split(',') else: # default to split by whitespace line_data = line.split() # if the line of data ends with a newline, cut it off if str(line_data[-1])[-1] == '\n': line_data[-1] = str(line_data[-1])[:-1] # confirm all data is float for i in range(len(line_data)): line_data[i] = float(line_data[i]) di.add_point(line_data) f.close()
Python
def print_data(self, data_id): ''' Input: data_id for data to print Prints data, one line per line. ''' for file in self._data[data_id]: for line in file: print(line) # for position in time: # print(position, end="") print("\n", end="")
Python
def main(): ''' Sequence through the process of instantiating a data processor and importing data from the relevant computer directory ''' directory = sys.argv[1] # directory = os.path.join(os.getcwd(), 'examples', 'example_data') dp = DataProcessor(directory) dp.collect_files() dp.only_data_files() dp.read_files_to_obj() print(dp.get_data_samples_obj().make_trajectories()) print(dp.make_set_of_trajectories())
Python
def execute(self, **kwargs): """ SlackAPIOperator calls will not fail even if the call is not successful. It should not prevent a DAG from completing successfully """ if not self.params: self.construct_api_call_params() sc = SlackClient(self.token) sc.api_call(self.method, **self.params)
Python
def heartbeat(self): ''' Heartbeats update the job's entry in the database with a timestamp for the latest_heartbeat and allows for the job to be killed externally. This allows at the system level to monitor what is actually active. For instance, an old heartbeat for SchedulerJob would mean something is wrong. This also allows for any job to be killed externally, regardless of who is running it or on which machine it is running. Note that if your heartbeat is set to 60 seconds and you call this method after 10 seconds of processing since the last heartbeat, it will sleep 50 seconds to complete the 60 seconds and keep a steady heart rate. If you go over 60 seconds before calling it, it won't sleep at all. ''' session = settings.Session() job = session.query(BaseJob).filter(BaseJob.id == self.id).first() if job.state == State.SHUTDOWN: self.kill() if job.latest_heartbeat: sleep_for = self.heartrate - ( datetime.now() - job.latest_heartbeat).total_seconds() if sleep_for > 0: sleep(sleep_for) job.latest_heartbeat = datetime.now() session.merge(job) session.commit() session.close() self.heartbeat_callback() logging.debug('[heart] Boom.')
Python
def manage_slas(self, dag, session=None): """ Finds all tasks that have SLAs defined and sends alert emails where needed. New SLA misses are also recorded in the database. We assume that the scheduler runs often, so we only check for tasks that should have succeeded in the past hour. """ TI = models.TaskInstance sq = ( session .query( TI.task_id, func.max(TI.execution_date).label('max_ti')) .filter(TI.dag_id == dag.dag_id) .filter(TI.state == State.SUCCESS) .filter(TI.task_id.in_(dag.task_ids)) .group_by(TI.task_id).subquery('sq') ) max_tis = session.query(TI).filter( TI.dag_id == dag.dag_id, TI.task_id == sq.c.task_id, TI.execution_date == sq.c.max_ti, ).all() ts = datetime.now() SlaMiss = models.SlaMiss for ti in max_tis: task = dag.get_task(ti.task_id) dttm = ti.execution_date if task.sla: dttm += dag.schedule_interval while dttm < datetime.now(): if dttm + task.sla + dag.schedule_interval < datetime.now(): session.merge(models.SlaMiss( task_id=ti.task_id, dag_id=ti.dag_id, execution_date=dttm, timestamp=ts)) dttm += dag.schedule_interval session.commit() slas = ( session .query(SlaMiss) .filter(SlaMiss.email_sent == False) .filter(SlaMiss.dag_id == dag.dag_id) .all() ) if slas: sla_dates = [sla.execution_date for sla in slas] blocking_tis = ( session .query(TI) .filter(TI.state != State.SUCCESS) .filter(TI.execution_date.in_(sla_dates)) .filter(TI.dag_id == dag.dag_id) .all() ) for ti in blocking_tis: ti.task = dag.get_task(ti.task_id) blocking_tis = ([ti for ti in blocking_tis if ti.are_dependencies_met(main_session=session)]) task_list = "\n".join([ sla.task_id + ' on ' + sla.execution_date.isoformat() for sla in slas]) blocking_task_list = "\n".join([ ti.task_id + ' on ' + ti.execution_date.isoformat() for ti in blocking_tis]) from airflow import ascii email_content = """\ Here's a list of tasks that missed their SLAs: <pre><code>{task_list}\n</code></pre> Blocking tasks: <pre><code>{blocking_task_list}\n{ascii.bug}</code></pre> """.format(**locals()) emails = [] for t in dag.tasks: if t.email: if isinstance(t.email, basestring): l = [t.email] elif isinstance(t.email, (list, tuple)): l = t.email for email in l: if email not in emails: emails.append(email) if emails and len(slas): utils.send_email( emails, "[airflow] SLA miss on DAG=" + dag.dag_id, email_content) for sla in slas: sla.email_sent = True session.merge(sla) session.commit() session.close()
Python
def process_dag(self, dag, executor): """ This method schedules a single DAG by looking at the latest run for each task and attempting to schedule the following run. As multiple schedulers may be running for redundancy, this function takes a lock on the DAG and timestamps the last run in ``last_scheduler_run``. """ DagModel = models.DagModel session = settings.Session() # picklin' pickle_id = None if self.do_pickle and self.executor.__class__ not in ( executors.LocalExecutor, executors.SequentialExecutor): pickle_id = dag.pickle(session).id db_dag = session.query( DagModel).filter(DagModel.dag_id == dag.dag_id).first() last_scheduler_run = db_dag.last_scheduler_run or datetime(2000, 1, 1) secs_since_last = ( datetime.now() - last_scheduler_run).total_seconds() # if db_dag.scheduler_lock or if secs_since_last < self.heartrate: session.commit() session.close() return None else: # Taking a lock db_dag.scheduler_lock = True db_dag.last_scheduler_run = datetime.now() session.commit() TI = models.TaskInstance logging.info( "Getting latest instance " "for all tasks in dag " + dag.dag_id) sq = ( session .query( TI.task_id, func.max(TI.execution_date).label('max_ti')) .filter(TI.dag_id == dag.dag_id) .group_by(TI.task_id).subquery('sq') ) qry = session.query(TI).filter( TI.dag_id == dag.dag_id, TI.task_id == sq.c.task_id, TI.execution_date == sq.c.max_ti, ) logging.debug("Querying max dates for each task") latest_ti = qry.all() ti_dict = {ti.task_id: ti for ti in latest_ti} session.expunge_all() session.commit() logging.debug("{} rows returned".format(len(latest_ti))) for task in dag.tasks: if task.adhoc: continue if task.task_id not in ti_dict: # Brand new task, let's get started ti = TI(task, task.start_date) ti.refresh_from_db() if ti.is_queueable(flag_upstream_failed=True): logging.info( 'First run for {ti}'.format(**locals())) executor.queue_task_instance(ti, pickle_id=pickle_id) else: ti = ti_dict[task.task_id] ti.task = task # Hacky but worky if ti.state == State.RUNNING: continue # Only one task at a time elif ti.state == State.UP_FOR_RETRY: # If task instance if up for retry, make sure # the retry delay is met if ti.is_runnable(): logging.debug('Triggering retry: ' + str(ti)) executor.queue_task_instance(ti, pickle_id=pickle_id) elif ti.state == State.QUEUED: # If was queued we skipped so that in gets prioritized # in self.prioritize_queued continue else: # Trying to run the next schedule next_schedule = ( ti.execution_date + task.schedule_interval) if ( ti.task.end_date and next_schedule > ti.task.end_date): continue ti = TI( task=task, execution_date=next_schedule, ) ti.refresh_from_db() if ti.is_queueable(flag_upstream_failed=True): logging.debug('Queuing next run: ' + str(ti)) executor.queue_task_instance(ti, pickle_id=pickle_id) # Releasing the lock logging.debug("Unlocking DAG (scheduler_lock)") db_dag = ( session.query(DagModel) .filter(DagModel.dag_id == dag.dag_id) .first() ) db_dag.scheduler_lock = False session.merge(db_dag) session.commit() session.close()
Python
def _execute(self): """ Runs a dag for a specified date range. """ session = settings.Session() start_date = self.bf_start_date end_date = self.bf_end_date # picklin' pickle_id = None if not self.donot_pickle and self.executor.__class__ not in ( executors.LocalExecutor, executors.SequentialExecutor): pickle = models.DagPickle(self.dag) session.add(pickle) session.commit() pickle_id = pickle.id executor = self.executor executor.start() # Build a list of all instances to run tasks_to_run = {} failed = [] succeeded = [] started = [] wont_run = [] for task in self.dag.tasks: if (not self.include_adhoc) and task.adhoc: continue start_date = start_date or task.start_date end_date = end_date or task.end_date or datetime.now() for dttm in utils.date_range( start_date, end_date, task.dag.schedule_interval): ti = models.TaskInstance(task, dttm) tasks_to_run[ti.key] = ti # Triggering what is ready to get triggered while tasks_to_run: for key, ti in list(tasks_to_run.items()): ti.refresh_from_db() if ti.state == State.SUCCESS and key in tasks_to_run: succeeded.append(key) del tasks_to_run[key] elif ti.is_runnable(): executor.queue_task_instance( ti, mark_success=self.mark_success, task_start_date=self.bf_start_date, pickle_id=pickle_id, ignore_dependencies=self.ignore_dependencies) ti.state = State.RUNNING if key not in started: started.append(key) self.heartbeat() executor.heartbeat() # Reacting to events for key, state in list(executor.get_event_buffer().items()): dag_id, task_id, execution_date = key if key not in tasks_to_run: continue ti = tasks_to_run[key] ti.refresh_from_db() if ti.state in (State.FAILED, State.SKIPPED): if ti.state == State.FAILED: failed.append(key) logging.error("Task instance " + str(key) + " failed") elif ti.state == State.SKIPPED: wont_run.append(key) logging.error("Skipping " + str(key) + " failed") del tasks_to_run[key] # Removing downstream tasks that also shouldn't run for t in self.dag.get_task(task_id).get_flat_relatives( upstream=False): key = (ti.dag_id, t.task_id, execution_date) if key in tasks_to_run: wont_run.append(key) del tasks_to_run[key] elif ti.state == State.SUCCESS: succeeded.append(key) del tasks_to_run[key] msg = ( "[backfill progress] " "waiting: {0} | " "succeeded: {1} | " "kicked_off: {2} | " "failed: {3} | " "wont_run: {4} ").format( len(tasks_to_run), len(succeeded), len(started), len(failed), len(wont_run)) logging.info(msg) executor.end() session.close() if failed: raise AirflowException( "Some tasks instances failed, here's the list:\n"+str(failed)) logging.info("All done. Exiting.")
Python
def run(self, sql, autocommit=False, parameters=None): """ Runs a command or a list of commands. Pass a list of sql statements to the sql parameter to get them to execute sequentially :param sql: the sql statement to be executed (str) or a list of sql statements to execute :type sql: str or list """ conn = self.get_conn() if isinstance(sql, basestring): sql = [sql] if self.supports_autocommit: self.set_autocommit(conn, autocommit) cur = conn.cursor() for s in sql: cur.execute(s, parameters) cur.close() conn.commit() conn.close()
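A hedged usage sketch for run(): statements passed as a list execute sequentially on one connection, and the same parameters are bound for each execute. MySqlHook is assumed here as one concrete subclass of this hook; the connection id, table names and parameters are placeholders, not from the source.

# Hedged sketch -- hook subclass, connection id and SQL are illustrative only.
hook = MySqlHook(mysql_conn_id="my_db")
hook.run(
    [
        "DELETE FROM staging_events WHERE ds = %s",
        "INSERT INTO staging_events SELECT * FROM raw_events WHERE ds = %s",
    ],
    parameters=("2015-01-01",),
)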
Python
def insert_rows(self, table, rows, target_fields=None, commit_every=1000): """ A generic way to insert a set of tuples into a table; the whole set of inserts is treated as one transaction """ if target_fields: target_fields = ", ".join(target_fields) target_fields = "({})".format(target_fields) else: target_fields = '' conn = self.get_conn() cur = conn.cursor() if self.supports_autocommit: cur.execute('SET autocommit = 0') conn.commit() i = 0 for row in rows: i += 1 l = [] for cell in row: if isinstance(cell, basestring): l.append("'" + str(cell).replace("'", "''") + "'") elif cell is None: l.append('NULL') elif isinstance(cell, numpy.datetime64): l.append("'" + str(cell) + "'") elif isinstance(cell, datetime): l.append("'" + cell.isoformat() + "'") else: l.append(str(cell)) values = tuple(l) sql = "INSERT INTO {0} {1} VALUES ({2});".format( table, target_fields, ",".join(values)) cur.execute(sql) if i % commit_every == 0: conn.commit() logging.info( "Loaded {i} rows into {table} so far".format(**locals())) conn.commit() cur.close() conn.close() logging.info( "Done loading. Loaded a total of {i} rows".format(**locals()))
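A hedged usage sketch for insert_rows(); the hook instance, table and column names below are placeholders, not taken from the source.

# Hedged sketch -- names and values are illustrative; commits happen every 500 rows.
hook.insert_rows(
    table="sales",
    rows=[(1, "2015-01-01", 9.99), (2, "2015-01-01", None)],
    target_fields=["id", "ds", "amount"],
    commit_every=500,
)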
Python
def round_time(dt, delta, start_date=datetime.min): """ Returns the datetime of the form start_date + i * delta which is closest to dt for any non-negative integer i. Note that delta may be a datetime.timedelta or a dateutil.relativedelta >>> round_time(datetime(2015, 1, 1, 6), timedelta(days=1)) datetime.datetime(2015, 1, 1, 0, 0) >>> round_time(datetime(2015, 1, 2), relativedelta(months=1)) datetime.datetime(2015, 1, 1, 0, 0) >>> round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0)) datetime.datetime(2015, 9, 16, 0, 0) >>> round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0)) datetime.datetime(2015, 9, 15, 0, 0) >>> round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0)) datetime.datetime(2015, 9, 14, 0, 0) >>> round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0)) datetime.datetime(2015, 9, 14, 0, 0) """ # Ignore the microseconds of dt dt -= timedelta(microseconds = dt.microsecond) # We are looking for a datetime in the form start_date + i * delta # which is as close as possible to dt. Since delta could be a relative # delta we don't know its exact length in seconds so we cannot rely on # division to find i. Instead we employ a binary search algorithm, first # finding an upper and lower limit and then bisecting the interval until # we have found the closest match. # We first search an upper limit for i for which start_date + upper * delta # exceeds dt. upper = 1 while start_date + upper*delta < dt: # To speed up finding an upper limit we grow this exponentially by a # factor of 2 upper *= 2 # Since upper is the first value for which start_date + upper * delta # exceeds dt, upper // 2 is below dt and therefore forms a lower limit # for the i we are looking for lower = upper // 2 # We now continue to bisect the interval between # start_date + lower * delta and start_date + upper * delta # until we find the closest value while True: # Invariant: start + lower * delta < dt <= start + upper * delta # If start_date + (lower + 1)*delta exceeds dt, then either lower or # lower+1 has to be the solution we are searching for if start_date + (lower + 1)*delta >= dt: # Check if start_date + (lower + 1)*delta or # start_date + lower*delta is closer to dt and return the solution if (start_date + (lower + 1)*delta) - dt <= dt - (start_date + lower*delta): return start_date + (lower + 1)*delta else: return start_date + lower*delta # We bisect the interval and either replace the lower or upper # limit with the candidate candidate = lower + (upper - lower) // 2 if start_date + candidate*delta >= dt: upper = candidate else: lower = candidate # in the special case when start_date > dt the search for upper will # immediately stop for upper == 1 which results in lower = upper // 2 = 0 # and this function returns start_date.
Python
def construct_ingest_query( self, datasource, static_path, ts_dim, columns, metric_spec, intervals, hadoop_dependency_coordinates=None): """ Builds an ingest query for an HDFS TSV load. :param datasource: target datasource in druid :param columns: list of all columns in the TSV, in the right order """ metric_names = [ m['fieldName'] for m in metric_spec if m['type'] != 'count'] dimensions = [c for c in columns if c not in metric_names] ingest_query_dict = { "type": "index_hadoop", "spec": { "dataSchema": { "metricsSpec": metric_spec, "granularitySpec": { "queryGranularity": "NONE", "intervals": intervals, "type": "uniform", "segmentGranularity": "DAY", }, "parser": { "type": "string", "parseSpec": { "columns": columns, "dimensionsSpec": { "dimensionExclusions": [], "dimensions": dimensions, # list of names "spatialDimensions": [] }, "timestampSpec": { "column": ts_dim, "format": "auto" }, "format": "tsv" } }, "dataSource": datasource }, "tuningConfig": { "type": "hadoop", "jobProperties": { "mapreduce.job.user.classpath.first": "false", "mapreduce.map.output.compress" : "false", "mapreduce.output.fileoutputformat.compress" : "false", }, }, "ioConfig": { "inputSpec": { "paths": static_path, "type": "static" }, "type": "hadoop" } } } if hadoop_dependency_coordinates: ingest_query_dict[ 'hadoopDependencyCoordinates'] = hadoop_dependency_coordinates return json.dumps(ingest_query_dict, indent=4)
Python
def load_from_hdfs( self, datasource, static_path, ts_dim, columns, intervals, metric_spec=None, hadoop_dependency_coordinates=None): """ Loads data into Druid from HDFS. :param ts_dim: The column name to use as a timestamp :param metric_spec: A list of dictionaries """ task_id = self.send_ingest_query( datasource, static_path, ts_dim, columns, metric_spec, intervals, hadoop_dependency_coordinates) status_url = self.get_ingest_status_url(task_id) while True: r = requests.get(status_url) d = json.loads(r.text) if d['status']['status'] == 'FAILED': logging.error(d) raise AirflowDruidLoadException( "[Error]: Ingesting data to druid failed.") elif d['status']['status'] == 'SUCCESS': break time.sleep(LOAD_CHECK_INTERVAL)
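Because the docstring only notes that metric_spec is a list of dictionaries, the hedged sketch below shows one plausible call; the hook instance, datasource, path, columns and intervals are placeholders, and the metric dictionaries follow the common Druid metricsSpec shape (a "count" entry plus an aggregator with a fieldName).

# Hedged sketch -- all values are illustrative placeholders.
hook.load_from_hdfs(
    datasource="events",
    static_path="/user/hive/warehouse/events_tsv",
    ts_dim="event_time",
    columns=["event_time", "country", "revenue"],
    intervals=["2015-01-01/2015-01-02"],
    metric_spec=[
        {"type": "count", "name": "count"},
        {"type": "doubleSum", "name": "revenue", "fieldName": "revenue"},
    ],
)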
Python
def _parse_s3_config(config_file_name, config_format='boto', profile=None): """ Parses a config file for s3 credentials. Can currently parse boto, s3cmd.conf and AWS SDK config formats :param config_file_name: path to the config file :type config_file_name: str :param config_format: config type. One of "boto", "s3cmd" or "aws". Defaults to "boto" :type config_format: str :param profile: profile name in AWS type config file :type profile: str """ Config = configparser.ConfigParser() if Config.read(config_file_name): sections = Config.sections() else: raise AirflowException("Couldn't read {0}".format(config_file_name)) # Setting option names depending on file format conf_format = config_format.lower() if conf_format == 'boto': if profile is not None and 'profile ' + profile in sections: cred_section = 'profile ' + profile else: cred_section = 'Credentials' elif conf_format == 'aws' and profile is not None: cred_section = profile else: cred_section = 'default' # Option names if conf_format in ('boto', 'aws'): key_id_option = 'aws_access_key_id' secret_key_option = 'aws_secret_access_key' # security_token_option = 'aws_security_token' else: key_id_option = 'access_key' secret_key_option = 'secret_key' # Actual Parsing if cred_section not in sections: raise AirflowException("This config file format is not recognized") else: try: access_key = Config.get(cred_section, key_id_option) secret_key = Config.get(cred_section, secret_key_option) except: logging.warning("Option Error in parsing s3 config file") raise return (access_key, secret_key)
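To make the expected file layout concrete, the hedged sketch below writes a minimal boto-format credentials file and parses it; the path and key values are placeholders, and _parse_s3_config is assumed to be importable from the module shown above.

# Hedged sketch -- the default "boto" format expects a [Credentials] section
# with exactly these two option names.
with open("/tmp/s3.cfg", "w") as f:
    f.write(
        "[Credentials]\n"
        "aws_access_key_id = AKIAEXAMPLE\n"
        "aws_secret_access_key = SECRETEXAMPLE\n"
    )

access_key, secret_key = _parse_s3_config("/tmp/s3.cfg")  # config_format defaults to 'boto'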
Python
def load_file(self, filename, key, bucket_name=None, replace=False): """ Loads a local file to S3 This is provided as a convenience to drop a file in S3. It uses the boto infrastructure to ship a file to s3. It currently uses only a single-part upload, and should not be used to move large files. :param filename: name of the file to load. :type filename: str :param key: S3 key that will point to the file :type key: str :param bucket_name: Name of the bucket in which to store the file :type bucket_name: str :param replace: A flag to decide whether or not to overwrite the key if it already exists :type replace: bool """ if not bucket_name: (bucket_name, key) = self._parse_s3_url(key) bucket = self.get_bucket(bucket_name) if not self.check_for_key(key, bucket_name): key_obj = bucket.new_key(key_name=key) else: key_obj = bucket.get_key(key) key_size = key_obj.set_contents_from_filename(filename, replace=replace) logging.info("The key {key} now contains" " {key_size} bytes".format(**locals()))
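A hedged usage sketch for load_file(); the S3Hook construction, connection id, bucket and key names are assumptions for illustration and may differ from the actual hook's constructor.

# Hedged sketch -- names are placeholders; when bucket_name is omitted the
# bucket is parsed out of an s3:// style key, as in the method above.
hook = S3Hook(s3_conn_id="s3_default")
hook.load_file(
    filename="/tmp/report.csv",
    key="s3://my-bucket/reports/report.csv",
    replace=True,
)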
Python
def load_string(self, string_data, key, bucket_name=None, replace=False): """ Loads a string into S3 This is provided as a convenience to set a key's content in S3 directly. It uses the boto infrastructure to ship the data to s3. It currently uses only a single-part upload, and should not be used to move large amounts of data. :param string_data: string to set as content for the key. :type string_data: str :param key: S3 key that will point to the file :type key: str :param bucket_name: Name of the bucket in which to store the file :type bucket_name: str :param replace: A flag to decide whether or not to overwrite the key if it already exists :type replace: bool """ if not bucket_name: (bucket_name, key) = self._parse_s3_url(key) bucket = self.get_bucket(bucket_name) if not self.check_for_key(key, bucket_name): key_obj = bucket.new_key(key_name=key) else: key_obj = bucket.get_key(key) key_size = key_obj.set_contents_from_string(string_data, replace=replace) logging.info("The key {key} now contains" " {key_size} bytes".format(**locals()))
Python
def _draw(self, frame, boxes, probs, landmarks): """ Draw landmarks and boxes for each face detected """ try: for box, prob, ld in zip(boxes, probs, landmarks): # Draw rectangle on frame cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), thickness=2) # Show probability cv2.putText(frame, str( prob), (box[2], box[3]), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA) # Draw landmarks cv2.circle(frame, tuple(ld[0]), 5, (0, 0, 255), -1) cv2.circle(frame, tuple(ld[1]), 5, (0, 0, 255), -1) cv2.circle(frame, tuple(ld[2]), 5, (0, 0, 255), -1) cv2.circle(frame, tuple(ld[3]), 5, (0, 0, 255), -1) cv2.circle(frame, tuple(ld[4]), 5, (0, 0, 255), -1) except: pass return frame
Python
def run(self):
    """
    Run the FaceDetector and draw landmarks and boxes around detected faces
    """
    cap = cv2.VideoCapture(0)

    while True:
        ret, frame = cap.read()
        if not ret:
            # the camera frame could not be read; stop the loop
            break
        try:
            # detect face box, probability and landmarks
            boxes, probs, landmarks = self.mtcnn.detect(frame, landmarks=True)

            # draw on frame
            self._draw(frame, boxes, probs, landmarks)
        except Exception:
            # skip frames on which detection fails
            pass

        # Show the frame
        cv2.imshow('Face Detection', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
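A short usage sketch for the detector above. It assumes the two methods belong to a class (named FaceDetector here, which is an assumption) whose constructor stores a facenet_pytorch MTCNN instance on self.mtcnn.

from facenet_pytorch import MTCNN

# FaceDetector is the assumed name of the class that owns _draw() and run().
mtcnn = MTCNN(keep_all=True)      # keep_all so every face in the frame is returned
detector = FaceDetector(mtcnn)    # assumed constructor: stores mtcnn on self.mtcnn
detector.run()                    # opens the webcam; press 'q' to quit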
Python
def describe_stack_resource(self, logicalResourceId, stackName, request_credentials=None): """ Calls DescribeStackResource and returns a StackResourceDetail object. Throws an IOError on failure. """ log.debug("Describing resource %s in stack %s", logicalResourceId, stackName) return StackResourceDetail(self._call({"Action" : "DescribeStackResource", "LogicalResourceId" : logicalResourceId, "ContentType" : "JSON", "StackName": stackName, "Version": CloudFormationClient._apiVersion }, request_credentials=request_credentials))
Python
def register_listener(self, stack_name, listener_id=None, request_credentials=None): """ Calls RegisterListener and returns a Listener object Throws an IOError on failure. """ log.debug("Registering listener %s for stack %s", listener_id, stack_name) params = {"Action" : "RegisterListener", "StackName" : stack_name, "ContentType" : "JSON"} if not self.using_instance_identity: params["ListenerId"] = listener_id return Listener(self._call(params, request_credentials = request_credentials))
Python
def elect_command_leader(self, stack_name, command_name, invocation_id, listener_id=None, request_credentials=None): """ Calls ElectCommandLeader and returns the listener id of the leader Throws an IOError on failure. """ log.debug("Attempting to elect '%s' as leader for stack: %s, command: %s, invocation: %s", listener_id, stack_name, command_name, invocation_id) params = {"Action" : "ElectCommandLeader", "CommandName" : command_name, "InvocationId" : invocation_id, "StackName" : stack_name, "ContentType" : "JSON"} if not self.using_instance_identity: params["ListenerId"] = listener_id result_data = self._call(params, request_credentials = request_credentials).json() return result_data['ElectCommandLeaderResponse']['ElectCommandLeaderResult']['ListenerId']
Python
def apply(self, action): """ Create groups, returning a list of groups that were created Arguments: action -- a dict of group name to attributes, where attributes has keys of: gid: the gid of the user (a string or int) Exceptions: ToolError -- on expected failures """ groups_created = [] if not action: log.debug("No groups specified") return groups_created for name in sorted(action.keys()): gid = None if "gid" in action[name]: gid = str(action[name]["gid"]) if security.create_group(name, gid): groups_created.append(name) return groups_created
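For reference, a sketch of the action mapping that apply() above consumes; the group names and gid are made up.

# Hypothetical action dict: group name -> attributes (gid optional).
action = {
    "webadmin": {"gid": "1500"},
    "deploy": {},                  # no gid: let the system pick one
}
# tool.apply(action) would return the list of group names actually created,
# assuming `tool` is an instance of the class that defines apply() above.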
Python
def where(): """Return the preferred certificate bundle.""" if os.name == 'nt': override_path = os.path.expandvars(r'${SystemDrive}\cfn\ca-override.pem') else: override_path = '/etc/cfn/ca-override.pem' override_path = os.getenv('CA_OVERRIDE', override_path) if os.path.isfile(override_path): return override_path # Modification for compilation by py2exe: if (hasattr(sys, "frozen") or # new py2exe hasattr(sys, "importers") # old py2exe or imp.is_frozen("__main__")): # tools/freeze return os.path.join(os.path.dirname(sys.executable), 'cacert.pem') # end py2exe # vendored bundle inside Requests return os.path.join(os.path.dirname(__file__), 'cacert.pem')
Python
def apply(self, action, auth_config=None): """ Install a set of packages via easy_install, returning the packages actually installed or updated. Arguments: action -- a dict of package name to version; version can be empty, a single string or a list of strings Exceptions: ToolError -- on expected failures (such as a non-zero exit code) """ pkgs_changed = [] if not action.keys(): log.debug("No packages specified for python") return pkgs_changed pkgs = [] for pkg in action: if not action[pkg] or isinstance(action[pkg], basestring): pkgs.append(PythonTool._pkg_spec(pkg, action[pkg])) else: pkgs.extend(PythonTool._pkg_spec(pkg, ver) for ver in action[pkg]) pkgs_changed.append(pkg) log.info("Attempting to install %s via easy_install", pkgs) result = ProcessHelper(['easy_install'] + pkgs).call() if result.returncode: log.error("easy_install failed. Output: %s", result.stdout) raise ToolError("Could not successfully install python packages", result.returncode) else: log.info("easy_install installed %s", pkgs) log.debug("easy_install output: %s", result.stdout) return pkgs_changed
Python
def apply(self, action, auth_config=None): """ Install a set of packages via rubygems, returning the packages actually installed or updated. Arguments: action -- a dict of package name to version; version can be empty, a single string or a list of strings Exceptions: ToolError -- on expected failures (such as a non-zero exit code) """ pkgs_changed = [] if not action.keys(): log.debug("No gems specified") return pkgs_changed for pkg in action: installed = False if not action[pkg]: installed = self._install_gem(pkg) else: if isinstance(action[pkg], basestring): installed = self._install_gem(pkg, action[pkg]) else: for ver in action[pkg]: if self._install_gem(pkg, ver): installed = True if installed: pkgs_changed.append(pkg) return pkgs_changed
Python
def _gem_is_installed(self, pkg, ver=None):
    """
    Check to see if a package at version ver is installed.
    If ver is not specified, just check for the package.
    """
    log.debug("Checking to see if %s-%s is already installed", pkg, ver)

    queryCmd = ['gem', 'query', '-i', '-n', '^%s$' % pkg]
    if ver:
        queryCmd.extend(['-v', '%s' % ver])

    result = ProcessHelper(queryCmd).call()

    if result.returncode:
        return False
    else:
        return True
Python
def _install_gem(self, pkg, ver=None):
    """Install a gem if the version is not already installed; return True if installed, False if skipped."""
    if self._gem_is_installed(pkg, ver):
        log.info("%s-%s is already installed, skipping.", pkg, ver)
        return False
    else:
        log.info("Installing %s version %s via gem", pkg, ver)

        install_command = ['gem', 'install', '-b', '--no-ri', '--no-rdoc', pkg]
        if ver:
            install_command.extend(['-v', '= %s' % ver])

        result = ProcessHelper(install_command).call()

        if result.returncode:
            log.error("Gem failed. Output: %s", result.stdout)
            raise ToolError("Failed to install gem: %s-%s" % (pkg, ver), result.returncode)
        else:
            log.info("Gem installed: %s-%s", pkg, ver)
            log.debug("Gem output: %s", result.stdout)
            return True
Python
def extract_all(self, dest): """ The zipfile module doesn't preserve file permissions when extracting, so each file needs to have its mode changed via chmod after extracting it. The file's original permission bits are stored in the external attributes of the file's ZipInfo object, and we retrieve them by shifting right 16 bits. """ for info in self.file.infolist(): ext_attr = info.external_attr mode = ext_attr >> 16 target_path = self.file.extract(info, dest) # ignoring chmod for Windows if os.name != 'nt': chmod(target_path, mode)
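To illustrate the bit shift used in extract_all() above: the upper 16 bits of a ZipInfo's external_attr hold the Unix mode, so shifting right by 16 recovers the permission bits. A small self-contained check:

import stat
import zipfile

# Build a ZipInfo whose external attributes encode mode 0755 for a regular file.
info = zipfile.ZipInfo("script.sh")
info.external_attr = (stat.S_IFREG | 0o755) << 16

mode = info.external_attr >> 16
print(oct(stat.S_IMODE(mode)))   # -> 0o755, the bits handed to chmod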
Python
def create_group(group_name, gid=None): """Create a group in the OS, returning True if one is created""" try: group_record = grp.getgrnam(group_name) if gid and str(group_record[2]) != gid: raise ToolError("Group %s exists with gid %s, but gid %s was requested" % (group_name, group_record[2], gid)) log.debug("Group %s already exists", group_name) return False except KeyError: pass cmd = ['/usr/sbin/groupadd', '-r'] if gid: cmd.extend(['-g', gid]) cmd.append(group_name) result = ProcessHelper(cmd).call() if result.returncode: log.error("Failed to create group %s", group_name) log.debug("Groupadd output: %s", result.stdout) raise ToolError("Failed to create group %s" % group_name) else: log.info("Created group %s successfully", group_name) return True
Python
def create_or_modify_user(user_name, groups=[], homedir=None, uid=None): """Create or modify a user in the OS, returning True if action was taken""" try: user_record = pwd.getpwnam(user_name) if uid and str(user_record[2]) != uid: raise ToolError("User %s exists with uid %s, but uid %s was requested" % (user_name, user_record[2], uid)) return _modify_user(user_name, groups, homedir) except KeyError: _create_user(user_name, groups, homedir, uid) return True
Python
def _modify_user(user_name, groups=[], homedir=None): """ Modify a user and return True, else return False """ if not homedir and not groups: log.info("No homedir or groups specified; not modifying %s", user_name) return False cmd = ['/usr/sbin/usermod'] if groups: gids = _get_gids(groups) current_gids = _gids_for_user(user_name) if frozenset(gids) ^ frozenset(current_gids): cmd.extend(['-G', ','.join(gids)]) else: log.debug("Groups have not changed for %s", user_name) if homedir: if homedir != _get_user_homedir(user_name): cmd.extend(['-d', homedir]) else: log.debug("Homedir has not changed for %s", user_name) if len(cmd) == 1: log.debug("User %s does not need modification", user_name) return False cmd.append(user_name) result = ProcessHelper(cmd).call() if result.returncode: log.error("Failed to modify user %s", user_name) log.debug("Usermod output: %s", result.stdout) raise ToolError("Failed to modify user %s" % user_name) else: log.info("Modified user %s successfully", user_name) return True
Python
def apply(self, action, changes = collections.defaultdict(list)): """ Takes a dict of service name to dict. Keys we look for are: - "enabled" (setting a service to "Automatic") - "ensureRunning" (actually start the service) """ if not action.keys(): log.debug("No Windows services specified") return if not _windows_supported: raise ToolError("Cannot modify windows services without pywin32") manager = win32service.OpenSCManager(None, None, win32service.SC_MANAGER_ALL_ACCESS) try: for service, serviceProperties in action.iteritems(): handle = win32service.OpenService(manager, service, win32service.SERVICE_ALL_ACCESS) try: if "enabled" in serviceProperties: start_type = win32service.SERVICE_AUTO_START if util.interpret_boolean(serviceProperties["enabled"]) else win32service.SERVICE_DEMAND_START self._set_service_startup_type(handle, start_type) else: log.debug("Not modifying enabled state of service %s", service) if self._detect_required_restart(serviceProperties, changes): log.debug("Restarting %s due to change detected in dependency", service) win32serviceutil.RestartService(service) elif "ensureRunning" in serviceProperties: ensureRunning = util.interpret_boolean(serviceProperties["ensureRunning"]) status = win32service.QueryServiceStatus(handle)[1] isRunning = status & win32service.SERVICE_RUNNING or status & win32service.SERVICE_START_PENDING if ensureRunning and not isRunning: log.debug("Starting service %s as it is not running", service) win32service.StartService(handle, None) elif not ensureRunning and isRunning: log.debug("Stopping service %s as it is running", service) win32service.ControlService(handle, win32service.SERVICE_CONTROL_STOP) else: log.debug("No need to modify running state of service %s", service) else: log.debug("Not modifying running state of service %s", service) finally: win32service.CloseServiceHandle(handle) finally: win32service.CloseServiceHandle(manager)
Python
def update_target_model( self ): ''' Updates target model by copying from prediction model ''' self.target_model = deepcopy( self.prediction_model )
Python
def forward( self, x ): ''' Required Pytorch Function. Defines the forward pass for this encoder ''' # Initialize hidden state with zeros h0 = torch.zeros( self.lstm_layer_dim, x.size(0), self.hidden_dim ) if self.device: h0 = h0.to( self.device ) # Initialize cell state c0 = torch.zeros( self.lstm_layer_dim, x.size(0), self.hidden_dim ) if self.device: c0 = c0.to( self.device ) out, (hn, cn) = self.lstm( x.float(), ( h0, c0 ) ) return out
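A hypothetical call to the forward pass above. Only forward() is shown, so the encoder class name and constructor arguments here are assumptions; the tensor shapes follow the batch-first LSTM convention implied by x.size(0) being used as the batch dimension.

import torch

# Assumed class name and constructor; only forward() is defined above.
encoder = LSTMEncoder(input_dim=8, hidden_dim=32, lstm_layer_dim=2, device=None)

x = torch.randn(4, 50, 8)   # (batch, sequence length, features)
out = encoder(x)            # expected shape: (4, 50, 32), one hidden state per time step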
Python
def change_path( src ):
    """
    src will be moved to /export/`src` and a symlink will be placed at src
    pointing to the new location under /export/.
    """
    if os.path.exists( src ):
        dest = os.path.join( '/export/', src.strip('/') )
        # if the destination does not exist yet, move all files into /export/ and symlink back to source
        if not os.path.exists( dest ):
            dest_dir = os.path.dirname(dest)
            if not os.path.exists( dest_dir ):
                os.makedirs(dest_dir)
            shutil.move( src, dest )
            os.symlink( dest, src.rstrip('/') )
        # if destination exists (e.g. continuing a previous session), remove source and symlink
        else:
            if os.path.isdir( src ):
                shutil.rmtree( src )
            else:
                os.unlink( src )
            os.symlink( dest, src.rstrip('/') )
Python
def create_pg_db(user, password, database, database_path):
    """
    Initialize the PostgreSQL database, add the database user and create the Galaxy database.
    """
    os.makedirs( database_path )
    set_pg_permission( database_path )
    # initialize a new postgres database
    subprocess.call('sudo -u postgres %s --auth=trust --pgdata=%s' % (os.path.join(PG_BIN, 'initdb'), database_path), shell=True)
    shutil.copy('/etc/ssl/certs/ssl-cert-snakeoil.pem', os.path.join(database_path, 'server.crt'))
    shutil.copy('/etc/ssl/private/ssl-cert-snakeoil.key', os.path.join(database_path, 'server.key'))
    set_pg_permission( os.path.join(database_path, 'server.crt') )
    set_pg_permission( os.path.join(database_path, 'server.key') )
    # change data_directory in postgresql.conf and start the service with the new location
    pg_ctl( database_path, 'start' )
    # use the supplied user and password rather than hard-coded values
    quoted_password = "'%s'" % password
    subprocess.call( 'sudo -u postgres psql --command "CREATE USER %s WITH SUPERUSER PASSWORD %s;"' % (user, quoted_password), shell=True )
    subprocess.call('sudo -u postgres createdb -O %s %s' % (user, database), shell=True)
Python
def deep_learning_turbidite(resdir, X_train_raw, y_train_raw, X_test_raw, y_test_raw, lr=0.02, decay=0, validation_split=0.2, batch_size=32, momentum=0.9, nesterov=True, num_layers=4, dropout=0.5, node_num=2000, epochs=4000): """ Creating the inversion model of turbidity currents by deep learning """ #Normalizing dataset X_train = get_normalized_data(X_train_raw, min_x, max_x) X_test = get_normalized_data(X_test_raw, min_x, max_x) y_train = get_normalized_data(y_train_raw, min_y, max_y) y_test = get_normalized_data(y_test_raw, min_y, max_y) # Generate the model # mirrored_strategy = MirroredStrategy() # with mirrored_strategy.scope(): model = Sequential() model.add( Dense(node_num, input_dim=X_train.shape[1], activation='relu', kernel_initializer='glorot_uniform')) #1st layer model.add(Dropout(dropout)) for i in range(num_layers - 2): model.add( Dense(node_num, activation='relu', kernel_initializer='glorot_uniform')) #2nd layer model.add(Dropout(dropout)) model.add( Dense(y_train.shape[1], activation='relu', kernel_initializer='glorot_uniform')) #last layer # Compilation of the model model.compile( loss="mean_squared_error", optimizer=SGD(lr=lr, decay=decay, momentum=momentum, nesterov=nesterov), #optimizer=Adadelta(), metrics=["mean_squared_error"]) # Start training t = time.time() check = ModelCheckpoint(filepath=os.path.join(resdir, "model.hdf5"), monitor='val_loss', save_freq=1000, save_weights_only=True, mode='min', save_best_only=True) #es_cb = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto') tb_cb = TensorBoard(log_dir=os.path.join(resdir, 'logs'), histogram_freq=0, write_graph=False, write_images=False) history = model.fit(X_train, y_train, epochs=epochs, validation_split=validation_split, batch_size=batch_size, callbacks=[check, tb_cb]) return model, history
Python
def apply_model(model, X, min_x, max_x, min_y, max_y): """ Apply the model to data sets """ X_norm = (X - min_x) / (max_x - min_x) Y_norm = model.predict(X_norm) Y = Y_norm * (max_y - min_y) + min_y return Y
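The model application above simply undoes the same min-max scaling used during training; a tiny numeric check of that round trip, with made-up bounds:

import numpy as np

# Hypothetical normalisation bounds for a single input feature.
min_x, max_x = np.array([0.0]), np.array([10.0])
x = np.array([[2.5]])

x_norm = (x - min_x) / (max_x - min_x)        # -> 0.25
x_back = x_norm * (max_x - min_x) + min_x     # inverse transform
assert np.allclose(x_back, x)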
Python
def load_data(datadir):
    """
    This function loads the training and test data sets and returns them as variables.
    """
    global min_x, max_x, min_y, max_y

    x_train = np.load(os.path.join(datadir, 'H_train.npy'))
    x_test = np.load(os.path.join(datadir, 'H_test.npy'))
    y_train = np.load(os.path.join(datadir, 'icond_train.npy'))
    y_test = np.load(os.path.join(datadir, 'icond_test.npy'))
    min_y = np.load(os.path.join(datadir, 'icond_min.npy'))
    max_y = np.load(os.path.join(datadir, 'icond_max.npy'))
    [min_x, max_x] = np.load(os.path.join(datadir, 'x_minmax.npy'))

    return x_train, y_train, x_test, y_test
Python
def qt_session(func):
    '''
    Qt session decorator. When the wrapped function is called with
    visualise=True (or without a visualise keyword argument), the call
    is run inside a napari Qt event loop; with visualise=False it runs
    without one.
    '''
    def wrapper_gui_qt(*args, **kwargs):
        visualise = kwargs.get('visualise')
        if visualise is not None:
            use = visualise
            doing = 'initiating' if use else 'will not initiate'
            print(LINE)
            print(f'Visualise is {use}, {doing} qt session')
        else:
            use = True
        if use:
            with napari.gui_qt():
                func(*args, **kwargs)
        else:
            func(*args, **kwargs)
    return wrapper_gui_qt
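A hedged usage sketch for the decorator above: the wrapped function (hypothetical name and body) accepts a visualise keyword, which the decorator inspects to decide whether to open a napari Qt event loop.

import napari
import numpy as np

@qt_session
def show_volume(volume, visualise=True):
    # Hypothetical viewer function; runs inside napari.gui_qt() when visualise is True.
    viewer = napari.Viewer()
    viewer.add_image(volume)

show_volume(np.random.random((10, 64, 64)), visualise=True)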
Python
def train_unet(
               # training data
               x,
               vx,
               y,
               vy,
               ids,
               vids,
               # output information
               out_dir,
               name,
               channels=None,
               # training variables
               validate=True,
               log=True,
               epochs=3,
               lr=0.01,
               loss_function='WeightedBCE',
               chan_weights=(1., 2., 2.),  # for weighted BCE
               weights=None,
               update_every=20,
               losses=None,
               chan_losses=None,
               # network architecture
               fork_channels=None,
               chan_final_activations=None,
               **kwargs
               ):
    '''
    Train a basic U-Net on affinities data.

    Parameters
    ----------
    x: list of torch.Tensor
        Input images for which the network will be trained to predict
        the inputted labels.
    y: list of torch.Tensor
        Input labels that represent the target output that the network
        will be trained to predict.
    ids: list of str
        ID strings corresponding to x and y (as they are named on disk).
        Used for saving output.
    vx: list of torch.Tensor or None
        Validation images.
    vy: list of torch.Tensor or None
        Validation labels.
    vids: list of str or None
        Validation IDs.
    out_dir: str
        Directory to which to save network output.
    name: str
        Name used in naming the pytorch state dictionary file.
    channels: tuple of str or None
        Names of output channels to be used for labeling channelwise loss
        columns in the output loss csv. If None, names are generated.
    validate: bool
        Will a validation be done at the end of every epoch?
    log: bool
        Will a log.txt file containing all console print outs be saved?
    epochs: int
        How many times should we go through the training data?
    lr: float
        Learning rate for the Adam optimiser.
    loss_function: str
        Which loss function will be used for training & validation?
        Current options include:
            'BCELoss': Binary cross entropy loss
            'WeightedBCE': Binary cross entropy loss whereby channels are
                weighted according to the chan_weights parameter. Quick way
                to force the network to favour learning information about a
                given channel/s.
            'DiceLoss': 1 - DICE coefficient of the output-target pair
    chan_weights: tuple of float
        WEIGHTEDBCE: Weights for BCE loss for each output channel.
    weights: None or nn.Module().state_dict()
        Prior weights with which to initialise the network.
    update_every: int
        Determines how many batches are processed before printing loss.

    Returns
    -------
    unet: UNet (unet.py)
    unet_path: str
        Path to the saved state dictionary.

    Notes
    -----
    When data is loaded from a directory, it will be recognised according
    to the following naming convention:
        IDs: YYMMDD_HHMMSS_{digit/s}
        Images: YYMMDD_HHMMSS_{digit/s}_image.tif
        Affinities: YYMMDD_HHMMSS_{digit/s}_labels.tif
    E.g., 210309_152717_7_image.tif, 210309_152717_7_labels.tif

    For each ID, a labels and an image file must be found or else an
    assertion error will be raised.
''' # Device device_name = 'cuda' if torch.cuda.is_available() else 'cpu' device = torch.device(device_name) # initialise U-net if fork_channels is None: unet = UNet(out_channels=len(channels), chan_final_activations=chan_final_activations).to(device, dtype=torch.float32) else: unet = UNet(out_channels=fork_channels, chan_final_activations=chan_final_activations).to(device, dtype=torch.float32) # load weights if applicable weights_are = _load_weights(weights, unet) # define the optimiser optimiser = optim.Adam(unet.parameters(), lr=lr) # define the loss function loss = _get_loss_function(loss_function, chan_weights, device, losses, chan_losses) # get the dictionary that will be converted to a csv of losses # contains columns for each channel, as we record channel-wise # BCE loss in addition to the loss used for backprop channels = _index_channels_if_none(channels, x) loss_dict = _get_loss_dict(channels) v_loss = _get_loss_function(loss_function, chan_weights, device, losses, chan_losses) validation_dict = { 'epoch' : [], 'validation_loss' : [], 'data_id' : [], 'batch_id': [] } if validate: no_iter = (epochs * len(x)) + ((epochs + 1) * len(vx)) else: no_iter = epochs * len(x) # print the training into and log if applicable bce_weights = _bce_weights(loss) # gets weights if using WeightedBCE _print_train_info(loss_function, bce_weights, epochs, lr, weights_are, device_name, out_dir, log, chan_losses, losses, channels, fork_channels) # loop over training data y_hats, v_y_hats = _train_loop(no_iter, epochs, x, y, ids, device, unet, out_dir, optimiser, loss, loss_dict, validate, vx, vy, vids, validation_dict, v_loss, update_every, log, name, channels) unet_path = _save_final_results(unet, out_dir, name, y_hats, ids, validate, loss_dict, v_y_hats, vids, validation_dict) #_plots(out_dir, name, loss_function, validate) # 2 leaked semaphore objects... pytorch x mpl?? return unet, unet_path
Python
def train_unet_from_directory(
    out_dir,
    suffix,
    data_dir,
    validation_dir=None,
    epochs=4,
    lr=0.01,
    loss_function='BCELoss',
    chan_weights=(1., 2., 2.),  # for weighted BCE
    weights=None,
    update_every=20,
    channels=None,
    **kwargs
    ):
    '''
    Train a basic U-Net on affinities data. Load chunks of training data
    from a directory.

    Parameters
    ----------
    out_dir: str
        Directory to which to save network output
    suffix: str
        Suffix used in naming the pytorch state dictionary file
    data_dir: None or str
        LOAD: Only applicable when loading training data. If None, training
        data is assumed to be in the output directory. Otherwise, data_dir
        should be the directory in which training data is located
    validation_dir: None or str
        LOAD: If None, no validation is performed. If provided, validation
        data is loaded from the given directory according to the same naming
        convention as training data. Validation is performed at the end of
        every epoch. Labels are expected to be in int form (typical
        segmentation)
    channels: tuple of str
        Types of output channels to be obtained.
            Affinities: 'axis-n' (pattern: r'[xyz]-\d+' e.g., 'z-1')
            Centreness: 'centreness'
    epochs: int
        How many times should we go through the training data?
    lr: float
        Learning rate for the Adam optimiser
    loss_function: str
        Which loss function will be used for training & validation?
        Current options include:
            'BCELoss': Binary cross entropy loss
            'WeightedBCE': Binary cross entropy loss whereby channels are
                weighted according to the chan_weights parameter. Quick way
                to force the network to favour learning information about a
                given channel/s.
            'DiceLoss': 1 - DICE coefficient of the output-target pair
    chan_weights: tuple of float
        WEIGHTEDBCE: Weights for BCE loss for each output channel.
    weights: None or nn.Module().state_dict()
        Prior weights with which to initialise the network.
    update_every: int
        Determines how many batches are processed before printing loss

    Returns
    -------
    unet: UNet (unet.py)

    Notes
    -----
    When data is loaded from a directory, it will be recognised according
    to the following naming convention:
        IDs: YYMMDD_HHMMSS_{digit/s}
        Images: YYMMDD_HHMMSS_{digit/s}_image.tif
        Affinities: YYMMDD_HHMMSS_{digit/s}_labels.tif
    E.g., 210309_152717_7_image.tif, 210309_152717_7_labels.tif

    For each ID, a labels and an image file must be found or else an
    assertion error will be raised.
    '''
    log = True
    if data_dir is None:
        d = out_dir
    else:
        d = data_dir
    x, y, ids = load_train_data(d)
    # if applicable, load the validation data
    if validation_dir is not None:
        validate = True
        vx, vy, vids = _load_validation(validation_dir, out_dir, log)
    else:
        vx, vy, vids = None, None, None
        validate = False
    unet, unet_path = train_unet(
        # training data
        x, vx, y, vy, ids, vids,
        # output information
        out_dir, suffix, channels,
        # training variables
        validate=validate,
        log=log,
        epochs=epochs,
        lr=lr,
        loss_function=loss_function,
        chan_weights=chan_weights,  # for weighted BCE
        weights=weights,
        update_every=update_every
    )
    return unet
Python
def train_unet_get_labels(
    out_dir,
    image_paths,
    labels_paths,
    suffix='',
    channels=('z-1', 'y-1', 'x-1', 'centreness'),
    n_each=100,
    validation_prop=0.,
    scale=(4, 1, 1),
    epochs=3,
    lr=0.01,
    loss_function='BCELoss',
    chan_weights=(1., 2., 2.),  # for weighted BCE
    weights=None,
    update_every=20,
    losses=None,
    chan_losses=None,
    fork_channels=None,
    chan_final_activations=None,
    **kwargs
    ):
    '''
    Train a basic U-Net on affinities data. Generates chunks of training
    data, in which case chunks have spatial dimensions of (10, 256, 256).
    Different types of channels can be generated from a segmentation as
    training data. These include z, y, and x affinities of specified degree
    and scores for centreness (i.e., scores segmented voxels according to
    closeness to centre).

    Parameters
    ----------
    out_dir: str
        Directory to which to save network output
    suffix: str
        Suffix used in naming the pytorch state dictionary file
    image_paths: None or list of str
        Only applicable if generating training data from volumes.
        Paths to whole volume images.
    labels_paths: None or list of str
        Only applicable if generating training data from volumes.
        Paths to whole volume labels. Labels are expected to be in int form
        (typical segmentation)
    channels: tuple of str
        Types of output channels to be obtained.
            Affinities: 'axis-n' (pattern: r'[xyz]-\d+' e.g., 'z-1')
            Centreness: 'centreness'
    n_each: int
        Number of image-labels pairs to obtain from each image-GT volume
        provided.
    scale: tuple of numeric
        Scale of channels. This is used in calculating the centreness score.
    validation_prop: float
        If greater than 0, validation data will be generated and a
        validation performed at the end of every epoch. The number of pairs
        generated corresponds to the proportion inputted.
    epochs: int
        How many times should we go through the training data?
    lr: float
        Learning rate for the Adam optimiser
    loss_function: str
        Which loss function will be used for training & validation?
        Current options include:
            'BCELoss': Binary cross entropy loss
            'WeightedBCE': Binary cross entropy loss whereby channels are
                weighted according to the chan_weights parameter. Quick way
                to force the network to favour learning information about a
                given channel/s.
            'DiceLoss': 1 - DICE coefficient of the output-target pair
    chan_weights: tuple of float
        WEIGHTEDBCE: Weights for BCE loss for each output channel.
    weights: None or nn.Module().state_dict()
        Prior weights with which to initialise the network.
    update_every: int
        Determines how many batches are processed before printing loss

    Returns
    -------
    unet: UNet (unet.py)
    '''
    log = True
    validate = False
    xs, ys, ids = get_train_data(
        image_paths,
        labels_paths,
        out_dir,
        n_each=n_each,
        channels=channels,
        scale=scale,
        log=log
    )
    if validation_prop > 0:
        validate = True
        v_n_each = int(np.round(validation_prop * n_each))
        v_xs, v_ys, v_ids = get_train_data(
            image_paths,
            labels_paths,
            out_dir,
            n_each=v_n_each,
            channels=channels,
            scale=scale,
            log=log
        )
    else:
        v_xs, v_ys, v_ids = None, None, None
    unet, unet_path = train_unet(
        # training data
        xs, v_xs, ys, v_ys, ids, v_ids,
        # output information
        out_dir, suffix, channels,
        # training variables
        validate=validate,
        log=log,
        epochs=epochs,
        lr=lr,
        loss_function=loss_function,
        chan_weights=chan_weights,  # for weighted BCE
        weights=weights,
        update_every=update_every,
        fork_channels=fork_channels,
        losses=losses,
        chan_losses=chan_losses,
        chan_final_activations=chan_final_activations
    )
    return unet
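A hypothetical invocation of train_unet_get_labels() above; the output directory and file paths are made up, but the file names follow the naming convention described in the docstring.

# All paths and parameter values here are illustrative only.
unet = train_unet_get_labels(
    out_dir='runs/unet-affinities',
    image_paths=['data/210309_152717_7_image.tif'],
    labels_paths=['data/210309_152717_7_labels.tif'],
    suffix='affinities',
    channels=('z-1', 'y-1', 'x-1', 'centreness'),
    n_each=50,
    validation_prop=0.2,
    scale=(4, 1, 1),
    epochs=3,
    lr=0.01,
    loss_function='BCELoss',
)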
Python
def sentence_case(string):
    """
    Capitalize the first letter of the string. Does not capitalize after
    full stops and other sentence dividing punctuation.

    :param string: a string to capitalize
    :type string: str
    :return: the string with its first letter capitalized
    :rtype: str
    """
    if not string:
        return string
    return "{}{}".format(string[0].capitalize(), string[1:])
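For clarity, the behaviour of sentence_case() above on a short example: only the first character is capitalized, and later sentences are left untouched.

print(sentence_case("hello world. goodbye world."))
# -> "Hello world. goodbye world."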