Dataset columns: language (string, 6 values), original_string (string, 25 to 887k characters), text (string, 25 to 887k characters).
Python
def network_border_group(self) -> str:
    """The name of the network border group.

    A network border group is a unique set of Availability Zones or Local
    Zones from where AWS advertises IP addresses.
    """
    return self._network_border_group
Python
def services(self) -> Tuple[str, ...]:
    """Services that use IP addresses in this IP prefix.

    The addresses listed for `API_GATEWAY` are egress only.

    The service `"AMAZON"` is not a service but rather an identifier used to
    get all IP address ranges - meaning that every prefix is contained in the
    subset of prefixes tagged with the `"AMAZON"` service. Some IP address
    ranges are only tagged with the `"AMAZON"` service.
    """
    return self._services
Python
def ip_prefix(self) -> IPv4Network:
    """The public IPv4 network prefix.

    This is a convenience attribute to maintain API compatibility with the
    JSON attribute names.
    """
    return self._prefix
Python
def ipv6_prefix(self) -> IPv6Network:
    """The public IPv6 network prefix.

    This is a convenience attribute to maintain API compatibility with the
    JSON attribute names.
    """
    return self._prefix
Python
def aws_ip_prefix(json_data: Dict[str, str]) -> Union[AWSIPv4Prefix, AWSIPv6Prefix]:
    """Factory function to create AWS IP Prefix objects from JSON data."""
    check_type("data", json_data, dict)
    assert "ip_prefix" in json_data or "ipv6_prefix" in json_data
    assert "region" in json_data
    assert "network_border_group" in json_data
    assert "service" in json_data

    if "ip_prefix" in json_data:
        return AWSIPv4Prefix(
            prefix=json_data["ip_prefix"],
            region=json_data["region"],
            network_border_group=json_data["network_border_group"],
            services=json_data["service"],
        )

    if "ipv6_prefix" in json_data:
        return AWSIPv6Prefix(
            prefix=json_data["ipv6_prefix"],
            region=json_data["region"],
            network_border_group=json_data["network_border_group"],
            services=json_data["service"],
        )
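A minimal usage sketch for the factory above, assuming it and the prefix classes are importable from a hypothetical awsipranges module; the record mirrors the shape of a single entry in AWS's ip-ranges.json (the values are placeholders):

from awsipranges import aws_ip_prefix  # hypothetical import path

record = {
    "ip_prefix": "3.5.140.0/22",
    "region": "ap-northeast-2",
    "network_border_group": "ap-northeast-2",
    "service": "AMAZON",
}

prefix = aws_ip_prefix(record)  # returns an AWSIPv4Prefix
print(prefix.region, prefix.services)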
Python
def check_type(
    variable_name: str,
    obj: Any,
    acceptable_types: Union[Type, Tuple[Type, ...]],
    optional: bool = False,
):
    """Object is an instance of one of the acceptable types or None.

    Args:
        variable_name: The name of the variable being inspected.
        obj: The object to inspect.
        acceptable_types: A type or tuple of acceptable types.
        optional(bool): Whether or not the object may be None.

    Raises:
        TypeError: If the object is not an instance of one of the acceptable
            types, or if the object is None and optional=False.
    """
    assert isinstance(variable_name, str)
    if not isinstance(acceptable_types, tuple):
        acceptable_types = (acceptable_types,)
    assert isinstance(optional, bool)

    if isinstance(obj, acceptable_types):
        # Object is an instance of an acceptable type.
        return
    elif optional and obj is None:
        # Object is None, and that is okay!
        return
    else:
        # Object is something else.
        raise TypeError(
            f"{variable_name} should be a "
            f"{', '.join([t.__name__ for t in acceptable_types])}"
            f"{', or None' if optional else ''}. Received {obj!r} which is a "
            f"{type(obj).__name__}."
        )
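A short illustration of how the check behaves, assuming the function above is in scope:

check_type("timeout", 30, (int, float))                    # passes silently
check_type("timeout", None, (int, float), optional=True)   # None is allowed
try:
    check_type("timeout", "30", (int, float))              # raises TypeError
except TypeError as error:
    print(error)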
Python
def normalize_to_set(
    value: Union[None, str, int, Iterable[Union[str, int]]]
) -> Set[Union[str, int]]:
    """Normalize an optional or iterable variable to a set of unique values."""
    if value is None:
        return set()

    if isinstance(value, (str, int)):
        return {value}

    if isinstance(value, Iterable):
        return set(value)

    raise TypeError("The value must be a string, integer, iterable type, or None.")
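For reference, a few examples of what the normalization above returns, assuming the function is in scope:

normalize_to_set("us-east-1")                              # {'us-east-1'}
normalize_to_set(["us-east-1", "us-west-2", "us-east-1"])  # {'us-east-1', 'us-west-2'}
normalize_to_set(None)                                     # set()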
Python
def validate_values(
    variable_name: str,
    values: Set[Union[str, int]],
    valid_values: FrozenSet[Union[str, int]],
):
    """Validate the values in a set against a set of valid values."""
    if not values.issubset(valid_values):
        raise ValueError(
            f"One or more of the provided {variable_name} {values!r} do not "
            f"exist in this set of AWS IP address ranges. "
            f"Valid {variable_name}: {valid_values}"
        )
Python
def supernets(
    subnet: Union[IPv4Network, IPv6Network]
) -> Generator[Union[IPv4Network, IPv6Network], None, None]:
    """Incrementally yield the supernets of the provided subnet."""
    for prefix_length in range(subnet.prefixlen, 0, -1):
        yield subnet.supernet(new_prefix=prefix_length)
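A self-contained usage sketch using the standard library ipaddress module; the generator yields the network itself first, then progressively shorter prefixes down to /1:

from ipaddress import ip_network

for network in supernets(ip_network("192.0.2.0/28")):
    print(network)
# 192.0.2.0/28, 192.0.2.0/27, 192.0.2.0/26, ... 0.0.0.0/1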
Python
def json_data() -> Dict[str, Any]:
    """Retrieve and parse JSON data from a URL."""
    with urllib.request.urlopen(AWS_IP_ADDRESS_RANGES_URL) as response:
        response_data = json.load(response)
    return response_data
Python
def create_date(json_data) -> datetime:
    """The JSON file publication date and time as a Python datetime object.

    createDate is the JSON document's publication date and time, in UTC
    YY-MM-DD-hh-mm-ss format.
    """
    assert "createDate" in json_data
    create_date_string = json_data["createDate"]
    assert isinstance(create_date_string, str)

    create_date_datetime = datetime.strptime(create_date_string, CREATE_DATE_FORMAT)
    create_date_datetime = create_date_datetime.replace(tzinfo=CREATE_DATE_TIMEZONE)

    return create_date_datetime
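The module constants are not shown above; the values below are assumptions consistent with the docstring's YY-MM-DD-hh-mm-ss description, so this is only a sketch of the parsing step:

from datetime import datetime, timezone

CREATE_DATE_FORMAT = "%Y-%m-%d-%H-%M-%S"   # assumed value
CREATE_DATE_TIMEZONE = timezone.utc        # assumed value

parsed = datetime.strptime("2023-09-22-18-36-10", CREATE_DATE_FORMAT)
parsed = parsed.replace(tzinfo=CREATE_DATE_TIMEZONE)
print(parsed.isoformat())  # 2023-09-22T18:36:10+00:00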
Python
def deduplicated_prefixes(json_data) -> Dict[IPv4Network, List[Dict[str, str]]]:
    """Dictionary of `prefixes` indexed by IPv4Network."""
    deduplicated_prefixes = defaultdict(list)
    for prefix in json_data["prefixes"]:
        prefix_string = prefix["ip_prefix"]
        prefix_network = ip_network(prefix_string)
        deduplicated_prefixes[prefix_network].append(prefix)
    return deduplicated_prefixes
Python
def _process_prefixes(
    prefixes: Iterable[Union[AWSIPv4Prefix, AWSIPv6Prefix]],
) -> Tuple[Union[AWSIPv4Prefix, AWSIPv6Prefix], ...]:
    """Create a deduplicated sorted tuple of AWS IP prefixes."""
    collect_duplicates = defaultdict(list)
    for prefix in prefixes:
        collect_duplicates[prefix.prefix].append(prefix)

    deduplicated_prefixes = list()
    for prefixes in collect_duplicates.values():
        if len(prefixes) == 1:
            prefix = prefixes[0]
        else:
            prefix = combine_prefixes(prefixes)
        deduplicated_prefixes.append(prefix)

    deduplicated_prefixes.sort()

    return tuple(deduplicated_prefixes)
Python
def _get_prefix(
    self, prefix: Union[str, IPv4Network, IPv6Network]
) -> Union[None, AWSIPv4Prefix, AWSIPv6Prefix]:
    """Retrieve a specific prefix from the AWS IP address ranges."""
    check_type("prefix", prefix, (str, IPv4Network, IPv6Network))

    if isinstance(prefix, str):
        prefix = ip_network(prefix)

    if isinstance(prefix, IPv4Network):
        prefixes_collection = self.ipv4_prefixes
    elif isinstance(prefix, IPv6Network):
        prefixes_collection = self.ipv6_prefixes
    else:
        raise TypeError("`prefix` must be an IPv4Network or IPv6Network object.")

    # Retrieve the prefix from the collection
    index = bisect_left(prefixes_collection, prefix)
    if (
        index != len(prefixes_collection)
        and prefixes_collection[index].prefix == prefix
    ):
        return prefixes_collection[index]
    else:
        # Not found
        return None
Python
def createDate(self) -> datetime:  # noqa
    """The publication date and time, in UTC.

    This is a convenience attribute to maintain API compatibility with the
    JSON attribute names.
    """
    return self._create_date
Python
def prefixes(self) -> Tuple[AWSIPv4Prefix, ...]:
    """The IPv4 prefixes in the collection.

    This is a convenience attribute to maintain API compatibility with the
    JSON attribute names.
    """
    return self._ipv4_prefixes
Python
def md5(self) -> Optional[str]:
    """The MD5 cryptographic hash value of the ip-ranges.json file.

    You can use this value to verify the integrity of the downloaded file.
    """
    return self._md5
Python
def regions(self) -> FrozenSet[str]:
    """The set of regions in the collection."""
    if self._regions is None:
        self._regions = frozenset((prefix.region for prefix in self))
    return self._regions
Python
def network_border_groups(self) -> FrozenSet[str]:
    """The set of network border groups in the collection."""
    if self._network_border_groups is None:
        self._network_border_groups = frozenset(
            (prefix.network_border_group for prefix in self)
        )
    return self._network_border_groups
Python
def services(self) -> FrozenSet[str]:
    """The set of services in the collection.

    The service `"AMAZON"` is not a service but rather an identifier used to
    get all IP address ranges - meaning that every prefix is contained in the
    subset of prefixes tagged with the `"AMAZON"` service. Some IP address
    ranges are only tagged with the `"AMAZON"` service.
    """
    if self._services is None:
        self._services = frozenset(
            (service for prefix in self for service in prefix.services)
        )
    return self._services
Python
def filter(
    self,
    regions: Union[None, str, Iterable[str]] = None,
    network_border_groups: Union[None, str, Iterable[str]] = None,
    services: Union[None, str, Iterable[str]] = None,
    versions: Union[None, int, Iterable[int]] = None,
):
    """Filter the AWS IP address ranges.

    The service `"AMAZON"` is not a service but rather an identifier used to
    get all IP address ranges - meaning that every prefix is contained in the
    subset of prefixes tagged with the `"AMAZON"` service. Some IP address
    ranges are only tagged with the `"AMAZON"` service.

    **Parameters:**

    - **regions** (_optional_ str or iterable sequence of strings) - the AWS
      Regions to include in the subset
    - **network_border_groups** (_optional_ str or iterable sequence of
      strings) - the AWS network border groups to include in the subset
    - **services** (_optional_ str or iterable sequence of strings) - the AWS
      services to include in the subset
    - **versions** (_optional_ int) - the IP address version (4, 6) to include
      in the subset

    **Returns:** A new `AWSIPPrefixes` object that contains the subset of IP
    prefixes that match your filter criteria.
    """
    # Normalize, validate, and process the input variables
    # regions
    regions = normalize_to_set(regions) or self.regions
    validate_values("region", regions, valid_values=self.regions)

    # network_border_groups
    network_border_groups = (
        normalize_to_set(network_border_groups) or self.network_border_groups
    )
    validate_values(
        "network_border_group",
        network_border_groups,
        valid_values=self.network_border_groups,
    )

    # services
    services = normalize_to_set(services) or self.services
    validate_values("services", services, valid_values=self.services)

    # prefix_type -> prefix_version
    versions = normalize_to_set(versions) or {4, 6}
    validate_values("versions", versions, valid_values=frozenset((4, 6)))

    # Generate the filtered prefix list
    return self.__class__(
        sync_token=self.sync_token,
        create_date=self.create_date,
        ipv4_prefixes=tuple()
        if 4 not in versions
        else (
            prefix
            for prefix in self.ipv4_prefixes
            if prefix.region in regions
            if prefix.network_border_group in network_border_groups
            if set(prefix.services).intersection(services)
        ),
        ipv6_prefixes=tuple()
        if 6 not in versions
        else (
            prefix
            for prefix in self.ipv6_prefixes
            if prefix.region in regions
            if prefix.network_border_group in network_border_groups
            if set(prefix.services).intersection(services)
        ),
    )
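Assuming an AWSIPPrefixes collection built from the parsed ip-ranges.json data (the aws_ip_ranges variable here is a placeholder), the filter method above might be used like this:

ec2_ipv4 = aws_ip_ranges.filter(
    regions="eu-west-1",
    services="EC2",
    versions=4,
)
for prefix in ec2_ipv4.prefixes:
    print(prefix.ip_prefix, prefix.network_border_group)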
Python
def raise_for_status(response):
    """Raise an HTTPError on 4xx and 5xx status codes."""
    # Get the status code
    if hasattr(response, "status"):
        status = int(response.status)
    elif hasattr(response, "code"):
        status = int(response.code)
    elif hasattr(response, "getstatus"):
        status = int(response.getstatus())
    else:
        raise ValueError(
            f"Response object {response!r} does not contain a status code."
        )

    # Get the URL
    if hasattr(response, "url"):
        url = response.url
    elif hasattr(response, "geturl"):
        url = response.geturl()
    else:
        raise ValueError(f"Response object {response!r} does not contain a url.")

    # Get the reason, if available
    reason = response.reason if hasattr(response, "reason") else None

    if 400 <= status < 500:
        raise HTTPError(f"Client error for URL: {url}", status=status, reason=reason)

    if 500 <= status < 600:
        raise HTTPError(f"Server error for URL: {url}", status=status, reason=reason)
Python
def calculate_subject_name_hash(pem: str) -> str:
    """Calculate the OpenSSL subject_name_hash for a certificate in PEM format."""
    assert isinstance(pem, str)
    certificate = crypto.load_certificate(crypto.FILETYPE_PEM, pem.encode())
    return format(certificate.subject_name_hash(), "02x")
Python
def save_to_stacked_certificate_file(
    certificates: Iterable[str], file_path: Path
) -> Path:
    """Save certificates (in PEM format) to a stacked certificate file."""
    assert isinstance(certificates, Iterable)
    assert isinstance(file_path, Path)
    stacked_certificates = "\n".join(
        (certificate.strip() for certificate in certificates)
    )
    with open(file_path, "w") as file:
        file.write(stacked_certificates)
    return file_path
Python
def save_to_directory_of_hashed_certificates(pem: str, directory: Path) -> Path:
    """Save a certificate (in PEM format) to a directory of hashed certificates."""
    assert isinstance(pem, str)
    assert isinstance(directory, Path)
    assert directory.is_dir()

    subject_name_hash = calculate_subject_name_hash(pem)

    certificate_number = 0
    while True:
        file_path = directory / f"{subject_name_hash}.{certificate_number}"
        if file_path.exists():
            certificate_number += 1
            continue
        else:
            with open(file_path, "w") as file:
                file.write(pem)
            break

    return file_path
Python
def amazon_root_certificates() -> Dict[str, str]:
    """Download the Amazon root certificates from Amazon Trust Services."""
    amazon_root_certificates = {}
    for ca_filename in AMAZON_ROOT_CA_FILENAMES:
        with urlopen(
            urljoin(AMAZON_TRUST_SERVICES_REPOSITORY_URL, ca_filename)
        ) as response:
            assert response.status == 200
            certificate_contents = response.read().decode()
            amazon_root_certificates[ca_filename] = certificate_contents
    return amazon_root_certificates
Python
def hash_append(h, x):
    """
    h: a hash representing a list
    x: an object to be appended to that list
    """
    return md5(h + md5(x))
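A self-contained sketch of the hash chaining idea above; the md5 helper here is an assumption (a hex digest over strings, matching how the snippet concatenates hashes):

import hashlib

def md5(s):
    # assumed helper: hex digest of a string
    return hashlib.md5(s.encode()).hexdigest()

def hash_append(h, x):
    return md5(h + md5(x))

h = md5("")                   # hash representing the empty list
h = hash_append(h, "first")   # hash representing ["first"]
h = hash_append(h, "second")  # hash representing ["first", "second"]
print(h)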
Python
def hash_empty():
    """
    returns a hash representing an empty list
    """
    return md5("")
Python
def make_ALBA(distill, amplify):
    """
    distill: takes as input an expensive agent,
        uses it to train a cheap learning agent
    amplify: takes as input a weak agent and a human,
        uses it to produce a slow but powerful agent
    """
    def ALBA(H, n):
        overseer = H if n == 0 else amplify(ALBA(H, n-1), H, n)
        return distill(overseer, n)
    return ALBA
Python
def act(self, obs):
    """
    returns (action, state of Agent after computing action)
    """
    raise NotImplementedError("Agents must define act")
Python
def state_free(self):
    """
    set allows us to set the agent to the state it would occupy
    if it had seen a particular sequence of observations and actions

    set is only available on agents that don't maintain internal state

    this property is used when we want to assert that an agent is state free
    """
    return hasattr(self, 'set')
Python
def act(self, obs, budget):
    """
    returns (action, state of Agent after computing action, remaining budget)

    The method tracks some kind of resources; it should only use resources
    in budget, and it should return any unused resources.
    """
    raise NotImplementedError("BudgetedAgents must define act")
Python
def redirects(response, url=None, scheme=None, domain=None, port=None,
              path=None, query=None, fragment=None):
    """
    Given a Django response, asserts that it redirects to another URL, and
    that URL has various characteristics (e.g. response.path == "/foo").
    """
    assert response.status_code == 302
    if url:
        assert response["Location"] == url
    parts = urlsplit(response["Location"])
    if scheme:
        assert parts.scheme == scheme
    if domain:
        assert parts.hostname == domain
    if port:
        assert parts.port == port
    if path:
        assert parts.path == path
    if query:
        assert parts.query == query
    if fragment:
        assert parts.fragment == fragment
    return True
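In a Django test, the helper above could be used roughly like this; the URL, view, and query string are placeholders, not part of the original code:

def test_profile_requires_login(client):
    response = client.get("/accounts/profile/")
    assert redirects(
        response,
        path="/accounts/login/",
        query="next=/accounts/profile/",
    )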
Python
def queries(count=None, using=None):
    """
    A context manager that captures the queries that were made.

    :param count: assert this number of queries were made
    :param using: alias of the database to monitor

    .. note::
        The `list` of queries is not populated until after the context
        manager exits.

    Usage::

        with queries() as qs:
            User.objects.count()
        assert len(qs) == 5

        # The same could be rewritten as
        with queries(count=5):
            User.objects.count()
    """
    if using is None:
        using = DEFAULT_DB_ALIAS
    conn = connections[using]

    # For compatibility with Django 1.2, apply necessary patching.
    patches = []
    if not hasattr(conn, "use_debug_cursor"):
        patches.append(hacks.django12_debug_cursor(conn))

    with utils.nested(*patches):
        # A debug cursor saves all the queries to conn.queries; in case one
        # isn't already being used, restore the current state after the test.
        was_debug_cursor = conn.use_debug_cursor
        conn.use_debug_cursor = True
        prior = len(conn.queries)
        executed = []

        request_started.disconnect(reset_queries)
        try:
            yield executed
        finally:
            request_started.connect(reset_queries)
            conn.use_debug_cursor = was_debug_cursor
            executed[:] = conn.queries[prior:]
            if count is not None:
                assert len(executed) == count
Python
def testing_environment():
    """
    Context manager to put Django into a state suitable for testing.
    """
    teardown = setup()
    try:
        yield
    finally:
        teardown()
Python
def urlconf(patterns):
    """
    A context manager that turns URL patterns into the global URLconf.

    This is useful when you have a local variable of URL patterns that you
    want to use as the URLconf.

    :param patterns: list of `RegexURLPattern` or `RegexURLResolver`
        (what you get from `patterns`)

    .. note:: Not thread safe
    """
    NOTSET = object()
    global urlpatterns
    # The extravagant effort here to preserve the current value of urlpatterns
    # is done to ensure nesting of `urlconf`.
    try:
        old = urlpatterns
    except NameError:
        old = NOTSET
    urlpatterns = patterns
    try:
        with settings(ROOT_URLCONF=__name__):
            yield
    finally:
        if old is NOTSET:
            del urlpatterns
        else:
            urlpatterns = old
Python
def resolve(self, dependency, destination):
    """Copies the files from the defined `depdir` into the destination directory.

    Args:
        dependency (string): This is the name of the dependency file. It must
            be given relative to `depdir`.
        destination (string): This is the directory where the dependencies
            should be copied to
    """
    shutil.copyfile(os.path.join(self.depdir, dependency),
                    os.path.join(destination, dependency))
Python
def resolve(self, dependency, destination):
    """Copies the files from the defined `baseurl` into the destination directory.

    Args:
        dependency (string): This is the name of the dependency file. The
            download url is [baseurl]/[dependency].
        destination (string): This is the directory where the dependencies
            should be copied to
    """
    urlretrieve(self.baseurl + '/' + dependency,
                os.path.join(destination, dependency))
Python
def do_version_properties(self):
    '''Write a .properties file with version information, if conf string
    `version_properties` is given.'''
    if self.version_properties and self.version:
        with open(self.version_properties, 'w') as file:
            file.write('VERSION=%s' % self.version)
Python
def do_dependencies(self):
    '''Get required dependencies from specified `dependency_resolver`.'''
    if not os.path.exists(self.libdir):
        os.mkdir(self.libdir)
    for dep in self.depends:
        self.dependency_resolver.resolve(dep, self.libdir)
Python
def do_crypt(self):
    '''Crypt all class files and jars that are specified in `crypt`.

    To do that it uses the Allatori Obfuscator. It tries to get this
    dependency from the `dependency_resolver`.
    '''
    self.output('\n ')
    shutil.copytree(self.bindir + '/classes/', self.bindir + '/classes_temp/')
    shutil.rmtree(self.bindir + '/classes/')
    for crypt in self.crypt:
        self.output(' -> ' + crypt + ' ... ')
        self.unpack('lib/%s' % crypt, self.bindir + '/classes_temp/')
        os.remove('lib/%s' % crypt)
        self.output('Ok\n ', ok=True)
    if not os.path.exists('buildlibs'):
        os.mkdir('buildlibs')
    self.dependency_resolver.resolve('allatori-%s.jar' % self.allatori_version,
                                     'buildlibs')
    self.output(' crypt all ... ')
    self.run(['java', '-jar', 'buildlibs/allatori-%s.jar' % self.allatori_version,
              'cfg/allatori.xml'])
    shutil.rmtree(self.bindir + '/classes_temp/')
Python
def do_copy_meta_inf(self):
    '''Copies the contents of the META-INF folder if any into the `bindir`.'''
    src = '%s/META-INF/' % self.srcdir
    dest = '%s/classes/META-INF/' % self.bindir
    if os.path.exists(src):
        if os.path.exists(dest):
            shutil.rmtree(dest)
        self.copytree(src, dest)
Python
def find_share_file(package_name, file_dir, file_name):
    """Locate the path to a file within a package's share directory.

    Locate the path to a file within a package's share directory.

    * package_name -- is the name of the package
    * file_dir -- is the package directory containing the file (or None)
    * file_name -- is the name of the file to find
    """
    if file_dir is None:
        file_dir = ""

    return join(get_package_share_directory(package_name), file_dir, file_name)
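A hedged usage sketch; the package and file names are placeholders, and the resulting path depends on where ament installed the package's share directory:

urdf_path = find_share_file("my_robot_description", "urdf", "my_robot.urdf")
# -> <share prefix>/my_robot_description/urdf/my_robot.urdf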
Python
def find_executable(package_name, executable_name):
    """Locate the path to a node within a package.

    Locate the path to a node within a package.

    * package_name -- is the name of the package
    * executable_name -- is the name of the executable
    """
    return get_executable_path(
        package_name=package_name, executable_name=executable_name)
Python
def add_node(ld, package_name, node_name, args=None):
    """Add a node to the launch tree.

    Add a node to the launch tree.

    * ld -- is the launch descriptor object
    * package_name -- is the name of the package
    * node_name -- is the name of the node within the package
    * args -- the args to pass to the node
    """
    # Get the path to the executable
    executable = find_executable(package_name, node_name)

    # Make sure the node exists
    if not exists(executable):
        raise Exception("Failed to find '%s' node in package '%s'" %
                        (node_name, package_name))

    # Add the node to the launch tree
    if args is None:
        args = []
    ld.add_process(cmd=[executable] + args)
Python
def add_static_transform_publisher(
        ld, parent_frame, child_frame, x=0, y=0, z=0, roll=0, pitch=0, yaw=0):
    """Add a static transform publisher node to the launch tree.

    Add a static transform publisher node to the launch tree.

    * ld -- is the launch descriptor object
    * parent_frame -- is the name of the parent tf frame
    * child_frame -- is the name of the child tf frame
    * x -- the x offset for the transform
    * y -- the y offset for the transform
    * z -- the z offset for the transform
    * roll -- the roll offset for the transform
    * pitch -- the pitch offset for the transform
    * yaw -- the yaw offset for the transform
    """
    STATIC_TRANSFORM_PUBLISHER = get_executable_path(
        package_name="tf2_ros", executable_name="static_transform_publisher")

    ld.add_process(cmd=[
        STATIC_TRANSFORM_PUBLISHER,
        str(x), str(y), str(z),
        str(yaw), str(pitch), str(roll),
        parent_frame, child_frame
    ])
Python
def add_launch_file(
        ld, package_name, launch_file, launch_dir="launch", argv=None):
    """Add a launch file to the launch tree.

    Add a launch file to the launch tree.

    * ld -- is the launch descriptor object
    * package_name -- is the name of the package containing the launch file
    * launch_file -- is the name of the launch file
    * launch_dir -- is the package directory containing the launch file
    * argv -- is the dictionary of input arguments
    """
    # Locate the launch file
    if launch_dir is None:
        launch_dir = ""
    package_launch_file = find_share_file(package_name, launch_dir, launch_file)
    if not exists(package_launch_file):
        raise Exception("Failed to locate launch file: %s" % package_launch_file)

    # Import the launch module
    module_name = splitext(basename(package_launch_file))[0]
    launch_module = load_source(module_name, package_launch_file)
    if launch_module is None:
        raise Exception("Failed to import launch module: %s" % package_launch_file)

    # Make sure the launch function exists in the module
    if not hasattr(launch_module, "launch"):
        raise Exception("Imported invalid launch module: %s" % package_launch_file)

    try:
        if argv is None:
            argv = {}
        launch_module.launch(ld, argv)
    except Exception as e:
        raise Exception("Failed to add launch file: %s, error: %s" %
                        (package_launch_file, e))
Python
def xacro_to_urdf(package_name, xacro_dir, xacro_file, urdf_file=None):
    """Convert a xacro file to URDF.

    Convert a xacro file to URDF.

    * package_name -- is the name of the package that contains the xacro file
    * xacro_dir -- the name of the directory containing the xacro file
    * xacro_file -- is the name of xacro file
    * urdf_file -- the path to the URDF file to save (None to use a temporary file)
    """
    # Locate the xacro file
    xacroFile = find_share_file(package_name, xacro_dir, xacro_file)

    # Convert the xacro file to urdf, and return the path to the urdf file
    import xacro
    return xacro.to_urdf(xacroFile, urdf_path=urdf_file)
Python
def add_robot_state_publisher_urdf(ld, urdf_package, urdf_dir, urdf_file):
    """Add a robot state publisher node to the launch tree.

    Add a robot state publisher node to the launch tree using the given
    urdf file.

    * urdf_package -- is the name of the package that contains the urdf file
    * urdf_dir -- the name of the directory containing the urdf file
    * urdf_file -- is the name of urdf file
    """
    # Find the URDF file
    urdf = find_share_file(urdf_package, urdf_dir, urdf_file)

    # Launch the robot state publisher with the desired URDF
    add_node(ld, "robot_state_publisher", "robot_state_publisher", [urdf])
Python
def add_robot_state_publisher_xacro(ld, xacro_package, xacro_dir, xacro_file):
    """Add a robot state publisher node to the launch tree.

    Add a robot state publisher node to the launch tree using the given
    xacro file.

    * xacro_package -- is the name of the package that contains the xacro file
    * xacro_dir -- the name of the directory containing the xacro file
    * xacro_file -- is the name of xacro file
    """
    # Convert the xacro file to URDF
    urdf = xacro_to_urdf(xacro_package, xacro_dir, xacro_file)

    # Launch the robot state publisher with the desired URDF
    add_node(ld, "robot_state_publisher", "robot_state_publisher", [urdf])
Python
def create_args_list(arg_map):
    """Create a list of arguments for a node from a map.

    Create a list of arguments that can be passed to a node from a
    dictionary of key, value argument pairs.

    * arg_map -- the map of arguments
    """
    args = []
    for key, value in arg_map.items():
        args.append(str(key))
        args.append(str(value))
    return args
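A self-contained example of the flattening performed above (dictionary insertion order is preserved in Python 3.7+, so the output order matches the input):

args = create_args_list({"--topic": "/scan", "--rate": 10})
print(args)  # ['--topic', '/scan', '--rate', '10']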
Python
def measure_curvature_real(lefty, righty, leftx, rightx):
    global left_curveradius
    global right_curveradius
    '''
    Calculates the curvature of polynomial functions in meters.
    '''
    # Define conversions in x and y from pixels space to meters
    ym_per_pix = 30 / 720  # meters per pixel in y dimension
    xm_per_pix = 3.7 / 220  # meters per pixel in x dimension

    left_fit_cr = np.polyfit(lefty * ym_per_pix, leftx * xm_per_pix, 2)
    right_fit_cr = np.polyfit(righty * ym_per_pix, rightx * xm_per_pix, 2)

    # Define y-value where we want radius of curvature
    # We'll choose the maximum y-value, corresponding to the bottom of the image
    y_eval = np.max(lefty) + 1

    # Calculation of R_curve (radius of curvature)
    left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix
                           + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
        2 * left_fit_cr[0])
    right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix
                            + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
        2 * right_fit_cr[0])

    left_curveradius = left_curverad
    right_curveradius = right_curverad

    return left_curverad, right_curverad
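A minimal sketch of calling the function above with synthetic lane pixels, assuming the function is defined in a module that imports numpy as np; the parabola coefficients are placeholder data, not values from the original project:

import numpy as np

# Synthetic pixel positions along two roughly parallel parabolic lane lines
ploty = np.linspace(0, 719, 720)
leftx = 1e-4 * (ploty - 360) ** 2 + 300
rightx = 1e-4 * (ploty - 360) ** 2 + 520

left_r, right_r = measure_curvature_real(ploty, ploty, leftx, rightx)
print(round(left_r, 1), round(right_r, 1))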
Python
def model_loss(input_real, input_z, out_channel_dim):
    """
    Get the loss for the discriminator and generator
    :param input_real: Images from the real dataset
    :param input_z: Z input
    :param out_channel_dim: The number of channels in the output image
    :return: A tuple of (discriminator loss, generator loss)
    """
    model_fake = generator(input_z, out_channel_dim, is_train=True)
    real_out, real_logits = discriminator(input_real)
    fake_out, fake_logits = discriminator(model_fake, reuse=True)

    d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=real_logits, labels=tf.ones_like(real_out)))
    d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=fake_logits, labels=tf.zeros_like(fake_out)))
    d_loss = d_loss_real + d_loss_fake

    g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=fake_logits, labels=tf.ones_like(fake_out)))

    return d_loss, g_loss
Python
def init(infile, outfile, overwrite, maxBufferSize=None, maxTotalMemory=None):
    r"""!
    Initialize a new measurement driver.

    Reads parameters and other metadata from the input file to set up a new
    Measure driver. Ensures that the output file can be written to and stores
    the metadata in it if necessary.

    \param infile Path to the input file. Must contain configurations.
    \param outfile Path to the output file. May be the same as `infile`.
                   If this file exists, it must be compatible with the metadata
                   and measurements. Conflicts with existing measurement results
                   are only checked by the driver when the actual measurements
                   are known.
    \param overwrite Indicate whether data in the output file may be overwritten.
    \param maxBufferSize Maximum size that may be used for result buffers
                         in bytes (per buffer).
    \param maxTotalMemory Maximum total memory that may be used for result
                          buffers in bytes (all buffers).
    \returns A new isle.drivers.meas.Measure driver.
    """
    if infile is None:
        getLogger(__name__).error("No input file given to meas driver.")
        raise ValueError("No input file")

    if outfile is None:
        getLogger(__name__).info("No output file given to meas driver. "
                                 "Writing to input file.")
        outfile = infile

    lattice, params, makeActionSrc, versions = fileio.h5.readMetadata(infile)
    verifyVersionsByException(versions, infile)
    _ensureIsValidOutfile(outfile, lattice, params, makeActionSrc)

    return Measure(lattice, params,
                   callFunctionFromSource(makeActionSrc, lattice, params),
                   infile, outfile, overwrite, maxBufferSize, maxTotalMemory)
Python
def _isValidPath(path): """!Check if parameter is a valid path to a measurement inside an HDF5 file.""" nonempty = [component for component in str(path).split("/") if component.strip()] if len(nonempty) == 0: getLogger(__name__).error( "Output path of a measurement is the root HDF5 group. " "All measurements must be stored in a subgroup") return False return True
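A few illustrative calls, following directly from the check above (the path names are made up):

assert _isValidPath("correlators/single_particle")   # stored in a subgroup: OK
assert _isValidPath("/correlators")                  # leading slash is fine
assert not _isValidPath("/")                         # root group: rejected and logged
assert not _isValidPath("  ")                        # effectively empty: rejected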
Python
def _ensureCanWriteMeas(outfile, measurements, overwrite):
    r"""!
    Ensure that measurements can be written to the output file.

    \param outfile The output file, must already exist!
    \param measurements Measurements that will be saved at some point.
    \param overwrite If `True`, erase all existing HDF5 objects under the given paths.
                     If `False`, fail if an object exists under any given path.
    """

    paths = [measurement.savePath for measurement in measurements]

    # check if all paths are OK
    for path in paths:
        if not _isValidPath(path):
            raise ValueError(f"Invalid output path for measurement: {path}")

    # make sure that no paths point to existing objects in the output file
    with h5.File(outfile, "a" if overwrite else "r") as h5f:
        for path in paths:
            if path in h5f:
                if overwrite:
                    # preemptively get rid of that HDF5 object
                    getLogger(__name__).warning("Removing object from output file: %s", path)
                    del h5f[path]
                else:
                    # can't remove old object, needs action from user
                    getLogger(__name__).error("Object exists in output file: %s\n"
                                              "    Not allowed to overwrite", path)
                    raise RuntimeError("Object exists in output file")
Python
def _ensureIsValidOutfile(outfile, lattice, params, makeActionSrc):
    r"""!
    Check if the output file is a valid parameter.
    If the file does not yet exist, create and initialize it.

    \throws ValueError if output file type is not supported.
    """

    if fileio.fileType(outfile) != fileio.FileType.HDF5:
        getLogger(__name__).error("Output file type not supported by Meas driver: %s", outfile)
        raise ValueError("Output file type not supported by Meas driver. "
                         f"Output file is '{outfile}'")

    outfile = Path(outfile)
    if not outfile.exists():
        # the easy case, just make a new file
        fileio.h5.initializeNewFile(outfile, False, lattice, params, makeActionSrc)
    # else:
    #   Check measurement paths when running driver or saving measurements.
    #   Can't do this here because the paths are not known at this point.
Python
def _adjustConfigSlices(measurements, configurations): r"""! Change the configSlices of all measurements to reflect the actual range of configurations. \param measurements List of measurement objects. Each element's configSlice member is modified. \param configurations List of tuples `(index, ...)`. The indices are used to determine the configSlices for the measurements. All other tuple elements are ignored. """ configStep = configurations[1][0] - configurations[0][0] # last index in the list plus the stride to go '1 past the end' length = configurations[-1][0] + configStep for measurement in measurements: try: # replace step first (needed by other with* functions) aux = withStart(withStep(measurement.configSlice, configStep), configurations[0][0]) measurement.configSlice = withStop(aux, length) if aux.stop is None else aux except ValueError: getLogger(__name__).error("Invalid configuration slice %s in measurement %s " "given the actual configurations", measurement.configSlice, type(measurement)) raise
Python
def _totalMemoryAllowance(lattice, bufferFactor=0.8, maxBufferSize=None, maxTotalMemory=None):
    r"""!
    Return the total amount of memory that may be used for storing measurement results in bytes.
    """

    log = getLogger(__name__)
    available = _availableMemory()

    if maxTotalMemory:
        if available < maxTotalMemory:
            log.info(f"The given maximum memory ({maxTotalMemory:,} B) is more "
                     f"than the available memory ({available:,} B).")
        else:
            available = maxTotalMemory

    allowance = int(bufferFactor * (available - 10 * lattice.lattSize() * 16))
    message = f"""Maximum allowed memory usage by measurements: {allowance:,} B
    Based on lattice size {lattice.lattSize()} and reserving {100 - bufferFactor*100}% of available memory for other purposes."""
    if maxBufferSize:
        message += f"\n    Restricted to buffers of size {maxBufferSize:,} B."
    log.info(message)

    return allowance
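A worked example of the allowance formula with made-up numbers (a 32x16 lattice and 8 GiB of available memory):

lattSize = 32 * 16                     # hypothetical lattice volume
available = 8 * 1024**3                # hypothetical available memory in bytes
reserved = 10 * lattSize * 16          # ten complex (16 byte) vectors on the lattice
allowance = int(0.8 * (available - reserved))
# ~6.87e9 B, i.e. 80% of what remains after the reserve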
Python
def _confIntProbabilities(probabilities, quantileProb): r"""! Compute a confidence interval for the probabilities assuming a normal distribution. This is not entirely correct as the probabilities are at best distributed according to min(1, N(mu, sigma)) where N is a gaussian. But it should be close enough for tuning. """ mean = np.mean(probabilities) err = np.std(probabilities) # Sometimes, all probabilities are (almost) identical, just return a small # interval in order not to break later code (norm.interval would return NaN). if err < 1e-6: return mean-1e-6, mean+1e-6 # endpoints of quantileProb confidence interval result = norm.interval(quantileProb, loc=mean, scale=err) return result[0], result[1]
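A short, self-contained usage example; the acceptance probabilities are invented.

import numpy as np

probs = np.array([0.61, 0.72, 0.68, 0.75, 0.66])
low, high = _confIntProbabilities(probs, quantileProb=0.95)
# low/high bracket the sample mean under the normal approximation;
# for (nearly) constant input the interval collapses to mean +/- 1e-6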
Python
def confIntProbabilities(self, quantileProb): r"""! Compute the given confidence interval for acceptance probabilities. \see _confIntProbabilities for a caveat. """ return _confIntProbabilities(self.probabilities, quantileProb)
Python
def save(self, h5group): """!Save to an HDF5 group.""" h5group["length"] = self.length h5group["nstep"] = self.nstep h5group["probabilities"] = self.probabilities h5group["trajPoints"] = self.trajPoints h5group["verification"] = self.verification
Python
def fromH5(cls, h5group): """!Construct an instance from an HDF5 group.""" record = cls(h5group["length"][()], h5group["nstep"][()], h5group["verification"][()]) record.probabilities = list(h5group["probabilities"][()]) record.trajPoints = list(h5group["trajPoints"][()]) return record
Python
def save(self, h5group): r"""! Save all records and fit results to an HDF5 group. Extends existing saves if they are compatible with the data currently in memory. \throws RuntimeError if some data has already been saved to the file which is incompatible with the current state of the registrar. \param h5group HDF5 group to save to. """ self._saveRecords(createH5Group(h5group, "records")) self._saveFitResults(createH5Group(h5group, "fitResults"))
Python
def fitNstep(self, probabilityPoints, trajPointPoints):
    r"""!
    Fit a skewnormal CDF to both acceptance probability and rate.

    \returns Fitter.Result with the results from all successful fits or
             `None` if no fit succeeded.
    """

    # prepare inputs
    independent, dependent, dependenterr = self._joinFitData(probabilityPoints, trajPointPoints)
    # use the previous best fit as one additional starting guess if available
    startParams = self._startParams + ([self._lastFit] if self._lastFit is not None else [])

    fittedParams = []
    for guess in startParams:
        try:
            fittedParams.append(curve_fit(_fitFunction, independent, dependent,
                                          p0=guess, sigma=dependenterr,
                                          absolute_sigma=True, method="trf")[0])
        except RuntimeError as err:
            # don't save this one but continue with others
            getLogger(__name__).info("Fit failed with starting parameters %s: %s", guess, err)

    if not fittedParams:
        getLogger(__name__).error("No fit converged, unable to continue tuning.")
        return None

    bestFit, *otherFits = sorted(
        fittedParams,
        key=lambda params: _sumSquares(_fitFunction, independent, dependent,
                                       dependenterr, params))
    self._lastFit = bestFit
    return self.Result(bestFit, otherFits)
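The model `_fitFunction` is not shown in this snippet; the fit-and-select pattern above (try several starting points, keep the result with the smallest sum of squared residuals) can be illustrated stand-alone with a skew-normal CDF, which is what the docstring refers to. All names and numbers below are illustrative.

import numpy as np
from scipy.optimize import curve_fit
from scipy.stats import skewnorm

def skewnormCDF(x, loc, scale, skew):
    # stand-in for the skew-normal CDF model mentioned in the docstring
    return skewnorm.cdf(x, skew, loc=loc, scale=scale)

x = np.linspace(1, 20, 40)
y = skewnormCDF(x, 6.0, 3.0, 2.0) + np.random.default_rng(0).normal(0, 0.02, x.size)

fits = []
for guess in [(5.0, 2.0, 1.0), (10.0, 5.0, -1.0)]:
    try:
        fits.append(curve_fit(skewnormCDF, x, y, p0=guess)[0])
    except RuntimeError:
        continue                      # skip non-converging starts, as fitNstep does
best = min(fits, key=lambda p: np.sum((skewnormCDF(x, *p) - y)**2))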
Python
def evolve(self, stage): r"""! Run one step of leapfrog integration and tune parameters. \param stage EvolutionStage at the beginning of this evolution step. \returns EvolutionStage at the end of this evolution step. """ # do not evolve any more, signal the driver to stop if self._finished: raise StopIteration() stage = self._doEvolve(stage) log = getLogger(__name__) currentRecord = self.registrar.currentRecord() # check if the minimum number of runs has been reached if len(currentRecord) >= self.runsPerParam[0]: # get errors for current run errProb = _errorProbabilities(currentRecord.probabilities, TWO_SIGMA_PROB) errTP = _errorTrajPoints(currentRecord.trajPoints, TWO_SIGMA_PROB) if errTP < self.targetConfIntTP: log.info("Reached target confidence for trajectory point, picking next nstep") self._pickNextNstep() elif errProb < self.targetConfIntProb: log.info("Reached target confidence for probability, picking next nstep") self._pickNextNstep() elif len(currentRecord) > self.runsPerParam[1]: log.debug("Reached maximum number of runs for current nstep, picking next nstep") self._pickNextNstep() # Check here not at the beginning of the function because # one of the above steps may have inserted a new record. if not self._finished and len(self.registrar) > self.maxRuns: log.error("Tuning was unsuccessful within the given maximum number of runs") self._finalize(None) return stage
Python
def _doEvolve(self, stage): r"""! Do the leapfrog integration and record probability and trajectory point. """ params = self.currentParams() # get start phi for MD integration phiMD, logdetJ = backwardTransform(self.transform, stage) if self.transform is not None and "logdetJ" not in stage.logWeights: stage.logWeights["logdetJ"] = logdetJ # do MD integration pi = Vector(self.rng.normal(0, 1, len(stage.phi))+0j) phiMD1, pi1, actValMD1 = leapfrog(phiMD, pi, self.action, params["length"], params["nstep"]) # transform to MC manifold phi1, actVal1, logdetJ1 = forwardTransform(self.transform, phiMD1, actValMD1) # accept/reject on MC manifold energy0 = stage.sumLogWeights()+np.linalg.norm(pi)**2/2 energy1 = actVal1+logdetJ1+np.linalg.norm(pi1)**2/2 trajPoint1 = self._selector.selectTrajPoint(energy0, energy1) self.registrar.currentRecord().add(min(1, exp(np.real(energy0 - energy1))), trajPoint1) logWeights = None if self.transform is None \ else {"logdetJ": (logdetJ, logdetJ1)[trajPoint1]} return stage.accept(phi1, actVal1, logWeights) if trajPoint1 == 1 \ else stage.reject()
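The accept/reject step uses the standard Metropolis probability min(1, exp(energy0 - energy1)); a quick numeric illustration with made-up energies:

import numpy as np

energy0, energy1 = 12.3, 12.8                        # hypothetical start/end energies
acceptProb = min(1, np.exp(np.real(energy0 - energy1)))
# exp(-0.5) ~ 0.61, so the proposal is accepted with roughly 61% probability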
Python
def _nstepFromFit(self): r"""! Compute the optimum nstep as a float from fitting to the current recording. Returns None if the fit is unsuccessful. """ log = getLogger(__name__) fitResult = self._fitter.fitNstep(*self.registrar.gather( length=self.currentParams()["length"])) if fitResult is not None: # pick nstep from fit log.info("Completed fit for run %d, best parameters: %s", len(self.registrar)-1, fitResult.bestFit) self.registrar.addFitResult(fitResult) floatStep = fitResult.bestNstep(self.targetAccRate) log.info("Optimal nstep from current fit: %f", floatStep) return floatStep return None
Python
def _pickNextNstep_search(self): r"""! Choose a new nstep based on the entire current recording to continue the search for the optimum. Switches to the verification stage if all candidates for nstep have already been visited. """ log = getLogger(__name__) floatStep = self._nstepFromFit() self.saveRecording() # save including the fit result if floatStep is None: log.info("Fit unsuccessful, shifting nstep") # try a different nstep at an extreme end to stabilise the fit nextStep = self._shiftNstep() else: # try floor or ceil nextStep = max(int(floor(floatStep)), 1) if self.registrar.seenBefore(nstep=nextStep): nextStep = int(ceil(floatStep)) if self.registrar.seenBefore(nstep=nextStep): self._enterVerification(floatStep) return if nextStep > self.maxNstep: attemptedStep = nextStep nextStep = self.maxNstep while self.registrar.seenBefore(nstep=nextStep): if nextStep == 1: raise RuntimeError("Exhausted all nstep values between 1 and maximum") nextStep -= 1 log.warning("Tried to use nstep=%d which is above maximum of %d. Lowered to %d", attemptedStep, self.maxNstep, nextStep) self.registrar.newRecord(self.currentParams()["length"], nextStep) getLogger(__name__).debug("New nstep: %d", nextStep)
Python
def _verificationIntStep(self, oldFloatStep):
    r"""!
    Compute an integer nstep from a fit during verification.
    Aborts verification if the new floatStep differs from the old one by more than one
    or if the fit fails.
    """

    log = getLogger(__name__)

    floatStep = self._nstepFromFit()
    self.saveRecording()
    if floatStep is None:
        log.info("Fit unsuccessful in verification")
        self._cancelVerification(self._shiftNstep())
        return None

    if abs(floatStep-oldFloatStep) > 1:
        log.info("Nstep changed by more than 1 in verification: %f vs %f",
                 floatStep, oldFloatStep)
        self._cancelVerification(max(int(floor(floatStep)), 1))
        return None

    return floatStep
Python
def _enterVerification(self, floatStep): r"""! Switch to the verification stage. Starts a new run using floor(floatStep) and registers a new pickNstep which proceeds to ceil(floatStep) and potentially terminates. \param floatStep Floating point number for optimal nstep given current recording. """ def _pickNextNstep_verificationUpper(): """!Check run with upper end of interval around floatStep.""" getLogger(__name__).debug("Checking upper end of interval around floatStep") nextFloatStep = self._verificationIntStep(floatStep) if nextFloatStep is not None: self._finalize(nextFloatStep) else: # something is seriously unstable if this happens getLogger(__name__).error("The final fit did not converge, " "unable to extract nstep from tuning results. " "Continuing search.") # verification has been canceled => do nothing more here def _pickNextNstep_verificationLower(): """!Check run with lower end of interval around floatStep.""" getLogger(__name__).debug("Checking lower end of interval around floatStep") nextFloatStep = self._verificationIntStep(floatStep) if nextFloatStep is not None: # run with upper end of interval next self.registrar.newRecord(self.currentParams()["length"], int(ceil(floatStep)), True) self._pickNextNstep = _pickNextNstep_verificationUpper # else: verification has been canceled => do nothing here getLogger(__name__).info("Entering verification stage with nstep = %f", floatStep) getLogger(__name__).debug("Checking lower end of interval around floatStep") # run with lower end of interval next self.registrar.newRecord(self.currentParams()["length"], max(int(floor(floatStep)), 1), True) self._pickNextNstep = _pickNextNstep_verificationLower
Python
def _pickNextNstep_verificationUpper(): """!Check run with upper end of interval around floatStep.""" getLogger(__name__).debug("Checking upper end of interval around floatStep") nextFloatStep = self._verificationIntStep(floatStep) if nextFloatStep is not None: self._finalize(nextFloatStep) else: # something is seriously unstable if this happens getLogger(__name__).error("The final fit did not converge, " "unable to extract nstep from tuning results. " "Continuing search.") # verification has been canceled => do nothing more here
Python
def _pickNextNstep_verificationLower(): """!Check run with lower end of interval around floatStep.""" getLogger(__name__).debug("Checking lower end of interval around floatStep") nextFloatStep = self._verificationIntStep(floatStep) if nextFloatStep is not None: # run with upper end of interval next self.registrar.newRecord(self.currentParams()["length"], int(ceil(floatStep)), True) self._pickNextNstep = _pickNextNstep_verificationUpper # else: verification has been canceled => do nothing here
Python
def _cancelVerification(self, nextStep): r"""! Exit verification stage and revert back to the search stage with given nstep. """ getLogger(__name__).info("Cancelling verification, reverting back to search") self.registrar.newRecord(self.currentParams()["length"], nextStep, False) self._pickNextNstep = self._pickNextNstep_search
Python
def _finalize(self, finalFloatStep): r"""! Wrap up after successful tuning. Estimate an optimum trajectory length based on given optimal nstep (float). Stores results in the record file. """ self._finished = True self.saveRecording() if finalFloatStep is not None: nstep = max(int(floor(finalFloatStep)), 1) # linearly interpolate between floor(floatStep) and ceil(floatStep) length = nstep / finalFloatStep self._tunedParameters = {"nstep": nstep, "length": length} with h5.File(self.recordFname, "a") as h5f: h5f["leapfrogTuner/tuned_length"] = length h5f["leapfrogTuner/tuned_nstep"] = nstep getLogger(__name__).info("Finished tuning with length = %f and nstep = %d", length, nstep)
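The final conversion is plain arithmetic; for an illustrative finalFloatStep of 7.4:

from math import floor

finalFloatStep = 7.4                         # hypothetical optimum from the fits
nstep = max(int(floor(finalFloatStep)), 1)   # -> 7
length = nstep / finalFloatStep              # -> ~0.946, so length/nstep stays at 1/finalFloatStep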
Python
def saveRecording(self): r"""! Save the current state of the recording. Can be incorporated into an existing save. """ getLogger(__name__).info("Saving current recording") with h5.File(self.recordFname, "a") as h5f: self.registrar.save(createH5Group(h5f, "leapfrogTuner"))
Python
def tunedEvolver(self, rng=None): r"""! Construct a new leapfrog evolver with tuned parameters. \param rng Use this RNG for the evolver or use the one passed to the constructor of the tuner if `rng is None`. \throws RuntimeError if tuning is not complete/successful. \returns A new instance of evolver.leapfrog.ConstStepLeapfrog with the tuned length and nstep. """ params = self.tunedParameters() return ConstStepLeapfrog(self.action, params["length"], params["nstep"], self._selector.rng if rng is None else rng, transform=self.transform)
Python
def loadTunedEvolver(cls, h5group, action, rng): r"""! Construct a new leapfrog evolver with tuned parameters loaded from HDF5. \param h5group Base group that contains the tuner group, i.e. `h5group['leapfrogTuner']` must exist. \param action Instance of isle.Action to use for molecular dynamics. \param rng Central random number generator for the run. Used for accept/reject. \throws RuntimeError if tuning is not complete/successful. \returns A new instance of evolver.leapfrog.ConstStepLeapfrog with the tuned length and nstep. """ params = cls.loadTunedParameters(h5group) return ConstStepLeapfrog(action, params["length"], params["nstep"], rng)
Python
def report(self): r"""! Return a string summarizing the evolution since the evolver was constructed including by fromH5. """ return f"""<Autotuner> (0x{id(self):x}) record file = {self.recordFname}"""
Python
def evolve(self, stage): r"""! Run one step of leapfrog integration and tune parameters. \param stage EvolutionStage at the beginning of this evolution step. \returns EvolutionStage at the end of this evolution step. """ # do not evolve any more, signal the driver to stop if self._finished: raise StopIteration() stage = self._doEvolve(stage) log = getLogger(__name__) currentRecord = self.registrar.currentRecord() # check if the minimum number of runs has been reached if len(currentRecord) >= self.runsPerParam[0]: # get errors for current run errProb = _errorProbabilities(currentRecord.probabilities, TWO_SIGMA_PROB) errTP = _errorTrajPoints(currentRecord.trajPoints, TWO_SIGMA_PROB) if errTP < self.targetConfIntTP: log.info("Reached target confidence for trajectory point, picking next Length") self._pickNextLength() elif errProb < self.targetConfIntProb: log.info("Reached target confidence for probability, picking next Length") self._pickNextLength() elif len(currentRecord) > self.runsPerParam[1]: log.debug("Reached maximum number of runs for current length, picking next Length") self._pickNextLength() # Check here not at the beginning of the function because # one of the above steps may have inserted a new record. if not self._finished and len(self.registrar) > self.maxRuns: log.error("Tuning was unsuccessful within the given maximum number of runs") self._finalize(None) return stage
Python
def _lengthFromFit(self): r"""! Compute the optimum length as a float from fitting to the current recording. Returns None if the fit is unsuccessful. """ log = getLogger(__name__) fitResult = self._fitter.fitLength(*self.registrar.gather( nstep=self.currentParams()["nstep"])) if fitResult is not None: # pick length from fit log.info("Completed fit for run %d, best parameters: %s", len(self.registrar)-1, fitResult.bestFit) self.registrar.addFitResult(fitResult) length = fitResult.bestLength(self.targetAccRate) log.info("Optimal length from current fit: %f", length) return length return None
Python
def _pickNextLength_search(self):
    r"""!
    Choose a new length based on the entire current recording to continue
    the search for the optimum.
    Switches to the verification stage once the candidate length and the measured
    acceptance rate are close enough to the target.
    """

    log = getLogger(__name__)

    length = self._lengthFromFit()
    self.saveRecording()  # save including the fit result

    if length is None:
        log.info("Fit unsuccessful, shifting length")
        # try a different length at an extreme end to stabilise the fit
        nextLength = self._shiftLength()
    else:
        nextLength = length
        acceptanceRate = np.mean(self.registrar.currentRecord().trajPoints)
        if abs(self.currentParams()["length"]/nextLength - 1) < 0.1 \
                and abs(self.targetAccRate - acceptanceRate) < 0.025:
            self._enterVerification(nextLength)
            return

    if nextLength > self.maxLength:
        attemptedLength = nextLength
        nextLength = self.maxLength
        while self.registrar.seenBefore(length=nextLength):
            nextLength -= 1
        log.warning("Tried to use length=%f which is above maximum of %f. Lowered to %f",
                    attemptedLength, self.maxLength, nextLength)

    self.registrar.newRecord(nextLength, self.currentParams()["nstep"])
    getLogger(__name__).debug("New length: %f", nextLength)
Python
def _verificationLength(self, oldLength):
    r"""!
    Compute length from a fit during verification.
    Aborts verification if the new length differs from the old one by more than 5%
    or if the fit fails.
    """

    log = getLogger(__name__)

    length = self._lengthFromFit()
    acceptanceRate = np.mean(self.registrar.currentRecord().trajPoints)
    self.saveRecording()
    if length is None:
        log.info("Fit unsuccessful in verification")
        self._cancelVerification(self._shiftLength())
        return None

    if abs(length/oldLength-1) > 0.05 or abs(acceptanceRate - self.targetAccRate) > 0.025:
        log.info("Length changed by more than 5%% in verification: %f vs %f\n"
                 "    or target acceptance rate missed by more than 0.025: %f vs %f",
                 length, oldLength, self.targetAccRate, acceptanceRate)
        self._cancelVerification(length)
        return None

    log.info("acceptance rate = %f", acceptanceRate)
    return length
Python
def _enterVerification(self, length):
    r"""!
    Switch to the verification stage.
    Starts a new run using the candidate length and registers a new pickLength
    which verifies that choice and potentially terminates tuning.

    \param length Floating point number for optimal length given current recording.
    """

    getLogger(__name__).info("Entering verification stage with length = %f", length)

    self.runsPerParam = tuple([4*x for x in self.runsPerParam])

    def _pickNextLength_verification():
        """!Verify the run with the candidate length."""
        getLogger(__name__).debug("Checking verification run with candidate length")
        nextLength = self._verificationLength(length)
        if nextLength is not None:
            self._finalize(nextLength)
        else:
            # something is seriously unstable if this happens
            getLogger(__name__).error("The final fit did not converge, "
                                      "unable to extract length from tuning results. "
                                      "Continuing search.")
            # verification has been canceled => do nothing more here

    # run with the candidate length next
    self.registrar.newRecord(length, self.currentParams()["nstep"], True)
    self._pickNextLength = _pickNextLength_verification
Python
def _pickNextLength_verification():
    """!Verify the run with the candidate length."""
    getLogger(__name__).debug("Checking verification run with candidate length")
    nextLength = self._verificationLength(length)
    if nextLength is not None:
        self._finalize(nextLength)
    else:
        # something is seriously unstable if this happens
        getLogger(__name__).error("The final fit did not converge, "
                                  "unable to extract length from tuning results. "
                                  "Continuing search.")
        # verification has been canceled => do nothing more here
Python
def _cancelVerification(self, nextLength): r"""! Exit verification stage and revert back to the search stage with given length. """ getLogger(__name__).info("Cancelling verification, reverting back to search") self.runsPerParam = tuple([x/4 for x in self.runsPerParam]) self.registrar.newRecord(nextLength, self.currentParams()["nstep"], False) self._pickNextLength = self._pickNextLength_search
Python
def loadTunedEvolver(cls, h5group, action, rng, trafo=None): r"""! Construct a new leapfrog evolver with tuned parameters loaded from HDF5. \param h5group Base group that contains the tuner group, i.e. `h5group['leapfrogTuner']` must exist. \param action Instance of isle.Action to use for molecular dynamics. \param rng Central random number generator for the run. Used for accept/reject. \throws RuntimeError if tuning is not complete/successful. \returns A new instance of evolver.leapfrog.ConstStepLeapfrog with the tuned length and nstep. """ params = cls.loadTunedParameters(h5group) return ConstStepLeapfrog(action, params["length"], params["nstep"], rng, transform=trafo)
Python
def _representLattice(dumper, lat): """! Create a YAML representation of a Lattice using a `!lattice` node. """ if lat.nx() > 1: adj, hopping = zip(*[([i, neigh[0]], neigh[1]) for i in range(lat.nx()) for neigh in lat.getNeighbors(i) if neigh[0] > i]) else: adj, hopping = [], [] positions = [list(lat.position(i)) for i in range(lat.nx())] return dumper.represent_mapping("!lattice", {"name": lat.name, "comment": lat.comment, "nt": lat.nt(), "adjacency": list(adj), "hopping": list(hopping), "positions": positions}, flow_style=False)
Python
def loadLattice(fname): """!Load a Lattice from a YAML file.""" if hasattr(fname, "read"): string = fname.read() else: with open(fname, "r") as yamlf: string = yamlf.read() return yaml.safe_load(string)
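A short usage sketch; the file name is made up, and resolving the `!lattice` tag relies on a constructor registered with the YAML loader elsewhere in the package.

# from a path
lat = loadLattice("resources/lattices/triangle.yml")   # hypothetical file

# or from an already opened stream
with open("resources/lattices/triangle.yml") as latfile:
    lat = loadLattice(latfile)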
Python
def saveFieldAndCheckpoint(self, stage, evolver): """! Write a trajectory (endpoint) and checkpoint to file. """ with h5.File(self.outfname, "a") as outf: cfgGrp = self._writeTrajectory(outf, stage) self._writeCheckpoint(outf, cfgGrp, evolver)
Python
def save(self, stage): """! Write a trajectory (endpoint) to file. """ with h5.File(self.outfname, "a") as outf: self._writeTrajectory(outf, stage)
Python
def advance(self, amount=1): """! Advance the internal trajectory counter by amount without saving. """ self._trajIdx += amount
Python
def resetIndex(self, idx=0): """! Reset the internal trajectory index to idx. """ self._trajIdx = idx
Python
def _saveConditionally(self, stage, evolver, saveFreq, checkpointFreq): """!Save the trajectory and checkpoint if frequencies permit.""" if saveFreq != 0 and self._trajIdx % saveFreq == 0: if checkpointFreq != 0 and self._trajIdx % checkpointFreq == 0: self.saveFieldAndCheckpoint(stage, evolver) else: self.save(stage)
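For illustration, with saveFreq=2 and checkpointFreq=10 the logic above saves the field on every even trajectory index and additionally writes a checkpoint on every tenth:

saveFreq, checkpointFreq = 2, 10
for idx in range(12):
    saves = saveFreq != 0 and idx % saveFreq == 0
    checkpoints = saves and checkpointFreq != 0 and idx % checkpointFreq == 0
    # idx 0, 10 -> save + checkpoint; idx 2, 4, 6, 8 -> save only; odd idx -> nothing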
Python
def _writeTrajectory(self, h5file, stage):
    """!
    Write a trajectory (endpoint) to an HDF5 group.
    """
    try:
        return fileio.h5.writeTrajectory(h5file["configuration"], self._trajIdx, stage)
    except (ValueError, RuntimeError) as err:
        if "name already exists" in err.args[0]:
            getLogger(__name__).error("Cannot write trajectory %d to file %s."
                                      " A dataset with the same name already exists.",
                                      self._trajIdx, self.outfname)
        raise
Python
def _writeCheckpoint(self, h5file, trajGrp, evolver):
    """!
    Write a checkpoint to an HDF5 group.
    """
    try:
        return fileio.h5.writeCheckpoint(h5file["checkpoint"], self._trajIdx, self.rng,
                                         trajGrp.name, evolver, self._evManager)
    except (ValueError, RuntimeError) as err:
        if "name already exists" in err.args[0]:
            getLogger(__name__).error("Cannot write checkpoint for trajectory %d to file %s."
                                      " A dataset with the same name already exists.",
                                      self._trajIdx, self.outfname)
        raise
Python
def newRun(lattice, params, rng, makeAction, outfile, overwrite, definitions={}):
    r"""!
    Start a fresh %HMC run.

    Constructs a %HMC driver from given parameters and initializes the output file.
    Most parameters are stored in the HMC object under the same name.

    \param lattice Lattice to run simulation on, passed to `makeAction`.
    \param params Parameters passed to `makeAction`.
    \param rng Random number generator used for all random numbers needed during %HMC evolution.
    \param makeAction Function or source code of a function to construct an action.
                      Must be self-contained!
    \param outfile Name (Path) of the output file. Must not exist unless `overwrite==True`.
    \param overwrite If `False`, nothing in the output file will be erased/overwritten.
                     If `True`, the file is removed and re-initialized, whereby all content is lost.
    \param definitions Dictionary mapping names to custom types. Used to control
                       how evolvers are stored for checkpoints.

    \returns A new HMC instance to control evolution initialized with given parameters.
    """

    if outfile is None:
        getLogger(__name__).error("No output file given for HMC driver")
        raise ValueError("No output file")

    makeActionSrc = makeAction if isinstance(makeAction, str) else sourceOfFunction(makeAction)
    fileio.h5.initializeNewFile(outfile, overwrite, lattice, params, makeActionSrc,
                                ["/configuration", "/checkpoint"])
    return HMC(lattice, params, rng,
               callFunctionFromSource(makeActionSrc, lattice, params),
               outfile, 0, definitions)
Python
def continueRun(infile, outfile, startIdx, overwrite, definitions={}):
    r"""!
    Continue a previous %HMC run.

    Loads metadata and a given checkpoint from the input file, constructs a new HMC driver
    object from them, and initializes the output file.

    \param infile Name of the input file. Must contain at least one checkpoint to continue from.
    \param outfile Name of the output file. Can be `None`, which means equal to the input file.
    \param startIdx Index of the checkpoint to start from. See parameter `overwrite`.
                    Can be negative in which case it is counted from the end
                    (-1 is the last checkpoint). This is the number the checkpoint is saved as,
                    not 'the n-th checkpoint in the file'.
    \param overwrite If `False`, nothing in the output file will be erased/overwritten.
                     If `True`,
                     - (`infile==outfile`): all configurations and checkpoints newer than
                       `startIdx` are removed and have to be re-computed.
                     - (`infile!=outfile`): outfile is removed and re-initialized,
                       whereby all content is lost.
    \param definitions Dictionary mapping names to custom types. Used to control
                       how evolvers are stored for checkpoints.

    \returns In order:
      - Instance of HMC constructed from parameters found in `infile`.
      - Configuration loaded from checkpoint.
      - Evolver loaded from checkpoint.
      - Save frequency computed on last two configurations.
        `None` if there is only one configuration.
      - Checkpoint frequency computed on last two checkpoints.
        `None` if there is only one checkpoint.
    """

    if infile is None:
        getLogger(__name__).error("No input file given for HMC driver in continuation run")
        raise ValueError("No input file")
    if outfile is None:
        getLogger(__name__).info("No output file given for HMC driver")
        outfile = infile

    lattice, params, makeActionSrc, versions = fileio.h5.readMetadata(infile)
    verifyVersionsByException(versions, infile)
    action = callFunctionFromSource(makeActionSrc, lattice, params)

    if outfile != infile:
        fileio.h5.initializeNewFile(outfile, overwrite, lattice, params, makeActionSrc,
                                    ["/configuration", "/checkpoint"])

    configurations, checkpoints = _loadIndices(infile)
    evManager = EvolverManager(infile, definitions=definitions)
    checkpointIdx, rng, stage, evolver = _loadCheckpoint(infile, startIdx, checkpoints,
                                                         evManager, action, lattice)

    if outfile == infile:
        _ensureNoNewerConfigs(infile, checkpointIdx, checkpoints, configurations, overwrite)

    return (HMC(lattice, params, rng, action, outfile, checkpointIdx, definitions,
                evManager if infile == outfile else None),  # need to re-init manager for new outfile
            stage, evolver,
            _stride(configurations), _stride(checkpoints))
Python
def _stride(values):
    """!
    Calculate the difference between the last two values in an array.
    Returns None if the array has fewer than two elements.
    """
    try:
        return values[-1] - values[-2]
    except IndexError:
        return None
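Two quick examples of the helper above (the index lists are made up):

assert _stride([0, 100, 200]) == 100   # difference between the last two saved indices
assert _stride([5]) is None            # a single entry gives no frequency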
Python
def _loadIndices(fname):
    """!
    Load all configuration and checkpoint indices from a file.
    """
    with h5.File(str(fname), "r") as h5f:
        configurations = sorted(map(int, h5f["configuration"].keys()))
        checkpoints = sorted(map(int, h5f["checkpoint"].keys()))
    return configurations, checkpoints