Dataset columns: repository_name (string, 5–67 chars), func_path_in_repository (string, 4–234 chars), func_name (string, 0–314 chars), whole_func_string (string, 52–3.87M chars), language (string, 6 classes), func_code_string (string, 52–3.87M chars), func_documentation_string (string, 1–47.2k chars), func_code_url (string, 85–339 chars).
JustinLovinger/optimal
optimal/algorithms/gaoperators.py
_fitnesses_to_probabilities
def _fitnesses_to_probabilities(fitnesses):
    """Return a list of probabilities proportional to fitnesses."""
    # Do not allow negative fitness values
    min_fitness = min(fitnesses)
    if min_fitness < 0.0:
        # Make smallest fitness value 0
        fitnesses = map(lambda f: f - min_fitness, fitnesses)

    fitness_sum = sum(fitnesses)

    # Generate probabilities
    # Creates a list of increasing values.
    # The greater the gap between two values, the greater the probability.
    # Ex. [0.1, 0.23, 0.56, 1.0]
    prob_sum = 0.0
    probabilities = []
    for fitness in fitnesses:
        if fitness < 0:
            raise ValueError(
                "Fitness cannot be negative, fitness = {}.".format(fitness))

        prob_sum += (fitness / fitness_sum)
        probabilities.append(prob_sum)
    probabilities[-1] += 0.0001  # to compensate for rounding errors

    return probabilities
python
Return a list of probabilities proportional to fitnesses.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/gaoperators.py#L182-L206
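Because the returned list is cumulative, roulette-wheel selection reduces to finding the first entry at or above a uniform draw. A minimal usage sketch (roulette_select is hypothetical, not part of the library):

import bisect
import random

def roulette_select(population, probabilities):
    # probabilities is the cumulative list from _fitnesses_to_probabilities;
    # the first entry >= a uniform draw selects the chromosome
    return population[bisect.bisect_left(probabilities,
                                         random.uniform(0.0, 1.0))]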
JustinLovinger/optimal
optimal/algorithms/gaoperators.py
one_point_crossover
def one_point_crossover(parents):
    """Perform one point crossover on two parent chromosomes.

    Select a random position in the chromosome.
    Take genes to the left from one parent and the rest from the other parent.
    Ex. p1 = xxxxx, p2 = yyyyy, position = 2 (starting at 0), child = xxyyy
    """
    # The point that the chromosomes will be crossed at (see Ex. above)
    crossover_point = random.randint(1, len(parents[0]) - 1)

    return (_one_parent_crossover(parents[0], parents[1], crossover_point),
            _one_parent_crossover(parents[1], parents[0], crossover_point))
python
Perform one point crossover on two parent chromosomes. Select a random position in the chromosome. Take genes to the left from one parent and the rest from the other parent. Ex. p1 = xxxxx, p2 = yyyyy, position = 2 (starting at 0), child = xxyyy
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/gaoperators.py#L212-L223
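The private helper _one_parent_crossover is not shown in this section. A plausible sketch consistent with the xxyyy example above (an assumption, not the repository's actual implementation):

def _one_parent_crossover(this_parent, other_parent, crossover_point):
    # Genes left of the point come from this_parent, the rest from
    # other_parent: p1 = xxxxx, p2 = yyyyy, point = 2 -> xxyyy
    return this_parent[:crossover_point] + other_parent[crossover_point:]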
JustinLovinger/optimal
optimal/algorithms/gaoperators.py
uniform_crossover
def uniform_crossover(parents):
    """Perform uniform crossover on two parent chromosomes.

    Randomly take genes from one parent or the other.
    Ex. p1 = xxxxx, p2 = yyyyy, child = xyxxy
    """
    chromosome_length = len(parents[0])

    children = [[], []]
    for i in range(chromosome_length):
        selected_parent = random.randint(0, 1)

        # Take from the selected parent, and add it to child 1
        # Take from the other parent, and add it to child 2
        children[0].append(parents[selected_parent][i])
        children[1].append(parents[1 - selected_parent][i])

    return children
python
Perform uniform crossover on two parent chromosomes. Randomly take genes from one parent or the other. Ex. p1 = xxxxx, p2 = yyyyy, child = xyxxy
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/gaoperators.py#L230-L248
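A quick illustration with complementary bit parents, which makes the per-gene swapping visible (assuming uniform_crossover above is in scope):

parents = [[0, 0, 0, 0, 0], [1, 1, 1, 1, 1]]
child_a, child_b = uniform_crossover(parents)
# At every position the two children take opposite genes here,
# so child_a[i] + child_b[i] == 1 for all i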
JustinLovinger/optimal
optimal/algorithms/gaoperators.py
random_flip_mutate
def random_flip_mutate(population, mutation_chance):
    """Mutate every chromosome in a population; the list is modified in place.

    Mutation occurs by randomly flipping bits (genes).
    """
    for chromosome in population:  # For every chromosome in the population
        for i in range(len(chromosome)):  # For every bit in the chromosome
            # If mutation takes place
            if random.uniform(0.0, 1.0) <= mutation_chance:
                chromosome[i] = 1 - chromosome[i]
python
Mutate every chromosome in a population; the list is modified in place. Mutation occurs by randomly flipping bits (genes).
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/gaoperators.py#L254-L263
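Mutation happens in place and returns nothing; callers inspect the population afterwards. A small sketch:

import random

random.seed(0)  # repeatable illustration only
population = [[0, 1, 0, 1], [1, 1, 0, 0]]
random_flip_mutate(population, mutation_chance=0.5)
# population now holds the mutated chromosomes; each bit had a
# 50% chance of flipping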
JustinLovinger/optimal
optimal/optimize.py
_duplicates
def _duplicates(list_):
    """Return dict mapping item -> indices."""
    item_indices = {}
    for i, item in enumerate(list_):
        try:
            item_indices[item].append(i)
        except KeyError:  # First time seen
            item_indices[item] = [i]
    return item_indices
python
Return dict mapping item -> indices.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/optimize.py#L718-L726
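A short example of the mapping it builds; keys with more than one index are the duplicates that _pmap (below) uses to skip repeat work:

indices = _duplicates(['a', 'b', 'a', 'c', 'b'])
# {'a': [0, 2], 'b': [1, 4], 'c': [3]}
duplicate_items = [item for item, ix in indices.items() if len(ix) > 1]
# ['a', 'b'] (dict ordering may vary)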
JustinLovinger/optimal
optimal/optimize.py
_parse_parameter_locks
def _parse_parameter_locks(optimizer, meta_parameters, parameter_locks):
    """Synchronize meta_parameters and locked_values.

    The union of these two sets will have all necessary parameters.
    locked_values will have the parameters specified in parameter_locks.
    """
    # WARNING: meta_parameters is modified inline
    locked_values = {}
    if parameter_locks:
        for name in parameter_locks:
            # Store the current optimizer value
            # and remove from our dictionary of parameters to optimize
            locked_values[name] = getattr(optimizer, name)
            meta_parameters.pop(name)

    return locked_values
python
Synchronize meta_parameters and locked_values. The union of these two sets will have all necessary parameters. locked_values will have the parameters specified in parameter_locks.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/optimize.py#L740-L756
JustinLovinger/optimal
optimal/optimize.py
_get_hyperparameter_solution_size
def _get_hyperparameter_solution_size(meta_parameters):
    """Determine size of binary encoding of parameters.

    Also adds binary size information for each parameter.
    """
    # WARNING: meta_parameters is modified inline
    solution_size = 0
    for _, parameters in meta_parameters.iteritems():
        if parameters['type'] == 'discrete':
            # Binary encoding of discrete values -> log_2 N
            num_values = len(parameters['values'])
            binary_size = helpers.binary_size(num_values)
        elif parameters['type'] == 'int':
            # Use enough bits to cover range from min to max
            # + 1 to include max in range
            int_range = parameters['max'] - parameters['min'] + 1
            binary_size = helpers.binary_size(int_range)
        elif parameters['type'] == 'float':
            # Use enough bits to provide fine steps between min and max
            float_range = parameters['max'] - parameters['min']
            # * 1000 provides 1000 values between each natural number
            binary_size = helpers.binary_size(float_range * 1000)
        else:
            raise ValueError('Parameter type "{}" does not match known values'.
                             format(parameters['type']))

        # Store binary size with parameters for use in decode function
        parameters['binary_size'] = binary_size

        solution_size += binary_size

    return solution_size
python
Determine size of binary encoding of parameters. Also adds binary size information for each parameter.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/optimize.py#L759-L791
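A worked sizing example, assuming helpers.binary_size(n) returns the number of bits needed to distinguish n values, i.e. ceil(log2(n)) (the actual helper is not shown in this section):

import math

def binary_size(num_values):  # assumed behavior of helpers.binary_size
    return int(math.ceil(math.log(num_values, 2)))

# 'int' with min=0, max=10      -> 11 values  -> binary_size(11)   == 4 bits
# 'float' with min=0.0, max=1.0 -> 1000 steps -> binary_size(1000) == 10 bits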
JustinLovinger/optimal
optimal/optimize.py
_make_hyperparameter_decode_func
def _make_hyperparameter_decode_func(locked_values, meta_parameters):
    """Create a function that converts the binary solution to parameters."""

    # Locked parameters are also returned by decode function, but are not
    # based on solution
    def decode(solution):
        """Convert solution into dict of hyperparameters."""
        # Start with our stationary (locked) parameters
        hyperparameters = copy.deepcopy(locked_values)

        # Obtain moving hyperparameters from binary solution
        index = 0
        for name, parameters in meta_parameters.iteritems():
            # Obtain binary for this hyperparameter
            binary_size = parameters['binary_size']
            binary = solution[index:index + binary_size]
            index += binary_size  # Just index to start of next hyperparameter

            # Decode binary
            if parameters['type'] == 'discrete':
                i = helpers.binary_to_int(
                    binary, upper_bound=len(parameters['values']) - 1)
                value = parameters['values'][i]
            elif parameters['type'] == 'int':
                value = helpers.binary_to_int(
                    binary,
                    lower_bound=parameters['min'],
                    upper_bound=parameters['max'])
            elif parameters['type'] == 'float':
                value = helpers.binary_to_float(
                    binary,
                    lower_bound=parameters['min'],
                    upper_bound=parameters['max'])
            else:
                raise ValueError(
                    'Parameter type "{}" does not match known values'.format(
                        parameters['type']))

            # Store value
            hyperparameters[name] = value

        return hyperparameters

    return decode
python
Create a function that converts the binary solution to parameters.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/optimize.py#L794-L838
JustinLovinger/optimal
optimal/optimize.py
_meta_fitness_func
def _meta_fitness_func(parameters,
                       _optimizer,
                       _problems,
                       _master_fitness_dict,
                       _runs=20):
    """Test a metaheuristic with parameters encoded in solution.

    Our goal is to minimize the number of evaluation runs until a solution
    is found, while maximizing the chance of finding a solution to the
    underlying problem.

    NOTE: while meta optimization requires a 'known' solution, this solution
    can be an estimate to provide the meta optimizer with a sense of progress.
    """
    # Create the optimizer with parameters encoded in solution
    optimizer = copy.deepcopy(_optimizer)
    optimizer._set_hyperparameters(parameters)
    optimizer.logging = False

    # Preload fitness dictionary from master, and disable clearing dict
    # NOTE: master_fitness_dict will be modified inline, and therefore,
    # we do not need to take additional steps to update it
    if _master_fitness_dict is not None:  # None means low memory mode
        optimizer.clear_cache = False
        optimizer._Optimizer__encoded_cache = _master_fitness_dict

    # Because metaheuristics are stochastic, we run the optimizer multiple
    # times, to obtain an average of performance
    all_evaluation_runs = []
    solutions_found = []
    for _ in range(_runs):
        for problem in _problems:
            # Get performance for problem
            optimizer.optimize(problem)
            all_evaluation_runs.append(optimizer.fitness_runs)
            if optimizer.solution_found:
                solutions_found.append(1.0)
            else:
                solutions_found.append(0.0)

    # Our main goal is to minimize time the optimizer takes
    fitness = 1.0 / helpers.avg(all_evaluation_runs)

    # Optimizer is heavily penalized for missing solutions
    # To avoid 0 fitness
    fitness = fitness * helpers.avg(solutions_found)**2 + 1e-19

    return fitness
python
Test a metaheuristic with parameters encoded in solution. Our goal is to minimize the number of evaluation runs until a solution is found, while maximizing the chance of finding a solution to the underlying problem. NOTE: while meta optimization requires a 'known' solution, this solution can be an estimate to provide the meta optimizer with a sense of progress.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/optimize.py#L841-L886
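The last two lines combine speed and reliability into one score. A worked example with assumed averages:

avg_runs = 200.0   # mean fitness evaluations per optimize() call
avg_found = 0.8    # solution found in 80% of runs
fitness = (1.0 / avg_runs) * avg_found ** 2 + 1e-19
# (1 / 200) * 0.64 ~= 0.0032; the 1e-19 floor keeps fitness nonzero
# even when no run finds a solution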
JustinLovinger/optimal
optimal/optimize.py
Problem.copy
def copy(self,
         fitness_function=None,
         decode_function=None,
         fitness_args=None,
         decode_args=None,
         fitness_kwargs=None,
         decode_kwargs=None):
    """Return a copy of this problem.

    Optionally replace this problem's arguments with those passed in.
    """
    if fitness_function is None:
        fitness_function = self._fitness_function
    if decode_function is None:
        decode_function = self._decode_function
    if fitness_args is None:
        fitness_args = self._fitness_args
    if decode_args is None:
        decode_args = self._decode_args
    if fitness_kwargs is None:
        fitness_kwargs = self._fitness_kwargs
    if decode_kwargs is None:
        decode_kwargs = self._decode_kwargs

    return Problem(
        fitness_function,
        decode_function=decode_function,
        fitness_args=fitness_args,
        decode_args=decode_args,
        fitness_kwargs=fitness_kwargs,
        decode_kwargs=decode_kwargs)
python
Return a copy of this problem. Optionally replace this problem's arguments with those passed in.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/optimize.py#L101-L131
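A brief sketch of replacing one argument while keeping the rest; the count_ones fitness function is hypothetical:

def count_ones(solution):
    return sum(solution)

problem = Problem(count_ones)
scaled = problem.copy(fitness_function=lambda s: 0.5 * sum(s))
# 'scaled' keeps problem's decode function and args but scores differently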
JustinLovinger/optimal
optimal/optimize.py
Problem.get_fitness
def get_fitness(self, solution):
    """Return fitness for the given solution."""
    return self._fitness_function(solution, *self._fitness_args,
                                  **self._fitness_kwargs)
python
Return fitness for the given solution.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/optimize.py#L133-L136
JustinLovinger/optimal
optimal/optimize.py
Problem.decode_solution
def decode_solution(self, encoded_solution):
    """Return solution from an encoded representation."""
    return self._decode_function(encoded_solution, *self._decode_args,
                                 **self._decode_kwargs)
python
Return solution from an encoded representation.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/optimize.py#L138-L141
JustinLovinger/optimal
optimal/optimize.py
Optimizer.optimize
def optimize(self,
             problem,
             max_iterations=100,
             max_seconds=float('inf'),
             cache_encoded=True,
             cache_solution=False,
             clear_cache=True,
             logging_func=_print_fitnesses,
             n_processes=0):
    """Find the optimal inputs for a given fitness function.

    Args:
        problem: An instance of Problem. The problem to solve.
        max_iterations: The number of iterations to optimize before stopping.
        max_seconds: Maximum number of seconds to optimize for, before
            stopping. Note that the condition is only checked once per
            iteration, meaning optimization can take more than max_seconds,
            especially if fitnesses take a long time to calculate.
        cache_encoded: bool; Whether or not to cache fitness of encoded
            strings. Encoded strings are produced directly by the optimizer.
            If an encoded string is found in cache, it will not be decoded.
        cache_solution: bool; Whether or not to cache fitness of decoded
            solutions. Decoded solution is provided by the problem's decode
            function. If problem does not provide a hash solution function,
            various naive hashing methods will be attempted, including:
            tuple, tuple(sorted(dict.items)), str.
        clear_cache: bool; Whether or not to reset cache after optimization.
            Disable if you want to run optimize multiple times on the same
            problem.
        logging_func: func/None; Function taking:
            iteration, population, solutions, fitnesses, best_solution,
            best_fitness.
            Called after every iteration. Use for custom logging, or set to
            None to disable logging. Note that best_solution and best_fitness
            are the best of all iterations so far.
        n_processes: int; Number of processes to use for multiprocessing.
            If <= 0, do not use multiprocessing.

    Returns:
        object; The best solution, after decoding.
    """
    if not isinstance(problem, Problem):
        raise TypeError('problem must be an instance of Problem class')

    # Prepare pool for multiprocessing
    if n_processes > 0:
        try:
            pool = multiprocessing.Pool(processes=n_processes)
        except NameError:
            raise ImportError(
                'pickle, dill, or multiprocessing library is not available.')
    else:
        pool = None

    # Set first, in case optimizer uses _max_iterations in initialization
    self.__max_iterations = max_iterations

    # Initialize algorithm
    self._reset()
    best_solution = {'solution': None, 'fitness': None}
    population = self.initial_population()

    try:
        # Begin optimization loop
        start = time.clock()
        for self.iteration in itertools.count(1):  # Infinite sequence of iterations
            # Evaluate potential solutions
            solutions, fitnesses, finished = self._get_fitnesses(
                problem,
                population,
                cache_encoded=cache_encoded,
                cache_solution=cache_solution,
                pool=pool)

            # If the best fitness from this iteration is better than
            # the global best
            best_index, best_fitness = max(
                enumerate(fitnesses), key=operator.itemgetter(1))
            if best_fitness > best_solution['fitness']:
                # Store the new best solution
                best_solution['fitness'] = best_fitness
                best_solution['solution'] = solutions[best_index]

            if logging_func:
                logging_func(self.iteration, population, solutions, fitnesses,
                             best_solution['solution'],
                             best_solution['fitness'])

            # Break if solution found
            if finished:
                self.solution_found = True
                break

            # Break if out of time
            if time.clock() - start >= max_seconds:
                break

            # Break if out of iterations
            if self.iteration >= max_iterations:
                break

            # Continue optimizing
            population = self.next_population(population, fitnesses)

        # Store best internally, before returning
        self.best_solution = best_solution['solution']
        self.best_fitness = best_solution['fitness']
    finally:
        # Clear caches
        if clear_cache:
            # Clear caches from memory
            self.__encoded_cache = {}
            self.__solution_cache = {}
            # Reset encoded, and decoded key functions
            self._get_encoded_key = self._get_encoded_key_type
            self._get_solution_key = self._get_solution_key_type

        # Clean up multiprocesses
        try:
            pool.terminate()  # Kill outstanding work
            pool.close()  # Close child processes
        except AttributeError:  # No pool
            assert pool is None

    return self.best_solution
python
Find the optimal inputs for a given fitness function. Args: problem: An instance of Problem. The problem to solve. max_iterations: The number of iterations to optimize before stopping. max_seconds: Maximum number of seconds to optimize for, before stopping. Note that the condition is only checked once per iteration, meaning optimization can take more than max_seconds, especially if fitnesses take a long time to calculate. cache_encoded: bool; Whether or not to cache fitness of encoded strings. Encoded strings are produced directly by the optimizer. If an encoded string is found in cache, it will not be decoded. cache_solution: bool; Whether or not to cache fitness of decoded solutions. Decoded solution is provided by the problem's decode function. If problem does not provide a hash solution function, various naive hashing methods will be attempted, including: tuple, tuple(sorted(dict.items)), str. clear_cache: bool; Whether or not to reset cache after optimization. Disable if you want to run optimize multiple times on the same problem. logging_func: func/None; Function taking: iteration, population, solutions, fitnesses, best_solution, best_fitness. Called after every iteration. Use for custom logging, or set to None to disable logging. Note that best_solution and best_fitness are the best of all iterations so far. n_processes: int; Number of processes to use for multiprocessing. If <= 0, do not use multiprocessing. Returns: object; The best solution, after decoding.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/optimize.py#L198-L319
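A minimal end-to-end sketch, assuming the package exports GenAlg and Problem at the top level (optimize_hyperparameters below does use "from optimal import GenAlg") and that the default decode passes the encoded binary list through unchanged:

from optimal import GenAlg, Problem

def all_ones(solution):
    # Maximize the number of set bits; report finished once all 8 are set
    ones = sum(solution)
    return float(ones), ones == len(solution)

optimizer = GenAlg(8)  # 8-bit encoded solutions
best = optimizer.optimize(Problem(all_ones), max_iterations=100)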
JustinLovinger/optimal
optimal/optimize.py
Optimizer._reset_bookkeeping
def _reset_bookkeeping(self):
    """Reset bookkeeping parameters to initial values.

    Call before beginning optimization.
    """
    self.iteration = 0
    self.fitness_runs = 0
    self.best_solution = None
    self.best_fitness = None
    self.solution_found = False
python
Reset bookkeeping parameters to initial values. Call before beginning optimization.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/optimize.py#L325-L334
JustinLovinger/optimal
optimal/optimize.py
Optimizer._get_fitnesses
def _get_fitnesses(self,
                   problem,
                   population,
                   cache_encoded=True,
                   cache_solution=False,
                   pool=None):
    """Get the fitness for every solution in a population.

    Args:
        problem: Problem; The problem that defines fitness.
        population: list; List of potential solutions.
        pool: None/multiprocessing.Pool; Pool of processes for parallel
            decoding and evaluation.
    """
    fitnesses = [None] * len(population)

    #############################
    # Decoding
    #############################
    if cache_encoded:
        try:
            encoded_keys = map(self._get_encoded_key, population)

            # Get all fitnesses from encoded_solution cache
            to_decode_indices = []
            for i, encoded_key in enumerate(encoded_keys):
                try:
                    fitnesses[i] = self.__encoded_cache[encoded_key]
                    # Note that this fitness will never be better than the
                    # current best, because we have already evaluated it.
                    # Therefore, we do not need to worry about decoding
                    # the solution
                except KeyError:  # Cache miss
                    to_decode_indices.append(i)
        except UnhashableError:  # Cannot hash encoded solution
            encoded_keys = None
            to_decode_indices = range(len(population))
    else:
        encoded_keys = None
        to_decode_indices = range(len(population))

    # Decode all that need to be decoded, and combine back into list the
    # same length as population
    if encoded_keys is None:
        to_decode_keys = None
    else:
        to_decode_keys = [encoded_keys[i] for i in to_decode_indices]
    solutions = [None] * len(population)
    for i, solution in zip(to_decode_indices,
                           self._pmap(problem.decode_solution,
                                      [population[i] for i in to_decode_indices],
                                      to_decode_keys, pool)):
        solutions[i] = solution

    #############################
    # Evaluating
    #############################
    if cache_solution:
        try:
            # Try to make solutions hashable
            # Use user provided hash function if available
            if problem.hash_solution:
                hash_solution_func = problem.hash_solution
            else:
                # Otherwise, default to built in "smart" hash function
                hash_solution_func = self._get_solution_key

            solution_keys = [
                hash_solution_func(solution)
                # None corresponds to encoded_solutions found in cache
                if solution is not None else None for solution in solutions
            ]

            # Get all fitnesses from solution cache
            to_eval_indices = []
            for i, solution_key in enumerate(solution_keys):
                if solution_key is not None:
                    # Otherwise, fitness already found in encoded cache
                    try:
                        fitnesses[i] = self.__solution_cache[solution_key]
                    except KeyError:  # Cache miss
                        to_eval_indices.append(i)
        except UnhashableError:  # Cannot hash solution
            solution_keys = None
            to_eval_indices = to_decode_indices[:]
    else:
        solution_keys = None
        to_eval_indices = to_decode_indices[:]

    # Evaluate all that need to be evaluated, and combine back into
    # fitnesses list
    if solution_keys is None:
        if encoded_keys is None:
            # No way to detect duplicates
            to_eval_keys = None
        else:
            # Cannot use decoded keys, default to encoded keys
            to_eval_keys = [encoded_keys[i] for i in to_eval_indices]
    else:
        to_eval_keys = [solution_keys[i] for i in to_eval_indices]
    finished = False
    eval_bookkeeping = {}
    for i, fitness_finished in zip(to_eval_indices,
                                   self._pmap(problem.get_fitness,
                                              [solutions[i] for i in to_eval_indices],
                                              to_eval_keys,
                                              pool,
                                              bookkeeping_dict=eval_bookkeeping)):
        # Unpack fitness_finished tuple
        try:
            fitness, maybe_finished = fitness_finished
            if maybe_finished:
                finished = True
        except TypeError:  # Not (fitness, finished) tuple
            fitness = fitness_finished
        fitnesses[i] = fitness

    #############################
    # Finishing
    #############################
    # Bookkeeping
    # keep track of how many times fitness is evaluated
    # Evaluated once for each unique key
    self.fitness_runs += len(eval_bookkeeping['key_indices'])

    # Add evaluated fitnesses to caches (both of them)
    if cache_encoded and encoded_keys is not None:
        for i in to_decode_indices:  # Encoded cache misses
            self.__encoded_cache[encoded_keys[i]] = fitnesses[i]
    if cache_solution and solution_keys is not None:
        for i in to_eval_indices:  # Decoded cache misses
            self.__solution_cache[solution_keys[i]] = fitnesses[i]

    # Return
    # assert None not in fitnesses  # Un-comment for debugging
    return solutions, fitnesses, finished
python
Get the fitness for every solution in a population. Args: problem: Problem; The problem that defines fitness. population: list; List of potential solutions. pool: None/multiprocessing.Pool; Pool of processes for parallel decoding and evaluation.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/optimize.py#L336-L475
JustinLovinger/optimal
optimal/optimize.py
Optimizer._pmap
def _pmap(self, func, items, keys, pool, bookkeeping_dict=None):
    """Efficiently map func over all items.

    Calls func only once for duplicate items. Item duplicates are detected
    by corresponding keys, unless keys is None.

    Serial if pool is None, but still skips duplicates.
    """
    if keys is not None:  # Otherwise, cannot hash items
        # Remove duplicates first (use keys)
        # Create mapping (dict) of key to list of indices
        key_indices = _duplicates(keys).values()
    else:  # Cannot hash items
        # Assume no duplicates
        key_indices = [[i] for i in range(len(items))]

    # Use only the first of duplicate indices in decoding
    if pool is not None:
        # Parallel map
        results = pool.map(
            functools.partial(_unpickle_run, pickle.dumps(func)),
            [items[i[0]] for i in key_indices])
    else:
        results = map(func, [items[i[0]] for i in key_indices])

    # Add bookkeeping
    if bookkeeping_dict is not None:
        bookkeeping_dict['key_indices'] = key_indices

    # Combine duplicates back into list
    all_results = [None] * len(items)
    for indices, result in zip(key_indices, results):
        for j, i in enumerate(indices):
            # Avoid duplicate result objects in list,
            # in case they are used in functions with side effects
            if j > 0:
                result = copy.deepcopy(result)

            all_results[i] = result

    return all_results
python
Efficiently map func over all items. Calls func only once for duplicate items. Item duplicates are detected by corresponding keys, unless keys is None. Serial if pool is None, but still skips duplicates.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/optimize.py#L477-L517
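A standalone sketch of the same duplicate-skipping pattern with the multiprocessing and pickling details removed (map_unique is hypothetical, for illustration only; it reuses _duplicates from above):

def map_unique(func, items, keys):
    # Call func once per distinct key, then fan results back out
    key_indices = list(_duplicates(keys).values())
    results = [func(items[indices[0]]) for indices in key_indices]
    all_results = [None] * len(items)
    for indices, result in zip(key_indices, results):
        for i in indices:
            all_results[i] = result
    return all_results

# map_unique(fitness, chromosomes, keys) calls fitness once per unique key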
JustinLovinger/optimal
optimal/optimize.py
Optimizer._set_hyperparameters
def _set_hyperparameters(self, parameters):
    """Set internal optimization parameters."""
    for name, value in parameters.iteritems():
        try:
            getattr(self, name)
        except AttributeError:
            raise ValueError(
                'Each parameter in parameters must be an attribute. '
                '{} is not.'.format(name))
        setattr(self, name, value)
python
Set internal optimization parameters.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/optimize.py#L533-L542
JustinLovinger/optimal
optimal/optimize.py
Optimizer._get_hyperparameters
def _get_hyperparameters(self):
    """Get internal optimization parameters."""
    hyperparameters = {}
    for key in self._hyperparameters:
        hyperparameters[key] = getattr(self, key)

    return hyperparameters
python
Get internal optimization parameters.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/optimize.py#L544-L549
JustinLovinger/optimal
optimal/optimize.py
Optimizer.optimize_hyperparameters
def optimize_hyperparameters(self,
                             problems,
                             parameter_locks=None,
                             smoothing=20,
                             max_iterations=100,
                             _meta_optimizer=None,
                             _low_memory=True):
    """Optimize hyperparameters for a given problem.

    Args:
        parameter_locks: a list of strings, each corresponding to a
            hyperparameter that should not be optimized.
        problems: Either a single problem, or a list of problem instances,
            allowing optimization based on multiple similar problems.
        smoothing: int; number of runs to average over for each set of
            hyperparameters.
        max_iterations: The number of iterations to optimize before stopping.
        _low_memory: disable performance enhancements to save memory
            (they use a lot of memory otherwise).
    """
    if smoothing <= 0:
        raise ValueError('smoothing must be > 0')

    # problems supports either one or many problem instances
    if isinstance(problems, collections.Iterable):
        for problem in problems:
            if not isinstance(problem, Problem):
                raise TypeError(
                    'problem must be Problem instance or list of Problem instances'
                )
    elif isinstance(problems, Problem):
        problems = [problems]
    else:
        raise TypeError(
            'problem must be Problem instance or list of Problem instances')

    # Copy to avoid permanent modification
    meta_parameters = copy.deepcopy(self._hyperparameters)

    # First, handle parameter locks, since it will modify our
    # meta_parameters dict
    locked_values = _parse_parameter_locks(self, meta_parameters,
                                           parameter_locks)

    # We need to know the size of our chromosome,
    # based on the hyperparameters to optimize
    solution_size = _get_hyperparameter_solution_size(meta_parameters)

    # We also need to create a decode function to transform the binary
    # solution into parameters for the metaheuristic
    decode = _make_hyperparameter_decode_func(locked_values, meta_parameters)

    # A master fitness dictionary can be stored for use between calls
    # to meta_fitness
    if _low_memory:
        master_fitness_dict = None
    else:
        master_fitness_dict = {}

    additional_parameters = {
        '_optimizer': self,
        '_problems': problems,
        '_runs': smoothing,
        '_master_fitness_dict': master_fitness_dict,
    }
    META_FITNESS = Problem(
        _meta_fitness_func,
        decode_function=decode,
        fitness_kwargs=additional_parameters)

    if _meta_optimizer is None:
        # Initialize default meta optimizer
        # GenAlg is used because it supports both discrete and continuous
        # attributes
        from optimal import GenAlg

        # Create metaheuristic with computed decode function and solution
        # size
        _meta_optimizer = GenAlg(solution_size)
    else:
        # Adjust supplied metaheuristic for this problem
        _meta_optimizer._solution_size = solution_size

    # Determine the best hyperparameters with a metaheuristic
    best_parameters = _meta_optimizer.optimize(
        META_FITNESS, max_iterations=max_iterations)

    # Set the hyperparameters inline
    self._set_hyperparameters(best_parameters)

    # And return
    return best_parameters
python
Optimize hyperparameters for a given problem. Args: parameter_locks: a list of strings, each corresponding to a hyperparameter that should not be optimized. problems: Either a single problem, or a list of problem instances, allowing optimization based on multiple similar problems. smoothing: int; number of runs to average over for each set of hyperparameters. max_iterations: The number of iterations to optimize before stopping. _low_memory: disable performance enhancements to save memory (they use a lot of memory otherwise).
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/optimize.py#L551-L642
JustinLovinger/optimal
optimal/benchmark.py
compare
def compare(optimizers, problems, runs=20, all_kwargs={}):
    """Compare a set of optimizers.

    Args:
        optimizers: list/Optimizer; Either a list of optimizers to compare,
            or a single optimizer to test on each problem.
        problems: list/Problem; Either a problem instance or a list of
            problem instances, one for each optimizer.
        all_kwargs: dict/list<dict>; Either the Optimizer.optimize keyword
            arguments for all optimizers, or a list of keyword arguments,
            one for each optimizer.
        runs: int; How many times to run each optimizer (smoothness)

    Returns:
        dict; mapping optimizer identifier to stats.
    """
    if not (isinstance(optimizers, collections.Iterable)
            or isinstance(problems, collections.Iterable)):
        raise TypeError('optimizers or problems must be iterable')

    # If optimizers is not a list, repeat into list for each problem
    if not isinstance(optimizers, collections.Iterable):
        optimizers = [copy.deepcopy(optimizers) for _ in range(len(problems))]

    # If problems is not a list, repeat into list for each optimizer
    if not isinstance(problems, collections.Iterable):
        problems = [copy.deepcopy(problems) for _ in range(len(optimizers))]

    # If all_kwargs is not a list, repeat it into a list
    if isinstance(all_kwargs, dict):
        all_kwargs = [all_kwargs] * len(optimizers)
    elif not isinstance(all_kwargs, collections.Iterable):
        raise TypeError('all_kwargs must be dict or list of dict')

    stats = {}
    key_counts = {}
    for optimizer, problem, kwargs in zip(optimizers, problems, all_kwargs):
        # For nice human readable dictionaries, extract useful names from
        # optimizer
        class_name = optimizer.__class__.__name__
        fitness_func_name = problem._fitness_function.__name__
        key_name = '{} {}'.format(class_name, fitness_func_name)

        # Keep track of how many optimizers of each class / fitness func
        # for better keys in stats dict
        try:
            key_counts[key_name] += 1
        except KeyError:
            key_counts[key_name] = 1

        # Foo 1, Foo 2, Bar 1, etc.
        key = '{} {}'.format(key_name, key_counts[key_name])

        print key + ': ',

        # Finally, get the actual stats
        stats[key] = benchmark(optimizer, problem, runs=runs, **kwargs)

        print

    return stats
python
Compare a set of optimizers. Args: optimizers: list/Optimizer; Either a list of optimizers to compare, or a single optimizer to test on each problem. problems: list/Problem; Either a problem instance or a list of problem instances, one for each optimizer. all_kwargs: dict/list<dict>; Either the Optimizer.optimize keyword arguments for all optimizers, or a list of keyword arguments, one for each optimizer. runs: int; How many times to run each optimizer (smoothness) Returns: dict; mapping optimizer identifier to stats.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/benchmark.py#L37-L96
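A short usage sketch; population_size is an assumed GenAlg hyperparameter, used here only to make the two optimizers differ:

from optimal import GenAlg, Problem

problem = Problem(all_ones)  # reusing the all_ones fitness sketch above
stats = compare([GenAlg(8), GenAlg(8, population_size=40)], problem, runs=10)
# stats keys look like 'GenAlg all_ones 1' and 'GenAlg all_ones 2'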
JustinLovinger/optimal
optimal/benchmark.py
benchmark
def benchmark(optimizer, problem, runs=20, **kwargs):
    """Run an optimizer through a problem multiple times.

    Args:
        optimizer: Optimizer; The optimizer to benchmark.
        problem: Problem; The problem to benchmark on.
        runs: int > 0; Number of times that optimize is called on problem.

    Returns:
        dict; A dictionary of various statistics.
    """
    stats = {'runs': []}

    # Disable logging, to avoid spamming the user
    # TODO: Maybe we shouldn't disable by default?
    kwargs = copy.copy(kwargs)
    kwargs['logging_func'] = None

    # Determine effectiveness of metaheuristic over many runs
    # The stochastic nature of metaheuristics makes this necessary
    # for an accurate evaluation
    for _ in range(runs):
        optimizer.optimize(problem, **kwargs)

        # Convert bool to number for mean and standard deviation calculations
        if optimizer.solution_found:
            finished_num = 1.0
        else:
            finished_num = 0.0

        stats_ = {
            'fitness': optimizer.best_fitness,
            'fitness_runs': optimizer.fitness_runs,
            'solution_found': finished_num
        }
        stats['runs'].append(stats_)

        # Little progress 'bar'
        print '.',

    # Mean gives a good overall idea of the metaheuristic's effectiveness
    # Standard deviation (SD) shows consistency of performance
    _add_mean_sd_to_stats(stats)

    return stats
python
Run an optimizer through a problem multiple times. Args: optimizer: Optimizer; The optimizer to benchmark. problem: Problem; The problem to benchmark on. runs: int > 0; Number of times that optimize is called on problem. Returns: dict; A dictionary of various statistics.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/benchmark.py#L99-L143
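The shape of the stats dictionary built above is easy to visualize without the library. Below is a minimal, hypothetical sketch in which MockOptimizer (an invented stand-in, not part of optimal) exposes just the attributes benchmark reads after each optimize call.

import random

class MockOptimizer(object):
    # Invented stand-in exposing only the attributes benchmark reads.
    def optimize(self, problem, **kwargs):
        self.best_fitness = random.random()
        self.fitness_runs = random.randint(10, 100)
        self.solution_found = self.best_fitness > 0.5

stats = {'runs': []}
optimizer = MockOptimizer()
for _ in range(5):
    optimizer.optimize(None)
    stats['runs'].append({
        'fitness': optimizer.best_fitness,
        'fitness_runs': optimizer.fitness_runs,
        'solution_found': 1.0 if optimizer.solution_found else 0.0,
    })
print(stats['runs'][0])  # e.g. {'fitness': 0.73, 'fitness_runs': 42, 'solution_found': 1.0}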
JustinLovinger/optimal
optimal/benchmark.py
aggregate
def aggregate(all_stats):
    """Combine stats for multiple optimizers to obtain one mean and sd.

    Useful for combining stats for the same optimizer class and
    multiple problems.

    Args:
        all_stats: dict; output from compare.
    """
    aggregate_stats = {'means': [], 'standard_deviations': []}
    for optimizer_key in all_stats:
        # runs is the mean, for add_mean_sd function
        mean_stats = copy.deepcopy(all_stats[optimizer_key]['mean'])
        mean_stats['name'] = optimizer_key
        aggregate_stats['means'].append(mean_stats)

        # also keep track of standard deviations
        sd_stats = copy.deepcopy(
            all_stats[optimizer_key]['standard_deviation'])
        sd_stats['name'] = optimizer_key
        aggregate_stats['standard_deviations'].append(sd_stats)

    _add_mean_sd_to_stats(aggregate_stats, 'means')

    return aggregate_stats
python
def aggregate(all_stats):
    """Combine stats for multiple optimizers to obtain one mean and sd.

    Useful for combining stats for the same optimizer class and
    multiple problems.

    Args:
        all_stats: dict; output from compare.
    """
    aggregate_stats = {'means': [], 'standard_deviations': []}
    for optimizer_key in all_stats:
        # runs is the mean, for add_mean_sd function
        mean_stats = copy.deepcopy(all_stats[optimizer_key]['mean'])
        mean_stats['name'] = optimizer_key
        aggregate_stats['means'].append(mean_stats)

        # also keep track of standard deviations
        sd_stats = copy.deepcopy(
            all_stats[optimizer_key]['standard_deviation'])
        sd_stats['name'] = optimizer_key
        aggregate_stats['standard_deviations'].append(sd_stats)

    _add_mean_sd_to_stats(aggregate_stats, 'means')

    return aggregate_stats
Combine stats for multiple optimizers to obtain one mean and sd.

Useful for combining stats for the same optimizer class and
multiple problems.

Args:
    all_stats: dict; output from compare.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/benchmark.py#L146-L169
JustinLovinger/optimal
optimal/benchmark.py
_mean_of_runs
def _mean_of_runs(stats, key='runs'):
    """Obtain the mean of stats.

    Args:
        stats: dict; A set of stats, structured as above.
        key: str; Optional key to determine where list of runs is found in stats
    """
    num_runs = len(stats[key])

    first = stats[key][0]
    mean = {}
    for stat_key in first:
        # Skip non numeric attributes
        if isinstance(first[stat_key], numbers.Number):
            mean[stat_key] = sum(run[stat_key]
                                 for run in stats[key]) / float(num_runs)

    return mean
python
def _mean_of_runs(stats, key='runs'):
    """Obtain the mean of stats.

    Args:
        stats: dict; A set of stats, structured as above.
        key: str; Optional key to determine where list of runs is found in stats
    """
    num_runs = len(stats[key])

    first = stats[key][0]
    mean = {}
    for stat_key in first:
        # Skip non numeric attributes
        if isinstance(first[stat_key], numbers.Number):
            mean[stat_key] = sum(run[stat_key]
                                 for run in stats[key]) / float(num_runs)

    return mean
Obtain the mean of stats.

Args:
    stats: dict; A set of stats, structured as above.
    key: str; Optional key to determine where list of runs is found in stats
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/benchmark.py#L180-L198
JustinLovinger/optimal
optimal/benchmark.py
_sd_of_runs
def _sd_of_runs(stats, mean, key='runs'):
    """Obtain the standard deviation of stats.

    Args:
        stats: dict; A set of stats, structured as above.
        mean: dict; Mean for each key in stats.
        key: str; Optional key to determine where list of runs is found in stats
    """
    num_runs = len(stats[key])

    first = stats[key][0]
    standard_deviation = {}
    for stat_key in first:
        # Skip non numeric attributes
        if isinstance(first[stat_key], numbers.Number):
            standard_deviation[stat_key] = math.sqrt(
                sum((run[stat_key] - mean[stat_key])**2
                    for run in stats[key]) / float(num_runs))

    return standard_deviation
python
def _sd_of_runs(stats, mean, key='runs'):
    """Obtain the standard deviation of stats.

    Args:
        stats: dict; A set of stats, structured as above.
        mean: dict; Mean for each key in stats.
        key: str; Optional key to determine where list of runs is found in stats
    """
    num_runs = len(stats[key])

    first = stats[key][0]
    standard_deviation = {}
    for stat_key in first:
        # Skip non numeric attributes
        if isinstance(first[stat_key], numbers.Number):
            standard_deviation[stat_key] = math.sqrt(
                sum((run[stat_key] - mean[stat_key])**2
                    for run in stats[key]) / float(num_runs))

    return standard_deviation
Obtain the standard deviation of stats.

Args:
    stats: dict; A set of stats, structured as above.
    mean: dict; Mean for each key in stats.
    key: str; Optional key to determine where list of runs is found in stats
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/benchmark.py#L201-L221
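Note that _sd_of_runs divides by N (a population standard deviation) rather than N - 1, so the two helpers can be checked by hand on toy run records. A minimal sketch:

import math

runs = [{'fitness': 1.0}, {'fitness': 2.0}, {'fitness': 3.0}]
num_runs = float(len(runs))
mean = sum(run['fitness'] for run in runs) / num_runs
sd = math.sqrt(sum((run['fitness'] - mean) ** 2 for run in runs) / num_runs)
print(mean, sd)  # 2.0 0.816... (population SD; the sample SD would be 1.0)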
JustinLovinger/optimal
optimal/algorithms/pbil.py
_sample
def _sample(probability_vec):
    """Return random binary string, with given probabilities."""
    return map(int,
               numpy.random.random(probability_vec.size) <= probability_vec)
python
def _sample(probability_vec):
    """Return random binary string, with given probabilities."""
    return map(int,
               numpy.random.random(probability_vec.size) <= probability_vec)
Return random binary string, with given probabilities.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/pbil.py#L126-L129
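The thresholding trick in _sample is worth seeing in isolation: a uniform draw lands at or below p with probability p, so comparing draws against the probability vector yields a biased bit string. A small demonstration (output varies with the random state):

import numpy

probability_vec = numpy.array([0.9, 0.1, 0.5])
bits = list(map(int, numpy.random.random(probability_vec.size) <= probability_vec))
print(bits)  # e.g. [1, 0, 1]; bit i is 1 with probability probability_vec[i]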
JustinLovinger/optimal
optimal/algorithms/pbil.py
_adjust_probability_vec_best
def _adjust_probability_vec_best(population, fitnesses, probability_vec,
                                 adjust_rate):
    """Shift probabilities towards the best solution."""
    best_solution = max(zip(fitnesses, population))[1]

    # Shift probabilities towards best solution
    return _adjust(probability_vec, best_solution, adjust_rate)
python
def _adjust_probability_vec_best(population, fitnesses, probability_vec,
                                 adjust_rate):
    """Shift probabilities towards the best solution."""
    best_solution = max(zip(fitnesses, population))[1]

    # Shift probabilities towards best solution
    return _adjust(probability_vec, best_solution, adjust_rate)
Shift probabilities towards the best solution.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/pbil.py#L132-L138
JustinLovinger/optimal
optimal/algorithms/pbil.py
_mutate_probability_vec
def _mutate_probability_vec(probability_vec, mutation_chance,
                            mutation_adjust_rate):
    """Randomly adjust probabilities.

    WARNING: Modifies probability_vec argument.
    """
    bits_to_mutate = numpy.random.random(
        probability_vec.size) <= mutation_chance
    probability_vec[bits_to_mutate] = _adjust(
        probability_vec[bits_to_mutate],
        numpy.random.random(numpy.sum(bits_to_mutate)),
        mutation_adjust_rate)
python
def _mutate_probability_vec(probability_vec, mutation_chance,
                            mutation_adjust_rate):
    """Randomly adjust probabilities.

    WARNING: Modifies probability_vec argument.
    """
    bits_to_mutate = numpy.random.random(
        probability_vec.size) <= mutation_chance
    probability_vec[bits_to_mutate] = _adjust(
        probability_vec[bits_to_mutate],
        numpy.random.random(numpy.sum(bits_to_mutate)),
        mutation_adjust_rate)
Randomly adjust probabilities.

WARNING: Modifies probability_vec argument.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/pbil.py#L141-L149
JustinLovinger/optimal
optimal/algorithms/pbil.py
PBIL.next_population
def next_population(self, population, fitnesses):
    """Make a new population after each optimization iteration.

    Args:
        population: The current population of solutions.
        fitnesses: The fitness associated with each solution in the
                   population
    Returns:
        list; a list of solutions.
    """
    # Update probability vector
    self._probability_vec = _adjust_probability_vec_best(
        population, fitnesses, self._probability_vec, self._adjust_rate)

    # Mutate probability vector
    _mutate_probability_vec(self._probability_vec, self._mutation_chance,
                            self._mutation_adjust_rate)

    # Return new samples
    return [
        _sample(self._probability_vec)
        for _ in range(self._population_size)
    ]
python
def next_population(self, population, fitnesses):
    """Make a new population after each optimization iteration.

    Args:
        population: The current population of solutions.
        fitnesses: The fitness associated with each solution in the
                   population
    Returns:
        list; a list of solutions.
    """
    # Update probability vector
    self._probability_vec = _adjust_probability_vec_best(
        population, fitnesses, self._probability_vec, self._adjust_rate)

    # Mutate probability vector
    _mutate_probability_vec(self._probability_vec, self._mutation_chance,
                            self._mutation_adjust_rate)

    # Return new samples
    return [
        _sample(self._probability_vec)
        for _ in range(self._population_size)
    ]
Make a new population after each optimization iteration.

Args:
    population: The current population of solutions.
    fitnesses: The fitness associated with each solution in the population

Returns:
    list; a list of solutions.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/pbil.py#L102-L123
JustinLovinger/optimal
optimal/examples/benchmark_gaoperators.py
benchmark_multi
def benchmark_multi(optimizer):
    """Benchmark an optimizer configuration on multiple functions."""
    # Get our benchmark stats
    all_stats = benchmark.compare(optimizer, PROBLEMS, runs=100)
    return benchmark.aggregate(all_stats)
python
def benchmark_multi(optimizer):
    """Benchmark an optimizer configuration on multiple functions."""
    # Get our benchmark stats
    all_stats = benchmark.compare(optimizer, PROBLEMS, runs=100)
    return benchmark.aggregate(all_stats)
Benchmark an optimizer configuration on multiple functions.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/examples/benchmark_gaoperators.py#L49-L53
JustinLovinger/optimal
optimal/algorithms/crossentropy.py
_sample
def _sample(probabilities, population_size):
    """Return a random population, drawn with regard to a set of probabilities"""
    population = []
    for _ in range(population_size):
        solution = []
        for probability in probabilities:
            # probability of 1.0: always 1
            # probability of 0.0: always 0
            if random.uniform(0.0, 1.0) < probability:
                solution.append(1)
            else:
                solution.append(0)
        population.append(solution)
    return population
python
def _sample(probabilities, population_size):
    """Return a random population, drawn with regard to a set of probabilities"""
    population = []
    for _ in range(population_size):
        solution = []
        for probability in probabilities:
            # probability of 1.0: always 1
            # probability of 0.0: always 0
            if random.uniform(0.0, 1.0) < probability:
                solution.append(1)
            else:
                solution.append(0)
        population.append(solution)
    return population
Return a random population, drawn with regard to a set of probabilities
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/crossentropy.py#L112-L125
JustinLovinger/optimal
optimal/algorithms/crossentropy.py
_chance
def _chance(solution, pdf):
    """Return the chance of obtaining a solution from a pdf.

    The probability of many independent weighted "coin flips"
    (one for each bit)
    """
    # 1.0 - abs(bit - p) gives probability of bit given p
    return _prod([1.0 - abs(bit - p) for bit, p in zip(solution, pdf)])
python
def _chance(solution, pdf):
    """Return the chance of obtaining a solution from a pdf.

    The probability of many independent weighted "coin flips"
    (one for each bit)
    """
    # 1.0 - abs(bit - p) gives probability of bit given p
    return _prod([1.0 - abs(bit - p) for bit, p in zip(solution, pdf)])
Return the chance of obtaining a solution from a pdf.

The probability of many independent weighted "coin flips" (one for each bit)
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/crossentropy.py#L132-L138
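To make the coin-flip product concrete, here is the same computation written out long-hand with illustrative numbers (a standalone sketch, with no dependency on the module's private _prod helper):

def chance(solution, pdf):
    # 1.0 - abs(bit - p) equals p when bit == 1 and 1 - p when bit == 0
    result = 1.0
    for bit, p in zip(solution, pdf):
        result *= 1.0 - abs(bit - p)
    return result

print(chance([1, 0, 1], [0.9, 0.2, 0.5]))  # 0.9 * 0.8 * 0.5 = 0.36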
JustinLovinger/optimal
optimal/algorithms/crossentropy.py
_pdf_value
def _pdf_value(pdf, population, fitnesses, fitness_threshold):
    """Give the value of a pdf.

    This represents the likelihood of a pdf generating solutions
    that exceed the threshold.
    """
    # Add the chance of obtaining a solution from the pdf
    # when the fitness for that solution exceeds a threshold
    value = 0.0
    for solution, fitness in zip(population, fitnesses):
        if fitness >= fitness_threshold:
            # 1.0 + chance to avoid issues with chance of 0
            value += math.log(1.0 + _chance(solution, pdf))

    # The official equation states that value is now divided by len(fitnesses)
    # however, this is unnecessary when we are only obtaining the best pdf,
    # because every solution is of the same size
    return value
python
def _pdf_value(pdf, population, fitnesses, fitness_threshold):
    """Give the value of a pdf.

    This represents the likelihood of a pdf generating solutions
    that exceed the threshold.
    """
    # Add the chance of obtaining a solution from the pdf
    # when the fitness for that solution exceeds a threshold
    value = 0.0
    for solution, fitness in zip(population, fitnesses):
        if fitness >= fitness_threshold:
            # 1.0 + chance to avoid issues with chance of 0
            value += math.log(1.0 + _chance(solution, pdf))

    # The official equation states that value is now divided by len(fitnesses)
    # however, this is unnecessary when we are only obtaining the best pdf,
    # because every solution is of the same size
    return value
Give the value of a pdf.

This represents the likelihood of a pdf generating solutions
that exceed the threshold.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/crossentropy.py#L141-L158
JustinLovinger/optimal
optimal/algorithms/crossentropy.py
_update_pdf
def _update_pdf(population, fitnesses, pdfs, quantile):
    """Find a better pdf, based on fitnesses."""
    # First we determine a fitness threshold based on a quantile of fitnesses
    fitness_threshold = _get_quantile_cutoff(fitnesses, quantile)

    # Then check all of our possible pdfs with a stochastic program
    return _best_pdf(pdfs, population, fitnesses, fitness_threshold)
python
def _update_pdf(population, fitnesses, pdfs, quantile):
    """Find a better pdf, based on fitnesses."""
    # First we determine a fitness threshold based on a quantile of fitnesses
    fitness_threshold = _get_quantile_cutoff(fitnesses, quantile)

    # Then check all of our possible pdfs with a stochastic program
    return _best_pdf(pdfs, population, fitnesses, fitness_threshold)
Find a better pdf, based on fitnesses.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/crossentropy.py#L175-L181
JustinLovinger/optimal
optimal/helpers.py
binary_to_float
def binary_to_float(binary_list, lower_bound, upper_bound):
    """Return a floating point number between lower and upper bounds, from binary.

    Args:
        binary_list: list<int>; List of 0s and 1s.
            The number of bits in this list determine the number of possible
            values between lower and upper bound.
            Increase the size of binary_list for more precise floating points.
        lower_bound: Minimum value for output, inclusive.
            A binary list of 0s will have this value.
        upper_bound: Maximum value for output, inclusive.
            A binary list of 1s will have this value.

    Returns:
        float; A floating point number.
    """
    # Edge case for empty binary_list
    if binary_list == []:
        # With 0 bits, only one value can be represented,
        # and we default to lower_bound
        return lower_bound

    # A little bit of math gets us a floating point
    # number between upper and lower bound

    # We look at the relative position of
    # the integer corresponding to our binary list
    # between the upper and lower bound,
    # and offset that by lower bound
    return ((
        # Range between lower and upper bound
        float(upper_bound - lower_bound)
        # Divided by the maximum possible integer
        / (2**len(binary_list) - 1)
        # Times the integer represented by the given binary
        * binary_to_int(binary_list))
        # Plus the lower bound
        + lower_bound)
python
def binary_to_float(binary_list, lower_bound, upper_bound):
    """Return a floating point number between lower and upper bounds, from binary.

    Args:
        binary_list: list<int>; List of 0s and 1s.
            The number of bits in this list determine the number of possible
            values between lower and upper bound.
            Increase the size of binary_list for more precise floating points.
        lower_bound: Minimum value for output, inclusive.
            A binary list of 0s will have this value.
        upper_bound: Maximum value for output, inclusive.
            A binary list of 1s will have this value.

    Returns:
        float; A floating point number.
    """
    # Edge case for empty binary_list
    if binary_list == []:
        # With 0 bits, only one value can be represented,
        # and we default to lower_bound
        return lower_bound

    # A little bit of math gets us a floating point
    # number between upper and lower bound

    # We look at the relative position of
    # the integer corresponding to our binary list
    # between the upper and lower bound,
    # and offset that by lower bound
    return ((
        # Range between lower and upper bound
        float(upper_bound - lower_bound)
        # Divided by the maximum possible integer
        / (2**len(binary_list) - 1)
        # Times the integer represented by the given binary
        * binary_to_int(binary_list))
        # Plus the lower bound
        + lower_bound)
Return a floating point number between lower and upper bounds, from binary.

Args:
    binary_list: list<int>; List of 0s and 1s.
        The number of bits in this list determine the number of possible
        values between lower and upper bound.
        Increase the size of binary_list for more precise floating points.
    lower_bound: Minimum value for output, inclusive.
        A binary list of 0s will have this value.
    upper_bound: Maximum value for output, inclusive.
        A binary list of 1s will have this value.

Returns:
    float; A floating point number.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/helpers.py#L34-L71
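Usage sketch, assuming the helpers module is importable under the file path shown above: with 3 bits there are 2**3 - 1 = 7 steps between the bounds, so all-ones maps exactly to the upper bound.

from optimal.helpers import binary_to_float  # import path assumed from the file path above

print(binary_to_float([0, 0, 0], -1.0, 1.0))  # -1.0 (lower bound)
print(binary_to_float([1, 1, 1], -1.0, 1.0))  # 1.0 (upper bound)
print(binary_to_float([0, 1, 1], -1.0, 1.0))  # -1.0 + 2.0 * 3 / 7, about -0.1429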
JustinLovinger/optimal
optimal/helpers.py
binary_to_int
def binary_to_int(binary_list, lower_bound=0, upper_bound=None):
    """Return the base 10 integer corresponding to a binary list.

    The maximum value is determined by the number of bits in binary_list,
    and upper_bound. The greater allowed by the two.

    Args:
        binary_list: list<int>; List of 0s and 1s.
        lower_bound: Minimum value for output, inclusive.
            A binary list of 0s will have this value.
        upper_bound: Maximum value for output, inclusive.
            If greater than this bound, we "bounce back".
            Ex. w/ upper_bound = 2: [0, 1, 2, 2, 1, 0]
            Ex. raw_integer = 11, upper_bound = 10, return = 10
                raw_integer = 12, upper_bound = 10, return = 9

    Returns:
        int; Integer value of the binary input.
    """
    # Edge case for empty binary_list
    if binary_list == []:
        # With 0 bits, only one value can be represented,
        # and we default to lower_bound
        return lower_bound
    else:
        # The builtin int construction can take a base argument,
        # but it requires a string,
        # so we convert our binary list to a string
        integer = int(''.join([str(bit) for bit in binary_list]), 2)

    # Trim if over upper_bound
    if (upper_bound is not None) and integer + lower_bound > upper_bound:
        # Bounce back. Ex. w/ upper_bound = 2: [0, 1, 2, 2, 1, 0]
        return upper_bound - (integer % (upper_bound - lower_bound + 1))
    else:
        # Not over upper_bound
        return integer + lower_bound
python
def binary_to_int(binary_list, lower_bound=0, upper_bound=None):
    """Return the base 10 integer corresponding to a binary list.

    The maximum value is determined by the number of bits in binary_list,
    and upper_bound. The greater allowed by the two.

    Args:
        binary_list: list<int>; List of 0s and 1s.
        lower_bound: Minimum value for output, inclusive.
            A binary list of 0s will have this value.
        upper_bound: Maximum value for output, inclusive.
            If greater than this bound, we "bounce back".
            Ex. w/ upper_bound = 2: [0, 1, 2, 2, 1, 0]
            Ex. raw_integer = 11, upper_bound = 10, return = 10
                raw_integer = 12, upper_bound = 10, return = 9

    Returns:
        int; Integer value of the binary input.
    """
    # Edge case for empty binary_list
    if binary_list == []:
        # With 0 bits, only one value can be represented,
        # and we default to lower_bound
        return lower_bound
    else:
        # The builtin int construction can take a base argument,
        # but it requires a string,
        # so we convert our binary list to a string
        integer = int(''.join([str(bit) for bit in binary_list]), 2)

    # Trim if over upper_bound
    if (upper_bound is not None) and integer + lower_bound > upper_bound:
        # Bounce back. Ex. w/ upper_bound = 2: [0, 1, 2, 2, 1, 0]
        return upper_bound - (integer % (upper_bound - lower_bound + 1))
    else:
        # Not over upper_bound
        return integer + lower_bound
Return the base 10 integer corresponding to a binary list.

The maximum value is determined by the number of bits in binary_list,
and upper_bound. The greater allowed by the two.

Args:
    binary_list: list<int>; List of 0s and 1s.
    lower_bound: Minimum value for output, inclusive.
        A binary list of 0s will have this value.
    upper_bound: Maximum value for output, inclusive.
        If greater than this bound, we "bounce back".
        Ex. w/ upper_bound = 2: [0, 1, 2, 2, 1, 0]
        Ex. raw_integer = 11, upper_bound = 10, return = 10
            raw_integer = 12, upper_bound = 10, return = 9

Returns:
    int; Integer value of the binary input.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/helpers.py#L74-L111
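The bounce-back clamping is easiest to grasp from a couple of concrete calls (import path assumed, as above):

from optimal.helpers import binary_to_int  # import path assumed from the file path above

print(binary_to_int([1, 0, 1]))                     # 5
print(binary_to_int([1, 0, 1], lower_bound=2))      # 7
print(binary_to_int([1, 0, 1, 1], upper_bound=10))  # raw 11 bounces back to 10
print(binary_to_int([1, 1, 0, 0], upper_bound=10))  # raw 12 bounces back to 9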
JustinLovinger/optimal
optimal/algorithms/baseline.py
_int_to_binary
def _int_to_binary(integer, size=None):
    """Return bit list representation of integer.

    If size is given, binary string is padded with 0s, or clipped to the size.
    """
    binary_list = map(int, format(integer, 'b'))

    if size is None:
        return binary_list
    else:
        if len(binary_list) > size:
            # Too long, take only last n
            return binary_list[len(binary_list)-size:]
        elif size > len(binary_list):
            # Too short, pad
            return [0]*(size-len(binary_list)) + binary_list
        else:
            # Just right
            return binary_list
python
def _int_to_binary(integer, size=None):
    """Return bit list representation of integer.

    If size is given, binary string is padded with 0s, or clipped to the size.
    """
    binary_list = map(int, format(integer, 'b'))

    if size is None:
        return binary_list
    else:
        if len(binary_list) > size:
            # Too long, take only last n
            return binary_list[len(binary_list)-size:]
        elif size > len(binary_list):
            # Too short, pad
            return [0]*(size-len(binary_list)) + binary_list
        else:
            # Just right
            return binary_list
Return bit list representation of integer.

If size is given, binary string is padded with 0s, or clipped to the size.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/baseline.py#L164-L182
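_int_to_binary is module-private, so the sketch below restates its pad/clip behavior as a standalone function for illustration:

def int_to_binary(integer, size=None):
    # Same logic as _int_to_binary above, restated for a standalone demo
    binary_list = [int(c) for c in format(integer, 'b')]
    if size is None:
        return binary_list
    if len(binary_list) > size:
        return binary_list[len(binary_list) - size:]  # keep only the last `size` bits
    return [0] * (size - len(binary_list)) + binary_list  # left-pad with zeros

print(int_to_binary(5))          # [1, 0, 1]
print(int_to_binary(5, size=5))  # [0, 0, 1, 0, 1]
print(int_to_binary(5, size=2))  # [0, 1] (high bit clipped)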
JustinLovinger/optimal
optimal/algorithms/baseline.py
_RandomOptimizer.next_population
def next_population(self, population, fitnesses):
    """Make a new population after each optimization iteration.

    Args:
        population: The current population of solutions.
        fitnesses: The fitness associated with each solution in the
                   population
    Returns:
        list; a list of solutions.
    """
    return common.make_population(self._population_size,
                                  self._generate_solution)
python
def next_population(self, population, fitnesses):
    """Make a new population after each optimization iteration.

    Args:
        population: The current population of solutions.
        fitnesses: The fitness associated with each solution in the
                   population
    Returns:
        list; a list of solutions.
    """
    return common.make_population(self._population_size,
                                  self._generate_solution)
Make a new population after each optimization iteration.

Args:
    population: The current population of solutions.
    fitnesses: The fitness associated with each solution in the population

Returns:
    list; a list of solutions.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/baseline.py#L63-L73
JustinLovinger/optimal
optimal/algorithms/baseline.py
RandomReal._generate_solution
def _generate_solution(self):
    """Return a single random solution."""
    return common.random_real_solution(
        self._solution_size, self._lower_bounds, self._upper_bounds)
python
def _generate_solution(self):
    """Return a single random solution."""
    return common.random_real_solution(
        self._solution_size, self._lower_bounds, self._upper_bounds)
Return a single random solution.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/baseline.py#L108-L111
JustinLovinger/optimal
optimal/algorithms/baseline.py
ExhaustiveBinary.next_population
def next_population(self, population, fitnesses):
    """Make a new population after each optimization iteration.

    Args:
        population: The current population of solutions.
        fitnesses: The fitness associated with each solution in the
                   population
    Returns:
        list; a list of solutions.
    """
    return [self._next_solution() for _ in range(self._population_size)]
python
def next_population(self, population, fitnesses):
    """Make a new population after each optimization iteration.

    Args:
        population: The current population of solutions.
        fitnesses: The fitness associated with each solution in the
                   population
    Returns:
        list; a list of solutions.
    """
    return [self._next_solution() for _ in range(self._population_size)]
Make a new population after each optimization iteration.

Args:
    population: The current population of solutions.
    fitnesses: The fitness associated with each solution in the population

Returns:
    list; a list of solutions.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/baseline.py#L147-L156
cloudsigma/cgroupspy
cgroupspy/trees.py
BaseTree._build_tree
def _build_tree(self):
    """
    Build a full or a partial tree, depending on the groups/sub-groups specified.
    """

    groups = self._groups or self.get_children_paths(self.root_path)
    for group in groups:
        node = Node(name=group, parent=self.root)
        self.root.children.append(node)
        self._init_sub_groups(node)
python
def _build_tree(self):
    """
    Build a full or a partial tree, depending on the groups/sub-groups specified.
    """

    groups = self._groups or self.get_children_paths(self.root_path)
    for group in groups:
        node = Node(name=group, parent=self.root)
        self.root.children.append(node)
        self._init_sub_groups(node)
Build a full or a partial tree, depending on the groups/sub-groups specified.
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/trees.py#L71-L80
cloudsigma/cgroupspy
cgroupspy/trees.py
BaseTree._init_sub_groups
def _init_sub_groups(self, parent):
    """
    Initialise sub-groups, and create any that do not already exist.
    """

    if self._sub_groups:
        for sub_group in self._sub_groups:
            for component in split_path_components(sub_group):
                fp = os.path.join(parent.full_path, component)
                if os.path.exists(fp):
                    node = Node(name=component, parent=parent)
                    parent.children.append(node)
                else:
                    node = parent.create_cgroup(component)
                parent = node
            self._init_children(node)
    else:
        self._init_children(parent)
python
def _init_sub_groups(self, parent):
    """
    Initialise sub-groups, and create any that do not already exist.
    """

    if self._sub_groups:
        for sub_group in self._sub_groups:
            for component in split_path_components(sub_group):
                fp = os.path.join(parent.full_path, component)
                if os.path.exists(fp):
                    node = Node(name=component, parent=parent)
                    parent.children.append(node)
                else:
                    node = parent.create_cgroup(component)
                parent = node
            self._init_children(node)
    else:
        self._init_children(parent)
Initialise sub-groups, and create any that do not already exist.
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/trees.py#L82-L99
cloudsigma/cgroupspy
cgroupspy/trees.py
BaseTree._init_children
def _init_children(self, parent):
    """
    Initialise each node's children - essentially build the tree.
    """

    for dir_name in self.get_children_paths(parent.full_path):
        child = Node(name=dir_name, parent=parent)
        parent.children.append(child)
        self._init_children(child)
python
def _init_children(self, parent):
    """
    Initialise each node's children - essentially build the tree.
    """

    for dir_name in self.get_children_paths(parent.full_path):
        child = Node(name=dir_name, parent=parent)
        parent.children.append(child)
        self._init_children(child)
Initialise each node's children - essentially build the tree.
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/trees.py#L101-L109
cloudsigma/cgroupspy
cgroupspy/nodes.py
Node.full_path
def full_path(self):
    """Absolute system path to the node"""

    if self.parent:
        return os.path.join(self.parent.full_path, self.name)
    return self.name
python
def full_path(self):
    """Absolute system path to the node"""

    if self.parent:
        return os.path.join(self.parent.full_path, self.name)
    return self.name
Absolute system path to the node
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L87-L92
cloudsigma/cgroupspy
cgroupspy/nodes.py
Node.path
def path(self):
    """Node's relative path from the root node"""

    if self.parent:
        try:
            parent_path = self.parent.path.encode()
        except AttributeError:
            parent_path = self.parent.path
        return os.path.join(parent_path, self.name)
    return b"/"
python
def path(self):
    """Node's relative path from the root node"""

    if self.parent:
        try:
            parent_path = self.parent.path.encode()
        except AttributeError:
            parent_path = self.parent.path
        return os.path.join(parent_path, self.name)
    return b"/"
Node's relative path from the root node
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L95-L104
cloudsigma/cgroupspy
cgroupspy/nodes.py
Node._get_node_type
def _get_node_type(self):
    """Returns the current node's type"""

    if self.parent is None:
        return self.NODE_ROOT
    elif self.parent.node_type == self.NODE_ROOT:
        return self.NODE_CONTROLLER_ROOT
    elif b".slice" in self.name or b'.partition' in self.name:
        return self.NODE_SLICE
    elif b".scope" in self.name:
        return self.NODE_SCOPE
    else:
        return self.NODE_CGROUP
python
def _get_node_type(self):
    """Returns the current node's type"""

    if self.parent is None:
        return self.NODE_ROOT
    elif self.parent.node_type == self.NODE_ROOT:
        return self.NODE_CONTROLLER_ROOT
    elif b".slice" in self.name or b'.partition' in self.name:
        return self.NODE_SLICE
    elif b".scope" in self.name:
        return self.NODE_SCOPE
    else:
        return self.NODE_CGROUP
Returns the current node's type
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L106-L118
cloudsigma/cgroupspy
cgroupspy/nodes.py
Node._get_controller_type
def _get_controller_type(self):
    """Returns the current node's controller type"""

    if self.node_type == self.NODE_CONTROLLER_ROOT and self.name in self.CONTROLLERS:
        return self.name
    elif self.parent:
        return self.parent.controller_type
    else:
        return None
python
def _get_controller_type(self):
    """Returns the current node's controller type"""

    if self.node_type == self.NODE_CONTROLLER_ROOT and self.name in self.CONTROLLERS:
        return self.name
    elif self.parent:
        return self.parent.controller_type
    else:
        return None
Returns the current node's controller type
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L120-L128
cloudsigma/cgroupspy
cgroupspy/nodes.py
Node.create_cgroup
def create_cgroup(self, name):
    """
    Create a cgroup by name and attach it under this node.
    """
    node = Node(name, parent=self)
    if node in self.children:
        raise RuntimeError('Node {} already exists under {}'.format(name, self.path))

    name = name.encode()
    fp = os.path.join(self.full_path, name)
    os.mkdir(fp)
    self.children.append(node)
    return node
python
def create_cgroup(self, name):
    """
    Create a cgroup by name and attach it under this node.
    """
    node = Node(name, parent=self)
    if node in self.children:
        raise RuntimeError('Node {} already exists under {}'.format(name, self.path))

    name = name.encode()
    fp = os.path.join(self.full_path, name)
    os.mkdir(fp)
    self.children.append(node)
    return node
Create a cgroup by name and attach it under this node.
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L137-L149
cloudsigma/cgroupspy
cgroupspy/nodes.py
Node.delete_cgroup
def delete_cgroup(self, name):
    """
    Delete a cgroup by name and detach it from this node.
    Raises OSError if the cgroup is not empty.
    """
    name = name.encode()
    fp = os.path.join(self.full_path, name)
    if os.path.exists(fp):
        os.rmdir(fp)
    node = Node(name, parent=self)
    try:
        self.children.remove(node)
    except ValueError:
        return
python
def delete_cgroup(self, name):
    """
    Delete a cgroup by name and detach it from this node.
    Raises OSError if the cgroup is not empty.
    """
    name = name.encode()
    fp = os.path.join(self.full_path, name)
    if os.path.exists(fp):
        os.rmdir(fp)
    node = Node(name, parent=self)
    try:
        self.children.remove(node)
    except ValueError:
        return
Delete a cgroup by name and detach it from this node.
Raises OSError if the cgroup is not empty.
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L151-L164
cloudsigma/cgroupspy
cgroupspy/nodes.py
Node.delete_empty_children
def delete_empty_children(self):
    """
    Walk through the children of this node and delete any that are empty.
    """
    for child in self.children:
        child.delete_empty_children()
        try:
            if os.path.exists(child.full_path):
                os.rmdir(child.full_path)
        except OSError:
            pass
        else:
            self.children.remove(child)
python
def delete_empty_children(self):
    """
    Walk through the children of this node and delete any that are empty.
    """
    for child in self.children:
        child.delete_empty_children()
        try:
            if os.path.exists(child.full_path):
                os.rmdir(child.full_path)
        except OSError:
            pass
        else:
            self.children.remove(child)
Walk through the children of this node and delete any that are empty.
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L166-L176
cloudsigma/cgroupspy
cgroupspy/nodes.py
NodeControlGroup.add_node
def add_node(self, node):
    """
    Add a Node object to the group. Only one node per cgroup is supported
    """
    if self.controllers.get(node.controller_type, None):
        raise RuntimeError("Cannot add node {} to the node group. A node for {} group is already assigned".format(
            node,
            node.controller_type
        ))
    self.nodes.append(node)
    if node.controller:
        self.controllers[node.controller_type] = node.controller
        setattr(self, node.controller_type, node.controller)
python
def add_node(self, node):
    """
    Add a Node object to the group. Only one node per cgroup is supported
    """
    if self.controllers.get(node.controller_type, None):
        raise RuntimeError("Cannot add node {} to the node group. A node for {} group is already assigned".format(
            node,
            node.controller_type
        ))
    self.nodes.append(node)
    if node.controller:
        self.controllers[node.controller_type] = node.controller
        setattr(self, node.controller_type, node.controller)
Add a Node object to the group. Only one node per cgroup is supported
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L219-L231
cloudsigma/cgroupspy
cgroupspy/nodes.py
NodeControlGroup.group_tasks
def group_tasks(self):
    """All tasks in the hierarchy, affected by this group."""
    tasks = set()
    for node in walk_tree(self):
        for ctrl in node.controllers.values():
            tasks.update(ctrl.tasks)
    return tasks
python
def group_tasks(self):
    """All tasks in the hierarchy, affected by this group."""
    tasks = set()
    for node in walk_tree(self):
        for ctrl in node.controllers.values():
            tasks.update(ctrl.tasks)
    return tasks
All tasks in the hierarchy, affected by this group.
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L241-L247
cloudsigma/cgroupspy
cgroupspy/nodes.py
NodeControlGroup.tasks
def tasks(self):
    """Tasks in this exact group"""
    tasks = set()
    for ctrl in self.controllers.values():
        tasks.update(ctrl.tasks)
    return tasks
python
def tasks(self):
    """Tasks in this exact group"""
    tasks = set()
    for ctrl in self.controllers.values():
        tasks.update(ctrl.tasks)
    return tasks
Tasks in this exact group
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L250-L255
cloudsigma/cgroupspy
cgroupspy/controllers.py
Controller.filepath
def filepath(self, filename):
    """The full path to a file"""

    return os.path.join(self.node.full_path, filename)
python
def filepath(self, filename):
    """The full path to a file"""

    return os.path.join(self.node.full_path, filename)
The full path to a file
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/controllers.py#L48-L51
cloudsigma/cgroupspy
cgroupspy/controllers.py
Controller.get_property
def get_property(self, filename):
    """Opens the file and reads the value"""

    with open(self.filepath(filename)) as f:
        return f.read().strip()
python
def get_property(self, filename):
    """Opens the file and reads the value"""

    with open(self.filepath(filename)) as f:
        return f.read().strip()
Opens the file and reads the value
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/controllers.py#L53-L57
cloudsigma/cgroupspy
cgroupspy/controllers.py
Controller.set_property
def set_property(self, filename, value):
    """Opens the file and writes the value"""

    with open(self.filepath(filename), "w") as f:
        return f.write(str(value))
python
def set_property(self, filename, value):
    """Opens the file and writes the value"""

    with open(self.filepath(filename), "w") as f:
        return f.write(str(value))
Opens the file and writes the value
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/controllers.py#L59-L63
cloudsigma/cgroupspy
cgroupspy/utils.py
walk_tree
def walk_tree(root):
    """Pre-order depth-first"""

    yield root
    for child in root.children:
        for el in walk_tree(child):
            yield el
python
def walk_tree(root):
    """Pre-order depth-first"""

    yield root
    for child in root.children:
        for el in walk_tree(child):
            yield el
Pre-order depth-first
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/utils.py#L32-L38
cloudsigma/cgroupspy
cgroupspy/utils.py
walk_up_tree
def walk_up_tree(root):
    """Post-order depth-first"""

    for child in root.children:
        for el in walk_up_tree(child):
            yield el
    yield root
python
def walk_up_tree(root):
    """Post-order depth-first"""

    for child in root.children:
        for el in walk_up_tree(child):
            yield el
    yield root
Post-order depth-first
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/utils.py#L41-L47
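The two walkers differ only in when the parent is yielded, which is why post-order (walk_up_tree) suits deletions: leaves come out before their parents. A self-contained sketch with an invented stand-in node class:

class FakeNode(object):
    # Minimal stand-in exposing the `children` attribute the walkers expect
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)

def walk_tree(root):
    yield root
    for child in root.children:
        for el in walk_tree(child):
            yield el

def walk_up_tree(root):
    for child in root.children:
        for el in walk_up_tree(child):
            yield el
    yield root

root = FakeNode('root', [FakeNode('a', [FakeNode('a1')]), FakeNode('b')])
print([n.name for n in walk_tree(root)])     # ['root', 'a', 'a1', 'b']
print([n.name for n in walk_up_tree(root)])  # ['a1', 'a', 'b', 'root']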
cloudsigma/cgroupspy
cgroupspy/utils.py
get_device_major_minor
def get_device_major_minor(dev_path):
    """
    Returns the device (major, minor) tuple for simplicity

    :param dev_path: Path to the device
    :return: (device major, device minor)
    :rtype: (int, int)
    """

    stat = os.lstat(dev_path)
    return os.major(stat.st_rdev), os.minor(stat.st_rdev)
python
def get_device_major_minor(dev_path):
    """
    Returns the device (major, minor) tuple for simplicity

    :param dev_path: Path to the device
    :return: (device major, device minor)
    :rtype: (int, int)
    """

    stat = os.lstat(dev_path)
    return os.major(stat.st_rdev), os.minor(stat.st_rdev)
Returns the device (major, minor) tuple for simplicity

:param dev_path: Path to the device
:return: (device major, device minor)
:rtype: (int, int)
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/utils.py#L50-L58
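A quick usage note: st_rdev is only meaningful for device nodes, so the sketch below assumes a Linux-style /dev/null exists (commonly major 1, minor 3); for regular files st_rdev is 0 and both numbers come back as 0.

import os

st = os.lstat('/dev/null')  # assumes a Linux-style character device at this path
print(os.major(st.st_rdev), os.minor(st.st_rdev))  # commonly (1, 3) on Linux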
globality-corp/openapi
openapi/base.py
SchemaAware.validate
def validate(self):
    """
    Validate that this instance matches its schema.
    """
    schema = Schema(self.__class__.SCHEMA)
    resolver = RefResolver.from_schema(
        schema,
        store=REGISTRY,
    )
    validate(self, schema, resolver=resolver)
python
def validate(self):
    """
    Validate that this instance matches its schema.
    """
    schema = Schema(self.__class__.SCHEMA)
    resolver = RefResolver.from_schema(
        schema,
        store=REGISTRY,
    )
    validate(self, schema, resolver=resolver)
Validate that this instance matches its schema.
https://github.com/globality-corp/openapi/blob/ee1de8468abeb800e3ad0134952726cdce6b2459/openapi/base.py#L48-L58
globality-corp/openapi
openapi/base.py
SchemaAware.dumps
def dumps(self):
    """
    Dump this instance as YAML.
    """
    with closing(StringIO()) as fileobj:
        self.dump(fileobj)
        return fileobj.getvalue()
python
def dumps(self):
    """
    Dump this instance as YAML.
    """
    with closing(StringIO()) as fileobj:
        self.dump(fileobj)
        return fileobj.getvalue()
Dump this instance as YAML.
https://github.com/globality-corp/openapi/blob/ee1de8468abeb800e3ad0134952726cdce6b2459/openapi/base.py#L67-L74
globality-corp/openapi
openapi/base.py
SchemaAware.loads
def loads(cls, s):
    """
    Load an instance of this class from YAML.
    """
    with closing(StringIO(s)) as fileobj:
        return cls.load(fileobj)
python
def loads(cls, s):
    """
    Load an instance of this class from YAML.
    """
    with closing(StringIO(s)) as fileobj:
        return cls.load(fileobj)
Load an instance of this class from YAML.
https://github.com/globality-corp/openapi/blob/ee1de8468abeb800e3ad0134952726cdce6b2459/openapi/base.py#L85-L91
globality-corp/openapi
openapi/base.py
SchemaAwareDict.property_schema
def property_schema(self, key):
    """
    Lookup the schema for a specific property.
    """
    schema = self.__class__.SCHEMA

    # first try plain properties
    plain_schema = schema.get("properties", {}).get(key)
    if plain_schema is not None:
        return plain_schema

    # then try pattern properties
    pattern_properties = schema.get("patternProperties", {})
    for pattern, pattern_schema in pattern_properties.items():
        if match(pattern, key):
            return pattern_schema

    # finally try additional properties (defaults to true per JSON Schema)
    return schema.get("additionalProperties", True)
python
def property_schema(self, key):
    """
    Lookup the schema for a specific property.
    """
    schema = self.__class__.SCHEMA

    # first try plain properties
    plain_schema = schema.get("properties", {}).get(key)
    if plain_schema is not None:
        return plain_schema

    # then try pattern properties
    pattern_properties = schema.get("patternProperties", {})
    for pattern, pattern_schema in pattern_properties.items():
        if match(pattern, key):
            return pattern_schema

    # finally try additional properties (defaults to true per JSON Schema)
    return schema.get("additionalProperties", True)
Lookup the schema for a specific property.
https://github.com/globality-corp/openapi/blob/ee1de8468abeb800e3ad0134952726cdce6b2459/openapi/base.py#L141-L158
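The three-step lookup order (plain properties, then patternProperties, then additionalProperties) can be exercised with a plain dict standing in for SCHEMA; all names here are invented for the example:

from re import match

schema = {
    "properties": {"host": {"type": "string"}},
    "patternProperties": {"^x-": {"type": "object"}},
    "additionalProperties": False,
}

def property_schema(schema, key):
    # plain properties first
    plain_schema = schema.get("properties", {}).get(key)
    if plain_schema is not None:
        return plain_schema
    # then pattern properties
    for pattern, pattern_schema in schema.get("patternProperties", {}).items():
        if match(pattern, key):
            return pattern_schema
    # finally additionalProperties (true by default in JSON Schema)
    return schema.get("additionalProperties", True)

print(property_schema(schema, "host"))        # {'type': 'string'}
print(property_schema(schema, "x-internal"))  # {'type': 'object'}
print(property_schema(schema, "other"))       # False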
globality-corp/openapi
openapi/model.py
make
def make(class_name, base, schema):
    """
    Create a new schema aware type.
    """
    return type(class_name, (base,), dict(SCHEMA=schema))
python
def make(class_name, base, schema):
    """
    Create a new schema aware type.
    """
    return type(class_name, (base,), dict(SCHEMA=schema))
Create a new schema aware type.
https://github.com/globality-corp/openapi/blob/ee1de8468abeb800e3ad0134952726cdce6b2459/openapi/model.py#L11-L15
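make is a thin wrapper over the three-argument type() builtin, so the equivalence is visible with invented names:

Base = type('Base', (object,), {})
Widget = type('Widget', (Base,), dict(SCHEMA={'type': 'object'}))

print(Widget.__name__)           # Widget
print(Widget.SCHEMA)             # {'type': 'object'}
print(issubclass(Widget, Base))  # True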
globality-corp/openapi
openapi/model.py
make_definition
def make_definition(name, base, schema):
    """
    Create a new definition.
    """
    class_name = make_class_name(name)
    cls = register(make(class_name, base, schema))
    globals()[class_name] = cls
python
def make_definition(name, base, schema):
    """
    Create a new definition.
    """
    class_name = make_class_name(name)
    cls = register(make(class_name, base, schema))
    globals()[class_name] = cls
Create a new definition.
https://github.com/globality-corp/openapi/blob/ee1de8468abeb800e3ad0134952726cdce6b2459/openapi/model.py#L18-L25
globality-corp/openapi
openapi/registry.py
register
def register(cls):
    """
    Register a class.
    """
    definition_name = make_definition_name(cls.__name__)
    REGISTRY[definition_name] = cls
    return cls
python
def register(cls):
    """
    Register a class.
    """
    definition_name = make_definition_name(cls.__name__)
    REGISTRY[definition_name] = cls
    return cls
Register a class.
https://github.com/globality-corp/openapi/blob/ee1de8468abeb800e3ad0134952726cdce6b2459/openapi/registry.py#L12-L19
globality-corp/openapi
openapi/registry.py
lookup
def lookup(schema):
    """
    Lookup a class by property schema.
    """
    if not isinstance(schema, dict) or "$ref" not in schema:
        return None

    ref = schema["$ref"]
    return REGISTRY.get(ref)
python
def lookup(schema):
    """
    Lookup a class by property schema.
    """
    if not isinstance(schema, dict) or "$ref" not in schema:
        return None

    ref = schema["$ref"]
    return REGISTRY.get(ref)
Lookup a class by property schema.
https://github.com/globality-corp/openapi/blob/ee1de8468abeb800e3ad0134952726cdce6b2459/openapi/registry.py#L22-L32
thecynic/pylutron
pylutron/__init__.py
LutronConnection.connect
def connect(self):
  """Connects to the lutron controller."""
  if self._connected or self.is_alive():
    raise ConnectionExistsError("Already connected")
  # After starting the thread we wait for it to post us
  # an event signifying that connection is established. This
  # ensures that the caller only resumes when we are fully connected.
  self.start()
  with self._lock:
    self._connect_cond.wait_for(lambda: self._connected)
python
def connect(self):
  """Connects to the lutron controller."""
  if self._connected or self.is_alive():
    raise ConnectionExistsError("Already connected")
  # After starting the thread we wait for it to post us
  # an event signifying that connection is established. This
  # ensures that the caller only resumes when we are fully connected.
  self.start()
  with self._lock:
    self._connect_cond.wait_for(lambda: self._connected)
Connects to the lutron controller.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L63-L72
thecynic/pylutron
pylutron/__init__.py
LutronConnection._send_locked
def _send_locked(self, cmd):
  """Sends the specified command to the lutron controller.

  Assumes self._lock is held.
  """
  _LOGGER.debug("Sending: %s" % cmd)
  try:
    self._telnet.write(cmd.encode('ascii') + b'\r\n')
  except BrokenPipeError:
    self._disconnect_locked()
python
def _send_locked(self, cmd):
  """Sends the specified command to the lutron controller.

  Assumes self._lock is held.
  """
  _LOGGER.debug("Sending: %s" % cmd)
  try:
    self._telnet.write(cmd.encode('ascii') + b'\r\n')
  except BrokenPipeError:
    self._disconnect_locked()
Sends the specified command to the lutron controller.

Assumes self._lock is held.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L74-L83
thecynic/pylutron
pylutron/__init__.py
LutronConnection._do_login_locked
def _do_login_locked(self):
  """Executes the login procedure (telnet) as well as setting up some
  connection defaults like turning off the prompt, etc."""
  self._telnet = telnetlib.Telnet(self._host)
  self._telnet.read_until(LutronConnection.USER_PROMPT)
  self._telnet.write(self._user + b'\r\n')
  self._telnet.read_until(LutronConnection.PW_PROMPT)
  self._telnet.write(self._password + b'\r\n')
  self._telnet.read_until(LutronConnection.PROMPT)

  self._send_locked("#MONITORING,12,2")
  self._send_locked("#MONITORING,255,2")
  self._send_locked("#MONITORING,3,1")
  self._send_locked("#MONITORING,4,1")
  self._send_locked("#MONITORING,5,1")
  self._send_locked("#MONITORING,6,1")
  self._send_locked("#MONITORING,8,1")
python
def _do_login_locked(self):
  """Executes the login procedure (telnet) as well as setting up some
  connection defaults like turning off the prompt, etc."""
  self._telnet = telnetlib.Telnet(self._host)
  self._telnet.read_until(LutronConnection.USER_PROMPT)
  self._telnet.write(self._user + b'\r\n')
  self._telnet.read_until(LutronConnection.PW_PROMPT)
  self._telnet.write(self._password + b'\r\n')
  self._telnet.read_until(LutronConnection.PROMPT)

  self._send_locked("#MONITORING,12,2")
  self._send_locked("#MONITORING,255,2")
  self._send_locked("#MONITORING,3,1")
  self._send_locked("#MONITORING,4,1")
  self._send_locked("#MONITORING,5,1")
  self._send_locked("#MONITORING,6,1")
  self._send_locked("#MONITORING,8,1")
Executes the login procedure (telnet) as well as setting up some
connection defaults like turning off the prompt, etc.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L93-L109
thecynic/pylutron
pylutron/__init__.py
LutronConnection._disconnect_locked
def _disconnect_locked(self):
  """Closes the current connection. Assume self._lock is held."""
  self._connected = False
  self._connect_cond.notify_all()
  self._telnet = None
  _LOGGER.warning("Disconnected")
python
def _disconnect_locked(self):
  """Closes the current connection. Assume self._lock is held."""
  self._connected = False
  self._connect_cond.notify_all()
  self._telnet = None
  _LOGGER.warning("Disconnected")
Closes the current connection. Assume self._lock is held.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L111-L116
thecynic/pylutron
pylutron/__init__.py
LutronConnection._maybe_reconnect
def _maybe_reconnect(self):
  """Reconnects to the controller if we have been previously disconnected."""
  with self._lock:
    if not self._connected:
      _LOGGER.info("Connecting")
      self._do_login_locked()
      self._connected = True
      self._connect_cond.notify_all()
      _LOGGER.info("Connected")
python
def _maybe_reconnect(self):
  """Reconnects to the controller if we have been previously disconnected."""
  with self._lock:
    if not self._connected:
      _LOGGER.info("Connecting")
      self._do_login_locked()
      self._connected = True
      self._connect_cond.notify_all()
      _LOGGER.info("Connected")
Reconnects to the controller if we have been previously disconnected.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L118-L126
thecynic/pylutron
pylutron/__init__.py
LutronConnection.run
def run(self):
  """Main thread function to maintain connection and receive remote status."""
  _LOGGER.info("Started")
  while True:
    self._maybe_reconnect()
    line = ''
    try:
      # If someone is sending a command, we can lose our connection so grab a
      # copy beforehand. We don't need the lock because if the connection is
      # open, we are the only ones that will read from telnet (the reconnect
      # code runs synchronously in this loop).
      t = self._telnet
      if t is not None:
        line = t.read_until(b"\n")
    except EOFError:
      try:
        self._lock.acquire()
        self._disconnect_locked()
        continue
      finally:
        self._lock.release()
    self._recv_cb(line.decode('ascii').rstrip())
python
def run(self):
  """Main thread function to maintain connection and receive remote status."""
  _LOGGER.info("Started")
  while True:
    self._maybe_reconnect()
    line = ''
    try:
      # If someone is sending a command, we can lose our connection so grab a
      # copy beforehand. We don't need the lock because if the connection is
      # open, we are the only ones that will read from telnet (the reconnect
      # code runs synchronously in this loop).
      t = self._telnet
      if t is not None:
        line = t.read_until(b"\n")
    except EOFError:
      try:
        self._lock.acquire()
        self._disconnect_locked()
        continue
      finally:
        self._lock.release()
    self._recv_cb(line.decode('ascii').rstrip())
Main thread function to maintain connection and receive remote status.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L128-L149
thecynic/pylutron
pylutron/__init__.py
LutronXmlDbParser.parse
def parse(self):
  """Main entrypoint into the parser. It interprets and creates all the
  relevant Lutron objects and stuffs them into the appropriate hierarchy."""
  import xml.etree.ElementTree as ET

  root = ET.fromstring(self._xml_db_str)
  # The structure is something like this:
  # <Areas>
  #   <Area ...>
  #     <DeviceGroups ...>
  #     <Scenes ...>
  #     <ShadeGroups ...>
  #     <Outputs ...>
  #     <Areas ...>
  #       <Area ...>

  # First area is useless, it's the top-level project area that defines the
  # "house". It contains the real nested Areas tree, which is the one we want.
  top_area = root.find('Areas').find('Area')
  self.project_name = top_area.get('Name')
  areas = top_area.find('Areas')
  for area_xml in areas.getiterator('Area'):
    area = self._parse_area(area_xml)
    self.areas.append(area)
  return True
python
def parse(self):
  """Main entrypoint into the parser. It interprets and creates all the
  relevant Lutron objects and stuffs them into the appropriate hierarchy."""
  import xml.etree.ElementTree as ET

  root = ET.fromstring(self._xml_db_str)
  # The structure is something like this:
  # <Areas>
  #   <Area ...>
  #     <DeviceGroups ...>
  #     <Scenes ...>
  #     <ShadeGroups ...>
  #     <Outputs ...>
  #     <Areas ...>
  #       <Area ...>

  # First area is useless, it's the top-level project area that defines the
  # "house". It contains the real nested Areas tree, which is the one we want.
  top_area = root.find('Areas').find('Area')
  self.project_name = top_area.get('Name')
  areas = top_area.find('Areas')
  for area_xml in areas.getiterator('Area'):
    area = self._parse_area(area_xml)
    self.areas.append(area)
  return True
Main entrypoint into the parser. It interprets and creates all the
relevant Lutron objects and stuffs them into the appropriate hierarchy.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L166-L190
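A hedged, self-contained sketch of the nested-Areas walk over a toy XML string shaped like the comment above (real Lutron databases carry far more attributes; iter replaces the deprecated getiterator used in the source):

import xml.etree.ElementTree as ET

xml_db = """
<Project>
  <Areas>
    <Area Name="House">
      <Areas>
        <Area Name="Kitchen" IntegrationID="3"/>
        <Area Name="Bedroom" IntegrationID="4"/>
      </Areas>
    </Area>
  </Areas>
</Project>
"""

root = ET.fromstring(xml_db)
top_area = root.find('Areas').find('Area')
print(top_area.get('Name'))  # House, the top-level "project" area
for area_xml in top_area.find('Areas').iter('Area'):
    print(area_xml.get('Name'), area_xml.get('IntegrationID'))  # Kitchen 3, then Bedroom 4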
thecynic/pylutron
pylutron/__init__.py
LutronXmlDbParser._parse_area
def _parse_area(self, area_xml):
  """Parses an Area tag, which is effectively a room, depending on how the
  Lutron controller programming was done."""
  area = Area(self._lutron,
              name=area_xml.get('Name'),
              integration_id=int(area_xml.get('IntegrationID')),
              occupancy_group_id=area_xml.get('OccupancyGroupAssignedToID'))
  for output_xml in area_xml.find('Outputs'):
    output = self._parse_output(output_xml)
    area.add_output(output)
  # device group in our case means keypad
  # device_group.get('Name') is the location of the keypad
  for device_group in area_xml.find('DeviceGroups'):
    if device_group.tag == 'DeviceGroup':
      devs = device_group.find('Devices')
    elif device_group.tag == 'Device':
      devs = [device_group]
    else:
      _LOGGER.info("Unknown tag in DeviceGroups child %s" % devs)
      devs = []
    for device_xml in devs:
      if device_xml.tag != 'Device':
        continue
      if device_xml.get('DeviceType') in (
          'SEETOUCH_KEYPAD',
          'SEETOUCH_TABLETOP_KEYPAD',
          'PICO_KEYPAD',
          'HYBRID_SEETOUCH_KEYPAD',
          'MAIN_REPEATER'):
        keypad = self._parse_keypad(device_xml)
        area.add_keypad(keypad)
      elif device_xml.get('DeviceType') == 'MOTION_SENSOR':
        motion_sensor = self._parse_motion_sensor(device_xml)
        area.add_sensor(motion_sensor)
      #elif device_xml.get('DeviceType') == 'VISOR_CONTROL_RECEIVER':
  return area
python
def _parse_area(self, area_xml):
  """Parses an Area tag, which is effectively a room, depending on how the
  Lutron controller programming was done."""
  area = Area(self._lutron,
              name=area_xml.get('Name'),
              integration_id=int(area_xml.get('IntegrationID')),
              occupancy_group_id=area_xml.get('OccupancyGroupAssignedToID'))
  for output_xml in area_xml.find('Outputs'):
    output = self._parse_output(output_xml)
    area.add_output(output)
  # device group in our case means keypad
  # device_group.get('Name') is the location of the keypad
  for device_group in area_xml.find('DeviceGroups'):
    if device_group.tag == 'DeviceGroup':
      devs = device_group.find('Devices')
    elif device_group.tag == 'Device':
      devs = [device_group]
    else:
      _LOGGER.info("Unknown tag in DeviceGroups child %s" % devs)
      devs = []
    for device_xml in devs:
      if device_xml.tag != 'Device':
        continue
      if device_xml.get('DeviceType') in (
          'SEETOUCH_KEYPAD',
          'SEETOUCH_TABLETOP_KEYPAD',
          'PICO_KEYPAD',
          'HYBRID_SEETOUCH_KEYPAD',
          'MAIN_REPEATER'):
        keypad = self._parse_keypad(device_xml)
        area.add_keypad(keypad)
      elif device_xml.get('DeviceType') == 'MOTION_SENSOR':
        motion_sensor = self._parse_motion_sensor(device_xml)
        area.add_sensor(motion_sensor)
      #elif device_xml.get('DeviceType') == 'VISOR_CONTROL_RECEIVER':
  return area
Parses an Area tag, which is effectively a room, depending on how the
Lutron controller programming was done.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L192-L227
thecynic/pylutron
pylutron/__init__.py
LutronXmlDbParser._parse_output
def _parse_output(self, output_xml):
  """Parses an output, which is generally a switch controlling a set of
  lights/outlets, etc."""
  output = Output(self._lutron,
                  name=output_xml.get('Name'),
                  watts=int(output_xml.get('Wattage')),
                  output_type=output_xml.get('OutputType'),
                  integration_id=int(output_xml.get('IntegrationID')))
  return output
python
def _parse_output(self, output_xml):
  """Parses an output, which is generally a switch controlling a set of
  lights/outlets, etc."""
  output = Output(self._lutron,
                  name=output_xml.get('Name'),
                  watts=int(output_xml.get('Wattage')),
                  output_type=output_xml.get('OutputType'),
                  integration_id=int(output_xml.get('IntegrationID')))
  return output
Parses an output, which is generally a switch controlling a set of
lights/outlets, etc.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L229-L237
thecynic/pylutron
pylutron/__init__.py
LutronXmlDbParser._parse_keypad
def _parse_keypad(self, keypad_xml):
  """Parses a keypad device (the Visor receiver is technically a keypad too)."""
  keypad = Keypad(self._lutron,
                  name=keypad_xml.get('Name'),
                  integration_id=int(keypad_xml.get('IntegrationID')))
  components = keypad_xml.find('Components')
  if not components:
    return keypad
  for comp in components:
    if comp.tag != 'Component':
      continue
    comp_type = comp.get('ComponentType')
    if comp_type == 'BUTTON':
      button = self._parse_button(keypad, comp)
      keypad.add_button(button)
    elif comp_type == 'LED':
      led = self._parse_led(keypad, comp)
      keypad.add_led(led)
  return keypad
python
def _parse_keypad(self, keypad_xml):
  """Parses a keypad device (the Visor receiver is technically a keypad too)."""
  keypad = Keypad(self._lutron,
                  name=keypad_xml.get('Name'),
                  integration_id=int(keypad_xml.get('IntegrationID')))
  components = keypad_xml.find('Components')
  if not components:
    return keypad
  for comp in components:
    if comp.tag != 'Component':
      continue
    comp_type = comp.get('ComponentType')
    if comp_type == 'BUTTON':
      button = self._parse_button(keypad, comp)
      keypad.add_button(button)
    elif comp_type == 'LED':
      led = self._parse_led(keypad, comp)
      keypad.add_led(led)
  return keypad
Parses a keypad device (the Visor receiver is technically a keypad too).
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L239-L257
thecynic/pylutron
pylutron/__init__.py
LutronXmlDbParser._parse_button
def _parse_button(self, keypad, component_xml):
  """Parses a button device that is part of a keypad."""
  button_xml = component_xml.find('Button')
  name = button_xml.get('Engraving')
  button_type = button_xml.get('ButtonType')
  direction = button_xml.get('Direction')
  # Hybrid keypads have dimmer buttons which have no engravings.
  if button_type == 'SingleSceneRaiseLower':
    name = 'Dimmer ' + direction
  if not name:
    name = "Unknown Button"
  button = Button(self._lutron, keypad,
                  name=name,
                  num=int(component_xml.get('ComponentNumber')),
                  button_type=button_type,
                  direction=direction)
  return button
python
def _parse_button(self, keypad, component_xml):
  """Parses a button device that is part of a keypad."""
  button_xml = component_xml.find('Button')
  name = button_xml.get('Engraving')
  button_type = button_xml.get('ButtonType')
  direction = button_xml.get('Direction')
  # Hybrid keypads have dimmer buttons which have no engravings.
  if button_type == 'SingleSceneRaiseLower':
    name = 'Dimmer ' + direction
  if not name:
    name = "Unknown Button"
  button = Button(self._lutron, keypad,
                  name=name,
                  num=int(component_xml.get('ComponentNumber')),
                  button_type=button_type,
                  direction=direction)
  return button
Parses a button device that is part of a keypad.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L259-L275
thecynic/pylutron
pylutron/__init__.py
LutronXmlDbParser._parse_led
def _parse_led(self, keypad, component_xml):
  """Parses an LED device that is part of a keypad."""
  component_num = int(component_xml.get('ComponentNumber'))
  led_num = component_num - 80
  led = Led(self._lutron, keypad,
            name=('LED %d' % led_num),
            led_num=led_num,
            component_num=component_num)
  return led
python
def _parse_led(self, keypad, component_xml):
  """Parses an LED device that is part of a keypad."""
  component_num = int(component_xml.get('ComponentNumber'))
  led_num = component_num - 80
  led = Led(self._lutron, keypad,
            name=('LED %d' % led_num),
            led_num=led_num,
            component_num=component_num)
  return led
Parses an LED device that is part of a keypad.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L277-L285
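The `- 80` offset above encodes the convention, implied by the parser, that keypad LED components are numbered from 81 upward; a quick sanity check of that mapping:

# LED n lives at ComponentNumber 80 + n, so component 81 is 'LED 1'.
for component_num in (81, 82, 87):
    led_num = component_num - 80
    print('LED %d' % led_num, '<- component', component_num)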
thecynic/pylutron
pylutron/__init__.py
LutronXmlDbParser._parse_motion_sensor
def _parse_motion_sensor(self, sensor_xml):
  """Parses a motion sensor object.

  TODO: We don't actually do anything with these yet. There's a lot of info
  that needs to be managed to do this right. We'd have to manage the
  occupancy groups, what's assigned to them, and when they go (un)occupied.
  We'll handle this later.
  """
  return MotionSensor(self._lutron,
                      name=sensor_xml.get('Name'),
                      integration_id=int(sensor_xml.get('IntegrationID')))
python
def _parse_motion_sensor(self, sensor_xml):
  """Parses a motion sensor object.

  TODO: We don't actually do anything with these yet. There's a lot of info
  that needs to be managed to do this right. We'd have to manage the
  occupancy groups, what's assigned to them, and when they go (un)occupied.
  We'll handle this later.
  """
  return MotionSensor(self._lutron,
                      name=sensor_xml.get('Name'),
                      integration_id=int(sensor_xml.get('IntegrationID')))
Parses a motion sensor object. TODO: We don't actually do anything with these yet. There's a lot of info that needs to be managed to do this right. We'd have to manage the occupancy groups, what's assigned to them, and when they go (un)occupied. We'll handle this later.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L287-L297
thecynic/pylutron
pylutron/__init__.py
Lutron.subscribe
def subscribe(self, obj, handler):
  """Subscribes to status updates of the requested object.

  DEPRECATED

  The handler will be invoked when the controller sends a notification
  regarding changed state. The user can then further query the object for the
  state itself."""
  if not isinstance(obj, LutronEntity):
    raise InvalidSubscription("Subscription target not a LutronEntity")
  _LOGGER.warning("DEPRECATED: Subscribing via Lutron.subscribe is obsolete. "
                  "Please use LutronEntity.subscribe")
  if obj not in self._legacy_subscribers:
    self._legacy_subscribers[obj] = handler
    obj.subscribe(self._dispatch_legacy_subscriber, None)
python
def subscribe(self, obj, handler):
  """Subscribes to status updates of the requested object.

  DEPRECATED

  The handler will be invoked when the controller sends a notification
  regarding changed state. The user can then further query the object for the
  state itself."""
  if not isinstance(obj, LutronEntity):
    raise InvalidSubscription("Subscription target not a LutronEntity")
  _LOGGER.warning("DEPRECATED: Subscribing via Lutron.subscribe is obsolete. "
                  "Please use LutronEntity.subscribe")
  if obj not in self._legacy_subscribers:
    self._legacy_subscribers[obj] = handler
    obj.subscribe(self._dispatch_legacy_subscriber, None)
Subscribes to status updates of the requested object. DEPRECATED The handler will be invoked when the controller sends a notification regarding changed state. The user can then further query the object for the state itself.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L330-L344
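A side-by-side sketch of the deprecated path against the replacement the warning points to; `lutron` and `output` are assumed to be an already-initialized controller and one of its outputs:

# Legacy (deprecated): the handler is invoked as handler(obj) only.
def legacy_handler(obj):
    print('changed:', obj.name)

lutron.subscribe(output, legacy_handler)

# Preferred: subscribe on the entity itself; the handler also receives
# your context object, the event, and event-specific parameters.
def handler(obj, context, event, params):
    print(obj.name, event, params)

output.subscribe(handler, None)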
thecynic/pylutron
pylutron/__init__.py
Lutron.register_id
def register_id(self, cmd_type, obj):
  """Registers an object (through its integration id) to receive update
  notifications. This is the core mechanism by which Output and Keypad
  objects get notified when the controller sends status updates."""
  ids = self._ids.setdefault(cmd_type, {})
  if obj.id in ids:
    raise IntegrationIdExistsError
  self._ids[cmd_type][obj.id] = obj
python
def register_id(self, cmd_type, obj):
  """Registers an object (through its integration id) to receive update
  notifications. This is the core mechanism by which Output and Keypad
  objects get notified when the controller sends status updates."""
  ids = self._ids.setdefault(cmd_type, {})
  if obj.id in ids:
    raise IntegrationIdExistsError
  self._ids[cmd_type][obj.id] = obj
Registers an object (through its integration id) to receive update notifications. This is the core mechanism by which Output and Keypad objects get notified when the controller sends status updates.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L346-L353
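The registry is a plain two-level dict keyed by command type, then integration id; a self-contained sketch of the same shape (with a stand-in entity class, since real Output/Keypad objects need a live controller):

# Mirrors register_id: _ids[cmd_type][integration_id] -> entity.
_ids = {}

class FakeEntity:
    def __init__(self, id):
        self.id = id

def register_id(cmd_type, obj):
    ids = _ids.setdefault(cmd_type, {})
    if obj.id in ids:
        raise ValueError('integration id already registered')
    ids[obj.id] = obj

register_id('OUTPUT', FakeEntity(12))
register_id('DEVICE', FakeEntity(21))
print(_ids['OUTPUT'][12].id)   # 12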
thecynic/pylutron
pylutron/__init__.py
Lutron._dispatch_legacy_subscriber
def _dispatch_legacy_subscriber(self, obj, *args, **kwargs):
  """This dispatches the registered callback for 'obj'. This is only used
  for legacy subscribers since new users should register with the target
  object directly."""
  if obj in self._legacy_subscribers:
    self._legacy_subscribers[obj](obj)
python
def _dispatch_legacy_subscriber(self, obj, *args, **kwargs):
  """This dispatches the registered callback for 'obj'. This is only used
  for legacy subscribers since new users should register with the target
  object directly."""
  if obj in self._legacy_subscribers:
    self._legacy_subscribers[obj](obj)
This dispatches the registered callback for 'obj'. This is only used for legacy subscribers since new users should register with the target object directly.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L355-L360
thecynic/pylutron
pylutron/__init__.py
Lutron._recv
def _recv(self, line):
  """Invoked by the connection manager to process incoming data."""
  if line == '':
    return
  # Only handle query response messages, which are also sent on remote status
  # updates (e.g. user manually pressed a keypad button)
  if line[0] != Lutron.OP_RESPONSE:
    _LOGGER.debug("ignoring %s" % line)
    return
  parts = line[1:].split(',')
  cmd_type = parts[0]
  integration_id = int(parts[1])
  args = parts[2:]
  if cmd_type not in self._ids:
    _LOGGER.info("Unknown cmd %s (%s)" % (cmd_type, line))
    return
  ids = self._ids[cmd_type]
  if integration_id not in ids:
    _LOGGER.warning("Unknown id %d (%s)" % (integration_id, line))
    return
  obj = ids[integration_id]
  handled = obj.handle_update(args)
python
def _recv(self, line):
  """Invoked by the connection manager to process incoming data."""
  if line == '':
    return
  # Only handle query response messages, which are also sent on remote status
  # updates (e.g. user manually pressed a keypad button)
  if line[0] != Lutron.OP_RESPONSE:
    _LOGGER.debug("ignoring %s" % line)
    return
  parts = line[1:].split(',')
  cmd_type = parts[0]
  integration_id = int(parts[1])
  args = parts[2:]
  if cmd_type not in self._ids:
    _LOGGER.info("Unknown cmd %s (%s)" % (cmd_type, line))
    return
  ids = self._ids[cmd_type]
  if integration_id not in ids:
    _LOGGER.warning("Unknown id %d (%s)" % (integration_id, line))
    return
  obj = ids[integration_id]
  handled = obj.handle_update(args)
Invoked by the connection manager to process incoming data.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L362-L383
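Tracing a typical response line by hand shows how the dispatch keys are carved out; the line itself is made up and '~' stands in for Lutron.OP_RESPONSE:

line = '~OUTPUT,12,1,75.00'
assert line[0] == '~'              # the Lutron.OP_RESPONSE check
parts = line[1:].split(',')
cmd_type = parts[0]                # 'OUTPUT' -> selects the _ids bucket
integration_id = int(parts[1])     # 12       -> selects the entity
args = parts[2:]                   # ['1', '75.00'] -> obj.handle_update(args)
print(cmd_type, integration_id, args)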
thecynic/pylutron
pylutron/__init__.py
Lutron.send
def send(self, op, cmd, integration_id, *args):
  """Formats and sends the requested command to the Lutron controller."""
  out_cmd = ",".join(
      (cmd, str(integration_id)) + tuple((str(x) for x in args)))
  self._conn.send(op + out_cmd)
python
def send(self, op, cmd, integration_id, *args):
  """Formats and sends the requested command to the Lutron controller."""
  out_cmd = ",".join(
      (cmd, str(integration_id)) + tuple((str(x) for x in args)))
  self._conn.send(op + out_cmd)
Formats and sends the requested command to the Lutron controller.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L389-L393
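Running the same join by hand shows the wire format; '#' stands in for Lutron.OP_EXECUTE and the field values are invented:

op, cmd, integration_id, args = '#', 'OUTPUT', 12, (1, '75.00')
out_cmd = ",".join((cmd, str(integration_id)) + tuple(str(x) for x in args))
print(op + out_cmd)   # #OUTPUT,12,1,75.00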
thecynic/pylutron
pylutron/__init__.py
Lutron.load_xml_db
def load_xml_db(self):
  """Load the Lutron database from the server."""
  import urllib.request
  xmlfile = urllib.request.urlopen('http://' + self._host + '/DbXmlInfo.xml')
  xml_db = xmlfile.read()
  xmlfile.close()
  _LOGGER.info("Loaded xml db")

  parser = LutronXmlDbParser(lutron=self, xml_db_str=xml_db)
  assert(parser.parse())     # throw our own exception
  self._areas = parser.areas
  self._name = parser.project_name

  _LOGGER.info('Found Lutron project: %s, %d areas' % (
      self._name, len(self.areas)))

  return True
python
def load_xml_db(self):
  """Load the Lutron database from the server."""
  import urllib.request
  xmlfile = urllib.request.urlopen('http://' + self._host + '/DbXmlInfo.xml')
  xml_db = xmlfile.read()
  xmlfile.close()
  _LOGGER.info("Loaded xml db")

  parser = LutronXmlDbParser(lutron=self, xml_db_str=xml_db)
  assert(parser.parse())     # throw our own exception
  self._areas = parser.areas
  self._name = parser.project_name

  _LOGGER.info('Found Lutron project: %s, %d areas' % (
      self._name, len(self.areas)))

  return True
Load the Lutron database from the server.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L395-L412
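Typical bootstrap, assuming a reachable main repeater at a made-up address and valid integration credentials:

import pylutron

lutron = pylutron.Lutron('192.168.1.10', 'lutron', 'integration')
lutron.load_xml_db()   # fetch and parse http://<host>/DbXmlInfo.xml
lutron.connect()       # open the telnet session for live status updates
for area in lutron.areas:
    print(area.name, [output.name for output in area.outputs])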
thecynic/pylutron
pylutron/__init__.py
_RequestHelper.request
def request(self, action):
  """Request an action to be performed, in case one isn't already in
  progress."""
  ev = threading.Event()
  first = False
  with self.__lock:
    if len(self.__events) == 0:
      first = True
    self.__events.append(ev)
  if first:
    action()
  return ev
python
def request(self, action):
  """Request an action to be performed, in case one isn't already in
  progress."""
  ev = threading.Event()
  first = False
  with self.__lock:
    if len(self.__events) == 0:
      first = True
    self.__events.append(ev)
  if first:
    action()
  return ev
Request an action to be performed, in case one isn't already in progress.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L441-L451
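The helper coalesces concurrent queries: every caller gets an Event to wait on, but only the first pending caller fires the action. A standalone sketch of the same pattern, including the notify side that the update handlers rely on:

import threading

class RequestCoalescer:
    """Only the first waiter triggers the action; later waiters piggyback."""

    def __init__(self):
        self._lock = threading.Lock()
        self._events = []

    def request(self, action):
        ev = threading.Event()
        with self._lock:
            first = not self._events
            self._events.append(ev)
        if first:
            action()             # e.g. send a single query for many callers
        return ev

    def notify(self):            # invoked when the response arrives
        with self._lock:
            events, self._events = self._events, []
        for ev in events:
            ev.set()

helper = RequestCoalescer()
ev = helper.request(lambda: print('query sent once'))
helper.notify()
print(ev.wait(1.0))              # True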
thecynic/pylutron
pylutron/__init__.py
LutronEntity._dispatch_event
def _dispatch_event(self, event: LutronEvent, params: Dict):
  """Dispatches the specified event to all the subscribers."""
  for handler, context in self._subscribers:
    handler(self, context, event, params)
python
def _dispatch_event(self, event: LutronEvent, params: Dict):
  """Dispatches the specified event to all the subscribers."""
  for handler, context in self._subscribers:
    handler(self, context, event, params)
Dispatches the specified event to all the subscribers.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L485-L488
thecynic/pylutron
pylutron/__init__.py
LutronEntity.subscribe
def subscribe(self, handler: LutronEventHandler, context):
  """Subscribes to events from this entity.

  handler: A callable object that takes the following arguments (in order)
           obj: the LutronEntity object that generated the event
           context: user-supplied (to subscribe()) context object
           event: the LutronEvent that was generated.
           params: a dict of event-specific parameters

  context: User-supplied, opaque object that will be passed to handler.
  """
  self._subscribers.append((handler, context))
python
def subscribe(self, handler: LutronEventHandler, context):
  """Subscribes to events from this entity.

  handler: A callable object that takes the following arguments (in order)
           obj: the LutronEntity object that generated the event
           context: user-supplied (to subscribe()) context object
           event: the LutronEvent that was generated.
           params: a dict of event-specific parameters

  context: User-supplied, opaque object that will be passed to handler.
  """
  self._subscribers.append((handler, context))
Subscribes to events from this entity. handler: A callable object that takes the following arguments (in order) obj: the LutronEntity object that generated the event context: user-supplied (to subscribe()) context object event: the LutronEvent that was generated. params: a dict of event-specific parameters context: User-supplied, opaque object that will be passed to handler.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L490-L501
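A handler matching the documented signature; `button` is assumed to be a Button pulled from a parsed keypad:

import pylutron

def on_button(obj, context, event, params):
    if event == pylutron.Button.Event.PRESSED:
        print('%s pressed (context=%r, params=%r)' % (obj.name, context, params))

button.subscribe(on_button, 'kitchen entry')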
thecynic/pylutron
pylutron/__init__.py
Output.handle_update
def handle_update(self, args):
  """Handles an event update for this object, e.g. dimmer level change."""
  _LOGGER.debug("handle_update %d -- %s" % (self._integration_id, args))
  state = int(args[0])
  if state != Output._ACTION_ZONE_LEVEL:
    return False
  level = float(args[1])
  _LOGGER.debug("Updating %d(%s): s=%d l=%f" % (
      self._integration_id, self._name, state, level))
  self._level = level
  self._query_waiters.notify()
  self._dispatch_event(Output.Event.LEVEL_CHANGED, {'level': self._level})
  return True
python
def handle_update(self, args):
  """Handles an event update for this object, e.g. dimmer level change."""
  _LOGGER.debug("handle_update %d -- %s" % (self._integration_id, args))
  state = int(args[0])
  if state != Output._ACTION_ZONE_LEVEL:
    return False
  level = float(args[1])
  _LOGGER.debug("Updating %d(%s): s=%d l=%f" % (
      self._integration_id, self._name, state, level))
  self._level = level
  self._query_waiters.notify()
  self._dispatch_event(Output.Event.LEVEL_CHANGED, {'level': self._level})
  return True
Handles an event update for this object, e.g. dimmer level change.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L555-L567
thecynic/pylutron
pylutron/__init__.py
Output.__do_query_level
def __do_query_level(self):
  """Helper to perform the actual query of the current dimmer level of the
  output. For pure on/off loads the result is either 0.0 or 100.0."""
  self._lutron.send(Lutron.OP_QUERY, Output._CMD_TYPE, self._integration_id,
                    Output._ACTION_ZONE_LEVEL)
python
def __do_query_level(self):
  """Helper to perform the actual query of the current dimmer level of the
  output. For pure on/off loads the result is either 0.0 or 100.0."""
  self._lutron.send(Lutron.OP_QUERY, Output._CMD_TYPE, self._integration_id,
                    Output._ACTION_ZONE_LEVEL)
Helper to perform the actual query of the current dimmer level of the output. For pure on/off loads the result is either 0.0 or 100.0.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L569-L573
thecynic/pylutron
pylutron/__init__.py
Output.level
def level(self):
  """Returns the current output level by querying the remote controller."""
  ev = self._query_waiters.request(self.__do_query_level)
  ev.wait(1.0)
  return self._level
python
def level(self):
  """Returns the current output level by querying the remote controller."""
  ev = self._query_waiters.request(self.__do_query_level)
  ev.wait(1.0)
  return self._level
Returns the current output level by querying the remote controller.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L580-L584
thecynic/pylutron
pylutron/__init__.py
Output.level
def level(self, new_level):
  """Sets the new output level."""
  if self._level == new_level:
    return
  self._lutron.send(Lutron.OP_EXECUTE, Output._CMD_TYPE, self._integration_id,
                    Output._ACTION_ZONE_LEVEL, "%.2f" % new_level)
  self._level = new_level
python
def level(self, new_level):
  """Sets the new output level."""
  if self._level == new_level:
    return
  self._lutron.send(Lutron.OP_EXECUTE, Output._CMD_TYPE, self._integration_id,
                    Output._ACTION_ZONE_LEVEL, "%.2f" % new_level)
  self._level = new_level
Sets the new output level.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L587-L593
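Both accessors are exposed as a property on Output, so a read blocks briefly on a controller round trip while a write sends immediately; `output` is assumed to be a parsed dimmer:

current = output.level   # issues the query and waits up to 1 second
output.level = 75.0      # sends the new level and caches it locally
output.level = 0.0       # off; pure on/off loads only report 0.0 or 100.0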
thecynic/pylutron
pylutron/__init__.py
KeypadComponent.handle_update
def handle_update(self, action, params):
  """Handle the specified action on this component."""
  _LOGGER.debug('Keypad: "%s" Handling "%s" Action: %s Params: %s' % (
      self._keypad.name, self.name, action, params))
  return False
python
def handle_update(self, action, params):
  """Handle the specified action on this component."""
  _LOGGER.debug('Keypad: "%s" Handling "%s" Action: %s Params: %s' % (
      self._keypad.name, self.name, action, params))
  return False
Handle the specified action on this component.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L640-L644
thecynic/pylutron
pylutron/__init__.py
Button.press
def press(self):
  """Triggers a simulated button press to the Keypad."""
  self._lutron.send(Lutron.OP_EXECUTE, Keypad._CMD_TYPE, self._keypad.id,
                    self.component_number, Button._ACTION_PRESS)
python
def press(self):
  """Triggers a simulated button press to the Keypad."""
  self._lutron.send(Lutron.OP_EXECUTE, Keypad._CMD_TYPE, self._keypad.id,
                    self.component_number, Button._ACTION_PRESS)
Triggers a simulated button press to the Keypad.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L687-L690
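Simulating a press is a one-liner once the database has been parsed; `keypad` is assumed to come from one of the areas:

button = keypad.buttons[0]   # Keypad exposes its parsed buttons
button.press()               # sends the DEVICE press action for the component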
thecynic/pylutron
pylutron/__init__.py
Button.handle_update
def handle_update(self, action, params):
  """Handle the specified action on this component."""
  _LOGGER.debug('Keypad: "%s" %s Action: %s Params: %s' % (
      self._keypad.name, self, action, params))
  ev_map = {
      Button._ACTION_PRESS: Button.Event.PRESSED,
      Button._ACTION_RELEASE: Button.Event.RELEASED
  }
  if action not in ev_map:
    _LOGGER.debug("Unknown action %d for button %d in keypad %s" % (
        action, self.number, self._keypad.name))
    return False
  self._dispatch_event(ev_map[action], {})
  return True
python
def handle_update(self, action, params):
  """Handle the specified action on this component."""
  _LOGGER.debug('Keypad: "%s" %s Action: %s Params: %s' % (
      self._keypad.name, self, action, params))
  ev_map = {
      Button._ACTION_PRESS: Button.Event.PRESSED,
      Button._ACTION_RELEASE: Button.Event.RELEASED
  }
  if action not in ev_map:
    _LOGGER.debug("Unknown action %d for button %d in keypad %s" % (
        action, self.number, self._keypad.name))
    return False
  self._dispatch_event(ev_map[action], {})
  return True
Handle the specified action on this component.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L692-L705
thecynic/pylutron
pylutron/__init__.py
Led.__do_query_state
def __do_query_state(self):
  """Helper to perform the actual query for the current LED state."""
  self._lutron.send(Lutron.OP_QUERY, Keypad._CMD_TYPE, self._keypad.id,
                    self.component_number, Led._ACTION_LED_STATE)
python
def __do_query_state(self):
  """Helper to perform the actual query for the current LED state."""
  self._lutron.send(Lutron.OP_QUERY, Keypad._CMD_TYPE, self._keypad.id,
                    self.component_number, Led._ACTION_LED_STATE)
Helper to perform the actual query for the current LED state.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L738-L741
thecynic/pylutron
pylutron/__init__.py
Led.state
def state(self):
  """Returns the current LED state by querying the remote controller."""
  ev = self._query_waiters.request(self.__do_query_state)
  ev.wait(1.0)
  return self._state
python
def state(self):
  """Returns the current LED state by querying the remote controller."""
  ev = self._query_waiters.request(self.__do_query_state)
  ev.wait(1.0)
  return self._state
Returns the current LED state by querying the remote controller.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L749-L753
thecynic/pylutron
pylutron/__init__.py
Led.state
def state(self, new_state: bool):
  """Sets the new led state.

  new_state: bool
  """
  self._lutron.send(Lutron.OP_EXECUTE, Keypad._CMD_TYPE, self._keypad.id,
                    self.component_number, Led._ACTION_LED_STATE,
                    int(new_state))
  self._state = new_state
python
def state(self, new_state: bool):
  """Sets the new led state.

  new_state: bool
  """
  self._lutron.send(Lutron.OP_EXECUTE, Keypad._CMD_TYPE, self._keypad.id,
                    self.component_number, Led._ACTION_LED_STATE,
                    int(new_state))
  self._state = new_state
Sets the new led state. new_state: bool
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L756-L764
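Like Output.level, the LED state is a blocking read / write-through property; `keypad` is assumed to come from a parsed area:

led = keypad.leds[0]
if not led.state:       # queries the controller, waits up to 1 second
    led.state = True    # sends the LED-state action with int(True) == 1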
thecynic/pylutron
pylutron/__init__.py
Led.handle_update
def handle_update(self, action, params):
  """Handle the specified action on this component."""
  _LOGGER.debug('Keypad: "%s" %s Action: %s Params: %s' % (
      self._keypad.name, self, action, params))
  if action != Led._ACTION_LED_STATE:
    _LOGGER.debug("Unknown action %d for led %d in keypad %s" % (
        action, self.number, self._keypad.name))
    return False
  elif len(params) < 1:
    _LOGGER.debug("Unknown params %s (action %d on led %d in keypad %s)" % (
        params, action, self.number, self._keypad.name))
    return False
  self._state = bool(params[0])
  self._query_waiters.notify()
  self._dispatch_event(Led.Event.STATE_CHANGED, {'state': self._state})
  return True
python
def handle_update(self, action, params):
  """Handle the specified action on this component."""
  _LOGGER.debug('Keypad: "%s" %s Action: %s Params: %s' % (
      self._keypad.name, self, action, params))
  if action != Led._ACTION_LED_STATE:
    _LOGGER.debug("Unknown action %d for led %d in keypad %s" % (
        action, self.number, self._keypad.name))
    return False
  elif len(params) < 1:
    _LOGGER.debug("Unknown params %s (action %d on led %d in keypad %s)" % (
        params, action, self.number, self._keypad.name))
    return False
  self._state = bool(params[0])
  self._query_waiters.notify()
  self._dispatch_event(Led.Event.STATE_CHANGED, {'state': self._state})
  return True
Handle the specified action on this component.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L766-L781
thecynic/pylutron
pylutron/__init__.py
Keypad.add_button
def add_button(self, button):
  """Adds a button that's part of this keypad. We'll use this to
  dispatch button events."""
  self._buttons.append(button)
  self._components[button.component_number] = button
python
def add_button(self, button):
  """Adds a button that's part of this keypad. We'll use this to
  dispatch button events."""
  self._buttons.append(button)
  self._components[button.component_number] = button
Adds a button that's part of this keypad. We'll use this to dispatch button events.
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L802-L806