Dataset schema (column: type, observed value range):

repository_name            string, lengths 5–67
func_path_in_repository    string, lengths 4–234
func_name                  string, lengths 0–314
whole_func_string          string, lengths 52–3.87M
language                   string, 6 classes
func_code_string           string, lengths 52–3.87M
func_documentation_string  string, lengths 1–47.2k
func_code_url              string, lengths 85–339
learningequality/iceqube
src/iceqube/worker/backends/base.py
BaseWorkerBackend.start_message_processing
python

def start_message_processing(self):
    """
    Starts up the message processor thread, that continuously reads
    messages sent to self.incoming_message_mailbox, and starts or cancels
    jobs based on the message received.

    Returns: the Thread object.
    """
    t = InfiniteLoopThread(self.process_messages, thread_name="MESSAGEPROCESSOR", wait_between_runs=0.5)
    t.start()
    return t
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/worker/backends/base.py#L47-L56
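InfiniteLoopThread is an iceqube helper whose implementation is not shown in these records. As a rough sketch of the pattern it implements (standard library only; the class name and details are assumptions, not iceqube's actual code), a stoppable polling thread could look like:

import threading

class PollingThread(threading.Thread):
    """Hypothetical stand-in for iceqube's InfiniteLoopThread: calls func
    repeatedly, sleeping wait_between_runs seconds, until stop() is called."""

    def __init__(self, func, thread_name, wait_between_runs=0.5):
        super().__init__(name=thread_name, daemon=True)
        self.func = func
        self.wait_between_runs = wait_between_runs
        self._stop_event = threading.Event()

    def run(self):
        while not self._stop_event.is_set():
            self.func()
            # Event.wait doubles as an interruptible sleep.
            self._stop_event.wait(self.wait_between_runs)

    def stop(self):
        self._stop_event.set()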
learningequality/iceqube
src/iceqube/worker/backends/base.py
BaseWorkerBackend.process_messages
python

def process_messages(self):
    """
    Read from the incoming_message_mailbox and report to the storage backend
    based on the first message found there.

    Returns: None
    """
    try:
        msg = self.msgbackend.pop(self.incoming_message_mailbox)
        self.handle_incoming_message(msg)
    except queue.Empty:
        logger.debug("Worker message queue currently empty.")
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/worker/backends/base.py#L58-L68
learningequality/iceqube
src/iceqube/worker/backends/base.py
BaseWorkerBackend.handle_incoming_message
python

def handle_incoming_message(self, msg):
    """
    Start or cancel a job, based on the msg.

    If msg.type == MessageType.START_JOB, then start the job given by msg.job.

    If msg.type == MessageType.CANCEL_JOB, then try to cancel the job given by
    msg.job.job_id.

    Args:
        msg (barbequeue.messaging.classes.Message):

    Returns: None
    """
    if msg.type == MessageType.START_JOB:
        job = msg.message['job']
        self.schedule_job(job)
    elif msg.type == MessageType.CANCEL_JOB:
        job_id = msg.message['job_id']
        self.cancel(job_id)
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/worker/backends/base.py#L70-L89
learningequality/iceqube
src/iceqube/worker/backends/inmem.py
_reraise_with_traceback
python

def _reraise_with_traceback(f):
    """
    Call the function normally. But if the function raises an error, attach
    the str(traceback) to the exception's .traceback attribute, then reraise
    the error.

    Args:
        f: The function to run.

    Returns: A function that wraps f, attaching the traceback if an error occurred.
    """
    def wrap(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception as e:
            traceback_str = traceback.format_exc()
            e.traceback = traceback_str
            raise e
    return wrap
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/worker/backends/inmem.py#L132-L151
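A minimal standalone demonstration of this wrapper; the failing function is invented for illustration:

import traceback  # needed by _reraise_with_traceback

def boom():
    # hypothetical failing job body
    raise ValueError("something went wrong")

wrapped = _reraise_with_traceback(boom)
try:
    wrapped()
except ValueError as err:
    # The formatted traceback now travels with the exception object,
    # so the worker can report it back to the scheduler as a string.
    print(err.traceback)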
learningequality/iceqube
src/iceqube/worker/backends/inmem.py
WorkerBackend.schedule_job
python

def schedule_job(self, job):
    """
    schedule a job to the type of workers spawned by self.start_workers.

    :param job: the job to schedule for running.
    :return:
    """
    l = _reraise_with_traceback(job.get_lambda_to_execute())

    future = self.workers.submit(l, update_progress_func=self.update_progress, cancel_job_func=self._check_for_cancel)

    # assign the futures to a dict, mapping them to a job
    self.job_future_mapping[future] = job
    self.future_job_mapping[job.job_id] = future

    # callback for when the future is now!
    future.add_done_callback(self.handle_finished_future)

    # add the job to our cancel notifications data structure, with False at first
    self.cancel_notifications[job.job_id] = False

    return future
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/worker/backends/inmem.py#L29-L50
learningequality/iceqube
src/iceqube/worker/backends/inmem.py
WorkerBackend.cancel
python

def cancel(self, job_id):
    """
    Request a cancellation from the futures executor pool.
    If that didn't work (because it's already running), then mark
    a special variable inside the future that we can check
    inside a special check_for_cancel function passed to the
    job.

    :param job_id:
    :return:
    """
    future = self.future_job_mapping[job_id]
    is_future_cancelled = future.cancel()

    if is_future_cancelled:
        # success!
        return True
    else:
        if future.running():
            # Already running, but let's mark the future as cancelled
            # anyway, to make sure that calling future.result() will raise an error.
            # Our cancelling callback will then check this variable to see its state,
            # and exit if it's cancelled.
            from concurrent.futures._base import CANCELLED
            future._state = CANCELLED
            return False
        else:
            # probably finished already, too late to cancel!
            return False
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/worker/backends/inmem.py#L86-L111
learningequality/iceqube
src/iceqube/worker/backends/inmem.py
WorkerBackend._check_for_cancel
python

def _check_for_cancel(self, job_id, current_stage=""):
    """
    Check if a job has been requested to be cancelled. When called, the calling function can
    optionally give the stage it is currently in, so the user has information on where the job
    was before it was cancelled.

    :param job_id: The job_id to check
    :param current_stage: Where the job currently is
    :return: raises a UserCancelledError if we find out that we were cancelled.
    """
    future = self.future_job_mapping[job_id]
    is_cancelled = future._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
    if is_cancelled:
        raise UserCancelledError(last_stage=current_stage)
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/worker/backends/inmem.py#L113-L129
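On the job side, cooperative cancellation means the running function must call the injected check_for_cancel periodically. A sketch under stated assumptions (the function name and workload are made up; iceqube injects the keyword arguments when cancellable and track_progress are set, as shown in Job.get_lambda_to_execute below):

def copy_items(items, update_progress=None, check_for_cancel=None):
    # Hypothetical cancellable job body.
    total = len(items)
    for i, item in enumerate(items):
        if check_for_cancel:
            # Raises UserCancelledError if cancel() flipped the future's state.
            check_for_cancel(current_stage="copying item {}".format(i))
        # ... do the actual work on item ...
        if update_progress:
            update_progress(i + 1, total)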
learningequality/iceqube
src/iceqube/scheduler/classes.py
Scheduler.start_scheduler
python

def start_scheduler(self):
    """
    Start the scheduler thread. This thread reads the queue of jobs
    to be scheduled and sends them to the workers.

    Returns: the Thread object.
    """
    t = InfiniteLoopThread(
        func=self.schedule_next_job,
        thread_name="SCHEDULER",
        wait_between_runs=0.5)
    t.start()
    return t
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/scheduler/classes.py#L25-L37
learningequality/iceqube
src/iceqube/scheduler/classes.py
Scheduler.start_worker_message_handler
python

def start_worker_message_handler(self):
    """
    Start the worker message handler thread, that loops
    over messages from workers (job progress updates, failures and successes etc.)
    and then updates the job's status.

    Returns: the Thread object.
    """
    t = InfiniteLoopThread(
        func=lambda: self.handle_worker_messages(timeout=2),
        thread_name="WORKERMESSAGEHANDLER",
        wait_between_runs=0.5)
    t.start()
    return t
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/scheduler/classes.py#L39-L51
learningequality/iceqube
src/iceqube/scheduler/classes.py
Scheduler.shutdown
python

def shutdown(self, wait=True):
    """
    Shut down the worker message handler and scheduler threads.

    Args:
        wait: If True, block until both threads have successfully shut down.
            If False, return immediately.

    Returns: None
    """
    self.scheduler_thread.stop()
    self.worker_message_handler_thread.stop()

    if wait:
        self.scheduler_thread.join()
        self.worker_message_handler_thread.join()
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/scheduler/classes.py#L53-L67
learningequality/iceqube
src/iceqube/scheduler/classes.py
Scheduler.request_job_cancel
python

def request_job_cancel(self, job_id):
    """
    Send a message to the workers to cancel the job with job_id. We then mark
    the job in the storage as being canceled.

    :param job_id: the job to cancel
    :return: None
    """
    msg = CancelMessage(job_id)
    self.messaging_backend.send(self.worker_mailbox, msg)
    self.storage_backend.mark_job_as_canceling(job_id)
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/scheduler/classes.py#L69-L79
learningequality/iceqube
src/iceqube/scheduler/classes.py
Scheduler.schedule_next_job
python

def schedule_next_job(self):
    """
    Get the next job in the queue to be scheduled, and send a message
    to the workers to start the job.

    Returns: None
    """
    next_job = self.storage_backend.get_next_scheduled_job()
    # TODO: don't loop over if workers are already all running

    if not next_job:
        logging.debug("No job to schedule right now.")
        return

    try:
        self.messaging_backend.send(self.worker_mailbox, Message(
            type=MessageType.START_JOB,
            message={'job': next_job}))
        self.storage_backend.mark_job_as_queued(next_job.job_id)
    except Full:
        logging.debug(
            "Worker queue full; skipping scheduling of job {} for now.".format(next_job.job_id)
        )
        return
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/scheduler/classes.py#L81-L105
learningequality/iceqube
src/iceqube/scheduler/classes.py
Scheduler.handle_worker_messages
python

def handle_worker_messages(self, timeout):
    """
    Read messages that are placed in self.incoming_mailbox,
    and then update the job states corresponding to each message.

    Args:
        timeout: How long to wait for an incoming message, if the mailbox is empty right now.

    Returns: None
    """
    msgs = self.messaging_backend.popn(self.incoming_mailbox, n=20)

    for msg in msgs:
        self.handle_single_message(msg)
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/scheduler/classes.py#L107-L121
learningequality/iceqube
src/iceqube/scheduler/classes.py
Scheduler.handle_single_message
python

def handle_single_message(self, msg):
    """
    Handle one message and modify the job storage appropriately.

    :param msg: the message to handle
    :return: None
    """
    job_id = msg.message['job_id']
    actual_msg = msg.message

    if msg.type == MessageType.JOB_UPDATED:
        progress = actual_msg['progress']
        total_progress = actual_msg['total_progress']
        self.storage_backend.update_job_progress(job_id, progress, total_progress)
    elif msg.type == MessageType.JOB_COMPLETED:
        self.storage_backend.complete_job(job_id)
    elif msg.type == MessageType.JOB_FAILED:
        exc = actual_msg['exception']
        trace = actual_msg['traceback']
        self.storage_backend.mark_job_as_failed(job_id, exc, trace)
    elif msg.type == MessageType.JOB_CANCELED:
        self.storage_backend.mark_job_as_canceled(job_id)
    else:
        self.logger.error("Unknown message type: {}".format(msg.type))
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/scheduler/classes.py#L123-L145
learningequality/iceqube
src/iceqube/common/classes.py
Job.get_lambda_to_execute
python

def get_lambda_to_execute(self):
    """
    return a function that executes the function assigned to this job.

    If job.track_progress is None (the default), the returned function accepts no argument
    and simply needs to be called. If job.track_progress is True, an update_progress function
    is passed in that can be used by the function to provide feedback progress back to the
    job scheduling system.

    :return: a function that executes the original function assigned to this job.
    """
    def y(update_progress_func, cancel_job_func):
        """
        Call the function stored in self.func, passing in update_progress_func
        or cancel_job_func depending on whether self.track_progress or
        self.cancellable is set, respectively.

        :param update_progress_func: The callback for when the job updates its progress.
        :param cancel_job_func: The function that the function has to call occasionally to see
        if the user wants to cancel the currently running job.
        :return: Any
        """
        func = import_stringified_func(self.func)

        extrafunckwargs = {}

        args, kwargs = copy.copy(self.args), copy.copy(self.kwargs)

        if self.track_progress:
            extrafunckwargs["update_progress"] = partial(update_progress_func, self.job_id)

        if self.cancellable:
            extrafunckwargs["check_for_cancel"] = partial(cancel_job_func, self.job_id)

        kwargs.update(extrafunckwargs)

        return func(*args, **kwargs)

    return y
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/common/classes.py#L85-L122
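The functools.partial calls above pre-bind job_id so the user's function never has to know its own job id. A toy illustration of that binding (the callback is a stand-in, not iceqube's real update_progress):

from functools import partial

def update_progress_func(job_id, progress, total_progress):
    # Stand-in for the worker backend's progress callback.
    print("job {}: {}/{}".format(job_id, progress, total_progress))

# What y() builds: bind the job id up front, so the running function
# only supplies (progress, total_progress).
update_progress = partial(update_progress_func, "abc123")
update_progress(5, 10)  # prints: job abc123: 5/10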
learningequality/iceqube
src/iceqube/common/classes.py
Job.percentage_progress
python

def percentage_progress(self):
    """
    Returns a float between 0 and 1, representing the current job's progress in its task.
    If total_progress is not given or 0, just return self.progress.

    :return: float corresponding to the total percentage progress of the job.
    """
    if self.total_progress != 0:
        return float(self.progress) / self.total_progress
    else:
        return self.progress
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/common/classes.py#L125-L136
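A quick numeric check of both branches, using a stub class built only for illustration:

class JobStub:
    """Minimal stand-in exposing the two attributes the property reads."""
    def __init__(self, progress, total_progress):
        self.progress = progress
        self.total_progress = total_progress

    @property
    def percentage_progress(self):
        if self.total_progress != 0:
            return float(self.progress) / self.total_progress
        return self.progress

print(JobStub(30, 120).percentage_progress)  # 0.25
print(JobStub(30, 0).percentage_progress)    # 30, falls back to raw progress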
learningequality/iceqube
src/iceqube/client.py
Client.schedule
python

def schedule(self, func, *args, **kwargs):
    """
    Schedules a function func for execution.

    One special parameter is track_progress. If passed in and not None, the func will be passed
    in a keyword parameter called update_progress:

    def update_progress(progress, total_progress, stage=""):

    The running function can call the update_progress function to notify interested parties of
    the function's current progress.

    Another special parameter is the "cancellable" keyword parameter. When passed in and not
    None, a special "check_for_cancel" parameter is passed in. When called, it raises an error
    when the user has requested a job to be cancelled.

    The caller can also pass in any pickleable object into the "extra_metadata" parameter. This
    data is stored within the job and can be retrieved when the job status is queried.

    All other parameters are directly passed to the function when it starts running.

    :type func: callable or str
    :param func: A callable object that will be scheduled for running.
    :return: a string representing the job_id.
    """
    # if the func is already a job object, just schedule that directly.
    if isinstance(func, Job):
        job = func
    # else, turn it into a job first.
    else:
        job = Job(func, *args, **kwargs)

    job.track_progress = kwargs.pop('track_progress', False)
    job.cancellable = kwargs.pop('cancellable', False)
    job.extra_metadata = kwargs.pop('extra_metadata', {})
    job_id = self.storage.schedule_job(job)
    return job_id
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/client.py#L15-L52
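Putting the special parameters together, a call site could look like the sketch below. The task function is invented, and client is assumed to be an already-constructed iceqube client; this is an illustration, not iceqube's documented setup:

def send_emails(recipients, update_progress=None, check_for_cancel=None):
    # Hypothetical task. update_progress and check_for_cancel are injected
    # by iceqube because track_progress/cancellable are passed below.
    for i, recipient in enumerate(recipients):
        if check_for_cancel:
            check_for_cancel()
        # ... send to recipient ...
        if update_progress:
            update_progress(i + 1, len(recipients))

# client is assumed to be an already-constructed iceqube Client.
job_id = client.schedule(
    send_emails,
    ["a@example.com", "b@example.com"],
    track_progress=True,
    cancellable=True,
    extra_metadata={"requested_by": "admin"},
)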
learningequality/iceqube
src/iceqube/client.py
Client.wait
python

def wait(self, job_id, timeout=None):
    """
    Wait until the job given by job_id has a new update.

    :param job_id: the id of the job to wait for.
    :param timeout: how long to wait for a job state change before timing out.
    :return: Job object corresponding to job_id
    """
    return self.storage.wait_for_job_update(job_id, timeout=timeout)
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/client.py#L86-L94
learningequality/iceqube
src/iceqube/client.py
Client.wait_for_completion
python

def wait_for_completion(self, job_id, timeout=None):
    """
    Wait for the job given by job_id to change to COMPLETED, FAILED or CANCELED. Raises an
    iceqube.exceptions.TimeoutError if timeout is exceeded before each job change.

    :param job_id: the id of the job to wait for.
    :param timeout: how long to wait for a job state change before timing out.
    """
    while 1:
        job = self.wait(job_id, timeout=timeout)
        if job.state in [State.COMPLETED, State.FAILED, State.CANCELED]:
            return job
        else:
            continue
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/client.py#L96-L109
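A caller can then block until a terminal state is reached, continuing the hypothetical job_id from the schedule() sketch above:

from iceqube.exceptions import TimeoutError  # the exception named in the docstring

try:
    job = client.wait_for_completion(job_id, timeout=30)
    print("job ended in state:", job.state)
except TimeoutError:
    print("no state change within 30 seconds")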
learningequality/iceqube
src/iceqube/client.py
SimpleClient.shutdown
python

def shutdown(self):
    """
    Shutdown the client and all of its managed resources:

    - the workers
    - the scheduler threads

    :return: None
    """
    self._storage.clear()
    self._scheduler.shutdown(wait=False)
    self._workers.shutdown(wait=False)
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/client.py#L151-L162
gusdan/django-elasticache
django_elasticache/cluster_utils.py
get_cluster_info
python

def get_cluster_info(host, port, ignore_cluster_errors=False):
    """
    return dict with info about nodes in cluster and current version
    {
        'nodes': [
            'IP:port',
            'IP:port',
        ],
        'version': '1.4.4'
    }
    """
    client = Telnet(host, int(port))
    client.write(b'version\n')
    res = client.read_until(b'\r\n').strip()
    version_list = res.split(b' ')
    if len(version_list) not in [2, 3] or version_list[0] != b'VERSION':
        raise WrongProtocolData('version', res)
    version = version_list[1]
    if StrictVersion(smart_text(version)) >= StrictVersion('1.4.14'):
        cmd = b'config get cluster\n'
    else:
        cmd = b'get AmazonElastiCache:cluster\n'
    client.write(cmd)
    regex_index, match_object, res = client.expect([
        re.compile(b'\n\r\nEND\r\n'),
        re.compile(b'ERROR\r\n')
    ])
    client.close()

    if res == b'ERROR\r\n' and ignore_cluster_errors:
        return {
            'version': version,
            'nodes': [
                '{0}:{1}'.format(smart_text(host), smart_text(port))
            ]
        }

    ls = list(filter(None, re.compile(br'\r?\n').split(res)))

    if len(ls) != 4:
        raise WrongProtocolData(cmd, res)

    try:
        version = int(ls[1])
    except ValueError:
        raise WrongProtocolData(cmd, res)

    nodes = []
    try:
        for node in ls[2].split(b' '):
            host, ip, port = node.split(b'|')
            nodes.append('{0}:{1}'.format(smart_text(ip or host), smart_text(port)))
    except ValueError:
        raise WrongProtocolData(cmd, res)
    return {
        'version': version,
        'nodes': nodes
    }
https://github.com/gusdan/django-elasticache/blob/5f93c06ca8f264e3bd85b5f7044fd07733282e42/django_elasticache/cluster_utils.py#L20-L77
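A hedged usage sketch; the hostname below is a placeholder for a real ElastiCache configuration endpoint:

# Hypothetical configuration endpoint; replace with your cluster's endpoint.
info = get_cluster_info('mycluster.cfg.use1.cache.amazonaws.com', 11211)
print(info['version'])  # cluster config version counter, e.g. 1
print(info['nodes'])    # e.g. ['10.0.0.1:11211', '10.0.0.2:11211']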
gusdan/django-elasticache
django_elasticache/memcached.py
invalidate_cache_after_error
python

def invalidate_cache_after_error(f):
    """
    catch any exception and invalidate internal cache with list of nodes
    """
    @wraps(f)
    def wrapper(self, *args, **kwds):
        try:
            return f(self, *args, **kwds)
        except Exception:
            self.clear_cluster_nodes_cache()
            raise
    return wrapper
https://github.com/gusdan/django-elasticache/blob/5f93c06ca8f264e3bd85b5f7044fd07733282e42/django_elasticache/memcached.py#L11-L22
gusdan/django-elasticache
django_elasticache/memcached.py
ElastiCache.update_params
python

def update_params(self, params):
    """
    update connection params to maximize performance
    """
    if not params.get('BINARY', True):
        raise Warning('To increase performance please use ElastiCache'
                      ' in binary mode')
    else:
        params['BINARY'] = True  # patch params, set binary mode
    if 'OPTIONS' not in params:
        # set special 'behaviors' pylibmc attributes
        params['OPTIONS'] = {
            'tcp_nodelay': True,
            'ketama': True
        }
https://github.com/gusdan/django-elasticache/blob/5f93c06ca8f264e3bd85b5f7044fd07733282e42/django_elasticache/memcached.py#L44-L58
gusdan/django-elasticache
django_elasticache/memcached.py
ElastiCache.get_cluster_nodes
python

def get_cluster_nodes(self):
    """
    return list with all nodes in cluster
    """
    if not hasattr(self, '_cluster_nodes_cache'):
        server, port = self._servers[0].split(':')
        try:
            self._cluster_nodes_cache = (
                get_cluster_info(server, port,
                                 self._ignore_cluster_errors)['nodes'])
        except (socket.gaierror, socket.timeout) as err:
            raise Exception('Cannot connect to cluster {0} ({1})'.format(
                self._servers[0], err
            ))
    return self._cluster_nodes_cache
https://github.com/gusdan/django-elasticache/blob/5f93c06ca8f264e3bd85b5f7044fd07733282e42/django_elasticache/memcached.py#L65-L79
ankitpopli1891/django-autotranslate
autotranslate/management/commands/translate_messages.py
humanize_placeholders
python

def humanize_placeholders(msgid):
    """Convert placeholders to the (google translate) service friendly form.

    %(name)s -> __name__
    %s       -> __item__
    %d       -> __number__
    """
    return re.sub(
        r'%(?:\((\w+)\))?([sd])',
        lambda match: r'__{0}__'.format(
            match.group(1).lower() if match.group(1) else 'number' if match.group(2) == 'd' else 'item'),
        msgid)
https://github.com/ankitpopli1891/django-autotranslate/blob/ffdf120fa023b3e399cd37bc23e661a7be7b1718/autotranslate/management/commands/translate_messages.py#L157-L168
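A few concrete inputs and outputs of this substitution:

print(humanize_placeholders('Hello %(name)s!'))  # Hello __name__!
print(humanize_placeholders('%d items in %s'))   # __number__ items in __item__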
ankitpopli1891/django-autotranslate
autotranslate/management/commands/translate_messages.py
restore_placeholders
python

def restore_placeholders(msgid, translation):
    """Restore placeholders in the translated message."""
    placeholders = re.findall(r'(\s*)(%(?:\(\w+\))?[sd])(\s*)', msgid)
    return re.sub(
        r'(\s*)(__[\w]+?__)(\s*)',
        lambda matches: '{0}{1}{2}'.format(placeholders[0][0], placeholders[0][1], placeholders.pop(0)[2]),
        translation)
https://github.com/ankitpopli1891/django-autotranslate/blob/ffdf120fa023b3e399cd37bc23e661a7be7b1718/autotranslate/management/commands/translate_messages.py#L171-L177
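Together with humanize_placeholders this forms a round trip: mask placeholders before sending text to the translation service, then re-insert them afterwards. The "translated" string below is faked for illustration:

msgid = 'Hello %(name)s, you have %d messages'
masked = humanize_placeholders(msgid)
# -> 'Hello __name__, you have __number__ messages'

# Pretend the translation service returned this:
translated = 'Hola __name__, tienes __number__ mensajes'
print(restore_placeholders(msgid, translated))
# -> 'Hola %(name)s, tienes %d mensajes'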
ankitpopli1891/django-autotranslate
autotranslate/management/commands/translate_messages.py
Command.translate_file
python

def translate_file(self, root, file_name, target_language):
    """
    convenience method for translating a pot file

    :param root:            the absolute path of folder where the file is present
    :param file_name:       name of the file to be translated (it should be a pot file)
    :param target_language: language in which the file needs to be translated
    """
    logger.info('filling up translations for locale `{}`'.format(target_language))

    po = polib.pofile(os.path.join(root, file_name))
    strings = self.get_strings_to_translate(po)

    # translate the strings,
    # all the translated strings are returned
    # in the same order on the same index
    # viz. [a, b] -> [trans_a, trans_b]
    tl = get_translator()
    translated_strings = tl.translate_strings(strings, target_language, 'en', False)
    self.update_translations(po, translated_strings)
    po.save()
https://github.com/ankitpopli1891/django-autotranslate/blob/ffdf120fa023b3e399cd37bc23e661a7be7b1718/autotranslate/management/commands/translate_messages.py#L78-L98
ankitpopli1891/django-autotranslate
autotranslate/management/commands/translate_messages.py
Command.get_strings_to_translate
python

def get_strings_to_translate(self, po):
    """Return list of strings to translate from po file.

    :param po: POFile object to translate
    :type po: polib.POFile
    :return: list of strings to translate
    :rtype: collections.Iterable[six.text_type]
    """
    strings = []
    for index, entry in enumerate(po):
        if not self.need_translate(entry):
            continue
        strings.append(humanize_placeholders(entry.msgid))
        if entry.msgid_plural:
            strings.append(humanize_placeholders(entry.msgid_plural))
    return strings
https://github.com/ankitpopli1891/django-autotranslate/blob/ffdf120fa023b3e399cd37bc23e661a7be7b1718/autotranslate/management/commands/translate_messages.py#L103-L118
ankitpopli1891/django-autotranslate
autotranslate/management/commands/translate_messages.py
Command.update_translations
python

def update_translations(self, entries, translated_strings):
    """Update translations in entries.

    The order and number of translations should match to get_strings_to_translate() result.

    :param entries: list of entries to translate
    :type entries: collections.Iterable[polib.POEntry] | polib.POFile
    :param translated_strings: list of translations
    :type translated_strings: collections.Iterable[six.text_type]
    """
    translations = iter(translated_strings)
    for entry in entries:
        if not self.need_translate(entry):
            continue

        if entry.msgid_plural:
            # fill the first plural form with the entry.msgid translation
            translation = next(translations)
            translation = fix_translation(entry.msgid, translation)
            entry.msgstr_plural[0] = translation

            # fill the rest of plural forms with the entry.msgid_plural translation
            translation = next(translations)
            translation = fix_translation(entry.msgid_plural, translation)
            for k, v in entry.msgstr_plural.items():
                if k != 0:
                    entry.msgstr_plural[k] = translation
        else:
            translation = next(translations)
            translation = fix_translation(entry.msgid, translation)
            entry.msgstr = translation

        # Set the 'fuzzy' flag on translation
        if self.set_fuzzy and 'fuzzy' not in entry.flags:
            entry.flags.append('fuzzy')
https://github.com/ankitpopli1891/django-autotranslate/blob/ffdf120fa023b3e399cd37bc23e661a7be7b1718/autotranslate/management/commands/translate_messages.py#L120-L154
julienr/meshcut
examples/ply.py
load_ply
python

def load_ply(fileobj):
    """Same as load_ply, but takes a file-like object"""
    def nextline():
        """Read next line, skip comments"""
        while True:
            line = fileobj.readline()
            assert line != ''  # eof
            if not line.startswith('comment'):
                return line.strip()

    assert nextline() == 'ply'
    assert nextline() == 'format ascii 1.0'
    line = nextline()
    assert line.startswith('element vertex')
    nverts = int(line.split()[2])
    # print 'nverts : ', nverts
    assert nextline() == 'property float x'
    assert nextline() == 'property float y'
    assert nextline() == 'property float z'
    line = nextline()

    assert line.startswith('element face')
    nfaces = int(line.split()[2])
    # print 'nfaces : ', nfaces
    assert nextline() == 'property list uchar int vertex_indices'
    line = nextline()
    has_texcoords = line == 'property list uchar float texcoord'
    if has_texcoords:
        assert nextline() == 'end_header'
    else:
        assert line == 'end_header'

    # Verts
    verts = np.zeros((nverts, 3))
    for i in range(nverts):
        vals = nextline().split()
        verts[i, :] = [float(v) for v in vals[:3]]
    # Faces
    faces = []
    faces_uv = []
    for i in range(nfaces):
        vals = nextline().split()
        assert int(vals[0]) == 3
        faces.append([int(v) for v in vals[1:4]])
        if has_texcoords:
            assert len(vals) == 11
            assert int(vals[4]) == 6
            faces_uv.append([(float(vals[5]), float(vals[6])),
                             (float(vals[7]), float(vals[8])),
                             (float(vals[9]), float(vals[10]))])
            # faces_uv.append([float(v) for v in vals[5:]])
        else:
            assert len(vals) == 4
    return verts, faces, faces_uv
https://github.com/julienr/meshcut/blob/226c79d8da52b657d904f783940c258093c929a5/examples/ply.py#L4-L57
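Because the function takes a file-like object, callers open the file themselves. A usage sketch (the path is hypothetical; the file must be an ASCII PLY matching the header layout asserted above, and the module already imports numpy as np):

with open('mesh.ply') as f:  # hypothetical path to an ASCII PLY file
    verts, faces, faces_uv = load_ply(f)

print(verts.shape)  # (nverts, 3) numpy array of vertex positions
print(len(faces))   # list of triangle index triples
print(faces_uv)     # per-face UV coordinates, empty unless the file has texcoords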
sorend/sshconf
sshconf.py
read_ssh_config
python

def read_ssh_config(path):
    """
    Read ssh config file and return parsed SshConfig
    """
    with open(path, "r") as fh_:
        lines = fh_.read().splitlines()
    return SshConfig(lines)
https://github.com/sorend/sshconf/blob/59f3fc165b1ba9e76ba23444b1205d88462938f3/sshconf.py#L112-L118
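A short usage sketch of the query side, using only the methods shown in these records; the path and host name are placeholders:

import os

config = read_ssh_config(os.path.expanduser('~/.ssh/config'))
# Keys come back lowercased; multi-valued settings come back as lists.
print(config.host('myserver'))  # e.g. {'hostname': '10.0.0.5', 'user': 'deploy'}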
sorend/sshconf
sshconf.py
_remap_key
python

def _remap_key(key):
    """ Change key into correct casing if we know the parameter """
    if key in KNOWN_PARAMS:
        return key
    if key.lower() in known_params:
        return KNOWN_PARAMS[known_params.index(key.lower())]
    return key
https://github.com/sorend/sshconf/blob/59f3fc165b1ba9e76ba23444b1205d88462938f3/sshconf.py#L130-L136
sorend/sshconf
sshconf.py
SshConfig.parse
python

def parse(self, lines):
    """Parse lines from ssh config file"""
    cur_entry = None
    for line in lines:
        kv_ = _key_value(line)
        if len(kv_) > 1:
            key, value = kv_
            if key.lower() == "host":
                cur_entry = value
                self.hosts_.add(value)
            self.lines_.append(ConfigLine(line=line, host=cur_entry, key=key, value=value))
        else:
            self.lines_.append(ConfigLine(line=line))
https://github.com/sorend/sshconf/blob/59f3fc165b1ba9e76ba23444b1205d88462938f3/sshconf.py#L147-L159
sorend/sshconf
sshconf.py
SshConfig.host
python

def host(self, host):
    """
    Return the configuration of a specific host as a dictionary.

    Dictionary always contains lowercase versions of the attribute names.

    Parameters
    ----------
    host : the host to return values for.

    Returns
    -------
    dict of key value pairs, excluding "Host", empty map if host is not found.
    """
    if host in self.hosts_:
        vals = defaultdict(list)
        for k, value in [(x.key.lower(), x.value) for x in self.lines_
                         if x.host == host and x.key.lower() != "host"]:
            vals[k].append(value)
        flatten = lambda x: x[0] if len(x) == 1 else x
        return {k: flatten(v) for k, v in vals.items()}
    return {}
https://github.com/sorend/sshconf/blob/59f3fc165b1ba9e76ba23444b1205d88462938f3/sshconf.py#L171-L192
sorend/sshconf
sshconf.py
SshConfig.set
python

def set(self, host, **kwargs):
    """
    Set configuration values for an existing host.
    Overwrites values for existing settings, or adds new settings.

    Parameters
    ----------
    host : the Host to modify.
    **kwargs : The new configuration parameters
    """
    self.__check_host_args(host, kwargs)

    def update_line(key, value):
        """Produce new config line"""
        return " %s %s" % (key, value)

    for key, values in kwargs.items():
        if type(values) not in [list, tuple]:  # pylint: disable=unidiomatic-typecheck
            values = [values]

        lower_key = key.lower()
        update_idx = [idx for idx, x in enumerate(self.lines_)
                      if x.host == host and x.key.lower() == lower_key]
        extra_remove = []
        for idx in update_idx:
            if values:  # values available, update the line
                value = values.pop()
                self.lines_[idx].line = update_line(self.lines_[idx].key, value)
                self.lines_[idx].value = value
            else:  # no more values available, remove the line
                extra_remove.append(idx)

        for idx in reversed(sorted(extra_remove)):
            del self.lines_[idx]

        if values:
            mapped_key = _remap_key(key)
            max_idx = max([idx for idx, line in enumerate(self.lines_) if line.host == host])
            for value in values:
                self.lines_.insert(max_idx + 1,
                                   ConfigLine(line=update_line(mapped_key, value),
                                              host=host, key=mapped_key, value=value))
https://github.com/sorend/sshconf/blob/59f3fc165b1ba9e76ba23444b1205d88462938f3/sshconf.py#L194-L235
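Continuing the config sketch from read_ssh_config above: set() accepts scalars or lists, where a list maps to one config line per element (useful for repeatable keys such as LocalForward), and unset() matches key names case-insensitively against lowercase arguments:

# Scalar value: one "Port 2222" line is written or updated.
config.set('myserver', Port=2222)

# List value: one line per element.
config.set('myserver', LocalForward=['8080 localhost:80', '8443 localhost:443'])

# Remove a setting again; pass the key in lowercase.
config.unset('myserver', 'localforward')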
sorend/sshconf
sshconf.py
SshConfig.unset
python

def unset(self, host, *args):
    """
    Removes settings for a host.

    Parameters
    ----------
    host : the host to remove settings from.
    *args : list of settings to remove.
    """
    self.__check_host_args(host, args)
    remove_idx = [idx for idx, x in enumerate(self.lines_)
                  if x.host == host and x.key.lower() in args]
    for idx in reversed(sorted(remove_idx)):
        del self.lines_[idx]
Removes settings for a host. Parameters ---------- host : the host to remove settings from. *args : list of settings to remove.
https://github.com/sorend/sshconf/blob/59f3fc165b1ba9e76ba23444b1205d88462938f3/sshconf.py#L237-L250
sorend/sshconf
sshconf.py
SshConfig.__check_host_args
def __check_host_args(self, host, keys):
    """Checks parameters"""
    if host not in self.hosts_:
        raise ValueError("Host %s: not found" % host)

    if "host" in [x.lower() for x in keys]:
        raise ValueError("Cannot modify Host value")
python
Checks parameters
https://github.com/sorend/sshconf/blob/59f3fc165b1ba9e76ba23444b1205d88462938f3/sshconf.py#L252-L258
sorend/sshconf
sshconf.py
SshConfig.rename
def rename(self, old_host, new_host):
    """
    Renames a host configuration.

    Parameters
    ----------
    old_host : the host to rename.
    new_host : the new host value
    """
    if new_host in self.hosts_:
        raise ValueError("Host %s: already exists." % new_host)

    for line in self.lines_:  # update lines
        if line.host == old_host:
            line.host = new_host
            if line.key.lower() == "host":
                line.value = new_host
                line.line = "Host %s" % new_host

    self.hosts_.remove(old_host)  # update host cache
    self.hosts_.add(new_host)
python
Renames a host configuration. Parameters ---------- old_host : the host to rename. new_host : the new host value
https://github.com/sorend/sshconf/blob/59f3fc165b1ba9e76ba23444b1205d88462938f3/sshconf.py#L260-L278
sorend/sshconf
sshconf.py
SshConfig.add
def add(self, host, **kwargs):
    """
    Add another host to the SSH configuration.

    Parameters
    ----------
    host: The Host entry to add.
    **kwargs: The parameters for the host (without "Host" parameter itself)
    """
    if host in self.hosts_:
        raise ValueError("Host %s: exists (use update)." % host)

    self.hosts_.add(host)

    self.lines_.append(ConfigLine(line="", host=None))
    self.lines_.append(ConfigLine(line="Host %s" % host, host=host, key="Host", value=host))
    for k, v in kwargs.items():
        if type(v) not in [list, tuple]:
            v = [v]
        mapped_k = _remap_key(k)
        for value in v:
            self.lines_.append(ConfigLine(line=" %s %s" % (mapped_k, str(value)),
                                          host=host, key=mapped_k, value=value))
    self.lines_.append(ConfigLine(line="", host=None))
python
Add another host to the SSH configuration. Parameters ---------- host: The Host entry to add. **kwargs: The parameters for the host (without "Host" parameter itself)
https://github.com/sorend/sshconf/blob/59f3fc165b1ba9e76ba23444b1205d88462938f3/sshconf.py#L280-L300
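A short sketch of add, assuming the package's empty_ssh_config factory (an assumption; it is not part of this record):

from sshconf import empty_ssh_config  # assumed module-level helper

conf = empty_ssh_config()
conf.add("staging", Hostname="staging.example.com", User="deploy",
         IdentityFile="~/.ssh/staging_key")
# adding the same Host twice raises ValueError ("exists (use update).")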
sorend/sshconf
sshconf.py
SshConfig.remove
def remove(self, host):
    """
    Removes a host from the SSH configuration.

    Parameters
    ----------
    host : The host to remove
    """
    if host not in self.hosts_:
        raise ValueError("Host %s: not found." % host)

    self.hosts_.remove(host)

    # remove lines, including comments inside the host lines
    host_lines = [idx for idx, x in enumerate(self.lines_) if x.host == host]
    remove_range = reversed(range(min(host_lines), max(host_lines) + 1))
    for idx in remove_range:
        del self.lines_[idx]
python
Removes a host from the SSH configuration. Parameters ---------- host : The host to remove
https://github.com/sorend/sshconf/blob/59f3fc165b1ba9e76ba23444b1205d88462938f3/sshconf.py#L302-L317
sorend/sshconf
sshconf.py
SshConfig.write
def write(self, path):
    """
    Writes the ssh config file.

    Parameters
    ----------
    path : The file to write to
    """
    with open(path, "w") as fh_:
        fh_.write(self.config())
python
Writes the ssh config file. Parameters ---------- path : The file to write to
https://github.com/sorend/sshconf/blob/59f3fc165b1ba9e76ba23444b1205d88462938f3/sshconf.py#L325-L334
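Tying the SshConfig methods above together, a hedged end-to-end sketch; the paths and host names are illustrative, and the hosts() accessor is an assumption (it does not appear in these records):

import os
from sshconf import read_ssh_config  # assumed module-level helper

path = os.path.expanduser("~/.ssh/config")
conf = read_ssh_config(path)
if "oldbox" in conf.hosts():  # hosts() assumed to return the set of Host entries
    conf.rename("oldbox", "newbox")
    conf.set("newbox", Hostname="newbox.example.com")
conf.write(path + ".new")  # write a copy instead of clobbering the original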
julienr/meshcut
examples/utils.py
orthogonal_vector
def orthogonal_vector(v):
    """Return an arbitrary vector that is orthogonal to v"""
    if v[1] != 0 or v[2] != 0:
        c = (1, 0, 0)
    else:
        c = (0, 1, 0)
    return np.cross(v, c)
python
Return an arbitrary vector that is orthogonal to v
https://github.com/julienr/meshcut/blob/226c79d8da52b657d904f783940c258093c929a5/examples/utils.py#L18-L24
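The branch on v[1]/v[2] avoids crossing v with a parallel vector, which would yield the zero vector. A quick self-check using orthogonal_vector as defined above:

import numpy as np

for v in ([3.0, -1.0, 2.0], [5.0, 0.0, 0.0]):
    w = orthogonal_vector(np.array(v))
    assert abs(np.dot(np.array(v), w)) < 1e-12  # orthogonal
    assert np.linalg.norm(w) > 0                # and non-degenerate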
julienr/meshcut
examples/utils.py
show_plane
def show_plane(orig, n, scale=1.0, **kwargs):
    """
    Show the plane with the given origin and normal. scale gives its size
    """
    b1 = orthogonal_vector(n)
    b1 /= la.norm(b1)
    b2 = np.cross(b1, n)
    b2 /= la.norm(b2)
    verts = [orig + scale * (-b1 - b2),
             orig + scale * (b1 - b2),
             orig + scale * (b1 + b2),
             orig + scale * (-b1 + b2)]
    faces = [(0, 1, 2), (0, 2, 3)]
    trimesh3d(np.array(verts), faces, **kwargs)
python
Show the plane with the given origin and normal. scale gives its size
https://github.com/julienr/meshcut/blob/226c79d8da52b657d904f783940c258093c929a5/examples/utils.py#L27-L40
julienr/meshcut
misc/experiments.py
slice_triangle_plane
def slice_triangle_plane(verts, tri, plane_orig, plane_norm):
    """
    Args:
        verts : the vertices of the mesh
        tri: the face to cut
        plane_orig: origin of the plane
        plane_norm: normal to the plane
    """
    dists = [point_to_plane_dist(p, plane_orig, plane_norm)
             for p in verts[tri]]

    if np.sign(dists[0]) == np.sign(dists[1]) \
            and np.sign(dists[1]) == np.sign(dists[2]):
        # Triangle is on one side of the plane
        return []

    # Iterate through the edges, cutting the ones that intersect
    intersect_points = []
    for fi in range(3):
        v1 = verts[tri[fi]]
        d1 = dists[fi]
        v2 = verts[tri[(fi + 1) % 3]]
        d2 = dists[(fi + 1) % 3]

        if d1 * d2 < 0:
            # intersection factor (between 0 and 1)
            # here is a nice drawing :
            # https://ravehgonen.files.wordpress.com/2013/02/slide8.png
            s = d1 / (d1 - d2)
            vdir = v2 - v1
            intersect_points.append(v1 + vdir * s)
        elif np.fabs(d1) < 1e-5:
            # point on plane
            intersect_points.append(v1)

    return intersect_points
python
Args: verts : the vertices of the mesh tri: the face to cut plane_orig: origin of the plane plane_norm: normal to the plane
https://github.com/julienr/meshcut/blob/226c79d8da52b657d904f783940c258093c929a5/misc/experiments.py#L57-L92
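The factor s = d1 / (d1 - d2) is plain linear interpolation on signed distances: with d1 = 1 (above the plane) and d2 = -3 (below), s = 1 / (1 - (-3)) = 0.25, so the crossing sits a quarter of the way from v1 to v2. A standalone check of that arithmetic:

import numpy as np

v1, d1 = np.array([0.0, 0.0, 1.0]), 1.0    # one unit above the plane z = 0
v2, d2 = np.array([0.0, 0.0, -3.0]), -3.0  # three units below it
s = d1 / (d1 - d2)                         # = 0.25
crossing = v1 + (v2 - v1) * s
assert np.allclose(crossing, [0.0, 0.0, 0.0])  # lands exactly on the plane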
julienr/meshcut
meshcut.py
triangle_intersects_plane
def triangle_intersects_plane(mesh, tid, plane):
    """
    Returns true if the given triangle is cut by the plane. This will return
    false if a single vertex of the triangle lies on the plane
    """
    dists = [point_to_plane_dist(mesh.verts[vid], plane)
             for vid in mesh.tris[tid]]
    side = np.sign(dists)
    return not (side[0] == side[1] == side[2])
python
Returns true if the given triangle is cut by the plane. This will return false if a single vertex of the triangle lies on the plane
https://github.com/julienr/meshcut/blob/226c79d8da52b657d904f783940c258093c929a5/meshcut.py#L83-L91
julienr/meshcut
meshcut.py
compute_triangle_plane_intersections
def compute_triangle_plane_intersections(mesh, tid, plane, dist_tol=1e-8):
    """
    Compute the intersection between a triangle and a plane

    Returns a list of intersections in the form
        (INTERSECT_EDGE, <intersection point>, <edge>) for edge intersections
        (INTERSECT_VERTEX, <intersection point>, <vertex index>) for vertices

    This returns between 0 and 2 intersections :
    - 0 : the plane does not intersect the triangle
    - 1 : one of the triangle's vertices lies on the plane (so it just
          "touches" the plane without really intersecting)
    - 2 : the plane slices the triangle in two parts (either vertex-edge,
          vertex-vertex or edge-edge)
    """
    # TODO: Use a distance cache
    dists = {vid: point_to_plane_dist(mesh.verts[vid], plane)
             for vid in mesh.tris[tid]}
    # TODO: Use an edge intersection cache (we currently compute each edge
    # intersection twice : once for each tri)

    # This is to avoid registering the same vertex intersection twice
    # from two different edges
    vert_intersect = {vid: False for vid in dists.keys()}

    # Iterate through the edges, cutting the ones that intersect
    intersections = []
    for e in mesh.edges_for_triangle(tid):
        v1 = mesh.verts[e[0]]
        d1 = dists[e[0]]
        v2 = mesh.verts[e[1]]
        d2 = dists[e[1]]

        if np.fabs(d1) < dist_tol:
            # Avoid creating the vertex intersection twice
            if not vert_intersect[e[0]]:
                # point on plane
                intersections.append((INTERSECT_VERTEX, v1, e[0]))
                vert_intersect[e[0]] = True
        if np.fabs(d2) < dist_tol:
            if not vert_intersect[e[1]]:
                # point on plane
                intersections.append((INTERSECT_VERTEX, v2, e[1]))
                vert_intersect[e[1]] = True

        # If vertices are on opposite sides of the plane, we have an edge
        # intersection
        if d1 * d2 < 0:
            # Due to numerical accuracy, we could have both a vertex intersect
            # and an edge intersect on the same vertex, which is impossible
            if not vert_intersect[e[0]] and not vert_intersect[e[1]]:
                # intersection factor (between 0 and 1)
                # here is a nice drawing :
                # https://ravehgonen.files.wordpress.com/2013/02/slide8.png
                # keep in mind d1, d2 are *signed* distances (=> d1 - d2)
                s = d1 / (d1 - d2)
                vdir = v2 - v1
                ipos = v1 + vdir * s
                intersections.append((INTERSECT_EDGE, ipos, e))

    return intersections
python
Compute the intersection between a triangle and a plane Returns a list of intersections in the form (INTERSECT_EDGE, <intersection point>, <edge>) for edge intersections (INTERSECT_VERTEX, <intersection point>, <vertex index>) for vertices This returns between 0 and 2 intersections : - 0 : the plane does not intersect the triangle - 1 : one of the triangle's vertices lies on the plane (so it just "touches" the plane without really intersecting) - 2 : the plane slices the triangle in two parts (either vertex-edge, vertex-vertex or edge-edge)
https://github.com/julienr/meshcut/blob/226c79d8da52b657d904f783940c258093c929a5/meshcut.py#L100-L161
julienr/meshcut
meshcut.py
get_next_triangle
def get_next_triangle(mesh, T, plane, intersection, dist_tol):
    """
    Returns the next triangle to visit given the intersection and
    the list of unvisited triangles (T)

    We look for a triangle that is cut by the plane (2 intersections) as
    opposed to one that only touches the plane (1 vertex intersection)
    """
    if intersection[0] == INTERSECT_EDGE:
        tris = mesh.triangles_for_edge(intersection[2])
    elif intersection[0] == INTERSECT_VERTEX:
        tris = mesh.triangles_for_vert(intersection[2])
    else:
        assert False, 'Invalid intersection[0] value : %d' % intersection[0]

    # Knowing where we come from is not enough. If an edge of the triangle
    # lies exactly on the plane, i.e. :
    #
    #   /t1\
    # -v1---v2-
    #   \t2/
    #
    # With v1, v2 being the vertices and t1, t2 being the triangles, then
    # if you just try to go to the next connected triangle that intersect,
    # you can visit v1 -> t1 -> v2 -> t2 -> v1 .
    # Therefore, we need to limit the new candidates to the set of unvisited
    # triangles and once we've visited a triangle and decided on a next one,
    # remove all the neighbors of the visited triangle so we don't come
    # back to it
    T = set(T)
    for tid in tris:
        if tid in T:
            intersections = compute_triangle_plane_intersections(
                mesh, tid, plane, dist_tol)
            if len(intersections) == 2:
                T = T.difference(tris)
                return tid, intersections, T
    return None, [], T
python
Returns the next triangle to visit given the intersection and the list of unvisited triangles (T) We look for a triangle that is cut by the plane (2 intersections) as opposed to one that only touches the plane (1 vertex intersection)
https://github.com/julienr/meshcut/blob/226c79d8da52b657d904f783940c258093c929a5/meshcut.py#L164-L202
julienr/meshcut
meshcut.py
_walk_polyline
def _walk_polyline(tid, intersect, T, mesh, plane, dist_tol):
    """
    Given an intersection, walk through the mesh triangles, computing the
    intersection with the cut plane for each visited triangle and adding
    those intersections to a polyline.
    """
    T = set(T)
    p = []
    # Loop until we have explored all the triangles for the current
    # polyline
    while True:
        p.append(intersect[1])

        tid, intersections, T = get_next_triangle(mesh, T, plane,
                                                  intersect, dist_tol)
        if tid is None:
            break

        # get_next_triangle returns triangles that our plane actually
        # intersects (as opposed to touching only a single vertex),
        # hence the assert
        assert len(intersections) == 2

        # Of the two returned intersections, one should have the
        # intersection point equal to p[-1]
        if la.norm(intersections[0][1] - p[-1]) < dist_tol:
            intersect = intersections[1]
        else:
            assert la.norm(intersections[1][1] - p[-1]) < dist_tol, \
                '%s not close to %s' % (str(p[-1]), str(intersections))
            intersect = intersections[0]

    return p, T
python
Given an intersection, walk through the mesh triangles, computing the intersection with the cut plane for each visited triangle and adding those intersections to a polyline.
https://github.com/julienr/meshcut/blob/226c79d8da52b657d904f783940c258093c929a5/meshcut.py#L205-L237
julienr/meshcut
meshcut.py
cross_section_mesh
def cross_section_mesh(mesh, plane, dist_tol=1e-8):
    """
    Args:
        mesh: A geom.TriangleMesh instance
        plane: The cut plane : geom.Plane instance
        dist_tol: If two points are closer than dist_tol, they are considered
                  the same
    """
    # Set of all triangles
    T = set(range(len(mesh.tris)))
    # List of all cross-section polylines
    P = []

    while len(T) > 0:
        tid = T.pop()

        intersections = compute_triangle_plane_intersections(
            mesh, tid, plane, dist_tol)

        if len(intersections) == 2:
            for intersection in intersections:
                p, T = _walk_polyline(tid, intersection, T, mesh, plane,
                                      dist_tol)
                if len(p) > 1:
                    P.append(np.array(p))
    return P
python
Args: mesh: A geom.TriangleMesh instance plane: The cut plane : geom.Plane instance dist_tol: If two points are closer than dist_tol, they are considered the same
https://github.com/julienr/meshcut/blob/226c79d8da52b657d904f783940c258093c929a5/meshcut.py#L240-L265
julienr/meshcut
meshcut.py
cross_section
def cross_section(verts, tris, plane_orig, plane_normal, **kwargs):
    """
    Compute the planar cross section of a mesh. This returns a set of
    polylines.

    Args:
        verts: Nx3 array of the vertex positions
        tris: Mx3 array of the faces, containing vertex indices
        plane_orig: 3-vector indicating the plane origin
        plane_normal: 3-vector indicating the plane normal

    Returns:
        A list of Nx3 arrays, each representing a disconnected portion
        of the cross section as a polyline
    """
    mesh = TriangleMesh(verts, tris)
    plane = Plane(plane_orig, plane_normal)
    return cross_section_mesh(mesh, plane, **kwargs)
python
Compute the planar cross section of a mesh. This returns a set of polylines. Args: verts: Nx3 array of the vertex positions tris: Mx3 array of the faces, containing vertex indices plane_orig: 3-vector indicating the plane origin plane_normal: 3-vector indicating the plane normal Returns: A list of Nx3 arrays, each representing a disconnected portion of the cross section as a polyline
https://github.com/julienr/meshcut/blob/226c79d8da52b657d904f783940c258093c929a5/meshcut.py#L268-L285
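A usage sketch for cross_section, cutting a tetrahedron with the plane z = 0.5; the mesh is made up for illustration:

import numpy as np
import meshcut

verts = np.array([[0.0, 0.0, 0.0],
                  [1.0, 0.0, 0.0],
                  [0.0, 1.0, 0.0],
                  [0.0, 0.0, 1.0]])
tris = [(0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3)]
polylines = meshcut.cross_section(verts, tris,
                                  plane_orig=(0, 0, 0.5),
                                  plane_normal=(0, 0, 1))
for p in polylines:
    print(p.shape)  # each polyline is an Nx3 array of points with z == 0.5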
julienr/meshcut
meshcut.py
pdist_squareformed_numpy
def pdist_squareformed_numpy(a):
    """
    Compute pairwise *squared* spatial distances using pure numpy
    (a squareform counterpart to scipy.spatial.distance.cdist())

    Thanks to Divakar Roy (@droyed) at stackoverflow.com

    Note this needs at least np.float64 precision!

    Returns: dist, the matrix of squared distances
    """
    a = np.array(a, dtype=np.float64)
    a_sumrows = np.einsum('ij,ij->i', a, a)
    dist = a_sumrows[:, None] + a_sumrows - 2 * np.dot(a, a.T)
    np.fill_diagonal(dist, 0)
    return dist
python
Compute pairwise *squared* spatial distances using pure numpy (a squareform counterpart to scipy.spatial.distance.cdist()) Thanks to Divakar Roy (@droyed) at stackoverflow.com Note this needs at least np.float64 precision! Returns: dist, the matrix of squared distances
https://github.com/julienr/meshcut/blob/226c79d8da52b657d904f783940c258093c929a5/meshcut.py#L288-L303
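Because the function returns squared distances, a square root recovers ordinary Euclidean distances. A self-contained check using pdist_squareformed_numpy as defined above:

import numpy as np

pts = np.array([[0.0, 0.0], [3.0, 4.0], [6.0, 8.0]])
d2 = pdist_squareformed_numpy(pts)
assert np.isclose(np.sqrt(d2[0, 1]), 5.0)   # |(3, 4)| = 5
assert np.isclose(np.sqrt(d2[0, 2]), 10.0)  # |(6, 8)| = 10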
julienr/meshcut
meshcut.py
merge_close_vertices
def merge_close_vertices(verts, faces, close_epsilon=1e-5):
    """
    Will merge vertices that are closer than close_epsilon.

    Warning, this has a O(n^2) memory usage because we compute the full
    vert-to-vert distance matrix. If you have a large mesh, you might want
    to use some kind of spatial search structure like an octree or some
    fancy hashing scheme.

    Returns: new_verts, new_faces
    """
    # Pairwise distance between verts
    if USE_SCIPY:
        D = spdist.cdist(verts, verts)
    else:
        D = np.sqrt(np.abs(pdist_squareformed_numpy(verts)))

    # Compute a mapping from old to new : for each input vert, store the index
    # of the new vert it will be merged into
    # (plain int/bool are used here: the np.int/np.bool aliases were removed
    # in NumPy 1.24)
    old2new = np.zeros(D.shape[0], dtype=int)
    # A mask indicating if a vertex has already been merged into another
    merged_verts = np.zeros(D.shape[0], dtype=bool)
    new_verts = []
    for i in range(D.shape[0]):
        if merged_verts[i]:
            continue
        else:
            # The vertices that will be merged into this one
            merged = np.flatnonzero(D[i, :] < close_epsilon)
            old2new[merged] = len(new_verts)
            new_verts.append(verts[i])
            merged_verts[merged] = True

    new_verts = np.array(new_verts)

    # Recompute face indices to index in new_verts
    new_faces = np.zeros((len(faces), 3), dtype=int)
    for i, f in enumerate(faces):
        new_faces[i] = (old2new[f[0]], old2new[f[1]], old2new[f[2]])

    # again, plot with utils.trimesh3d(new_verts, new_faces)
    return new_verts, new_faces
python
Will merge vertices that are closer than close_epsilon. Warning, this has a O(n^2) memory usage because we compute the full vert-to-vert distance matrix. If you have a large mesh, you might want to use some kind of spatial search structure like an octree or some fancy hashing scheme. Returns: new_verts, new_faces
https://github.com/julienr/meshcut/blob/226c79d8da52b657d904f783940c258093c929a5/meshcut.py#L306-L347
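A small demonstration of merge_close_vertices: two triangles that duplicate their shared edge's vertices (a common STL artifact) collapse to four unique vertices. The import path is assumed from the record above:

import numpy as np
from meshcut import merge_close_vertices  # assumed import path

verts = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0],
                  [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]])
faces = [(0, 1, 2), (3, 5, 4)]
new_verts, new_faces = merge_close_vertices(verts, faces)
assert len(new_verts) == 4  # vertices 3 and 4 were merged into 1 and 2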
aequitas/python-rflink
rflink/parser.py
signed_to_float
def signed_to_float(hex: str) -> float:
    """Convert signed hexadecimal to floating value."""
    if int(hex, 16) & 0x8000:
        return -(int(hex, 16) & 0x7FFF) / 10
    else:
        return int(hex, 16) / 10
python
Convert signed hexadecimal to floating value.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/parser.py#L170-L175
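The top bit of the 16-bit value is a sign flag and the remaining 15 bits count tenths of a unit, which is how RFLink encodes temperatures. Using signed_to_float as defined above:

assert signed_to_float('00F5') == 24.5   # 0x00F5 = 245 tenths
assert signed_to_float('8001') == -0.1   # sign bit set, magnitude 1 tenth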
aequitas/python-rflink
rflink/parser.py
decode_packet
def decode_packet(packet: str) -> dict:
    """Break packet down into primitives, and do basic interpretation.

    >>> decode_packet('20;06;Kaku;ID=41;SWITCH=1;CMD=ON;') == {
    ...     'node': 'gateway',
    ...     'protocol': 'kaku',
    ...     'id': '000041',
    ...     'switch': '1',
    ...     'command': 'on',
    ... }
    True
    """
    node_id, _, protocol, attrs = packet.split(DELIM, 3)

    data = cast(Dict[str, Any], {
        'node': PacketHeader(node_id).name,
    })

    # make exception for version response
    data['protocol'] = UNKNOWN
    if '=' in protocol:
        attrs = protocol + DELIM + attrs

    # no attributes but instead the welcome banner
    elif 'RFLink Gateway' in protocol:
        data.update(parse_banner(protocol))

    elif protocol == 'PONG':
        data['ping'] = protocol.lower()

    # debug response
    elif protocol == 'DEBUG':
        data['protocol'] = protocol.lower()
        data['tm'] = packet[3:5]

    # failure response
    elif protocol == 'CMD UNKNOWN':
        data['response'] = 'command_unknown'
        data['ok'] = False

    # ok response
    elif protocol == 'OK':
        data['ok'] = True

    # it's a regular packet
    else:
        data['protocol'] = protocol.lower()

    # convert key=value pairs where needed
    for attr in filter(None, attrs.strip(DELIM).split(DELIM)):
        key, value = attr.lower().split('=')
        if key in VALUE_TRANSLATION:
            value = VALUE_TRANSLATION.get(key)(value)
        name = PACKET_FIELDS.get(key, key)
        data[name] = value

        unit = UNITS.get(key, None)
        if unit:
            data[name + '_unit'] = unit

    # correct KaKu device address
    if data.get('protocol', '') == 'kaku' and len(data['id']) != 6:
        data['id'] = '0000' + data['id']

    return data
python
Break packet down into primitives, and do basic interpretation. >>> decode_packet('20;06;Kaku;ID=41;SWITCH=1;CMD=ON;') == { ... 'node': 'gateway', ... 'protocol': 'kaku', ... 'id': '000041', ... 'switch': '1', ... 'command': 'on', ... } True
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/parser.py#L225-L289
aequitas/python-rflink
rflink/parser.py
encode_packet
def encode_packet(packet: dict) -> str:
    """Construct packet string from packet dictionary.

    >>> encode_packet({
    ...     'protocol': 'newkaku',
    ...     'id': '000001',
    ...     'switch': '01',
    ...     'command': 'on',
    ... })
    '10;newkaku;000001;01;on;'
    """
    if packet['protocol'] == 'rfdebug':
        return '10;RFDEBUG=' + packet['command'] + ';'
    elif packet['protocol'] == 'rfudebug':
        return '10;RFDEBUG=' + packet['command'] + ';'
    else:
        return SWITCH_COMMAND_TEMPLATE.format(
            node=PacketHeader.master.value,
            **packet
        )
python
Construct packet string from packet dictionary. >>> encode_packet({ ... 'protocol': 'newkaku', ... 'id': '000001', ... 'switch': '01', ... 'command': 'on', ... }) '10;newkaku;000001;01;on;'
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/parser.py#L297-L316
aequitas/python-rflink
rflink/parser.py
serialize_packet_id
def serialize_packet_id(packet: dict) -> str:
    """Serialize packet identifiers into one reversible string.

    >>> serialize_packet_id({
    ...     'protocol': 'newkaku',
    ...     'id': '000001',
    ...     'switch': '01',
    ...     'command': 'on',
    ... })
    'newkaku_000001_01'
    >>> serialize_packet_id({
    ...     'protocol': 'ikea koppla',
    ...     'id': '000080',
    ...     'switch': '0',
    ...     'command': 'on',
    ... })
    'ikeakoppla_000080_0'
    >>> # unserializable protocol name without explicit entry
    >>> # in translation table should be properly serialized
    >>> serialize_packet_id({
    ...     'protocol': 'alecto v4',
    ...     'id': '000080',
    ...     'switch': '0',
    ...     'command': 'on',
    ... })
    'alectov4_000080_0'
    """
    # translate protocol into something reversible
    protocol = protocol_translations[packet['protocol']]
    if protocol == UNKNOWN:
        protocol = 'rflink'

    return '_'.join(filter(None, [
        protocol,
        packet.get('id', None),
        packet.get('switch', None),
    ]))
python
Serialize packet identifiers into one reversible string. >>> serialize_packet_id({ ... 'protocol': 'newkaku', ... 'id': '000001', ... 'switch': '01', ... 'command': 'on', ... }) 'newkaku_000001_01' >>> serialize_packet_id({ ... 'protocol': 'ikea koppla', ... 'id': '000080', ... 'switch': '0', ... 'command': 'on', ... }) 'ikeakoppla_000080_0' >>> # unserializable protocol name without explicit entry >>> # in translation table should be properly serialized >>> serialize_packet_id({ ... 'protocol': 'alecto v4', ... 'id': '000080', ... 'switch': '0', ... 'command': 'on', ... }) 'alectov4_000080_0'
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/parser.py#L353-L390
aequitas/python-rflink
rflink/parser.py
deserialize_packet_id
def deserialize_packet_id(packet_id: str) -> dict:
    r"""Turn a packet id into individual packet components.

    >>> deserialize_packet_id('newkaku_000001_01') == {
    ...     'protocol': 'newkaku',
    ...     'id': '000001',
    ...     'switch': '01',
    ... }
    True
    >>> deserialize_packet_id('ikeakoppla_000080_0') == {
    ...     'protocol': 'ikea koppla',
    ...     'id': '000080',
    ...     'switch': '0',
    ... }
    True
    """
    if packet_id == 'rflink':
        return {'protocol': UNKNOWN}

    protocol, *id_switch = packet_id.split(PACKET_ID_SEP)
    assert len(id_switch) < 3

    packet_identifiers = {
        # lookup the reverse translation of the protocol in the translation
        # table, fallback to protocol. If this is an unserializable protocol
        # name, it has not been serialized before and is not in the
        # translate_protocols table; this will result in an invalid command.
        'protocol': protocol_translations.get(protocol, protocol),
    }
    if id_switch:
        packet_identifiers['id'] = id_switch[0]
    if len(id_switch) > 1:
        packet_identifiers['switch'] = id_switch[1]

    return packet_identifiers
python
Turn a packet id into individual packet components. >>> deserialize_packet_id('newkaku_000001_01') == { ... 'protocol': 'newkaku', ... 'id': '000001', ... 'switch': '01', ... } True >>> deserialize_packet_id('ikeakoppla_000080_0') == { ... 'protocol': 'ikea koppla', ... 'id': '000080', ... 'switch': '0', ... } True
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/parser.py#L393-L427
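serialize_packet_id and deserialize_packet_id are inverses on the identifying fields; non-identifying keys such as command are dropped. A round-trip sketch using the two functions above:

packet = {'protocol': 'newkaku', 'id': '000001',
          'switch': '01', 'command': 'on'}
packet_id = serialize_packet_id(packet)  # 'newkaku_000001_01'
assert deserialize_packet_id(packet_id) == {
    'protocol': 'newkaku', 'id': '000001', 'switch': '01'}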
aequitas/python-rflink
rflink/parser.py
packet_events
def packet_events(packet: dict) -> Generator:
    """Yield all events in the packet.

    >>> x = list(packet_events({
    ...     'protocol': 'alecto v1',
    ...     'id': 'ec02',
    ...     'temperature': 1.0,
    ...     'temperature_unit': '°C',
    ...     'humidity': 10,
    ...     'humidity_unit': '%',
    ... }))
    >>> assert {
    ...     'id': 'alectov1_ec02_temp',
    ...     'sensor': 'temperature',
    ...     'value': 1.0,
    ...     'unit': '°C',
    ... } in x
    >>> assert {
    ...     'id': 'alectov1_ec02_hum',
    ...     'sensor': 'humidity',
    ...     'value': 10,
    ...     'unit': '%',
    ... } in x
    >>> y = list(packet_events({
    ...     'protocol': 'newkaku',
    ...     'id': '000001',
    ...     'switch': '01',
    ...     'command': 'on',
    ... }))
    >>> assert {'id': 'newkaku_000001_01', 'command': 'on'} in y
    """
    field_abbrev = {v: k for k, v in PACKET_FIELDS.items()}

    packet_id = serialize_packet_id(packet)
    events = {f: v for f, v in packet.items() if f in field_abbrev}
    if 'command' in events or 'version' in events:
        # switch events only have one event in each packet
        yield dict(id=packet_id, **events)
    else:
        if packet_id == 'debug':
            yield {
                'id': 'raw',
                'value': packet.get('pulses(usec)'),
                'tm': packet.get('tm'),
                'pulses': packet.get('pulses'),
            }
        else:
            # sensors can have multiple
            for sensor, value in events.items():
                unit = packet.get(sensor + '_unit', None)
                yield {
                    'id': packet_id + PACKET_ID_SEP + field_abbrev[sensor],
                    'sensor': sensor,
                    'value': value,
                    'unit': unit,
                }
            if packet_id != 'rflink':
                yield {
                    'id': packet_id + PACKET_ID_SEP + 'update_time',
                    'sensor': 'update_time',
                    'value': round(time.time()),
                    'unit': 's',
                }
python
Yield all events in the packet. >>> x = list(packet_events({ ... 'protocol': 'alecto v1', ... 'id': 'ec02', ... 'temperature': 1.0, ... 'temperature_unit': '°C', ... 'humidity': 10, ... 'humidity_unit': '%', ... })) >>> assert { ... 'id': 'alectov1_ec02_temp', ... 'sensor': 'temperature', ... 'value': 1.0, ... 'unit': '°C', ... } in x >>> assert { ... 'id': 'alectov1_ec02_hum', ... 'sensor': 'humidity', ... 'value': 10, ... 'unit': '%', ... } in x >>> y = list(packet_events({ ... 'protocol': 'newkaku', ... 'id': '000001', ... 'switch': '01', ... 'command': 'on', ... })) >>> assert {'id': 'newkaku_000001_01', 'command': 'on'} in y
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/parser.py#L430-L493
aequitas/python-rflink
rflinkproxy/__main__.py
decode_tx_packet
def decode_tx_packet(packet: str) -> dict:
    """Break packet down into primitives, and do basic interpretation.

    >>> decode_tx_packet('10;kaku;000041;1;on;') == {
    ...     'node': 'master',
    ...     'protocol': 'kaku',
    ...     'id': '000041',
    ...     'switch': '1',
    ...     'command': 'on',
    ... }
    True
    """
    node_id, protocol, attrs = packet.split(DELIM, 2)

    data = cast(Dict[str, Any], {
        'node': PacketHeader(node_id).name,
    })

    data['protocol'] = protocol.lower()

    for i, attr in enumerate(filter(None, attrs.strip(DELIM).split(DELIM))):
        if i == 0:
            data['id'] = attr
        if i == 1:
            data['switch'] = attr
        if i == 2:
            data['command'] = attr

    # correct KaKu device address
    if data.get('protocol', '') == 'kaku' and len(data['id']) != 6:
        data['id'] = '0000' + data['id']

    return data
python
Break packet down into primitives, and do basic interpretation. >>> decode_tx_packet('10;kaku;000041;1;on;') == { ... 'node': 'master', ... 'protocol': 'kaku', ... 'id': '000041', ... 'switch': '1', ... 'command': 'on', ... } True
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflinkproxy/__main__.py#L82-L114
aequitas/python-rflink
rflinkproxy/__main__.py
main
def main(argv=sys.argv[1:], loop=None):
    """Parse arguments and set up the main program loop."""
    args = docopt(__doc__, argv=argv,
                  version=pkg_resources.require('rflink')[0].version)

    level = logging.ERROR
    if args['-v']:
        level = logging.INFO
    if args['-v'] == 2:
        level = logging.DEBUG
    logging.basicConfig(level=level)

    if not loop:
        loop = asyncio.get_event_loop()

    host = args['--host']
    port = args['--port']
    baud = args['--baud']
    listenport = args['--listenport']

    proxy = RFLinkProxy(port=port, host=host, baud=baud, loop=loop)

    server_coro = asyncio.start_server(
        proxy.client_connected_callback,
        host="",
        port=listenport,
        loop=loop,
    )
    server = loop.run_until_complete(server_coro)

    addr = server.sockets[0].getsockname()
    log.info('Serving on %s', addr)

    conn_coro = proxy.connect()
    loop.run_until_complete(conn_coro)
    proxy.closing = False

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        proxy.closing = True

        # cleanup server
        server.close()
        loop.run_until_complete(server.wait_closed())

        # cleanup server connections
        writers = [i[1] for i in list(clients)]
        for writer in writers:
            writer.close()
            if sys.version_info >= (3, 7):
                loop.run_until_complete(writer.wait_closed())

        # cleanup RFLink connection
        proxy.transport.close()
    finally:
        loop.close()
python
Parse arguments and set up the main program loop.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflinkproxy/__main__.py#L264-L321
aequitas/python-rflink
rflinkproxy/__main__.py
ProxyProtocol.handle_raw_packet
def handle_raw_packet(self, raw_packet):
    """Parse raw packet string into packet dict."""
    log.debug('got packet: %s', raw_packet)
    packet = None
    try:
        packet = decode_packet(raw_packet)
    except Exception:  # a bare except would also swallow KeyboardInterrupt
        log.exception('failed to parse packet: %s', raw_packet)

    log.debug('decoded packet: %s', packet)

    if packet:
        if 'ok' in packet:
            # handle response packets internally
            log.debug('command response: %s', packet)
            self._last_ack = packet
            self._command_ack.set()
        elif self.raw_callback:
            self.raw_callback(raw_packet)
    else:
        log.warning('no valid packet')
python
Parse raw packet string into packet dict.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflinkproxy/__main__.py#L59-L79
aequitas/python-rflink
rflinkproxy/__main__.py
RFLinkProxy.handle_raw_tx_packet
def handle_raw_tx_packet(self, writer, raw_packet):
    """Parse raw packet string into packet dict."""
    peer = writer.get_extra_info('peername')
    log.debug(' %s:%s: processing data: %s', peer[0], peer[1], raw_packet)
    packet = None
    try:
        packet = decode_tx_packet(raw_packet)
    except Exception:  # a bare except would also swallow KeyboardInterrupt
        log.exception(' %s:%s: failed to parse packet: %s',
                      peer[0], peer[1], raw_packet)
    log.debug(' %s:%s: decoded packet: %s', peer[0], peer[1], packet)

    if self.protocol and packet:
        if ';PING;' not in raw_packet:
            log.info(' %s:%s: forwarding packet %s to RFLink',
                     peer[0], peer[1], raw_packet)
        else:
            log.debug(' %s:%s: forwarding packet %s to RFLink',
                      peer[0], peer[1], raw_packet)
        yield from self.forward_packet(writer, packet, raw_packet)
    else:
        log.warning(' %s:%s: no valid packet %s', peer[0], peer[1], packet)
python
Parse raw packet string into packet dict.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflinkproxy/__main__.py#L131-L150
aequitas/python-rflink
rflinkproxy/__main__.py
RFLinkProxy.forward_packet
def forward_packet(self, writer, packet, raw_packet):
    """Forward packet from client to RFLink."""
    peer = writer.get_extra_info('peername')
    log.debug(' %s:%s: forwarding data: %s', peer[0], peer[1], packet)
    if 'command' in packet:
        packet_id = serialize_packet_id(packet)
        command = packet['command']
        ack = yield from self.protocol.send_command_ack(
            packet_id, command)
        if ack:
            writer.write("20;00;OK;".encode() + CRLF)
        for _ in range(DEFAULT_SIGNAL_REPETITIONS - 1):
            yield from self.protocol.send_command_ack(
                packet_id, command)
    else:
        self.protocol.send_raw_packet(raw_packet)
python
Forward packet from client to RFLink.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflinkproxy/__main__.py#L153-L168
aequitas/python-rflink
rflinkproxy/__main__.py
RFLinkProxy.client_connected_callback
def client_connected_callback(self, reader, writer):
    """Handle connected client."""
    peer = writer.get_extra_info('peername')
    clients.append((reader, writer, peer))
    log.info("Incoming connection from: %s:%s", peer[0], peer[1])
    try:
        while True:
            data = yield from reader.readline()
            if not data:
                break
            try:
                line = data.decode().strip()
            except UnicodeDecodeError:
                line = '\x00'

            # Workaround for domoticz issue #2816
            if not line.endswith(DELIM):  # also avoids IndexError on an empty line
                line = line + DELIM

            if valid_packet(line):
                yield from self.handle_raw_tx_packet(writer, line)
            else:
                log.warning(" %s:%s: dropping invalid data: '%s'",
                            peer[0], peer[1], line)
    except ConnectionResetError:
        pass
    except Exception as e:
        log.exception(e)

    log.info("Disconnected from: %s:%s", peer[0], peer[1])
    writer.close()
    clients.remove((reader, writer, peer))
python
Handle connected client.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflinkproxy/__main__.py#L171-L202
aequitas/python-rflink
rflinkproxy/__main__.py
RFLinkProxy.raw_callback
def raw_callback(self, raw_packet):
    """Send data to all connected clients."""
    if ';PONG;' not in raw_packet:
        log.info('forwarding packet %s to clients', raw_packet)
    else:
        log.debug('forwarding packet %s to clients', raw_packet)
    writers = [i[1] for i in list(clients)]
    for writer in writers:
        writer.write(str(raw_packet).encode() + CRLF)
python
Send data to all connected clients.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflinkproxy/__main__.py#L204-L212
aequitas/python-rflink
rflinkproxy/__main__.py
RFLinkProxy.reconnect
def reconnect(self, exc=None):
    """Schedule reconnect after connection has been unexpectedly lost."""
    # Reset protocol binding before starting reconnect
    self.protocol = None

    if not self.closing:
        log.warning('disconnected from Rflink, reconnecting')
        self.loop.create_task(self.connect())
python
Schedule reconnect after connection has been unexpectedly lost.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflinkproxy/__main__.py#L214-L221
aequitas/python-rflink
rflinkproxy/__main__.py
RFLinkProxy.connect
async def connect(self):
    """Set up connection and hook it into HA for reconnect/shutdown."""
    import serial

    log.info('Initiating Rflink connection')

    # Rflink create_rflink_connection decides based on the value of host
    # (string or None) if serial or tcp mode should be used

    # Setup protocol
    protocol = partial(
        ProxyProtocol,
        disconnect_callback=self.reconnect,
        raw_callback=self.raw_callback,
        loop=self.loop,
    )

    # Initiate serial/tcp connection to Rflink gateway
    if self.host:
        connection = self.loop.create_connection(protocol, self.host,
                                                 self.port)
    else:
        connection = create_serial_connection(self.loop, protocol,
                                              self.port, self.baud)

    try:
        with async_timeout.timeout(CONNECTION_TIMEOUT, loop=self.loop):
            self.transport, self.protocol = await connection
    except (serial.serialutil.SerialException, ConnectionRefusedError,
            TimeoutError, OSError, asyncio.TimeoutError) as exc:
        reconnect_interval = DEFAULT_RECONNECT_INTERVAL
        log.error(
            "Error connecting to Rflink, reconnecting in %s",
            reconnect_interval)
        self.loop.call_later(reconnect_interval, self.reconnect, exc)
        return

    log.info('Connected to Rflink')
python
Set up connection and hook it into HA for reconnect/shutdown.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflinkproxy/__main__.py#L223-L261
aequitas/python-rflink
rflink/protocol.py
create_rflink_connection
def create_rflink_connection(port=None, host=None, baud=57600,
                             protocol=RflinkProtocol, packet_callback=None,
                             event_callback=None, disconnect_callback=None,
                             ignore=None, loop=None):
    """Create Rflink manager class, returns transport coroutine."""
    # use default protocol if not specified
    protocol = partial(
        protocol,
        loop=loop if loop else asyncio.get_event_loop(),
        packet_callback=packet_callback,
        event_callback=event_callback,
        disconnect_callback=disconnect_callback,
        ignore=ignore if ignore else [],
    )

    # setup serial connection if no transport specified
    if host:
        conn = loop.create_connection(protocol, host, port)
    else:
        conn = create_serial_connection(loop, protocol, port, baud)

    return conn
python
def create_rflink_connection(port=None, host=None, baud=57600,
                             protocol=RflinkProtocol, packet_callback=None,
                             event_callback=None, disconnect_callback=None,
                             ignore=None, loop=None):
    """Create Rflink manager class, returns transport coroutine."""
    # use default protocol if not specified
    protocol = partial(
        protocol,
        loop=loop if loop else asyncio.get_event_loop(),
        packet_callback=packet_callback,
        event_callback=event_callback,
        disconnect_callback=disconnect_callback,
        ignore=ignore if ignore else [],
    )

    # setup serial connection if no transport specified
    if host:
        conn = loop.create_connection(protocol, host, port)
    else:
        conn = create_serial_connection(loop, protocol, port, baud)

    return conn
Create Rflink manager class, returns transport coroutine.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/protocol.py#L304-L325
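A minimal usage sketch for the factory above; it assumes the rflink package is importable, and the host and port are placeholder values:

import asyncio

from rflink.protocol import create_rflink_connection


def print_packet(packet):
    """Illustrative packet callback: just dump the parsed dict."""
    print('received', packet)


loop = asyncio.get_event_loop()
# TCP mode is selected because host is given; omit host for serial mode
conn = create_rflink_connection(
    host='192.168.1.10',  # placeholder address
    port=1234,            # placeholder port
    packet_callback=print_packet,
    loop=loop,
)
transport, protocol = loop.run_until_complete(conn)
loop.run_forever()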
aequitas/python-rflink
rflink/protocol.py
ProtocolBase.data_received
def data_received(self, data):
    """Add incoming data to buffer."""
    data = data.decode()
    log.debug('received data: %s', data.strip())
    self.buffer += data
    self.handle_lines()
python
def data_received(self, data):
    """Add incoming data to buffer."""
    data = data.decode()
    log.debug('received data: %s', data.strip())
    self.buffer += data
    self.handle_lines()
Add incoming data to buffer.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/protocol.py#L49-L54
aequitas/python-rflink
rflink/protocol.py
ProtocolBase.handle_lines
def handle_lines(self):
    """Assemble incoming data into per-line packets."""
    while "\r\n" in self.buffer:
        line, self.buffer = self.buffer.split("\r\n", 1)
        if valid_packet(line):
            self.handle_raw_packet(line)
        else:
            log.warning('dropping invalid data: %s', line)
python
def handle_lines(self):
    """Assemble incoming data into per-line packets."""
    while "\r\n" in self.buffer:
        line, self.buffer = self.buffer.split("\r\n", 1)
        if valid_packet(line):
            self.handle_raw_packet(line)
        else:
            log.warning('dropping invalid data: %s', line)
Assemble incoming data into per-line packets.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/protocol.py#L56-L63
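The buffer-splitting logic above is easy to verify in isolation. This standalone sketch (with the validity check stubbed out, not the real valid_packet) shows how partial TCP reads assemble into complete lines:

buffer = ''

def feed(data):
    """Mimic data_received/handle_lines: buffer chunks, emit full lines."""
    global buffer
    buffer += data
    lines = []
    while '\r\n' in buffer:
        line, buffer = buffer.split('\r\n', 1)
        lines.append(line)
    return lines

print(feed('20;E6;Kak'))                 # [] - incomplete, stays buffered
print(feed('u;ID=41;SWITCH=1;\r\n20;'))  # ['20;E6;Kaku;ID=41;SWITCH=1;']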
aequitas/python-rflink
rflink/protocol.py
ProtocolBase.send_raw_packet
def send_raw_packet(self, packet: str):
    """Encode and put packet string onto write buffer."""
    data = packet + '\r\n'
    log.debug('writing data: %s', repr(data))
    self.transport.write(data.encode())
python
def send_raw_packet(self, packet: str):
    """Encode and put packet string onto write buffer."""
    data = packet + '\r\n'
    log.debug('writing data: %s', repr(data))
    self.transport.write(data.encode())
Encode and put packet string onto write buffer.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/protocol.py#L69-L73
aequitas/python-rflink
rflink/protocol.py
ProtocolBase.log_all
def log_all(self, file):
    """Log all data received from RFLink to file."""
    global rflink_log
    if file is None:
        rflink_log = None
    else:
        log.debug('logging to: %s', file)
        rflink_log = open(file, 'a')
python
def log_all(self, file):
    """Log all data received from RFLink to file."""
    global rflink_log
    if file is None:
        rflink_log = None
    else:
        log.debug('logging to: %s', file)
        rflink_log = open(file, 'a')
Log all data received from RFLink to file.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/protocol.py#L75-L82
aequitas/python-rflink
rflink/protocol.py
ProtocolBase.connection_lost
def connection_lost(self, exc):
    """Log when connection is closed, if needed call callback."""
    if exc:
        log.exception('disconnected due to exception')
    else:
        log.info('disconnected because of close/abort.')

    if self.disconnect_callback:
        self.disconnect_callback(exc)
python
def connection_lost(self, exc):
    """Log when connection is closed, if needed call callback."""
    if exc:
        log.exception('disconnected due to exception')
    else:
        log.info('disconnected because of close/abort.')

    if self.disconnect_callback:
        self.disconnect_callback(exc)
Log when connection is closed, if needed call callback.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/protocol.py#L84-L91
aequitas/python-rflink
rflink/protocol.py
PacketHandling.handle_raw_packet
def handle_raw_packet(self, raw_packet):
    """Parse raw packet string into packet dict."""
    log.debug('got packet: %s', raw_packet)
    if rflink_log:
        print(raw_packet, file=rflink_log)
        rflink_log.flush()
    packet = None
    try:
        packet = decode_packet(raw_packet)
    except Exception:
        # log the raw packet; the decoded packet is unavailable on failure
        log.exception('failed to parse packet: %s', raw_packet)

    log.debug('decoded packet: %s', packet)

    if packet:
        if 'ok' in packet:
            # handle response packets internally
            log.debug('command response: %s', packet)
            self._last_ack = packet
            self._command_ack.set()
        else:
            self.handle_packet(packet)
    else:
        log.warning('no valid packet')
python
def handle_raw_packet(self, raw_packet):
    """Parse raw packet string into packet dict."""
    log.debug('got packet: %s', raw_packet)
    if rflink_log:
        print(raw_packet, file=rflink_log)
        rflink_log.flush()
    packet = None
    try:
        packet = decode_packet(raw_packet)
    except Exception:
        # log the raw packet; the decoded packet is unavailable on failure
        log.exception('failed to parse packet: %s', raw_packet)

    log.debug('decoded packet: %s', packet)

    if packet:
        if 'ok' in packet:
            # handle response packets internally
            log.debug('command response: %s', packet)
            self._last_ack = packet
            self._command_ack.set()
        else:
            self.handle_packet(packet)
    else:
        log.warning('no valid packet')
Parse raw packet string into packet dict.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/protocol.py#L108-L131
aequitas/python-rflink
rflink/protocol.py
PacketHandling.handle_packet
def handle_packet(self, packet):
    """Process incoming packet dict and optionally call callback."""
    if self.packet_callback:
        # forward to callback
        self.packet_callback(packet)
    else:
        print('packet', packet)
python
def handle_packet(self, packet):
    """Process incoming packet dict and optionally call callback."""
    if self.packet_callback:
        # forward to callback
        self.packet_callback(packet)
    else:
        print('packet', packet)
Process incoming packet dict and optionally call callback.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/protocol.py#L133-L139
aequitas/python-rflink
rflink/protocol.py
PacketHandling.send_command
def send_command(self, device_id, action):
    """Send device command to rflink gateway."""
    command = deserialize_packet_id(device_id)
    command['command'] = action
    log.debug('sending command: %s', command)
    self.send_packet(command)
python
def send_command(self, device_id, action):
    """Send device command to rflink gateway."""
    command = deserialize_packet_id(device_id)
    command['command'] = action
    log.debug('sending command: %s', command)
    self.send_packet(command)
Send device command to rflink gateway.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/protocol.py#L145-L150
aequitas/python-rflink
rflink/protocol.py
CommandSerialization.send_command_ack
def send_command_ack(self, device_id, action):
    """Send command, wait for gateway to respond with acknowledgment."""
    # serialize commands
    yield from self._ready_to_send.acquire()

    acknowledgement = None
    try:
        self._command_ack.clear()
        self.send_command(device_id, action)

        log.debug('waiting for acknowledgement')
        try:
            yield from asyncio.wait_for(self._command_ack.wait(),
                                        TIMEOUT.seconds, loop=self.loop)
            log.debug('packet acknowledged')
        except concurrent.futures._base.TimeoutError:
            acknowledgement = {'ok': False, 'message': 'timeout'}
            log.warning('acknowledge timeout')
        else:
            acknowledgement = self._last_ack.get('ok', False)
    finally:
        # allow next command
        self._ready_to_send.release()
    return acknowledgement
python
def send_command_ack(self, device_id, action):
    """Send command, wait for gateway to respond with acknowledgment."""
    # serialize commands
    yield from self._ready_to_send.acquire()

    acknowledgement = None
    try:
        self._command_ack.clear()
        self.send_command(device_id, action)

        log.debug('waiting for acknowledgement')
        try:
            yield from asyncio.wait_for(self._command_ack.wait(),
                                        TIMEOUT.seconds, loop=self.loop)
            log.debug('packet acknowledged')
        except concurrent.futures._base.TimeoutError:
            acknowledgement = {'ok': False, 'message': 'timeout'}
            log.warning('acknowledge timeout')
        else:
            acknowledgement = self._last_ack.get('ok', False)
    finally:
        # allow next command
        self._ready_to_send.release()
    return acknowledgement
Send command, wait for gateway to respond with acknowledgment.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/protocol.py#L165-L188
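A sketch of how a caller might drive the acknowledged-command path above; the device id is a placeholder, and it assumes the coroutine is awaitable in the usual asyncio way:

import asyncio

async def switch_on(protocol):
    # send_command_ack serializes commands per protocol instance and
    # resolves once the gateway acks or the wait times out
    ack = await protocol.send_command_ack('newkaku_00000001_1', 'on')  # placeholder id
    # ack is True on success; a failure may be False or a dict such as
    # {'ok': False, 'message': 'timeout'}
    if ack is not True:
        print('command was not acknowledged:', ack)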
aequitas/python-rflink
rflink/protocol.py
EventHandling._handle_packet
def _handle_packet(self, packet):
    """Event specific packet handling logic.

    Break packet into events and fire the configured event callback or
    nicely print events for console.
    """
    events = packet_events(packet)

    for event in events:
        if self.ignore_event(event['id']):
            log.debug('ignoring event with id: %s', event)
            continue
        log.debug('got event: %s', event)
        if self.event_callback:
            self.event_callback(event)
        else:
            self.handle_event(event)
python
def _handle_packet(self, packet):
    """Event specific packet handling logic.

    Break packet into events and fire the configured event callback or
    nicely print events for console.
    """
    events = packet_events(packet)

    for event in events:
        if self.ignore_event(event['id']):
            log.debug('ignoring event with id: %s', event)
            continue
        log.debug('got event: %s', event)
        if self.event_callback:
            self.event_callback(event)
        else:
            self.handle_event(event)
Event specific packet handling logic.

Break packet into events and fire the configured event callback or
nicely print events for console.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/protocol.py#L215-L231
aequitas/python-rflink
rflink/protocol.py
EventHandling.handle_event
def handle_event(self, event):
    """Default handling of incoming event (print)."""
    string = '{id:<32} '
    if 'command' in event:
        string += '{command}'
    elif 'version' in event:
        if 'hardware' in event:
            string += '{hardware} {firmware} '
        string += 'V{version} R{revision}'
    else:
        string += '{value}'
        if event.get('unit'):
            string += ' {unit}'
    print(string.format(**event))
python
def handle_event(self, event):
    """Default handling of incoming event (print)."""
    string = '{id:<32} '
    if 'command' in event:
        string += '{command}'
    elif 'version' in event:
        if 'hardware' in event:
            string += '{hardware} {firmware} '
        string += 'V{version} R{revision}'
    else:
        string += '{value}'
        if event.get('unit'):
            string += ' {unit}'
    print(string.format(**event))
Default handling of incoming event (print).
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/protocol.py#L233-L247
aequitas/python-rflink
rflink/protocol.py
EventHandling.ignore_event
def ignore_event(self, event_id):
    """Verify event id against list of events to ignore.

    >>> e = EventHandling(ignore=[
    ...     'test1_00',
    ...     'test2_*',
    ... ])
    >>> e.ignore_event('test1_00')
    True
    >>> e.ignore_event('test2_00')
    True
    >>> e.ignore_event('test3_00')
    False
    """
    for ignore in self.ignore:
        if (ignore == event_id or
                (ignore.endswith('*') and event_id.startswith(ignore[:-1]))):
            return True
    return False
python
def ignore_event(self, event_id):
    """Verify event id against list of events to ignore.

    >>> e = EventHandling(ignore=[
    ...     'test1_00',
    ...     'test2_*',
    ... ])
    >>> e.ignore_event('test1_00')
    True
    >>> e.ignore_event('test2_00')
    True
    >>> e.ignore_event('test3_00')
    False
    """
    for ignore in self.ignore:
        if (ignore == event_id or
                (ignore.endswith('*') and event_id.startswith(ignore[:-1]))):
            return True
    return False
Verify event id against list of events to ignore.

>>> e = EventHandling(ignore=[
...     'test1_00',
...     'test2_*',
... ])
>>> e.ignore_event('test1_00')
True
>>> e.ignore_event('test2_00')
True
>>> e.ignore_event('test3_00')
False
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/protocol.py#L254-L272
aequitas/python-rflink
rflink/protocol.py
InverterProtocol.handle_event
def handle_event(self, event):
    """Handle incoming packet from rflink gateway."""
    if event.get('command'):
        if event['command'] == 'on':
            cmd = 'off'
        else:
            cmd = 'on'

        task = self.send_command_ack(event['id'], cmd)
        self.loop.create_task(task)
python
def handle_event(self, event):
    """Handle incoming packet from rflink gateway."""
    if event.get('command'):
        if event['command'] == 'on':
            cmd = 'off'
        else:
            cmd = 'on'

        task = self.send_command_ack(event['id'], cmd)
        self.loop.create_task(task)
Handle incoming packet from rflink gateway.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/protocol.py#L282-L291
aequitas/python-rflink
rflink/protocol.py
RepeaterProtocol.handle_event
def handle_event(self, packet):
    """Handle incoming packet from rflink gateway."""
    if packet.get('command'):
        task = self.send_command_ack(packet['id'], packet['command'])
        self.loop.create_task(task)
python
def handle_event(self, packet):
    """Handle incoming packet from rflink gateway."""
    if packet.get('command'):
        task = self.send_command_ack(packet['id'], packet['command'])
        self.loop.create_task(task)
Handle incoming packet from rflink gateway.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/protocol.py#L297-L301
aequitas/python-rflink
rflink/__main__.py
main
def main(argv=sys.argv[1:], loop=None):
    """Parse argument and setup main program loop."""
    args = docopt(__doc__, argv=argv,
                  version=pkg_resources.require('rflink')[0].version)

    level = logging.ERROR
    if args['-v']:
        level = logging.INFO
    if args['-v'] == 2:
        level = logging.DEBUG
    logging.basicConfig(level=level)

    if not loop:
        loop = asyncio.get_event_loop()

    if args['--ignore']:
        ignore = args['--ignore'].split(',')
    else:
        ignore = []

    command = next((c for c in ALL_COMMANDS if args[c] is True), None)

    if command:
        protocol = PROTOCOLS['command']
    else:
        protocol = PROTOCOLS[args['-m']]

    conn = create_rflink_connection(
        protocol=protocol,
        host=args['--host'],
        port=args['--port'],
        baud=args['--baud'],
        loop=loop,
        ignore=ignore,
    )

    transport, protocol = loop.run_until_complete(conn)

    try:
        if command:
            for _ in range(int(args['--repeat'])):
                loop.run_until_complete(
                    protocol.send_command_ack(
                        args['<id>'], command))
        else:
            loop.run_forever()
    except KeyboardInterrupt:
        # cleanup connection
        transport.close()
        loop.run_forever()
    finally:
        loop.close()
python
def main(argv=sys.argv[1:], loop=None):
    """Parse argument and setup main program loop."""
    args = docopt(__doc__, argv=argv,
                  version=pkg_resources.require('rflink')[0].version)

    level = logging.ERROR
    if args['-v']:
        level = logging.INFO
    if args['-v'] == 2:
        level = logging.DEBUG
    logging.basicConfig(level=level)

    if not loop:
        loop = asyncio.get_event_loop()

    if args['--ignore']:
        ignore = args['--ignore'].split(',')
    else:
        ignore = []

    command = next((c for c in ALL_COMMANDS if args[c] is True), None)

    if command:
        protocol = PROTOCOLS['command']
    else:
        protocol = PROTOCOLS[args['-m']]

    conn = create_rflink_connection(
        protocol=protocol,
        host=args['--host'],
        port=args['--port'],
        baud=args['--baud'],
        loop=loop,
        ignore=ignore,
    )

    transport, protocol = loop.run_until_complete(conn)

    try:
        if command:
            for _ in range(int(args['--repeat'])):
                loop.run_until_complete(
                    protocol.send_command_ack(
                        args['<id>'], command))
        else:
            loop.run_forever()
    except KeyboardInterrupt:
        # cleanup connection
        transport.close()
        loop.run_forever()
    finally:
        loop.close()
Parse argument and setup main program loop.
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/__main__.py#L49-L100
JustinLovinger/optimal
optimal/algorithms/gsa.py
_initial_population_gsa
def _initial_population_gsa(population_size, solution_size, lower_bounds,
                            upper_bounds):
    """Create a random initial population of floating point values.

    Args:
        population_size: an integer representing the number of solutions in the population.
        solution_size: the number of values in each solution.
        lower_bounds: a list, each value is a lower bound for the corresponding part of the solution.
        upper_bounds: a list, each value is an upper bound for the corresponding part of the solution.

    Returns:
        list; A list of random solutions.
    """
    if len(lower_bounds) != solution_size or len(upper_bounds) != solution_size:
        raise ValueError(
            "Lower and upper bounds must have a length equal to the problem size."
        )

    return common.make_population(population_size, common.random_real_solution,
                                  solution_size, lower_bounds, upper_bounds)
python
def _initial_population_gsa(population_size, solution_size, lower_bounds,
                            upper_bounds):
    """Create a random initial population of floating point values.

    Args:
        population_size: an integer representing the number of solutions in the population.
        solution_size: the number of values in each solution.
        lower_bounds: a list, each value is a lower bound for the corresponding part of the solution.
        upper_bounds: a list, each value is an upper bound for the corresponding part of the solution.

    Returns:
        list; A list of random solutions.
    """
    if len(lower_bounds) != solution_size or len(upper_bounds) != solution_size:
        raise ValueError(
            "Lower and upper bounds must have a length equal to the problem size."
        )

    return common.make_population(population_size, common.random_real_solution,
                                  solution_size, lower_bounds, upper_bounds)
Create a random initial population of floating point values.

Args:
    population_size: an integer representing the number of solutions in the population.
    solution_size: the number of values in each solution.
    lower_bounds: a list, each value is a lower bound for the corresponding part of the solution.
    upper_bounds: a list, each value is an upper bound for the corresponding part of the solution.

Returns:
    list; A list of random solutions.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/gsa.py#L104-L125
JustinLovinger/optimal
optimal/algorithms/gsa.py
_new_population_gsa
def _new_population_gsa(population, fitnesses, velocities,
                        lower_bounds, upper_bounds,
                        grav_initial, grav_reduction_rate,
                        iteration, max_iterations):
    """Generate a new population as given by GSA algorithm.

    In GSA paper, grav_initial is G_i
    """
    # Update the gravitational constant, and the best and worst of the population
    # Calculate the mass and acceleration for each solution
    # Update the velocity and position of each solution
    population_size = len(population)
    solution_size = len(population[0])

    # In GSA paper, grav is G
    grav = _next_grav_gsa(grav_initial, grav_reduction_rate, iteration,
                          max_iterations)
    masses = _get_masses(fitnesses)

    # Create bundled solution with position and mass for the K best calculation
    # Also store index to later check if two solutions are the same
    # Sorted by solution fitness (mass)
    solutions = [{
        'pos': pos,
        'mass': mass,
        'index': i
    } for i, (pos, mass) in enumerate(zip(population, masses))]
    solutions.sort(key=lambda x: x['mass'], reverse=True)

    # Get the force on each solution
    # Only the best K solutions apply force
    # K linearly decreases to 1
    num_best = int(population_size - (population_size - 1) *
                   (iteration / float(max_iterations)))
    forces = []
    for i in range(population_size):
        force_vectors = []
        for j in range(num_best):
            # If it is not the same solution
            if i != solutions[j]['index']:
                force_vectors.append(
                    _gsa_force(grav, masses[i], solutions[j]['mass'],
                               population[i], solutions[j]['pos']))
        forces.append(_gsa_total_force(force_vectors, solution_size))

    # Get the acceleration of each solution
    accelerations = []
    for i in range(population_size):
        accelerations.append(_gsa_acceleration(forces[i], masses[i]))

    # Update the velocity of each solution
    new_velocities = []
    for i in range(population_size):
        new_velocities.append(
            _gsa_update_velocity(velocities[i], accelerations[i]))

    # Create the new population
    new_population = []
    for i in range(population_size):
        new_position = _gsa_update_position(population[i], new_velocities[i])
        # Constrain to bounds
        new_position = list(
            numpy.clip(new_position, lower_bounds, upper_bounds))
        new_population.append(new_position)

    return new_population, new_velocities
python
def _new_population_gsa(population, fitnesses, velocities,
                        lower_bounds, upper_bounds,
                        grav_initial, grav_reduction_rate,
                        iteration, max_iterations):
    """Generate a new population as given by GSA algorithm.

    In GSA paper, grav_initial is G_i
    """
    # Update the gravitational constant, and the best and worst of the population
    # Calculate the mass and acceleration for each solution
    # Update the velocity and position of each solution
    population_size = len(population)
    solution_size = len(population[0])

    # In GSA paper, grav is G
    grav = _next_grav_gsa(grav_initial, grav_reduction_rate, iteration,
                          max_iterations)
    masses = _get_masses(fitnesses)

    # Create bundled solution with position and mass for the K best calculation
    # Also store index to later check if two solutions are the same
    # Sorted by solution fitness (mass)
    solutions = [{
        'pos': pos,
        'mass': mass,
        'index': i
    } for i, (pos, mass) in enumerate(zip(population, masses))]
    solutions.sort(key=lambda x: x['mass'], reverse=True)

    # Get the force on each solution
    # Only the best K solutions apply force
    # K linearly decreases to 1
    num_best = int(population_size - (population_size - 1) *
                   (iteration / float(max_iterations)))
    forces = []
    for i in range(population_size):
        force_vectors = []
        for j in range(num_best):
            # If it is not the same solution
            if i != solutions[j]['index']:
                force_vectors.append(
                    _gsa_force(grav, masses[i], solutions[j]['mass'],
                               population[i], solutions[j]['pos']))
        forces.append(_gsa_total_force(force_vectors, solution_size))

    # Get the acceleration of each solution
    accelerations = []
    for i in range(population_size):
        accelerations.append(_gsa_acceleration(forces[i], masses[i]))

    # Update the velocity of each solution
    new_velocities = []
    for i in range(population_size):
        new_velocities.append(
            _gsa_update_velocity(velocities[i], accelerations[i]))

    # Create the new population
    new_population = []
    for i in range(population_size):
        new_position = _gsa_update_position(population[i], new_velocities[i])
        # Constrain to bounds
        new_position = list(
            numpy.clip(new_position, lower_bounds, upper_bounds))
        new_population.append(new_position)

    return new_population, new_velocities
Generate a new population as given by GSA algorithm. In GSA paper, grav_initial is G_i
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/gsa.py#L128-L193
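The K-best schedule in the function above shrinks linearly from the full population down to a single best solution. A quick numeric sketch of that formula for an assumed 20-solution, 100-iteration run:

population_size = 20
max_iterations = 100
for iteration in (0, 25, 50, 75, 100):
    num_best = int(population_size - (population_size - 1)
                   * (iteration / float(max_iterations)))
    print(iteration, num_best)
# prints 20, 15, 10, 5, 1: fewer and fewer solutions exert force over time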
JustinLovinger/optimal
optimal/algorithms/gsa.py
_next_grav_gsa
def _next_grav_gsa(grav_initial, grav_reduction_rate, iteration,
                   max_iterations):
    """Calculate G as given by GSA algorithm.

    In GSA paper, grav is G
    """
    return grav_initial * math.exp(
        -grav_reduction_rate * iteration / float(max_iterations))
python
def _next_grav_gsa(grav_initial, grav_reduction_rate, iteration,
                   max_iterations):
    """Calculate G as given by GSA algorithm.

    In GSA paper, grav is G
    """
    return grav_initial * math.exp(
        -grav_reduction_rate * iteration / float(max_iterations))
Calculate G as given by GSA algorithm. In GSA paper, grav is G
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/gsa.py#L196-L203
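A small numeric sketch of the exponential decay above, using commonly cited GSA defaults (G_i = 100, reduction rate = 20; these values are assumptions, not taken from this module):

import math

def next_grav(grav_initial, grav_reduction_rate, iteration, max_iterations):
    # G(t) = G_i * exp(-alpha * t / T)
    return grav_initial * math.exp(
        -grav_reduction_rate * iteration / float(max_iterations))

for t in (0, 50, 100):
    print(t, next_grav(100.0, 20.0, t, 100))
# 0 -> 100.0, 50 -> ~4.54e-03, 100 -> ~2.06e-07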
JustinLovinger/optimal
optimal/algorithms/gsa.py
_get_masses
def _get_masses(fitnesses):
    """Convert fitnesses into masses, as given by GSA algorithm."""
    # Obtain constants
    best_fitness = max(fitnesses)
    worst_fitness = min(fitnesses)
    fitness_range = best_fitness - worst_fitness

    # Calculate raw masses for each solution
    raw_masses = []
    for fitness in fitnesses:
        # Epsilon is added to prevent divide by zero errors
        raw_masses.append((fitness - worst_fitness) /
                          (fitness_range + EPSILON) + EPSILON)

    # Normalize to obtain final mass for each solution
    total_mass = sum(raw_masses)
    masses = []
    for mass in raw_masses:
        masses.append(mass / total_mass)

    return masses
python
def _get_masses(fitnesses):
    """Convert fitnesses into masses, as given by GSA algorithm."""
    # Obtain constants
    best_fitness = max(fitnesses)
    worst_fitness = min(fitnesses)
    fitness_range = best_fitness - worst_fitness

    # Calculate raw masses for each solution
    raw_masses = []
    for fitness in fitnesses:
        # Epsilon is added to prevent divide by zero errors
        raw_masses.append((fitness - worst_fitness) /
                          (fitness_range + EPSILON) + EPSILON)

    # Normalize to obtain final mass for each solution
    total_mass = sum(raw_masses)
    masses = []
    for mass in raw_masses:
        masses.append(mass / total_mass)

    return masses
Convert fitnesses into masses, as given by GSA algorithm.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/gsa.py#L206-L226
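A worked example of the mass mapping above: fitnesses [1, 2, 4] rescale into masses that sum to 1, with the worst solution getting (almost) zero mass. EPSILON here is an assumed stand-in for the module's own constant:

EPSILON = 1e-10  # assumption; the module defines its own EPSILON

fitnesses = [1.0, 2.0, 4.0]
best, worst = max(fitnesses), min(fitnesses)
raw = [(f - worst) / (best - worst + EPSILON) + EPSILON for f in fitnesses]
total = sum(raw)
print([r / total for r in raw])  # ~[0.0, 0.25, 0.75]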
JustinLovinger/optimal
optimal/algorithms/gsa.py
_gsa_force
def _gsa_force(grav, mass_i, mass_j, position_i, position_j):
    """Gives the force of solution j on solution i.

    Variable name in GSA paper given in ()

    args:
        grav: The gravitational constant. (G)
        mass_i: The mass of solution i (derived from fitness). (M_i)
        mass_j: The mass of solution j (derived from fitness). (M_j)
        position_i: The position of solution i. (x_i)
        position_j: The position of solution j. (x_j)

    returns:
        numpy.array; The force vector of solution j on solution i.
    """
    position_diff = numpy.subtract(position_j, position_i)
    distance = numpy.linalg.norm(position_diff)

    # The first 3 terms give the magnitude of the force
    # The last term is a vector that provides the direction
    # Epsilon prevents divide by zero errors
    return grav * (mass_i * mass_j) / (distance + EPSILON) * position_diff
python
def _gsa_force(grav, mass_i, mass_j, position_i, position_j):
    """Gives the force of solution j on solution i.

    Variable name in GSA paper given in ()

    args:
        grav: The gravitational constant. (G)
        mass_i: The mass of solution i (derived from fitness). (M_i)
        mass_j: The mass of solution j (derived from fitness). (M_j)
        position_i: The position of solution i. (x_i)
        position_j: The position of solution j. (x_j)

    returns:
        numpy.array; The force vector of solution j on solution i.
    """
    position_diff = numpy.subtract(position_j, position_i)
    distance = numpy.linalg.norm(position_diff)

    # The first 3 terms give the magnitude of the force
    # The last term is a vector that provides the direction
    # Epsilon prevents divide by zero errors
    return grav * (mass_i * mass_j) / (distance + EPSILON) * position_diff
Gives the force of solution j on solution i.

Variable name in GSA paper given in ()

args:
    grav: The gravitational constant. (G)
    mass_i: The mass of solution i (derived from fitness). (M_i)
    mass_j: The mass of solution j (derived from fitness). (M_j)
    position_i: The position of solution i. (x_i)
    position_j: The position of solution j. (x_j)

returns:
    numpy.array; The force vector of solution j on solution i.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/gsa.py#L229-L251
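A self-contained sketch of the force expression above for two unit-mass solutions one unit apart (EPSILON is again an assumed stand-in for the module constant):

import numpy

EPSILON = 1e-10  # assumption; the module defines its own EPSILON

def gsa_force(grav, mass_i, mass_j, position_i, position_j):
    position_diff = numpy.subtract(position_j, position_i)
    distance = numpy.linalg.norm(position_diff)
    return grav * (mass_i * mass_j) / (distance + EPSILON) * position_diff

# Solution j pulls solution i toward it along +x with magnitude ~G
print(gsa_force(1.0, 1.0, 1.0, [0.0, 0.0], [1.0, 0.0]))  # ~[1. 0.]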
JustinLovinger/optimal
optimal/algorithms/gsa.py
_gsa_total_force
def _gsa_total_force(force_vectors, vector_length):
    """Return a randomly weighted sum of the force vectors.

    args:
        force_vectors: A list of force vectors on solution i.

    returns:
        list; The total force on solution i.
    """
    if len(force_vectors) == 0:
        return [0.0] * vector_length

    # The GSA algorithm specifies that the total force in each dimension
    # is a random sum of the individual forces in that dimension.
    # For this reason we sum the dimensions individually instead of simply
    # using vec_a+vec_b
    total_force = [0.0] * vector_length
    for force_vec in force_vectors:
        for i in range(vector_length):
            total_force[i] += random.uniform(0.0, 1.0) * force_vec[i]
    return total_force
python
def _gsa_total_force(force_vectors, vector_length):
    """Return a randomly weighted sum of the force vectors.

    args:
        force_vectors: A list of force vectors on solution i.

    returns:
        list; The total force on solution i.
    """
    if len(force_vectors) == 0:
        return [0.0] * vector_length

    # The GSA algorithm specifies that the total force in each dimension
    # is a random sum of the individual forces in that dimension.
    # For this reason we sum the dimensions individually instead of simply
    # using vec_a+vec_b
    total_force = [0.0] * vector_length
    for force_vec in force_vectors:
        for i in range(vector_length):
            total_force[i] += random.uniform(0.0, 1.0) * force_vec[i]
    return total_force
Return a randomly weighted sum of the force vectors.

args:
    force_vectors: A list of force vectors on solution i.

returns:
    list; The total force on solution i.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/gsa.py#L254-L273
JustinLovinger/optimal
optimal/algorithms/gsa.py
_gsa_update_velocity
def _gsa_update_velocity(velocity, acceleration):
    """Stochastically update velocity given acceleration.

    In GSA paper, velocity is v_i, acceleration is a_i
    """
    # The GSA algorithm specifies that the new velocity for each dimension
    # is a sum of a random fraction of its current velocity in that dimension,
    # and its acceleration in that dimension
    # For this reason we sum the dimensions individually instead of simply
    # using vec_a+vec_b
    new_velocity = []
    for vel, acc in zip(velocity, acceleration):
        new_velocity.append(random.uniform(0.0, 1.0) * vel + acc)
    return new_velocity
python
def _gsa_update_velocity(velocity, acceleration):
    """Stochastically update velocity given acceleration.

    In GSA paper, velocity is v_i, acceleration is a_i
    """
    # The GSA algorithm specifies that the new velocity for each dimension
    # is a sum of a random fraction of its current velocity in that dimension,
    # and its acceleration in that dimension
    # For this reason we sum the dimensions individually instead of simply
    # using vec_a+vec_b
    new_velocity = []
    for vel, acc in zip(velocity, acceleration):
        new_velocity.append(random.uniform(0.0, 1.0) * vel + acc)
    return new_velocity
Stochastically update velocity given acceleration. In GSA paper, velocity is v_i, acceleration is a_i
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/gsa.py#L284-L298
JustinLovinger/optimal
optimal/algorithms/genalg.py
_new_population_genalg
def _new_population_genalg(population,
                           fitnesses,
                           mutation_chance=0.02,
                           crossover_chance=0.7,
                           selection_function=gaoperators.tournament_selection,
                           crossover_function=gaoperators.one_point_crossover):
    """Perform all genetic algorithm operations on a population,
    and return a new population.

    population must have an even number of chromosomes.

    Args:
        population: A list of binary lists, ex. [[0,1,1,0], [1,0,1,0]]
        fitnesses: A list of fitnesses that correspond with chromosomes
            in the population, ex. [1.2, 10.8]
        mutation_chance: the chance that a bit will be flipped during mutation
        crossover_chance: the chance that two parents will be crossed during crossover
        selection_function: A function that will select parents for crossover and mutation
        crossover_function: A function that will cross two parents

    Returns:
        list; A new population of chromosomes, that should be more fit.
    """
    # Selection
    # Create the population of parents that will be crossed and mutated.
    intermediate_population = selection_function(population, fitnesses)

    # Crossover
    new_population = _crossover(intermediate_population, crossover_chance,
                                crossover_function)

    # Mutation
    # Mutates chromosomes in place
    gaoperators.random_flip_mutate(new_population, mutation_chance)

    # Return new population
    return new_population
python
def _new_population_genalg(population,
                           fitnesses,
                           mutation_chance=0.02,
                           crossover_chance=0.7,
                           selection_function=gaoperators.tournament_selection,
                           crossover_function=gaoperators.one_point_crossover):
    """Perform all genetic algorithm operations on a population,
    and return a new population.

    population must have an even number of chromosomes.

    Args:
        population: A list of binary lists, ex. [[0,1,1,0], [1,0,1,0]]
        fitnesses: A list of fitnesses that correspond with chromosomes
            in the population, ex. [1.2, 10.8]
        mutation_chance: the chance that a bit will be flipped during mutation
        crossover_chance: the chance that two parents will be crossed during crossover
        selection_function: A function that will select parents for crossover and mutation
        crossover_function: A function that will cross two parents

    Returns:
        list; A new population of chromosomes, that should be more fit.
    """
    # Selection
    # Create the population of parents that will be crossed and mutated.
    intermediate_population = selection_function(population, fitnesses)

    # Crossover
    new_population = _crossover(intermediate_population, crossover_chance,
                                crossover_function)

    # Mutation
    # Mutates chromosomes in place
    gaoperators.random_flip_mutate(new_population, mutation_chance)

    # Return new population
    return new_population
Perform all genetic algorithm operations on a population,
and return a new population.

population must have an even number of chromosomes.

Args:
    population: A list of binary lists, ex. [[0,1,1,0], [1,0,1,0]]
    fitnesses: A list of fitnesses that correspond with chromosomes
        in the population, ex. [1.2, 10.8]
    mutation_chance: the chance that a bit will be flipped during mutation
    crossover_chance: the chance that two parents will be crossed during crossover
    selection_function: A function that will select parents for crossover and mutation
    crossover_function: A function that will cross two parents

Returns:
    list; A new population of chromosomes, that should be more fit.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/genalg.py#L109-L144
JustinLovinger/optimal
optimal/algorithms/genalg.py
_crossover
def _crossover(population, crossover_chance, crossover_operator):
    """Perform crossover on a population, return the new crossed-over population."""
    new_population = []
    for i in range(0, len(population), 2):  # For every other index
        # Take parents from every set of 2 in the population
        # Wrap index if out of range
        try:
            parents = (population[i], population[i + 1])
        except IndexError:
            parents = (population[i], population[0])

        # If crossover takes place
        if random.uniform(0.0, 1.0) <= crossover_chance:
            # Add children to the new population
            new_population.extend(crossover_operator(parents))
        else:
            new_population.extend(parents)

    return new_population
python
def _crossover(population, crossover_chance, crossover_operator):
    """Perform crossover on a population, return the new crossed-over population."""
    new_population = []
    for i in range(0, len(population), 2):  # For every other index
        # Take parents from every set of 2 in the population
        # Wrap index if out of range
        try:
            parents = (population[i], population[i + 1])
        except IndexError:
            parents = (population[i], population[0])

        # If crossover takes place
        if random.uniform(0.0, 1.0) <= crossover_chance:
            # Add children to the new population
            new_population.extend(crossover_operator(parents))
        else:
            new_population.extend(parents)

    return new_population
Perform crossover on a population, return the new crossed-over population.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/genalg.py#L147-L165
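A self-contained sketch of the pairing loop above with a toy one-point crossover operator (not the library's gaoperators.one_point_crossover) and crossover forced on:

import random

def one_point(parents):
    """Toy one-point crossover: split both parents at the same point."""
    a, b = parents
    point = random.randrange(1, len(a))
    return [a[:point] + b[point:], b[:point] + a[point:]]

population = [[0, 0, 0, 0], [1, 1, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]]
new_population = []
for i in range(0, len(population), 2):
    parents = (population[i], population[i + 1])
    new_population.extend(one_point(parents))  # crossover_chance = 1.0
print(new_population)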
JustinLovinger/optimal
optimal/common.py
random_real_solution
def random_real_solution(solution_size, lower_bounds, upper_bounds):
    """Make a list of random real numbers between lower and upper bounds."""
    return [
        random.uniform(lower_bounds[i], upper_bounds[i])
        for i in range(solution_size)
    ]
python
def random_real_solution(solution_size, lower_bounds, upper_bounds):
    """Make a list of random real numbers between lower and upper bounds."""
    return [
        random.uniform(lower_bounds[i], upper_bounds[i])
        for i in range(solution_size)
    ]
Make a list of random real numbers between lower and upper bounds.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/common.py#L34-L39
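A usage sketch for the helper above, assuming the optimal package is importable:

from optimal.common import random_real_solution

# Three values, each drawn uniformly from its own [lower, upper] range
solution = random_real_solution(3, [0.0, -1.0, 10.0], [1.0, 1.0, 20.0])
print(solution)  # e.g. [0.42, -0.13, 17.8]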
JustinLovinger/optimal
optimal/common.py
make_population
def make_population(population_size, solution_generator, *args, **kwargs):
    """Make a population with the supplied generator."""
    return [
        solution_generator(*args, **kwargs) for _ in range(population_size)
    ]
python
def make_population(population_size, solution_generator, *args, **kwargs):
    """Make a population with the supplied generator."""
    return [
        solution_generator(*args, **kwargs) for _ in range(population_size)
    ]
Make a population with the supplied generator.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/common.py#L42-L46
JustinLovinger/optimal
optimal/algorithms/gaoperators.py
tournament_selection
def tournament_selection(population,
                         fitnesses,
                         num_competitors=2,
                         diversity_weight=0.0):
    """Create a list of parents with tournament selection.

    Args:
        population: A list of solutions.
        fitnesses: A list of fitness values corresponding to solutions in population.
        num_competitors: Number of solutions to compare every round.
            Best solution among competitors is selected.
        diversity_weight: Weight of diversity metric.
            Determines how frequently diversity is used to select tournament winners.
            Note that fitness is given a weight of 1.0.
            diversity_weight == 1.0 gives equal weight to diversity and fitness.
    """
    # Optimization if diversity factor is disabled
    if diversity_weight <= 0.0:
        # list() so random.sample works under Python 3, where zip is lazy
        fitness_pop = list(zip(fitnesses, population))  # Zip for easy fitness comparison

        # Get num_competitors random chromosomes, then add best to result,
        # by taking max fitness and getting chromosome from tuple.
        # Repeat until full.
        return [
            max(random.sample(fitness_pop, num_competitors))[1]
            for _ in range(len(population))
        ]
    else:
        indices = range(len(population))

        # Select tournament winners by either max fitness or diversity.
        # The metric to check is randomly selected, weighted by diversity_weight.
        # diversity_metric is calculated between the given solution,
        # and the list of all currently selected solutions.
        selected_solutions = []
        # Select as many solutions as there are in population
        for _ in range(len(population)):
            competitor_indices = random.sample(indices, num_competitors)

            # Select by either fitness or diversity,
            # Selected by weighted random selection
            # NOTE: We assume fitness has a weight of 1.0
            if random.uniform(0.0, 1.0) < (1.0 / (1.0 + diversity_weight)):
                # Fitness
                selected_solutions.append(
                    max(
                        zip([fitnesses[i] for i in competitor_indices],
                            [population[i] for i in competitor_indices]))[-1])
            else:
                # Diversity
                # Break ties by fitness
                selected_solutions.append(
                    max(
                        zip([
                            _diversity_metric(population[i],
                                              selected_solutions)
                            for i in competitor_indices
                        ], [fitnesses[i] for i in competitor_indices],
                            [population[i] for i in competitor_indices]))[-1])

        return selected_solutions
python
def tournament_selection(population,
                         fitnesses,
                         num_competitors=2,
                         diversity_weight=0.0):
    """Create a list of parents with tournament selection.

    Args:
        population: A list of solutions.
        fitnesses: A list of fitness values corresponding to solutions in population.
        num_competitors: Number of solutions to compare every round.
            Best solution among competitors is selected.
        diversity_weight: Weight of diversity metric.
            Determines how frequently diversity is used to select tournament winners.
            Note that fitness is given a weight of 1.0.
            diversity_weight == 1.0 gives equal weight to diversity and fitness.
    """
    # Optimization if diversity factor is disabled
    if diversity_weight <= 0.0:
        # list() so random.sample works under Python 3, where zip is lazy
        fitness_pop = list(zip(fitnesses, population))  # Zip for easy fitness comparison

        # Get num_competitors random chromosomes, then add best to result,
        # by taking max fitness and getting chromosome from tuple.
        # Repeat until full.
        return [
            max(random.sample(fitness_pop, num_competitors))[1]
            for _ in range(len(population))
        ]
    else:
        indices = range(len(population))

        # Select tournament winners by either max fitness or diversity.
        # The metric to check is randomly selected, weighted by diversity_weight.
        # diversity_metric is calculated between the given solution,
        # and the list of all currently selected solutions.
        selected_solutions = []
        # Select as many solutions as there are in population
        for _ in range(len(population)):
            competitor_indices = random.sample(indices, num_competitors)

            # Select by either fitness or diversity,
            # Selected by weighted random selection
            # NOTE: We assume fitness has a weight of 1.0
            if random.uniform(0.0, 1.0) < (1.0 / (1.0 + diversity_weight)):
                # Fitness
                selected_solutions.append(
                    max(
                        zip([fitnesses[i] for i in competitor_indices],
                            [population[i] for i in competitor_indices]))[-1])
            else:
                # Diversity
                # Break ties by fitness
                selected_solutions.append(
                    max(
                        zip([
                            _diversity_metric(population[i],
                                              selected_solutions)
                            for i in competitor_indices
                        ], [fitnesses[i] for i in competitor_indices],
                            [population[i] for i in competitor_indices]))[-1])

        return selected_solutions
Create a list of parents with tournament selection.

Args:
    population: A list of solutions.
    fitnesses: A list of fitness values corresponding to solutions in population.
    num_competitors: Number of solutions to compare every round.
        Best solution among competitors is selected.
    diversity_weight: Weight of diversity metric.
        Determines how frequently diversity is used to select tournament winners.
        Note that fitness is given a weight of 1.0.
        diversity_weight == 1.0 gives equal weight to diversity and fitness.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/gaoperators.py#L35-L95
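A usage sketch for the selection above (assuming the optimal package is importable); with diversity_weight left at 0.0, only fitness decides each two-way tournament:

from optimal.algorithms.gaoperators import tournament_selection

population = [[0, 0, 1, 1], [1, 1, 1, 1], [0, 0, 0, 0], [1, 0, 1, 0]]
fitnesses = [2.0, 4.0, 0.0, 2.0]

# Returns len(population) parents; fitter chromosomes win more tournaments
parents = tournament_selection(population, fitnesses, num_competitors=2)
print(parents)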
JustinLovinger/optimal
optimal/algorithms/gaoperators.py
stochastic_selection
def stochastic_selection(population, fitnesses):
    """Create a list of parents with stochastic universal sampling."""
    pop_size = len(population)
    probabilities = _fitnesses_to_probabilities(fitnesses)

    # Create selection list (for stochastic universal sampling)
    selection_list = []
    selection_spacing = 1.0 / pop_size
    selection_start = random.uniform(0.0, selection_spacing)
    for i in range(pop_size):
        selection_list.append(selection_start + selection_spacing * i)

    # Select intermediate population according to selection list
    intermediate_population = []
    for selection in selection_list:
        for (i, probability) in enumerate(probabilities):
            if probability >= selection:
                intermediate_population.append(population[i])
                break
    random.shuffle(intermediate_population)

    return intermediate_population
python
def stochastic_selection(population, fitnesses):
    """Create a list of parents with stochastic universal sampling."""
    pop_size = len(population)
    probabilities = _fitnesses_to_probabilities(fitnesses)

    # Create selection list (for stochastic universal sampling)
    selection_list = []
    selection_spacing = 1.0 / pop_size
    selection_start = random.uniform(0.0, selection_spacing)
    for i in range(pop_size):
        selection_list.append(selection_start + selection_spacing * i)

    # Select intermediate population according to selection list
    intermediate_population = []
    for selection in selection_list:
        for (i, probability) in enumerate(probabilities):
            if probability >= selection:
                intermediate_population.append(population[i])
                break
    random.shuffle(intermediate_population)

    return intermediate_population
Create a list of parents with stochastic universal sampling.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/gaoperators.py#L98-L119
JustinLovinger/optimal
optimal/algorithms/gaoperators.py
roulette_selection
def roulette_selection(population, fitnesses):
    """Create a list of parents with roulette selection."""
    probabilities = _fitnesses_to_probabilities(fitnesses)

    intermediate_population = []
    for _ in range(len(population)):
        # Choose a random individual
        selection = random.uniform(0.0, 1.0)
        # Iterate over probabilities list
        for i, probability in enumerate(probabilities):
            if probability >= selection:
                # First probability that is greater
                intermediate_population.append(population[i])
                break

    return intermediate_population
python
def roulette_selection(population, fitnesses):
    """Create a list of parents with roulette selection."""
    probabilities = _fitnesses_to_probabilities(fitnesses)

    intermediate_population = []
    for _ in range(len(population)):
        # Choose a random individual
        selection = random.uniform(0.0, 1.0)
        # Iterate over probabilities list
        for i, probability in enumerate(probabilities):
            if probability >= selection:
                # First probability that is greater
                intermediate_population.append(population[i])
                break

    return intermediate_population
Create a list of parents with roulette selection.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/gaoperators.py#L122-L136
JustinLovinger/optimal
optimal/algorithms/gaoperators.py
_rescale
def _rescale(vector):
    """Scale values in vector to the range [0, 1].

    Args:
        vector: A list of real values.
    """
    # Subtract min, making smallest value 0
    min_val = min(vector)
    vector = [v - min_val for v in vector]

    # Divide by max, making largest value 1
    max_val = float(max(vector))
    try:
        return [v / max_val for v in vector]
    except ZeroDivisionError:
        # All values are the same
        return [1.0] * len(vector)
python
def _rescale(vector):
    """Scale values in vector to the range [0, 1].

    Args:
        vector: A list of real values.
    """
    # Subtract min, making smallest value 0
    min_val = min(vector)
    vector = [v - min_val for v in vector]

    # Divide by max, making largest value 1
    max_val = float(max(vector))
    try:
        return [v / max_val for v in vector]
    except ZeroDivisionError:
        # All values are the same
        return [1.0] * len(vector)
Scale values in vector to the range [0, 1]. Args: vector: A list of real values.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/gaoperators.py#L139-L154
JustinLovinger/optimal
optimal/algorithms/gaoperators.py
_diversity_metric
def _diversity_metric(solution, population):
    """Return diversity value for solution compared to given population.

    Metric is sum of distance between solution and each solution in population,
    normalized to [0.0, 1.0].
    """
    # Edge case for empty population
    # If there are no other solutions, the given solution has maximum diversity
    if population == []:
        return 1.0

    return (
        sum([_manhattan_distance(solution, other) for other in population])
        # Normalize (assuming each value in solution is in range [0.0, 1.0])
        # NOTE: len(solution) is maximum manhattan distance
        / (len(population) * len(solution)))
python
def _diversity_metric(solution, population):
    """Return diversity value for solution compared to given population.

    Metric is sum of distance between solution and each solution in population,
    normalized to [0.0, 1.0].
    """
    # Edge case for empty population
    # If there are no other solutions, the given solution has maximum diversity
    if population == []:
        return 1.0

    return (
        sum([_manhattan_distance(solution, other) for other in population])
        # Normalize (assuming each value in solution is in range [0.0, 1.0])
        # NOTE: len(solution) is maximum manhattan distance
        / (len(population) * len(solution)))
Return diversity value for solution compared to given population. Metric is sum of distance between solution and each solution in population, normalized to [0.0, 1.0].
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/gaoperators.py#L157-L172
JustinLovinger/optimal
optimal/algorithms/gaoperators.py
_manhattan_distance
def _manhattan_distance(vec_a, vec_b):
    """Return manhattan distance between two lists of numbers."""
    if len(vec_a) != len(vec_b):
        raise ValueError('len(vec_a) must equal len(vec_b)')

    return sum(map(lambda a, b: abs(a - b), vec_a, vec_b))
python
def _manhattan_distance(vec_a, vec_b):
    """Return manhattan distance between two lists of numbers."""
    if len(vec_a) != len(vec_b):
        raise ValueError('len(vec_a) must equal len(vec_b)')

    return sum(map(lambda a, b: abs(a - b), vec_a, vec_b))
Return manhattan distance between two lists of numbers.
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/gaoperators.py#L175-L179
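A quick worked example of the distance above and the diversity normalization that builds on it; this is a self-contained re-implementation for illustration, not the module's private helpers. For binary chromosomes, manhattan distance reduces to Hamming distance:

def manhattan_distance(vec_a, vec_b):
    if len(vec_a) != len(vec_b):
        raise ValueError('len(vec_a) must equal len(vec_b)')
    return sum(abs(a - b) for a, b in zip(vec_a, vec_b))

solution = [1, 0, 1, 0]
population = [[1, 1, 1, 1], [0, 0, 0, 0]]
distances = [manhattan_distance(solution, other) for other in population]
print(distances)  # [2, 2]
# Normalized as in _diversity_metric: max distance is len(solution) per peer
print(sum(distances) / float(len(population) * len(solution)))  # 0.5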