Dataset columns: language (string, 6 classes), original_string (string, 25 to 887k characters), text (string, 25 to 887k characters).
Python
def cleanup_keep_in_db(self) -> None:
    """
    Clear up the database to conform to our span_to_keep_in_db
    """
    if self.start_entry is None or not self.data_in_memory:
        return
    cutoff_span = self.stop_entry - self.span_to_keep_in_db
    if self.start_entry == self.first_key_in_memory:
        # The entire series is loaded in the memory
        self.data_in_memory = [tpl for tpl in self.data_in_memory
                               if tpl[0] >= cutoff_span]
        if self.data_in_memory:
            self.start_entry = self.first_key_in_memory
        else:
            self.start_entry = self.stop_entry = None
    else:
        if not self._cleanup_the_db():
            return
        self.cleanup_keep_in_db()
Python
def on_new_data(self, key: K, value: V) -> None:
    """
    Called by the user when there's new data gathered.

    Key must be greater than the current stop entry.

    :param key: key of the new data
    :param value: value of the new data
    :raise ValueError: key was not larger than current stop entry
    """
    if self.stop_entry is not None:
        if key <= self.stop_entry:
            raise ValueError('Key not greater than current stop entry!')
    self.data_in_memory.append((key, value))
    self.stop_entry = key
    if self.start_entry is None:
        self.start_entry = key
Python
def on_sync_request(self, maximum_entries: tp.Optional[int] = math.inf) -> tp.Iterator[KVTuple]:
    """
    Return an iterator that will provide the source of the data for synchronization.

    This will preferentially start from the first value, so as to keep values
    synchronized in-order.

    :param maximum_entries: maximum number of entries to return
    :return: an iterator of (KVTuple) that should be synchronized against the server
    :raise ValueError: nothing to synchronize!
    """
    if not self.start_entry:
        raise ValueError('Nothing to synchronize!')
    if self.synced_up_to == self.data_in_memory[-1][0]:
        raise ValueError('Nothing to synchronize!')

    if self.synced_up_to is None:
        # Sync everything
        iterator = self.db_storage.iterate(None)
        try:
            data = list(iterator)
            if len(data) < maximum_entries:
                entries_left = maximum_entries - len(data)
                if entries_left == math.inf:
                    data = itertools.chain(data, self.data_in_memory)
                else:
                    data = itertools.chain(data, self.data_in_memory[:entries_left])
            v = data
        finally:
            try_close(iterator)
    else:
        if self.first_key_in_memory <= self.synced_up_to:
            # Means we have to sync from memory
            if self.synced_up_to is None:
                v = self.data_in_memory
            else:
                index = bisect.bisect_right([y[0] for y in self.data_in_memory],
                                            self.synced_up_to)
                if maximum_entries == math.inf:
                    v = self.data_in_memory[index:]
                else:
                    v = self.data_in_memory[index:index + maximum_entries]
        else:
            # We have to start off the disk
            data = []
            iterator = self.db_storage.iterate(self.start_entry)
            try:
                while len(data) < maximum_entries:
                    try:
                        data.append(next(iterator))
                    except StopIteration:
                        for index, tpl in enumerate(self.data_in_memory):
                            if len(data) >= maximum_entries:
                                break
                            if self.synced_up_to is not None:
                                if tpl[0] > self.synced_up_to:
                                    break
                        v = itertools.chain(data, self.data_in_memory[:index])
                        break
                else:
                    v = data
            finally:
                try_close(iterator)
    return v
Python
def on_synced_up_to(self, key: K) -> None:
    """
    Called when data was successfully synced up to key included

    :param key: maximum key synchronized
    """
    self.synced_up_to = key
Python
def put(self, item: T) -> None:
    """
    Add an element to the queue

    :param item: element to add
    """
    with self.lock:
        self.queue.append(item)
        self.inserted_condition.notify()
Python
def peek(self, timeout: tp.Optional[float] = None) -> T:
    """
    Get an element without removing it from the top of the queue.

    :param timeout: maximum amount of seconds to wait. Default value of None
        means wait as long as necessary
    :return: the item
    :raise WouldWaitMore: timeout has expired
    """
    return self.__get(timeout, lambda queue: queue[0])
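For context, here is a minimal stand-alone sketch of the pattern put and peek implement above (a deque guarded by a condition variable). The class and attribute names below are illustrative rather than satella's actual internals, and it raises the built-in TimeoutError where the original raises WouldWaitMore:

import collections
import threading
import typing as tp

T = tp.TypeVar('T')


class PeekableQueueSketch(tp.Generic[T]):
    """Illustrative only: a deque guarded by a single condition variable."""

    def __init__(self) -> None:
        self.inserted_condition = threading.Condition()
        self.queue: tp.Deque[T] = collections.deque()

    def put(self, item: T) -> None:
        with self.inserted_condition:
            self.queue.append(item)
            self.inserted_condition.notify()

    def peek(self, timeout: tp.Optional[float] = None) -> T:
        with self.inserted_condition:
            # wait_for re-checks the predicate after each notify and honours the timeout
            if not self.inserted_condition.wait_for(lambda: len(self.queue) > 0,
                                                    timeout=timeout):
                raise TimeoutError('timeout has expired')
            return self.queue[0]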
Python
def add_done_callback(self, callback, only_one: bool = False) -> None:
    """
    Add a callback to a Future to be called on its completion.

    By default, this will add the callback to all futures.

    :param callback: callback that takes the completed Future as argument
    :param only_one: callback will be added only to a single Future. False by default
    :raises IndexError: only_one was given and no Futures in collection!
    """
    if only_one:
        self.futures[0].add_done_callback(callback)
    else:
        for future in self.futures:
            future.add_done_callback(callback)
Python
def result(self, timeout: tp.Optional[float] = None) -> list:
    """
    Return the result of all futures, as a list.

    This will block until the results are available.

    :param timeout: a timeout in seconds for a single result. Default value None
        means wait as long as necessary
    :return: list containing results of all futures
    :raises WouldWaitMore: timeout while waiting for result
    """
    try:
        return [fut.result(timeout) for fut in self.futures]
    except concurrent.futures.TimeoutError:
        raise WouldWaitMore('timeout waiting for the result')
Python
def exception(self, timeout: tp.Optional[float] = None) -> tp.Optional[Exception]:
    """
    Return first exception raised by any of the futures.

    This will block until the results are available. This call proceeding
    does not mean that results for all are available, since this will return
    the first exception encountered!

    :param timeout: a timeout in seconds for a single result. Default value None
        means wait as long as necessary
    :return: the first exception, or None if there were no exceptions
    :raises WouldWaitMore: timeout while waiting for result
    """
    try:
        for fut in self.futures:
            e = fut.exception(timeout)
            if e is not None:
                return e
        return None
    except concurrent.futures.TimeoutError:
        raise WouldWaitMore('timeout waiting for the result')
Python
def refresh(self, load_from=None) -> None:
    """
    Ask the database about this object, or load it from provided serialized
    representation.

    Override me, calling me in a super method.

    :param load_from: serialized object. If not given, the DB will be asked for it
    """
    self._loaded = True
Python
def feed(self, key: K, value: V, timestamp: tp.Optional[float] = None):
    """
    Feed this data into the cache
    """
    self.data[key] = value
    self.timestamp_data[key] = timestamp or self.time_getter()
Python
def _on_failure(self, key: K) -> None:
    """
    Called internally when a KeyError occurs.

    It is expected that invalidate(key) will always be called before
    """
    self.invalidate(key)
    if self.cache_failures:
        self.cache_missed.add(key)
        self.timestamp_data[key] = self.time_getter()
Python
def schedule_a_fetch(self, key: K) -> Future:
    """
    Schedule a value refresh for given key

    :param key: key to schedule the refresh for
    :return: future that was queued to ask for given key
    """
    future = self.value_getter_executor.submit(self.value_getter, key)

    def on_done_callback(fut: Future) -> None:
        try:
            result = fut.result()
        except KeyError:
            self._on_failure(key)
        else:
            self[key] = result

    future.add_done_callback(on_done_callback)
    return future
Python
def invalidate(self, key: K) -> None:
    """
    Remove all information about given key from the cache

    Syntactic sugar for:

    >>> try:
    >>>     del self[key]
    >>> except KeyError:
    >>>     pass
    """
    try:
        del self[key]
    except KeyError:
        pass
Python
def make_room(self) -> None:
    """
    Assure that there's place for at least one element
    """
    while len(self) > self.max_size - 1:
        self.evict()
Python
def invalidate(self, key: K) -> None:
    """
    Remove all information about given key from the cache

    Syntactic sugar for:

    >>> try:
    >>>     del self[key]
    >>> except KeyError:
    >>>     pass
    """
    super().invalidate(key)
    self.lru.remove(key)
Python
def feed(self, key: K, value: V, timestamp: tp.Optional[float] = None):
    """
    Feed this data into the cache
    """
    if key not in self.data:
        self.make_room()
    super().feed(key, value, timestamp)
    self.lru.add(key)
Python
def on_success(self, fun) -> 'Future':
    """
    Schedule function to be called with the result of this future as its argument
    only if this future succeeds.

    :param fun: function to call
    :return: self
    """
    def inner(fut: PythonFuture):
        if fut._exception is not None:
            return
        return fun(fut._result)

    self.add_done_callback(inner)
    return self
Python
def on_failure(self, fun):
    """
    Schedule function to be called with the exception value that befell this future

    :param fun: function to call
    :return: self
    """
    def inner(fut: PythonFuture):
        if fut._exception is None:
            return
        return fun(fut._exception)

    self.add_done_callback(inner)
    return self
Python
def chain(self, fun) -> 'Future':
    """
    Schedule function to be called with the result of this future as its argument
    (or exception value if the future excepted).

    :param fun: function to call
    :return: self
    """
    def inner(future):
        if future._exception is not None:
            result = future._exception
        else:
            result = future._result
        fun(result)

    self.add_done_callback(inner)
    return self
Python
def add_pre_done_callback(self, fn):
    """
    Attaches a callable that will be called just before the future finishes
    and can change the future's result (or insert an Exception).

    Args:
        fn: A callable that will be called with this future as its only
            argument just before the future completes or is cancelled.
    """
    with self._condition:
        if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
            self._pre_done_callbacks.append(fn)
            return
    # noinspection PyBroadException
    try:
        fn(self)
    except Exception:
        LOGGER.exception('exception calling callback for %r', self)
Python
def wrap_if(fut: tp.Union[PythonFuture, Future]) -> Future:
    """
    Wrap a future, if it isn't already wrapped

    :param fut: either a Python Future or a Satella Future
    :return: a Satella future
    """
    if not isinstance(fut, Future):
        return WrappingFuture(fut)
    else:
        return fut
Python
def provide(self) -> dict:
    """
    Return your configuration, as a dict

    :raise ConfigurationError: on invalid configuration
    """
    return {self.key: self.child.provide()}
Python
def reset(self) -> None:
    """
    Delete all child metrics that this metric contains.

    Also, if called on the root metric, sets the runlevel to RUNTIME
    """
    from satella.instrumentation import metrics
    if self.name == '':
        with metrics.metrics_lock:
            metrics.metrics = {}
            metrics.level = MetricLevel.RUNTIME
    else:
        with metrics.metrics_lock:
            metrics.metrics = {k: v for k, v in metrics.metrics.items()
                               if not k.startswith(self.get_fully_qualified_name() + '.')}
            del metrics.metrics[self.get_fully_qualified_name()]
    self.children = []
Python
def _handle(self, *args, **kwargs) -> None:
    """
    To be overridden! The right place to process your data, after its level
    was verified by :meth:`Metric.handle`
    """
    raise TypeError('This is a container metric!')
Python
def clone(self, labels: dict) -> 'LeafMetric':
    """
    Return a fresh instance of this metric, with its parent being set to this metric
    and having a particular set of labels, and being of level INHERIT.
    """
    return self.__class__(self.name, self, MetricLevel.INHERIT, *self.args,
                          labels=labels, **self.kwargs)
Python
def _HASH_FIELDS_TO_USE(self) -> tp.Union[str, tp.Sequence[str]]:
    """
    Return the sequence of names of properties and attributes
    that will be used for __eq__ and __hash__
    """
    return ()
Python
def sync_threadpool(tpe: tp.Union[ExecutorWrapper, ThreadPoolExecutor],
                    max_wait: tp.Optional[float] = None) -> None:
    """
    Make sure that every thread of given thread pool executor is done processing
    jobs scheduled until this moment.

    Make sure that other tasks do not submit anything to this thread pool executor.

    :param tpe: thread pool executor to sync. Can also be an ExecutorWrapper.
    :param max_wait: maximum time to wait. Default, None, means wait forever
    :raises WouldWaitMore: timeout exceeded. Raised only when max_wait is not None.
    """
    if isinstance(tpe, ExecutorWrapper):
        return sync_threadpool(tpe.executor, max_wait=max_wait)

    assert isinstance(tpe, ThreadPoolExecutor), 'Must be a ThreadPoolExecutor!'

    with measure(timeout=max_wait) as measurement:
        # noinspection PyProtectedMember
        workers = tpe._max_workers
        atm_n = AtomicNumber(workers)
        cond = Condition()

        def decrease_atm():
            nonlocal atm_n
            atm_n -= 1
            cond.wait()

        futures = [tpe.submit(decrease_atm) for _ in range(workers)]

        # wait for all currently scheduled jobs to be picked up
        # noinspection PyProtectedMember
        while tpe._work_queue.qsize() > 0:
            if max_wait is not None:
                if measurement() > max_wait:
                    for future in futures:
                        future.cancel()
                    raise WouldWaitMore('timeout exceeded')
            time.sleep(0.5)

        if max_wait is None:
            atm_n.wait_until_equal(0)
        else:
            while measurement() < max_wait:
                try:
                    atm_n.wait_until_equal(0, 1)
                    break
                except WouldWaitMore:
                    continue
            else:
                raise WouldWaitMore('timeout exceeded')
        cond.notify_all()
        wait(futures)
Python
def med_avg(*vals: NumberStr, dp: OptNumStr = '8', rounding: str = None) -> Decimal:
    """
    Standard median average. If there are 3 or less values, the midpoint value
    will be returned.

    If there are 4 or more values, the midpoint, and value before the midpoint
    will be added together, and then divided by two to get the median average.

    :param NumberStr vals: Two or more values to median average.
    :param NumberStr dp: Decimal places to round to (can be ``None`` to disable rounding)
    :param str rounding: Optional rounding method, e.g. ``ROUND_HALF_DOWN`` or ``ROUND_UP``
    :return Decimal med_avg: The median average of ``vals``
    """
    if len(vals) == 0:
        raise ValueError("No values passed to med_avg... you must pass at least one number!")
    dp = int(dp)
    rate_vals = sorted(list([conv_dec(v) for v in vals]))
    midpoint = int(len(rate_vals) // 2)

    if len(rate_vals) == 1:
        return rate_vals[0]
    if len(rate_vals) <= 3:
        return rate_vals[midpoint] if empty(dp) else dec_round(rate_vals[midpoint], dp, rounding)

    # mavg = avg(rate_vals[midpoint - 1], rate_vals[midpoint], dp=dp, rounding=rounding)
    # mavg = Decimal((rate_vals[midpoint - 1] + rate_vals[midpoint]) / Decimal('2'))
    # return mavg if empty(dp) else dec_round(mavg, dp, rounding)
    return avg(rate_vals[midpoint - 1], rate_vals[midpoint], dp=dp, rounding=rounding)
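To make the branch logic above concrete, here is a minimal standard-library sketch of the same median-average idea (illustrative only; the real med_avg converts values with conv_dec and rounds via dec_round/avg from its own helper module):

from decimal import Decimal


def med_avg_sketch(*vals) -> Decimal:
    rate_vals = sorted(Decimal(str(v)) for v in vals)
    midpoint = len(rate_vals) // 2
    if len(rate_vals) <= 3:
        # 1 to 3 values: return the midpoint value directly
        return rate_vals[midpoint]
    # 4 or more values: average the midpoint with the value before it
    return (rate_vals[midpoint - 1] + rate_vals[midpoint]) / Decimal('2')


print(med_avg_sketch('0.1', '0.5', '0.9'))  # 0.5
print(med_avg_sketch('1', '2', '3', '4'))   # 2.5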
Python
def on_update(self, delta_time):
    """
    All the logic to move, and the game logic goes here.
    Normally, you'll call update() on the sprite lists that need it.
    """
    self.player_list.update_animation()
    self.player_sprite.update()
    self.obstacle_manager.obstacle_list.update()
Python
def on_key_release(self, key, key_modifiers):
    """Called whenever the user lets off a previously pressed key."""
    # If the player hits 'SPACE' while on the floor
    jumpable = (key == 32 and self.player_sprite.is_floored)
    if jumpable:
        self.player_sprite.jump()
Python
def make_logger(name):
    """creates an individual logger so the user knows which module something written in the logger comes from"""
    logger = logging.getLogger(name)
    if logger.hasHandlers():
        logger.handlers.clear()
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(module)s:%(levelname)s:%(asctime)s:%(message)s')
    file_handler = logging.FileHandler('projectlog.log')
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger
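A brief usage sketch (the module name and messages are illustrative); every logger returned shares the projectlog.log file handler configured above:

logger = make_logger(__name__)
logger.info("application started")       # written to projectlog.log with module/level/time prefix
logger.debug("debug detail: x=%s", 42)   # DEBUG level is enabled, so this is logged too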
Python
def process_covid_csv_data(covid_csv_data) -> tuple:
    """
    process the covid data from the csv
    """
    last_7_days_cases = 0
    current_hospital_cases = (covid_csv_data[1].split(","))[5]
    for lines in range(len(covid_csv_data)):
        if (covid_csv_data[lines+1].split(",")[4]) != '':
            total_deaths = covid_csv_data[lines+1].split(",")[4]
            break
    for i in range(7):
        last_7_days_cases += int((covid_csv_data[i+3].split(",")[6]))
    return last_7_days_cases, int(current_hospital_cases), int(total_deaths)
Python
def process_covid_json_data(local_covid_data: dict, nation_covid_data: dict) -> dict:
    """
    Processes both local and national covid data and returns it in a dictionary
    """
    logger.info("process covid json data ran")
    data = {"localCases7days": 0,
            "natCases7days": 0,
            "cumulativeDeaths": 0,
            "hospitalCases": 0
            }
    for i in range(7):
        data["localCases7days"] += int(local_covid_data['data'][i]['newCasesByPublishDate'])
    for j in range(7):
        data["natCases7days"] += int(nation_covid_data['data'][j]['newCasesByPublishDate'])
    for k in range(len(nation_covid_data['data'])):
        if nation_covid_data['data'][k]['hospitalCases'] is not None:
            data["hospitalCases"] = int(nation_covid_data['data'][k]['hospitalCases'])
            break
    data["cumulativeDeaths"] = int(nation_covid_data['data'][1]['cumDeaths28DaysByDeathDate'])
    return data
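For illustration, a hedged sketch of the payload shape this function expects (all field values below are made up, and the module-level logger is assumed to be configured):

local = {'data': [{'newCasesByPublishDate': 10} for _ in range(7)]}
nation = {'data': [{'newCasesByPublishDate': 5,
                    'hospitalCases': 100,
                    'cumDeaths28DaysByDeathDate': 2000} for _ in range(7)]}

# process_covid_json_data(local, nation) would then return:
# {'localCases7days': 70, 'natCases7days': 35, 'cumulativeDeaths': 2000, 'hospitalCases': 100}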
Python
def covid_API_request(location: str, location_type: str) -> dict:
    """
    Gets both local and national covid data from the covid API in JSON format,
    runs the process covid json data function on it and returns the covid data
    """
    global covid_data
    logger.info("covid API request ran")
    area_name = ('areaName=' + location)
    area_type = ('areaType=' + location_type)
    local_only = [area_type, area_name]
    nat_only = ['areaType=nation', 'areaName=' + config['values']["nationareaname"]]
    cases_and_deaths = {
        "date": "date",
        "areaName": "areaName",
        "newCasesByPublishDate": "newCasesByPublishDate",
        "hospitalCases": "hospitalCases",
        "cumDeaths28DaysByDeathDate": "cumDeaths28DaysByDeathDate"
    }
    api_local = Cov19API(filters=local_only, structure=cases_and_deaths)
    api_nat = Cov19API(filters=nat_only, structure=cases_and_deaths)
    data_local = api_local.get_json()
    data_nat = api_nat.get_json()
    covid_data = process_covid_json_data(data_local, data_nat)
    logger.info("covid data updated")
    return covid_data
Python
def update_covid():
    """uses a test to see if it will return valid data and prevent the program from crashing"""
    global covid_data
    if testcase == True:
        covid_API_request(config['values']['location'], "Ltla")
    else:
        logger.warning("Covid API request not working")
        covid_data = {"localCases7days": "error getting data from covid 19 API",
                      "natCases7days": 0,
                      "cumulativeDeaths": 0,
                      "hospitalCases": 0
                      }
Python
def news_API_request(covid_terms: str) -> dict:
    """Using the argument covid terms this function returns a dictionary of
    news articles by using the newsapi."""
    # creates the dictionary using the news API
    logger.info("news API request ran")
    base_url = "https://newsapi.org/v2/everything?q="
    api_key = config['values']['apikey']
    complete_url = base_url + covid_terms + "&language=en&apiKey=" + api_key
    response = requests.get(complete_url)
    covid_dict = response.json()['articles']
    # iterates through each news article so its content stays within the max description length
    for x, values in enumerate(covid_dict):
        # gets rid of unnecessary data
        del covid_dict[x]['author'], covid_dict[x]['urlToImage'], covid_dict[x]['publishedAt']
        # splits content into sentences and then adds them until it reaches the character limit
        if len(covid_dict[x]['description']) < 259:
            covid_dict[x]['content'] = covid_dict[x]['description'] + " Read more about: " + covid_dict[x]['url']
        else:
            descrpt = covid_dict[x]['description'].split(".")
            covid_dict[x]['content'] = ""
            for i in range(len(descrpt) - 1):
                if len(covid_dict[x]['content'] + descrpt[i]) < 259:
                    covid_dict[x]['content'] += descrpt[i]
                else:
                    break
            covid_dict[x]['content'] += ". Read more about:" + covid_dict[x]['url']
        del covid_dict[x]['description']
    logger.info("news data updated")
    return covid_dict
Python
def main():
    """
    Main CLI function. Called by running `chromaqr` at the command line.
    """
    parser = argparse.ArgumentParser(description="Get three times the data into a QR code using RGB.")
    parser.add_argument("command", choices=["encode", "decode", "serve"],
                        help="command to perform, must be encode, decode, or serve")
    parser.add_argument("--inFile", type=str, help="path to input file")
    parser.add_argument("--text", type=str, help="text to encode")
    parser.add_argument("--outFile", type=str, help="path to output file")
    parser.add_argument("--debug", action="store_true", help="whether to decode in debug mode")
    parser.add_argument("--errorCorrection", choices=["LOW", "MED", "HIGH", "MAX"], default="MED",
                        help="level of error correction to use")
    parser.add_argument("--port", type=int, default=8000, help="port to host the server on")
    args = parser.parse_args()

    if args.command == "encode":
        if args.inFile is not None:
            with open(args.inFile, "rb") as f:
                inputBytes = f.read()
        else:
            inputBytes = args.text.encode("utf-8")

        encoder = Encoder(error_correction=args.errorCorrection)
        image = encoder.encode(inputBytes)

        if args.outFile is not None:
            image.save(args.outFile)
        else:
            print("error: you must provide an --outFile to encode to")

    elif args.command == "decode":
        if args.inFile is not None:
            inputImage = Image.open(args.inFile)
        else:
            print("error: you must provide an --inFile to decode")
            return

        decoder = Decoder(debug=args.debug)
        decoded_bytes = decoder.decode(inputImage)

        if args.outFile is not None:
            with open(args.outFile, "wb") as f:
                f.write(decoded_bytes)
        else:
            print(decoded_bytes.decode())

    elif args.command == "serve":
        from .server import run
        run(port=args.port)
Python
def decode(self, image: Image) -> bytearray:
    """
    Decode the given PIL Image containing a ChromaQR code into a bytearray.

    If no QR code can be found, an empty bytearray will be returned.

    If the `Decoder` object has the property `debug` set to `True`, the program
    will save the processed image for each of the codes.
    """
    decoded_bytes = b""

    if image.mode == "RGBA":
        converted_image = Image.new("RGB", image.size, (255, 255, 255))
        mask = image.split()[3]
        mask = ImageOps.colorize(mask, "#000000", "#ffffff", blackpoint=254, whitepoint=255)
        mask = mask.convert("1")
        converted_image.paste(image, mask=mask)
    else:
        converted_image = image

    if converted_image.size[0] > 1280 or converted_image.size[1] > 1280:
        converted_image.thumbnail((min(1280, converted_image.size[0]),
                                   min(1280, converted_image.size[1])))

    code_quad = None

    for i in range(3):
        rgb_image = ImageOps.autocontrast(converted_image.split()[i])
        rgb_image = ImageOps.colorize(rgb_image, "#000000", "#ffffff", blackpoint=100, whitepoint=180)
        decoded_codes = pyzbar.decode(rgb_image, symbols=[pyzbar.ZBarSymbol.QRCODE])

        if self.debug:
            rgb_image.save("debug_{}.png".format(i))

        if len(decoded_codes) == 0:
            return b""

        decoded_code = decoded_codes[0]
        decoded_bytes += decoded_code.data

        converted_image = converted_image.crop((
            decoded_code.rect.left,
            decoded_code.rect.top,
            decoded_code.rect.left + decoded_code.rect.width,
            decoded_code.rect.top + decoded_code.rect.height,
        ))

        if i == 0:
            code_quad = [
                [decoded_code.polygon[0].x, decoded_code.polygon[0].y],
                [decoded_code.polygon[1].x, decoded_code.polygon[1].y],
                [decoded_code.polygon[2].x, decoded_code.polygon[2].y],
                [decoded_code.polygon[3].x, decoded_code.polygon[3].y]
            ]

    self.result = decoded_bytes
    self.code_quad = code_quad
    return decoded_bytes
Python
def decode():
    """
    Decoding endpoint for the API.

    Takes a file upload called `image` or a URL pointing to an image called `url`.
    """
    try:
        if "url" in request.form.to_dict().keys():
            response = urllib.request.urlopen(request.form.to_dict()["url"])
            image = Image.open(BytesIO(response.read()))
        else:
            file = request.files["image"]
            image = Image.open(file.stream)
    except:
        return Response(json.dumps({
            "method": "decode",
            "success": False,
            "error": "no image file was recognised in your request, either upload a file with the identifier 'image' or submit a URL called 'url'"
        }), status=400, mimetype="application/json")

    decoder = Decoder()
    result = decoder.decode(image).decode("utf-8")

    if result != "":
        return Response(json.dumps({
            "method": "decode",
            "success": True,
            "result": result,
            "coordinates": decoder.code_quad
        }), mimetype="application/json")
    else:
        return Response(json.dumps({
            "method": "decode",
            "success": False,
            "error": "no ChromaQR code was found in the uploaded image"
        }), status=404, mimetype="application/json")
Python
def is_prime(number):
    """
    Check number is prime or not

    :param number: number
    :return: Boolean
    """
    # _div = number - 1
    # check = 1
    # while _div > 1:
    #     if number % _div == 0:
    #         check = 0
    #         break
    #     _div -= 1
    #
    # if check == 1:
    #     return True
    # else:
    #     return False

    # for i in range(2, number+1):
    #     for j in range(2, i):
    #         if i % j == 0:
    #             break
    #     else:
    #         print(F"{i} Number is prime")

    for i in range(2, number):
        if number % i == 0:
            return False
    return True
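A quick usage check (illustrative; note that this simple trial-division version also returns True for 0 and 1, since the loop body never runs for them):

print([n for n in range(2, 20) if is_prime(n)])  # [2, 3, 5, 7, 11, 13, 17, 19]
print(is_prime(97))   # True
print(is_prime(100))  # False -- divisible by 2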
Python
def list_vowels(string):
    """
    create list of vowels from string

    :param string:
    :return:
    """
    vowels = list()
    for word in string.split(" "):
        if is_vowel(word.lower()):
            vowels.append(word)
    print(vowels)
Python
def is_palindrome(number):
    """
    check if the given number (passed as a string or other sequence) reads the
    same forwards and backwards

    :param number:
    :return:
    """
    for i, j in zip(number, number[::-1]):
        if i != j:
            return False
    return True
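Because the function slices and zips its argument, the number should be passed as a string (or other sequence); a short illustrative check:

print(is_palindrome("12321"))    # True
print(is_palindrome("12345"))    # False
print(is_palindrome(str(9009)))  # True -- convert ints to str before calling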
Python
def generate_create_table_command(self):
    """
    Generate create table command based on self.data

    :return:
    """
    statement = "CREATE TABLE IF NOT EXISTS {} (".format(
        self.data.get('table_name'))
    statement += ", ".join([
        "{} {}".format(key, value)
        for key, value in self.data.get('table_structure').items()
    ])
    statement += ")"
    return statement
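For illustration, with a hypothetical self.data shaped like the dict below (table and column names are made up), the same string-building steps yield the following SQL:

data = {
    'table_name': 'users',
    'table_structure': {'id': 'INTEGER PRIMARY KEY', 'name': 'TEXT', 'age': 'INTEGER'},
}

statement = "CREATE TABLE IF NOT EXISTS {} (".format(data.get('table_name'))
statement += ", ".join("{} {}".format(k, v) for k, v in data.get('table_structure').items())
statement += ")"
print(statement)
# CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, name TEXT, age INTEGER)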
Python
def generate_insert_statement(self):
    """
    Generate insert statement based on the provided data

    :return:
    """
    insert_data = self.data.get('insert_data')
    _fields = ", ".join([
        field for field in insert_data.keys()
    ])
    place_holder = ", ".join(['?' for value in insert_data.values()])
    return F"INSERT INTO {self.data.get('table_name')}({_fields}) " \
           F"VALUES({place_holder})"
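Likewise, assuming a hypothetical self.data with an insert_data mapping (names are illustrative), the generated statement pairs with the values passed separately to the database driver:

data = {
    'table_name': 'users',
    'insert_data': {'name': 'Alice', 'age': 30},
}

fields = ", ".join(data['insert_data'].keys())
placeholders = ", ".join('?' for _ in data['insert_data'])
sql = f"INSERT INTO {data['table_name']}({fields}) VALUES({placeholders})"
print(sql)  # INSERT INTO users(name, age) VALUES(?, ?)
# e.g. with sqlite3: cursor.execute(sql, tuple(data['insert_data'].values()))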
Python
def GetConfig(user_config):
    """Load and return benchmark config.

    Args:
        user_config: user supplied configuration (flags and config file)

    Returns:
        loaded benchmark configuration
    """
    return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
Python
def CheckPrerequisites(_):
    """Verify that the required prerequisites are met.

    Raises:
        perfkitbenchmarker.errors.Setup.InvalidFlagConfigurationError:
            On invalid flag configuration.
    """
    if not FLAGS.openmpi_enable_shared:
        raise errors.Setup.InvalidFlagConfigurationError(
            'The flag openmpi_enable_shared must be True in order to run Horovod.')
Python
def _UpdateBenchmarkSpecWithFlags(benchmark_spec):
    """Update the benchmark_spec with supplied command line flags.

    Args:
        benchmark_spec: benchmark specification to update
    """
    gpus_per_node = cuda_toolkit.QueryNumberOfGpus(benchmark_spec.vms[0])
    num_vms = len(benchmark_spec.vms)
    total_gpus = gpus_per_node * num_vms

    benchmark_spec.gpus_per_node = gpus_per_node
    benchmark_spec.num_vms = num_vms
    benchmark_spec.total_gpus = total_gpus
    benchmark_spec.model = FLAGS.horovod_model
    benchmark_spec.batch_size = FLAGS.horovod_batch_size
    benchmark_spec.num_epochs = FLAGS.horovod_num_epochs
    benchmark_spec.synthetic = FLAGS.horovod_synthetic
    benchmark_spec.deep_learning_examples_commit = (
        FLAGS.horovod_deep_learning_examples_commit)
Python
def _CopyAndUpdateRunScripts(vm):
    """Copy and update all necessary run scripts on the given vm.

    Args:
        vm: vm to place and update run scripts on
    """
    vm.InstallPackages('git')
    vm.RemoteCommand('rm -rf deep-learning-models')
    vm.RemoteCommand('git clone %s' % DEEP_LEARNING_EXAMPLES_REPO)
    vm.RemoteCommand(
        'cd deep-learning-models && git checkout {}'.format(
            FLAGS.horovod_deep_learning_examples_commit)
    )
    # Copy the benchmark script from the github repo to the home directory.
    vm.RemoteCommand(
        'cp %s .' % posixpath.join('deep-learning-models', 'models', 'resnet',
                                   'tensorflow', 'train_imagenet_resnet_hvd.py'))
Python
def _PrepareHorovod(vm):
    """Install Horovod on a single vm.

    Args:
        vm: vm to operate on
    """
    # TODO(ferneyhough): Consider moving horovod installation to a package.
    logging.info('Installing Horovod on %s', vm)
    vm.AuthenticateVm()

    if not FLAGS.horovod_synthetic:
        # Install ILSVRC2012 from the mlperf benchmark.
        vm.InstallPreprovisionedBenchmarkData(
            'mlperf', mlperf_benchmark.BENCHMARK_DATA, vm_util.VM_TMP_DIR)
        vm.RemoteCommand('tar xvf %s' %
                         posixpath.join(vm_util.VM_TMP_DIR, 'ILSVRC2012.tar'))

    vm.Install('tensorflow')
    vm.Install('openmpi')
    vm.RemoteCommand('sudo HOROVOD_GPU_ALLREDUCE=NCCL pip install '
                     '--no-cache-dir horovod')
Python
def Prepare(benchmark_spec):
    """Install and set up Horovod on the target vms.

    Args:
        benchmark_spec: The benchmark specification
    """
    vms = benchmark_spec.vms
    vm_util.RunThreaded(_PrepareHorovod, vms)
    _UpdateBenchmarkSpecWithFlags(benchmark_spec)
    for vm in vms:
        _CopyAndUpdateRunScripts(vm)
    hpc_util.CreateMachineFile(vms, lambda _: benchmark_spec.gpus_per_node,
                               MACHINEFILE)
Python
def _CreateMetadataDict(benchmark_spec):
    """Create metadata dict to be used in run results.

    Args:
        benchmark_spec: benchmark spec

    Returns:
        metadata dict
    """
    vm = benchmark_spec.vms[0]
    metadata = dict()
    metadata.update(cuda_toolkit.GetMetadata(vm))
    metadata['benchmark_version'] = BENCHMARK_VERSION
    metadata['num_nodes'] = len(benchmark_spec.vms)
    metadata['total_gpus'] = int(benchmark_spec.total_gpus)
    metadata['model'] = benchmark_spec.model
    metadata['batch_size'] = benchmark_spec.batch_size
    metadata['num_epochs'] = benchmark_spec.num_epochs
    metadata['synthetic'] = benchmark_spec.synthetic
    metadata['deep_learning_examples_commit'] = (
        benchmark_spec.deep_learning_examples_commit)
    return metadata
Python
def _ExtractThroughputAndRuntime(output):
    """Extract throughput and runtime from Horovod output.

    Args:
        output: Horovod output

    Returns:
        Tuple of:
            Average throughput in images per second (float),
            Runtime in seconds (float).
    """
    # Start from the last line and iterate backwards.
    throughput_samples = []
    runtime = 0
    for line in output.splitlines()[::-1]:
        split_line = line.split()
        if split_line[0].startswith('Finished'):
            runtime = float(split_line[2])
            continue
        split_line = line.split()
        if split_line[0] == '1':
            # Done parsing.
            break
        throughput_samples.append(float(split_line[2]))
    avg_throughput = sum(throughput_samples) / len(throughput_samples)
    return round(avg_throughput, 1), round(runtime, 1)
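For reference, here is a minimal, hypothetical sketch of how this parser could be exercised. The fabricated log fragment below only mirrors the column layout the function itself assumes (step number in column 0, images/sec in column 2, and a trailing 'Finished in <seconds>' line); real training-script output may differ.

# Hypothetical log fragment shaped like the columns the parser expects.
fake_output = '\n'.join([
    '1 1.0 2893.9 6.9',        # step 1: parsing stops at this line
    '100 1.0 3050.2 5.1',
    '200 2.0 3101.8 4.8',
    'Finished in 642.3',
])

avg_images_sec, runtime_seconds = _ExtractThroughputAndRuntime(fake_output)
print(avg_images_sec, runtime_seconds)  # -> 3076.0 642.3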
Python
def _MakeSamplesFromOutput(benchmark_spec, output):
  """Create a sample containing the measured Horovod throughput.

  Args:
    benchmark_spec: benchmark spec
    output: Horovod output

  Returns:
    a Sample containing the Horovod throughput in images/sec
  """
  metadata = _CreateMetadataDict(benchmark_spec)
  images_sec, runtime = _ExtractThroughputAndRuntime(output)
  samples = []
  samples.append(sample.Sample('Training throughput', images_sec,
                               'images/second', metadata))
  samples.append(sample.Sample('Runtime', runtime, 'seconds', metadata))
  return samples
Python
def Run(benchmark_spec):
  """Run Horovod on the cluster.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
      required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  vms = benchmark_spec.vms
  master_vm = vms[0]

  # All this madness was copied from the horovod example training script in
  # the tensorflow_p36 environment on the AWS DLAMI.
  # https://aws.amazon.com/releasenotes/deep-learning-ami-ubuntu-version-21-2
  run_command = (
      'mpirun -np {num_gpus} -hostfile HOSTFILE -mca plm_rsh_no_tree_spawn 1 '
      '-bind-to socket -map-by slot -x HOROVOD_HIERARCHICAL_ALLREDUCE=1 '
      '-x HOROVOD_FUSION_THRESHOLD=16777216 -x NCCL_MIN_NRINGS=4 '
      '-x LD_LIBRARY_PATH -x PATH -mca pml ob1 -mca btl ^openib '
      '-mca btl_tcp_if_exclude lo,docker0 -x TF_CPP_MIN_LOG_LEVEL=0 '
      'python -W ignore {training_script} --num_epochs {num_epochs} '
      '-b {batch_size} --model {model} --fp16 --clear_log'
  ).format(
      num_gpus=benchmark_spec.total_gpus,
      training_script='train_imagenet_resnet_hvd.py',
      num_epochs=benchmark_spec.num_epochs,
      batch_size=benchmark_spec.batch_size,
      model=benchmark_spec.model)

  if benchmark_spec.synthetic:
    run_command += ' --synthetic'
    # The use of larc and loss scale is taken from the AWS DLAMI training
    # script (see comment above).
    if benchmark_spec.total_gpus >= 128:
      run_command += ' --use_larc --loss_scale 256'
  else:
    run_command += ' --data_dir ~/ILSVRC2012/ILSVRC2012 --warmup_epochs 10'
  _, stderr = master_vm.RobustRemoteCommand(run_command, should_log=True)
  return _MakeSamplesFromOutput(benchmark_spec, stderr)
Python
def _DeleteSecurityGroups(self):
  """Delete the security groups associated with this cluster."""
  if self.cluster_id:
    cmd = self.cmd_prefix + ['emr', 'describe-cluster',
                             '--cluster-id', self.cluster_id]
    stdout, _, _ = vm_util.IssueCommand(cmd)
    cluster_desc = json.loads(stdout)
    sec_object = cluster_desc['Cluster']['Ec2InstanceAttributes']
    manager_sg = sec_object[MANAGER_SG]
    worker_sg = sec_object[WORKER_SG]
    # The manager group and the worker group reference each other, so neither
    # can be deleted. First we delete the references to the manager group in
    # the worker group. Then we delete the manager group, and then, finally,
    # the worker group.

    # Remove all references to the manager group from the worker group.
    for proto, port in [('tcp', '0-65535'), ('udp', '0-65535'),
                        ('icmp', '-1')]:
      for group1, group2 in [(worker_sg, manager_sg),
                             (manager_sg, worker_sg)]:
        cmd = self.cmd_prefix + ['ec2', 'revoke-security-group-ingress',
                                 '--group-id=' + group1,
                                 '--source-group=' + group2,
                                 '--protocol=' + proto,
                                 '--port=' + port]
        vm_util.IssueCommand(cmd)

    # Now we need to delete the manager, then the worker.
    for group in manager_sg, worker_sg:
      sec_group = AwsSecurityGroup(self.cmd_prefix, group)
      sec_group.Delete()
Python
def _Exists(self):
  """Check to see whether the cluster exists."""
  if not self.cluster_id:
    return False
  cmd = self.cmd_prefix + ['emr', 'describe-cluster',
                           '--cluster-id', self.cluster_id]
  stdout, _, rc = vm_util.IssueCommand(cmd)
  if rc != 0:
    return False
  result = json.loads(stdout)
  if result['Cluster']['Status']['State'] in INVALID_STATES:
    return False
  else:
    return True
Python
def _IsReady(self):
  """Check to see if the cluster is ready."""
  logging.info('Checking _Ready cluster: %s', self.cluster_id)
  cmd = self.cmd_prefix + ['emr', 'describe-cluster',
                           '--cluster-id', self.cluster_id]
  stdout, _, rc = vm_util.IssueCommand(cmd)
  result = json.loads(stdout)
  # TODO(saksena): Handle error outcomes when spinning up emr clusters.
  return result['Cluster']['Status']['State'] == READY_STATE
Python
def _IsStepDone(self, step_id):
  """Determine whether the step is done.

  Args:
    step_id: The step id to query.

  Returns:
    A dictionary describing the step if the step is complete,
    None otherwise.
  """
  cmd = self.cmd_prefix + ['emr', 'describe-step', '--cluster-id',
                           self.cluster_id, '--step-id', step_id]
  stdout, _, _ = vm_util.IssueCommand(cmd)
  result = json.loads(stdout)
  state = result['Step']['Status']['State']
  if state == 'COMPLETED' or state == 'FAILED':
    return result
  else:
    return None
Python
def generate_data(self, source_dir, udpate_default_fs, num_files, size_file):
  """Method to generate data using a distributed job on the cluster."""
  @vm_util.Retry(timeout=EMR_TIMEOUT, poll_interval=5, fuzz=0)
  def WaitForStep(step_id):
    result = self._IsStepDone(step_id)
    if result is None:
      raise EMRRetryableException('Step {0} not complete.'.format(step_id))
    return result

  job_arguments = ['TestDFSIO']
  if udpate_default_fs:
    job_arguments.append('-Dfs.default.name={}'.format(source_dir))
  job_arguments.append('-Dtest.build.data={}'.format(source_dir))
  job_arguments.extend(['-write', '-nrFiles', str(num_files), '-fileSize',
                        str(size_file)])
  arg_spec = '[' + ','.join(job_arguments) + ']'

  step_type_spec = 'Type=CUSTOM_JAR'
  step_name = 'Name="TestDFSIO"'
  step_action_on_failure = 'ActionOnFailure=CONTINUE'
  jar_spec = GENERATE_HADOOP_JAR

  step_list = [step_type_spec, step_name, step_action_on_failure, jar_spec]
  step_list.append('Args=' + arg_spec)
  step_string = ','.join(step_list)

  step_cmd = self.cmd_prefix + ['emr', 'add-steps', '--cluster-id',
                                self.cluster_id, '--steps', step_string]
  stdout, _, _ = vm_util.IssueCommand(step_cmd)
  result = json.loads(stdout)
  step_id = result['StepIds'][0]

  result = WaitForStep(step_id)
  step_state = result['Step']['Status']['State']
  if step_state != 'COMPLETED':
    return {dpb_service.SUCCESS: False}
  else:
    return {dpb_service.SUCCESS: True}
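For orientation, a small sketch of the step specification string this method assembles for 'aws emr add-steps'. Every value below is a hypothetical stand-in, including GENERATE_HADOOP_JAR, which is a module-level constant not shown in these snippets.

# Hypothetical stand-ins, purely to show the shape of the '--steps' argument.
GENERATE_HADOOP_JAR = ('Jar=file:///usr/lib/hadoop-mapreduce/'
                       'hadoop-mapreduce-client-jobclient.jar')
source_dir = 's3://example-bucket/dfsio'

job_arguments = ['TestDFSIO',
                 '-Dfs.default.name={}'.format(source_dir),
                 '-Dtest.build.data={}'.format(source_dir),
                 '-write', '-nrFiles', '10', '-fileSize', '1000']
arg_spec = '[' + ','.join(job_arguments) + ']'
step_string = ','.join(['Type=CUSTOM_JAR', 'Name="TestDFSIO"',
                        'ActionOnFailure=CONTINUE', GENERATE_HADOOP_JAR,
                        'Args=' + arg_spec])
print(step_string)
# Type=CUSTOM_JAR,Name="TestDFSIO",ActionOnFailure=CONTINUE,Jar=file:///usr/...,Args=[TestDFSIO,...]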
Python
def read_data(self, source_dir, udpate_default_fs, num_files, size_file):
  """Method to read data using a distributed job on the cluster."""
  @vm_util.Retry(timeout=EMR_TIMEOUT, poll_interval=5, fuzz=0)
  def WaitForStep(step_id):
    result = self._IsStepDone(step_id)
    if result is None:
      raise EMRRetryableException('Step {0} not complete.'.format(step_id))
    return result

  job_arguments = ['TestDFSIO']
  if udpate_default_fs:
    job_arguments.append('-Dfs.default.name={}'.format(source_dir))
  job_arguments.append('-Dtest.build.data={}'.format(source_dir))
  job_arguments.extend(['-read', '-nrFiles', str(num_files), '-fileSize',
                        str(size_file)])
  arg_spec = '[' + ','.join(job_arguments) + ']'

  step_type_spec = 'Type=CUSTOM_JAR'
  step_name = 'Name="TestDFSIO"'
  step_action_on_failure = 'ActionOnFailure=CONTINUE'
  jar_spec = GENERATE_HADOOP_JAR

  step_list = [step_type_spec, step_name, step_action_on_failure, jar_spec]
  step_list.append('Args=' + arg_spec)
  step_string = ','.join(step_list)

  step_cmd = self.cmd_prefix + ['emr', 'add-steps', '--cluster-id',
                                self.cluster_id, '--steps', step_string]
  stdout, _, _ = vm_util.IssueCommand(step_cmd)
  result = json.loads(stdout)
  step_id = result['StepIds'][0]

  result = WaitForStep(step_id)
  step_state = result['Step']['Status']['State']
  if step_state != 'COMPLETED':
    return {dpb_service.SUCCESS: False}
  else:
    return {dpb_service.SUCCESS: True}
Python
def distributed_copy(self, source_location, destination_location):
  """Method to copy data using a distributed job on the cluster."""
  @vm_util.Retry(timeout=EMR_TIMEOUT, poll_interval=5, fuzz=0)
  def WaitForStep(step_id):
    result = self._IsStepDone(step_id)
    if result is None:
      raise EMRRetryableException('Step {0} not complete.'.format(step_id))
    return result

  job_arguments = ['s3-dist-cp', '--s3Endpoint=s3.amazonaws.com']
  job_arguments.append('--src={}'.format(source_location))
  job_arguments.append('--dest={}'.format(destination_location))
  arg_spec = '[' + ','.join(job_arguments) + ']'

  step_type_spec = 'Type=CUSTOM_JAR'
  step_name = 'Name="S3DistCp"'
  step_action_on_failure = 'ActionOnFailure=CONTINUE'
  jar_spec = 'Jar=command-runner.jar'

  step_list = [step_type_spec, step_name, step_action_on_failure, jar_spec]
  step_list.append('Args=' + arg_spec)
  step_string = ','.join(step_list)

  step_cmd = self.cmd_prefix + ['emr', 'add-steps', '--cluster-id',
                                self.cluster_id, '--steps', step_string]
  stdout, _, _ = vm_util.IssueCommand(step_cmd)
  result = json.loads(stdout)
  step_id = result['StepIds'][0]
  metrics = {}

  result = WaitForStep(step_id)
  pending_time = result['Step']['Status']['Timeline']['CreationDateTime']
  start_time = result['Step']['Status']['Timeline']['StartDateTime']
  end_time = result['Step']['Status']['Timeline']['EndDateTime']
  metrics[dpb_service.WAITING] = start_time - pending_time
  metrics[dpb_service.RUNTIME] = end_time - start_time
  step_state = result['Step']['Status']['State']
  metrics[dpb_service.SUCCESS] = step_state == 'COMPLETED'
  return metrics
Python
def cleanup_data(self, base_dir, udpate_default_fs):
  """Method to cleanup data using a distributed job on the cluster."""
  @vm_util.Retry(timeout=EMR_TIMEOUT, poll_interval=5, fuzz=0)
  def WaitForStep(step_id):
    result = self._IsStepDone(step_id)
    if result is None:
      raise EMRRetryableException('Step {0} not complete.'.format(step_id))
    return result

  job_arguments = ['TestDFSIO']
  if udpate_default_fs:
    job_arguments.append('-Dfs.default.name={}'.format(base_dir))
  job_arguments.append('-Dtest.build.data={}'.format(base_dir))
  job_arguments.append('-clean')
  arg_spec = '[' + ','.join(job_arguments) + ']'

  step_type_spec = 'Type=CUSTOM_JAR'
  step_name = 'Name="TestDFSIO"'
  step_action_on_failure = 'ActionOnFailure=CONTINUE'
  jar_spec = GENERATE_HADOOP_JAR  # How will we handle a class name ????

  step_list = [step_type_spec, step_name, step_action_on_failure, jar_spec]
  step_list.append('Args=' + arg_spec)
  step_string = ','.join(step_list)

  step_cmd = self.cmd_prefix + ['emr', 'add-steps', '--cluster-id',
                                self.cluster_id, '--steps', step_string]
  stdout, _, _ = vm_util.IssueCommand(step_cmd)
  result = json.loads(stdout)
  step_id = result['StepIds'][0]

  result = WaitForStep(step_id)
  step_state = result['Step']['Status']['State']
  if step_state != 'COMPLETED':
    return {dpb_service.SUCCESS: False}
  else:
    rb_step_cmd = self.cmd_prefix + ['s3', 'rb', base_dir, '--force']
    stdout, _, _ = vm_util.IssueCommand(rb_step_cmd)
    return {dpb_service.SUCCESS: True}
Python
def GetMetadata(self):
  """Return a dictionary of the metadata for this cluster."""
  basic_data = super(AwsDpbEmr, self).GetMetadata()
  basic_data['dpb_service'] = 'emr_{}'.format(self.emr_release_label)
  return basic_data
Python
def WhitelistIPAddress(self, ip_address):
  """To whitelist the IP address on the cluster."""
  self.whitelist_ip = ip_address

  cmd = [azure.AZURE_PATH, 'sql', 'server', 'firewall-rule', 'create',
         '--name', self.whitelist_ip,
         '--resource-group', self.resource_group,
         '--server', self.server_name,
         '--end-ip-address', self.whitelist_ip,
         '--start-ip-address', self.whitelist_ip]
  vm_util.IssueCommand(cmd)
Python
def _Exists(self):
  """Method to validate the existence of cluster.

  Returns:
    Boolean value indicating the existence of a cluster.
  """
  stdout, _, _ = self.__DescribeCluster()
  if not stdout or (json.loads(stdout)['status'] not in VALID_EXIST_STATUSES):
    return False
  else:
    return True
Python
def _IsDeleting(self):
  """Method to check if the cluster is pausing."""
  stdout, _, _ = self.__DescribeCluster()
  if not stdout:
    return False
  else:
    return json.loads(stdout)['status'] in PAUSING_STATUSES
Python
def _DeleteDependencies(self):
  """Delete dependencies of the cluster."""
  if self.whitelist_ip is not None:
    cmd = [azure.AZURE_PATH, 'sql', 'server', 'firewall-rule', 'delete',
           '--name', self.whitelist_ip,
           '--resource-group', self.resource_group,
           '--server', self.server_name]
    vm_util.IssueCommand(cmd)
Python
def GetMetadata(self):
  """Return a dictionary of the metadata for this cluster."""
  basic_data = super(Azuresqldatawarehouse, self).GetMetadata()
  basic_data['resource_group'] = self.resource_group
  basic_data['server_name'] = self.server_name
  return basic_data
Python
def InstallAndAuthenticateRunner(self, vm):
  """Method to perform installation and authentication of azure runner.

  sqlcmd client
  https://docs.microsoft.com/en-us/sql/linux/sql-server-linux-setup-tools?view=sql-server-2017#ubuntu
  azure cli
  https://docs.microsoft.com/en-us/cli/azure/install-azure-cli-apt?view=azure-cli-latest#install

  Args:
    vm: Client vm on which the script will be run.
  """
  vm.Install('mssql_tools')
Python
def _UpdateBenchmarkSpecWithFlags(benchmark_spec):
  """Update the benchmark_spec with supplied command line flags.

  Args:
    benchmark_spec: benchmark specification to update
  """
  benchmark_spec.depth = FLAGS.resnet_depth
  benchmark_spec.mode = FLAGS.resnet_mode
  benchmark_spec.train_batch_size = FLAGS.resnet_train_batch_size
  benchmark_spec.eval_batch_size = FLAGS.resnet_eval_batch_size
  benchmark_spec.data_format = FLAGS.resnet_data_format
  benchmark_spec.commit = cloud_tpu_models.GetCommit(benchmark_spec.vms[0])
  benchmark_spec.skip_host_call = FLAGS.resnet_skip_host_call
  benchmark_spec.data_dir = FLAGS.imagenet_data_dir
  benchmark_spec.num_train_images = FLAGS.imagenet_num_train_images
  benchmark_spec.num_eval_images = FLAGS.imagenet_num_eval_images
  benchmark_spec.num_examples_per_epoch = (
      float(benchmark_spec.num_train_images) /
      benchmark_spec.train_batch_size)
  benchmark_spec.train_epochs = FLAGS.resnet_train_epochs
  benchmark_spec.train_steps = int(
      benchmark_spec.train_epochs * benchmark_spec.num_examples_per_epoch)
  benchmark_spec.epochs_per_eval = FLAGS.resnet_epochs_per_eval
  benchmark_spec.steps_per_eval = int(
      benchmark_spec.epochs_per_eval * benchmark_spec.num_examples_per_epoch)
Python
def _CreateMetadataDict(benchmark_spec):
  """Create metadata dict to be used in run results.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
      required to run the benchmark.

  Returns:
    metadata dict
  """
  metadata = mnist_benchmark.CreateMetadataDict(benchmark_spec)
  metadata.update({
      'depth': benchmark_spec.depth,
      'mode': benchmark_spec.mode,
      'data_format': benchmark_spec.data_format,
      'precision': benchmark_spec.precision,
      'skip_host_call': benchmark_spec.skip_host_call,
      'epochs_per_eval': benchmark_spec.epochs_per_eval,
      'steps_per_eval': benchmark_spec.steps_per_eval,
      'train_batch_size': benchmark_spec.train_batch_size,
      'eval_batch_size': benchmark_spec.eval_batch_size
  })
  return metadata
Python
def _ParseDateTime(wall_time):
  """Parse date and time from output log.

  Args:
    wall_time: date and time from output log
      Example: 0626 15:10:23.018357

  Returns:
    datetime
  """
  if wall_time:
    current_date = datetime.datetime.now()
    current_month = current_date.month
    run_month = wall_time[0:2]
    if run_month == '12' and current_month == 1:
      year = current_date.year - 1
    else:
      year = current_date.year
    return datetime.datetime.strptime(
        '{year}{datetime}'.format(year=year, datetime=wall_time),
        '%Y%m%d %H:%M:%S.%f')
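A quick usage sketch of the parser above; the timestamp is hypothetical and follows the '%m%d %H:%M:%S.%f' shape shown in the docstring example.

import datetime

parsed = _ParseDateTime('0626 15:10:23.018357')
assert parsed.month == 6 and parsed.day == 26
# The year is inferred from "now", stepping back one year across a
# December -> January rollover.
assert parsed.year in (datetime.datetime.now().year,
                       datetime.datetime.now().year - 1)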
Python
def MakeSamplesFromEvalOutput(metadata, output, elapsed_seconds):
  """Create a sample containing evaluation metrics.

  Args:
    metadata: dict containing all the metadata to report.
    output: string, command output
    elapsed_seconds: float, elapsed seconds from saved checkpoint.

  Example output:
    perfkitbenchmarker/tests/linux_benchmarks/resnet_benchmark_test.py

  Returns:
    a Sample containing evaluation metrics
  """
  pattern = (r'Saving dict for global step \d+: global_step = (\d+), '
             r'loss = (\d+\.\d+), top_1_accuracy = (\d+\.\d+), '
             r'top_5_accuracy = (\d+\.\d+)')
  step, loss, top_1_accuracy, top_5_accuracy = (
      regex_util.ExtractExactlyOneMatch(pattern, output))
  metadata_copy = metadata.copy()
  step = int(step)
  metadata_copy['step'] = step
  num_examples_per_epoch = metadata['num_examples_per_epoch']
  metadata_copy['epoch'] = step / num_examples_per_epoch
  metadata_copy['elapsed_seconds'] = elapsed_seconds
  return [sample.Sample('Eval Loss', float(loss), '', metadata_copy),
          # In the case of top-1 score, the trained model checks if the top
          # class (the one having the highest probability) is the same as the
          # target label. In the case of top-5 score, the trained model checks
          # if the target label is one of your top 5 predictions (the 5 ones
          # with the highest probabilities).
          sample.Sample('Top 1 Accuracy', float(top_1_accuracy) * 100, '%',
                        metadata_copy),
          sample.Sample('Top 5 Accuracy', float(top_5_accuracy) * 100, '%',
                        metadata_copy)]
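To illustrate the log-line format the regex above targets, a minimal sketch using the standard re module in place of PKB's regex_util helper; the line itself is hypothetical but constructed to match the pattern.

import re

log_line = ('Saving dict for global step 1251: global_step = 1251, '
            'loss = 4.0891, top_1_accuracy = 0.3512, top_5_accuracy = 0.6127')
pattern = (r'Saving dict for global step \d+: global_step = (\d+), '
           r'loss = (\d+\.\d+), top_1_accuracy = (\d+\.\d+), '
           r'top_5_accuracy = (\d+\.\d+)')
step, loss, top1, top5 = re.search(pattern, log_line).groups()
print(step, loss, top1, top5)  # 1251 4.0891 0.3512 0.6127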
Python
def Run(benchmark_spec):
  """Run ResNet on the cluster.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
      required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  _UpdateBenchmarkSpecWithFlags(benchmark_spec)
  vm = benchmark_spec.vms[0]
  resnet_benchmark_script = 'resnet_main.py'
  resnet_benchmark_cmd = (
      '{env_cmd} && cd tpu/models/official/resnet && '
      'python {script} '
      '--use_tpu={use_tpu} '
      '--data_dir={data_dir} '
      '--model_dir={model_dir} '
      '--resnet_depth={depth} '
      '--train_batch_size={train_batch_size} '
      '--eval_batch_size={eval_batch_size} '
      '--iterations_per_loop={iterations} '
      '--data_format={data_format} '
      '--precision={precision} '
      '--skip_host_call={skip_host_call} '
      '--num_train_images={num_train_images} '
      '--num_eval_images={num_eval_images}'.format(
          env_cmd=benchmark_spec.env_cmd,
          script=resnet_benchmark_script,
          use_tpu=bool(benchmark_spec.tpus),
          data_dir=benchmark_spec.data_dir,
          model_dir=benchmark_spec.model_dir,
          depth=benchmark_spec.depth,
          train_batch_size=benchmark_spec.train_batch_size,
          eval_batch_size=benchmark_spec.eval_batch_size,
          iterations=benchmark_spec.iterations,
          data_format=benchmark_spec.data_format,
          precision=benchmark_spec.precision,
          skip_host_call=benchmark_spec.skip_host_call,
          num_train_images=benchmark_spec.num_train_images,
          num_eval_images=benchmark_spec.num_eval_images))
  if FLAGS.tf_device == 'gpu':
    resnet_benchmark_cmd = '{env} {cmd}'.format(
        env=tensorflow.GetEnvironmentVars(vm), cmd=resnet_benchmark_cmd)
  samples = []
  metadata = _CreateMetadataDict(benchmark_spec)
  elapsed_seconds = 0
  steps_per_eval = benchmark_spec.steps_per_eval
  train_steps = benchmark_spec.train_steps
  for step in range(steps_per_eval, train_steps + steps_per_eval,
                    steps_per_eval):
    step = min(step, train_steps)
    resnet_benchmark_cmd_step = '{cmd} --train_steps={step}'.format(
        cmd=resnet_benchmark_cmd, step=step)

    if benchmark_spec.mode in ('train', 'train_and_eval'):
      if benchmark_spec.tpus:
        tpu = benchmark_spec.tpu_groups['train'].GetName()
        num_cores = '--num_cores={}'.format(
            benchmark_spec.tpu_groups['train'].GetNumShards())
      else:
        tpu = num_cores = ''
      resnet_benchmark_train_cmd = (
          '{cmd} --tpu={tpu} --mode=train {num_cores}'.format(
              cmd=resnet_benchmark_cmd_step, tpu=tpu, num_cores=num_cores))
      start = time.time()
      stdout, stderr = vm.RobustRemoteCommand(resnet_benchmark_train_cmd,
                                              should_log=True)
      elapsed_seconds += (time.time() - start)
      samples.extend(mnist_benchmark.MakeSamplesFromTrainOutput(
          metadata, stdout + stderr, elapsed_seconds, step))

    if benchmark_spec.mode in ('train_and_eval', 'eval'):
      if benchmark_spec.tpus:
        tpu = benchmark_spec.tpu_groups['eval'].GetName()
        num_cores = '--num_cores={}'.format(
            benchmark_spec.tpu_groups['eval'].GetNumShards())
      else:
        tpu = num_cores = ''
      resnet_benchmark_eval_cmd = (
          '{cmd} --tpu={tpu} --mode=eval {num_cores}'.format(
              cmd=resnet_benchmark_cmd_step, tpu=tpu, num_cores=num_cores))
      stdout, stderr = vm.RobustRemoteCommand(resnet_benchmark_eval_cmd,
                                              should_log=True)
      samples.extend(MakeSamplesFromEvalOutput(
          metadata, stdout + stderr, elapsed_seconds))
  return samples
Python
def Cleanup(benchmark_spec):
  """Cleanup ResNet on the cluster.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
      required to run the benchmark.
  """
  mnist_benchmark.Cleanup(benchmark_spec)
Python
def _Create(self):
  """Create a BigQuery cluster.

  Bigquery cluster creation is out of scope of the benchmarking.
  """
  raise NotImplementedError
Python
def _Delete(self):
  """Delete a BigQuery cluster.

  Bigquery cluster deletion is out of scope of benchmarking.
  """
  raise NotImplementedError
Python
def InstallAndAuthenticateRunner(self, vm):
  """Method to perform installation and authentication of bigquery runner.

  Native Bigquery client that ships with the google_cloud_sdk
  https://cloud.google.com/bigquery/docs/bq-command-line-too
  used as client.

  Args:
    vm: Client vm on which the script will be run.
  """
  vm.Install('google_cloud_sdk')
  gcp_util.AuthenticateServiceAccount(vm)
Python
def GetDpbServiceClass(dpb_service_type):
  """Gets the Data Processing Backend class corresponding to 'service_type'.

  Args:
    dpb_service_type: String service type as specified in configuration

  Returns:
    Implementation class corresponding to the argument dpb_service_type

  Raises:
    Exception: An invalid data processing backend service type was provided
  """
  return resource.GetResourceClass(
      BaseDpbService, SERVICE_TYPE=dpb_service_type)
Python
def SubmitJob(self, job_jar, class_name, job_poll_interval=None,
              job_stdout_file=None, job_arguments=None, job_type=None):
  """Submit a data processing job to the backend.

  Args:
    job_jar: Jar file to execute.
    class_name: Name of the main class.
    job_poll_interval: integer saying how often to poll for job completion.
      Not used by providers for which submit job is a synchronous operation.
    job_stdout_file: String giving the location of the file in which to put
      the standard out of the job.
    job_arguments: Arguments to pass to class_name. These are not the
      arguments passed to the wrapper that submits the job.
    job_type: Spark or Hadoop job

  Returns:
    dictionary, where success is true if the job succeeded, false otherwise.
    The dictionary may also contain an entry for running_time and pending_time
    if the platform reports those metrics.
  """
  pass
Python
def GetMetadata(self):
  """Return a dictionary of the metadata for this cluster."""
  basic_data = {
      'dpb_service': self.SERVICE_TYPE,
      'dpb_cluster_id': self.cluster_id,
      'dpb_cluster_shape': self.spec.worker_group.vm_spec.machine_type,
      'dpb_cluster_size': self.spec.worker_count
  }
  return basic_data
Python
def _ProcessWallTime(self, start_time, end_time):
  """Compute the wall time from the given start and end processing time.

  Args:
    start_time: Datetime value when the processing was started.
    end_time: Datetime value when the processing completed.

  Returns:
    Wall time in seconds.

  Raises:
    ValueError: Exception raised when invalid input is provided.
  """
  if start_time > end_time:
    raise ValueError('start_time cannot be later than the end_time')
  return (end_time - start_time).total_seconds()
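A minimal usage sketch with plain datetime objects; since the arithmetic does not touch self, the sketch passes None for it.

import datetime

start = datetime.datetime(2019, 6, 26, 15, 10, 0)
end = datetime.datetime(2019, 6, 26, 15, 12, 30)
print(_ProcessWallTime(None, start, end))  # 150.0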
Python
def GetExecutionJar(self, job_category, job_type):
  """Retrieve execution jar corresponding to the job_category and job_type.

  Args:
    job_category: String category of the job for eg. hadoop, spark, hive, etc.
    job_type: String name of the type of workload to be executed on the
      cluster, for eg. word_count, terasort, etc.

  Returns:
    The path to the execution jar on the cluster

  Raises:
    NotImplementedError: An unsupported combination of job_category and
      job_type was provided for execution on the cluster.
  """
  if (job_category not in self.JOB_JARS or
      job_type not in self.JOB_JARS[job_category]):
    raise NotImplementedError()
  return self.JOB_JARS[job_category][job_type]
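The lookup assumes a two-level JOB_JARS mapping of job_category -> job_type -> jar path. A hypothetical illustration of that shape (the paths are made up, not the values PKB actually ships):

import types

# Hypothetical two-level mapping: job_category -> job_type -> jar path.
fake_service = types.SimpleNamespace(JOB_JARS={
    'hadoop': {'terasort':
               '/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'},
    'spark': {'pi': '/usr/lib/spark/examples/jars/spark-examples.jar'},
})

# Calling the function above directly, with the fake object standing in for self.
print(GetExecutionJar(fake_service, 'hadoop', 'terasort'))
# -> /usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar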
Python
def GenerateDataForTerasort(self, base_dir, generate_jar,
                            generate_job_category):
  """TeraGen generates data used as input data for subsequent TeraSort run.

  Args:
    base_dir: String for the base directory URI (inclusive of the file system)
      for terasort benchmark data.
    generate_jar: String path to the executable for generating the data. Can
      point to a hadoop/yarn executable.
    generate_job_category: String category of the generate job for eg. hadoop,
      spark, hive, etc.

  Returns:
    Wall time for the Generate job.
    The statistics from running the Generate job.
  """
  generate_args = [TERAGEN, str(FLAGS.dpb_terasort_num_records),
                   base_dir + TERAGEN]
  start_time = datetime.datetime.now()
  stats = self.SubmitJob(
      generate_jar,
      None,
      job_poll_interval=5,
      job_arguments=generate_args,
      job_stdout_file=None,
      job_type=generate_job_category)
  end_time = datetime.datetime.now()
  return self._ProcessWallTime(start_time, end_time), stats
Python
def SortDataForTerasort(self, base_dir, sort_jar, sort_job_category):
  """TeraSort samples the input data and sorts the data into a total order.

  TeraSort is implemented as a MapReduce sort job with a custom partitioner
  that uses a sorted list of n-1 sampled keys that define the key range for
  each reduce.

  Args:
    base_dir: String for the base directory URI (inclusive of the file system)
      for terasort benchmark data.
    sort_jar: String path to the executable for sorting the data. Can point
      to a hadoop/yarn executable.
    sort_job_category: String category of the sort job for eg. hadoop, spark,
      hive, etc.

  Returns:
    Wall time for the Sort job.
    The statistics from running the Sort job.
  """
  sort_args = [TERASORT, base_dir + TERAGEN, base_dir + TERASORT]
  start_time = datetime.datetime.now()
  stats = self.SubmitJob(
      sort_jar,
      None,
      job_poll_interval=5,
      job_arguments=sort_args,
      job_stdout_file=None,
      job_type=sort_job_category)
  end_time = datetime.datetime.now()
  return self._ProcessWallTime(start_time, end_time), stats
Python
def ValidateDataForTerasort(self, base_dir, validate_jar,
                            validate_job_category):
  """TeraValidate ensures that the output data of TeraSort is globally sorted.

  Args:
    base_dir: String for the base directory URI (inclusive of the file system)
      for terasort benchmark data.
    validate_jar: String path to the executable for validating the sorted
      data. Can point to a hadoop/yarn executable.
    validate_job_category: String category of the validate job for eg. hadoop,
      spark, hive, etc.

  Returns:
    Wall time for the Validate job.
    The statistics from running the Validate job.
  """
  validate_args = [TERAVALIDATE, base_dir + TERASORT,
                   base_dir + TERAVALIDATE]
  start_time = datetime.datetime.now()
  stats = self.SubmitJob(
      validate_jar,
      None,
      job_poll_interval=5,
      job_arguments=validate_args,
      job_stdout_file=None,
      job_type=validate_job_category)
  end_time = datetime.datetime.now()
  return self._ProcessWallTime(start_time, end_time), stats
Python
def _GetDataContents(file_name):
  """Get the files in the data folder."""
  path = data.ResourcePath('hammerdb/' + file_name)
  with open(path) as fp:
    contents = fp.read()
  return contents
Python
def Install(vm):
  """Installs the HammerDB package on the VM."""
  zip_path = ntpath.join(vm.temp_dir, HAMMERDB_DIR)
  vm.DownloadFile(HAMMERDB_URL, zip_path)
  vm.UnzipFile(zip_path, vm.temp_dir)
Python
def _CreateSingleScript(vm, contents, filename):
  """Create a single file named as <filename> with <contents> as contents."""
  hammerdb_exe_dir = ntpath.join(vm.temp_dir, 'HammerDB-3.1')
  command = ('cd {hammerdb_exe_dir}; echo \"{contents}\" > .\\tmp.txt; '
             'cat tmp.txt | Out-File -FilePath {filename}'
             ' -Encoding ascii').format(
                 hammerdb_exe_dir=hammerdb_exe_dir,
                 contents=contents,
                 filename=filename)
  vm.RemoteCommand(command, timeout=HAMMERDB_CREATE_FILE_TIMEOUT)
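A sketch of the PowerShell command string this helper produces, using made-up inputs. It also shows why the _CreateFiles snippet below escapes double quotes with PowerShell backticks before handing content over: unescaped quotes in contents would otherwise end up bare inside the echo.

import ntpath

# Hypothetical inputs; vm.temp_dir is faked as a Windows-style temp path.
hammerdb_exe_dir = ntpath.join('C:\\pkb_temp', 'HammerDB-3.1')
command = ('cd {hammerdb_exe_dir}; echo \"{contents}\" > .\\tmp.txt; '
           'cat tmp.txt | Out-File -FilePath {filename}'
           ' -Encoding ascii').format(
               hammerdb_exe_dir=hammerdb_exe_dir,
               contents='puts `\"hello`\"',
               filename='demo.tcl')
print(command)
# cd C:\pkb_temp\HammerDB-3.1; echo "puts `"hello`"" > .\tmp.txt; cat tmp.txt | Out-File -FilePath demo.tcl -Encoding ascii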
Python
def _CreateFiles(vm):
  """Create the files dynamically used by hammerdb.

  This function creates the following files:
    - schema.tcl and schema-tpch.tcl, the files for hammerdb to build the
      schema of the tpcc and tpch benchmarks.
    - sqlrun.tcl, sqlrun-tpch-power.tcl and sqlrun-tpch-throughput.tcl, the
      benchmark test scripts that do the actual measurements.
    - hammerdbcli, hammerdbclitpch, hammerdbcli.bat and hammerdbclitpch.bat,
      the cli tools and batch files for starting hammerdb and running the
      different scripts of each benchmarking stage.
  """
  # create the content for the schema building file of tpcc
  schema = _GetDataContents('hammerdb_schema_tpcc.txt').replace(
      '*ware_house_num*', str(FLAGS.hammerdb_tpcc_warehouse)).replace(
          '*virtual_user_num*', str(FLAGS.hammerdb_tpcc_schema_virtual_user))

  # create the content for the tpcc benchmark run time file.
  virtual_user_seq = ''
  for virtual_user_num in FLAGS.hammerdb_tpcc_virtual_user_list:
    virtual_user_seq += str(virtual_user_num)
    virtual_user_seq += ' '
  sqlrun = _GetDataContents('hammerdb_run_tpcc.txt').replace(
      '*virtual_user_seq*', virtual_user_seq).replace(
          '*timer*', str(FLAGS.hammerdb_tpcc_runtime + 60)).replace(
              '*duration*', str(FLAGS.hammerdb_tpcc_runtime / 60))

  whether_run_tpcc = 'true' if FLAGS.hammerdb_run_tpcc else 'false'
  whether_run_tpch = 'true' if FLAGS.hammerdb_run_tpch else 'false'

  # create the content for the tpcc cli tool for run time.
  cli = _GetDataContents('hammerdb_cli_tpcc.txt').replace(
      '*schema_file_name*', HAMMERDB_SCHEMA_FILE).replace(
          '*sqlrun_file_name*', HAMMERDB_SQLRUN_FILE).replace(
              '*whether_run_tpcc*', whether_run_tpcc)

  # create the content for the tpch cli tool for run time.
  cli_tpch = _GetDataContents('hammerdb_cli_tpch.txt').replace(
      '*whether_run_tpch*', whether_run_tpch)
  cli_tpch = cli_tpch.replace('*schema_file_name_tpch*',
                              HAMMERDB_SCHEMA_FILE_TPCH)
  cli_tpch = cli_tpch.replace('*sqlrun_power_file_name*',
                              HAMMERDB_SQLRUN_FILE_TPCH_POWER)
  cli_tpch = cli_tpch.replace('*sqlrun_throughput_file_name*',
                              HAMMERDB_SQLRUN_FILE_TPCH_THROUGHPUT)

  cli_bat = _GetDataContents('hammerdb_cli_bat_tpcc.txt')
  cli_bat_tpch = _GetDataContents('hammerdb_cli_bat_tpch.txt')
  schema_tpch = _GetDataContents('hammerdb_schema_tpch.txt')

  sqlrun_tpch_power = _GetDataContents('hammerdb_run_tpch.txt').replace(
      '*virtual_user*', str(1)).replace(
          '*test_sequence_complete_sentence*', '\"TPCH POWER COMPLETE\"')
  sqlrun_tpch_throughput = _GetDataContents('hammerdb_run_tpch.txt').replace(
      '*virtual_user*', str(FLAGS.hammerdb_tpch_virtual_user)).replace(
          '*test_sequence_complete_sentence*', '\"TPCH THROUGHPUT COMPLETE\"')

  # Escape double quotes with backticks so they survive the PowerShell echo
  # performed by _CreateSingleScript.
  schema = schema.replace('\"', '`\"')
  sqlrun = sqlrun.replace('\"', '`\"')
  schema_tpch = schema_tpch.replace('\"', '`\"')
  sqlrun_tpch_power = sqlrun_tpch_power.replace('\"', '`\"')
  sqlrun_tpch_throughput = sqlrun_tpch_throughput.replace('\"', '`\"')
  cli = cli.replace('\"', '`\"')
  cli_tpch = cli_tpch.replace('\"', '`\"')
  cli_bat = cli_bat.replace('\"', '`\"')
  cli_bat_tpch = cli_bat_tpch.replace('\"', '`\"')

  # create the necessary files for running hammerdb
  _CreateSingleScript(vm, schema, HAMMERDB_SCHEMA_FILE)
  _CreateSingleScript(vm, sqlrun, HAMMERDB_SQLRUN_FILE)
  _CreateSingleScript(vm, cli, HAMMERDB_CLI_FILE)
  _CreateSingleScript(vm, cli_bat, HAMMERDB_CLI_BAT_FILE)
  _CreateSingleScript(vm, cli_tpch, HAMMERDB_CLI_FILE_TPCH)
  _CreateSingleScript(vm, cli_bat_tpch, HAMMERDB_CLI_BAT_FILE_TPCH)
  _CreateSingleScript(vm, schema_tpch, HAMMERDB_SCHEMA_FILE_TPCH)
  _CreateSingleScript(vm, sqlrun_tpch_power, HAMMERDB_SQLRUN_FILE_TPCH_POWER)
  _CreateSingleScript(vm, sqlrun_tpch_throughput,
                      HAMMERDB_SQLRUN_FILE_TPCH_THROUGHPUT)
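# A minimal, self-contained sketch of the template-substitution pattern used
# above, with a made-up template string (the real templates live in the
# hammerdb_*.txt data files and are not reproduced here):
def _substitution_sketch():
  template = 'diset tpcc count_ware *ware_house_num*\nputs "loading schema"'
  # Placeholders such as *ware_house_num* are replaced with flag values.
  rendered = template.replace('*ware_house_num*', str(4))
  # Backtick-escape double quotes so they survive the PowerShell echo in
  # _CreateSingleScript.
  return rendered.replace('\"', '`\"')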
def _CatFile(vm, filename):
  """Cat out the content of a file."""
  hammerdb_exe_dir = ntpath.join(vm.temp_dir, 'HammerDB-3.1')
  command = 'cd {hammerdb_exe_dir}; cat {filename}'.format(
      hammerdb_exe_dir=hammerdb_exe_dir, filename=filename)
  cat_output, _ = vm.RemoteCommand(command)
  return cat_output
def _RunHammerDbTPCC(vm):
  """Run the tpcc benchmark by starting the batch script."""
  hammerdb_exe_dir = ntpath.join(vm.temp_dir, 'HammerDB-3.1')
  command = 'cd {hammerdb_exe_dir}; .\\hammerdbcli.bat'.format(
      hammerdb_exe_dir=hammerdb_exe_dir)
  total_time_out = ((HAMMERDB_SCHEMA_WAITTIME + HAMMERDB_SQLRUN_WAITIME_ADDON +
                     FLAGS.hammerdb_tpcc_runtime) *
                    HAMMERDB_TEST_TIMEOUT_MULTIPLIER)
  vm.RemoteCommand(command, timeout=total_time_out)
def _RunHammerDbTPCH(vm):
  """Run the tpch benchmark by starting the batch script."""
  hammerdb_exe_dir = ntpath.join(vm.temp_dir, 'HammerDB-3.1')
  command = 'cd {hammerdb_exe_dir}; .\\hammerdbclitpch.bat'.format(
      hammerdb_exe_dir=hammerdb_exe_dir)
  total_time_out = ((HAMMERDB_SCHEMA_WAITTIME + HAMMERDB_SQLRUN_WAITIME_ADDON +
                     FLAGS.hammerdb_tpcc_runtime) *
                    HAMMERDB_TEST_TIMEOUT_MULTIPLIER)
  vm.RemoteCommand(command, timeout=total_time_out)
def RunHammerDB(vm):
  """Run HammerDB and return the samples collected from the run."""
  _CreateFiles(vm)
  if FLAGS.hammerdb_run_tpcc:
    _RunHammerDbTPCC(vm)
  if FLAGS.hammerdb_run_tpch:
    _RunHammerDbTPCH(vm)

  hammer_result = _CatFile(vm, 'C://hammerdb.log')

  metadata = {}
  for k, v in vm.GetResourceMetadata().iteritems():
    metadata[k] = v

  metadata['hammerdb_tpcc_warehouse'] = FLAGS.hammerdb_tpcc_warehouse
  metadata['hammerdb_tpcc_runtime'] = FLAGS.hammerdb_tpcc_runtime
  metadata['hammerdb_run_tpcc'] = FLAGS.hammerdb_run_tpcc
  metadata['hammerdb_run_tpch'] = FLAGS.hammerdb_run_tpch

  return _ParseHammerDBResults(hammer_result, metadata,
                               FLAGS.hammerdb_tpcc_virtual_user_list)
def ParseHammerDBResultTPCC(result, metadata, virtual_user_list):
  """Parses the TPCC benchmark text log and returns a list of samples.

  Each returned sample carries a single value (the measured TPM); all other
  information is stored in the metadata.

  Args:
    result: HammerDB output.
    metadata: the running info of the vm.
    virtual_user_list: the list of virtual user counts.

  Returns:
    A list of samples from the results of the HammerDB tests.
  """
  samples = []
  result_prefix = 'TEST RESULT : System achieved '
  result_suffix = ' SQL Server TPM at'
  start_list = [m.start() for m in re.finditer(result_prefix, result)]
  end_list = [m.start() for m in re.finditer(result_suffix, result)]

  for i, virtual_user_num in enumerate(virtual_user_list):
    metadata['hammerdb_tpcc_virtual_user'] = virtual_user_num
    start_pos = start_list[i] + len(result_prefix)
    end_pos = end_list[i]
    result_tpm = int(result[start_pos: end_pos])
    samples.append(
        sample.Sample('TPM', result_tpm, 'times/minutes', metadata.copy()))
  return samples
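# A small, self-contained usage sketch of the TPCC parser above. The log text
# below is fabricated to match the prefix/suffix the parser searches for; it
# is not real HammerDB output.
def _tpcc_parse_sketch():
  fake_log = ('Vuser 1:TEST RESULT : System achieved 12345 SQL Server TPM at '
              '2678 NOPM\n')
  # Expected result: one sample ('TPM', 12345, 'times/minutes', {...}) with
  # metadata['hammerdb_tpcc_virtual_user'] == 1.
  return ParseHammerDBResultTPCC(fake_log, {}, [1])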
def ParseHammerDBResultTPCH(result, metadata, scale_fact):
  """Parses the TPCH benchmark text log and returns a list of samples.

  Each returned sample carries a single value (the QphH score); all other
  information is stored in the metadata. The calculation follows the
  equations at: https://www.hammerdb.com/docs/ch09s02.html

  Args:
    result: HammerDB output.
    metadata: the running info of the vm.
    scale_fact: the scale factor used to run tpch.

  Returns:
    A list of samples from the results of the HammerDB tests.
  """
  samples = []
  query_time_list = []
  refresh_time_list = []

  # Collect the timings of the 22 power test queries.
  for i in range(22):
    result_prefix = 'query {0} completed in '.format(str(i + 1))
    result_suffix = ' seconds'
    start_pos = result.find(result_prefix) + len(result_prefix)
    end_pos = result.find(result_suffix, start_pos)
    query_time_list.append(float(result[start_pos: end_pos]))

  # Collect the timings of the two refresh functions.
  result_prefix = 'New Sales refresh complete in '
  result_suffix = ' seconds'
  start_pos = result.find(result_prefix) + len(result_prefix)
  end_pos = result.find(result_suffix, start_pos)
  refresh_time_list.append(float(result[start_pos: end_pos]))

  result_prefix = 'Old Sales refresh complete in '
  result_suffix = ' seconds'
  start_pos = result.find(result_prefix) + len(result_prefix)
  end_pos = result.find(result_suffix, start_pos)
  refresh_time_list.append(float(result[start_pos: end_pos]))

  # Use the longest reported 'query set' completion time (skipping the first
  # match) as the throughput test interval.
  result_prefix = ' query set.s. in '
  result_suffix = ' seconds'
  throughput_time = 0
  start_list = [m.start() for m in re.finditer(result_prefix, result)]
  for index in start_list[1:]:
    start_pos = index + len(result_prefix)
    end_pos = result.find(result_suffix, start_pos)
    throughput_time = max(throughput_time, int(result[start_pos: end_pos]))

  tpch_power = _CalculateTPCHPower(query_time_list, refresh_time_list,
                                   scale_fact)
  stream_num = HAMMERDB_SCALE_TO_STREAMS[str(scale_fact)]
  tpch_throughput = stream_num * 22.0 * 3600 * scale_fact / throughput_time
  qphh = np.sqrt(tpch_power * tpch_throughput)
  samples.append(
      sample.Sample('qphh', qphh, 'N/A', metadata.copy()))
  return samples
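# For reference, the throughput and composite metrics computed above follow
# the TPC-H style equations described at the URL in the docstring (LaTeX,
# with S = number of query streams, SF = scale factor, and T_s = longest
# stream elapsed time in seconds):
#
#   Throughput@Size = \frac{S \times 22 \times 3600}{T_s} \times SF
#   QphH@Size       = \sqrt{Power@Size \times Throughput@Size}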
def _CalculateTPCHPower(query_time_list, refresh_time_list, scale_fact):
  """Helper function for calculating the tpch power test result.

  This uses the equation given by:
  https://www.hammerdb.com/docs/ch09s02.html
  """
  maxi = np.amax(query_time_list)
  mini = np.amin(query_time_list)
  # Per the TPC-H power metric rules, no query timing may be smaller than
  # 1/1000 of the longest timing; clamp shorter timings up to that floor
  # while keeping all 22 query timings in the list.
  if mini < maxi / 1000:
    query_time_list = [max(x, maxi / 1000) for x in query_time_list]
  query_time_sum = np.sum([np.log(x) for x in query_time_list])
  refresh_time_sum = np.sum([np.log(x) for x in refresh_time_list])
  norm_factor = -1 / float(len(query_time_list) + len(refresh_time_list))
  return 3600 * np.exp(
      norm_factor * (query_time_sum + refresh_time_sum)) * scale_fact
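# The power metric above is the scaled inverse geometric mean of the 22 query
# timings QI(i) and the two refresh timings RI(j) (LaTeX):
#
#   Power@Size = \frac{3600 \times SF}
#                     {\sqrt[24]{\prod_{i=1}^{22} QI(i)\,\prod_{j=1}^{2} RI(j)}}
#
# which is exactly what the exp/log form in _CalculateTPCHPower evaluates.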
def _UpdateBenchmarkSpecWithFlags(benchmark_spec):
  """Update the benchmark_spec with supplied command line flags.

  Args:
    benchmark_spec: benchmark specification to update
  """
  benchmark_spec.data_dir = FLAGS.mnist_data_dir
  benchmark_spec.iterations = FLAGS.tpu_iterations
  benchmark_spec.gcp_service_account = FLAGS.gcp_service_account
  benchmark_spec.batch_size = FLAGS.mnist_batch_size
  benchmark_spec.num_train_images = FLAGS.mnist_num_train_images
  benchmark_spec.num_eval_images = FLAGS.mnist_num_eval_images
  benchmark_spec.num_examples_per_epoch = (
      float(benchmark_spec.num_train_images) / benchmark_spec.batch_size)
  benchmark_spec.train_epochs = FLAGS.mnist_train_epochs
  benchmark_spec.train_steps = int(
      benchmark_spec.train_epochs * benchmark_spec.num_examples_per_epoch)
  benchmark_spec.eval_epochs = FLAGS.mnist_eval_epochs
  benchmark_spec.eval_steps = int(
      benchmark_spec.eval_epochs * benchmark_spec.num_examples_per_epoch)
  benchmark_spec.precision = FLAGS.tpu_precision
  benchmark_spec.env_cmd = 'export PYTHONPATH=$PYTHONPATH:$PWD/tpu/models'
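# A small worked example of the step arithmetic above, using made-up flag
# values rather than the project defaults: with num_train_images=55000,
# batch_size=1024 and train_epochs=2,
#
#   num_examples_per_epoch = 55000 / 1024 ~= 53.7 steps per epoch
#   train_steps            = int(2 * 53.7109...) = 107
#
# so train_steps counts batches processed, not images.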
def Prepare(benchmark_spec):
  """Install and set up MNIST on the target vm.

  Args:
    benchmark_spec: The benchmark specification
  """
  benchmark_spec.always_call_cleanup = True
  _UpdateBenchmarkSpecWithFlags(benchmark_spec)
  vm = benchmark_spec.vms[0]
  if not benchmark_spec.tpus:
    vm.Install('tensorflow')
  vm.Install('cloud_tpu_models')
  vm.Install('tensorflow_models')
  if benchmark_spec.tpus:
    storage_service = gcs.GoogleCloudStorageService()
    storage_service.PrepareVM(vm)
    benchmark_spec.storage_service = storage_service
    model_dir = 'gs://{}'.format(FLAGS.run_uri)
    benchmark_spec.model_dir = model_dir
    vm.RemoteCommand(
        '{gsutil} mb -c regional -l {location} {model_dir}'.format(
            gsutil=vm.gsutil_path,
            location=util.GetRegionFromZone(
                benchmark_spec.tpu_groups['train'].GetZone()),
            model_dir=benchmark_spec.model_dir), should_log=True)
    vm.RemoteCommand(
        '{gsutil} acl ch -u {service_account}:W {model_dir}'.format(
            gsutil=vm.gsutil_path,
            service_account=benchmark_spec.gcp_service_account,
            model_dir=benchmark_spec.model_dir), should_log=True)
  else:
    benchmark_spec.model_dir = '/tmp'

  if (FLAGS.imagenet_data_dir or FLAGS.t2t_data_dir) and FLAGS.cloud != 'GCP':
    vm.Install('google_cloud_sdk')
    vm.RemoteCommand('echo "export {}" >> ~/.bashrc'.format(GCP_ENV),
                     login_shell=True)
    credential_path = os.path.join('~', '.config', 'gcloud')
    vm.RemoteCommand('mkdir -p {}'.format(credential_path),
                     login_shell=True)
    credential_file = os.path.join(credential_path,
                                   'application_default_credentials.json')
    vm.PushFile(FLAGS.gcp_credential, credential_file)
    vm.RemoteCommand('{env} gcloud auth '
                     'activate-service-account --key-file {key_file}'.format(
                         env=GCP_ENV, key_file=credential_file),
                     login_shell=True)
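# A hedged example of the two storage commands Prepare issues when TPUs are in
# use, rendered with placeholder values (the run_uri, region and service
# account below are invented for illustration):
#
#   gsutil mb -c regional -l us-central1 gs://pkb-0123abcd
#   gsutil acl ch -u tpu-sa@example.iam.gserviceaccount.com:W gs://pkb-0123abcd
#
# i.e. a regional bucket is created to hold the model directory, and the TPU's
# service account is granted write access to it.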