language
stringclasses
6 values
original_string
stringlengths
25
887k
text
stringlengths
25
887k
Python
def close_window(self):
    """Destroy the Tk window, terminating the GUI."""
    self.destroy()
Python
def insert_kw(listbox: tk.Listbox, keywords):
    """Replace the contents of *listbox* with the given keywords.

    :param listbox: target Listbox widget
    :param keywords: iterable of (keyword, score) pairs, as produced by
        :mod:`keywordanalytics.extract_keywords`; only the keyword
        (first element of each pair) is inserted
    """
    listbox.delete(0, tk.END)  # clear any previous entries
    # enumerate replaces the original hand-rolled position counter
    for index, keyword in enumerate(keywords):
        listbox.insert(index, keyword[0])
Python
def insert_related_kw(listbox: tk.Listbox, keywords):
    """Show the related keywords for the current selection.

    :param listbox: Listbox widget that receives the related keywords
    :param keywords: keywords selected in the primary keyword listbox
    """
    listbox.delete(0, tk.END)  # clear any previous entries
    # enumerate replaces the original hand-rolled position counter
    for index, keyword in enumerate(keywords):
        listbox.insert(index, keyword)
Python
def open_textfile(textarea: tk.Text):
    """Ask the user for a text file and load it into *textarea*.

    :param textarea: Text widget that receives the file contents
        (existing contents are replaced)
    :raises Exception: caught and logged instead of propagated,
        e.g. an unreadable file
    """
    try:
        file = fd.askopenfilename(
            filetypes=[("All Files", "*.*"), ("Text Documents", "*.txt")],
            defaultextension=".txt")
        # askopenfilename returns an empty string (not None) when the
        # dialog is cancelled -- the original `is None` check never fired,
        # so cancelling fell through to open("") and a logged exception.
        if not file:
            return
        with open(file, "r") as f:
            text = f.read()
        textarea.delete(1.0, tk.END)
        textarea.insert(tk.INSERT, text)
    except Exception as error:
        logger.error(error)
Python
def export_textarea(textarea: tk.Text):
    """Ask for a target file and write the textarea contents to it.

    :param textarea: Text widget holding the (summarized) text
    :raises Exception: caught and logged instead of propagated,
        e.g. an unwritable path
    """
    try:
        filename = fd.asksaveasfilename(
            initialfile="Untitled.txt",
            filetypes=[("All Files", "*.*"), ("Text Documents", "*.txt")],
            defaultextension=".txt"
        )
        # asksaveasfilename returns '' (not None) on cancel -- the
        # original `is None` check never fired.
        if not filename:
            return
        text = str(textarea.get(1.0, tk.END))
        # distinct name for the handle; the original shadowed the
        # filename variable with the open file object
        with open(filename, "w") as handle:
            handle.write(text)
    except Exception as error:
        logger.error(error)
Python
def export_kw_listbox(listbox: tk.Listbox):
    """Ask for a target CSV file and export the listbox keywords to it.

    :param listbox: Listbox holding the extracted keywords
    :raises Exception: caught and logged instead of propagated,
        e.g. an unwritable path
    """
    try:
        filename = fd.asksaveasfilename(
            initialfile="Untitled.csv",
            filetypes=[("All Files", "*.*"), ("CSV", "*.csv")],
            defaultextension=".csv"
        )
        # asksaveasfilename returns '' (not None) on cancel -- the
        # original `is None` check never fired.
        if not filename:
            return
        # 'Keywords' header first, then every listbox entry
        items = ("Keywords",) + listbox.get(0, tk.END)
        with open(filename, "w", newline="") as csvfile:
            # newline delimiter writes one keyword per line
            writer = csv.writer(csvfile, delimiter="\n")
            writer.writerow(items)
    except Exception as error:
        logger.error(error)
Python
def plot(f=None, **kwargs):
    """Plot functions (Gamma-specific; not the same as SymPy's plot).

    Syntax::

        plot([x^2, x^3, ...])

    or::

        plot(y=x,y1=x^2,r=sin(theta),r1=cos(theta))

    Accepts either a list of single-variable expressions to plot or
    keyword arguments naming the expressions.  With keyword arguments,
    a name starting with ``r`` produces a polar plot; any other name an
    xy graph.  Note that Gamma cuts off plot values above and below a
    certain value without warning the user.
    """
    pass
Python
def format_by_type(result, arguments=None, formatter=None, function_name=None):
    """Format *result* based on its type and on the input to Gamma.

    Dispatch order: a registered per-function formatter, then a result
    card with a custom output formatter, then list formatting, and
    finally the plain formatter.
    """
    if arguments and not function_name:
        function_name = arguments[0]

    if function_name in _function_formatters:
        handler = _function_formatters[function_name]
        return handler(result, arguments, formatter)

    if (function_name in all_cards and
            'format_output_function' in all_cards[function_name].card_info):
        return all_cards[function_name].format_output(result, formatter)

    if isinstance(result, (list, tuple)):
        return format_list(result, formatter)

    return formatter(result)
Python
def find_result_set(function_name, input_evaluated):
    """Find a set of result cards based on function name and evaluated input.

    Returns:

    - Function that parses the evaluated input into components. For
      instance, for an integral this would extract the integrand and
      limits of integration. This function will always extract the
      variables.
    - List of result cards.
    """
    result = []
    result_converter = default_variable

    for predicate, converter, result_cards in result_sets:
        # a predicate is either an exact function name or a callable
        # tested against the evaluated input; the original duplicated
        # the whole handling block for each case
        if predicate == function_name:
            matched = True
        elif callable(predicate) and predicate(input_evaluated):
            matched = True
        else:
            matched = False
        if not matched:
            continue

        if converter:
            result_converter = converter
        # None means "this predicate fully handles the input" -- stop here
        if result_cards is None:
            return result_converter, result
        for card in result_cards:
            if card not in result:
                result.append(card)
    return result_converter, result
Python
def custom_implicit_transformation(result, local_dict, global_dict):
    """Allow a slightly relaxed syntax.

    - Parentheses for single-argument method calls are optional.
    - Multiplication is implicit.
    - Symbol names can be split (i.e. spaces are not needed between symbols).
    - Functions can be exponentiated.

    Example:

    >>> from sympy.parsing.sympy_parser import (parse_expr,
    ... standard_transformations, implicit_multiplication_application)
    >>> parse_expr("10sin**2 x**2 + 3xyz + tan theta",
    ... transformations=(standard_transformations +
    ... (implicit_multiplication_application,)))
    3*x*y*z + 10*sin(x**2)**2 + tan(theta)
    """
    pipeline = (split_symbols, implicit_multiplication,
                implicit_application, function_exponentiation)
    for stage in pipeline:
        result = stage(result, local_dict, global_dict)
    return result
Python
def synonyms(tokens, local_dict, global_dict):
    """Make some names synonyms for others.

    This is done at the token level so that the "stringified" output
    that Gamma displays shows the correct function name.  Must be
    applied before auto_symbol.
    """
    rewritten = []
    for token in tokens:
        is_known_synonym = token[0] == NAME and token[1] in SYNONYMS
        if is_known_synonym:
            rewritten.append((NAME, SYNONYMS[token[1]]))
        else:
            rewritten.append(token)
    return rewritten
Python
def close_matches(s, global_dict):
    """
    Checks undefined names to see if they are close matches to a defined name.

    Returns the corrected expression string when at least one token was
    replaced, otherwise None.
    """
    tokens = sympy_tokenize.generate_tokens(StringIO(s.strip()).readline)
    result = []
    has_result = False
    # names considered "defined": everything in the namespace plus synonyms
    all_names = set(global_dict).union(SYNONYMS)

    # strip the token location info to avoid strange untokenize results
    tokens = [(tok[0], tok[1]) for tok in tokens]

    for token in tokens:
        # only consider multi-character NAME tokens that are not defined
        if (token[0] == NAME and
                token[1] not in all_names and
                len(token[1]) > 1):
            matches = difflib.get_close_matches(token[1], all_names)

            # defensive: get_close_matches draws from all_names, so an
            # exact hit here would be unexpected -- skipped just in case
            if matches and matches[0] == token[1]:
                matches = matches[1:]
            if matches:
                result.append((NAME, matches[0]))
                has_result = True
                continue
        result.append(token)
    if has_result:
        return sympy_tokenize.untokenize(result).strip()
    return None
Python
def iterate_stdin():
    """Yield lines from sys.stdin one at a time.

    Iterating sys.stdin directly (which is what fileinput does
    internally) buffers on Python 2 and does not start yielding until
    the stream is closed; reading line-by-line avoids that.
    """
    while True:
        line = sys.stdin.readline()
        # readline() returns '' only at EOF -- stop instead of yielding
        # empty strings forever (the original never terminated).
        if not line:
            break
        yield line
Python
def compute_centers_and_boundaries(self, nx=4, ny=3):
    """Compute the centers and boundaries of the cut-up cheese pieces.

    Arguments:
    - nx - number of pieces in x direction
    - ny - number of pieces in y direction

    Raises ValueError when the first concentration row does not contain
    exactly nx*ny values.
    """
    # check that the concentration array shape matches the number of pieces
    if len(self.c[0]) != nx * ny:
        # BUG FIX: the original wrote len(self.c[0], nx, ny), calling
        # len() with three arguments (a TypeError) instead of formatting
        # the message with the three values.
        raise ValueError(
            'Concentration data doesn\'t match number of pieces: %i != %i x %i'
            % (len(self.c[0]), nx, ny))
    dl = self.length / nx
    dw = self.width / ny
    self._x_centers = np.array([dl * (i + 0.5) for i in range(nx)])
    self._y_centers = np.array([dw * (j + 0.5) for j in range(ny)])
    self._x_boundaries = np.array([i * dl for i in range(nx + 1)])
    self._y_boundaries = np.array([j * dw for j in range(ny + 1)])
Python
def add_brine_params(self, dl, dw, c_init=1., c_brine=1.):
    """Attach brine geometry and concentration data to the cheese model.

    Arguments:
    - dl - difference length of brine
    - dw - difference width of brine
    - c_init - initial salt concentration in brine (only recorded when
      no brine_c0 has been set previously)
    - c_brine - measured concentration of brine at time=self.time
    """
    self.brine_c_salt = c_brine
    self.brine_dwidth = dw
    self.brine_dlength = dl
    # keep a previously recorded initial concentration if there is one
    self.brine_c0 = getattr(self, 'brine_c0', c_init)
Python
def fix_path(path,
             # *,
             _pathsep=PATH_SEP):
    """Return a platform-appropriate path for the given path."""
    return path.replace('/', _pathsep) if path else '.'
Python
def fix_fileid(fileid, rootdir=None,
               # *,
               normalize=False,
               strictpathsep=None,
               _pathsep=PATH_SEP,
               **kwargs
               ):
    """Return a pathsep-separated file ID ("./"-prefixed) for the given value.

    The file ID may be absolute.  If so and "rootdir" is provided
    then make the file ID relative.  If absolute but "rootdir" is not
    provided then leave it absolute.

    "normalize" passes the result through _str_to_lower (presumably
    case-folding -- confirm against that helper); "strictpathsep"
    converts the separators back to _pathsep.  The two options are
    mutually exclusive.
    """
    # "" and "." are passed through untouched
    if not fileid or fileid == '.':
        return fileid

    # We default to "/" (forward slash) as the final path sep, since
    # that gives us a consistent, cross-platform result.  (Windows does
    # actually support "/" as a path separator.)  Most notably, node IDs
    # from pytest use "/" as the path separator by default.
    _fileid = fileid.replace(_pathsep, '/')

    relpath = _resolve_relpath(_fileid, rootdir,
                               _pathsep=_pathsep,
                               **kwargs
                               )
    if relpath:  # Note that we treat "" here as an absolute path.
        _fileid = './' + relpath

    if normalize:
        if strictpathsep:
            raise ValueError(
                'cannot normalize *and* keep strict path separator')
        _fileid = _str_to_lower(_fileid)
    elif strictpathsep:
        # We do not use _normcase since we want to preserve capitalization.
        _fileid = _fileid.replace('/', _pathsep)
    return _fileid
Python
def shlex_unsplit(argv):
    """Return the shell-safe string for the given arguments.

    Effectively the inverse of shlex.split().
    """
    return ' '.join(_quote_arg(arg) for arg in argv)
Python
async def submit_job(file_path: str) -> dict:
    """Submit a job for processing and return the service's JSON response.

    :param file_path: path of the file to upload
    :return: decoded JSON response (contains the queue/job ID)
    """
    async with aiohttp.ClientSession() as session:
        # keep the upload handle open only for the duration of the
        # request -- the original opened it and never closed it (leak)
        with open(file_path, 'rb') as upload:
            data = {'file': upload}
            async with session.post(f"{ENDPOINT}/submit", data=data) as response:
                return await response.json()
Python
async def collect_result(job_id, local_filename):
    """Collect the results for the given job and store them to disk."""
    output_path = infer_result_name(local_filename)
    async with aiohttp.ClientSession() as session:
        async with session.get(f"{ENDPOINT}/{job_id}/result") as response:
            body = await response.text()
            with open(output_path, 'w') as outfile:
                outfile.write(body)
Python
def infer_result_name(input_name):
    """Return the output filename for the annotated paper."""
    base, _extension = os.path.splitext(input_name)
    return base + "_annotated.xml"
Python
def run(self, name):
    '''Extend unittest's run to ensure the results directory exists.

    If no results directory is found, a warning is logged and the
    directory is created before running the test.
    '''
    from os.path import join, dirname, exists
    # reference screenshots live in a 'results' dir next to this file
    results_dir = join(dirname(__file__), 'results')
    if not exists(results_dir):
        # NOTE(review): the message says "cancel test" but the code
        # creates the directory and continues -- confirm intent.
        log.warning('No result directory found, cancel test.')
        os.mkdir(results_dir)
    # per-test bookkeeping used by the screenshot comparison machinery
    self.test_counter = 0
    self.results_dir = results_dir
    self.test_failed = False
    return super(GraphicUnitTest, self).run(name)
Python
def on_window_flip(self, window):
    '''Internal method to be called when the window have just displayed
    an image.  When an image is showed, we decrement our framecount.
    If framecount is come to 0, we are taking the screenshot.

    The screenshot is done in a temporary place, and is compared to the
    original one -> test ok/ko.  If no screenshot is available in the
    results directory, a new one will be created.
    '''
    from kivy.base import EventLoop
    from tempfile import mkstemp
    from os.path import join, exists
    from os import unlink, close
    from shutil import move, copy

    # don't save screenshot until we have enough frames.
    # log.debug('framecount %d' % self.framecount)
    self.framecount -= 1
    if self.framecount > 0:
        return

    # don't create screenshots if a specific var is in env (CI runs)
    ignore = ['TRAVIS_OS_NAME', 'APPVEYOR_BUILD_FOLDER']
    from os import environ
    if any(i in environ for i in ignore):
        EventLoop.stop()
        return

    reffn = None
    match = False
    try:
        # just get a temporary name for the screenshot file
        fd, tmpfn = mkstemp(suffix='.png', prefix='kivyunit-')
        close(fd)
        unlink(tmpfn)

        # get a filename for the current unit test
        self.test_counter += 1
        test_uid = '%s-%d.png' % (
            '_'.join(self.id().split('.')[-2:]),
            self.test_counter)

        # capture the screen
        log.info('Capturing screenshot for %s' % test_uid)
        tmpfn = window.screenshot(tmpfn)
        log.info('Capture saved at %s' % tmpfn)

        # search the file to compare to
        reffn = join(self.results_dir, test_uid)
        log.info('Compare with %s' % reffn)

        # get sourcecode of the calling test for the HTML report
        # (frame index 6 walks back to the test function -- fragile,
        # depends on the call chain depth)
        import inspect
        frame = inspect.getouterframes(inspect.currentframe())[6]
        sourcecodetab, line = inspect.getsourcelines(frame[0])
        line = frame[2] - line
        currentline = sourcecodetab[line]
        # highlight the current line: HTML version for the report,
        # plain-text marker version for the interactive prompt
        sourcecodetab[line] = '<span style="color: red;">%s</span>' % (
            currentline)
        sourcecode = ''.join(sourcecodetab)
        sourcecodetab[line] = '>>>>>>>>\n%s<<<<<<<<\n' % currentline
        sourcecodeask = ''.join(sourcecodetab)

        if not exists(reffn):
            # no reference image yet: optionally promote this capture
            log.info('No image reference, move %s as ref ?' % test_uid)
            if self.interactive_ask_ref(sourcecodeask, tmpfn, self.id()):
                move(tmpfn, reffn)
                tmpfn = reffn
                log.info('Image used as reference')
                match = True
            else:
                log.info('Image discarded')
        else:
            # compare raw pixel data of capture vs reference
            from kivy.core.image import Image as CoreImage
            s1 = CoreImage(tmpfn, keep_data=True)
            sd1 = s1.image._data[0].data
            s2 = CoreImage(reffn, keep_data=True)
            sd2 = s2.image._data[0].data
            if sd1 != sd2:
                log.critical(
                    '%s at render() #%d, images are different.' % (
                        self.id(), self.test_counter))
                if self.interactive_ask_diff(sourcecodeask,
                                             tmpfn, reffn, self.id()):
                    log.critical('user ask to use it as ref.')
                    move(tmpfn, reffn)
                    tmpfn = reffn
                    match = True
                else:
                    self.test_failed = True
            else:
                match = True

        # generate html report (appended to build/index.html)
        from os.path import join, dirname, exists, basename
        from os import mkdir
        build_dir = join(dirname(__file__), 'build')
        if not exists(build_dir):
            mkdir(build_dir)
        copy(reffn, join(build_dir, 'ref_%s' % basename(reffn)))
        if tmpfn != reffn:
            copy(tmpfn, join(build_dir, 'test_%s' % basename(reffn)))
        with open(join(build_dir, 'index.html'), 'at') as fd:
            color = '#ffdddd' if not match else '#ffffff'
            fd.write('<div style="background-color: %s">' % color)
            fd.write('<h2>%s #%d</h2>' % (self.id(), self.test_counter))
            fd.write('<table><tr><th>Reference</th>'
                     '<th>Test</th>'
                     '<th>Comment</th>')
            fd.write('<tr><td><img src="ref_%s"/></td>' % basename(reffn))
            if tmpfn != reffn:
                fd.write('<td><img src="test_%s"/></td>' % basename(reffn))
            else:
                fd.write('<td>First time, no comparison.</td>')
            fd.write('<td><pre>%s</pre></td>' % sourcecode)
            fd.write('</table></div>')
    finally:
        # best effort cleanup of the temporary capture
        try:
            if reffn != tmpfn:
                unlink(tmpfn)
        except:
            pass
    EventLoop.stop()
Python
def texture_update(self, *largs):
    '''Force texture recreation with the current Label properties.

    After this function call, the :attr:`texture` and :attr:`texture_size`
    will be updated in this order.
    '''
    # markup labels carry extra ref/anchor state that must be synced
    mrkup = self._label.__class__ is CoreMarkupLabel
    self.texture = None

    # empty (or effectively empty, after stripping) text: reset size
    # and, for markup labels, clear refs/anchors on both sides
    if (not self._label.text or
            (self.halign == 'justify' or self.strip) and
            not self._label.text.strip()):
        self.texture_size = (0, 0)
        self.is_shortened = False
        if mrkup:
            self.refs, self._label._refs = {}, {}
            self.anchors, self._label._anchors = {}, {}
    else:
        if mrkup:
            text = self.text
            # we must strip here, otherwise, if the last line is empty,
            # markup will retain the last empty line since it only strips
            # line by line within markup
            if self.halign == 'justify' or self.strip:
                text = text.strip()
            # wrap the text in a color tag reflecting the disabled state
            self._label.text = ''.join(('[color=', get_hex_from_color(
                self.disabled_color if self.disabled else self.color),
                ']', text, '[/color]'))
            self._label.refresh()
            # force the rendering to get the references
            if self._label.texture:
                self._label.texture.bind()
            self.refs = self._label.refs
            self.anchors = self._label.anchors
        else:
            self._label.refresh()
        texture = self._label.texture
        if texture is not None:
            self.texture = self._label.texture
            self.texture_size = list(self.texture.size)
        self.is_shortened = self._label.is_shortened
Python
def width(self):
    '''Image width in pixels.
    (If the image is mipmapped, it will use the level 0)
    '''
    level_zero = self.mipmaps[0]
    return level_zero[0]
Python
def height(self):
    '''Image height in pixels.
    (If the image is mipmapped, it will use the level 0)
    '''
    level_zero = self.mipmaps[0]
    return level_zero[1]
Python
def data(self): '''Image data. (If the image is mipmapped, it will use the level 0) ''' return self.mipmaps[0][2]
def data(self): '''Image data. (If the image is mipmapped, it will use the level 0) ''' return self.mipmaps[0][2]
Python
def add_mipmap(self, level, width, height, data, rowlength): '''Add a image for a specific mipmap level. .. versionadded:: 1.0.7 ''' self.mipmaps[level] = [int(width), int(height), data, rowlength]
def add_mipmap(self, level, width, height, data, rowlength): '''Add a image for a specific mipmap level. .. versionadded:: 1.0.7 ''' self.mipmaps[level] = [int(width), int(height), data, rowlength]
Python
def iterate_mipmaps(self): '''Iterate over all mipmap images available. .. versionadded:: 1.0.7 ''' mm = self.mipmaps for x in range(len(mm)): item = mm.get(x, None) if item is None: raise Exception('Invalid mipmap level, found empty one') yield x, item[0], item[1], item[2], item[3]
def iterate_mipmaps(self): '''Iterate over all mipmap images available. .. versionadded:: 1.0.7 ''' mm = self.mipmaps for x in range(len(mm)): item = mm.get(x, None) if item is None: raise Exception('Invalid mipmap level, found empty one') yield x, item[0], item[1], item[2], item[3]
Python
def texture(self): '''Get the image texture (created on the first call) ''' if self._textures is None: self.populate() if self._textures is None: return None return self._textures[0]
def texture(self): '''Get the image texture (created on the first call) ''' if self._textures is None: self.populate() if self._textures is None: return None return self._textures[0]
Python
def textures(self): '''Get the textures list (for mipmapped image or animated image) .. versionadded:: 1.0.8 ''' if self._textures is None: self.populate() return self._textures
def textures(self): '''Get the textures list (for mipmapped image or animated image) .. versionadded:: 1.0.8 ''' if self._textures is None: self.populate() return self._textures
Python
def nocache(self): '''Indicate if the texture will not be stored in the cache .. versionadded:: 1.6.0 ''' return self._nocache
def nocache(self): '''Indicate if the texture will not be stored in the cache .. versionadded:: 1.6.0 ''' return self._nocache
Python
def zip_loader(filename, **kwargs): '''Read images from an zip file. .. versionadded:: 1.0.8 Returns an Image with a list of type ImageData stored in Image._data ''' # read zip in memory for faster access _file = BytesIO(open(filename, 'rb').read()) # read all images inside the zip z = zipfile.ZipFile(_file) image_data = [] # sort filename list znamelist = z.namelist() znamelist.sort() image = None for zfilename in znamelist: try: # read file and store it in mem with fileIO struct around it tmpfile = BytesIO(z.read(zfilename)) ext = zfilename.split('.')[-1].lower() im = None for loader in ImageLoader.loaders: if (ext not in loader.extensions() or not loader.can_load_memory()): continue Logger.debug('Image%s: Load <%s> from <%s>' % (loader.__name__[11:], zfilename, filename)) try: im = loader(zfilename, ext=ext, rawdata=tmpfile, inline=True, **kwargs) except: # Loader failed, continue trying. continue break if im is not None: # append ImageData to local variable before it's # overwritten image_data.append(im._data[0]) image = im # else: if not image file skip to next except: Logger.warning('Image: Unable to load image' '<%s> in zip <%s> trying to continue...' % (zfilename, filename)) z.close() if len(image_data) == 0: raise Exception('no images in zip <%s>' % filename) # replace Image.Data with the array of all the images in the zip image._data = image_data image.filename = filename return image
def zip_loader(filename, **kwargs): '''Read images from an zip file. .. versionadded:: 1.0.8 Returns an Image with a list of type ImageData stored in Image._data ''' # read zip in memory for faster access _file = BytesIO(open(filename, 'rb').read()) # read all images inside the zip z = zipfile.ZipFile(_file) image_data = [] # sort filename list znamelist = z.namelist() znamelist.sort() image = None for zfilename in znamelist: try: # read file and store it in mem with fileIO struct around it tmpfile = BytesIO(z.read(zfilename)) ext = zfilename.split('.')[-1].lower() im = None for loader in ImageLoader.loaders: if (ext not in loader.extensions() or not loader.can_load_memory()): continue Logger.debug('Image%s: Load <%s> from <%s>' % (loader.__name__[11:], zfilename, filename)) try: im = loader(zfilename, ext=ext, rawdata=tmpfile, inline=True, **kwargs) except: # Loader failed, continue trying. continue break if im is not None: # append ImageData to local variable before it's # overwritten image_data.append(im._data[0]) image = im # else: if not image file skip to next except: Logger.warning('Image: Unable to load image' '<%s> in zip <%s> trying to continue...' % (zfilename, filename)) z.close() if len(image_data) == 0: raise Exception('no images in zip <%s>' % filename) # replace Image.Data with the array of all the images in the zip image._data = image_data image.filename = filename return image
Python
def anim_reset(self, allow_anim): '''Reset an animation if available. .. versionadded:: 1.0.8 :Parameters: `allow_anim`: bool Indicate whether the animation should restart playing or not. Usage:: # start/reset animation image.anim_reset(True) # or stop the animation image.anim_reset(False) You can change the animation speed whilst it is playing:: # Set to 20 FPS image.anim_delay = 1 / 20. ''' # stop animation if self._anim_ev is not None: self._anim_ev.cancel() self._anim_ev = None if allow_anim and self._anim_available and self._anim_delay >= 0: self._anim_ev = Clock.schedule_interval(self._anim, self.anim_delay) self._anim()
def anim_reset(self, allow_anim): '''Reset an animation if available. .. versionadded:: 1.0.8 :Parameters: `allow_anim`: bool Indicate whether the animation should restart playing or not. Usage:: # start/reset animation image.anim_reset(True) # or stop the animation image.anim_reset(False) You can change the animation speed whilst it is playing:: # Set to 20 FPS image.anim_delay = 1 / 20. ''' # stop animation if self._anim_ev is not None: self._anim_ev.cancel() self._anim_ev = None if allow_anim and self._anim_available and self._anim_delay >= 0: self._anim_ev = Clock.schedule_interval(self._anim, self.anim_delay) self._anim()
Python
def anim_available(self): '''Return True if this Image instance has animation available. .. versionadded:: 1.0.8 ''' return self._anim_available
def anim_available(self): '''Return True if this Image instance has animation available. .. versionadded:: 1.0.8 ''' return self._anim_available
Python
def anim_index(self): '''Return the index number of the image currently in the texture. .. versionadded:: 1.0.8 ''' return self._anim_index
def anim_index(self): '''Return the index number of the image currently in the texture. .. versionadded:: 1.0.8 ''' return self._anim_index
Python
def on_texture(self, *largs): '''This event is fired when the texture reference or content has changed. It is normally used for sequenced images. .. versionadded:: 1.0.8 ''' pass
def on_texture(self, *largs): '''This event is fired when the texture reference or content has changed. It is normally used for sequenced images. .. versionadded:: 1.0.8 ''' pass
Python
def load_memory(self, data, ext, filename='__inline__'): '''(internal) Method to load an image from raw data. ''' self._filename = filename # see if there is a available loader for it loaders = [loader for loader in ImageLoader.loaders if loader.can_load_memory() and ext in loader.extensions()] if not loaders: raise Exception('No inline loader found to load {}'.format(ext)) image = loaders[0](filename, ext=ext, rawdata=data, inline=True, nocache=self._nocache, mipmap=self._mipmap, keep_data=self._keep_data) if isinstance(image, Texture): self._texture = image self._size = image.size else: self.image = image
def load_memory(self, data, ext, filename='__inline__'): '''(internal) Method to load an image from raw data. ''' self._filename = filename # see if there is a available loader for it loaders = [loader for loader in ImageLoader.loaders if loader.can_load_memory() and ext in loader.extensions()] if not loaders: raise Exception('No inline loader found to load {}'.format(ext)) image = loaders[0](filename, ext=ext, rawdata=data, inline=True, nocache=self._nocache, mipmap=self._mipmap, keep_data=self._keep_data) if isinstance(image, Texture): self._texture = image self._size = image.size else: self.image = image
Python
def save(self, filename, flipped=False): '''Save image texture to file. The filename should have the '.png' extension because the texture data read from the GPU is in the RGBA format. '.jpg' might work but has not been heavily tested so some providers might break when using it. Any other extensions are not officially supported. The flipped parameter flips the saved image vertically, and defaults to False. Example:: # Save an core image object from kivy.core.image import Image img = Image('hello.png') img.save('hello2.png') # Save a texture texture = Texture.create(...) img = Image(texture) img.save('hello3.png') .. versionadded:: 1.7.0 .. versionchanged:: 1.8.0 Parameter `flipped` added to flip the image before saving, default to False. ''' pixels = None size = None loaders = [x for x in ImageLoader.loaders if x.can_save()] if not loaders: return False loader = loaders[0] if self.image: # we might have a ImageData object to use data = self.image._data[0] if data.data is not None: if data.fmt in ('rgba', 'rgb'): # fast path, use the "raw" data when keep_data is used size = data.width, data.height pixels = data.data else: # the format is not rgba, we need to convert it. # use texture for that. self.populate() if pixels is None and self._texture: # use the texture pixels size = self._texture.size pixels = self._texture.pixels if pixels is None: return False l_pixels = len(pixels) if l_pixels == size[0] * size[1] * 3: fmt = 'rgb' elif l_pixels == size[0] * size[1] * 4: fmt = 'rgba' else: raise Exception('Unable to determine the format of the pixels') return loader.save(filename, size[0], size[1], fmt, pixels, flipped)
def save(self, filename, flipped=False): '''Save image texture to file. The filename should have the '.png' extension because the texture data read from the GPU is in the RGBA format. '.jpg' might work but has not been heavily tested so some providers might break when using it. Any other extensions are not officially supported. The flipped parameter flips the saved image vertically, and defaults to False. Example:: # Save an core image object from kivy.core.image import Image img = Image('hello.png') img.save('hello2.png') # Save a texture texture = Texture.create(...) img = Image(texture) img.save('hello3.png') .. versionadded:: 1.7.0 .. versionchanged:: 1.8.0 Parameter `flipped` added to flip the image before saving, default to False. ''' pixels = None size = None loaders = [x for x in ImageLoader.loaders if x.can_save()] if not loaders: return False loader = loaders[0] if self.image: # we might have a ImageData object to use data = self.image._data[0] if data.data is not None: if data.fmt in ('rgba', 'rgb'): # fast path, use the "raw" data when keep_data is used size = data.width, data.height pixels = data.data else: # the format is not rgba, we need to convert it. # use texture for that. self.populate() if pixels is None and self._texture: # use the texture pixels size = self._texture.size pixels = self._texture.pixels if pixels is None: return False l_pixels = len(pixels) if l_pixels == size[0] * size[1] * 3: fmt = 'rgb' elif l_pixels == size[0] * size[1] * 4: fmt = 'rgba' else: raise Exception('Unable to determine the format of the pixels') return loader.save(filename, size[0], size[1], fmt, pixels, flipped)
Python
def refresh_from_data(self, *largs, **kwargs): """ This should be called when data changes. Data changes typically indicate that everything should be recomputed since the source data changed. This method is automatically bound to the :attr:`~RecycleDataModelBehavior.on_data_changed` method of the :class:`~RecycleDataModelBehavior` class and therefore responds to and accepts the keyword arguments of that event. It can be called manually to trigger an update. """ self._refresh_flags['data'].append(kwargs) self._refresh_trigger()
def refresh_from_data(self, *largs, **kwargs): """ This should be called when data changes. Data changes typically indicate that everything should be recomputed since the source data changed. This method is automatically bound to the :attr:`~RecycleDataModelBehavior.on_data_changed` method of the :class:`~RecycleDataModelBehavior` class and therefore responds to and accepts the keyword arguments of that event. It can be called manually to trigger an update. """ self._refresh_flags['data'].append(kwargs) self._refresh_trigger()
Python
def refresh_from_layout(self, *largs, **kwargs): """ This should be called when the layout changes or needs to change. It is typically called when a layout parameter has changed and therefore the layout needs to be recomputed. """ self._refresh_flags['layout'].append(kwargs) self._refresh_trigger()
def refresh_from_layout(self, *largs, **kwargs): """ This should be called when the layout changes or needs to change. It is typically called when a layout parameter has changed and therefore the layout needs to be recomputed. """ self._refresh_flags['layout'].append(kwargs) self._refresh_trigger()
Python
def refresh_from_viewport(self, *largs): """ This should be called when the viewport changes and the displayed data must be updated. Neither the data nor the layout will be recomputed. """ self._refresh_flags['viewport'] = True self._refresh_trigger()
def refresh_from_viewport(self, *largs): """ This should be called when the viewport changes and the displayed data must be updated. Neither the data nor the layout will be recomputed. """ self._refresh_flags['viewport'] = True self._refresh_trigger()
Python
def refresh_view_attrs(self, rv, index, data): '''Called by the :class:`RecycleAdapter` when the view is initially populated with the values from the `data` dictionary for this item. Any pos or size info should be removed because they are set subsequently with :attr:`refresh_view_layout`. :Parameters: `rv`: :class:`RecycleView` instance The :class:`RecycleView` that caused the update. `data`: dict The data dict used to populate this view. ''' sizing_attrs = RecycleDataAdapter._sizing_attrs for key, value in data.items(): if key not in sizing_attrs: setattr(self, key, value)
def refresh_view_attrs(self, rv, index, data): '''Called by the :class:`RecycleAdapter` when the view is initially populated with the values from the `data` dictionary for this item. Any pos or size info should be removed because they are set subsequently with :attr:`refresh_view_layout`. :Parameters: `rv`: :class:`RecycleView` instance The :class:`RecycleView` that caused the update. `data`: dict The data dict used to populate this view. ''' sizing_attrs = RecycleDataAdapter._sizing_attrs for key, value in data.items(): if key not in sizing_attrs: setattr(self, key, value)
Python
def refresh_view_layout(self, rv, index, layout, viewport): '''Called when the view's size is updated by the layout manager, :class:`RecycleLayoutManagerBehavior`. :Parameters: `rv`: :class:`RecycleView` instance The :class:`RecycleView` that caused the update. `viewport`: 4-tuple The coordinates of the bottom left and width height in layout manager coordinates. This may be larger than this view item. :raises: `LayoutChangeException`: If the sizing or data changed during a call to this method, raising a `LayoutChangeException` exception will force a refresh. Useful when data changed and we don't want to layout further since it'll be overwritten again soon. ''' w, h = layout.pop('size') if w is None: if h is not None: self.height = h else: if h is None: self.width = w else: self.size = w, h for name, value in layout.items(): setattr(self, name, value)
def refresh_view_layout(self, rv, index, layout, viewport): '''Called when the view's size is updated by the layout manager, :class:`RecycleLayoutManagerBehavior`. :Parameters: `rv`: :class:`RecycleView` instance The :class:`RecycleView` that caused the update. `viewport`: 4-tuple The coordinates of the bottom left and width height in layout manager coordinates. This may be larger than this view item. :raises: `LayoutChangeException`: If the sizing or data changed during a call to this method, raising a `LayoutChangeException` exception will force a refresh. Useful when data changed and we don't want to layout further since it'll be overwritten again soon. ''' w, h = layout.pop('size') if w is None: if h is not None: self.height = h else: if h is None: self.width = w else: self.size = w, h for name, value in layout.items(): setattr(self, name, value)
Python
def create_view(self, index, data_item, viewclass): '''(internal) Creates and initializes the view for the data at `index`. The returned view is synced with the data, except for the pos/size information. ''' if viewclass is None: return view = viewclass() self.refresh_view_attrs(index, data_item, view) return view
def create_view(self, index, data_item, viewclass): '''(internal) Creates and initializes the view for the data at `index`. The returned view is synced with the data, except for the pos/size information. ''' if viewclass is None: return view = viewclass() self.refresh_view_attrs(index, data_item, view) return view
Python
def refresh_view_attrs(self, index, data_item, view): '''(internal) Syncs the view and brings it up to date with the data. This method calls :meth:`RecycleDataViewBehavior.refresh_view_attrs` if the view inherits from :class:`RecycleDataViewBehavior`. See that method for more details. .. note:: Any sizing and position info is skipped when syncing with the data. ''' viewclass = view.__class__ if viewclass not in _view_base_cache: _view_base_cache[viewclass] = isinstance(view, RecycleDataViewBehavior) if _view_base_cache[viewclass]: view.refresh_view_attrs(self.recycleview, index, data_item) else: sizing_attrs = RecycleDataAdapter._sizing_attrs for key, value in data_item.items(): if key not in sizing_attrs: setattr(view, key, value)
def refresh_view_attrs(self, index, data_item, view): '''(internal) Syncs the view and brings it up to date with the data. This method calls :meth:`RecycleDataViewBehavior.refresh_view_attrs` if the view inherits from :class:`RecycleDataViewBehavior`. See that method for more details. .. note:: Any sizing and position info is skipped when syncing with the data. ''' viewclass = view.__class__ if viewclass not in _view_base_cache: _view_base_cache[viewclass] = isinstance(view, RecycleDataViewBehavior) if _view_base_cache[viewclass]: view.refresh_view_attrs(self.recycleview, index, data_item) else: sizing_attrs = RecycleDataAdapter._sizing_attrs for key, value in data_item.items(): if key not in sizing_attrs: setattr(view, key, value)
Python
def refresh_view_layout(self, index, layout, view, viewport): '''Updates the sizing information of the view. viewport is in coordinates of the layout manager. This method calls :meth:`RecycleDataViewBehavior.refresh_view_attrs` if the view inherits from :class:`RecycleDataViewBehavior`. See that method for more details. .. note:: Any sizing and position info is skipped when syncing with the data. ''' if view.__class__ not in _view_base_cache: _view_base_cache[view.__class__] = isinstance( view, RecycleDataViewBehavior) if _view_base_cache[view.__class__]: view.refresh_view_layout( self.recycleview, index, layout, viewport) else: w, h = layout.pop('size') if w is None: if h is not None: view.height = h else: if h is None: view.width = w else: view.size = w, h for name, value in layout.items(): setattr(view, name, value)
def refresh_view_layout(self, index, layout, view, viewport): '''Updates the sizing information of the view. viewport is in coordinates of the layout manager. This method calls :meth:`RecycleDataViewBehavior.refresh_view_attrs` if the view inherits from :class:`RecycleDataViewBehavior`. See that method for more details. .. note:: Any sizing and position info is skipped when syncing with the data. ''' if view.__class__ not in _view_base_cache: _view_base_cache[view.__class__] = isinstance( view, RecycleDataViewBehavior) if _view_base_cache[view.__class__]: view.refresh_view_layout( self.recycleview, index, layout, viewport) else: w, h = layout.pop('size') if w is None: if h is not None: view.height = h else: if h is None: view.width = w else: view.size = w, h for name, value in layout.items(): setattr(view, name, value)
Python
def make_view_dirty(self, view, index): '''(internal) Used to flag this view as dirty, ready to be used for others. See :meth:`make_views_dirty`. ''' del self.views[index] self.dirty_views[view.__class__][index] = view
def make_view_dirty(self, view, index): '''(internal) Used to flag this view as dirty, ready to be used for others. See :meth:`make_views_dirty`. ''' del self.views[index] self.dirty_views[view.__class__][index] = view
Python
def make_views_dirty(self): '''Makes all the current views dirty. Dirty views are still in sync with the corresponding data. However, the size information may go out of sync. Therefore a dirty view can be reused by the same index by just updating the sizing information. Once the underlying data of this index changes, the view should be removed from the dirty views and moved to the global cache with :meth:`invalidate`. This is typically called when the layout manager needs to re-layout all the data. ''' views = self.views if not views: return dirty_views = self.dirty_views for index, view in views.items(): dirty_views[view.__class__][index] = view self.views = {}
def make_views_dirty(self): '''Makes all the current views dirty. Dirty views are still in sync with the corresponding data. However, the size information may go out of sync. Therefore a dirty view can be reused by the same index by just updating the sizing information. Once the underlying data of this index changes, the view should be removed from the dirty views and moved to the global cache with :meth:`invalidate`. This is typically called when the layout manager needs to re-layout all the data. ''' views = self.views if not views: return dirty_views = self.dirty_views for index, view in views.items(): dirty_views[view.__class__][index] = view self.views = {}
Python
def invalidate(self): '''Moves all the current views into the global cache. As opposed to making a view dirty where the view is in sync with the data except for sizing information, this will completely disconnect the view from the data, as it is assumed the data has gone out of sync with the view. This is typically called when the data changes. ''' global _cache_count for view in self.views.values(): _cached_views[view.__class__].append(view) _cache_count += 1 for cls, views in self.dirty_views.items(): _cached_views[cls].extend(views.values()) _cache_count += len(views) if _cache_count >= _max_cache_size: _clean_cache() self.views = {} self.dirty_views.clear()
def invalidate(self): '''Moves all the current views into the global cache. As opposed to making a view dirty where the view is in sync with the data except for sizing information, this will completely disconnect the view from the data, as it is assumed the data has gone out of sync with the view. This is typically called when the data changes. ''' global _cache_count for view in self.views.values(): _cached_views[view.__class__].append(view) _cache_count += 1 for cls, views in self.dirty_views.items(): _cached_views[cls].extend(views.values()) _cache_count += len(views) if _cache_count >= _max_cache_size: _clean_cache() self.views = {} self.dirty_views.clear()
Python
def cursor_index(self, cursor=None): '''Return the cursor index in the text/value. ''' if not cursor: cursor = self.cursor try: l = self._lines if len(l) == 0: return 0 lf = self._lines_flags index, cr = cursor for row in range(cr): if row >= len(l): continue index += len(l[row]) if lf[row] & FL_IS_LINEBREAK: index += 1 if lf[cr] & FL_IS_LINEBREAK: index += 1 return index except IndexError: return 0
def cursor_index(self, cursor=None): '''Return the cursor index in the text/value. ''' if not cursor: cursor = self.cursor try: l = self._lines if len(l) == 0: return 0 lf = self._lines_flags index, cr = cursor for row in range(cr): if row >= len(l): continue index += len(l[row]) if lf[row] & FL_IS_LINEBREAK: index += 1 if lf[cr] & FL_IS_LINEBREAK: index += 1 return index except IndexError: return 0
Python
def insert_text(self, substring, from_undo=False): '''Insert new text at the current cursor position. Override this function in order to pre-process text for input validation. ''' if self.readonly or not substring or not self._lines: return if isinstance(substring, bytes): substring = substring.decode('utf8') if self.replace_crlf: substring = substring.replace(u'\r\n', u'\n') mode = self.input_filter if mode is not None: chr = type(substring) if chr is bytes: int_pat = self._insert_int_patb else: int_pat = self._insert_int_patu if mode == 'int': substring = re.sub(int_pat, chr(''), substring) elif mode == 'float': if '.' in self.text: substring = re.sub(int_pat, chr(''), substring) else: substring = '.'.join([re.sub(int_pat, chr(''), k) for k in substring.split(chr('.'), 1)]) else: substring = mode(substring, from_undo) if not substring: return self._hide_handles(EventLoop.window) if not from_undo and self.multiline and self.auto_indent \ and substring == u'\n': substring = self._auto_indent(substring) cc, cr = self.cursor sci = self.cursor_index ci = sci() text = self._lines[cr] len_str = len(substring) new_text = text[:cc] + substring + text[cc:] self._set_line_text(cr, new_text) wrap = (self._get_text_width( new_text, self.tab_width, self._label_cached) > (self.width - self.padding[0] - self.padding[2])) if len_str > 1 or substring == u'\n' or wrap: # Avoid refreshing text on every keystroke. # Allows for faster typing of text when the amount of text in # TextInput gets large. start, finish, lines,\ lineflags, len_lines = self._get_line_from_cursor(cr, new_text) # calling trigger here could lead to wrong cursor positioning # and repeating of text when keys are added rapidly in a automated # fashion. From Android Keyboard for example. self._refresh_text_from_property('insert', start, finish, lines, lineflags, len_lines) self.cursor = self.get_cursor_from_index(ci + len_str) # handle undo and redo self._set_unredo_insert(ci, ci + len_str, substring, from_undo)
def insert_text(self, substring, from_undo=False): '''Insert new text at the current cursor position. Override this function in order to pre-process text for input validation. ''' if self.readonly or not substring or not self._lines: return if isinstance(substring, bytes): substring = substring.decode('utf8') if self.replace_crlf: substring = substring.replace(u'\r\n', u'\n') mode = self.input_filter if mode is not None: chr = type(substring) if chr is bytes: int_pat = self._insert_int_patb else: int_pat = self._insert_int_patu if mode == 'int': substring = re.sub(int_pat, chr(''), substring) elif mode == 'float': if '.' in self.text: substring = re.sub(int_pat, chr(''), substring) else: substring = '.'.join([re.sub(int_pat, chr(''), k) for k in substring.split(chr('.'), 1)]) else: substring = mode(substring, from_undo) if not substring: return self._hide_handles(EventLoop.window) if not from_undo and self.multiline and self.auto_indent \ and substring == u'\n': substring = self._auto_indent(substring) cc, cr = self.cursor sci = self.cursor_index ci = sci() text = self._lines[cr] len_str = len(substring) new_text = text[:cc] + substring + text[cc:] self._set_line_text(cr, new_text) wrap = (self._get_text_width( new_text, self.tab_width, self._label_cached) > (self.width - self.padding[0] - self.padding[2])) if len_str > 1 or substring == u'\n' or wrap: # Avoid refreshing text on every keystroke. # Allows for faster typing of text when the amount of text in # TextInput gets large. start, finish, lines,\ lineflags, len_lines = self._get_line_from_cursor(cr, new_text) # calling trigger here could lead to wrong cursor positioning # and repeating of text when keys are added rapidly in a automated # fashion. From Android Keyboard for example. self._refresh_text_from_property('insert', start, finish, lines, lineflags, len_lines) self.cursor = self.get_cursor_from_index(ci + len_str) # handle undo and redo self._set_unredo_insert(ci, ci + len_str, substring, from_undo)
Python
def do_undo(self): '''Do undo operation. .. versionadded:: 1.3.0 This action un-does any edits that have been made since the last call to reset_undo(). This function is automatically called when `ctrl+z` keys are pressed. ''' try: x_item = self._undo.pop() undo_type = x_item['undo_command'][0] self.cursor = self.get_cursor_from_index(x_item['undo_command'][1]) if undo_type == 'insert': ci, sci = x_item['undo_command'][1:] self._selection_from = ci self._selection_to = sci self._selection = True self.delete_selection(True) elif undo_type == 'bkspc': substring = x_item['undo_command'][2:][0] self.insert_text(substring, True) elif undo_type == 'shiftln': direction, rows, cursor = x_item['undo_command'][1:] self._shift_lines(direction, rows, cursor, True) else: # delsel substring = x_item['undo_command'][2:][0] self.insert_text(substring, True) self._redo.append(x_item) except IndexError: # reached at top of undo list pass
def do_undo(self): '''Do undo operation. .. versionadded:: 1.3.0 This action un-does any edits that have been made since the last call to reset_undo(). This function is automatically called when `ctrl+z` keys are pressed. ''' try: x_item = self._undo.pop() undo_type = x_item['undo_command'][0] self.cursor = self.get_cursor_from_index(x_item['undo_command'][1]) if undo_type == 'insert': ci, sci = x_item['undo_command'][1:] self._selection_from = ci self._selection_to = sci self._selection = True self.delete_selection(True) elif undo_type == 'bkspc': substring = x_item['undo_command'][2:][0] self.insert_text(substring, True) elif undo_type == 'shiftln': direction, rows, cursor = x_item['undo_command'][1:] self._shift_lines(direction, rows, cursor, True) else: # delsel substring = x_item['undo_command'][2:][0] self.insert_text(substring, True) self._redo.append(x_item) except IndexError: # reached at top of undo list pass
Python
def do_backspace(self, from_undo=False, mode='bkspc'): '''Do backspace operation from the current cursor position. This action might do several things: - removing the current selection if available. - removing the previous char and move the cursor back. - do nothing, if we are at the start. ''' if self.readonly: return cc, cr = self.cursor _lines = self._lines text = _lines[cr] cursor_index = self.cursor_index() text_last_line = _lines[cr - 1] if cc == 0 and cr == 0: return _lines_flags = self._lines_flags start = cr if cc == 0: substring = u'\n' if _lines_flags[cr] else u' ' new_text = text_last_line + text self._set_line_text(cr - 1, new_text) self._delete_line(cr) start = cr - 1 else: # ch = text[cc-1] substring = text[cc - 1] new_text = text[:cc - 1] + text[cc:] self._set_line_text(cr, new_text) # refresh just the current line instead of the whole text start, finish, lines, lineflags, len_lines =\ self._get_line_from_cursor(start, new_text) # avoid trigger refresh, leads to issue with # keys/text send rapidly through code. self._refresh_text_from_property('del', start, finish, lines, lineflags, len_lines) self.cursor = self.get_cursor_from_index(cursor_index - 1) # handle undo and redo self._set_undo_redo_bkspc( cursor_index, cursor_index - 1, substring, from_undo)
def do_backspace(self, from_undo=False, mode='bkspc'): '''Do backspace operation from the current cursor position. This action might do several things: - removing the current selection if available. - removing the previous char and move the cursor back. - do nothing, if we are at the start. ''' if self.readonly: return cc, cr = self.cursor _lines = self._lines text = _lines[cr] cursor_index = self.cursor_index() text_last_line = _lines[cr - 1] if cc == 0 and cr == 0: return _lines_flags = self._lines_flags start = cr if cc == 0: substring = u'\n' if _lines_flags[cr] else u' ' new_text = text_last_line + text self._set_line_text(cr - 1, new_text) self._delete_line(cr) start = cr - 1 else: # ch = text[cc-1] substring = text[cc - 1] new_text = text[:cc - 1] + text[cc:] self._set_line_text(cr, new_text) # refresh just the current line instead of the whole text start, finish, lines, lineflags, len_lines =\ self._get_line_from_cursor(start, new_text) # avoid trigger refresh, leads to issue with # keys/text send rapidly through code. self._refresh_text_from_property('del', start, finish, lines, lineflags, len_lines) self.cursor = self.get_cursor_from_index(cursor_index - 1) # handle undo and redo self._set_undo_redo_bkspc( cursor_index, cursor_index - 1, substring, from_undo)
Python
def delete_selection(self, from_undo=False):
    '''Delete the current text selection (if any).

    :param from_undo: True when replaying an undo/redo entry, so this
        deletion is not recorded as a new undo step.
    '''
    if self.readonly:
        return
    self._hide_handles(EventLoop.window)
    # Preserve the scroll position; the text refresh below would
    # otherwise reset it.
    scrl_x = self.scroll_x
    scrl_y = self.scroll_y
    # NOTE(review): cc/cr are unused in this method.
    cc, cr = self.cursor
    if not self._selection:
        return
    v = self._get_text(encode=False)
    # Normalize the selection bounds so a <= b regardless of the
    # direction the selection was dragged.
    a, b = self._selection_from, self._selection_to
    if a > b:
        a, b = b, a
    self.cursor = cursor = self.get_cursor_from_index(a)
    start = cursor
    finish = self.get_cursor_from_index(b)
    # Stitch together the text before and after the selected span.
    cur_line = self._lines[start[1]][:start[0]] +\
        self._lines[finish[1]][finish[0]:]
    lines, lineflags = self._split_smart(cur_line)
    len_lines = len(lines)
    if start[1] == finish[1]:
        # Single-line selection: only that line needs rewriting.
        self._set_line_text(start[1], cur_line)
    else:
        self._refresh_text_from_property('del', start[1], finish[1], lines,
                                         lineflags, len_lines)
    self.scroll_x = scrl_x
    self.scroll_y = scrl_y
    # handle undo and redo for delete selection
    self._set_unredo_delsel(a, b, v[a:b], from_undo)
    self.cancel_selection()
def delete_selection(self, from_undo=False): '''Delete the current text selection (if any). ''' if self.readonly: return self._hide_handles(EventLoop.window) scrl_x = self.scroll_x scrl_y = self.scroll_y cc, cr = self.cursor if not self._selection: return v = self._get_text(encode=False) a, b = self._selection_from, self._selection_to if a > b: a, b = b, a self.cursor = cursor = self.get_cursor_from_index(a) start = cursor finish = self.get_cursor_from_index(b) cur_line = self._lines[start[1]][:start[0]] +\ self._lines[finish[1]][finish[0]:] lines, lineflags = self._split_smart(cur_line) len_lines = len(lines) if start[1] == finish[1]: self._set_line_text(start[1], cur_line) else: self._refresh_text_from_property('del', start[1], finish[1], lines, lineflags, len_lines) self.scroll_x = scrl_x self.scroll_y = scrl_y # handle undo and redo for delete selection self._set_unredo_delsel(a, b, v[a:b], from_undo) self.cancel_selection()
Python
def _update_selection(self, finished=False):
    '''Update selection text and order of from/to if finished is True.
    Can be called multiple times until finished is True.
    '''
    a, b = self._selection_from, self._selection_to
    if a > b:
        # Normalize so a <= b regardless of drag direction.
        a, b = b, a
    self._selection_finished = finished
    _selection_text = self._get_text(encode=False)[a:b]
    # Respect allow_copy, and mask the exposed text for password fields
    # (one mask character per selected character).
    self.selection_text = ("" if not self.allow_copy else
                           ((self.password_mask * (b - a)) if
                            self.password else _selection_text))
    if not finished:
        self._selection = True
    else:
        # A finished selection only counts if it is non-empty.
        self._selection = bool(len(_selection_text))
        self._selection_touch = None
    if a == 0:
        # update graphics only on new line
        # allows smoother scrolling, noticeably
        # faster when dealing with large text.
        self._update_graphics_selection()
        # self._trigger_update_graphics()
def _update_selection(self, finished=False): '''Update selection text and order of from/to if finished is True. Can be called multiple times until finished is True. ''' a, b = self._selection_from, self._selection_to if a > b: a, b = b, a self._selection_finished = finished _selection_text = self._get_text(encode=False)[a:b] self.selection_text = ("" if not self.allow_copy else ((self.password_mask * (b - a)) if self.password else _selection_text)) if not finished: self._selection = True else: self._selection = bool(len(_selection_text)) self._selection_touch = None if a == 0: # update graphics only on new line # allows smoother scrolling, noticeably # faster when dealing with large text. self._update_graphics_selection() # self._trigger_update_graphics()
Python
def on_triple_tap(self):
    '''This event is dispatched when a triple tap happens
    inside TextInput.

    The default behavior is to select the line around the current
    cursor position. Override this to provide different behavior.
    Alternatively, you can bind to this event to provide additional
    functionality.
    '''
    start, end = self._expand_range(self.cursor_index())

    def _apply_selection(dt):
        self.select_text(start, end)

    # Defer the selection by one frame, as the original implementation
    # does via a scheduled callback.
    Clock.schedule_once(_apply_selection)
Python
def cut(self):
    ''' Copy current selection to clipboard then delete it from
    TextInput.

    .. versionadded:: 1.8.0
    '''
    selected = self.selection_text
    self._cut(selected)
Python
def copy(self, data=''):
    ''' Copy the value provided in argument `data` into current
    clipboard. If data is not of type string it will be converted to
    string. If no data is provided then current selection if present is
    copied.

    .. versionadded:: 1.8.0
    '''
    self._ensure_clipboard()
    # Prefer the explicit argument; fall back to the current selection.
    payload = data if data else self.selection_text
    if payload:
        return Clipboard.copy(payload)
Python
def tick(self):
    '''Advance the clock to the next step. Must be called every frame.
    The default clock has a tick() function called by the core Kivy
    framework.

    :return: ``self._dt`` (the frame delta maintained elsewhere in this
        class -- confirm against ``idle()``).
    '''
    self._release_references()

    ts = self.time()
    # idle() blocks until the next frame is due and returns the new
    # current time.
    current = self.idle()

    # tick the current time
    self._frames += 1
    self._fps_counter += 1

    # compute how long the event processing takes
    self._duration_count += 1
    self._sleep_time += current - ts
    t_tot = current - self._duration_ts0
    if t_tot >= 1.:
        # Roughly once per second, derive the average event-processing
        # duration and reset the accumulators.
        self._events_duration = \
            (t_tot - self._sleep_time) / float(self._duration_count)
        self._duration_ts0 = current
        self._sleep_time = self._duration_count = 0

    # calculate fps things
    if self._last_fps_tick is None:
        self._last_fps_tick = current
    elif current - self._last_fps_tick > 1:
        # Refresh the FPS figures about once per second.
        d = float(current - self._last_fps_tick)
        self._fps = self._fps_counter / d
        self._rfps = self._rfps_counter
        self._last_fps_tick = current
        self._fps_counter = 0
        self._rfps_counter = 0

    # process event
    self._process_events()

    return self._dt
def tick(self): '''Advance the clock to the next step. Must be called every frame. The default clock has a tick() function called by the core Kivy framework.''' self._release_references() ts = self.time() current = self.idle() # tick the current time self._frames += 1 self._fps_counter += 1 # compute how long the event processing takes self._duration_count += 1 self._sleep_time += current - ts t_tot = current - self._duration_ts0 if t_tot >= 1.: self._events_duration = \ (t_tot - self._sleep_time) / float(self._duration_count) self._duration_ts0 = current self._sleep_time = self._duration_count = 0 # calculate fps things if self._last_fps_tick is None: self._last_fps_tick = current elif current - self._last_fps_tick > 1: d = float(current - self._last_fps_tick) self._fps = self._fps_counter / d self._rfps = self._rfps_counter self._last_fps_tick = current self._fps_counter = 0 self._rfps_counter = 0 # process event self._process_events() return self._dt
Python
def _animate_content(self):
    '''Animate content to IME height.
    '''
    kargs = self.keyboard_anim_args
    # Animation is imported lazily into the module-level global the
    # first time an animation is needed.
    global Animation
    if not Animation:
        from kivy.animation import Animation
    # Cancel any in-flight animation on this window before starting a
    # new one that moves _kheight up to the keyboard height.
    Animation.cancel_all(self)
    Animation(
        _kheight=self.keyboard_height + self.keyboard_padding,
        d=kargs['d'], t=kargs['t']).start(self)
def _animate_content(self): '''Animate content to IME height. ''' kargs = self.keyboard_anim_args global Animation if not Animation: from kivy.animation import Animation Animation.cancel_all(self) Animation( _kheight=self.keyboard_height + self.keyboard_padding, d=kargs['d'], t=kargs['t']).start(self)
Python
def create_window(self, *largs): '''Will create the main window and configure it. .. warning:: This method is called automatically at runtime. If you call it, it will recreate a RenderContext and Canvas. This means you'll have a new graphics tree, and the old one will be unusable. This method exist to permit the creation of a new OpenGL context AFTER closing the first one. (Like using runTouchApp() and stopTouchApp()). This method has only been tested in a unittest environment and is not suitable for Applications. Again, don't use this method unless you know exactly what you are doing! ''' # just to be sure, if the trigger is set, and if this method is # manually called, unset the trigger self.trigger_create_window.cancel() # ensure the window creation will not be called twice if platform in ('android', 'ios'): self._unbind_create_window() if not self.initialized: from kivy.core.gl import init_gl init_gl() # create the render context and canvas, only the first time. from kivy.graphics import RenderContext, Canvas self.render_context = RenderContext() self.canvas = Canvas() self.render_context.add(self.canvas) else: # if we get initialized more than once, then reload opengl state # after the second time. # XXX check how it's working on embed platform. if platform == 'linux' or Window.__class__.__name__ == 'WindowSDL': # on linux, it's safe for just sending a resize. self.dispatch('on_resize', *self.system_size) else: # on other platform, window are recreated, we need to reload. from kivy.graphics.context import get_context get_context().reload() Clock.schedule_once(lambda x: self.canvas.ask_update(), 0) self.dispatch('on_resize', *self.system_size) # ensure the gl viewport is correct self.update_viewport()
def create_window(self, *largs): '''Will create the main window and configure it. .. warning:: This method is called automatically at runtime. If you call it, it will recreate a RenderContext and Canvas. This means you'll have a new graphics tree, and the old one will be unusable. This method exist to permit the creation of a new OpenGL context AFTER closing the first one. (Like using runTouchApp() and stopTouchApp()). This method has only been tested in a unittest environment and is not suitable for Applications. Again, don't use this method unless you know exactly what you are doing! ''' # just to be sure, if the trigger is set, and if this method is # manually called, unset the trigger self.trigger_create_window.cancel() # ensure the window creation will not be called twice if platform in ('android', 'ios'): self._unbind_create_window() if not self.initialized: from kivy.core.gl import init_gl init_gl() # create the render context and canvas, only the first time. from kivy.graphics import RenderContext, Canvas self.render_context = RenderContext() self.canvas = Canvas() self.render_context.add(self.canvas) else: # if we get initialized more than once, then reload opengl state # after the second time. # XXX check how it's working on embed platform. if platform == 'linux' or Window.__class__.__name__ == 'WindowSDL': # on linux, it's safe for just sending a resize. self.dispatch('on_resize', *self.system_size) else: # on other platform, window are recreated, we need to reload. from kivy.graphics.context import get_context get_context().reload() Clock.schedule_once(lambda x: self.canvas.ask_update(), 0) self.dispatch('on_resize', *self.system_size) # ensure the gl viewport is correct self.update_viewport()
Python
def add_widget(self, widget, canvas=None):
    '''Add a widget to a window'''
    widget.parent = self
    self.children.insert(0, widget)
    # Resolve which canvas layer receives the widget's canvas.
    if canvas == 'before':
        target_canvas = self.canvas.before
    elif canvas == 'after':
        target_canvas = self.canvas.after
    else:
        target_canvas = self.canvas
    target_canvas.add(widget.canvas)
    self.update_childsize([widget])
    widget.bind(
        pos_hint=self._update_childsize,
        size_hint=self._update_childsize,
        size_hint_max=self._update_childsize,
        size_hint_min=self._update_childsize,
        size=self._update_childsize,
        pos=self._update_childsize)
Python
def w_getopt(args, options):
    """A getopt for Windows style command lines.

    Options may start with either '-' or '/', the option names may have
    more than one letter (examples are /tlb or -RegServer), and option
    names are case insensitive.

    Returns two elements, just as getopt.getopt.  The first is a list
    of (option, value) pairs in the same way getopt.getopt does, but
    there is no '-' or '/' prefix to the option name, and the option
    name is always lower case.  The second is the list of arguments
    which do not belong to an option.

    Different from getopt.getopt, a single argument not belonging to an
    option does not terminate parsing.

    :param args: list of command-line tokens to parse.
    :param options: allowed option names, lower case; a trailing ':'
        marks an option that takes an argument.
    :raises GetoptError: for an unknown option, or an option that
        requires an argument appearing last.
    """
    opts = []
    arguments = []
    while args:
        if args[0][:1] in "/-":
            arg = args[0][1:]  # strip the '-' or '/'
            arg = arg.lower()
            if arg + ':' in options:
                try:
                    opts.append((arg, args[1]))
                except IndexError:
                    # BUG FIX: the original used the Python-2-only
                    # 'raise E, msg' statement form, a SyntaxError on
                    # Python 3; the call form works on both.
                    raise GetoptError(
                        "option '%s' requires an argument" % args[0])
                args = args[1:]
            elif arg in options:
                opts.append((arg, ''))
            else:
                raise GetoptError("invalid option '%s'" % args[0])
            args = args[1:]
        else:
            arguments.append(args[0])
            args = args[1:]
    return opts, arguments
Python
def write_value(self, value, cube_name, element_tuple, dimensions=None):
    """ Write value into cube at specified coordinates

    :param value: the actual value
    :param cube_name: name of the target cube
    :param element_tuple: target coordinates
    :param dimensions: optional. Dimension names in their natural
        order. Will speed up the execution!
    :return: response
    """
    if not dimensions:
        # Import only when actually needed; this also avoids paying the
        # import (and potential circular-import) cost when the caller
        # already supplied the dimension order.
        from TM1py.Services.CubeService import CubeService
        dimensions = CubeService(self._rest).get(cube_name).dimensions
    request = "/api/v1/Cubes('{}')/tm1.Update".format(cube_name)
    body_as_dict = collections.OrderedDict()
    body_as_dict["Cells"] = [{}]
    # NOTE(review): this key looks garbled ("[email protected]") -- likely an
    # OData bind annotation in the upstream source; confirm before use.
    body_as_dict["Cells"][0]["[email protected]"] = \
        ["Dimensions('{}')/Hierarchies('{}')/Elements('{}')".format(dim, dim, elem)
         for dim, elem in zip(dimensions, element_tuple)]
    # BUG FIX: test against None explicitly so falsy values (0, False,
    # "") are written as-is instead of being silently replaced by "".
    body_as_dict["Value"] = "" if value is None else str(value)
    data = json.dumps(body_as_dict, ensure_ascii=False)
    return self._rest.POST(request=request, data=data)
Python
def write_values(self, cube_name, cellset_as_dict, dimensions=None):
    """ Write values in cube.  Easy to use but doesnt scale.
    Not suitable for cellsets with > 1000 cells

    :param cube_name: name of the cube
    :param cellset_as_dict: {(elem_a, elem_b, elem_c): 243,
        (elem_d, elem_e, elem_f): 109}
    :param dimensions: optional. Dimension names in their natural
        order. Will speed up the execution!
    :return: None
    """
    if not dimensions:
        from TM1py.Services import CubeService
        cube_service = CubeService(self._rest)
        dimensions = cube_service.get_dimension_names(cube_name)
    request = "/api/v1/Cubes('{}')/tm1.Update".format(cube_name)
    updates = []
    for element_tuple, value in cellset_as_dict.items():
        body_as_dict = collections.OrderedDict()
        body_as_dict["Cells"] = [{}]
        # NOTE(review): key looks garbled ("[email protected]") -- likely an
        # OData bind annotation in the upstream source; confirm.
        body_as_dict["Cells"][0]["[email protected]"] = \
            ["Dimensions('{}')/Hierarchies('{}')/Elements('{}')".format(dim, dim, elem)
             for dim, elem in zip(dimensions, element_tuple)]
        # BUG FIX: test against None explicitly so falsy values
        # (0, False, "") are written as-is, not replaced by "".
        body_as_dict["Value"] = "" if value is None else str(value)
        updates.append(json.dumps(body_as_dict, ensure_ascii=False))
    updates = '[' + ','.join(updates) + ']'
    self._rest.POST(request=request, data=updates)
Python
def write_values_through_cellset(self, mdx, values):
    """ Significantly faster than write_values function.

    A cellset is created on the server from the MDX expression; every
    cell is then addressable by its ordinal (counted top-to-bottom,
    left-to-right, starting at 0), and the order of *values* determines
    which ordinal each value lands on.

    When writing large datasets into TM1 Cubes it can be convenient to
    call this function asynchronously.

    :param mdx: Valid MDX Expression.
    :param values: List of values. The order of the list/iterable
        determines the insertion point in the cellset.
    :return: None
    """
    # create the cellset, push the values, then free the server-side
    # memory again -- exactly this sequence, in this order.
    cs_id = self.create_cellset(mdx)
    self.update_cellset(cs_id, values)
    self.delete_cellset(cs_id)
Python
def update_cellset(self, cellset_id, values):
    """ Write values into a cellset.

    The number of values must match the number of cells in the cellset.

    :param cellset_id: id of the server-side cellset
    :param values: iterable with numeric and string values
    :return: None
    """
    request = "/api/v1/Cellsets('{}')/Cells".format(cellset_id)
    # One {Ordinal, Value} record per value, ordinals counting from 0.
    payload = [{"Ordinal": ordinal, "Value": value}
               for ordinal, value in enumerate(values)]
    self._rest.PATCH(request, json.dumps(payload, ensure_ascii=False))
Python
def execute_mdx(self, mdx, cell_properties=None, top=None):
    """ Execute MDX and return the cells with their properties

    :param mdx: MDX Query, as string
    :param cell_properties: properties to be queried from the cell.
        E.g. Value, Ordinal, RuleDerived, ...
    :param top: integer; limits the number of tuples and cells returned
    :return: content in sweet concise structure.
    """
    if not cell_properties:
        cell_properties = ['Value', 'Ordinal']
    elif 'Ordinal' not in cell_properties:
        # Ordinal is always requested (presumably required downstream
        # by build_content_from_cellset -- confirm). NOTE: this appends
        # to the caller's list in place.
        cell_properties.append('Ordinal')
    # The three '{}' placeholders receive, in order: the $top clause for
    # the axis tuples, the $select list of cell properties, and the
    # $top clause for the cells.
    request = '/api/v1/ExecuteMDX?$expand=' \
              'Cube($select=Name;$expand=Dimensions($select=Name)),' \
              'Axes($expand=Tuples($expand=Members($select=Name;$expand=Element($select=UniqueName)){})),' \
              'Cells($select={}{})'.format(';$top=' + str(top) if top else '',
                                           ','.join(cell_properties),
                                           ';$top=' + str(top) if top else '')
    data = {
        'MDX': mdx
    }
    response = self._rest.POST(request=request, data=json.dumps(data, ensure_ascii=False))
    return Utils.build_content_from_cellset(raw_cellset_as_dict=response.json(),
                                            cell_properties=cell_properties,
                                            top=top)
def execute_mdx(self, mdx, cell_properties=None, top=None): """ Execute MDX and return the cells with their properties :param mdx: MDX Query, as string :param cell_properties: properties to be queried from the cell. E.g. Value, Ordinal, RuleDerived, ... :param top: integer :return: content in sweet consice strcuture. """ if not cell_properties: cell_properties = ['Value', 'Ordinal'] elif 'Ordinal' not in cell_properties: cell_properties.append('Ordinal') request = '/api/v1/ExecuteMDX?$expand=' \ 'Cube($select=Name;$expand=Dimensions($select=Name)),' \ 'Axes($expand=Tuples($expand=Members($select=Name;$expand=Element($select=UniqueName)){})),' \ 'Cells($select={}{})'.format(';$top=' + str(top) if top else '', ','.join(cell_properties), ';$top=' + str(top) if top else '') data = { 'MDX': mdx } response = self._rest.POST(request=request, data=json.dumps(data, ensure_ascii=False)) return Utils.build_content_from_cellset(raw_cellset_as_dict=response.json(), cell_properties=cell_properties, top=top)
Python
def execute_view(self, cube_name, view_name, cell_properties=None, private=True, top=None):
    """ Get view content as dictionary with sweet and concise structure.
        Works on NativeView and MDXView !

    :param cube_name: String
    :param view_name: String
    :param cell_properties: List, cell properties: [Values, Status,
        HasPicklist, etc.]
    :param private: Boolean
    :param top: Int, number of cells to return (counting from top)
    :return: Dictionary : {([dim1].[elem1], [dim2][elem6]):
        {'Value':3127.312, 'Ordinal':12} .... }
    """
    # Default the property list; Ordinal is always included.
    # NOTE: as in the original, a caller-supplied list is extended in
    # place when 'Ordinal' is missing.
    if not cell_properties:
        cell_properties = ['Value', 'Ordinal']
    elif 'Ordinal' not in cell_properties:
        cell_properties.append('Ordinal')
    raw_cellset = self._get_cellset_from_view(
        cube_name, view_name, cell_properties, private, top)
    return Utils.build_content_from_cellset(raw_cellset, cell_properties, top)
Python
def _get_cellset_from_view(self, cube_name, view_name, cell_properties, private=True, top=None): """ get view content as dictionary in its native (cellset-) structure. :param cube_name: String :param view_name: String :param cell_properties: List of cell properties :param private: Boolean :param top: Int, number of cells :return: `Dictionary` : {Cells : {}, 'ID' : '', 'Axes' : [{'Ordinal' : 1, Members: [], ...}, {'Ordinal' : 2, Members: [], ...}, {'Ordinal' : 3, Members: [], ...} ] } """ views = 'PrivateViews' if private else 'Views' if top: request = '/api/v1/Cubes(\'{}\')/{}(\'{}\')/tm1.Execute?$expand=' \ 'Cube($select=Name;$expand=Dimensions($select=Name)),' \ 'Axes($expand=Tuples($expand=Members($select=Name;$expand=Element($select=UniqueName));$top={})),' \ 'Cells($select={};$top={})' \ .format(cube_name, views, view_name, str(top), ','.join(cell_properties), str(top)) else: request = '/api/v1/Cubes(\'{}\')/{}(\'{}\')/tm1.Execute?$expand=' \ 'Cube($select=Name;$expand=Dimensions($select=Name)),' \ 'Axes($expand=Tuples($expand=Members($select=Name;$expand=Element($select=UniqueName)))),' \ 'Cells($select={})' \ .format(cube_name, views, view_name, ','.join(cell_properties)) response = self._rest.POST(request, '') return response.json()
def _get_cellset_from_view(self, cube_name, view_name, cell_properties, private=True, top=None): """ get view content as dictionary in its native (cellset-) structure. :param cube_name: String :param view_name: String :param cell_properties: List of cell properties :param private: Boolean :param top: Int, number of cells :return: `Dictionary` : {Cells : {}, 'ID' : '', 'Axes' : [{'Ordinal' : 1, Members: [], ...}, {'Ordinal' : 2, Members: [], ...}, {'Ordinal' : 3, Members: [], ...} ] } """ views = 'PrivateViews' if private else 'Views' if top: request = '/api/v1/Cubes(\'{}\')/{}(\'{}\')/tm1.Execute?$expand=' \ 'Cube($select=Name;$expand=Dimensions($select=Name)),' \ 'Axes($expand=Tuples($expand=Members($select=Name;$expand=Element($select=UniqueName));$top={})),' \ 'Cells($select={};$top={})' \ .format(cube_name, views, view_name, str(top), ','.join(cell_properties), str(top)) else: request = '/api/v1/Cubes(\'{}\')/{}(\'{}\')/tm1.Execute?$expand=' \ 'Cube($select=Name;$expand=Dimensions($select=Name)),' \ 'Axes($expand=Tuples($expand=Members($select=Name;$expand=Element($select=UniqueName)))),' \ 'Cells($select={})' \ .format(cube_name, views, view_name, ','.join(cell_properties)) response = self._rest.POST(request, '') return response.json()
Python
def create_cellset(self, mdx):
    """ Execute MDX in order to create a cellset on the server.

    :param mdx: MDX Query, as string
    :return: the id of the newly created cellset
    """
    payload = json.dumps({'MDX': mdx}, ensure_ascii=False)
    response = self._rest.POST(request='/api/v1/ExecuteMDX', data=payload)
    return response.json()['ID']
Python
def create(self, hierarchy):
    """ Create a hierarchy in an existing dimension.

    :param hierarchy: instance carrying dimension_name and a request body
    :return: response
    """
    request = "/api/v1/Dimensions('{}')/Hierarchies".format(
        hierarchy.dimension_name)
    return self._rest.POST(request, hierarchy.body)
Python
def _update_element_attributes(self, hierarchy):
    """Synchronize the element attributes of a hierarchy with the server.

    Creates attributes present on the hierarchy but missing on the server,
    then deletes server-side attributes no longer present on the hierarchy.

    :param hierarchy: Instance of TM1py.Hierarchy
    :return: None
    """
    existing_names = [
        attribute.name
        for attribute in self.elements.get_element_attributes(
            dimension_name=hierarchy.dimension_name,
            hierarchy_name=hierarchy.name)
    ]
    # create only the attributes that do not already exist on the server
    for attribute in hierarchy.element_attributes:
        if attribute not in existing_names:
            self.elements.create_element_attribute(
                dimension_name=hierarchy.dimension_name,
                hierarchy_name=hierarchy.name,
                element_attribute=attribute)
    # drop server-side attributes that were removed from the hierarchy
    for attribute in existing_names:
        if attribute not in hierarchy.element_attributes:
            self.elements.delete_element_attribute(
                dimension_name=hierarchy.dimension_name,
                hierarchy_name=hierarchy.name,
                element_attribute=attribute)
Python
def update(self, subset, private=True):
    """Update a subset on the TM1 Server.

    :param subset: instance of TM1py.Subset
    :param private: Boolean
    :return: response
    """
    if private:
        # private subsets have no dependencies: delete and rebuild
        return self._update_private(subset)
    # public subsets are patched in place (elements cleared via TI workaround)
    return self._update_public(subset)
Python
def _update_private(self, subset):
    """Update a private subset by deleting and recreating it.

    :param subset: instance of TM1py.Subset
    :return: response
    """
    request = "/api/v1/Dimensions('{}')/Hierarchies('{}')/PrivateSubsets('{}')".format(
        subset.dimension_name, subset.hierarchy_name, subset.name)
    self._rest.DELETE(request, '')
    # rebuild the subset from scratch
    return self.create(subset, True)
Python
def _update_public(self, subset):
    """Update a public subset on the TM1 Server.

    :param subset: instance of TM1py.Subset
    :return: response
    """
    # Clear the subset's elements through a TI process. Evil workaround:
    # DELETE on the Elements collection is not supported (10.2.2 FP6).
    ti = "SubsetDeleteAllElements('{}', '{}');".format(subset.dimension_name, subset.name)
    self._process_service.execute_ti_code(lines_prolog=ti, lines_epilog='')
    # patch the (now empty) subset with the new definition
    request = "/api/v1/Dimensions('{}')/Hierarchies('{}')/Subsets('{}')".format(
        subset.dimension_name, subset.hierarchy_name, subset.name)
    return self._rest.PATCH(request=request, data=subset.body)
Python
def exists(self, subset_name, dimension_name, hierarchy_name=None):
    """Check whether a subset exists as private and / or public.

    :param subset_name:
    :param dimension_name:
    :param hierarchy_name: defaults to the dimension name when omitted
    :return: 2 booleans: (Private subset exists, Public subset exists)
    """
    if not hierarchy_name:
        hierarchy_name = dimension_name
    # ordered so the returned tuple is always (private, public)
    results = collections.OrderedDict((('PrivateSubsets', False), ('Subsets', False)))
    for subset_type in results:
        try:
            self._rest.GET("/api/v1/Dimensions('{}')/Hierarchies('{}')/{}('{}')"
                           .format(dimension_name, hierarchy_name, subset_type, subset_name))
            results[subset_type] = True
        except TM1pyException as e:
            # a 404 simply means the subset is absent; anything else is a real error
            if e._status_code != 404:
                raise e
    return tuple(results.values())
Python
def build_content_from_cellset(raw_cellset_as_dict, cell_properties, top=None):
    """ transform raw cellset data into concise dictionary
    :param raw_cellset_as_dict:
    :param cell_properties:
    :param top: Maximum Number of cells
    :return:
    """
    content_as_dict = CaseAndSpaceInsensitiveTuplesDict()
    cube_dimensions = [dim['Name'] for dim in raw_cellset_as_dict['Cube']['Dimensions']]
    axe0_as_dict = raw_cellset_as_dict['Axes'][0]
    axe1_as_dict = raw_cellset_as_dict['Axes'][1]
    # ordinal_cells indexes the flat Cells array; it advances once per column cell
    ordinal_cells = 0
    ordinal_axe2 = 0
    # get coordinates on axe 2: Title
    # if there are no elements on axe 2 assign empty list to elements_on_axe2
    if len(raw_cellset_as_dict['Axes']) > 2:
        axe2_as_dict = raw_cellset_as_dict['Axes'][2]
        tuples_as_dict = axe2_as_dict['Tuples'][ordinal_axe2]['Members']
        # condition for MDX Calculated Members (WITH MEMBER AS), that have no underlying Element
        elements_on_axe2 = [member['Element']['UniqueName'] if member['Element'] else member['Name']
                            for member in tuples_as_dict]
    else:
        elements_on_axe2 = []
    ordinal_axe1 = 0
    for i in range(axe1_as_dict['Cardinality']):
        # get coordinates on axe 1: Rows
        tuples_as_dict = axe1_as_dict['Tuples'][ordinal_axe1]['Members']
        elements_on_axe1 = [member['Element']['UniqueName'] if member['Element'] else member['Name']
                            for member in tuples_as_dict]
        ordinal_axe0 = 0
        for j in range(axe0_as_dict['Cardinality']):
            # get coordinates on axe 0: Columns
            tuples_as_dict = axe0_as_dict['Tuples'][ordinal_axe0]['Members']
            elements_on_axe0 = [member['Element']['UniqueName'] if member['Element'] else member['Name']
                                for member in tuples_as_dict]
            # full address = columns + titles + rows, reordered to match cube dimension order
            coordinates = elements_on_axe0 + elements_on_axe2 + elements_on_axe1
            coordinates_sorted = sort_addresstuple(cube_dimensions, coordinates)
            # get cell properties
            content_as_dict[coordinates_sorted] = {}
            for cell_property in cell_properties:
                value = raw_cellset_as_dict['Cells'][ordinal_cells][cell_property]
                content_as_dict[coordinates_sorted][cell_property] = value
            ordinal_axe0 += 1
            ordinal_cells += 1
            # stop once the requested maximum number of cells is reached
            if top is not None and ordinal_cells >= top:
                break
        # re-check the limit to break out of the outer (row) loop as well
        if top is not None and ordinal_cells >= top:
            break
        ordinal_axe1 += 1
    return content_as_dict
Python
def read_path_file(self):
    """Look for a file that lists items to add to path. If present, read it and add the paths.

    The file must contain exactly three non-comment, non-blank lines, each an
    existing directory; they are stored as (bin_dir, runtime_dir, ml_dir).

    Returns:
        bool: True if three existing directories were read, False otherwise.
    """
    filtered_lines = []
    if os.path.isfile(self.path_file_name):
        # `with` guarantees the handle is closed (the original leaked it)
        with open(self.path_file_name, 'r') as pth_file:
            for line in pth_file:
                stripped_line = line.strip()
                # skip blank lines and '#' comments
                if stripped_line and stripped_line[0] != '#':
                    filtered_lines.append(stripped_line)
    if len(filtered_lines) == 3:
        (bin_dir, runtime_dir, ml_dir) = filtered_lines
        # all three entries must be existing directories
        if os.path.isdir(bin_dir) and os.path.isdir(runtime_dir) and os.path.isdir(ml_dir):
            (self.bin_dir, self.runtime_dir, self.ml_dir) = (bin_dir, runtime_dir, ml_dir)
            return True
    return False
Python
def write_path_file(self):
    """Write a file that lists items to add to path. If present, it will be overwritten.

    Any previous contents are appended verbatim at the end of the new file.
    """
    existing_contents = ''
    if os.path.isfile(self.path_file_name):
        # Read the old file as one string so it can be re-appended verbatim.
        # (The original used readlines() and then printed the resulting list,
        # which wrote a Python list repr into the file.)
        with open(self.path_file_name, 'r') as old_file:
            existing_contents = old_file.read()
    # `with` guarantees the handle is flushed and closed even on error
    with open(self.path_file_name, 'w') as path_file:
        if self.system == 'Windows':
            print('# bin dir: added to both OS path and system path', file=path_file)
        else:
            print('# bin dir: added to system path', file=path_file)
        print(self.bin_dir, file=path_file)
        print('', file=path_file)
        print('# runtime dir: added to system path', file=path_file)
        print(self.runtime_dir, file=path_file)
        print('', file=path_file)
        print('# matlab (mlarray) dir: added to system path', file=path_file)
        print(self.ml_dir, file=path_file)
        print('', file=path_file)
        if existing_contents:
            print(existing_contents, file=path_file)
Python
def update_paths(self):
    """Update the OS and Python paths."""
    # Windows needs bin_dir on the OS path; on Linux and Mac the OS can
    # find this information via rpath, so only Python's path is touched.
    if self.is_windows:
        os.environ[self.path_var] = self.bin_dir + os.pathsep + os.environ[self.path_var]
    # Prepend all three directories to the Python path (bin, runtime, ml in
    # this order, so ml_dir ends up first on sys.path).
    for directory in (self.bin_dir, self.runtime_dir, self.ml_dir):
        sys.path.insert(0, directory)
Python
def import_matlab(self):
    """Import the matlab package and store the module handle on self.

    Must be done after the Python system path contains what it needs to.

    Raises:
        ImportError: if the matlab package cannot be imported.
    """
    # No try/except needed: the original `except Exception as e: raise e`
    # re-raised the same exception and only truncated the traceback.
    self.ml_handle = importlib.import_module('matlab')
Python
def initialize():
    """Initialize a deployed-MATLAB package and return a handle.

    The package consists of one or more deployed MATLAB functions; the
    returned handle executes any of them. Call wait_for_figures_to_close()
    on the handle to block until all graphical figures are closed, and
    terminate(), quit() or exit() (synonymous) to close the package.
    terminate() also runs automatically when the script or session ends.

    Returns:
        handle - used to execute deployed MATLAB functions and to call terminate()
    """
    return _pir.initialize_package()
Python
def initialize_runtime(option_list):
    """Initialize the MATLAB Runtime with a list of startup options.

    The options affect all packages opened within the script or session.
    If not called explicitly, it runs automatically, with an empty list of
    options, on the first call to initialize(). Do not call it after
    initialize(). There is no terminate_runtime(); the runtime is
    terminated automatically when the script or session ends.

    Parameters
        option_list - Python list of options; valid options are:
            -nodisplay (suppresses display functionality; Linux only)
            -nojvm (disables the Java Virtual Machine)
    """
    # an empty/falsy option list is a no-op, matching the auto-init default
    if not option_list:
        return
    if not isinstance(option_list, (list, tuple)):
        raise SyntaxError('initialize_runtime takes a list or tuple of strings.')
    _pir.initialize_runtime(option_list)
Python
def toggle_written_image_file(image_path, figure):
    """Test writing an image file."""  # noqa: DAR101
    # start from a clean slate so the assertion proves the write happened
    if image_path.is_file():
        image_path.unlink()
    extension = image_path.suffix[1:]
    write_image_file(figure, image_path, extension)
    assert image_path.is_file()
Python
def create_sample_custom_chart_figure():
    """Return figure dictionary using CustomChart classes.

    Returns:
        dict: chart figure
    """
    chart_main = FittedChart(
        title='Sample Fitted Scatter Data',
        xlabel='Index',
        ylabel='Measured Value',
    )
    chart_main.fit_eqs = [('linear', equations.linear)]
    # Build a tidy dataframe from the plotly express iris sample dataset
    iris = px.data.iris()
    columns = {
        'name': iris['species'],
        'x': iris['petal_width'],
        'y': iris['petal_length'],
        'label': None,
    }
    return chart_main.create_figure(df_raw=pd.DataFrame(data=columns))
Python
def map_args(raw_args, inputs, states):
    """Map the function arguments into a dictionary with keys for the input and state names.

    For situations where the order of inputs and states may change, use this
    function to verbosely look up arguments by component id and property:

    ```python
    a_in, a_state = map_args(raw_args, inputs, states)
    click_data = a_in[self.id_main_figure]['clickData']
    data_cache = a_state[self.id_store]['data']
    ```

    Args:
        raw_args: list of arguments passed to callback
        inputs: list of (app_id, property) input components. May be empty list
        states: list of (app_id, property) state components. May be empty list

    Returns:
        list: two dicts (inputs, states) keyed by app_id, then property,
            mapping to the matching argument value
    """
    split_at = len(inputs)
    paired = [(inputs, raw_args[:split_at]), (states, raw_args[split_at:])]
    arg_map = []
    for groups, args in paired:
        # one nested dict per app_id: {app_id: {prop: value}}
        mapped = {app_id: {} for app_id, _prop in groups}
        for (app_id, prop), value in zip(groups, args):
            mapped[app_id][prop] = value
        arg_map.append(mapped)
    return arg_map
Python
def map_outputs(outputs, element_info):
    """Return properly ordered list of new Dash elements based on the order of outputs.

    Alternatively, for simple cases of 1-2 outputs, just return the list directly.

    Args:
        outputs: list of (app_id, prop) output components
        element_info: list of tuples with keys `(app_id, prop, element)`

    Returns:
        list: ordered list to match the order of outputs

    Raises:
        RuntimeError: if the number of outputs and element_info entries differ
    """
    if len(outputs) != len(element_info):
        raise RuntimeError(f'Expected same number of items between:\noutputs:{outputs}\nelement_info:{element_info}')
    # index the elements by (app_id -> prop -> element) for ordered lookup
    lookup = {}
    for app_id, prop, element in element_info:
        lookup.setdefault(app_id, {})[prop] = element
    return [lookup[app_id][prop] for app_id, prop in outputs]
Python
def create_rolling_traces(df_raw, count_rolling, count_std):
    """Calculate traces for rolling average and standard deviation.

    Args:
        df_raw: pandas dataframe with columns `x: float`, `y: float` and `label: str`
        count_rolling: number of points to use for the rolling calculation
        count_std: number of standard deviations to use for the standard deviation

    Returns:
        list: of Scatter traces for rolling mean and std
    """
    rolling_mean = df_raw['y'].rolling(count_rolling).mean().tolist()
    rolling_std = df_raw['y'].rolling(count_std).std().tolist()
    x_points = df_raw['x'].tolist()
    # band half-width is count_std standard deviations around the mean
    band = np.multiply(count_std, rolling_std)
    upper = np.add(rolling_mean, band).tolist()
    lower = np.subtract(rolling_mean, band).tolist()
    # filled band: x/y go out along the upper edge and back along the lower
    band_trace = go.Scatter(
        fill='toself',
        hoverinfo='skip',
        name=f'{count_std}x STD Range',
        opacity=0.5,
        x=(x_points + x_points[::-1]),
        y=(upper + lower[::-1]),
    )
    mean_trace = go.Scatter(
        hoverinfo='skip',
        mode='lines',
        name='Rolling Mean',
        opacity=0.9,
        x=df_raw['x'],
        y=rolling_mean,
    )
    return [band_trace, mean_trace]
Python
def create_fit_traces(df_raw, name, fit_equation, suppress_fit_errors=False):  # noqa: CCR001
    """Create traces for specified equation.

    Args:
        df_raw: pandas dataframe with columns `name: str`, `x: float`, `y: float` and `label: str`
        name: unique name for trace
        fit_equation: equation used
        suppress_fit_errors: If True, bury errors from scipy fit. Default is False.

    Returns:
        list: of Scatter traces for fitted equation (empty when the fit
            failed and errors are suppressed)
    """
    fitted_data = []
    try:
        popt, pcov = optimize.curve_fit(fit_equation, xdata=df_raw['x'], ydata=df_raw['y'], method='lm')
        # Representative x values spanning slightly (5%) beyond the data range
        x_min = np.min(df_raw['x'])
        x_max = np.max(df_raw['x'])
        x_range = x_max - x_min
        x_values = sorted([
            x_min - 0.05 * x_range,
            *np.divide(range(int(x_min * 10), int(x_max * 10)), 10),
            x_max + 0.05 * x_range,
        ])
        fitted_data.append(
            go.Scatter(
                mode='lines+markers',
                name=name,
                opacity=0.9,
                text=f'popt:{[round(param, 3) for param in popt]}',
                x=x_values,
                y=fit_equation(x_values, *popt),
            ),
        )
    except (RuntimeError, ValueError):  # pragma: no cover
        if not suppress_fit_errors:
            raise
    return fitted_data
Python
def create_traces(self, df_raw):
    """Return traces for plotly chart.

    Args:
        df_raw: pandas dataframe with columns `x: float`, `y: float` and `label: str`

    Returns:
        list: Dash chart traces
    """
    # Verify data format
    check_raw_data(df_raw, ['x', 'y', 'label'])
    scatter = go.Scatter(
        mode='markers',
        name=self.label_data,
        opacity=0.5,
        text=df_raw['label'],
        x=df_raw['x'],
        y=df_raw['y'],
    )
    chart_data = [scatter]
    # rolling stats need at least count_rolling points to be meaningful
    if len(df_raw['x']) >= self.count_rolling:
        chart_data += create_rolling_traces(df_raw, self.count_rolling, self.count_std)
    return chart_data
Python
def create_traces(self, df_raw):  # noqa: CCR001
    """Return traces for plotly chart.

    Args:
        df_raw: pandas dataframe with columns `name: str`, `x: float`, `y: float` and `label: str`

    Returns:
        list: Dash chart traces
    """
    # Verify data format
    check_raw_data(df_raw, ['name', 'x', 'y', 'label'])
    marker_mode = 'markers' if self.fit_eqs else self.fallback_mode
    scatter_data = []
    fit_traces = []
    # one scatter trace (and optional fit traces) per unique series name
    for series_name in set(df_raw['name']):
        df_series = df_raw[df_raw['name'] == series_name]
        scatter_data.append(
            go.Scatter(
                customdata=[series_name],
                mode=marker_mode,
                name=series_name,
                opacity=0.5,
                text=df_series['label'],
                x=df_series['x'],
                y=df_series['y'],
            ),
        )
        # only fit when there are enough points for a stable fit
        if len(df_series['x']) > self.min_scatter_for_fit:
            for fit_name, fit_equation in self.fit_eqs:
                fit_traces.extend(
                    create_fit_traces(df_series, f'{series_name}-{fit_name}', fit_equation, self.suppress_fit_errors),
                )
    return scatter_data + fit_traces
Python
def linear(x_values, factor_a, factor_b):
    """Return result(s) of linear equation with factors of a and b.

    `y = a * x + b`

    Args:
        x_values: single number or list of numbers
        factor_a: number, slope
        factor_b: number, intercept

    Returns:
        y_values: as array or single number
    """
    scaled = np.multiply(factor_a, x_values)
    return np.add(scaled, factor_b)
Python
def quadratic(x_values, factor_a, factor_b, factor_c): """Return result(s) of quadratic equation with factors of a, b, and c. `y = a * x^2 + b * x + c` Args: x_values: single number of list of numbers factor_a: number factor_b: number factor_c: number Returns: y_values: as list or single digit """ return np.add( np.multiply( factor_a, np.power(x_values, 2), ), np.add( np.multiply(factor_b, x_values), factor_c, ), )
def quadratic(x_values, factor_a, factor_b, factor_c):
    """Return result(s) of a quadratic equation with factors of a, b, and c.

    `y = a * x^2 + b * x + c`

    Args:
        x_values: single number or list of numbers
        factor_a: number
        factor_b: number
        factor_c: number

    Returns:
        y_values: as list or single digit

    """
    # Evaluate each polynomial term separately, then combine
    square_term = np.multiply(factor_a, np.power(x_values, 2))
    linear_term = np.multiply(factor_b, x_values)
    return np.add(square_term, np.add(linear_term, factor_c))
Python
def power(x_values, factor_a, factor_b):
    """Return result(s) of a power equation with factors of a and b.

    `y = a * x^b`

    Args:
        x_values: single number or list of numbers
        factor_a: number, scale factor
        factor_b: number, exponent

    Returns:
        y_values: as list or single digit

    """
    # Cast to float so negative/fractional exponents don't raise on integer input
    return np.multiply(
        factor_a,
        np.power(
            np.array(x_values).astype(float),
            factor_b,
        ),
    )
def power(x_values, factor_a, factor_b):
    """Return result(s) of a power equation with factors of a and b.

    `y = a * x^b`

    Args:
        x_values: single number or list of numbers
        factor_a: number, scale factor
        factor_b: number, exponent

    Returns:
        y_values: as list or single digit

    """
    # Cast to float so negative/fractional exponents don't raise on integer input
    return np.multiply(
        factor_a,
        np.power(
            np.array(x_values).astype(float),
            factor_b,
        ),
    )
Python
def exponential(x_values, factor_a, factor_b):
    """Return result(s) of an exponential equation with factors of a and b.

    `y = a * e^(b * x)`

    Args:
        x_values: single number or list of numbers
        factor_a: number, scale factor
        factor_b: number, growth rate

    Returns:
        y_values: as list or single digit

    """
    # Exponent argument first, then scale the exponential
    growth = np.multiply(factor_b, x_values)
    return np.multiply(factor_a, np.exp(growth))
def exponential(x_values, factor_a, factor_b):
    """Return result(s) of an exponential equation with factors of a and b.

    `y = a * e^(b * x)`

    Args:
        x_values: single number or list of numbers
        factor_a: number, scale factor
        factor_b: number, growth rate

    Returns:
        y_values: as list or single digit

    """
    # Exponent argument first, then scale the exponential
    growth = np.multiply(factor_b, x_values)
    return np.multiply(factor_a, np.exp(growth))
Python
def double_exponential(x_values, factor_a, factor_b, factor_c, factor_d):
    """Return result(s) of a double exponential equation with factors of a, b, c, and d.

    `y = a * e^(b * x) - c * e^(d * x)`

    Args:
        x_values: single number or list of numbers
        factor_a: number
        factor_b: number
        factor_c: number
        factor_d: number

    Returns:
        y_values: as list or single digit

    """
    # Evaluate the two exponential terms independently, then take the difference
    rising_term = np.multiply(factor_a, np.exp(np.multiply(factor_b, x_values)))
    falling_term = np.multiply(factor_c, np.exp(np.multiply(factor_d, x_values)))
    return np.subtract(rising_term, falling_term)
def double_exponential(x_values, factor_a, factor_b, factor_c, factor_d):
    """Return result(s) of a double exponential equation with factors of a, b, c, and d.

    `y = a * e^(b * x) - c * e^(d * x)`

    Args:
        x_values: single number or list of numbers
        factor_a: number
        factor_b: number
        factor_c: number
        factor_d: number

    Returns:
        y_values: as list or single digit

    """
    # Evaluate the two exponential terms independently, then take the difference
    rising_term = np.multiply(factor_a, np.exp(np.multiply(factor_b, x_values)))
    falling_term = np.multiply(factor_c, np.exp(np.multiply(factor_d, x_values)))
    return np.subtract(rising_term, falling_term)
Python
def parse_dash_cli_args():  # pragma: no cover
    """Configure the CLI options for Dash applications.

    Returns:
        dict: keyword arguments for Dash (`port` and `debug`)

    """
    parser = argparse.ArgumentParser(description='Process Dash Parameters.')
    # Declarative spec keeps the two flags visually aligned
    arg_specs = [
        (('--port',), {
            'type': int, 'default': 8050,
            'help': 'Pass port number to Dash server. Default is 8050',
        }),
        (('--nodebug',), {
            'action': 'store_true', 'default': False,
            'help': 'If set, will disable debug mode. Default is to set `debug=True`',
        }),
    ]
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
    cli_args = parser.parse_args()
    # `--nodebug` inverts to the `debug` keyword Dash expects
    return {'port': cli_args.port, 'debug': not cli_args.nodebug}
def parse_dash_cli_args():  # pragma: no cover
    """Configure the CLI options for Dash applications.

    Returns:
        dict: keyword arguments for Dash (`port` and `debug`)

    """
    parser = argparse.ArgumentParser(description='Process Dash Parameters.')
    # Declarative spec keeps the two flags visually aligned
    arg_specs = [
        (('--port',), {
            'type': int, 'default': 8050,
            'help': 'Pass port number to Dash server. Default is 8050',
        }),
        (('--nodebug',), {
            'action': 'store_true', 'default': False,
            'help': 'If set, will disable debug mode. Default is to set `debug=True`',
        }),
    ]
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
    cli_args = parser.parse_args()
    # `--nodebug` inverts to the `debug` keyword Dash expects
    return {'port': cli_args.port, 'debug': not cli_args.nodebug}
Python
def graph_return(resp, keys):
    """Based on concepts of GraphQL, return specified subset of response.

    Args:
        resp: dictionary with values from function
        keys: list of keynames from the resp dictionary

    Returns:
        the `resp` dictionary with only the keys specified in the `keys` list

    Raises:
        RuntimeError: if `keys` is not a non-empty list or tuple

    """
    # Check the type before calling len(): a non-sized argument (e.g. an int)
    # would otherwise raise TypeError instead of the documented RuntimeError
    if not (isinstance(keys, (list, tuple)) and len(keys)):
        raise RuntimeError(f'Expected list of keys for: `{resp.items()}`, but received `{keys}`')
    # Missing keys map to None rather than raising
    ordered_responses = [resp.get(key, None) for key in keys]
    # Single-key requests unwrap to the bare value
    return ordered_responses if len(ordered_responses) > 1 else ordered_responses[0]
def graph_return(resp, keys):
    """Based on concepts of GraphQL, return specified subset of response.

    Args:
        resp: dictionary with values from function
        keys: list of keynames from the resp dictionary

    Returns:
        the `resp` dictionary with only the keys specified in the `keys` list

    Raises:
        RuntimeError: if `keys` is not a non-empty list or tuple

    """
    # Check the type before calling len(): a non-sized argument (e.g. an int)
    # would otherwise raise TypeError instead of the documented RuntimeError
    if not (isinstance(keys, (list, tuple)) and len(keys)):
        raise RuntimeError(f'Expected list of keys for: `{resp.items()}`, but received `{keys}`')
    # Missing keys map to None rather than raising
    ordered_responses = [resp.get(key, None) for key in keys]
    # Single-key requests unwrap to the bare value
    return ordered_responses if len(ordered_responses) > 1 else ordered_responses[0]