sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def save(self, *args, **kwargs):
    """Save the CrossRefDB, refusing an empty database name.

    'blank=False' is only checked at a form-validation stage, so a test
    using Fixtureless that randomly creates a CrossRefDB with an empty
    string name would otherwise slip through silently. Raise FieldError
    for an empty name instead.

    :raises FieldError: if ``self.name`` is the empty string.
    """
    if self.name == '':
        # Include a message so the failure is self-explanatory in logs
        # (the original raised the bare exception class).
        raise FieldError("CrossRefDB name must not be blank.")
    return super(CrossRefDB, self).save(*args, **kwargs)
Extends save() method of Django models to check that the database name is not left blank. Note: 'blank=False' is only checked at a form-validation-stage. A test using Fixtureless that tries to randomly create a CrossRefDB with an empty string name would unintentionally break the test.
entailment
def makeProducer(self, request, fileForReading):
    """
    Make a L{StaticProducer} that will produce the body of this response.

    This method will also set the response code and Content-* headers.

    @param request: The L{Request} object.
    @param fileForReading: The file object containing the resource.
    @return: A L{StaticProducer}.  Calling C{.start()} on this
        will begin producing the response.
    """
    byteRange = request.getHeader(b'range')
    if byteRange is None or not self.getFileSize():
        # No Range header (or an empty file): serve the whole resource
        # with a plain 200 response.
        self._setContentHeaders(request)
        request.setResponseCode(http.OK)
        return NoRangeStaticProducer(request, fileForReading)
    try:
        parsedRanges = self._parseRangeHeader(byteRange)
    except ValueError:
        # A malformed Range header is ignored: fall back to serving the
        # full resource rather than failing the request.
        logger.warning("Ignoring malformed Range header %r" % (byteRange,))
        self._setContentHeaders(request)
        request.setResponseCode(http.OK)
        return NoRangeStaticProducer(request, fileForReading)

    if len(parsedRanges) == 1:
        # Single range: partial-content headers sized to that range.
        offset, size = self._doSingleRangeRequest(
            request, parsedRanges[0])
        self._setContentHeaders(request, size)
        return SingleRangeStaticProducer(
            request, fileForReading, offset, size)
    else:
        # Multiple ranges: the multi-range helper prepares the
        # per-part info the producer needs.
        rangeInfo = self._doMultipleRangeRequest(request, parsedRanges)
        return MultipleRangeStaticProducer(
            request, fileForReading, rangeInfo)
Make a L{StaticProducer} that will produce the body of this response. This method will also set the response code and Content-* headers. @param request: The L{Request} object. @param fileForReading: The file object containing the resource. @return: A L{StaticProducer}. Calling C{.start()} on this will begin producing the response.
entailment
def render_GET(self, request):
    """
    Begin sending the contents of this L{File} (or a subset of the
    contents, based on the 'range' header) to the given request.

    @param request: The L{Request} object.
    @return: C{b''} for HEAD requests, otherwise C{server.NOT_DONE_YET}
        so the connection stays open while the producer streams data.
    """
    request.setHeader(b'accept-ranges', b'bytes')

    # The producer sets the response code and Content-* headers.
    producer = self.makeProducer(request, self.fileObject)

    if request.method == b'HEAD':
        # Headers only; no body is produced for HEAD.
        return b''

    def done(ign):
        # Stop producing whether the request finished normally or
        # errored (same callback for both branches).
        producer.stopProducing()
    request.notifyFinish().addCallbacks(done, done)
    producer.start()

    # and make sure the connection doesn't get closed
    return server.NOT_DONE_YET
Begin sending the contents of this L{File} (or a subset of the contents, based on the 'range' header) to the given request.
entailment
def interface(self, context):
    """Implement the interface for the adapter object.

    Stores the given context and remembers its optional ``callback``
    entry (None when absent).
    """
    self.context = context
    self.callback = context.get("callback")
Implement the interface for the adapter object
entailment
def shutdown(self):
    """Executed on shutdown of the application.

    Signals the stop event, shuts the API down when it supports it,
    then waits for every worker thread to terminate.
    """
    self.stopped.set()
    api = self.api
    if hasattr(api, "shutdown"):
        api.shutdown()
    for worker in self.thread.values():
        worker.join()
Executed on shutdown of application
entailment
def SwitchToAlert():
    '''
    Wait up to 10 seconds for a javascript alert and switch to it.

    Example trigger:
        <input value="Test" type="button" onClick="alert('OK')" >

    @return: the alert object, or False if no alert appeared in time.
    '''
    timeout = 10
    try:
        alert = WebDriverWait(Web.driver, timeout).until(
            lambda driver: driver.switch_to_alert())
        return alert
    except:
        # BUG FIX: the original message used %d but supplied no
        # argument (and misspelled "Warning"), so printing it would
        # show the raw format string.
        print("Warning: Timeout at %d seconds. Alert was not found." % timeout)
        return False
<input value="Test" type="button" onClick="alert('OK')" >
entailment
def _element(cls):
    ''' Find the single element described by ``cls.__control``.

    Validates the selector, waits up to the configured timeout for at
    least one match, and returns the match at ``cls.__control["index"]``.
    The index is one-shot: it is reset to 0 after use.

    :raises Exception: on an invalid selector, a timeout, or an index
        beyond the number of matched elements.
    '''
    if not cls.__is_selector():
        raise Exception("Invalid selector[%s]." %cls.__control["by"])
    driver = Web.driver
    try:
        # Poll until find_elements returns a non-empty result or the
        # timeout elapses.
        elements = WebDriverWait(driver, cls.__control["timeout"]).until(lambda driver: getattr(driver,"find_elements")(cls.__control["by"], cls.__control["value"]))
    except:
        raise Exception("Timeout at %d seconds.Element(%s) not found." %(cls.__control["timeout"],cls.__control["by"]))
    if len(elements) < cls.__control["index"] + 1:
        raise Exception("Element [%s]: Element Index Issue! There are [%s] Elements! Index=[%s]" % (cls.__name__, len(elements), cls.__control["index"]))
    if len(elements) > 1:
        # More than one match: report which index will be used.
        print("Element [%s]: There are [%d] elements, choosed index=%d" %(cls.__name__,len(elements),cls.__control["index"]))
    elm = elements[cls.__control["index"]]
    # Reset so the next lookup defaults to the first match again.
    cls.__control["index"] = 0
    return elm
find the element with controls
entailment
def _elements(cls):
    ''' Find all elements matching ``cls.__control``.

    Validates the selector and waits up to the configured timeout for
    at least one match.

    :return: the list of matched elements.
    :raises Exception: on an invalid selector or a timeout.
    '''
    if not cls.__is_selector():
        raise Exception("Invalid selector[%s]." %cls.__control["by"])
    driver = Web.driver
    try:
        # Poll until find_elements returns a non-empty result or the
        # timeout elapses.
        elements = WebDriverWait(driver, cls.__control["timeout"]).until(lambda driver: getattr(driver,"find_elements")(cls.__control["by"], cls.__control["value"]))
    except:
        raise Exception("Timeout at %d seconds.Element(%s) not found." %(cls.__control["timeout"],cls.__control["by"]))
    return elements
find the elements with controls
entailment
def DyStrData(cls, name, regx, index = 0):
    ''' Set a dynamic global value from the string data of the response.

    @param name: glob parameter name
    @param regx: a compiled regular expression,
        e.g. DyStrData("a", re.compile('123'))
    @param index: which match to store (defaults to the first); an
        empty string is stored when there are not enough matches.
    '''
    text = Web.PageSource()
    if not text:
        return
    # BUG FIX: re._pattern_type was removed in Python 3.7.  Checking
    # against type(re.compile('')) works on every Python version.
    if not isinstance(regx, type(re.compile(''))):
        raise Exception("DyStrData need the arg which have compiled the regular expression.")
    values = regx.findall(text)
    result = ""
    if len(values) > index:
        result = values[index]
    cls.glob.update({name: result})
set dynamic value from the string data of response @param name: glob parameter name @param regx: re._pattern_type e.g. DyStrData("a",re.compile('123'))
entailment
def DyJsonData(cls,name, sequence):
    ''' Set a dynamic global value from the JSON data of the response.

    @note: reads the innerHTML JSON of the page <body>, e.g.
        <html><body>{ "code": 1,"desc": "..."}</body></html>
    @param name: glob parameter name
    @param sequence: dotted path into the JSON, e.g. for
        result={"a":1, "b":[1,2,3,4], "c":{"d":5,"e":6},
                "f":{"g":[7,8,9]}, "h":[{"i":10,"j":11},{"k":12}] }
        "a" -> 1;  "b.3" -> 4;  "f.g.2" -> 9;  "h.0.j" -> 11
    '''
    cls.SetControl(by = "tag name", value = "body")
    json_body = cls._element().get_attribute('innerHTML')
    if not json_body:
        return
    resp = json.loads(json_body)
    # Split the dotted path; numeric components become list indexes.
    sequence = [_parse_string_value(i) for i in sequence.split('.')]
    for i in sequence:
        try:
            if isinstance(i, int):
                resp = resp[i]
            else:
                resp = resp.get(i)
        except:
            # Any failure along the path stores None for this name.
            cls.glob.update({name:None})
            return
    cls.glob.update({name:resp})
set dynamic value from the json data of response @note: 获取innerHTML json的数据 如, <html><body>{ "code": 1,"desc": "成功"}</body></html> @param name: glob parameter name @param sequence: sequence for the json e.g. result={"a":1, "b":[1,2,3,4], "c":{"d":5,"e":6}, "f":{"g":[7,8,9]}, "h":[{"i":10,"j":11},{"k":12}] } sequence1 ="a" # -> 1 sequence2 ="b.3" # -> 4 sequence3 = "f.g.2" # -> 9 sequence4 = "h.0.j" # -> 11
entailment
def VerifyURL(cls, url):
    """Return True when the browser's current URL equals ``url``.

    Prints the actual URL when the check fails.
    """
    current = Web.driver.current_url
    if current == url:
        return True
    print("VerifyURL: %s" % current)
    return False
获取当前页面的url
entailment
def SelectByIndex(cls, index):
    ''' Select a drop-down option by its index.

    @param index: index of the option within the drop-down
    @return: False when the selection failed, None otherwise.
    '''
    try:
        dropdown = Select(cls._element())
        dropdown.select_by_index(int(index))
    except:
        # Best effort: signal failure instead of raising.
        return False
通过索引,选择下拉框选项, @param index: 下拉框 索引
entailment
def DeSelectByIndex(cls, index):
    ''' Deselect a drop-down option by its index.

    @param index: index of the option within the drop-down
    @return: False when the deselection failed, None otherwise.
    '''
    try:
        dropdown = Select(cls._element())
        dropdown.deselect_by_index(int(index))
    except:
        # Best effort: signal failure instead of raising.
        return False
通过索引,取消选择下拉框选项, @param index: 下拉框 索引
entailment
def MouseOver(cls):
    ''' Hover the mouse over the element. '''
    target = cls._element()
    chain = ActionChains(Web.driver)
    chain.move_to_element(target)
    chain.perform()
    # Give hover-triggered UI (menus, tooltips) a moment to appear.
    time.sleep(1)
鼠标悬浮
entailment
def Click(cls):
    ''' Single left-click on the element. '''
    target = cls._element()
    ActionChains(Web.driver).click(target).perform()
左键 点击 1次
entailment
def DoubleClick(cls):
    ''' Double left-click on the element. '''
    target = cls._element()
    ActionChains(Web.driver).double_click(target).perform()
左键点击2次
entailment
def EnhancedClick(cls):
    '''
    Description:
        Sometimes one click on the element doesn't work, so wait more
        time, then click again and again.
    Risk:
        It may perform more than one click operation.
    '''
    element = cls._element()
    for _ in range(3):
        action = ActionChains(Web.driver)
        action.move_to_element(element)
        # BUG FIX: the original loop only hovered over the element and
        # never clicked, despite the documented intent.
        action.click(element)
        action.perform()
        time.sleep(0.5)
Description: Sometimes, one click on the element doesn't work. So wait more time, then click again and again. Risk: It may operate more than one click operations.
entailment
def RightClick(cls):
    ''' Single right-click (context click) on the element. '''
    target = cls._element()
    ActionChains(Web.driver).context_click(target).perform()
右键点击1次
entailment
def ClickAndHold(cls):
    ''' Press and hold the mouse button on the element. '''
    target = cls._element()
    ActionChains(Web.driver).click_and_hold(target).perform()
相当于 按压,press
entailment
def ReleaseClick(cls):
    ''' Release a held mouse button on the element. '''
    target = cls._element()
    ActionChains(Web.driver).release(target).perform()
释放按压操作
entailment
def Enter(cls):
    ''' Send the ENTER key to the element.

    @note: key event -> enter
    '''
    target = cls._element()
    chain = ActionChains(Web.driver)
    chain.send_keys_to_element(target, Keys.ENTER)
    chain.perform()
在指定输入框发送回回车键 @note: key event -> enter
entailment
def Ctrl(cls, key):
    """ Send a Ctrl+key combination to the element.

    @note: key event -> control + key
    @param key: e.g. 'X'
    """
    cls._element().send_keys(Keys.CONTROL, key)
在指定元素上执行ctrl组合键事件 @note: key event -> control + key @param key: 如'X'
entailment
def Alt(cls, key):
    """ Send an Alt+key combination to the element.

    @note: key event -> alt + key
    @param key: e.g. 'X'
    """
    cls._element().send_keys(Keys.ALT, key)
在指定元素上执行alt组合事件 @note: key event -> alt + key @param key: 如'X'
entailment
def Focus(cls):
    """ Send the NULL key to the element to give it focus.

    @note: key event -> NULL
    """
    target = cls._element()
    chain = ActionChains(Web.driver)
    chain.send_keys_to_element(target, Keys.NULL)
    chain.perform()
在指定输入框发送 Null, 用于设置焦点 @note: key event -> NULL
entailment
def Upload(cls, filename):
    """ Upload a file via an external upload.exe helper (for non-native
    file inputs).

    @todo: some upload.exe not prepared — this path is deliberately
        disabled by the raise below; the dead code documents the
        intended flow.
    @param filename: file name (the file must exist in the project's
        resource directory); upload.exe lives in the project's tools
        directory.
    """
    raise Exception("to do")
    # --- intended implementation, unreachable until the tool exists ---
    TOOLS_PATH = ""
    RESOURCE_PATH = ""
    tool_4path = os.path.join(TOOLS_PATH, "upload.exe")
    file_4path = os.path.join(RESOURCE_PATH, filename)
    #file_4path.decode('utf-8').encode('gbk')
    if os.path.isfile(file_4path):
        cls.Click()
        os.system(tool_4path + ' ' + file_4path)
    else:
        raise Exception('%s is not exists' % file_4path)
文件上传, 非原生input @todo: some upload.exe not prepared @param file: 文件名(文件必须存在在工程resource目录下), upload.exe工具放在工程tools目录下
entailment
def UploadType(cls, file_path):
    """ Upload via a native <input type="file"> element.

    For a native file input such as
    <input type="file" id="test-image-file" name="test" accept="image/gif">,
    locate the element and send the absolute path of the file with
    send_keys.

    @param file_path: absolute path of the file to upload
    @return: False when the path is not absolute or the file does not
        exist, None otherwise.
    """
    if not os.path.isabs(file_path):
        return False
    if not os.path.isfile(file_path):
        return False
    cls.SendKeys(file_path)
上传, 一般,上传页面如果是input,原生file文件框, 如: <input type="file" id="test-image-file" name="test" accept="image/gif">,像这样的,定位到该元素,然后使用 send_keys 上传的文件的绝对路径 @param file_name: 文件名(文件必须存在在工程resource目录下)
entailment
def update(xCqNck7t, **kwargs):
    """Updates the Dict with the given values. Turns internal dicts
    into Dicts (and converts dicts/lists/bytes inside list values).

    NOTE(review): the odd first-parameter name appears deliberate — it
    avoids colliding with a caller passing a 'self' keyword — so it is
    kept as-is.
    """
    def _convert_list(items):
        # Recursively convert list contents: dict -> Dict,
        # list -> make_list, bytes -> UTF-8 str, anything else as-is.
        converted = []
        for item in items:
            if type(item) == dict:
                converted.append(Dict(**item))
            elif type(item) == list:
                converted.append(make_list(item))
            elif type(item) == bytes:
                converted.append(item.decode('UTF-8'))
            else:
                converted.append(item)
        return converted

    for key in list(kwargs.keys()):
        value = kwargs[key]
        if type(value) == dict:
            xCqNck7t[key] = Dict(**value)
        elif type(value) == list:
            xCqNck7t[key] = _convert_list(value)
        else:
            xCqNck7t[key] = value
Updates the Dict with the given values. Turns internal dicts into Dicts.
entailment
def keys(self, key=None, reverse=False):
    """Return the dictionary's keys as a sorted list.

    @param key: optional sort-key function
    @param reverse: sort descending when True
    """
    return sorted(dict.keys(self), key=key, reverse=reverse)
sort the keys before returning them
entailment
def hub(self, port):
    ''' Configure this instance as a selenium hub.

    Equivalent command line:
        java -jar selenium-server.jar -role hub -port 4444

    @param port: listen port of the selenium hub
    @return: self, for chaining
    '''
    self._ip = "localhost"
    self._port = port
    conf = self._conf
    self.command = [
        conf["java_path"], "-jar", conf["jar_path"],
        "-port", str(port), "-role", "hub",
    ]
    return self
java -jar selenium-server.jar -role hub -port 4444 @param port: listen port of selenium hub
entailment
def node(self,port, hub_address=("localhost", 4444)):
    ''' Configure this instance as a selenium node.

    Equivalent command line:
        java -jar selenium-server.jar -role node -port 5555
             -hub http://127.0.0.1:4444/grid/register/

    @param port: listen port of the selenium node
    @param hub_address: (host, port) of the hub this node registers with
    @return: self, for chaining
    '''
    self._ip, self._port = hub_address
    register_url = "http://%s:%s/grid/register/" % (self._ip, self._port)
    conf = self._conf
    self.command = [
        conf["java_path"], "-jar", conf["jar_path"],
        "-port", str(port), "-role", "node",
        "-hub", register_url,
    ]
    return self
java -jar selenium-server.jar -role node -port 5555 -hub http://127.0.0.1:4444/grid/register/ @param port: listen port of selenium node @param hub_address: hub address which node will connect to
entailment
def start_server(self):
    """Start the selenium Remote Server as a subprocess.

    Launches ``self.command`` (built by hub()/node()) and keeps the
    Popen handle on the instance.
    """
    self.__subp = subprocess.Popen(self.command)
    #print("\tselenium jar pid[%s] is running." %self.__subp.pid)
    # Give the JVM a moment to start before callers poll the server.
    time.sleep(2)
start the selenium Remote Server.
entailment
def is_runnnig(self):
    """Determine whether the hub server is running.

    :return: True or False

    NOTE(review): the misspelled method name is kept because callers
    depend on it.
    """
    try:
        resp = requests.get("http://%s:%s" % (self._ip, self._port))
    except:
        return False
    return resp.status_code == 200
Determine whether hub server is running :return:True or False
entailment
def negate_gate(wordlen, input='x', output='~x'):
    """Implements two's complement negation (bitwise NOT, then +1)."""
    inverted = bitwise_negate(wordlen, input, "tmp")
    plus_one = inc_gate(wordlen, "tmp", output)
    return inverted >> plus_one
Implements two's complement negation.
entailment
def kmodels(wordlen: int, k: int, input=None, output=None):
    """Return a circuit taking a wordlen bitvector where only k
    valuations return True. Uses encoding from [1].

    Note that this is equivalent to (~x < k).
    - TODO: Add automated simplification so that the circuits are equiv.

    [1]: Chakraborty, Supratik, et al. "From Weighted to Unweighted
    Model Counting." IJCAI. 2015.

    :param wordlen: width of the input bitvector.
    :param k: number of satisfying valuations; must fit in wordlen bits.
    :param input: input name; a fresh name is generated when None.
    :param output: output name; a fresh name is generated when None.
    """
    assert 0 <= k < 2**wordlen
    if output is None:
        output = _fresh()
    if input is None:
        input = _fresh()
    input_names = named_indexes(wordlen, input)
    atoms = map(aiger.atom, input_names)
    active = False
    expr = aiger.atom(False)
    # Walk the bits of k (order produced by encode_int), OR-ing where
    # the bit is 1 and AND-ing where it is 0, skipping the leading
    # zeros before the first 1 bit.
    for atom, bit in zip(atoms, encode_int(wordlen, k, signed=False)):
        active |= bit
        if not active:
            # Skip until first 1.
            continue
        expr = (expr | atom) if bit else (expr & atom)
    return aigbv.AIGBV(
        aig=expr.aig,
        input_map=frozenset([(input, tuple(input_names))]),
        output_map=frozenset([(output, (expr.output,))]),
    )
Return a circuit taking a wordlen bitvector where only k valuations return True. Uses encoding from [1]. Note that this is equivalent to (~x < k). - TODO: Add automated simplification so that the circuits are equiv. [1]: Chakraborty, Supratik, et al. "From Weighted to Unweighted Model Counting." IJCAI. 2015.
entailment
def flatten_list(lobj):
    """
    Recursively flattens a list.

    :param lobj: List to flatten
    :type lobj: list
    :rtype: list

    For example:

        >>> import pmisc
        >>> pmisc.flatten_list([1, [2, 3, [4, 5, 6]], 7])
        [1, 2, 3, 4, 5, 6, 7]
    """
    flat = []
    for element in lobj:
        if isinstance(element, list):
            flat.extend(flatten_list(element))
        else:
            flat.append(element)
    return flat
Recursively flattens a list. :param lobj: List to flatten :type lobj: list :rtype: list For example: >>> import pmisc >>> pmisc.flatten_list([1, [2, 3, [4, 5, 6]], 7]) [1, 2, 3, 4, 5, 6, 7]
entailment
def auth(self):
    """Tuple of (username, password).

    If use_keyring is set to true the password will be queried from the
    local keyring instead of taken from the configuration file.

    :raises ValueError: when no username is configured.
    """
    username = self._settings["username"]
    if not username:
        raise ValueError("Username was not configured in %s" % CONFIG_FILE)

    if not self._settings["use_keyring"]:
        return username, self._settings["password"]

    password = self.keyring_get_password(username)
    if not password:
        # Not stored yet: prompt/store once, then read it back.
        self.keyring_set_password(username)
        password = self.keyring_get_password(username)
    return username, password
tuple of (username, password). if use_keyring is set to true the password will be queried from the local keyring instead of taken from the configuration file.
entailment
def load(self, file=CONFIG_FILE):
    """ Load a configuration file.

    When the file does not exist, the default config is loaded, saved
    to disk, and the process exits so the user can review it.

    :param file: path to the yaml configuration file.
    """
    if not os.path.exists(file):
        print("Config file was not found under %s. Default file has been created" % CONFIG_FILE)
        # RoundTripLoader preserves comments and key order (ruamel.yaml).
        self._settings = yaml.load(DEFAULT_CONFIG, yaml.RoundTripLoader)
        self.save(file)
        sys.exit()
    with open(file, 'r') as f:
        self._settings = yaml.load(f, yaml.RoundTripLoader)
load a configuration file. loads default config if file is not found
entailment
def save(self, file=CONFIG_FILE):
    """ Save the configuration to the provided path as a yaml file.

    Creates the parent directory when it does not exist yet.
    """
    directory = os.path.dirname(file)
    os.makedirs(directory, exist_ok=True)
    with open(file, "w") as f:
        # width=inf keeps long lines from being wrapped by the dumper.
        yaml.dump(self._settings, f, Dumper=yaml.RoundTripDumper,
                  width=float("inf"))
Save configuration to provided path as a yaml file
entailment
def selection_dialog(self, courses):
    """ Open a curses/picker based interface to select courses that
    should be downloaded.
    """
    previously_selected = [entry for entry in courses
                           if entry.course.id in self._settings["selected_courses"]]
    selection = Picker(
        title="Select courses to download",
        options=courses,
        checked=previously_selected).getSelected()
    if selection:
        self._settings["selected_courses"] = [entry.course.id for entry in selection]
        self.save()
        log.info("Updated course selection")
opens a curses/picker based interface to select courses that should be downloaded.
entailment
def create(self):
    """Create the corresponding index. Will overwrite existing indexes
    of the same name."""
    body = {}
    if self.mapping is not None:
        body['mappings'] = self.mapping
    # Fall back to the default settings when none are configured.
    body['settings'] = (self.settings if self.settings is not None
                        else self._default_settings())
    self.instance.indices.create(self.index, body)
Create the corresponding index. Will overwrite existing indexes of the same name.
entailment
def search(self, query=None, size=100, unpack=True):
    """Search the index with a query.

    Can at most return 10'000 results from a search. If the search
    would yield more than 10'000 hits, only the first 10'000 are
    returned. The default number of hits returned is 100.

    :param query: search body; the match-all query when None.
    :param size: maximum number of hits to return.
    :param unpack: when True return only each hit's _source.
    """
    logging.info('Download all documents from index %s.', self.index)
    if query is None:
        query = self.match_all
    data = self.instance.search(index=self.index, doc_type=self.doc_type,
                                body=query, size=size)
    hits = data['hits']['hits']
    if not unpack:
        return hits
    return [hit['_source'] if '_source' in hit else hit for hit in hits]
Search the index with a query. Can at most return 10'000 results from a search. If the search would yield more than 10'000 hits, only the first 10'000 are returned. The default number of hits returned is 100.
entailment
def scan_index(self, query: Union[Dict[str, str], None] = None) -> List[Dict[str, str]]:
    """Scan the index with the query.

    Will return any number of results above 10'000. Important to note
    is, that all the data is loaded into memory at once and returned.
    This works only with small data sets. Use scroll otherwise which
    returns a generator to cycle through the resources in set chunks.

    :param query: The query used to scan the index. Default None will
        return the entire index.
    :returns list of dicts: The list of dictionaries contains all the
        documents without metadata.
    """
    if query is None:
        query = self.match_all
    logging.info('Download all documents from index %s with query %s.',
                 self.index, query)
    hits = scan(self.instance, index=self.index, doc_type=self.doc_type,
                query=query)
    return [hit['_source'] if '_source' in hit else hit for hit in hits]
Scan the index with the query. Will return any number of results above 10'000. Important to note is, that all the data is loaded into memory at once and returned. This works only with small data sets. Use scroll otherwise which returns a generator to cycle through the resources in set chunks. :param query: The query used to scan the index. Default None will return the entire index. :returns list of dicts: The list of dictionaries contains all the documents without metadata.
entailment
def scroll(self, query=None, scroll='5m', size=100, unpack=True):
    """Scroll an index with the specified search query.

    Works as a generator. Will yield `size` results per iteration until
    all hits are returned.
    """
    if query is None:
        query = self.match_all
    response = self.instance.search(index=self.index, doc_type=self.doc_type,
                                    body=query, size=size, scroll=scroll)
    while response['hits']['hits']:
        scroll_id = response['_scroll_id']
        logging.debug(response)
        hits = response['hits']['hits']
        if unpack:
            yield [hit['_source'] if '_source' in hit else hit
                   for hit in hits]
        else:
            yield hits
        # Fetch the next page with the scroll cursor.
        response = self.instance.scroll(scroll_id=scroll_id, scroll=scroll)
Scroll an index with the specified search query. Works as a generator. Will yield `size` results per iteration until all hits are returned.
entailment
def get(self, identifier):
    """Fetch document by _id.

    Returns None if it is not found. (Will log a warning if not found
    as well. Should not be used to search an id.)
    """
    logging.info('Download document with id ' + str(identifier) + '.')
    try:
        record = self.instance.get(index=self.index,
                                   doc_type=self.doc_type, id=identifier)
    except NotFoundError:
        return None
    return record['_source'] if '_source' in record else record
Fetch document by _id. Returns None if it is not found. (Will log a warning if not found as well. Should not be used to search an id.)
entailment
def index_into(self, document, id) -> bool:
    """Index a single document into the index.

    :returns: True on success, False when elastic rejects the request.
    """
    try:
        payload = json.dumps(document, ensure_ascii=False)
        self.instance.index(index=self.index, doc_type=self.doc_type,
                            body=payload, id=id)
    except RequestError as ex:
        logging.error(ex)
        return False
    return True
Index a single document into the index.
entailment
def delete(self, doc_id: str) -> bool:
    """Delete the document with the given id.

    :returns: True on success, False when elastic rejects the request.
    """
    try:
        self.instance.delete(self.index, self.doc_type, doc_id)
    except RequestError as ex:
        logging.error(ex)
        return False
    return True
Delete a document with id.
entailment
def update(self, doc: dict, doc_id: str):
    """Partial update to a single document.

    Uses the Update API with the specified partial document.
    """
    self.instance.update(self.index, self.doc_type, doc_id,
                         body={'doc': doc})
Partial update to a single document. Uses the Update API with the specified partial document.
entailment
def script_update(self, script: str, params: Union[dict, None], doc_id: str):
    """Uses painless script to update a document.

    See the elasticsearch documentation for more information.

    :param script: painless script source.
    :param params: optional parameters made available to the script.
    :param doc_id: id of the document to update.
    """
    script_body = {'source': script, 'lang': 'painless'}
    if params is not None:
        script_body['params'] = params
    self.instance.update(self.index, self.doc_type, doc_id,
                         body={'script': script_body})
Uses painless script to update a document. See documentation for more information.
entailment
def bulk(self, data: List[Dict[str, str]], identifier_key: str, op_type='index', upsert=False, keep_id_key=False) -> bool:
    """
    Takes a list of dictionaries and an identifier key and indexes everything into this index.

    :param data: List of dictionaries containing the data to be indexed.
    :param identifier_key: The name of the dictionary element which should be used as _id. This will be removed
        from the body. Is ignored when None or empty string. This will cause elastic to create their own _id.
    :param op_type: What should be done: 'index', 'delete', 'update'.
    :param upsert: The update op_type can be upserted, which will create a document if not already present.
    :param keep_id_key: Determines if the value designated as the identifier_key should be kept as part of the
        document or removed from it.
    :returns: Returns True if all the messages were indexed without errors. False otherwise.
    """
    bulk_objects = []
    for document in data:
        bulk_object = dict()
        bulk_object['_op_type'] = op_type
        if identifier_key is not None and identifier_key != '':
            bulk_object['_id'] = document[identifier_key]
            if not keep_id_key:
                document.pop(identifier_key)
            # An empty _id is dropped so elastic generates one itself.
            if bulk_object['_id'] == '':
                bulk_object.pop('_id')
        if op_type == 'index':
            bulk_object['_source'] = document
        elif op_type == 'update':
            bulk_object['doc'] = document
            if upsert:
                bulk_object['doc_as_upsert'] = True
        bulk_objects.append(bulk_object)
        logging.debug(str(bulk_object))
    logging.info('Start bulk index for ' + str(len(bulk_objects)) + ' objects.')
    # helpers.bulk with raise_on_error=False returns
    # (success_count, error_list).
    errors = bulk(self.instance, actions=bulk_objects, index=self.index, doc_type=self.doc_type, raise_on_error=False)
    logging.info(str(errors[0]) + ' documents were successfully indexed/updated/deleted.')
    if errors[0] - len(bulk_objects) != 0:
        logging.error(str(len(bulk_objects) - errors[0]) + ' documents could not be indexed/updated/deleted.')
        for error in errors[1]:
            logging.error(str(error))
        return False
    else:
        logging.debug('Finished bulk %s.', op_type)
        return True
Takes a list of dictionaries and an identifier key and indexes everything into this index. :param data: List of dictionaries containing the data to be indexed. :param identifier_key: The name of the dictionary element which should be used as _id. This will be removed from the body. Is ignored when None or empty string. This will cause elastic to create their own _id. :param op_type: What should be done: 'index', 'delete', 'update'. :param upsert: The update op_type can be upserted, which will create a document if not already present. :param keep_id_key Determines if the value designated as the identifier_key should be kept as part of the document or removed from it. :returns Returns True if all the messages were indexed without errors. False otherwise.
entailment
def reindex(self, new_index_name: str, identifier_key: str, **kwargs) -> 'ElasticIndex':
    """Reindex the entire index.

    Scrolls the old index and bulk indexes all data into the new index.

    :param new_index_name: name of the target index.
    :param identifier_key: document key used as _id in the target.
    :param kwargs: Overwrite ElasticIndex __init__ params.
    :return: the newly created ElasticIndex.
    """
    # Inherit connection parameters unless explicitly overridden.
    kwargs.setdefault('url', self.url)
    kwargs.setdefault('doc_type', self.doc_type)
    kwargs.setdefault('mapping', self.mapping)
    new_index = ElasticIndex(new_index_name, **kwargs)
    for batch in self.scroll(size=500):
        new_index.bulk(batch, identifier_key)
    return new_index
Reindex the entire index. Scrolls the old index and bulk indexes all data into the new index. :param new_index_name: :param identifier_key: :param kwargs: Overwrite ElasticIndex __init__ params. :return:
entailment
def dump(self, path: str, file_name: str = "", **kwargs: dict):
    """ Dumps the entire index into a json file.

    :param path: The path to the directory where the dump should be stored.
    :param file_name: Name of the file the dump should be stored in.
        If empty the index name is used.
    :param kwargs: Keyword arguments for the json converter.
        (ex. indent=4, ensure_ascii=False)
    """
    export = []
    for batch in self.scroll():
        export.extend(batch)

    if not path.endswith('/'):
        path += '/'
    if file_name == '':
        file_name = self.index
    if not file_name.endswith('.json'):
        file_name += '.json'

    target = path + file_name
    with open(target, 'w') as fp:
        json.dump(export, fp, **kwargs)
    logging.info("Extracted %s records from the index %s and stored them in %s/%s.", len(export), self.index, path, file_name)
Dumps the entire index into a json file. :param path: The path to directory where the dump should be stored. :param file_name: Name of the file the dump should be stored in. If empty the index name is used. :param kwargs: Keyword arguments for the json converter. (ex. indent=4, ensure_ascii=False)
entailment
def main():
    """ Parse command line options and either launch some configuration
    dialog or start an instance of _MainLoop as a daemon.
    """
    (options, _) = _parse_args()

    if options.change_password:
        c.keyring_set_password(c["username"])
        sys.exit(0)
    if options.select:
        courses = client.get_courses()
        c.selection_dialog(courses)
        c.save()
        sys.exit(0)
    if options.stop:
        os.system("kill -2 `cat ~/.studdp/studdp.pid`")
        sys.exit(0)

    task = _MainLoop(options.daemonize, options.update_courses)

    if options.daemonize:
        log.info("daemonizing...")
        with daemon.DaemonContext(working_directory=".",
                                  pidfile=PIDLockFile(PID_FILE)):
            # we have to create a new logger in the daemon context
            handler = logging.FileHandler(LOG_PATH)
            # BUG FIX: setFormatter() requires a logging.Formatter
            # instance; the original passed a bare format string.
            handler.setFormatter(logging.Formatter(
                '%(asctime)s [%(levelname)s] %(name)s: %(message)s'))
            log.addHandler(handler)
            task()
    else:
        task()
parse command line options and either launch some configuration dialog or start an instance of _MainLoop as a daemon
entailment
def generate(self, signature_data):
    """Takes data and returns a signature

    :arg dict signature_data: data to use to generate a signature

    :returns: ``Result`` instance
    """
    result = Result()

    for rule in self.pipeline:
        rule_name = rule.__class__.__name__
        try:
            # Only run the rule's action when its predicate matches.
            if rule.predicate(signature_data, result):
                rule.action(signature_data, result)
        except Exception as exc:
            # A failing rule is reported but does not stop the
            # pipeline; the failure is recorded on the result.
            if self.error_handler:
                self.error_handler(
                    signature_data,
                    exc_info=sys.exc_info(),
                    extra={'rule': rule_name}
                )
            result.info(rule_name, 'Rule failed: %s', exc)

    return result
Takes data and returns a signature :arg dict signature_data: data to use to generate a signature :returns: ``Result`` instance
entailment
def _blast(bvname2vals, name_map):
    """Helper function to expand (blast) str -> int map into
    str -> bool map. This is used to send word level inputs to aiger."""
    if len(name_map) == 0:
        return dict()
    bit_vals = {}
    for bvname, names in name_map:
        bit_vals.update(zip(names, bvname2vals[bvname]))
    return bit_vals
Helper function to expand (blast) str -> int map into str -> bool map. This is used to send word level inputs to aiger.
entailment
def _unblast(name2vals, name_map): """Helper function to lift str -> bool maps used by aiger to the word level. Dual of the `_blast` function.""" def _collect(names): return tuple(name2vals[n] for n in names) return {bvname: _collect(names) for bvname, names in name_map}
Helper function to lift str -> bool maps used by aiger to the word level. Dual of the `_blast` function.
entailment
def create_logger(self):
    """Generates a logger instance from the singleton.

    The logger name defaults to "bors" unless the object defines one.
    The level is read from the config, falling back to the context's
    "log_level" entry and finally to INFO.
    """
    logger_name = getattr(self, "name", "bors")
    self.log = logging.getLogger(logger_name)
    try:
        level_name = self.conf.get_log_level()
    except AttributeError:
        level_name = self.context.get("log_level", None)
    self.log.setLevel(getattr(logging, level_name, logging.INFO))
Generates a logger instance from the singleton
entailment
def get_cls(project_name, project_data):
    """ Get a class from name and data and set base level attrs.

    Defaults to facsimile.base.Facsimile when no project name is given
    or the data names no class.
    """
    if not project_name:
        return facsimile.base.Facsimile
    cls = getattr(facsimile.base, project_data.get('class', 'Facsimile'))
    cls.name = project_name
    return cls
gets class from name and data, sets base level attrs defaults to facsimile.base.Facsimile
entailment
def check_recaptcha(view_func): """Chech that the entered recaptcha data is correct""" @wraps(view_func) def _wrapped_view(request, *args, **kwargs): request.recaptcha_is_valid = None if request.method == 'POST': recaptcha_response = request.POST.get('g-recaptcha-response') data = { 'secret': settings.GOOGLE_RECAPTCHA_SECRET_KEY, 'response': recaptcha_response } r = requests.post( 'https://www.google.com/recaptcha/api/siteverify', data=data ) result = r.json() if result['success']: request.recaptcha_is_valid = True else: request.recaptcha_is_valid = False error_message = 'Invalid reCAPTCHA. Please try again. ' error_message += str(result['error-codes']) print(error_message) return view_func(request, *args, **kwargs) return _wrapped_view
Chech that the entered recaptcha data is correct
entailment
def _request(self, url, method="GET", params=None, api_call=None): """Internal request method""" method = method.lower() params = params or {} func = getattr(requests, method) requests_args = {} if method == "get" or method == "delete": requests_args["params"] = params else: if params.get("json"): requests_args["json"] = params.get("json") if params.get("files"): requests_args["files"] = params.get("files") if params.get("data"): requests_args["data"] = params.get("data") try: response = func(url, **requests_args) except requests.RequestException as e: raise SafecastPyError(str(e)) # greater than 304 (not modified) is an error if response.status_code > 304: if response.status_code == 401: raise SafecastPyAuthError(response.json().get("error")) if response.status_code in [422]: raise SafecastPyError(response.json().get("errors")) raise SafecastPyError(response.content, error_code=response.status_code) try: if response.status_code == 204: content = response.content else: content = response.json() except ValueError: raise SafecastPyError( "Response was not valid JSON. \ Unable to decode." ) return content
Internal request method
entailment
def request(self, endpoint, method="GET", params=None): """Return dict of response received from Safecast's API :param endpoint: (required) Full url or Safecast API endpoint (e.g. measurements/users) :type endpoint: string :param method: (optional) Method of accessing data, either GET, POST, PUT or DELETE. (default GET) :type method: string :param params: (optional) Dict of parameters (if any) accepted the by Safecast API endpoint you are trying to access (default None) :type params: dict or None :rtype: dict """ # In case they want to pass a full Safecast URL # i.e. https://api.safecast.org/measurements.json if endpoint.startswith("http"): url = endpoint else: url = "%s/%s.json" % (self.api_url, endpoint) if method != "GET": if self.api_key is None: raise SafecastPyAuthError("Require an api_key") url = url + "?api_key={0}".format(self.api_key) content = self._request(url, method=method, params=params, api_call=url) return content
Return dict of response received from Safecast's API :param endpoint: (required) Full url or Safecast API endpoint (e.g. measurements/users) :type endpoint: string :param method: (optional) Method of accessing data, either GET, POST, PUT or DELETE. (default GET) :type method: string :param params: (optional) Dict of parameters (if any) accepted the by Safecast API endpoint you are trying to access (default None) :type params: dict or None :rtype: dict
entailment
def post_list(self, request, **kwargs): """ (Copied from implementation in https://github.com/greenelab/adage-server/blob/master/adage/analyze/api.py) Handle an incoming POST as a GET to work around URI length limitations """ # The convert_post_to_VERB() technique is borrowed from # resources.py in tastypie source. This helps us to convert the POST # to a GET in the proper way internally. request.method = 'GET' # override the incoming POST dispatch_request = convert_post_to_VERB(request, 'GET') return self.dispatch('list', dispatch_request, **kwargs)
(Copied from implementation in https://github.com/greenelab/adage-server/blob/master/adage/analyze/api.py) Handle an incoming POST as a GET to work around URI length limitations
entailment
def execute(self, context): """Execute the strategies on the given context""" for ware in self.middleware: ware.premessage(context) context = ware.bind(context) ware.postmessage(context) return context
Execute the strategies on the given context
entailment
def shutdown(self): """Perform cleanup! We're goin' down!!!""" for ware in self.middleware: ware.preshutdown() self._shutdown() ware.postshutdown()
Perform cleanup! We're goin' down!!!
entailment
def main(argv=None): """Generates documentation for signature generation pipeline""" parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument( 'pipeline', help='Python dotted path to rules pipeline to document' ) parser.add_argument('output', help='output file') if argv is None: args = parser.parse_args() else: args = parser.parse_args(argv) print('Generating documentation for %s in %s...' % (args.pipeline, args.output)) rules = import_rules(args.pipeline) with open(args.output, 'w') as fp: fp.write('.. THIS IS AUTOGEMERATED USING:\n') fp.write(' \n') fp.write(' %s\n' % (' '.join(sys.argv))) fp.write(' \n') fp.write('Signature generation rules pipeline\n') fp.write('===================================\n') fp.write('\n') fp.write('\n') fp.write( 'This is the signature generation pipeline defined at ``%s``:\n' % args.pipeline ) fp.write('\n') for i, rule in enumerate(rules): li = '%s. ' % (i + 1) fp.write('%s%s\n' % ( li, indent(get_doc(rule), ' ' * len(li)) )) fp.write('\n')
Generates documentation for signature generation pipeline
entailment
def handle(self, *args, **options): """This function is called by the Django API to specify how this object will be saved to the database. """ taxonomy_id = options['taxonomy_id'] # Remove leading and trailing blank characters in "common_name" # and "scientific_name common_name = options['common_name'].strip() scientific_name = options['scientific_name'].strip() if common_name and scientific_name: # A 'slug' is a label for an object in django, which only contains # letters, numbers, underscores, and hyphens, thus making it URL- # usable. The slugify method in django takes any string and # converts it to this format. For more information, see: # http://stackoverflow.com/questions/427102/what-is-a-slug-in-django slug = slugify(scientific_name) logger.info("Slug generated: %s", slug) # If organism exists, update with passed parameters try: org = Organism.objects.get(taxonomy_id=taxonomy_id) org.common_name = common_name org.scientific_name = scientific_name org.slug = slug # If organism doesn't exist, construct an organism object # (see organisms/models.py). except Organism.DoesNotExist: org = Organism(taxonomy_id=taxonomy_id, common_name=common_name, scientific_name=scientific_name, slug=slug ) org.save() # Save to the database. else: # Report an error if the user did not fill out all fields. logger.error( "Failed to add or update organism. " "Please check that all fields are filled correctly." )
This function is called by the Django API to specify how this object will be saved to the database.
entailment
def init(resolution, pygame_flags=0, display_pos=(0, 0), interactive_mode=False): """Creates a window of given resolution. :param resolution: the resolution of the windows as (width, height) in pixels :type resolution: tuple :param pygame_flags: modify the creation of the window. For further information see :ref:`creating_a_window` :type pygame_flags: int :param display_pos: determines the position on the desktop where the window is created. In a multi monitor system this can be used to position the window on a different monitor. E.g. the monitor to the right of the main-monitor would be at position (1920, 0) if the main monitor has the width 1920. :type display_pos: tuple :param interactive_mode: Will install a thread, that emptys the event-queue every 100ms. This is neccessary to be able to use the display() function in an interactive console on windows systems. If interactive_mode is set, init() will return a reference to the background thread. This thread has a stop() method which can be used to cancel it. If you use ctrl+d or exit() within ipython, while the thread is still running, ipython will become unusable, but not close. :type interactive_mode: bool :return: a reference to the display screen, or a reference to the background thread if interactive_mode was set to true. In the second scenario you can obtain a reference to the display surface via pygame.display.get_surface() :rtype: pygame.Surface """ os.environ['SDL_VIDEO_WINDOW_POS'] = "{}, {}".format(*display_pos) pygame.init() pygame.font.init() disp = pygame.display.set_mode(resolution, pygame_flags) return _PumpThread() if interactive_mode else disp
Creates a window of given resolution. :param resolution: the resolution of the windows as (width, height) in pixels :type resolution: tuple :param pygame_flags: modify the creation of the window. For further information see :ref:`creating_a_window` :type pygame_flags: int :param display_pos: determines the position on the desktop where the window is created. In a multi monitor system this can be used to position the window on a different monitor. E.g. the monitor to the right of the main-monitor would be at position (1920, 0) if the main monitor has the width 1920. :type display_pos: tuple :param interactive_mode: Will install a thread, that emptys the event-queue every 100ms. This is neccessary to be able to use the display() function in an interactive console on windows systems. If interactive_mode is set, init() will return a reference to the background thread. This thread has a stop() method which can be used to cancel it. If you use ctrl+d or exit() within ipython, while the thread is still running, ipython will become unusable, but not close. :type interactive_mode: bool :return: a reference to the display screen, or a reference to the background thread if interactive_mode was set to true. In the second scenario you can obtain a reference to the display surface via pygame.display.get_surface() :rtype: pygame.Surface
entailment
def display(surface): """Displays a pygame.Surface in the window. in pygame the window is represented through a surface, on which you can draw as on any other pygame.Surface. A refernce to to the screen can be optained via the :py:func:`pygame.display.get_surface` function. To display the contents of the screen surface in the window :py:func:`pygame.display.flip` needs to be called. :py:func:`display` draws the surface onto the screen surface at the postion (0, 0), and then calls :py:func:`flip`. :param surface: the pygame.Surface to display :type surface: pygame.Surface """ screen = pygame.display.get_surface() screen.blit(surface, (0, 0)) pygame.display.flip()
Displays a pygame.Surface in the window. in pygame the window is represented through a surface, on which you can draw as on any other pygame.Surface. A refernce to to the screen can be optained via the :py:func:`pygame.display.get_surface` function. To display the contents of the screen surface in the window :py:func:`pygame.display.flip` needs to be called. :py:func:`display` draws the surface onto the screen surface at the postion (0, 0), and then calls :py:func:`flip`. :param surface: the pygame.Surface to display :type surface: pygame.Surface
entailment
def empty_surface(fill_color, size=None): """Returns an empty surface filled with fill_color. :param fill_color: color to fill the surface with :type fill_color: pygame.Color :param size: the size of the new surface, if None its created to be the same size as the screen :type size: int-2-tuple """ sr = pygame.display.get_surface().get_rect() surf = pygame.Surface(size or (sr.w, sr.h)) surf.fill(fill_color) return surf
Returns an empty surface filled with fill_color. :param fill_color: color to fill the surface with :type fill_color: pygame.Color :param size: the size of the new surface, if None its created to be the same size as the screen :type size: int-2-tuple
entailment
def process_char(buffer: str, char: str, mappings=_char_mappings): """This is a convinience method for use with EventListener.wait_for_unicode_char(). In most cases it simply appends char to buffer. Some replacements are done because presing return will produce '\\r' but for most cases '\\n' would be desireable. Also backspace cant just be added to a string either, therefore, if char is "\\u0008" the last character from buffer will be cut off. The replacement from '\\r' to '\\n' is done using the mappings argument, the default value for it also contains a mapping from '\t' to 4 spaces. :param buffer: the string to be updated :type buffer: str :param char: the unicode character to be processed :type char: str :param mappings: a dict containing mappings :type mappings: dict :returns: a new string""" if char in mappings: return buffer + mappings[char] elif char == "\u0008": return buffer[:-1] if len(buffer) > 0 else buffer else: return buffer + char
This is a convinience method for use with EventListener.wait_for_unicode_char(). In most cases it simply appends char to buffer. Some replacements are done because presing return will produce '\\r' but for most cases '\\n' would be desireable. Also backspace cant just be added to a string either, therefore, if char is "\\u0008" the last character from buffer will be cut off. The replacement from '\\r' to '\\n' is done using the mappings argument, the default value for it also contains a mapping from '\t' to 4 spaces. :param buffer: the string to be updated :type buffer: str :param char: the unicode character to be processed :type char: str :param mappings: a dict containing mappings :type mappings: dict :returns: a new string
entailment
def run(self): """ Called by internal API subsystem to initialize websockets connections in the API interface """ self.api = self.context.get("cls")(self.context) self.context["inst"].append(self) # Adapters used by strategies def on_ws_connect(*args, **kwargs): """Callback on connect hook to set is_connected_ws""" self.is_connected_ws = True self.api.on_ws_connect(*args, **kwargs) # Initialize websocket in a thread with channels if hasattr(self.api, "on_ws_connect"): self.thread = Process(target=self.api.connect_ws, args=( on_ws_connect, [ SockChannel(channel, res_type, self._generate_result) for channel, res_type in self .context .get("conf") .get("subscriptions") .items() ])) self.thread.start()
Called by internal API subsystem to initialize websockets connections in the API interface
entailment
def add_channels(self, channels): """ Take a list of SockChannel objects and extend the websock listener """ chans = [ SockChannel(chan, res, self._generate_result) for chan, res in channels.items() ] self.api.channels.extend(chans) self.api.connect_channels(chans)
Take a list of SockChannel objects and extend the websock listener
entailment
def _generate_result(self, res_type, channel, result): """Generate the result object""" schema = self.api.ws_result_schema() schema.context['channel'] = channel schema.context['response_type'] = res_type self.callback(schema.load(result), self.context)
Generate the result object
entailment
def rand_elem(seq, n=None): """returns a random element from seq n times. If n is None, it continues indefinitly""" return map(random.choice, repeat(seq, n) if n is not None else repeat(seq))
returns a random element from seq n times. If n is None, it continues indefinitly
entailment
def first_paragraph(multiline_str, without_trailing_dot=True, maxlength=None): '''Return first paragraph of multiline_str as a oneliner. When without_trailing_dot is True, the last char of the first paragraph will be removed, if it is a dot ('.'). Examples: >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph' >>> print(first_paragraph(multiline_str)) first line second line >>> multiline_str = 'first \\n second \\n \\n next paragraph ' >>> print(first_paragraph(multiline_str)) first second >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph' >>> print(first_paragraph(multiline_str, maxlength=3)) fir >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph' >>> print(first_paragraph(multiline_str, maxlength=78)) first line second line >>> multiline_str = 'first line.' >>> print(first_paragraph(multiline_str)) first line >>> multiline_str = 'first line.' >>> print(first_paragraph(multiline_str, without_trailing_dot=False)) first line. >>> multiline_str = '' >>> print(first_paragraph(multiline_str)) <BLANKLINE> ''' stripped = '\n'.join([line.strip() for line in multiline_str.splitlines()]) paragraph = stripped.split('\n\n')[0] res = paragraph.replace('\n', ' ') if without_trailing_dot: res = res.rsplit('.', 1)[0] if maxlength: res = res[0:maxlength] return res
Return first paragraph of multiline_str as a oneliner. When without_trailing_dot is True, the last char of the first paragraph will be removed, if it is a dot ('.'). Examples: >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph' >>> print(first_paragraph(multiline_str)) first line second line >>> multiline_str = 'first \\n second \\n \\n next paragraph ' >>> print(first_paragraph(multiline_str)) first second >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph' >>> print(first_paragraph(multiline_str, maxlength=3)) fir >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph' >>> print(first_paragraph(multiline_str, maxlength=78)) first line second line >>> multiline_str = 'first line.' >>> print(first_paragraph(multiline_str)) first line >>> multiline_str = 'first line.' >>> print(first_paragraph(multiline_str, without_trailing_dot=False)) first line. >>> multiline_str = '' >>> print(first_paragraph(multiline_str)) <BLANKLINE>
entailment
def print_doc1(*args, **kwargs): '''Print the first paragraph of the docstring of the decorated function. The paragraph will be printed as a oneliner. May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``) or with named arguments ``color``, ``bold``, ``prefix`` of ``tail`` (eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``). Examples: # >>> @print_doc1 # ... def foo(): # ... """First line of docstring. # ... # ... another line. # ... """ # ... pass # ... # >>> foo() # \033[34mFirst line of docstring\033[0m # >>> @print_doc1 # ... def foo(): # ... """First paragraph of docstring which contains more than one # ... line. # ... # ... Another paragraph. # ... """ # ... pass # ... # >>> foo() # \033[34mFirst paragraph of docstring which contains more than one line\033[0m ''' # output settings from kwargs or take defaults color = kwargs.get('color', blue) bold = kwargs.get('bold', False) prefix = kwargs.get('prefix', '') tail = kwargs.get('tail', '\n') def real_decorator(func): '''real decorator function''' @wraps(func) def wrapper(*args, **kwargs): '''the wrapper function''' try: prgf = first_paragraph(func.__doc__) print(color(prefix + prgf + tail, bold)) except AttributeError as exc: name = func.__name__ print(red(flo('{name}() has no docstring'))) raise(exc) return func(*args, **kwargs) return wrapper invoked = bool(not args or kwargs) if not invoked: # invoke decorator function which returns the wrapper function return real_decorator(func=args[0]) return real_decorator
Print the first paragraph of the docstring of the decorated function. The paragraph will be printed as a oneliner. May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``) or with named arguments ``color``, ``bold``, ``prefix`` of ``tail`` (eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``). Examples: # >>> @print_doc1 # ... def foo(): # ... """First line of docstring. # ... # ... another line. # ... """ # ... pass # ... # >>> foo() # \033[34mFirst line of docstring\033[0m # >>> @print_doc1 # ... def foo(): # ... """First paragraph of docstring which contains more than one # ... line. # ... # ... Another paragraph. # ... """ # ... pass # ... # >>> foo() # \033[34mFirst paragraph of docstring which contains more than one line\033[0m
entailment
def print_full_name(*args, **kwargs): '''Decorator, print the full name of the decorated function. May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``) or with named arguments ``color``, ``bold``, or ``prefix`` (eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``). ''' color = kwargs.get('color', default_color) bold = kwargs.get('bold', False) prefix = kwargs.get('prefix', '') tail = kwargs.get('tail', '') def real_decorator(func): '''real decorator function''' @wraps(func) def wrapper(*args, **kwargs): '''the wrapper function''' first_line = '' try: first_line = func.__module__ + '.' + func.__qualname__ except AttributeError as exc: first_line = func.__name__ print(color(prefix + first_line + tail, bold)) return func(*args, **kwargs) return wrapper invoked = bool(not args or kwargs) if not invoked: # invoke decorator function which returns the wrapper function return real_decorator(func=args[0]) return real_decorator
Decorator, print the full name of the decorated function. May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``) or with named arguments ``color``, ``bold``, or ``prefix`` (eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
entailment
def filled_out_template_str(template, **substitutions): '''Return str template with applied substitutions. Example: >>> template = 'Asyl for {{name}} {{surname}}!' >>> filled_out_template_str(template, name='Edward', surname='Snowden') 'Asyl for Edward Snowden!' >>> template = '[[[foo]]] was substituted by {{foo}}' >>> filled_out_template_str(template, foo='bar') '{{foo}} was substituted by bar' >>> template = 'names wrapped by {single} {curly} {braces} {{curly}}' >>> filled_out_template_str(template, curly='remains unchanged') 'names wrapped by {single} {curly} {braces} remains unchanged' ''' template = template.replace('{', '{{') template = template.replace('}', '}}') template = template.replace('{{{{', '{') template = template.replace('}}}}', '}') template = template.format(**substitutions) template = template.replace('{{', '{') template = template.replace('}}', '}') template = template.replace('[[[', '{{') template = template.replace(']]]', '}}') return template
Return str template with applied substitutions. Example: >>> template = 'Asyl for {{name}} {{surname}}!' >>> filled_out_template_str(template, name='Edward', surname='Snowden') 'Asyl for Edward Snowden!' >>> template = '[[[foo]]] was substituted by {{foo}}' >>> filled_out_template_str(template, foo='bar') '{{foo}} was substituted by bar' >>> template = 'names wrapped by {single} {curly} {braces} {{curly}}' >>> filled_out_template_str(template, curly='remains unchanged') 'names wrapped by {single} {curly} {braces} remains unchanged'
entailment
def filled_out_template(filename, **substitutions): '''Return content of file filename with applied substitutions.''' res = None with open(filename, 'r') as fp: template = fp.read() res = filled_out_template_str(template, **substitutions) return res
Return content of file filename with applied substitutions.
entailment
def update_or_append_line(filename, prefix, new_line, keep_backup=True, append=True): '''Search in file 'filename' for a line starting with 'prefix' and replace the line by 'new_line'. If a line starting with 'prefix' not exists 'new_line' will be appended. If the file not exists, it will be created. Return False if new_line was appended, else True (i.e. if the prefix was found within of the file). ''' same_line_exists, line_updated = False, False filename = os.path.expanduser(filename) if os.path.isfile(filename): backup = filename + '~' shutil.move(filename, backup) # with open(filename, 'w') as dest, open(backup, 'r') as source: with open(filename, 'w') as dest: with open(backup, 'r') as source: # try update.. for line in source: if line == new_line: same_line_exists = True if line.startswith(prefix): dest.write(new_line + '\n') line_updated = True else: dest.write(line) # ..or append if not (same_line_exists or line_updated) and append: dest.write(new_line + '\n') if not keep_backup: os.remove(backup) else: with open(filename, 'w') as dest: dest.write(new_line + '\n') return same_line_exists or line_updated
Search in file 'filename' for a line starting with 'prefix' and replace the line by 'new_line'. If a line starting with 'prefix' not exists 'new_line' will be appended. If the file not exists, it will be created. Return False if new_line was appended, else True (i.e. if the prefix was found within of the file).
entailment
def comment_out_line(filename, line, comment='#', update_or_append_line=update_or_append_line): '''Comment line out by putting a comment sign in front of the line. If the file does not contain the line, the files content will not be changed (but the file will be touched in every case). ''' update_or_append_line(filename, prefix=line, new_line=comment+line, append=False)
Comment line out by putting a comment sign in front of the line. If the file does not contain the line, the files content will not be changed (but the file will be touched in every case).
entailment
def uncomment_or_update_or_append_line(filename, prefix, new_line, comment='#', keep_backup=True, update_or_append_line=update_or_append_line): '''Remove the comment of an commented out line and make the line "active". If such an commented out line not exists it would be appended. ''' uncommented = update_or_append_line(filename, prefix=comment+prefix, new_line=new_line, keep_backup=keep_backup, append=False) if not uncommented: update_or_append_line(filename, prefix, new_line, keep_backup=keep_backup, append=True)
Remove the comment of an commented out line and make the line "active". If such an commented out line not exists it would be appended.
entailment
def convert_unicode_2_utf8(input): '''Return a copy of `input` with every str component encoded from unicode to utf-8. ''' if isinstance(input, dict): try: # python-2.6 return dict((convert_unicode_2_utf8(key), convert_unicode_2_utf8(value)) for key, value in input.iteritems()) except AttributeError: # since python-2.7 cf. http://stackoverflow.com/a/1747827 # [the ugly eval('...') is required for a valid syntax on # python-2.6, cf. http://stackoverflow.com/a/25049535] return eval('''{convert_unicode_2_utf8(key): convert_unicode_2_utf8(value) for key, value in input.items()}''') elif isinstance(input, list): return [convert_unicode_2_utf8(element) for element in input] # elif order relevant: python2 vs. python3 # cf. http://stackoverflow.com/a/19877309 elif isinstance(input, str): return input else: try: if eval('''isinstance(input, unicode)'''): return input.encode('utf-8') except NameError: # unicode does not exist in python-3.x pass return input
Return a copy of `input` with every str component encoded from unicode to utf-8.
entailment
def load_json(filename, gzip_mode=False): '''Return the json-file data, with all strings utf-8 encoded.''' open_file = open if gzip_mode: open_file = gzip.open try: with open_file(filename, 'rt') as fh: data = json.load(fh) data = convert_unicode_2_utf8(data) return data except AttributeError: # Python-2.6 fh = open_file(filename, 'rt') data = json.load(fh) fh.close() data = convert_unicode_2_utf8(data) return data
Return the json-file data, with all strings utf-8 encoded.
entailment
def write_json(data, filename, gzip_mode=False): '''Write the python data structure as a json-Object to filename.''' open_file = open if gzip_mode: open_file = gzip.open try: with open_file(filename, 'wt') as fh: json.dump(obj=data, fp=fh, sort_keys=True) except AttributeError: # Python-2.6 fh = open_file(filename, 'wt') json.dump(obj=data, fp=fh, sort_keys=True) fh.close()
Write the python data structure as a json-Object to filename.
entailment
def text_with_newlines(text, line_length=78, newline='\n'): '''Return text with a `newline` inserted after each `line_length` char. Return `text` unchanged if line_length == 0. ''' if line_length > 0: if len(text) <= line_length: return text else: return newline.join([text[idx:idx+line_length] for idx in range(0, len(text), line_length)]) else: return text
Return text with a `newline` inserted after each `line_length` char. Return `text` unchanged if line_length == 0.
entailment
def lazy_val(func, with_del_hook=False): '''A memoize decorator for class properties. Return a cached property that is calculated by function `func` on first access. ''' def hook_for(that): try: orig_del = that.__del__ except AttributeError: orig_del = None def del_hook(*args, **kwargs): del that._cache[id(that)] del that._del_hook_cache[id(that)] if orig_del is not None: orig_del(that, *args, **kwargs) try: if orig_del is not None: that.__del__ = del_hook except AttributeError: # that.__del__ is a class property and cannot be changed by instance orig_del = None return del_hook def add_to_del_hook_cache(that): if with_del_hook: try: that._del_hook_cache[id(that)] = hook_for(that) except AttributeError: # when that._del_hook_cache not exists, it means it is not a # class property. Then, we don't need a del_hook(). pass @functools.wraps(func) def get(self): try: return self._cache[id(self)][func] except AttributeError: self._cache = {id(self): {}, } add_to_del_hook_cache(self) except KeyError: try: self._cache[id(self)] except KeyError: self._cache[id(self)] = {} add_to_del_hook_cache(self) val = self._cache[id(self)][func] = func(self) return val return property(get)
A memoize decorator for class properties. Return a cached property that is calculated by function `func` on first access.
entailment
def _readlines(fname, fpointer1=open, fpointer2=open): # pragma: no cover """Read all lines from file.""" # fpointer1, fpointer2 arguments to ease testing try: with fpointer1(fname, "r") as fobj: return fobj.readlines() except UnicodeDecodeError: # pragma: no cover with fpointer2(fname, "r", encoding="utf-8") as fobj: return fobj.readlines()
Read all lines from file.
entailment
def call(self, callname, data=None, **args): """ Generic interface to REST apiGeneric interface to REST api :param callname: query name :param data: dictionary of inputs :param args: keyword arguments added to the payload :return: """ url = f"{self.url_base}/{callname}" payload = self.payload.copy() payload.update(**args) if data is not None: payload.update(data) res = self.session.post(url, data=payload) if res.status_code > 299: self.log.error(f"URL: {url}") self.log.error(f"Payload: {payload}") self.log.error(f"STATUS: {res.status_code}") self.log.error(f"RESPONSE: {res.text}") return elif 'error' in res.json(): self.log.error(res.json()['error']) return return res.json()
Generic interface to REST apiGeneric interface to REST api :param callname: query name :param data: dictionary of inputs :param args: keyword arguments added to the payload :return:
entailment
def launch_plugin(self): ''' launch nagios_plugin command ''' # nagios_plugins probes for plugin in self.plugins: # Construct the nagios_plugin command command = ('%s%s' % (self.plugins[plugin]['path'], self.plugins[plugin]['command'])) try: nagios_plugin = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError: LOG.error("[nagios_plugins]: '%s' executable is missing", command) else: output = nagios_plugin.communicate()[0].strip() return_code = nagios_plugin.returncode if return_code >= len(STATUSES): LOG.error("[nagios_plugins]: '%s' executable has an issue, return code: %s", command, return_code) else: LOG.log(STATUSES[return_code][1], "[nagios_plugins][%s] (%s status): %s", plugin, STATUSES[return_code][0], output) yield {'return_code': int(return_code), 'output': str(output), 'time_stamp': int(time.time()), 'service_description': plugin, 'specific_servers': self.plugins[plugin]['servers']}
launch nagios_plugin command
entailment
def match(Class, path, pattern, flags=re.I, sortkey=None, ext=None):
    """Return instances of *Class* for the files under *path* whose
    basename matches *pattern*, sorted by *sortkey*."""
    matched = []
    for fn in rglob(path, f"*{ext or ''}"):
        name = os.path.basename(fn)
        # Keep files whose basename matches and that are not temp files
        # (basenames starting with '~').
        if re.search(pattern, name, flags=flags) is not None and name[0] != '~':
            matched.append(Class(fn=fn))
    return sorted(matched, key=sortkey)
for a given path and regexp pattern, return the files that match
entailment
def copy(self, new_fn):
    """Duplicate this file at *new_fn*, carrying over atime and mtime.

    Returns the new file object.
    """
    duplicate = self.__class__(fn=str(new_fn))
    duplicate.write(data=self.read())
    # Preserve the original access/modification timestamps.
    duplicate.utime(self.atime, self.mtime)
    return duplicate
copy the file to the new_fn, preserving atime and mtime
entailment
def make_basename(self, fn=None, ext=None):
    """Build a filesystem-compliant basename for this file.

    :param fn: filename to normalize; defaults to this file's own fn
    :param ext: extension to use; defaults to the existing one, lowercased
    """
    stem, current_ext = os.path.splitext(os.path.basename(fn or self.fn))
    extension = ext or current_ext.lower()
    # hyphenify() produces an ASCII, hyphen-separated stem.
    safe_stem = String(stem).hyphenify(ascii=True)
    return safe_stem + extension
make a filesystem-compliant basename for this file
entailment
def tempfile(self, mode='wb', **args):
    """Write the contents of the file to a tempfile and return the
    tempfile filename.  The caller is responsible for removing it.
    """
    # Local import: this method's name shadows the stdlib module when the
    # function is referenced at module scope, so import the callable directly.
    from tempfile import NamedTemporaryFile
    # delete=False keeps the file on disk after the handle is closed, so the
    # returned filename stays valid (the delete-on-close default would remove
    # it as soon as this function returned).
    tf = NamedTemporaryFile(mode=mode, delete=False)
    # Close our handle before self.write reopens the path (Windows-safe).
    tf.close()
    self.write(tf.name, mode=mode, **args)
    # Bug fix: the original returned the undefined name ``tfn`` (NameError).
    return tf.name
write the contents of the file to a tempfile and return the tempfile filename
entailment
def delete(self):
    """Remove this path from the filesystem: a regular file is unlinked,
    a directory tree is removed recursively; anything else is untouched."""
    # Pick the remover matching the path type (file checked first, as before).
    remover = os.remove if self.isfile else (shutil.rmtree if self.isdir else None)
    if remover is not None:
        remover(self.fn)
delete the file from the filesystem.
entailment
def readable_size(C, bytes, suffix='B', decimals=1, sep='\u00a0'):
    """Render a byte count as a human-readable size string.

    Returns None when *bytes* is None.  Units are taken from C.SIZE_UNITS;
    plain bytes (the first unit) are shown with no decimal places.
    """
    if bytes is None:
        return None
    size = float(bytes)
    last_unit = C.SIZE_UNITS[-1]
    for unit in C.SIZE_UNITS:
        if abs(size) < 1024 or unit == last_unit:
            # No decimals for the first (bytes) unit, `decimals` otherwise.
            places = decimals if C.SIZE_UNITS.index(unit) > 0 else 0
            return "{size:.{decimals}f}{sep}{unit}{suffix}".format(
                size=size, unit=unit, suffix=suffix, sep=sep, decimals=places)
        size /= 1024
given a number of bytes, return the file size in readable units
entailment
def bytes_from_readable_size(C, size, suffix='B'):
    """Invert readable_size(): parse a human-readable size string into bytes.

    :param size: string such as "2.0 KB" (as produced by File.readable_size())
    :param suffix: unit suffix, default "B"
    :return: integer number of bytes
    """
    # Raw string for the regex: the original non-raw literal relied on the
    # invalid escapes \. and \s, which emit warnings on modern Pythons.
    s = re.split(
        r"^([0-9\.]+)\s*([%s]?)%s?" % (''.join(C.SIZE_UNITS), suffix),
        size,
        flags=re.I,
    )
    bytes, unit = round(float(s[1])), s[2].upper()
    # Walk back down the unit ladder, multiplying by 1024 per step.
    while unit in C.SIZE_UNITS and C.SIZE_UNITS.index(unit) > 0:
        bytes *= 1024
        unit = C.SIZE_UNITS[C.SIZE_UNITS.index(unit) - 1]
    return bytes
given a readable_size (as produced by File.readable_size()), return the number of bytes.
entailment
def run(self):
    """Executed on startup of application.

    Starts every websocket handler first, then every API handler.
    """
    for component in (*self.wsocks, *self.apis):
        component.run()
Executed on startup of application
entailment
def shutdown(self):
    """Executed on shutdown of application.

    Stops every websocket handler first, then every API handler.
    """
    for component in (*self.wsocks, *self.apis):
        component.shutdown()
Executed on shutdown of application
entailment
def request(self, url, method, data=None, headers=None):
    """Make an HTTP call, decode the JSON response, and handle errors.

    :param url: full endpoint URL
    :param method: HTTP verb, e.g. 'GET' or 'POST'
    :param data: dict merged with the api_key into the JSON request body
    :param headers: extra headers merged over the defaults
    :return: decoded JSON body ({} when the body is empty,
             or {'errors': ...} on a bad request)
    """
    http_headers = merge_dict(self.default_headers, headers or {})
    request_data = merge_dict({'api_key': self.apikey}, data or {})
    logger.info('HTTP %s REQUEST TO %s' % (method, url))
    start = datetime.datetime.now()
    try:
        response = requests.request(method=method, url=url,
                                    data=json.dumps(request_data),
                                    headers=http_headers)
    except exceptions.BadRequestError as e:
        # Bug fix: the original passed a dict to json.loads(), which raises
        # TypeError; return the error mapping directly instead.
        return {'errors': e.content}
    duration = datetime.datetime.now() - start
    # NOTE(review): this logs response.encoding — possibly status_code was
    # intended; confirm before changing.
    logger.info('RESPONSE %s DURATION %s.%s' % (response.encoding,
                                                duration.seconds,
                                                duration.microseconds))
    return json.loads(response.content) if response.content else {}
Makes an HTTP call, formats response and does error handling.
entailment
def get(self, action, params=None, headers=None):
    """Issue a GET request against the endpoint URL for *action*."""
    url = make_url(self.endpoint, action)
    return self.request(url, method='GET', data=params, headers=headers)
Makes a GET request
entailment