def get_stack_data(self, frame, traceback, event_type):
    """Get the stack frames data at each of the hooks above (i.e. for each
    line of the Python code)"""
    heap_data = Heap(self.options)
    stack_data = StackFrames(self.options)
    stack_frames, cur_frame_ind = self.get_stack(frame, traceback)

    for frame_ind, (frame, lineno) in enumerate(stack_frames):
        skip_this_stack = False

        # Skip the self.run calling frame (first frame)
        if frame_ind == 0:
            continue

        # Skip stack after a certain stack frame depth
        if len(stack_data) > self.options.depth:
            skip_this_stack = True
            break

        # Skip stack when frames don't belong to the current notebook or
        # current cell, i.e. frames in another global scope altogether
        # or frames in other cells
        if (not self.is_notebook_frame(frame) or
                self.is_other_cell_frame(frame)):
            if not self.options.step_all:
                skip_this_stack = True
                break
            lineno = 0  # So line markers don't display for these frames
        else:
            lineno += 1  # Because cell magic is actually line 1

        # Filter out ignored names from the frame locals
        user_locals = filter_dict(
            frame.f_locals,
            ignore_vars + list(self.ipy_shell.user_ns_hidden.keys())
        )

        # Add frame and heap data
        stack_data.add(frame, lineno, event_type, user_locals)
        heap_data.add(user_locals)

    if not skip_this_stack and not stack_data.is_empty():
        self.trace_history.append(
            stack_data,
            heap_data,
            self.stdout.getvalue()
        )
def filter_dict(d, exclude):
    """Return a new dict with specified keys excluded from the original dict

    Args:
        d (dict): original dict
        exclude (list): The keys that are excluded
    """
    ret = {}
    for key, value in d.items():
        if key not in exclude:
            ret.update({key: value})
    return ret
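A quick illustration of the helper above; the values are hypothetical:

    hidden = ['_', '__', '___']
    filter_dict({'x': 1, '_': None, 'y': 2}, hidden)  # -> {'x': 1, 'y': 2}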
from contextlib import contextmanager


@contextmanager  # decorator assumed: the yield/try/finally pattern below is a context manager
def redirect_stdout(new_stdout):
    """Redirect the stdout

    Args:
        new_stdout (io.StringIO): New stdout to use instead
    """
    old_stdout, sys.stdout = sys.stdout, new_stdout
    try:
        yield None
    finally:
        sys.stdout = old_stdout
def format(obj, options):
    """Return a string representation of the Python object

    Args:
        obj: The Python object
        options: Format options
    """
    formatters = {
        float_types: lambda x: '{:.{}g}'.format(x, options.digits),
    }
    for _types, fmtr in formatters.items():
        if isinstance(obj, _types):
            return fmtr(obj)
    try:
        if six.PY2 and isinstance(obj, six.string_types):
            return str(obj.encode('utf-8'))
        return str(obj)
    except:
        return 'OBJECT'
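A hedged usage sketch of the formatter above. `Options` here is a stand-in namedtuple providing only the `digits` attribute the function reads, and it assumes the module-level `float_types` tuple includes `float`:

    from collections import namedtuple

    Options = namedtuple('Options', 'digits')
    format(3.14159265, Options(digits=3))  # -> '3.14'
    format([1, 2, 3], Options(digits=3))   # falls through to str() -> '[1, 2, 3]'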
def get_type_info(obj):
    """Get type information for a Python object

    Args:
        obj: The Python object

    Returns:
        tuple: (object type "category", object type name)
    """
    if isinstance(obj, primitive_types):
        return ('primitive', type(obj).__name__)
    if isinstance(obj, sequence_types):
        return ('sequence', type(obj).__name__)
    if isinstance(obj, array_types):
        return ('array', type(obj).__name__)
    if isinstance(obj, key_value_types):
        return ('key-value', type(obj).__name__)
    if isinstance(obj, types.ModuleType):
        return ('module', type(obj).__name__)
    if isinstance(obj, (types.FunctionType, types.MethodType)):
        return ('function', type(obj).__name__)
    if isinstance(obj, type):
        if hasattr(obj, '__dict__'):
            return ('class', obj.__name__)
    if isinstance(type(obj), type):
        if hasattr(obj, '__dict__'):
            cls_name = type(obj).__name__
            if cls_name == 'classobj':
                cls_name = obj.__name__
                return ('class', '{}'.format(cls_name))
            if cls_name == 'instance':
                cls_name = obj.__class__.__name__
            return ('instance', '{} instance'.format(cls_name))

    return ('unknown', type(obj).__name__)
def refresh(self):
    """
    Reloads the wallet and its accounts. By default, this method is called only once,
    on :class:`Wallet` initialization. When the wallet is accessed by multiple clients or
    exists in multiple instances, calling `refresh()` will be necessary to update
    the list of accounts.
    """
    self.accounts = self.accounts or []
    idx = 0
    for _acc in self._backend.accounts():
        _acc.wallet = self
        try:
            if self.accounts[idx]:
                continue
        except IndexError:
            pass
        self.accounts.append(_acc)
        idx += 1
def spend_key(self):
    """
    Returns private spend key. None if wallet is view-only.

    :rtype: str or None
    """
    key = self._backend.spend_key()
    if key == numbers.EMPTY_KEY:
        return None
    return key
def new_account(self, label=None):
    """
    Creates new account, appends it to the :class:`Wallet`'s account list
    and returns it.

    :param label: account label as `str`
    :rtype: :class:`Account`
    """
    acc, addr = self._backend.new_account(label=label)
    assert acc.index == len(self.accounts)
    self.accounts.append(acc)
    return acc
def confirmations(self, txn_or_pmt):
    """
    Returns the number of confirmations for given
    :class:`Transaction <monero.transaction.Transaction>` or
    :class:`Payment <monero.transaction.Payment>` object.

    :rtype: int
    """
    if isinstance(txn_or_pmt, Payment):
        txn = txn_or_pmt.transaction
    else:
        txn = txn_or_pmt
    try:
        return max(0, self.height() - txn.height)
    except TypeError:
        return 0
def get_address(self, major, minor):
    """
    Calculates sub-address for account index (`major`) and address index within
    the account (`minor`).

    :rtype: :class:`BaseAddress <monero.address.BaseAddress>`
    """
    # ensure indexes are within uint32
    if major < 0 or major >= 2**32:
        raise ValueError('major index {} is outside uint32 range'.format(major))
    if minor < 0 or minor >= 2**32:
        raise ValueError('minor index {} is outside uint32 range'.format(minor))
    master_address = self.address()
    if major == minor == 0:
        return master_address
    master_svk = unhexlify(self.view_key())
    master_psk = unhexlify(self.address().spend_key())
    # m = Hs("SubAddr\0" || master_svk || major || minor)
    hsdata = b''.join([
        b'SubAddr\0', master_svk,
        struct.pack('<I', major), struct.pack('<I', minor)])
    m = keccak_256(hsdata).digest()
    # D = master_psk + m * B
    D = ed25519.add_compressed(
        ed25519.decodepoint(master_psk),
        ed25519.scalarmult(ed25519.B, ed25519.decodeint(m)))
    # C = master_svk * D
    C = ed25519.scalarmult(D, ed25519.decodeint(master_svk))
    netbyte = bytearray([
        42 if master_address.is_mainnet() else
        63 if master_address.is_testnet() else 36])
    data = netbyte + ed25519.encodepoint(D) + ed25519.encodepoint(C)
    checksum = keccak_256(data).digest()[:4]
    return address.SubAddress(base58.encode(hexlify(data + checksum)))
def transfer(self, address, amount,
             priority=prio.NORMAL, payment_id=None, unlock_time=0,
             relay=True):
    """
    Sends a transfer from the default account. Returns a list of resulting transactions.

    :param address: destination :class:`Address <monero.address.Address>` or subtype
    :param amount: amount to send
    :param priority: transaction priority, implies fee. The priority can be a number
                from 1 to 4 (unimportant, normal, elevated, priority) or a constant
                from `monero.prio`.
    :param payment_id: ID for the payment (must be None if
                :class:`IntegratedAddress <monero.address.IntegratedAddress>`
                is used as the destination)
    :param unlock_time: the extra unlock delay
    :param relay: if `True`, the wallet will relay the transaction(s) to the network
                immediately; when `False`, it will only return the transaction(s)
                so they might be broadcasted later
    :rtype: list of :class:`Transaction <monero.transaction.Transaction>`
    """
    return self.accounts[0].transfer(
        address,
        amount,
        priority=priority,
        payment_id=payment_id,
        unlock_time=unlock_time,
        relay=relay)
def transfer_multiple(self, destinations,
                      priority=prio.NORMAL, payment_id=None, unlock_time=0,
                      relay=True):
    """
    Sends a batch of transfers from the default account. Returns a list of resulting
    transactions.

    :param destinations: a list of destination and amount pairs: [(address, amount), ...]
    :param priority: transaction priority, implies fee. The priority can be a number
                from 1 to 4 (unimportant, normal, elevated, priority) or a constant
                from `monero.prio`.
    :param payment_id: ID for the payment (must be None if
                :class:`IntegratedAddress <monero.address.IntegratedAddress>`
                is used as a destination)
    :param unlock_time: the extra unlock delay
    :param relay: if `True`, the wallet will relay the transaction(s) to the network
                immediately; when `False`, it will only return the transaction(s)
                so they might be broadcasted later
    :rtype: list of :class:`Transaction <monero.transaction.Transaction>`
    """
    return self.accounts[0].transfer_multiple(
        destinations,
        priority=priority,
        payment_id=payment_id,
        unlock_time=unlock_time,
        relay=relay)
def balance(self, unlocked=False):
    """
    Returns specified balance.

    :param unlocked: if `True`, return the unlocked balance, otherwise return total balance
    :rtype: Decimal
    """
    return self._backend.balances(account=self.index)[1 if unlocked else 0]
def new_address(self, label=None):
    """
    Creates a new address.

    :param label: address label as `str`
    :rtype: :class:`SubAddress <monero.address.SubAddress>`
    """
    return self._backend.new_address(account=self.index, label=label)
def transfer(self, address, amount,
             priority=prio.NORMAL, payment_id=None, unlock_time=0,
             relay=True):
    """
    Sends a transfer. Returns a list of resulting transactions.

    :param address: destination :class:`Address <monero.address.Address>` or subtype
    :param amount: amount to send
    :param priority: transaction priority, implies fee. The priority can be a number
                from 1 to 4 (unimportant, normal, elevated, priority) or a constant
                from `monero.prio`.
    :param payment_id: ID for the payment (must be None if
                :class:`IntegratedAddress <monero.address.IntegratedAddress>`
                is used as the destination)
    :param unlock_time: the extra unlock delay
    :param relay: if `True`, the wallet will relay the transaction(s) to the network
                immediately; when `False`, it will only return the transaction(s)
                so they might be broadcasted later
    :rtype: list of :class:`Transaction <monero.transaction.Transaction>`
    """
    return self._backend.transfer(
        [(address, amount)],
        priority,
        payment_id,
        unlock_time,
        account=self.index,
        relay=relay)
def transfer_multiple(self, destinations,
                      priority=prio.NORMAL, payment_id=None, unlock_time=0,
                      relay=True):
    """
    Sends a batch of transfers. Returns a list of resulting transactions.

    :param destinations: a list of destination and amount pairs:
                [(:class:`Address <monero.address.Address>`, `Decimal`), ...]
    :param priority: transaction priority, implies fee. The priority can be a number
                from 1 to 4 (unimportant, normal, elevated, priority) or a constant
                from `monero.prio`.
    :param payment_id: ID for the payment (must be None if
                :class:`IntegratedAddress <monero.address.IntegratedAddress>`
                is used as the destination)
    :param unlock_time: the extra unlock delay
    :param relay: if `True`, the wallet will relay the transaction(s) to the network
                immediately; when `False`, it will only return the transaction(s)
                so they might be broadcasted later
    :rtype: list of :class:`Transaction <monero.transaction.Transaction>`
    """
    return self._backend.transfer(
        destinations,
        priority,
        payment_id,
        unlock_time,
        account=self.index,
        relay=relay)
def to_atomic(amount):
    """Convert Monero decimal to atomic integer of piconero."""
    if not isinstance(amount, (Decimal, float) + _integer_types):
        raise ValueError(
            "Amount '{}' doesn't have numeric type. Only Decimal, int, long and "
            "float (not recommended) are accepted as amounts.".format(amount))
    return int(amount * 10**12)
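For example (assuming the `Decimal` import the function relies on):

    from decimal import Decimal

    to_atomic(Decimal('1.25'))  # -> 1250000000000 piconero
    to_atomic(1)                # -> 1000000000000 piconero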
def _validate_checksum(self):
    """Given a mnemonic word string, confirm seed checksum (last word) matches
    the computed checksum.

    :rtype: bool
    """
    phrase = self.phrase.split(" ")
    if self.word_list.get_checksum(self.phrase) == phrase[-1]:
        return True
    raise ValueError("Invalid checksum")
def public_address(self, net='mainnet'):
    """Returns the master :class:`Address <monero.address.Address>` represented by the seed.

    :param net: the network, one of 'mainnet', 'testnet', 'stagenet'. Default is 'mainnet'.

    :rtype: :class:`Address <monero.address.Address>`
    """
    if net not in ('mainnet', 'testnet', 'stagenet'):
        raise ValueError(
            "Invalid net argument. Must be one of ('mainnet', 'testnet', 'stagenet').")
    netbyte = 18 if net == 'mainnet' else 53 if net == 'testnet' else 24
    data = "{:x}{:s}{:s}".format(netbyte, self.public_spend_key(), self.public_view_key())
    h = keccak_256()
    h.update(unhexlify(data))
    checksum = h.hexdigest()
    return address(base58.encode(data + checksum[0:8]))
def address(addr, label=None):
    """Discover the proper class and return instance for a given Monero address.

    :param addr: the address as a string-like object
    :param label: a label for the address (defaults to `None`)

    :rtype: :class:`Address`, :class:`SubAddress` or :class:`IntegratedAddress`
    """
    addr = str(addr)
    if _ADDR_REGEX.match(addr):
        netbyte = bytearray(unhexlify(base58.decode(addr)))[0]
        if netbyte in Address._valid_netbytes:
            return Address(addr, label=label)
        elif netbyte in SubAddress._valid_netbytes:
            return SubAddress(addr, label=label)
        raise ValueError("Invalid address netbyte {nb:x}. Allowed values are: {allowed}".format(
            nb=netbyte,
            allowed=", ".join(map(
                lambda b: '%02x' % b,
                sorted(Address._valid_netbytes + SubAddress._valid_netbytes)))))
    elif _IADDR_REGEX.match(addr):
        return IntegratedAddress(addr)
    raise ValueError("Address must be either 95 or 106 characters long base58-encoded string, "
                     "is {addr} ({len} chars length)".format(addr=addr, len=len(addr)))
def with_payment_id(self, payment_id=0):
    """Integrates payment id into the address.

    :param payment_id: int, hexadecimal string or :class:`PaymentID <monero.numbers.PaymentID>`
                    (max 64-bit long)

    :rtype: `IntegratedAddress`
    :raises: `TypeError` if the payment id is too long
    """
    payment_id = numbers.PaymentID(payment_id)
    if not payment_id.is_short():
        raise TypeError("Payment ID {0} has more than 64 bits and cannot be integrated".format(payment_id))
    prefix = 54 if self.is_testnet() else 25 if self.is_stagenet() else 19
    data = bytearray([prefix]) + self._decoded[1:65] + struct.pack('>Q', int(payment_id))
    checksum = bytearray(keccak_256(data).digest()[:4])
    return IntegratedAddress(base58.encode(hexlify(data + checksum)))
def base_address(self):
    """Returns the base address without payment id.

    :rtype: :class:`Address`
    """
    prefix = 53 if self.is_testnet() else 24 if self.is_stagenet() else 18
    data = bytearray([prefix]) + self._decoded[1:65]
    checksum = keccak_256(data).digest()[:4]
    return Address(base58.encode(hexlify(data + checksum)))
def encode(hex):
    '''Encode hexadecimal string as base58 (ex: encoding a Monero address).'''
    data = _hexToBin(hex)
    l_data = len(data)

    if l_data == 0:
        return ""

    full_block_count = l_data // __fullBlockSize
    last_block_size = l_data % __fullBlockSize
    res_size = full_block_count * __fullEncodedBlockSize + __encodedBlockSizes[last_block_size]

    res = bytearray([__alphabet[0]] * res_size)
    for i in range(full_block_count):
        res = encode_block(data[(i * __fullBlockSize):(i * __fullBlockSize + __fullBlockSize)],
                           res, i * __fullEncodedBlockSize)

    if last_block_size > 0:
        res = encode_block(data[(full_block_count * __fullBlockSize):(full_block_count * __fullBlockSize + last_block_size)],
                           res, full_block_count * __fullEncodedBlockSize)

    return bytes(res).decode('ascii')
def decode(enc):
    '''Decode a base58 string (ex: a Monero address) into hexadecimal form.'''
    enc = bytearray(enc, encoding='ascii')
    l_enc = len(enc)

    if l_enc == 0:
        return ""

    full_block_count = l_enc // __fullEncodedBlockSize
    last_block_size = l_enc % __fullEncodedBlockSize
    try:
        last_block_decoded_size = __encodedBlockSizes.index(last_block_size)
    except ValueError:
        raise ValueError("Invalid encoded length: %d" % l_enc)

    data_size = full_block_count * __fullBlockSize + last_block_decoded_size

    data = bytearray(data_size)
    for i in range(full_block_count):
        data = decode_block(enc[(i * __fullEncodedBlockSize):(i * __fullEncodedBlockSize + __fullEncodedBlockSize)],
                            data, i * __fullBlockSize)

    if last_block_size > 0:
        data = decode_block(enc[(full_block_count * __fullEncodedBlockSize):(full_block_count * __fullEncodedBlockSize + last_block_size)],
                            data, full_block_count * __fullBlockSize)

    return _binToHex(data)
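A small round-trip sketch of the two functions above; it assumes the module-level alphabet and block-size tables they reference are the standard Monero base58 ones:

    hex_in = '1234567890abcdef'          # 8 bytes, i.e. exactly one full block
    assert decode(encode(hex_in)) == hex_in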
def encode(cls, hex):
    """Convert hexadecimal string to mnemonic word representation with checksum.
    """
    out = []
    for i in range(len(hex) // 8):
        word = endian_swap(hex[8 * i:8 * i + 8])
        x = int(word, 16)
        w1 = x % cls.n
        w2 = (x // cls.n + w1) % cls.n
        w3 = (x // cls.n // cls.n + w2) % cls.n
        out += [cls.word_list[w1], cls.word_list[w2], cls.word_list[w3]]
    checksum = cls.get_checksum(" ".join(out))
    out.append(checksum)
    return " ".join(out)
def decode(cls, phrase):
    """Calculate hexadecimal representation of the phrase.
    """
    phrase = phrase.split(" ")
    out = ""
    for i in range(len(phrase) // 3):
        word1, word2, word3 = phrase[3 * i:3 * i + 3]
        w1 = cls.word_list.index(word1)
        w2 = cls.word_list.index(word2) % cls.n
        w3 = cls.word_list.index(word3) % cls.n
        x = w1 + cls.n * ((w2 - w1) % cls.n) + cls.n * cls.n * ((w3 - w2) % cls.n)
        out += endian_swap("%08x" % x)
    return out
def get_checksum(cls, phrase):
    """Given a mnemonic word string, return a string of the computed checksum.

    :rtype: str
    """
    phrase_split = phrase.split(" ")
    if len(phrase_split) < 12:
        raise ValueError("Invalid mnemonic phrase")
    if len(phrase_split) > 13:
        # Standard format
        phrase = phrase_split[:24]
    else:
        # MyMonero format
        phrase = phrase_split[:12]
    wstr = "".join(word[:cls.unique_prefix_length] for word in phrase)
    wstr = bytearray(wstr.encode('utf-8'))
    z = ((crc32(wstr) & 0xffffffff) ^ 0xffffffff) >> 0
    z2 = ((z ^ 0xffffffff) >> 0) % len(phrase)
    return phrase_split[z2]
def send_transaction(self, tx, relay=True):
    """
    Sends a transaction generated by a :class:`Wallet <monero.wallet.Wallet>`.

    :param tx: :class:`Transaction <monero.transaction.Transaction>`
    :param relay: whether to relay the transaction to peers. If `False`, the daemon will have
                to mine the transaction itself in order to have it included in the blockchain.
    """
    return self._backend.send_transaction(tx.blob, relay=relay)
def one(prompt, *args, **kwargs):
    """Instantiates a picker, registers custom handlers for going back,
    and starts the picker.
    """
    indicator = '‣'
    if sys.version_info < (3, 0):
        indicator = '>'

    def go_back(picker):
        return None, -1

    options, verbose_options = prepare_options(args)
    idx = kwargs.get('idx', 0)

    picker = Picker(verbose_options, title=prompt, indicator=indicator, default_index=idx)
    picker.register_custom_handler(ord('h'), go_back)
    picker.register_custom_handler(curses.KEY_LEFT, go_back)
    with stdout_redirected(sys.stderr):
        option, index = picker.start()
    if index == -1:
        raise QuestionnaireGoBack
    if kwargs.get('return_index', False):
        # `one` was called by a special client, e.g. `many`
        return index
    return options[index]
def many(prompt, *args, **kwargs):
    """Calls `pick` in a while loop to allow user to pick many options.
    Returns a list of chosen options.
    """
    def get_options(options, chosen):
        return [options[i] for i, c in enumerate(chosen) if c]

    def get_verbose_options(verbose_options, chosen):
        no, yes = ' ', '✔'
        if sys.version_info < (3, 3):
            no, yes = ' ', '@'
        opts = ['{} {}'.format(yes if c else no, verbose_options[i]) for i, c in enumerate(chosen)]
        return opts + ['{}{}'.format(' ', kwargs.get('done', 'done...'))]

    options, verbose_options = prepare_options(args)
    chosen = [False] * len(options)
    index = kwargs.get('idx', 0)

    default = kwargs.get('default', None)
    if isinstance(default, list):
        for idx in default:
            chosen[idx] = True
    if isinstance(default, int):
        chosen[default] = True

    while True:
        try:
            index = one(prompt, *get_verbose_options(verbose_options, chosen),
                        return_index=True, idx=index)
        except QuestionnaireGoBack:
            if any(chosen):
                raise QuestionnaireGoBack(0)
            else:
                raise QuestionnaireGoBack
        if index == len(options):
            return get_options(options, chosen)
        chosen[index] = not chosen[index]
def prepare_options(options):
    """Create options and verbose options from strings and non-string
    iterables in `options` array.
    """
    options_, verbose_options = [], []
    for option in options:
        if is_string(option):
            options_.append(option)
            verbose_options.append(option)
        else:
            options_.append(option[0])
            verbose_options.append(option[1])
    return options_, verbose_options
def raw(prompt, *args, **kwargs):
    """Calls input to allow user to input an arbitrary string. User can go
    back by entering the `go_back` string. Works in both Python 2 and 3.
    """
    go_back = kwargs.get('go_back', '<')
    type_ = kwargs.get('type', str)
    default = kwargs.get('default', '')
    with stdout_redirected(sys.stderr):
        while True:
            try:
                if kwargs.get('secret', False):
                    answer = getpass.getpass(prompt)
                elif sys.version_info < (3, 0):
                    answer = raw_input(prompt)
                else:
                    answer = input(prompt)
                if not answer:
                    answer = default
                if answer == go_back:
                    raise QuestionnaireGoBack
                return type_(answer)
            except ValueError:
                eprint('\n`{}` is not a valid `{}`\n'.format(answer, type_))
@contextmanager  # decorator assumed: used via `with stdout_redirected(...)` in the prompters above
def stdout_redirected(to):
    """Lifted from: https://stackoverflow.com/questions/4675728/redirect-stdout-to-a-file-in-python

    This is the only way I've found to redirect stdout with curses. This way
    the output from questionnaire can be piped to another program, without
    piping what's written to the terminal by the prompters.
    """
    stdout = sys.stdout
    stdout_fd = fileno(stdout)
    # copy stdout_fd before it is overwritten
    with os.fdopen(os.dup(stdout_fd), 'wb') as copied:
        stdout.flush()  # flush library buffers that dup2 knows nothing about
        try:
            os.dup2(fileno(to), stdout_fd)  # $ exec >&to
        except ValueError:  # filename
            with open(to, 'wb') as to_file:
                os.dup2(to_file.fileno(), stdout_fd)  # $ exec > to
        try:
            yield stdout  # allow code to be run with the redirected stdout
        finally:
            # restore stdout to its previous value
            stdout.flush()
            os.dup2(copied.fileno(), stdout_fd)
def exit_on_keyboard_interrupt(f):
    """Decorator that allows user to exit script by sending a keyboard interrupt
    (ctrl + c) without raising an exception.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        raise_exception = kwargs.pop('raise_exception', False)
        try:
            return f(*args, **kwargs)
        except KeyboardInterrupt:
            if not raise_exception:
                sys.exit()
            raise KeyboardInterrupt
    return wrapper
def get_operator(self, op):
    """Assigns function to the operators property of the instance.
    """
    if op in self.OPERATORS:
        return self.OPERATORS.get(op)
    try:
        n_args = len(inspect.getargspec(op)[0])
        if n_args != 2:
            raise TypeError
    except:
        eprint('Error: invalid operator function. Operators must accept two args.')
        raise
    else:
        return op
def assign_prompter(self, prompter):
    """If you want to change the core prompters registry, you can
    override this method in a Question subclass.
    """
    if is_string(prompter):
        if prompter not in prompters:
            eprint("Error: '{}' is not a core prompter".format(prompter))
            sys.exit()
        self.prompter = prompters[prompter]
    else:
        self.prompter = prompter
def add(self, *args, **kwargs):
    """Add a Question instance to the questions dict. Each key points to a
    list of Question instances with that key. Use the `question` kwarg to
    pass a Question instance if you want, or pass in the same args you
    would pass to instantiate a question.
    """
    if 'question' in kwargs and isinstance(kwargs['question'], Question):
        question = kwargs['question']
    else:
        question = Question(*args, **kwargs)
    self.questions.setdefault(question.key, []).append(question)
    return question
def ask(self, error=None):
    """Asks the next question in the questionnaire and returns the answer,
    unless user goes back.
    """
    q = self.next_question
    if q is None:
        return
    try:
        answer = q.prompter(self.get_prompt(q, error), *q.prompter_args, **q.prompter_kwargs)
    except QuestionnaireGoBack as e:
        steps = e.args[0] if e.args else 1
        if steps == 0:
            self.ask()  # user can redo current question even if `can_go_back` is `False`
            return
        self.go_back(steps)
    else:
        if q._validate:
            error = q._validate(answer)
            if error:
                self.ask(error)
                return
        if q._transform:
            answer = q._transform(answer)
        self.answers[q.key] = answer
        return answer
def next_question(self):
    """Returns the next `Question` in the questionnaire, or `None` if there
    are no questions left. Returns first question for whose key there is no
    answer and for which condition is satisfied, or for which there is no
    condition.
    """
    for key, questions in self.questions.items():
        if key in self.answers:
            continue
        for question in questions:
            if self.check_condition(question._condition):
                return question
    return None
def check_condition(self, condition):
    """Helper that returns True if condition is satisfied/doesn't exist.
    """
    if not condition:
        return True
    for c in condition.conditions:
        key, value, operator = c
        if not operator(self.answers[key], value):
            return False
    return True
def go_back(self, n=1):
    """Move `n` questions back in the questionnaire by removing the last `n` answers.
    """
    if not self.can_go_back:
        return
    N = max(len(self.answers) - abs(n), 0)
    self.answers = OrderedDict(islice(self.answers.items(), N))
def format_answers(self, fmt='obj'):
    """Formats answers depending on `fmt`.
    """
    fmts = ('obj', 'array', 'plain')
    if fmt not in fmts:
        eprint("Error: '{}' not in {}".format(fmt, fmts))
        return

    def stringify(val):
        if type(val) in (list, tuple):
            return ', '.join(str(e) for e in val)
        return val

    if fmt == 'obj':
        return json.dumps(self.answers)
    elif fmt == 'array':
        answers = [[k, v] for k, v in self.answers.items()]
        return json.dumps(answers)
    elif fmt == 'plain':
        answers = '\n'.join('{}: {}'.format(k, stringify(v)) for k, v in self.answers.items())
        return answers
def answer_display(self, s=''):
    """Helper method for displaying the answers so far.
    """
    padding = len(max(self.questions.keys(), key=len)) + 5
    for key in list(self.answers.keys()):
        s += '{:>{}} : {}\n'.format(key, padding, self.answers[key])
    return s
def add_intent(self, name, lines, reload_cache=False):
    """
    Creates a new intent, optionally checking the cache first

    Args:
        name (str): The associated name of the intent
        lines (list<str>): All the sentences that should activate the intent
        reload_cache: Whether to ignore cached intent if exists
    """
    self.intents.add(name, lines, reload_cache)
    self.padaos.add_intent(name, lines)
    self.must_train = True
def add_entity(self, name, lines, reload_cache=False):
    """
    Adds an entity that matches the given lines.

    Example:
        self.add_intent('weather', ['will it rain on {weekday}?'])
        self.add_entity('{weekday}', ['monday', 'tuesday', 'wednesday'])  # ...

    Args:
        name (str): The name of the entity
        lines (list<str>): Lines of example extracted entities
        reload_cache (bool): Whether to refresh all of cache
    """
    Entity.verify_name(name)
    self.entities.add(Entity.wrap_name(name), lines, reload_cache)
    self.padaos.add_entity(name, lines)
    self.must_train = True
def load_entity(self, name, file_name, reload_cache=False):
    """
    Loads an entity, optionally checking the cache first

    Args:
        name (str): The associated name of the entity
        file_name (str): The location of the entity file
        reload_cache (bool): Whether to refresh all of cache
    """
    Entity.verify_name(name)
    self.entities.load(Entity.wrap_name(name), file_name, reload_cache)
    with open(file_name) as f:
        self.padaos.add_entity(name, f.read().split('\n'))
    self.must_train = True
def load_intent(self, name, file_name, reload_cache=False):
    """
    Loads an intent, optionally checking the cache first

    Args:
        name (str): The associated name of the intent
        file_name (str): The location of the intent file
        reload_cache (bool): Whether to refresh all of cache
    """
    self.intents.load(name, file_name, reload_cache)
    with open(file_name) as f:
        self.padaos.add_intent(name, f.read().split('\n'))
    self.must_train = True
def remove_intent(self, name):
    """Unload an intent"""
    self.intents.remove(name)
    self.padaos.remove_intent(name)
    self.must_train = True
def remove_entity(self, name):
    """Unload an entity"""
    self.entities.remove(name)
    self.padaos.remove_entity(name)
def train(self, debug=True, force=False, single_thread=False, timeout=20):
    """
    Trains all the loaded intents that need to be updated.
    If a cache file exists with the same hash as the intent file,
    the intent will not be trained and just loaded from file.

    Args:
        debug (bool): Whether to print a message to stdout each time a new intent is trained
        force (bool): Whether to force training if already finished
        single_thread (bool): Whether to force running in a single thread
        timeout (float): Seconds before cancelling training
    Returns:
        bool: True if training succeeded without timeout
    """
    if not self.must_train and not force:
        return
    self.padaos.compile()
    self.train_thread = Thread(target=self._train, kwargs=dict(
        debug=debug,
        single_thread=single_thread,
        timeout=timeout
    ), daemon=True)
    self.train_thread.start()
    self.train_thread.join(timeout)

    self.must_train = False
    return not self.train_thread.is_alive()
def train_subprocess(self, *args, **kwargs):
    """
    Trains in a subprocess, which provides a timeout and guarantees everything
    shuts down properly

    Args:
        See <train>
    Returns:
        bool: True for success, False if timed out
    """
    ret = call([
        sys.executable, '-m', 'padatious', 'train', self.cache_dir,
        '-d', json.dumps(self.serialized_args),
        '-a', json.dumps(args),
        '-k', json.dumps(kwargs),
    ])
    if ret == 2:
        raise TypeError('Invalid train arguments: {} {}'.format(args, kwargs))
    data = self.serialized_args
    self.clear()
    self.apply_training_args(data)
    self.padaos.compile()
    if ret == 0:
        self.must_train = False
        return True
    elif ret == 10:  # timeout
        return False
    else:
        raise ValueError('Training failed and returned code: {}'.format(ret))
def calc_intents(self, query):
    """
    Tests all the intents against the query and returns
    data on how well each one matched against the query

    Args:
        query (str): Input sentence to test against intents
    Returns:
        list<MatchData>: List of intent matches
    See calc_intent() for a description of the returned MatchData
    """
    if self.must_train:
        self.train()
    intents = {} if self.train_thread and self.train_thread.is_alive() else {
        i.name: i for i in self.intents.calc_intents(query, self.entities)
    }
    sent = tokenize(query)
    for perfect_match in self.padaos.calc_intents(query):
        name = perfect_match['name']
        intents[name] = MatchData(name, sent, matches=perfect_match['entities'], conf=1.0)
    return list(intents.values())
def calc_intent(self, query):
    """
    Tests all the intents against the query and returns
    match data of the best intent

    Args:
        query (str): Input sentence to test against intents
    Returns:
        MatchData: Best intent match
    """
    matches = self.calc_intents(query)
    if len(matches) == 0:
        return MatchData('', '')
    best_match = max(matches, key=lambda x: x.conf)
    best_matches = (match for match in matches if match.conf == best_match.conf)
    return min(best_matches, key=lambda x: sum(map(len, x.matches.values())))
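A minimal end-to-end sketch of how the container these methods belong to is typically driven; it assumes the public padatious `IntentContainer` API and a writable cache directory named `intent_cache`:

    from padatious import IntentContainer

    container = IntentContainer('intent_cache')
    container.add_intent('greet', ['hi', 'hello', 'how are you'])
    container.add_intent('goodbye', ['bye', 'see you later'])
    container.train()

    match = container.calc_intent('hello there')
    print(match.name, match.conf)  # e.g. 'greet' with a confidence near 1.0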
def expand(self):
    """
    Creates a combination of all sub-sentences.

    Returns:
        List<List<str>>: A list with all subsentence expansions combined in
            every possible way
    """
    old_expanded = [[]]
    for sub in self._tree:
        sub_expanded = sub.expand()
        new_expanded = []
        while len(old_expanded) > 0:
            sentence = old_expanded.pop()
            for new in sub_expanded:
                new_expanded.append(sentence + new)
        old_expanded = new_expanded
    return old_expanded
def expand(self):
    """
    Returns all of its options as separated sub-sentences.

    Returns:
        List<List<str>>: A list containing the sentences created by all
            expansions of its sub-sentences
    """
    options = []
    for option in self._tree:
        options.extend(option.expand())
    return options
def _parse_expr(self):
    """
    Generate sentence token trees from the current position to
    the next closing parentheses / end of the list and return it
    ['1', '(', '2', '|', '3', ')'] -> ['1', [['2'], ['3']]]
    ['2', '|', '3'] -> [['2'], ['3']]
    """
    # List of all generated sentences
    sentence_list = []
    # Currently active sentence
    cur_sentence = []
    sentence_list.append(Sentence(cur_sentence))
    # Determine which form the current expression has
    while self._current_position < len(self.tokens):
        cur = self.tokens[self._current_position]
        self._current_position += 1
        if cur == '(':
            # Parse the subexpression
            subexpr = self._parse_expr()
            # Check if the subexpression only has one branch
            # -> If so, append "(" and ")" and add it as is
            normal_brackets = False
            if len(subexpr.tree()) == 1:
                normal_brackets = True
                cur_sentence.append(Word('('))
            # add it to the sentence
            cur_sentence.append(subexpr)
            if normal_brackets:
                cur_sentence.append(Word(')'))
        elif cur == '|':
            # Begin parsing a new sentence
            cur_sentence = []
            sentence_list.append(Sentence(cur_sentence))
        elif cur == ')':
            # End parsing the current subexpression
            break
        # TODO anything special about {sth}?
        else:
            cur_sentence.append(Word(cur))
    return Options(sentence_list)
def _train_and_save(obj, cache, data, print_updates):
    """Internal pickleable function used to train objects in another process"""
    obj.train(data)
    if print_updates:
        print('Regenerated ' + obj.name + '.')
    obj.save(cache)
def wrap_name(name):
    """Wraps SkillName:entity into SkillName:{entity}"""
    if ':' in name:
        parts = name.split(':')
        intent_name, ent_name = parts[0], parts[1:]
        return intent_name + ':{' + ':'.join(ent_name) + '}'
    else:
        return '{' + name + '}'
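For example (the skill and entity names are hypothetical):

    wrap_name('TimeSkill:daytype')  # -> 'TimeSkill:{daytype}'
    wrap_name('daytype')            # -> '{daytype}'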
def lines_hash(lines):
    """
    Creates a unique binary id for the given lines

    Args:
        lines (list<str>): List of strings that should be collectively hashed
    Returns:
        bytearray: Binary hash
    """
    x = xxh32()
    for i in lines:
        x.update(i.encode())
    return x.digest()
def tokenize(sentence):
    """
    Converts a single sentence into a list of individual significant units

    Args:
        sentence (str): Input string, e.g. 'This is a sentence.'
    Returns:
        list<str>: List of tokens, e.g. ['this', 'is', 'a', 'sentence']
    """
    tokens = []

    class Vars:
        start_pos = -1
        last_type = 'o'

    def update(c, i):
        if c.isalpha() or c in '-{}':
            t = 'a'
        elif c.isdigit() or c == '#':
            t = 'n'
        elif c.isspace():
            t = 's'
        else:
            t = 'o'

        if t != Vars.last_type or t == 'o':
            if Vars.start_pos >= 0:
                token = sentence[Vars.start_pos:i].lower()
                if token not in '.!?':
                    tokens.append(token)
            Vars.start_pos = -1 if t == 's' else i
        Vars.last_type = t

    for i, char in enumerate(sentence):
        update(char, i)
    update(' ', len(sentence))
    return tokens
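Tracing the little state machine above on a short input gives, for example:

    tokenize('Hello, world! 123')  # -> ['hello', ',', 'world', '123']

Terminal punctuation ('.', '!', '?') is dropped, while other symbols are kept as their own tokens.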
def resolve_conflicts(inputs, outputs):
    """
    Checks for duplicate inputs and if there are any,
    remove one and set the output to the max of the two outputs

    Args:
        inputs (list<list<float>>): Array of input vectors
        outputs (list<list<float>>): Array of output vectors
    Returns:
        tuple<inputs, outputs>: The modified inputs and outputs
    """
    data = {}
    for inp, out in zip(inputs, outputs):
        tup = tuple(inp)
        if tup in data:
            data[tup].append(out)
        else:
            data[tup] = [out]

    inputs, outputs = [], []
    for inp, outs in data.items():
        inputs.append(list(inp))
        combined = [0] * len(outs[0])
        for i in range(len(combined)):
            combined[i] = max(j[i] for j in outs)
        outputs.append(combined)
    return inputs, outputs
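A small worked example: duplicate input rows collapse to one row whose output is the element-wise maximum (row order shown assumes insertion-ordered dicts, i.e. Python 3.7+):

    inputs = [[1.0, 0.0], [1.0, 0.0], [0.0, 1.0]]
    outputs = [[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
    resolve_conflicts(inputs, outputs)
    # -> ([[1.0, 0.0], [0.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]])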
def main(src, pyi_dir, target_dir, incremental, quiet, replace_any, hg, traceback):
    """Re-apply type annotations from .pyi stubs to your codebase."""
    Config.incremental = incremental
    Config.replace_any = replace_any
    returncode = 0
    for src_entry in src:
        for file, error, exc_type, tb in retype_path(
            Path(src_entry),
            pyi_dir=Path(pyi_dir),
            targets=Path(target_dir),
            src_explicitly_given=True,
            quiet=quiet,
            hg=hg,
        ):
            print(f'error: {file}: {error}', file=sys.stderr)
            if traceback:
                print('Traceback (most recent call last):', file=sys.stderr)
                for line in tb:
                    print(line, file=sys.stderr, end='')
                print(f'{exc_type.__name__}: {error}', file=sys.stderr)
            returncode += 1
    if not src and not quiet:
        print('warning: no sources given', file=sys.stderr)

    # According to http://tldp.org/LDP/abs/html/index.html starting with 126
    # we have special returncodes.
    sys.exit(min(returncode, 125))
def retype_path(
    src, pyi_dir, targets, *, src_explicitly_given=False, quiet=False, hg=False
):
    """Recursively retype files or directories given. Generate errors."""
    if src.is_dir():
        for child in src.iterdir():
            if child == pyi_dir or child == targets:
                continue
            yield from retype_path(
                child, pyi_dir / src.name, targets / src.name, quiet=quiet, hg=hg,
            )
    elif src.suffix == '.py' or src_explicitly_given:
        try:
            retype_file(src, pyi_dir, targets, quiet=quiet, hg=hg)
        except Exception as e:
            yield (
                src,
                str(e),
                type(e),
                traceback.format_tb(e.__traceback__),
            )
def retype_file(src, pyi_dir, targets, *, quiet=False, hg=False):
    """Retype `src`, finding types in `pyi_dir`. Save in `targets`.

    The file should remain formatted exactly as it was before, save for:
    - annotations
    - additional imports needed to satisfy annotations
    - additional module-level names needed to satisfy annotations

    Type comments in sources are normalized to type annotations.
    """
    with tokenize.open(src) as src_buffer:
        src_encoding = src_buffer.encoding
        src_node = lib2to3_parse(src_buffer.read())
    try:
        with open((pyi_dir / src.name).with_suffix('.pyi')) as pyi_file:
            pyi_txt = pyi_file.read()
    except FileNotFoundError:
        if not quiet:
            print(
                f'warning: .pyi file for source {src} not found in {pyi_dir}',
                file=sys.stderr,
            )
    else:
        pyi_ast = ast3.parse(pyi_txt)
        assert isinstance(pyi_ast, ast3.Module)
        reapply_all(pyi_ast.body, src_node)
    fix_remaining_type_comments(src_node)
    targets.mkdir(parents=True, exist_ok=True)
    with open(targets / src.name, 'w', encoding=src_encoding) as target_file:
        target_file.write(lib2to3_unparse(src_node, hg=hg))
    return targets / src.name
def lib2to3_parse(src_txt):
    """Given a string with source, return the lib2to3 Node."""
    grammar = pygram.python_grammar_no_print_statement
    drv = driver.Driver(grammar, pytree.convert)
    if src_txt[-1] != '\n':
        nl = '\r\n' if '\r\n' in src_txt[:1024] else '\n'
        src_txt += nl
    try:
        result = drv.parse_string(src_txt, True)
    except ParseError as pe:
        lineno, column = pe.context[1]
        lines = src_txt.splitlines()
        try:
            faulty_line = lines[lineno - 1]
        except IndexError:
            faulty_line = "<line number missing in source>"
        raise ValueError(f"Cannot parse: {lineno}:{column}: {faulty_line}") from None

    if isinstance(result, Leaf):
        result = Node(syms.file_input, [result])
    return result
def lib2to3_unparse(node, *, hg=False):
    """Given a lib2to3 node, return its string representation."""
    code = str(node)
    if hg:
        from retype_hgext import apply_job_security
        code = apply_job_security(code)
    return code
def reapply_all(ast_node, lib2to3_node):
    """Reapplies the typed_ast node into the lib2to3 tree.

    Also does post-processing. This is done in reverse order to enable placing
    TypeVars and aliases that depend on one another.
    """
    late_processing = reapply(ast_node, lib2to3_node)
    for lazy_func in reversed(late_processing):
        lazy_func()
def fix_remaining_type_comments(node):
    """Converts type comments in `node` to proper annotated assignments."""
    assert node.type == syms.file_input

    last_n = None
    for n in node.post_order():
        if last_n is not None:
            if n.type == token.NEWLINE and is_assignment(last_n):
                fix_variable_annotation_type_comment(n, last_n)
            elif n.type == syms.funcdef and last_n.type == syms.suite:
                fix_signature_annotation_type_comment(n, last_n, offset=1)
            elif n.type == syms.async_funcdef and last_n.type == syms.suite:
                fix_signature_annotation_type_comment(n, last_n, offset=2)
        last_n = n
def get_function_signature(fun, *, is_method=False):
    """Returns (args, returns).

    `args` is ast3.arguments, `returns` is the return type AST node. The kicker
    about this function is that it pushes type comments into proper annotation
    fields, standardizing type handling.
    """
    args = fun.args
    returns = fun.returns
    if fun.type_comment:
        try:
            args_tc, returns_tc = parse_signature_type_comment(fun.type_comment)
            if returns and returns_tc:
                raise ValueError(
                    "using both a type annotation and a type comment is not allowed"
                )
            returns = returns_tc
            copy_arguments_to_annotations(args, args_tc, is_method=is_method)
        except (SyntaxError, ValueError) as exc:
            raise ValueError(
                f"Annotation problem in function {fun.name!r}: " +
                f"{fun.lineno}:{fun.col_offset + 1}: {exc}"
            )
    copy_type_comments_to_annotations(args)
    return args, returns
def parse_signature_type_comment(type_comment):
    """Parse the fugly signature type comment into AST nodes.

    Caveats: ASTifying **kwargs is impossible with the current grammar so we
    hack it into unary subtraction (to differentiate from Starred in vararg).

    For example from:
    "(str, int, *int, **Any) -> 'SomeReturnType'"

    To:
    ([ast3.Name, ast.Name, ast3.Name, ast.Name], ast3.Str)
    """
    try:
        result = ast3.parse(type_comment, '<func_type>', 'func_type')
    except SyntaxError:
        raise ValueError(f"invalid function signature type comment: {type_comment!r}")

    assert isinstance(result, ast3.FunctionType)
    if len(result.argtypes) == 1:
        argtypes = result.argtypes[0]
    else:
        argtypes = result.argtypes
    return argtypes, result.returns
def parse_type_comment(type_comment):
    """Parse a type comment string into AST nodes."""
    try:
        result = ast3.parse(type_comment, '<type_comment>', 'eval')
    except SyntaxError:
        raise ValueError(f"invalid type comment: {type_comment!r}") from None

    assert isinstance(result, ast3.Expression)
    return result.body
def parse_arguments(arguments):
    """parse_arguments('(a, b, *, c=False, **d)') -> ast3.arguments

    Parse a string with function arguments into an AST node.
    """
    arguments = f"def f{arguments}: ..."
    try:
        result = ast3.parse(arguments, '<arguments>', 'exec')
    except SyntaxError:
        raise ValueError(f"invalid arguments: {arguments!r}") from None

    assert isinstance(result, ast3.Module)
    assert len(result.body) == 1
    assert isinstance(result.body[0], ast3.FunctionDef)
    args = result.body[0].args
    copy_type_comments_to_annotations(args)
    return args
def copy_arguments_to_annotations(args, type_comment, *, is_method=False):
    """Copies AST nodes from `type_comment` into the ast3.arguments in `args`.

    Does validation of argument count (allowing for untyped self/cls) and type
    (vararg and kwarg).
    """
    if isinstance(type_comment, ast3.Ellipsis):
        return

    expected = len(args.args)
    if args.vararg:
        expected += 1
    expected += len(args.kwonlyargs)
    if args.kwarg:
        expected += 1
    actual = len(type_comment) if isinstance(type_comment, list) else 1
    if expected != actual:
        if is_method and expected - actual == 1:
            pass  # fine, we're just skipping `self`, `cls`, etc.
        else:
            raise ValueError(
                f"number of arguments in type comment doesn't match; " +
                f"expected {expected}, found {actual}"
            )

    if isinstance(type_comment, list):
        next_value = type_comment.pop
    else:
        # If there's just one value, only one of the loops and ifs below will
        # be populated. We ensure this with the expected/actual length check
        # above.
        _tc = type_comment

        def next_value(index: int = 0) -> ast3.expr:
            return _tc

    for arg in args.args[expected - actual:]:
        ensure_no_annotation(arg.annotation)
        arg.annotation = next_value(0)

    if args.vararg:
        ensure_no_annotation(args.vararg.annotation)
        args.vararg.annotation = next_value(0)

    for arg in args.kwonlyargs:
        ensure_no_annotation(arg.annotation)
        arg.annotation = next_value(0)

    if args.kwarg:
        ensure_no_annotation(args.kwarg.annotation)
        args.kwarg.annotation = next_value(0)
def copy_type_comments_to_annotations(args):
    """Copies argument type comments from the legacy long form to annotations
    in the entire function signature.
    """
    for arg in args.args:
        copy_type_comment_to_annotation(arg)

    if args.vararg:
        copy_type_comment_to_annotation(args.vararg)

    for arg in args.kwonlyargs:
        copy_type_comment_to_annotation(arg)

    if args.kwarg:
        copy_type_comment_to_annotation(args.kwarg)
def maybe_replace_any_if_equal(name, expected, actual):
    """Return the type given in `expected`.

    Raise ValueError if `expected` isn't equal to `actual`.  If --replace-any is
    used, the Any type in `actual` is considered equal.

    The implementation is naively checking if the string representation of
    `actual` is one of "Any", "typing.Any", or "t.Any".  This is done for two
    reasons:

    1. I'm lazy.
    2. We want people to be able to explicitly state that they want Any without
       it being replaced.  This way they can use an alias.
    """
    is_equal = expected == actual
    if not is_equal and Config.replace_any:
        actual_str = minimize_whitespace(str(actual))
        if actual_str and actual_str[0] in {'"', "'"}:
            actual_str = actual_str[1:-1]
        is_equal = actual_str in {'Any', 'typing.Any', 't.Any'}

    if not is_equal:
        expected_annotation = minimize_whitespace(str(expected))
        actual_annotation = minimize_whitespace(str(actual))
        raise ValueError(
            f"incompatible existing {name}. " +
            f"Expected: {expected_annotation!r}, actual: {actual_annotation!r}"
        )

    return expected or actual
def remove_function_signature_type_comment(body):
    """Removes the legacy signature type comment, leaving other comments if any."""
    for node in body.children:
        if node.type == token.INDENT:
            prefix = node.prefix.lstrip()
            if prefix.startswith('# type: '):
                node.prefix = '\n'.join(prefix.split('\n')[1:])
            break
def flatten_some(children):
    """Generates nodes or leaves, unpacking bodies of try:except:finally: statements."""
    for node in children:
        if node.type in (syms.try_stmt, syms.suite):
            yield from flatten_some(node.children)
        else:
            yield node
def pop_param(params):
    """Pops the parameter and the "remainder" (comma, default value).

    Returns a tuple of ('name', default) or (_star, 'name') or (_dstar, 'name').
    """
    default = None

    name = params.pop(0)
    if name in (_star, _dstar):
        default = params.pop(0)
        if default == _comma:
            return name, default
    try:
        remainder = params.pop(0)
        if remainder == _eq:
            default = params.pop(0)
            remainder = params.pop(0)
        if remainder != _comma:
            raise ValueError(f"unexpected token: {remainder}")
    except IndexError:
        pass
    return name, default
def get_offset_and_prefix(body, skip_assignments=False):
    """Returns the offset after which a statement can be inserted to the `body`.

    This offset is calculated to come after all imports, and maybe existing
    (possibly annotated) assignments if `skip_assignments` is True.

    Also returns the indentation prefix that should be applied to the inserted
    node.
    """
    assert body.type in (syms.file_input, syms.suite)

    _offset = 0
    prefix = ''
    for _offset, child in enumerate(body.children):
        if child.type == syms.simple_stmt:
            stmt = child.children[0]
            if stmt.type == syms.expr_stmt:
                expr = stmt.children
                if not skip_assignments:
                    break
                if (
                    len(expr) != 2 or
                    expr[0].type != token.NAME or
                    expr[1].type != syms.annassign or
                    _eq in expr[1].children
                ):
                    break
            elif stmt.type not in (syms.import_name, syms.import_from, token.STRING):
                break
        elif child.type == token.INDENT:
            assert isinstance(child, Leaf)
            prefix = child.value
        elif child.type != token.NEWLINE:
            break

    prefix, child.prefix = child.prefix, prefix
    return _offset, prefix
def fix_line_numbers(body):
    r"""Recomputes all line numbers based on the number of \n characters."""
    maxline = 0
    for node in body.pre_order():
        maxline += node.prefix.count('\n')
        if isinstance(node, Leaf):
            node.lineno = maxline
            maxline += str(node.value).count('\n')
def new(n, prefix=None):
    """lib2to3's AST requires unique objects as children."""
    if isinstance(n, Leaf):
        return Leaf(n.type, n.value, prefix=n.prefix if prefix is None else prefix)

    # this is hacky, we assume complex nodes are just being reused once from the
    # original AST.
    n.parent = None
    if prefix is not None:
        n.prefix = prefix
    return n
def apply_job_security(code):
    """Treat input `code` like Python 2 (implicit strings are byte literals).

    The implementation is horribly inefficient but the goal is to be compatible
    with what Mercurial does at runtime.
    """
    buf = io.BytesIO(code.encode('utf8'))
    tokens = tokenize.tokenize(buf.readline)
    # NOTE: by setting the fullname to `mercurial.pycompat` below, we're
    # ensuring that hg-specific pycompat imports aren't inserted to the code.
    data = tokenize.untokenize(replacetokens(list(tokens), 'mercurial.pycompat'))
    return cast(str, data.decode('utf8'))
def _load_info(self):
    '''Get user info for GBDX S3, put into instance vars for convenience.

       Args:
           None.

       Returns:
           Dictionary with S3 access key, S3 secret key, S3 session token,
           user bucket and user prefix (dict).
    '''
    url = '%s/prefix?duration=36000' % self.base_url
    r = self.gbdx_connection.get(url)
    r.raise_for_status()
    return r.json()
def download(self, location, local_dir='.'):
    '''Download content from bucket/prefix/location.
       Location can be a directory or a file (e.g., my_dir or my_dir/my_image.tif)
       If location is a directory, all files in the directory are downloaded.
       If it is a file, then that file is downloaded.

       Args:
           location (str): S3 location within prefix.
           local_dir (str): Local directory where file(s) will be stored.
                            Default is here.
    '''
    self.logger.debug('Getting S3 info')
    bucket = self.info['bucket']
    prefix = self.info['prefix']

    self.logger.debug('Connecting to S3')
    s3conn = self.client

    # remove leading and/or trailing slash from location
    location = location.strip('/')

    self.logger.debug('Downloading contents')
    objects = s3conn.list_objects(Bucket=bucket, Prefix=(prefix + '/' + location))

    if 'Contents' not in objects:
        raise ValueError('Download target {}/{}/{} was not found or inaccessible.'.format(bucket, prefix, location))

    for s3key in objects['Contents']:
        key = s3key['Key']

        # skip directory keys
        if not key or key.endswith('/'):
            continue

        # get path to each file
        filepath = key.replace(prefix + '/' + location, '', 1).lstrip('/')
        filename = key.split('/')[-1]
        # self.logger.debug(filename)
        file_dir = filepath.split('/')[:-1]
        file_dir = '/'.join(file_dir)
        full_dir = os.path.join(local_dir, file_dir)

        # make sure directory exists
        if not os.path.isdir(full_dir):
            os.makedirs(full_dir)

        # download file
        s3conn.download_file(bucket, key, os.path.join(full_dir, filename))

    self.logger.debug('Done!')
def delete(self, location):
    '''Delete content in bucket/prefix/location.
       Location can be a directory or a file (e.g., my_dir or my_dir/my_image.tif)
       If location is a directory, all files in the directory are deleted.
       If it is a file, then that file is deleted.

       Args:
           location (str): S3 location within prefix. Can be a directory or
                           a file (e.g., my_dir or my_dir/my_image.tif).
    '''
    bucket = self.info['bucket']
    prefix = self.info['prefix']

    self.logger.debug('Connecting to S3')
    s3conn = self.client

    # remove leading and/or trailing slash from location
    if location[0] == '/':
        location = location[1:]
    if location[-1] == '/':
        location = location[:-1]

    self.logger.debug('Deleting contents')

    for s3key in s3conn.list_objects(Bucket=bucket, Prefix=(prefix + '/' + location))['Contents']:
        s3conn.delete_object(Bucket=bucket, Key=s3key['Key'])

    self.logger.debug('Done!')
def upload(self, local_file, s3_path=None):
    '''
    Upload files to your DG S3 bucket/prefix.

    Args:
        local_file (str): a path to a local file to upload, directory structures are not mirrored
        s3_path: a key (location) on s3 to upload the file to

    Returns:
        str: s3 path file was saved to

    Examples:
        >>> upload('path/to/image.tif')
        'mybucket/myprefix/image.tif'
        >>> upload('./images/image.tif')
        'mybucket/myprefix/image.tif'
        >>> upload('./images/image.tif', s3_path='images/image.tif')
        'mybucket/myprefix/images/image.tif'
    '''
    if not os.path.exists(local_file):
        raise Exception(local_file + " does not exist.")
    if s3_path is None:
        s3_path = os.path.basename(local_file)
    bucket = self.info['bucket']
    prefix = self.info['prefix']

    self.logger.debug('Connecting to S3')
    s3conn = self.client
    self.logger.debug('Uploading file {}'.format(local_file))
    s3conn.upload_file(local_file, bucket, prefix + '/' + s3_path)
    self.logger.debug('Done!')
    return '{}/{}/{}'.format(bucket, prefix, s3_path)
def rgb(self, **kwargs):
    ''' Convert the image to a 3 band RGB for plotting

        This method shares the same arguments as plot(). It will perform visual
        adjustment on the image and prepare the data for plotting in MatplotLib.
        Values are converted to an appropriate precision and the axis order is
        changed to put the band axis last.
    '''
    if "bands" in kwargs:
        use_bands = kwargs["bands"]
        assert len(use_bands) == 3, 'Plot method only supports single or 3-band outputs'
        del kwargs["bands"]
    else:
        use_bands = self._rgb_bands
    if kwargs.get('blm') == True:
        return self.histogram_match(use_bands, **kwargs)
    # if not specified or DRA'ed, default to a 2-98 stretch
    if "histogram" not in kwargs:
        if "stretch" not in kwargs:
            if not self.options.get('dra'):
                kwargs['stretch'] = [2, 98]
        return self.histogram_stretch(use_bands, **kwargs)
    elif kwargs["histogram"] == "equalize":
        return self.histogram_equalize(use_bands, **kwargs)
    elif kwargs["histogram"] == "match":
        return self.histogram_match(use_bands, **kwargs)
    elif kwargs["histogram"] == "minmax":
        return self.histogram_stretch(use_bands, stretch=[0, 100], **kwargs)
    # DRA'ed images should be left alone if not explicitly adjusted
    elif kwargs["histogram"] == "ignore" or self.options.get('dra'):
        data = self._read(self[use_bands, ...], **kwargs)
        return np.rollaxis(data, 0, 3)
    else:
        raise KeyError('Unknown histogram parameter, use "equalize", "match", "minmax", or "ignore"')
def histogram_equalize(self, use_bands, **kwargs):
    ''' Equalize the histogram and normalize value range

        Equalization is on all three bands, not per-band'''
    data = self._read(self[use_bands, ...], **kwargs)
    data = np.rollaxis(data.astype(np.float32), 0, 3)
    flattened = data.flatten()
    if 0 in data:
        masked = np.ma.masked_values(data, 0).compressed()
        image_histogram, bin_edges = np.histogram(masked, 256)
    else:
        image_histogram, bin_edges = np.histogram(flattened, 256)
    bins = (bin_edges[:-1] + bin_edges[1:]) / 2.0
    cdf = image_histogram.cumsum()
    cdf = cdf / float(cdf[-1])
    image_equalized = np.interp(flattened, bins, cdf).reshape(data.shape)
    if 'stretch' in kwargs or 'gamma' in kwargs:
        return self._histogram_stretch(image_equalized, **kwargs)
    else:
        return image_equalized
def histogram_match(self, use_bands, blm_source=None, **kwargs):
    ''' Match the histogram to existing imagery '''
    assert has_rio, "To match image histograms please install rio_hist"
    data = self._read(self[use_bands, ...], **kwargs)
    data = np.rollaxis(data.astype(np.float32), 0, 3)
    if 0 in data:
        data = np.ma.masked_values(data, 0)
    bounds = self._reproject(box(*self.bounds), from_proj=self.proj, to_proj="EPSG:4326").bounds
    if blm_source == 'browse':
        from gbdxtools.images.browse_image import BrowseImage
        ref = BrowseImage(self.cat_id, bbox=bounds).read()
    else:
        from gbdxtools.images.tms_image import TmsImage
        tms = TmsImage(zoom=self._calc_tms_zoom(self.affine[0]), bbox=bounds, **kwargs)
        ref = np.rollaxis(tms.read(), 0, 3)
    out = np.dstack([rio_match(data[:, :, idx], ref[:, :, idx].astype(np.double) / 255.0)
                     for idx in range(data.shape[-1])])
    if 'stretch' in kwargs or 'gamma' in kwargs:
        return self._histogram_stretch(out, **kwargs)
    else:
        return out
def histogram_stretch(self, use_bands, **kwargs):
    ''' entry point for contrast stretching '''
    data = self._read(self[use_bands, ...], **kwargs)
    data = np.rollaxis(data.astype(np.float32), 0, 3)
    return self._histogram_stretch(data, **kwargs)
def _histogram_stretch(self, data, **kwargs):
    ''' perform a contrast stretch and/or gamma adjustment '''
    limits = {}
    # get the image min-max statistics
    for x in range(3):
        band = data[:, :, x]
        try:
            limits[x] = np.percentile(band, kwargs.get("stretch", [0, 100]))
        except IndexError:
            # this band has no dynamic range and cannot be stretched
            return data
    # compute the stretch
    for x in range(3):
        band = data[:, :, x]
        if 0 in band:
            band = np.ma.masked_values(band, 0).compressed()
        top = limits[x][1]
        bottom = limits[x][0]
        if top != bottom:  # catch divide by zero
            data[:, :, x] = (data[:, :, x] - bottom) / float(top - bottom) * 255.0
    data = np.clip(data, 0, 255).astype("uint8")
    # gamma adjust
    if "gamma" in kwargs:
        invGamma = 1.0 / kwargs['gamma']
        lut = np.array([((i / 255.0) ** invGamma) * 255
                        for i in np.arange(0, 256)]).astype("uint8")
        data = np.take(lut, data)
    return data
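A standalone look at the gamma lookup table used above (only numpy is assumed); gamma greater than 1 brightens the midtones while leaving the endpoints fixed:

    import numpy as np

    inv_gamma = 1.0 / 2.0  # gamma = 2.0
    lut = np.array([((i / 255.0) ** inv_gamma) * 255 for i in np.arange(0, 256)]).astype('uint8')
    lut[64]   # -> 127: a dark midtone is pushed toward the middle of the range
    lut[255]  # -> 255: the endpoints are unchanged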
def ndvi(self, **kwargs):
    """
    Calculates Normalized Difference Vegetation Index using NIR and Red of an image.

    Returns: numpy array with ndvi values
    """
    data = self._read(self[self._ndvi_bands, ...]).astype(np.float32)
    return (data[0, :, :] - data[1, :, :]) / (data[0, :, :] + data[1, :, :])
def ndwi(self):
    """
    Calculates Normalized Difference Water Index using Coastal and NIR2 bands for WV02, WV03.
    For Landsat8 and Sentinel2 calculated by using Green and NIR bands.

    Returns: numpy array of ndwi values
    """
    data = self._read(self[self._ndwi_bands, ...]).astype(np.float32)
    return (data[1, :, :] - data[0, :, :]) / (data[0, :, :] + data[1, :, :])
def plot(self, spec="rgb", **kwargs): ''' Plot the image with MatplotLib Plot sizing includes default borders and spacing. If the image is shown in Jupyter the outside whitespace will be automatically cropped to save size, resulting in a smaller sized image than expected. Histogram options: * 'equalize': performs histogram equalization on the image. * 'minmax': stretch the pixel range to the minimum and maximum input pixel values. Equivalent to stretch=[0,100]. * 'match': match the histogram to the Maps API imagery. Pass the additional keyword blm_source='browse' to match to the Browse Service (image thumbnail) instead. * 'ignore': Skip dynamic range adjustment, in the event the image is already correctly balanced and the values are in the correct range. Gamma values greater than 1 will brighten the image midtones, values less than 1 will darken the midtones. Plots generated with the histogram options of 'match' and 'equalize' can be combined with the stretch and gamma options. The stretch and gamma adjustments will be applied after the histogram adjustments. Args: w (float or int): width of plot in inches at 72 dpi, default is 10 h (float or int): height of plot in inches at 72 dpi, default is 10 title (str): Title to use on the plot fontsize (int): Size of title font, default is 22. Size is measured in points. bands (list): bands to use for plotting, such as bands=[4,2,1]. Defaults to the image's natural RGB bands. This option is useful for generating pseudocolor images when passed a list of three bands. If only a single band is provided, a colormapped plot will be generated instead. cmap (str): MatPlotLib colormap name to use for single band images. Default is colormap='Grey_R'. histogram (str): either 'equalize', 'minmax', 'match', or ignore stretch (list): stretch the histogram between two percentile values, default is [2,98] gamma (float): adjust image gamma, default is 1.0 ''' if self.shape[0] == 1 or ("bands" in kwargs and len(kwargs["bands"]) == 1): if "cmap" in kwargs: cmap = kwargs["cmap"] del kwargs["cmap"] else: cmap = "Greys_r" self._plot(tfm=self._single_band, cmap=cmap, **kwargs) else: if spec == "rgb" and self._has_token(**kwargs): self._plot(tfm=self.rgb, **kwargs) else: self._plot(tfm=getattr(self, spec), **kwargs)
def get_images_by_catid_and_aoi(self, catid, aoi_wkt): """ Retrieves the IDAHO image records associated with a given catid. Args: catid (str): The source catalog ID from the platform catalog. aoi_wkt (str): The well known text of the area of interest. Returns: results (json): The full catalog-search response for IDAHO images within the catID. """ self.logger.debug('Retrieving IDAHO metadata') # use the footprint to get the IDAHO id url = '%s/search' % self.base_url body = {"filters": ["catalogID = '%s'" % catid], "types": ["IDAHOImage"], "searchAreaWkt": aoi_wkt} r = self.gbdx_connection.post(url, data=json.dumps(body)) r.raise_for_status() if r.status_code == 200: results = r.json() numresults = len(results['results']) self.logger.debug('%s IDAHO images found associated with catid %s' % (numresults, catid)) return results
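# Hypothetical call sketch (not from the source): assumes configured GBDX
# credentials, that the Interface object exposes this class as `gbdx.idaho`,
# and placeholder catalog id / AOI values.
from gbdxtools import Interface

gbdx = Interface()
aoi = 'POLYGON ((2.28 48.85, 2.30 48.85, 2.30 48.87, 2.28 48.87, 2.28 48.85))'
results = gbdx.idaho.get_images_by_catid_and_aoi(catid='104001003A7A9B00', aoi_wkt=aoi)
print('%s IDAHO images intersect the AOI' % len(results['results']))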
def get_images_by_catid(self, catid): """ Retrieves the IDAHO image records associated with a given catid. Args: catid (str): The source catalog ID from the platform catalog. Returns: results (json): The full catalog-search response for IDAHO images within the catID. """ self.logger.debug('Retrieving IDAHO metadata') # get the footprint of the catid's strip footprint = self.catalog.get_strip_footprint_wkt(catid) # try to convert from multipolygon to polygon: try: footprint = from_wkt(footprint).geoms[0].wkt except: pass if not footprint: self.logger.debug("""Cannot get IDAHO metadata for strip %s, footprint not found""" % catid) return None return self.get_images_by_catid_and_aoi(catid=catid, aoi_wkt=footprint)
def describe_images(self, idaho_image_results):
    """Describe the result set of a catalog search for IDAHO images.

    Args:
        idaho_image_results (dict): Result set of catalog search.

    Returns:
        description (dict): Image metadata from the result set, keyed by catalog id and part number.
    """
    results = idaho_image_results['results']

    # filter only idaho images:
    results = [r for r in results if 'IDAHOImage' in r['type']]
    self.logger.debug('Describing %s IDAHO images.' % len(results))

    # figure out which catids are represented in this set of images
    catids = set([r['properties']['catalogID'] for r in results])

    description = {}

    for catid in catids:
        # images associated with a single catid
        description[catid] = {}
        description[catid]['parts'] = {}
        images = [r for r in results if r['properties']['catalogID'] == catid]
        for image in images:
            description[catid]['sensorPlatformName'] = image['properties']['sensorPlatformName']
            part = int(image['properties']['vendorDatasetIdentifier'].split(':')[1][-3:])
            color = image['properties']['colorInterpretation']
            bucket = image['properties']['tileBucketName']
            identifier = image['identifier']
            boundstr = image['properties']['footprintWkt']

            # create the part entry on first sight of this part number
            description[catid]['parts'].setdefault(part, {})

            description[catid]['parts'][part][color] = {}
            description[catid]['parts'][part][color]['id'] = identifier
            description[catid]['parts'][part][color]['bucket'] = bucket
            description[catid]['parts'][part][color]['boundstr'] = boundstr

    return description
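# Hypothetical sketch chaining get_images_by_catid() into describe_images();
# the catalog id is a placeholder and `gbdx.idaho` is assumed as above.
from gbdxtools import Interface

gbdx = Interface()
results = gbdx.idaho.get_images_by_catid('104001003A7A9B00')
description = gbdx.idaho.describe_images(results)
for cid, entry in description.items():
    for partnum, part in entry['parts'].items():
        print(cid, partnum, sorted(part.keys()))  # e.g. ['PAN', 'RGBN']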
def get_chip(self, coordinates, catid, chip_type='PAN', chip_format='TIF', filename='chip.tif'):
    """Downloads a native resolution, orthorectified chip in the requested format
       from a user-specified catalog id.

    Args:
        coordinates (list): Rectangle coordinates in order West, South, East, North.
            West and East are longitudes, North and South are latitudes.
            The maximum chip size is (2048 pix)x(2048 pix)
        catid (str): The image catalog id.
        chip_type (str): 'PAN' (panchromatic), 'MS' (multispectral), 'PS' (pansharpened).
            'MS' is 4 or 8 bands depending on sensor.
        chip_format (str): 'TIF' or 'PNG'
        filename (str): Where to save chip.

    Returns:
        True if chip is successfully downloaded; else False.
    """

    def t2s1(t):
        # Tuple to string 1
        return str(t).strip('(,)').replace(',', '')

    def t2s2(t):
        # Tuple to string 2
        return str(t).strip('(,)').replace(' ', '')

    if len(coordinates) != 4:
        print('Invalid coordinates: expected [W, S, E, N]')
        return False

    W, S, E, N = coordinates
    box = ((W, S), (W, N), (E, N), (E, S), (W, S))
    box_wkt = 'POLYGON ((' + ','.join([t2s1(corner) for corner in box]) + '))'

    # get IDAHO images which intersect box
    results = self.get_images_by_catid_and_aoi(catid=catid, aoi_wkt=box_wkt)
    description = self.describe_images(results)

    pan_id, ms_id, num_bands = None, None, 0
    for catid, images in description.items():
        for partnum, part in images['parts'].items():
            if 'PAN' in part.keys():
                pan_id = part['PAN']['id']
                bucket = part['PAN']['bucket']
            if 'WORLDVIEW_8_BAND' in part.keys():
                ms_id = part['WORLDVIEW_8_BAND']['id']
                num_bands = 8
                bucket = part['WORLDVIEW_8_BAND']['bucket']
            elif 'RGBN' in part.keys():
                ms_id = part['RGBN']['id']
                num_bands = 4
                bucket = part['RGBN']['bucket']

    # specify band information; guard against missing PAN/MS parts so we do not
    # assemble a request from a missing image id
    band_str = ''
    if chip_type == 'PAN' and pan_id:
        band_str = pan_id + '?bands=0'
    elif chip_type == 'MS' and ms_id:
        band_str = ms_id + '?'
    elif chip_type == 'PS' and ms_id and pan_id:
        if num_bands == 8:
            band_str = ms_id + '?bands=4,2,1&panId=' + pan_id
        elif num_bands == 4:
            band_str = ms_id + '?bands=0,1,2&panId=' + pan_id

    if not band_str:
        print('Could not find IDAHO imagery matching the requested chip type')
        return False

    # specify location information
    location_str = '&upperLeft={}&lowerRight={}'.format(t2s2((W, N)), t2s2((E, S)))

    service_url = 'https://idaho.geobigdata.io/v1/chip/bbox/' + bucket + '/'
    url = service_url + band_str + location_str
    url += '&format=' + chip_format + '&token=' + self.gbdx_connection.access_token
    r = requests.get(url)

    if r.status_code == 200:
        with open(filename, 'wb') as f:
            f.write(r.content)
        return True
    else:
        print('Cannot download chip')
        return False
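# Hypothetical usage sketch for get_chip(); the bounding box, catalog id and
# output filename are placeholders, and the call needs valid GBDX credentials.
from gbdxtools import Interface

gbdx = Interface()
bbox = [2.28, 48.85, 2.30, 48.87]  # W, S, E, N
ok = gbdx.idaho.get_chip(bbox, '104001003A7A9B00',
                         chip_type='PS', chip_format='TIF', filename='chip_ps.tif')
print('downloaded' if ok else 'failed')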
def get_tms_layers(self, catid, bands='4,2,1', gamma=1.3, highcutoff=0.98, lowcutoff=0.02, brightness=1.0, contrast=1.0):
    """Get list of urls and bounding boxes corresponding to idaho images for a given catalog id.

    Args:
        catid (str): Catalog id
        bands (str): Bands to display, separated by commas (0-7).
        gamma (float): gamma coefficient. This is for on-the-fly pansharpening.
        highcutoff (float): High cut off coefficient (0.0 to 1.0). This is for on-the-fly pansharpening.
        lowcutoff (float): Low cut off coefficient (0.0 to 1.0). This is for on-the-fly pansharpening.
        brightness (float): Brightness coefficient (0.0 to 1.0). This is for on-the-fly pansharpening.
        contrast (float): Contrast coefficient (0.0 to 1.0). This is for on-the-fly pansharpening.

    Returns:
        urls (list): TMS urls.
        bboxes (list of tuples): Each tuple is (W, S, E, N) where (W,S,E,N) are the bounds of the corresponding idaho part.
    """

    description = self.describe_images(self.get_images_by_catid(catid))

    service_url = 'http://idaho.geobigdata.io/v1/tile/'

    urls, bboxes = [], []

    for catid, images in description.items():
        for partnum, part in images['parts'].items():

            # reset per part so ids from a previous part cannot leak through
            pan_id, ms_id = None, None

            if 'PAN' in part.keys():
                pan_id = part['PAN']['id']
            if 'WORLDVIEW_8_BAND' in part.keys():
                ms_id = part['WORLDVIEW_8_BAND']['id']
                ms_partname = 'WORLDVIEW_8_BAND'
            elif 'RGBN' in part.keys():
                ms_id = part['RGBN']['id']
                ms_partname = 'RGBN'

            if ms_id:
                if pan_id:
                    band_str = ms_id + '/{z}/{x}/{y}?bands=' + bands + '&panId=' + pan_id
                else:
                    band_str = ms_id + '/{z}/{x}/{y}?bands=' + bands
                bbox = from_wkt(part[ms_partname]['boundstr']).bounds
            elif not ms_id and pan_id:
                band_str = pan_id + '/{z}/{x}/{y}?bands=0'
                bbox = from_wkt(part['PAN']['boundstr']).bounds
            else:
                continue

            bboxes.append(bbox)

            # Get the bucket. It has to be the same for all entries in the part.
            bucket = part[list(part.keys())[0]]['bucket']

            # Get the token
            token = self.gbdx_connection.access_token

            # Assemble url; keep the query string on one line so no whitespace leaks into it
            url = (service_url + bucket + '/' + band_str +
                   '&gamma={}&highCutoff={}&lowCutoff={}&brightness={}&contrast={}&token={}'.format(
                       gamma, highcutoff, lowcutoff, brightness, contrast, token))

            urls.append(url)

    return urls, bboxes
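# Hypothetical sketch: fetch the TMS urls/bounds for a catalog id and drop them
# on a folium map. folium is an illustrative choice, not a dependency of this
# module; the catalog id is a placeholder.
import folium
from gbdxtools import Interface

gbdx = Interface()
urls, bboxes = gbdx.idaho.get_tms_layers('104001003A7A9B00', bands='4,2,1')
W, S, E, N = bboxes[0]
m = folium.Map(location=[(S + N) / 2.0, (W + E) / 2.0], zoom_start=13)
for url in urls:
    folium.TileLayer(tiles=url, attr='DigitalGlobe IDAHO').add_to(m)
m.save('idaho_tms.html')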
def create_leaflet_viewer(self, idaho_image_results, filename):
    """Create a leaflet viewer html file for viewing idaho images.

    Args:
        idaho_image_results (dict): IDAHO image result set as returned from
            the catalog.
        filename (str): Where to save output html file.
    """

    description = self.describe_images(idaho_image_results)
    if len(description) > 0:
        functionstring = ''
        for catid, images in description.items():
            for partnum, part in images['parts'].items():

                num_images = len(list(part.keys()))
                partname = None
                if num_images == 1:
                    # there is only one image in this part, use it
                    partname = [p for p in list(part.keys())][0]
                    pan_image_id = ''
                elif num_images == 2:
                    # there are two images in this part, use the multispectral one
                    # and record the PAN id for pansharpening
                    partname = [p for p in list(part.keys()) if p != 'PAN'][0]
                    pan_image_id = part['PAN']['id']

                if not partname:
                    self.logger.debug("Cannot find part for idaho image.")
                    continue

                bandstr = {
                    'RGBN': '0,1,2',
                    'WORLDVIEW_8_BAND': '4,2,1',
                    'PAN': '0'
                }.get(partname, '0,1,2')

                part_boundstr_wkt = part[partname]['boundstr']
                part_polygon = from_wkt(part_boundstr_wkt)
                bucketname = part[partname]['bucket']
                image_id = part[partname]['id']
                W, S, E, N = part_polygon.bounds

                functionstring += "addLayerToMap('%s','%s',%s,%s,%s,%s,'%s');\n" % (
                    bucketname, image_id, W, S, E, N, pan_image_id)

        __location__ = os.path.realpath(
            os.path.join(os.getcwd(), os.path.dirname(__file__)))
        try:
            with open(os.path.join(__location__, 'leafletmap_template.html'), 'r') as htmlfile:
                data = htmlfile.read().decode("utf8")
        except AttributeError:
            # on Python 3, read() already returns unicode and has no decode()
            with open(os.path.join(__location__, 'leafletmap_template.html'), 'r') as htmlfile:
                data = htmlfile.read()

        data = data.replace('FUNCTIONSTRING', functionstring)
        data = data.replace('CENTERLAT', str(S))
        data = data.replace('CENTERLON', str(W))
        data = data.replace('BANDS', bandstr)
        data = data.replace('TOKEN', self.gbdx_connection.access_token)

        with codecs.open(filename, 'w', 'utf8') as outputfile:
            self.logger.debug("Saving %s" % filename)
            outputfile.write(data)
    else:
        print('No items returned.')
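# Hypothetical end-to-end sketch: search the catalog for a catid's IDAHO images
# and write a standalone leaflet viewer. The catalog id and filename are
# placeholders; GBDX credentials are assumed to be configured.
from gbdxtools import Interface

gbdx = Interface()
results = gbdx.idaho.get_images_by_catid('104001003A7A9B00')
gbdx.idaho.create_leaflet_viewer(results, filename='idaho_viewer.html')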