Search is not available for this dataset
text
stringlengths 75
104k
|
---|
def keys(self):
    """Return a list of names of options and positional arguments.

    Option names come first, followed by positional argument names in
    declaration order.
    """
    # list() is required for Python 3 compatibility: dict.keys() returns
    # a view there, which does not support `+` concatenation.
    return list(self.options.keys()) + [p.name for p in self.positional_args]
def values(self):
    """Return a list of values of options and positional arguments.

    Option values come first, followed by positional argument values in
    declaration order.
    """
    # list() is required for Python 3 compatibility: dict.values() returns
    # a view there, which does not support `+` concatenation.
    return list(self.options.values()) + [p.value for p in self.positional_args]
def items(self):
    """Return (name, value) pairs of options and positional arguments.

    Options come first, followed by positional arguments in declaration
    order.
    """
    # list() is required for Python 3 compatibility: dict.values() returns
    # a view there, which does not support `+` concatenation.
    return [(p.name, p.value) for p in list(self.options.values()) + list(self.positional_args)]
def getparam(self, key):
    """Get option or positional argument, by name, index or abbreviation.

    Abbreviations must be prefixed by a '-' character, like so: ui['-a']

    Raise KeyError if no matching parameter exists.
    """
    try:
        return self.options[key]
    except (KeyError, TypeError):
        # Not an option name (TypeError covers unhashable keys).
        # Narrowed from a bare `except:` which also swallowed e.g.
        # KeyboardInterrupt.
        pass
    for posarg in self.positional_args:
        if posarg.name == key:
            return posarg
    try:
        return self.abbreviations[key[1:]]
    except (KeyError, TypeError, IndexError):
        # Not an abbreviation either (TypeError/IndexError cover keys
        # that cannot be sliced).
        raise KeyError('no such option or positional argument')
def _add_option(self, option):
"""Add an Option object to the user interface."""
if option.name in self.options:
raise ValueError('name already in use')
if option.abbreviation in self.abbreviations:
raise ValueError('abbreviation already in use')
if option.name in [arg.name for arg in self.positional_args]:
raise ValueError('name already in use by a positional argument')
self.options[option.name] = option
if option.abbreviation:
self.abbreviations[option.abbreviation] = option
self.option_order.append(option.name) |
def _add_positional_argument(self, posarg):
"""Append a positional argument to the user interface.
Optional positional arguments must be added after the required ones.
The user interface can have at most one recurring positional argument,
and if present, that argument must be the last one.
"""
if self.positional_args:
if self.positional_args[-1].recurring:
raise ValueError("recurring positional arguments must be last")
if self.positional_args[-1].optional and not posarg.optional:
raise ValueError("required positional arguments must precede optional ones")
self.positional_args.append(posarg) |
def read_docs(self, docsfiles):
    """Read program documentation from a DocParser compatible file.

    docsfiles is a list of paths to potential docsfiles: parse if present.
    A string is taken as a list of one item.

    Updates self.docs from any blocks found, then attaches 'parameters'
    block texts to the matching options/positional arguments.

    Raise ValueError if a documented parameter does not exist and is not
    listed in self.ignore.
    """
    updates = DocParser()
    for docsfile in _list(docsfiles):
        # Paths that do not exist are silently skipped.
        if os.path.isfile(docsfile):
            updates.parse(docsfile)
    # Only overwrite docs for keys that actually appeared in the parsed files.
    self.docs.update((k, _docs(updates[k], self.docvars)) for k in self.docs if updates.blocks[k])
    for name, text in updates['parameters'].items():
        if name in self:
            # Interpolate docvars into the first text block for this parameter.
            self.getparam(name).docs = text[0] % self.docvars
        elif name not in self.ignore:
            raise ValueError("parameter %r does not exist" % name)
def parse_files(self, files=None, sections=None):
    """Parse configfiles.

    files <list str>, <str> or None:
        What files to parse. None means use self.configfiles. New
        values override old ones. A string value will be interpreted
        as a list of one item.
    sections <list str>, <str> or None:
        Which sections to parse from the files. None means use
        self.sections. A string value will be interpreted as a list
        of one item. The [DEFAULT]
        section is always read, and it is read as if its contents were
        copied to the beginning of all other sections.
    """
    files = _list(files, self.configfiles)
    sections = _list(sections, self.sections)
    for file in files:
        parser = StrictConfigParser()
        parser.read(file)
        for section in sections:
            if not parser.has_section(section):
                continue
            for unused in parser.unusedoptions(section):
                if unused not in self.options and unused not in self.ignore:
                    templ = "The option %r in section [%s] of file %s does not exist."
                    raise InvalidOption(unused, message=templ % (unused, section, file))
            for name in parser.options(section):
                if name in self.options:
                    if self.options[name].reserved:
                        templ = "The option %s in section [%s] of file %s is reserved for command line use."
                        # BUG FIX: the original interpolated the loop variable
                        # 'unused' from the previous loop (stale, or undefined
                        # when that loop never ran) instead of 'name'.
                        raise ReservedOptionError(message=templ % (name, section, file))
                    value = parser.get(section, name)
                    self.options[name].parsestr(value, name, '%s [%s]' % (file, section))
def _parse_options(self, argv, location):
    """Parse the options part of an argument list.

    IN:
    argv <list str>:
        List of arguments. Will be altered (parsed options are popped).
    location <str>:
        A user friendly string describing where this data came from.

    Stops at the first non-option argument, at a bare '--' (which is
    consumed) or at a bare '-' (which is left in argv).
    """
    # Options already seen, to reject repeats of non-recurring options.
    observed = []
    while argv:
        if argv[0].startswith('--'):
            name = argv.pop(0)[2:]
            # '--' means end of options.
            if not name:
                break
            if name not in self.options:
                raise InvalidOption(name)
            option = self.options[name]
            if not option.recurring:
                if option in observed:
                    raise OptionRecurrenceError(name)
                observed.append(option)
            # The option pops its own value arguments from argv.
            option.parse(argv, name, location)
        elif argv[0].startswith('-'):
            # A single - is not an abbreviation block, but the first positional arg.
            if argv[0] == '-':
                break
            block = argv.pop(0)[1:]
            # Abbrevs for options that take values go last in the block.
            for abbreviation in block[:-1]:
                if self.abbreviations[abbreviation].nargs != 0:
                    raise BadAbbreviationBlock(abbreviation, block, "options that require value arguments must be last in abbreviation blocks")
            # Parse individual options.
            for abbreviation in block:
                option = self.abbreviations[abbreviation]
                if not option.recurring:
                    if option in observed:
                        raise OptionRecurrenceError(option.name)
                    observed.append(option)
                option.parse(argv, '-' + abbreviation, location)
        # only arguments that start with -- or - can be Options.
        else:
            break
def _parse_positional_arguments(self, argv):
"""Parse the positional arguments part of an argument list.
argv <list str>:
List of arguments. Will be altered.
"""
for posarg in self.positional_args:
posarg.parse(argv)
if argv:
if None in [p.nargs for p in self.positional_args]:
msg = '%s too many argument%s given'
plural_s = len(argv) > 1 and 's' or ''
raise BadNumberOfArguments(message=msg % (len(argv), plural_s))
msg = 'This program accepts exactly %s positional arguments (%s given).'
required = len([p.nargs for p in self.positional_args])
raise BadNumberOfArguments(message=msg % (required, required + len(argv))) |
def parse_argv(self, argv=None, location='Command line.'):
    """Parse command line arguments.

    argv <list str> or None:
        The argument list to parse. None means use a copy of sys.argv.
        argv[0] is ignored. The list is altered in place.
    location <str>:
        A user friendly string describing where the parser got this
        data from.
    """
    if argv is None:
        argv = sys.argv[:]
    # Drop the program name; it is never an option or argument.
    del argv[0]
    self._parse_options(argv, location)
    self._parse_positional_arguments(argv)
def optionhelp(self, indent=0, maxindent=25, width=79):
    """Return user friendly help on program options.

    indent is the number of spaces preceding each line; maxindent and
    width are passed to the label alignment / wrapping helpers.
    """
    def makelabels(option):
        # Build e.g. "  --verbose, -v: ".
        labels = '%*s--%s' % (indent, ' ', option.name)
        if option.abbreviation:
            labels += ', -' + option.abbreviation
        return labels + ': '
    docs = []
    # Align all help texts to a common column computed from the labels.
    helpindent = _autoindent([makelabels(o) for o in self.options.values()], indent, maxindent)
    for name in self.option_order:
        # Present options in the order they were added.
        option = self.options[name]
        labels = makelabels(option)
        helpstring = "%s(%s). %s" % (option.formatname, option.strvalue, option.docs)
        wrapped = self._wrap_labelled(labels, helpstring, helpindent, width)
        docs.extend(wrapped)
    return '\n'.join(docs)
def posarghelp(self, indent=0, maxindent=25, width=79):
    """Return user friendly help on positional arguments in the program."""
    def makelabel(posarg):
        return ' ' * indent + posarg.displayname + ': '
    # Align all help texts to a common column computed from the labels.
    helpindent = _autoindent([makelabel(p) for p in self.positional_args], indent, maxindent)
    docs = []
    for posarg in self.positional_args:
        text = '%s. %s' % (posarg.formatname, posarg.docs)
        docs.extend(self._wrap_labelled(makelabel(posarg), text, helpindent, width))
    return '\n'.join(docs)
def format_usage(self, usage=None):
    """Return a formatted usage string.

    If usage is None, use self.docs['usage'], and if that is also None,
    generate one.
    """
    if usage is None:
        usage = self.docs['usage']
    if usage is not None:
        return usage[0] % self.docvars
    # No explicit usage anywhere: generate one from the known
    # options and positional arguments.
    parts = [self.docvars['command']]
    helpname = self.basic_option_names.get('help')
    if helpname:
        parts.append(' [--%s]' % helpname)
    if self.options:
        parts.append(' <OPTIONS>')
    open_brackets = 0
    for posarg in self.positional_args:
        parts.append(' ')
        if posarg.optional:
            # Optional arguments nest: "a [b [c]]"; close brackets at the end.
            parts.append('[')
            open_brackets += 1
        parts.append(posarg.displayname)
        if posarg.recurring:
            parts.append(' [%s2 [...]]' % posarg.displayname)
    parts.append(']' * open_brackets)
    return ''.join(parts)
def _wrap(self, text, indent=0, width=0):
    """Textwrap an indented paragraph.

    ARGS:
    text <list str> or <str>:
        The paragraph; only the first item of the list is wrapped.
    indent = 0 <int>:
        Extra spaces to add to the paragraph's own leading indent.
    width = 0 <int>:
        Maximum allowed page width. 0 means use the default from
        self.width.
    """
    text = _list(text)
    if not width:
        width = self.width
    # Measure the paragraph's own leading whitespace and add `indent`
    # to it; both first and subsequent lines get the same indent.
    paragraph = text[0].lstrip()
    s = ' ' * (len(text[0]) - len(paragraph) + indent)
    wrapped = textwrap.wrap(paragraph.strip(), width, initial_indent=s, subsequent_indent=s)
    return '\n'.join(wrapped)
def _wraptext(self, text, indent=0, width=0):
"""Shorthand for '\n'.join(self._wrap(par, indent, width) for par in text)."""
return '\n'.join(self._wrap(par, indent, width) for par in text) |
def _wrapusage(self, usage=None, width=0):
"""Textwrap usage instructions.
ARGS:
width = 0 <int>:
Maximum allowed page width. 0 means use default from
self.iMaxHelpWidth.
"""
if not width:
width = self.width
return textwrap.fill('USAGE: ' + self.format_usage(usage), width=width, subsequent_indent=' ...') |
def shorthelp(self, width=0):
    """Return brief help containing title and usage instructions.

    ARGS:
    width = 0 <int>:
        Maximum allowed page width. 0 means use the default from
        self.width.
    """
    lines = [self._wrap(self.docs['title'], width=width)]
    if self.docs['description']:
        lines.append(self._wrap(self.docs['description'], indent=2, width=width))
    lines.append('')
    lines.append(self._wrapusage(width=width))
    lines.append('')
    return '\n'.join(lines)
def strsettings(self, indent=0, maxindent=25, width=0):
    """Return user friendly help on the current option settings.

    (NOTE(review): the original summary said "positional arguments",
    but the body iterates self.options/self.option_order only.)

    indent is the number of spaces preceeding the text on each line.
    The indent of the documentation is dependent on the length of the
    longest label that is shorter than maxindent. A label longer than
    maxindent will be printed on its own line.
    width is maximum allowed page width, use self.width if 0.
    """
    out = []
    makelabel = lambda name: ' ' * indent + name + ': '
    # Align all settings texts to a common column computed from the labels.
    settingsindent = _autoindent([makelabel(s) for s in self.options], indent, maxindent)
    for name in self.option_order:
        # Present options in the order they were added.
        option = self.options[name]
        label = makelabel(name)
        settingshelp = "%s(%s): %s" % (option.formatname, option.strvalue, option.location)
        wrapped = self._wrap_labelled(label, settingshelp, settingsindent, width)
        out.extend(wrapped)
    return '\n'.join(out)
def settingshelp(self, width=0):
    """Return a summary of program options, their values and origins.

    width is maximum allowed page width, use self.width if 0.
    """
    lines = [self._wrap(self.docs['title'], width=width)]
    if self.docs['description']:
        lines.append(self._wrap(self.docs['description'], indent=2, width=width))
    lines.append('')
    lines.append('SETTINGS:')
    lines.append(self.strsettings(indent=2, width=width))
    lines.append('')
    return '\n'.join(lines)
def launch(self,
           argv=None,
           showusageonnoargs=False,
           width=0,
           helphint="Use with --help for more information.\n",
           debug_parser=False):
    """Do the usual stuff to initiallize the program.

    Read config files and parse arguments, and if the user has used any
    of the help/version/settings options, display help and exit.

    If debug_parser is false, don't catch ParseErrors and exit with user
    friendly help. Crash with traceback instead.

    argv is a list of arguments to parse. Will be modified. None means use
    copy of sys.argv. argv[0] is ignored.

    If showusageonnoargs is true, show usage and exit if the user didn't
    give any args. Should be False if there are no required PositionalArgs.
    NOTE(review): this path calls len(argv), which raises TypeError when
    argv is None — confirm callers always pass a list with this flag.

    width is the maximum allowed page width. 0 means use self.width.
    helphint is a string that hints on how to get more help which is
    displayed at the end of usage help messages.
    """
    if showusageonnoargs and len(argv) == 1:
        print self.shorthelp(width=width)
        if helphint:
            print self._wrap(helphint, indent=2, width=width)
        sys.exit(0)
    parsing_error = None
    try:
        self.parse_files()
        self.parse_argv(argv)
    except ParseError, parsing_error:
        # Remember the error; help options below should still work even
        # when parsing failed, so the exit is deferred.
        if debug_parser:
            raise
    for optiontype in ['help', 'longhelp', 'settings', 'version']:
        name = self.basic_option_names.get(optiontype)
        if name and self[name]:
            # Map the option type to a help method name, e.g.
            # 'help' -> 'help', 'longhelp' -> 'longhelp',
            # 'settings' -> 'settingshelp', 'version' -> 'versionhelp'.
            # NOTE(review): rstrip('help') strips *characters* h/e/l/p from
            # the right, not the suffix 'help' — confirm intended names.
            methodname = optiontype.rstrip('help') + 'help'
            print getattr(self, methodname)(width)
            sys.exit()
    if parsing_error:
        self.graceful_exit(parsing_error, width)
def parse(self, file):
    """Parse text blocks from a file.

    file may be a path (opened here) or an iterable of lines with a
    .name attribute (used in error messages).

    Raise ParseError on text before the first block, on a missing label
    for a labelled block type, or on a label for an unlabelled one.
    """
    if isinstance(file, basestring):
        # NOTE(review): the file opened here is never explicitly closed.
        file = open(file)
    line_number = 0
    label = None
    block = self.untagged
    for line in file:
        line_number += 1
        line = line.rstrip('\n')
        if self.tabsize > 0:
            line = line.replace('\t', ' ' * self.tabsize)
        if self.decommenter:
            line = self.decommenter.decomment(line)
            # The decommenter returns None for comment-only lines.
            if line is None:
                continue
        tag = line.split(':', 1)[0].strip()
        # Still in the same block?
        if tag not in self.names:
            if block is None:
                if line and not line.isspace():
                    # BUG FIX: ParseError now gets the line *number* (as on
                    # the unlabelled-block error below), not the line text.
                    raise ParseError(file.name, line_number, "garbage before first block: %r" % line)
                continue
            block.addline(line)
            continue
        # Open a new block.
        name = self.names[tag]
        label = line.split(':', 1)[1].strip()
        if name in self.labelled_classes:
            if not label:
                # BUG FIX: line number, not line text (see above).
                raise ParseError(file.name, line_number, "missing label for %r block" % name)
            block = self.blocks[name].setdefault(label, self.labelled_classes[name]())
        else:
            if label:
                msg = "label %r present for unlabelled block %r" % (label, name)
                raise ParseError(file.name, line_number, msg)
            block = self.blocks[name]
        block.startblock()
def get_format(format):
    """Get a format object.

    If format is a format object, return unchanged. If it is a string
    matching one of the BaseFormat subclasses in the tui.formats module
    (case insensitive), return an instance of that class. Otherwise assume
    it's a factory function for Formats (such as a class) so call and return,
    and raise ValueError on error.
    """
    if isinstance(format, BaseFormat):
        return format
    if isinstance(format, basestring):
        for name, formatclass in globals().items():
            if name.lower() == format.lower():
                if not issubclass(formatclass, BaseFormat):
                    raise ValueError('%s is not the name of a format class' % format)
                return formatclass()
    try:
        # Unknown strings fall through to here and fail the call, which
        # is reported as 'no such format'.
        return format()
    except Exception:
        # Narrowed from a bare `except:` which also swallowed e.g.
        # KeyboardInterrupt and SystemExit.
        raise ValueError('no such format')
def parsestr(self, argstr):
    """Parse arguments found in settings files.

    argstr is the string that should be parsed. Use e.g. '""' to pass an
    empty string.

    if self.nargs > 1 a list of parsed values will be returned.

    NOTE: formats with nargs == 0 or None probably want to override this
    method.
    """
    tokens = shlex.split(argstr, comments=True)
    expected = self.nargs
    if len(tokens) != expected:
        raise BadNumberOfArguments(expected, len(tokens))
    return self.parse(tokens)
def parse_argument(self, arg):
    """Parse a single argument.

    Look up arg in self.special, or call .to_python() if absent. Raise
    BadArgument on errors.
    """
    # Case-insensitive formats match specials on the lowercased argument.
    lookup = self.casesensitive and arg or arg.lower()
    if lookup in self.special:
        return self.special[lookup]
    try:
        return self.to_python(arg, *self.args, **self.kw)
    except Exception as e:
        # FIX: `except Exception, e` is Python 2-only syntax; `as` works
        # on Python 2.6+ and 3. Conversion failures become BadArgument.
        raise BadArgument(arg, str(e))
def parse(self, argv):
    """Pop, parse and return the first self.nargs items from argv.

    if self.nargs > 1 a list of parsed values will be returned.
    Raise BadNumberOfArguments or BadArgument on errors.
    NOTE: argv may be modified in place by this method.
    """
    count = self.nargs
    if len(argv) < count:
        raise BadNumberOfArguments(count, len(argv))
    if count == 1:
        return self.parse_argument(argv.pop(0))
    parsed = []
    for _ in range(count):
        parsed.append(self.parse_argument(argv.pop(0)))
    return parsed
def present(self, value):
    """Return a user-friendly representation of a value.

    Look up value in self.special, or call .to_literal() if absent.
    """
    for literal, special_value in self.special.items():
        if special_value == value:
            return literal
    return self.to_literal(value, *self.args, **self.kw)
def parsestr(self, argstr):
    """Parse arguments found in settings files.

    Use the values in self.true for True in settings files, or those in
    self.false for False, case insensitive.
    """
    argv = shlex.split(argstr, comments=True)
    if len(argv) != 1:
        raise BadNumberOfArguments(1, len(argv))
    arg = argv[0]
    candidate = arg.lower()
    if candidate in self.true:
        return True
    if candidate in self.false:
        return False
    raise BadArgument(arg, "Allowed values are " + self.allowed + '.')
def parse(self, argv):
    """Pop, parse and return the first arg from argv.

    The arg will be .split() based on self.separator and the (optionally
    stripped) items will be parsed by self.format and returned as a list.
    Raise BadNumberOfArguments or BadArgument on errors.
    NOTE: argv will be modified.
    """
    if not argv:
        raise BadNumberOfArguments(1, 0)
    argument = argv.pop(0)
    lookup = self.casesensitive and argument or argument.lower()
    if lookup in self.special:
        return self.special[lookup]
    pieces = []
    for piece in argument.split(self.separator):
        # Keep the historical and/or quirk: a piece that strips to an
        # empty string is kept unstripped.
        pieces.append(self.strip and piece.strip() or piece)
    values = []
    while pieces:
        # self.format.parse pops one or more items from the list.
        values.append(self.format.parse(pieces))
    return values
def present(self, value):
    """Return a user-friendly representation of a value.

    Look up value in self.special, or join the presented items with
    self.separator if absent.
    """
    for literal, special_value in self.special.items():
        if special_value == value:
            return literal
    presented = [self.format.present(item) for item in value]
    return self.separator.join(presented)
def get_separator(self, i):
    """Return the separator preceding format i, or '' for i == 0.

    Indexes past the end of self.separator reuse the last separator.
    """
    if not i:
        return ''
    return self.separator[min(i - 1, len(self.separator) - 1)]
def parse(self, argv):
    """Pop, parse and return the first arg from argv.

    The arg will be repeatedly .split(x, 1) based on self.get_separator()
    and the (optionally stripped) pieces will be parsed by the matching
    formats in self.format and returned as a list.
    Raise BadNumberOfArguments or BadArgument on errors.
    NOTE: argv will be modified.
    """
    if not argv:
        raise BadNumberOfArguments(1, 0)
    remainder = argv.pop(0)
    lookup = self.casesensitive and remainder or remainder.lower()
    if lookup in self.special:
        return self.special[lookup]
    values = []
    # Every format but the last consumes text up to its trailing separator.
    # (BUG FIX: removed a stray debug `print` left in the original.)
    for i, format in enumerate(self.format[:-1]):
        separator = self.get_separator(i + 1)
        try:
            arg, remainder = remainder.split(separator, 1)
        except ValueError:
            # Separator absent (split returned a single piece).
            raise BadArgument(remainder, 'does not contain required separator ' + repr(separator))
        if self.strip:
            arg = arg.strip()
        values.append(format.parse([arg]))
    if self.strip:
        remainder = remainder.strip()
    # BUG FIX: the original reused the stale loop variable here, so the
    # final piece was parsed by the second-to-last format (and crashed
    # with NameError for single-format lists). The remainder belongs to
    # the last format.
    values.append(self.format[-1].parse([remainder]))
    return values
def present(self, value):
    """Return a user-friendly representation of a value.

    Look up value in self.special, or present each item with its
    matching format, joined by the appropriate separators.
    """
    for literal, special_value in self.special.items():
        if special_value == value:
            return literal
    parts = []
    for i, item in enumerate(value):
        parts.append(self.get_separator(i) + self.format[i].present(item))
    return ''.join(parts)
def authorize_url(self):
    """
    Return a URL to redirect the user to for OAuth authentication.
    """
    query = urlencode({
        'client_id': self.client_id,
        'redirect_uri': self.redirect_uri,
    })
    return "{}?{}".format(OAUTH_ROOT + '/authorize', query)
def exchange_token(self, code):
    """
    Exchange the authorization code for an access token.
    """
    resp = requests.get(OAUTH_ROOT + '/access_token', params={
        'client_id': self.client_id,
        'client_secret': self.client_secret,
        'redirect_uri': self.redirect_uri,
        'code': code,
    })
    if not resp.ok:
        raise MixcloudOauthError("Could not get access token.")
    return resp.json()['access_token']
def get_open_port() -> int:
    """
    Gets a PORT that will (probably) be available on the machine.

    It is possible that in-between the time in which the open PORT is found
    and when it is used, another process may bind to it instead.
    :return: the (probably) available PORT
    """
    free_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # Binding to port 0 asks the OS to pick any free port.
        free_socket.bind(("", 0))
        free_socket.listen(1)
        return free_socket.getsockname()[1]
    finally:
        # FIX: close in a finally block so the socket is not leaked if
        # bind()/listen() raises.
        free_socket.close()
def extract_version_number(string: str) -> str:
    """
    Extract a version from a string in the form: `.*[0-9]+(_[0-9]+)*.*`,
    e.g. Irods4_1_9CompatibleController.

    If the string contains multiple version numbers, the first (from left)
    is extracted.
    Will raise a `ValueError` if there is no version number in the given
    string.
    :param string: the string containing the version number
    :return: the extracted version
    """
    match = _EXTRACT_VERSION_PATTERN.search(string)
    if match is None:
        raise ValueError("No version number in string")
    # Underscore-separated components become dot-separated, e.g. 4_1_9 -> 4.1.9.
    return match.group().replace("_", ".")
def acquire(self, *args, **kwargs):
    """Wrap Lock.acquire, tracking waiter count and locked state.

    Returns the result of Lock.acquire, so non-blocking calls
    (acquire(False)) keep their True/False result. (The original
    discarded it and marked the lock held even when a non-blocking
    acquire failed.)
    """
    with self._stat_lock:
        self._waiting += 1
    acquired = False
    try:
        acquired = self._lock.acquire(*args, **kwargs)
    finally:
        with self._stat_lock:
            # BUG FIX: only mark locked when the lock was actually taken.
            if acquired:
                self._locked = True
            self._waiting -= 1
    return acquired
def release(self):
    """Wraps Lock.release, recording the unlock time and state."""
    # Release first so any waiter can proceed as soon as possible.
    self._lock.release()
    with self._stat_lock:
        self._last_released = datetime.now()
        self._locked = False
def default_decoder(self, obj):
    """Handle a dict that might contain a wrapped state for a custom type."""
    typename, marshalled_state = self.unwrap_callback(obj)
    if typename is None:
        # Not a wrapped state: pass the dict through untouched.
        return obj
    try:
        cls, unmarshaller = self.serializer.unmarshallers[typename]
    except KeyError:
        raise LookupError('no unmarshaller found for type "{}"'.format(typename)) from None
    if cls is None:
        # Factory-style unmarshaller: builds and returns the instance itself.
        return unmarshaller(marshalled_state)
    # Two-argument unmarshaller: restore state onto a blank instance.
    instance = cls.__new__(cls)
    unmarshaller(instance, marshalled_state)
    return instance
def wrap_state_dict(self, typename: str, state) -> Dict[str, Any]:
    """
    Wrap the marshalled state in a dictionary.

    The returned dictionary has two keys, corresponding to the ``type_key``
    and ``state_key`` options. The former holds the type name and the latter
    holds the marshalled state.

    :param typename: registered name of the custom type
    :param state: the marshalled state of the object
    :return: an object serializable by the serializer
    """
    wrapped = {self.type_key: typename}
    wrapped[self.state_key] = state
    return wrapped
def unwrap_state_dict(self, obj: Dict[str, Any]) -> Union[Tuple[str, Any], Tuple[None, None]]:
    """Unwraps a marshalled state previously wrapped using :meth:`wrap_state_dict`."""
    # A wrapped state has exactly the two keys written by wrap_state_dict.
    if len(obj) != 2:
        return None, None
    typename = obj.get(self.type_key)
    if typename is None:
        return None, None
    return typename, obj.get(self.state_key)
def publish(quiet, dataset_uri):
    """Enable HTTP access to a dataset.

    This only works on datasets in some systems. For example, datasets stored
    in AWS S3 object storage and Microsoft Azure Storage can be published as
    datasets accessible over HTTP. A published dataset is world readable.
    """
    access_uri = http_publish(dataset_uri)
    if quiet:
        return
    click.secho("Dataset accessible at ", nl=False, fg="green")
    click.secho(access_uri)
def register_custom_type(
        self, cls: type, marshaller: Optional[Callable[[Any], Any]] = default_marshaller,
        unmarshaller: Union[Callable[[Any, Any], None],
                            Callable[[Any], Any], None] = default_unmarshaller, *,
        typename: str = None, wrap_state: bool = True) -> None:
    """
    Register a marshaller and/or unmarshaller for the given class.

    The state object returned by the marshaller and passed to the unmarshaller can be any
    serializable type. Usually a dictionary mapping of attribute names to values is used.

    .. warning:: Registering marshallers/unmarshallers for any custom type will override any
        serializer specific encoding/decoding hooks (respectively) already in place!

    :param cls: the class to register
    :param marshaller: a callable that takes the object to be marshalled as the argument and
        returns a state object
    :param unmarshaller: a callable that either:

        * takes an uninitialized instance of ``cls`` and its state object as arguments and
          restores the state of the object
        * takes a state object and returns a new instance of ``cls``
    :param typename: a unique identifier for the type (defaults to the ``module:varname``
        reference to the class)
    :param wrap_state: ``True`` to wrap the marshalled state before serialization so that it
        can be recognized later for unmarshalling, ``False`` to serialize it as is
    """
    assert check_argument_types()
    typename = typename or qualified_name(cls)
    if marshaller:
        self.marshallers[cls] = typename, marshaller, wrap_state
        # NOTE(review): unlike the unmarshaller branch below, this does not
        # check self.custom_type_codec for None before using it — confirm
        # whether that asymmetry is intentional.
        self.custom_type_codec.register_object_encoder_hook(self)
    if unmarshaller and self.custom_type_codec is not None:
        target_cls = cls  # type: Optional[type]
        if len(signature(unmarshaller).parameters) == 1:
            # A single-parameter unmarshaller constructs the instance itself;
            # a None class presumably signals the decoder not to pre-create
            # one with cls.__new__() — see default_decoder.
            target_cls = None
        self.unmarshallers[typename] = target_cls, unmarshaller
        self.custom_type_codec.register_object_decoder_hook(self)
def _prompt_for_values(d):
    """Update the descriptive metadata interactively.

    Uses values entered by the user. Note that the function keeps recursing
    whenever a value is another ``CommentedMap`` or a ``list``. The
    function works as passing dictionaries and lists into a function edits
    the values in place.
    """
    for key, value in d.items():
        if isinstance(value, CommentedMap):
            # Nested mapping: recurse; edits happen in place.
            _prompt_for_values(value)
        elif isinstance(value, list):
            # NOTE(review): assumes every list item is itself a mapping/list;
            # a scalar item would fail on .items() in the recursive call —
            # confirm the readme template never has lists of scalars.
            for item in value:
                _prompt_for_values(item)
        else:
            # Leaf value: prompt with the current value as the default.
            typ = type(value)
            if isinstance(value, ScalarFloat):  # Deal with ruamel.yaml floats.
                typ = float
            new_value = click.prompt(key, type=typ, default=value)
            d[key] = new_value
    return d
def create(quiet, name, base_uri, symlink_path):
    """Create a proto dataset.

    Validates the name, builds admin metadata, creates the dataset via the
    appropriate storage broker (rewriting the URI for symlink datasets) and
    writes an empty readme. Prints the URI, plus next-step hints unless
    quiet is set.
    """
    _validate_name(name)
    admin_metadata = dtoolcore.generate_admin_metadata(name)
    parsed_base_uri = dtoolcore.utils.generous_parse_uri(base_uri)
    if parsed_base_uri.scheme == "symlink":
        if symlink_path is None:
            raise click.UsageError("Need to specify symlink path using the -s/--symlink-path option")  # NOQA
    if symlink_path:
        # Force the symlink scheme onto the base URI.
        base_uri = dtoolcore.utils.sanitise_uri(
            "symlink:" + parsed_base_uri.path
        )
        parsed_base_uri = dtoolcore.utils.generous_parse_uri(base_uri)
    # Create the dataset.
    proto_dataset = dtoolcore.generate_proto_dataset(
        admin_metadata=admin_metadata,
        base_uri=dtoolcore.utils.urlunparse(parsed_base_uri),
        config_path=CONFIG_PATH)
    # If we are creating a symlink dataset we need to set the symlink_path
    # attribute on the storage broker.
    if symlink_path:
        symlink_abspath = os.path.abspath(symlink_path)
        proto_dataset._storage_broker.symlink_path = symlink_abspath
    try:
        proto_dataset.create()
    except dtoolcore.storagebroker.StorageBrokerOSError as err:
        # Surface storage errors as CLI usage errors.
        raise click.UsageError(str(err))
    proto_dataset.put_readme("")
    if quiet:
        click.secho(proto_dataset.uri)
    else:
        # Give the user some feedback and hints on what to do next.
        click.secho("Created proto dataset ", nl=False, fg="green")
        click.secho(proto_dataset.uri)
        click.secho("Next steps: ")
        step = 1
        if parsed_base_uri.scheme != "symlink":
            # Symlink datasets already point at existing data.
            click.secho("{}. Add raw data, eg:".format(step))
            click.secho(
                " dtool add item my_file.txt {}".format(proto_dataset.uri),
                fg="cyan")
            if parsed_base_uri.scheme == "file":
                # Find the abspath of the data directory for user feedback.
                data_path = proto_dataset._storage_broker._data_abspath
                click.secho(" Or use your system commands, e.g: ")
                click.secho(
                    " mv my_data_directory {}/".format(data_path),
                    fg="cyan"
                )
            step = step + 1
        click.secho("{}. Add descriptive metadata, e.g: ".format(step))
        click.secho(
            " dtool readme interactive {}".format(proto_dataset.uri),
            fg="cyan")
        step = step + 1
        click.secho(
            "{}. Convert the proto dataset into a dataset: ".format(step)
        )
        click.secho(" dtool freeze {}".format(proto_dataset.uri), fg="cyan")
def name(dataset_uri, new_name):
    """
    Report / update the name of the dataset.

    It is only possible to update the name of a proto dataset,
    i.e. a dataset that has not yet been frozen.
    """
    # BUG FIX: the dataset must only be loaded and renamed when a new name
    # was actually given; the original called update_name unconditionally,
    # renaming the dataset to "" in report-only mode.
    if new_name != "":
        _validate_name(new_name)
        try:
            dataset = dtoolcore.ProtoDataSet.from_uri(
                uri=dataset_uri,
                config_path=CONFIG_PATH
            )
        except dtoolcore.DtoolCoreTypeError:
            # Frozen dataset: fall back to the read-only DataSet class.
            dataset = dtoolcore.DataSet.from_uri(
                uri=dataset_uri,
                config_path=CONFIG_PATH
            )
        dataset.update_name(new_name)
    # Always report the (possibly updated) name.
    admin_metadata = dtoolcore._admin_metadata_from_uri(
        uri=dataset_uri,
        config_path=CONFIG_PATH
    )
    click.secho(admin_metadata["name"])
def interactive(proto_dataset_uri):
    """Interactive prompting to populate the readme.

    Loads the readme template as YAML, prompts for each value and writes
    the result back as the proto dataset's readme.
    """
    proto_dataset = dtoolcore.ProtoDataSet.from_uri(
        uri=proto_dataset_uri,
        config_path=CONFIG_PATH)
    # Create an CommentedMap representation of the yaml readme template.
    readme_template = _get_readme_template()
    yaml = YAML()
    yaml.explicit_start = True
    # Match the indentation convention used for dataset readmes.
    yaml.indent(mapping=2, sequence=4, offset=2)
    descriptive_metadata = yaml.load(readme_template)
    descriptive_metadata = _prompt_for_values(descriptive_metadata)
    # Write out the descriptive metadata to the readme file.
    stream = StringIO()
    yaml.dump(descriptive_metadata, stream)
    proto_dataset.put_readme(stream.getvalue())
    click.secho("Updated readme ", fg="green")
    click.secho("To edit the readme using your default editor:")
    click.secho(
        "dtool readme edit {}".format(proto_dataset_uri),
        fg="cyan")
def edit(dataset_uri):
    """Default editor updating of readme content.

    Opens the dataset's readme in the user's default editor and writes it
    back (after validation) if the user saved any content.
    """
    try:
        dataset = dtoolcore.ProtoDataSet.from_uri(
            uri=dataset_uri,
            config_path=CONFIG_PATH
        )
    except dtoolcore.DtoolCoreTypeError:
        # Frozen dataset: fall back to the DataSet class.
        dataset = dtoolcore.DataSet.from_uri(
            uri=dataset_uri,
            config_path=CONFIG_PATH
        )
    readme_content = dataset.get_readme_content()
    try:
        # Python2 compatibility: decode to unicode text before editing.
        # (On Python 3 `unicode` does not exist and the NameError is ignored.)
        readme_content = unicode(readme_content, "utf-8")
    except NameError:
        pass
    # click.edit returns None when the user aborts without saving.
    edited_content = click.edit(readme_content)
    if edited_content is not None:
        _validate_and_put_readme(dataset, edited_content)
        click.secho("Updated readme ", nl=False, fg="green")
    else:
        click.secho("Did not update readme ", nl=False, fg="red")
    click.secho(dataset_uri)
def show(dataset_uri):
    """Show the descriptive metadata in the readme."""
    try:
        dataset = dtoolcore.ProtoDataSet.from_uri(
            uri=dataset_uri,
            config_path=CONFIG_PATH
        )
    except dtoolcore.DtoolCoreTypeError:
        # Frozen dataset: fall back to the DataSet class.
        dataset = dtoolcore.DataSet.from_uri(
            uri=dataset_uri,
            config_path=CONFIG_PATH
        )
    click.secho(dataset.get_readme_content())
def write(proto_dataset_uri, input):
    """Use YAML from a file or stdin to populate the readme.

    To stream content from stdin use "-", e.g.

    echo "desc: my data" | dtool readme write <DS_URI> -
    """
    proto_dataset = dtoolcore.ProtoDataSet.from_uri(
        uri=proto_dataset_uri,
        # CONSISTENCY FIX: every sibling command passes config_path;
        # this one omitted it, ignoring the user's dtool configuration.
        config_path=CONFIG_PATH
    )
    _validate_and_put_readme(proto_dataset, input.read())
def item(proto_dataset_uri, input_file, relpath_in_dataset):
    """Add a file to the proto dataset."""
    dest_relpath = relpath_in_dataset
    if dest_relpath == "":
        # Default to the file's own name when no relpath was given.
        dest_relpath = os.path.basename(input_file)
    proto_dataset = dtoolcore.ProtoDataSet.from_uri(
        proto_dataset_uri,
        config_path=CONFIG_PATH)
    proto_dataset.put_item(input_file, dest_relpath)
def metadata(proto_dataset_uri, relpath_in_dataset, key, value):
    """Add metadata to a file in the proto dataset."""
    dataset = dtoolcore.ProtoDataSet.from_uri(
        uri=proto_dataset_uri,
        config_path=CONFIG_PATH)
    dataset.add_item_metadata(
        handle=relpath_in_dataset,
        key=key,
        value=value)
def freeze(proto_dataset_uri):
    """Convert a proto dataset into a dataset.

    This step is carried out after all files have been added to the dataset.
    Freezing a dataset finalizes it with a stamp marking it as frozen.

    Exits with status 2 when the dataset exceeds DTOOL_MAX_FILES_LIMIT,
    3 when an item handle is invalid, and 4 when storage-broker
    validation fails.
    """
    proto_dataset = dtoolcore.ProtoDataSet.from_uri(
        uri=proto_dataset_uri,
        config_path=CONFIG_PATH
    )
    num_items = len(list(proto_dataset._identifiers()))
    max_files_limit = int(dtoolcore.utils.get_config_value(
        "DTOOL_MAX_FILES_LIMIT",
        CONFIG_PATH,
        10000
    ))
    assert isinstance(max_files_limit, int)
    if num_items > max_files_limit:
        click.secho(
            "Too many items ({} > {}) in proto dataset".format(
                num_items,
                max_files_limit
            ),
            fg="red"
        )
        click.secho("1. Consider splitting the dataset into smaller datasets")
        click.secho("2. Consider packaging small files using tar")
        click.secho("3. Increase the limit using the DTOOL_MAX_FILES_LIMIT")
        click.secho(" environment variable")
        sys.exit(2)
    handles = list(proto_dataset._storage_broker.iter_item_handles())
    for h in handles:
        if not valid_handle(h):
            click.secho(
                "Invalid item name: {}".format(h),
                fg="red"
            )
            click.secho("1. Consider renaming the item")
            click.secho("2. Consider removing the item")
            sys.exit(3)
    # PERF FIX: reuse num_items rather than re-listing the storage
    # broker's identifiers (the original recomputed the same value here).
    with click.progressbar(length=num_items,
                           label="Generating manifest") as progressbar:
        try:
            proto_dataset.freeze(progressbar=progressbar)
        except dtoolcore.storagebroker.DiskStorageBrokerValidationWarning as e:
            click.secho("")
            click.secho(str(e), fg="red", nl=False)
            sys.exit(4)
    click.secho("Dataset frozen ", nl=False, fg="green")
    click.secho(proto_dataset_uri)
def copy(resume, quiet, dataset_uri, dest_base_uri):
    """DEPRECATED: Copy a dataset to a different location."""
    # Warn on stderr, then delegate to the shared implementation.
    for deprecation_message in (
        "The ``dtool copy`` command is deprecated",
        "Use ``dtool cp`` instead",
    ):
        click.secho(deprecation_message, fg="red", err=True)
    _copy(resume, quiet, dataset_uri, dest_base_uri)
def cp(resume, quiet, dataset_uri, dest_base_uri):
    """Copy a dataset to a different location."""
    # Thin wrapper: the work is done by the shared _copy implementation.
    _copy(resume, quiet, dataset_uri, dest_base_uri)
def compress(obj, level=6, return_type="bytes"):
    """Compress anything to bytes or string.

    :param obj: bytes, text, or any picklable object.
    :param level: zlib compression level (0-9, default 6).
    :param return_type: if "bytes", then return raw compressed bytes; if
        "str", then return base64.b64encode bytes as a utf-8 string.
    :raises ValueError: if ``return_type`` is neither "bytes" nor "str".
    """
    # Fail fast on a bad return_type instead of compressing first and only
    # then discovering the result cannot be returned.
    if return_type not in ("bytes", "str"):
        raise ValueError("'return_type' has to be one of 'bytes', 'str'!")

    if isinstance(obj, binary_type):
        payload = obj
    elif isinstance(obj, string_types):
        payload = obj.encode("utf-8")
    else:
        # protocol=2 keeps the pickle readable from Python 2 as well.
        payload = pickle.dumps(obj, protocol=2)

    b = zlib.compress(payload, level)
    if return_type == "bytes":
        return b
    return base64.b64encode(b).decode("utf-8")
def _parsems(value):
"""Parse a I[.F] seconds value into (seconds, microseconds)."""
if "." not in value:
return int(value), 0
else:
i, f = value.split(".")
return int(i), int(f.ljust(6, "0")[:6]) |
def get_token(self):
    """
    This function breaks the time string into lexical units (tokens), which
    can be parsed by the parser. Lexical units are demarcated by changes in
    the character set, so any continuous string of letters is considered
    one unit, any continuous string of numbers is considered one unit.

    The main complication arises from the fact that dots ('.') can be used
    both as separators (e.g. "Sep.20.2009") or decimal points (e.g.
    "4:30:21.447"). As such, it is necessary to read the full context of
    any dot-separated strings before breaking it into tokens; as such, this
    function maintains a "token stack", for when the ambiguous context
    demands that multiple tokens be parsed at once.

    :return: the next token string, or ``None`` once the stream is
        exhausted.
    """
    # Emit any tokens queued by a previous call (created when an ambiguous
    # dot-separated run was split into several tokens) before reading more.
    if self.tokenstack:
        return self.tokenstack.pop(0)

    seenletters = False
    token = None
    # States: None = nothing read yet, 'a' = word, '0' = number,
    # 'a.' / '0.' = word/number that contains at least one dot.
    state = None

    while not self.eof:
        # We only realize that we've reached the end of a token when we
        # find a character that's not part of the current token - since
        # that character may be part of the next token, it's stored in the
        # charstack.
        if self.charstack:
            nextchar = self.charstack.pop(0)
        else:
            nextchar = self.instream.read(1)
            # Skip embedded NUL characters entirely.
            while nextchar == '\x00':
                nextchar = self.instream.read(1)

        if not nextchar:
            self.eof = True
            break
        elif not state:
            # First character of the token - determines if we're starting
            # to parse a word, a number or something else.
            token = nextchar
            if self.isword(nextchar):
                state = 'a'
            elif self.isnum(nextchar):
                state = '0'
            elif self.isspace(nextchar):
                token = ' '
                break  # emit token
            else:
                break  # emit token
        elif state == 'a':
            # If we've already started reading a word, we keep reading
            # letters until we find something that's not part of a word.
            seenletters = True
            if self.isword(nextchar):
                token += nextchar
            elif nextchar == '.':
                token += nextchar
                state = 'a.'
            else:
                self.charstack.append(nextchar)
                break  # emit token
        elif state == '0':
            # If we've already started reading a number, we keep reading
            # numbers until we find something that doesn't fit.
            if self.isnum(nextchar):
                token += nextchar
            elif nextchar == '.' or (nextchar == ',' and len(token) >= 2):
                token += nextchar
                state = '0.'
            else:
                self.charstack.append(nextchar)
                break  # emit token
        elif state == 'a.':
            # If we've seen some letters and a dot separator, continue
            # parsing, and the tokens will be broken up later.
            seenletters = True
            if nextchar == '.' or self.isword(nextchar):
                token += nextchar
            elif self.isnum(nextchar) and token[-1] == '.':
                token += nextchar
                state = '0.'
            else:
                self.charstack.append(nextchar)
                break  # emit token
        elif state == '0.':
            # If we've seen at least one dot separator, keep going, we'll
            # break up the tokens later.
            if nextchar == '.' or self.isnum(nextchar):
                token += nextchar
            elif self.isword(nextchar) and token[-1] == '.':
                token += nextchar
                state = 'a.'
            else:
                self.charstack.append(nextchar)
                break  # emit token

    # A dotted run that contains letters, multiple dots, or a trailing
    # separator is a date like "Sep.20.2009" - split it on the dots and
    # queue the remainder on the token stack.
    if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or
                                   token[-1] in '.,')):
        l = self._split_decimal.split(token)
        token = l[0]
        for tok in l[1:]:
            if tok:
                self.tokenstack.append(tok)

    # A pure number with a comma decimal mark ("4,30") is normalized to
    # use a dot so it parses as a float later.
    if state == '0.' and token.count('.') == 0:
        token = token.replace(',', '.')

    return token
def find_probable_year_index(self, tokens):
    """
    attempt to deduce if a pre 100 year was lost
    due to padded zeros being taken off

    :param tokens:
        The full token list the y/m/d values were drawn from.
    :return:
        The index within ``self`` of the value judged to be the year, or
        ``None`` when no single candidate stands out.
    """
    # NOTE(review): this iterates ``self`` (the collected y/m/d values),
    # not ``tokens`` - the returned index refers to positions in ``self``.
    for index, token in enumerate(self):
        potential_year_tokens = _ymd.find_potential_year_tokens(
            token, tokens)
        # A single candidate longer than two digits is assumed to be a
        # year whose leading zeros were not stripped.
        if len(potential_year_tokens) == 1 and len(potential_year_tokens[0]) > 2:
            return index
def parse(self, timestr, default=None, ignoretz=False, tzinfos=None, **kwargs):
    """
    Parse the date/time string into a :class:`datetime.datetime` object.

    :param timestr:
        Any date/time string using the supported formats.

    :param default:
        The default datetime object, if this is a datetime object and not
        ``None``, elements specified in ``timestr`` replace elements in the
        default object.

    :param ignoretz:
        If set ``True``, time zones in parsed strings are ignored and a
        naive :class:`datetime.datetime` object is returned.

    :param tzinfos:
        Additional time zone names / aliases which may be present in the
        string. This argument maps time zone names (and optionally offsets
        from those time zones) to time zones. This parameter can be a
        dictionary with timezone aliases mapping time zone names to time
        zones or a function taking two parameters (``tzname`` and
        ``tzoffset``) and returning a time zone.

        The timezones to which the names are mapped can be an integer
        offset from UTC in minutes or a :class:`tzinfo` object.

        .. doctest::
           :options: +NORMALIZE_WHITESPACE

            >>> from dateutil.parser import parse
            >>> from dateutil.tz import gettz
            >>> tzinfos = {"BRST": -10800, "CST": gettz("America/Chicago")}
            >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
            datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -10800))
            >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
            datetime.datetime(2012, 1, 19, 17, 21,
                              tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))

        This parameter is ignored if ``ignoretz`` is set.

    :param **kwargs:
        Keyword arguments as passed to ``_parse()``.

    :return:
        Returns a :class:`datetime.datetime` object or, if the
        ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
        first element being a :class:`datetime.datetime` object, the second
        a tuple containing the fuzzy tokens.

    :raises ValueError:
        Raised for invalid or unknown string format, if the provided
        :class:`tzinfo` is not in a valid format, or if an invalid date
        would be created.

    :raises TypeError:
        Raised for non-string or character stream input.

    :raises OverflowError:
        Raised if the parsed date exceeds the largest valid C integer on
        your system.
    """
    if default is None:
        default = datetime.datetime.now().replace(hour=0, minute=0,
                                                  second=0, microsecond=0)

    res, skipped_tokens = self._parse(timestr, **kwargs)

    if res is None:
        raise ValueError("Unknown string format")

    if len(res) == 0:
        raise ValueError("String does not contain a date.")

    repl = {}
    for attr in ("year", "month", "day", "hour",
                 "minute", "second", "microsecond"):
        value = getattr(res, attr)
        if value is not None:
            repl[attr] = value

    if 'day' not in repl:
        # If the default day exceeds the last day of the month, fall back to
        # the end of the month.
        cyear = default.year if res.year is None else res.year
        cmonth = default.month if res.month is None else res.month
        cday = default.day if res.day is None else res.day

        if cday > monthrange(cyear, cmonth)[1]:
            repl['day'] = monthrange(cyear, cmonth)[1]

    ret = default.replace(**repl)

    if res.weekday is not None and not res.day:
        ret = ret + relativedelta.relativedelta(weekday=res.weekday)

    if not ignoretz:
        # BUGFIX: ``collections.Callable`` was deprecated in Python 3.3 and
        # removed in Python 3.10; the builtin ``callable()`` is equivalent
        # and works on all supported interpreters.
        if (callable(tzinfos) or
                tzinfos and res.tzname in tzinfos):
            if callable(tzinfos):
                tzdata = tzinfos(res.tzname, res.tzoffset)
            else:
                tzdata = tzinfos.get(res.tzname)

            if isinstance(tzdata, datetime.tzinfo):
                tzinfo = tzdata
            elif isinstance(tzdata, text_type):
                tzinfo = tz.tzstr(tzdata)
            elif isinstance(tzdata, integer_types):
                tzinfo = tz.tzoffset(res.tzname, tzdata)
            else:
                raise ValueError("Offset must be tzinfo subclass, "
                                 "tz string, or int offset.")
            ret = ret.replace(tzinfo=tzinfo)
        elif res.tzname and res.tzname in time.tzname:
            ret = ret.replace(tzinfo=tz.tzlocal())
        elif res.tzoffset == 0:
            ret = ret.replace(tzinfo=tz.tzutc())
        elif res.tzoffset:
            ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))

    if kwargs.get('fuzzy_with_tokens', False):
        return ret, skipped_tokens
    else:
        return ret
def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
           fuzzy_with_tokens=False):
    """
    Private method which performs the heavy lifting of parsing, called from
    ``parse()``, which passes on its ``kwargs`` to this function.

    :param timestr:
        The string to parse.

    :param dayfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the day (``True``) or month (``False``). If
        ``yearfirst`` is set to ``True``, this distinguishes between YDM
        and YMD. If set to ``None``, this value is retrieved from the
        current :class:`parserinfo` object (which itself defaults to
        ``False``).

    :param yearfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the year. If ``True``, the first number is taken
        to be the year, otherwise the last number is taken to be the year.
        If this is set to ``None``, the value is retrieved from the current
        :class:`parserinfo` object (which itself defaults to ``False``).

    :param fuzzy:
        Whether to allow fuzzy parsing, allowing for string like "Today is
        January 1, 2047 at 8:21:00AM".

    :param fuzzy_with_tokens:
        If ``True``, ``fuzzy`` is automatically set to True, and the parser
        will return a tuple where the first element is the parsed
        :class:`datetime.datetime` datetimestamp and the second element is
        a tuple containing the portions of the string which were ignored:

        .. doctest::

            >>> from dateutil.parser import parse
            >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
            (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))

    :return:
        A ``(res, skipped_tokens)`` tuple, or ``(None, None)`` when the
        string cannot be parsed.
    """
    if fuzzy_with_tokens:
        fuzzy = True

    info = self.info

    if dayfirst is None:
        dayfirst = info.dayfirst

    if yearfirst is None:
        yearfirst = info.yearfirst

    res = self._result()
    l = _timelex.split(timestr)         # Splits the timestr into tokens

    # keep up with the last token skipped so we can recombine
    # consecutively skipped tokens (-2 for when i begins at 0).
    last_skipped_token_i = -2
    skipped_tokens = list()

    try:
        # year/month/day list
        ymd = _ymd(timestr)

        # Index of the month string in ymd
        mstridx = -1

        len_l = len(l)
        i = 0
        while i < len_l:

            # Check if it's a number
            try:
                value_repr = l[i]
                value = float(value_repr)
            except ValueError:
                value = None

            if value is not None:
                # Token is a number
                len_li = len(l[i])
                i += 1

                if (len(ymd) == 3 and len_li in (2, 4)
                        and res.hour is None and (i >= len_l or (l[i] != ':' and
                                                  info.hms(l[i]) is None))):
                    # 19990101T23[59]
                    s = l[i - 1]
                    res.hour = int(s[:2])

                    if len_li == 4:
                        res.minute = int(s[2:])

                elif len_li == 6 or (len_li > 6 and l[i - 1].find('.') == 6):
                    # YYMMDD or HHMMSS[.ss]
                    s = l[i - 1]

                    if not ymd and l[i - 1].find('.') == -1:
                        # ymd.append(info.convertyear(int(s[:2])))

                        ymd.append(s[:2])
                        ymd.append(s[2:4])
                        ymd.append(s[4:])
                    else:
                        # 19990101T235959[.59]
                        res.hour = int(s[:2])
                        res.minute = int(s[2:4])
                        res.second, res.microsecond = _parsems(s[4:])

                elif len_li in (8, 12, 14):
                    # YYYYMMDD
                    s = l[i - 1]
                    ymd.append(s[:4])
                    ymd.append(s[4:6])
                    ymd.append(s[6:8])

                    if len_li > 8:
                        res.hour = int(s[8:10])
                        res.minute = int(s[10:12])

                        if len_li > 12:
                            res.second = int(s[12:])

                elif ((i < len_l and info.hms(l[i]) is not None) or
                      (i + 1 < len_l and l[i] == ' ' and
                       info.hms(l[i + 1]) is not None)):

                    # HH[ ]h or MM[ ]m or SS[.ss][ ]s
                    if l[i] == ' ':
                        i += 1

                    idx = info.hms(l[i])

                    while True:
                        if idx == 0:
                            res.hour = int(value)

                            if value % 1:
                                res.minute = int(60 * (value % 1))

                        elif idx == 1:
                            res.minute = int(value)

                            if value % 1:
                                res.second = int(60 * (value % 1))

                        elif idx == 2:
                            res.second, res.microsecond = \
                                _parsems(value_repr)

                        i += 1

                        if i >= len_l or idx == 2:
                            break

                        # 12h00
                        try:
                            value_repr = l[i]
                            value = float(value_repr)
                        except ValueError:
                            break
                        else:
                            i += 1
                            idx += 1

                            if i < len_l:
                                newidx = info.hms(l[i])

                                if newidx is not None:
                                    idx = newidx

                elif (i == len_l and l[i - 2] == ' ' and
                      info.hms(l[i - 3]) is not None):
                    # X h MM or X m SS
                    idx = info.hms(l[i - 3])

                    if idx == 0:  # h
                        res.minute = int(value)

                        sec_remainder = value % 1
                        if sec_remainder:
                            res.second = int(60 * sec_remainder)
                    elif idx == 1:  # m
                        res.second, res.microsecond = \
                            _parsems(value_repr)

                    # We don't need to advance the tokens here because the
                    # i == len_l call indicates that we're looking at all
                    # the tokens already.

                elif i + 1 < len_l and l[i] == ':':
                    # HH:MM[:SS[.ss]]
                    res.hour = int(value)
                    i += 1
                    value = float(l[i])
                    res.minute = int(value)

                    if value % 1:
                        res.second = int(60 * (value % 1))

                    i += 1

                    if i < len_l and l[i] == ':':
                        res.second, res.microsecond = _parsems(l[i + 1])
                        i += 2

                elif i < len_l and l[i] in ('-', '/', '.'):
                    sep = l[i]
                    ymd.append(value_repr)
                    i += 1

                    if i < len_l and not info.jump(l[i]):
                        try:
                            # 01-01[-01]
                            ymd.append(l[i])
                        except ValueError:
                            # 01-Jan[-01]
                            value = info.month(l[i])

                            if value is not None:
                                ymd.append(value)
                                assert mstridx == -1
                                mstridx = len(ymd) - 1
                            else:
                                return None, None

                        i += 1

                        if i < len_l and l[i] == sep:
                            # We have three members
                            i += 1
                            value = info.month(l[i])

                            if value is not None:
                                ymd.append(value)
                                # BUGFIX: the assert must run *before*
                                # mstridx is reassigned (as in the
                                # two-member branch above); the original
                                # order made this assert always fail,
                                # silently aborting the parse via the
                                # AssertionError handler below.
                                assert mstridx == -1
                                mstridx = len(ymd) - 1
                            else:
                                ymd.append(l[i])

                            i += 1
                elif i >= len_l or info.jump(l[i]):
                    if i + 1 < len_l and info.ampm(l[i + 1]) is not None:
                        # 12 am
                        res.hour = int(value)

                        if res.hour < 12 and info.ampm(l[i + 1]) == 1:
                            res.hour += 12
                        elif res.hour == 12 and info.ampm(l[i + 1]) == 0:
                            res.hour = 0

                        i += 1
                    else:
                        # Year, month or day
                        ymd.append(value)
                    i += 1
                elif info.ampm(l[i]) is not None:
                    # 12am
                    res.hour = int(value)

                    if res.hour < 12 and info.ampm(l[i]) == 1:
                        res.hour += 12
                    elif res.hour == 12 and info.ampm(l[i]) == 0:
                        res.hour = 0
                    i += 1

                elif not fuzzy:
                    return None, None
                else:
                    i += 1
                continue

            # Check weekday
            value = info.weekday(l[i])
            if value is not None:
                res.weekday = value
                i += 1
                continue

            # Check month name
            value = info.month(l[i])
            if value is not None:
                ymd.append(value)
                assert mstridx == -1
                mstridx = len(ymd) - 1

                i += 1
                if i < len_l:
                    if l[i] in ('-', '/'):
                        # Jan-01[-99]
                        sep = l[i]
                        i += 1
                        ymd.append(l[i])
                        i += 1

                        if i < len_l and l[i] == sep:
                            # Jan-01-99
                            i += 1
                            ymd.append(l[i])
                            i += 1

                    elif (i + 3 < len_l and l[i] == l[i + 2] == ' '
                          and info.pertain(l[i + 1])):
                        # Jan of 01
                        # In this case, 01 is clearly year
                        try:
                            value = int(l[i + 3])
                        except ValueError:
                            # Wrong guess
                            pass
                        else:
                            # Convert it here to become unambiguous
                            ymd.append(str(info.convertyear(value)))
                        i += 4
                continue

            # Check am/pm
            value = info.ampm(l[i])
            if value is not None:
                # For fuzzy parsing, 'a' or 'am' (both valid English words)
                # may erroneously trigger the AM/PM flag. Deal with that
                # here.
                val_is_ampm = True

                # If there's already an AM/PM flag, this one isn't one.
                if fuzzy and res.ampm is not None:
                    val_is_ampm = False

                # If AM/PM is found and hour is not, raise a ValueError
                if res.hour is None:
                    if fuzzy:
                        val_is_ampm = False
                    else:
                        raise ValueError('No hour specified with ' +
                                         'AM or PM flag.')
                elif not 0 <= res.hour <= 12:
                    # If AM/PM is found, it's a 12 hour clock, so raise
                    # an error for invalid range
                    if fuzzy:
                        val_is_ampm = False
                    else:
                        raise ValueError('Invalid hour specified for ' +
                                         '12-hour clock.')

                if val_is_ampm:
                    if value == 1 and res.hour < 12:
                        res.hour += 12
                    elif value == 0 and res.hour == 12:
                        res.hour = 0

                    res.ampm = value

                elif fuzzy:
                    last_skipped_token_i = self._skip_token(skipped_tokens,
                                                            last_skipped_token_i, i, l)
                i += 1
                continue

            # Check for a timezone name
            if (res.hour is not None and len(l[i]) <= 5 and
                    res.tzname is None and res.tzoffset is None and
                    not [x for x in l[i] if x not in
                         string.ascii_uppercase]):
                res.tzname = l[i]
                res.tzoffset = info.tzoffset(res.tzname)
                i += 1

                # Check for something like GMT+3, or BRST+3. Notice
                # that it doesn't mean "I am 3 hours after GMT", but
                # "my time +3 is GMT". If found, we reverse the
                # logic so that timezone parsing code will get it
                # right.
                if i < len_l and l[i] in ('+', '-'):
                    l[i] = ('+', '-')[l[i] == '+']
                    res.tzoffset = None
                    if info.utczone(res.tzname):
                        # With something like GMT+3, the timezone
                        # is *not* GMT.
                        res.tzname = None

                continue

            # Check for a numbered timezone
            if res.hour is not None and l[i] in ('+', '-'):
                signal = (-1, 1)[l[i] == '+']
                i += 1
                len_li = len(l[i])

                if len_li == 4:
                    # -0300
                    res.tzoffset = int(l[i][:2]) * \
                        3600 + int(l[i][2:]) * 60
                elif i + 1 < len_l and l[i + 1] == ':':
                    # -03:00
                    res.tzoffset = int(l[i]) * 3600 + int(l[i + 2]) * 60
                    i += 2
                elif len_li <= 2:
                    # -[0]3
                    res.tzoffset = int(l[i][:2]) * 3600
                else:
                    return None, None
                i += 1

                res.tzoffset *= signal

                # Look for a timezone name between parenthesis
                if (i + 3 < len_l and
                        info.jump(l[i]) and l[i + 1] == '(' and l[i + 3] == ')' and
                        3 <= len(l[i + 2]) <= 5 and
                        not [x for x in l[i + 2]
                             if x not in string.ascii_uppercase]):
                    # -0300 (BRST)
                    res.tzname = l[i + 2]
                    i += 4
                continue

            # Check jumps
            if not (info.jump(l[i]) or fuzzy):
                return None, None

            last_skipped_token_i = self._skip_token(skipped_tokens,
                                                    last_skipped_token_i, i, l)
            i += 1

        # Process year/month/day
        year, month, day = ymd.resolve_ymd(mstridx, yearfirst, dayfirst)
        if year is not None:
            res.year = year
            res.century_specified = ymd.century_specified

        if month is not None:
            res.month = month

        if day is not None:
            res.day = day

    except (IndexError, ValueError, AssertionError):
        return None, None

    if not info.validate(res):
        return None, None

    if fuzzy_with_tokens:
        return res, tuple(skipped_tokens)
    else:
        return res, None
def tzname_in_python2(namefunc):
    """Change unicode output into bytestrings in Python 2

    tzname() API changed in Python 3. It used to return bytes, but was changed
    to unicode strings
    """
    # Preserve the wrapped function's metadata (__name__, __doc__, ...);
    # the original decorator dropped it.
    @wraps(namefunc)
    def adjust_encoding(*args, **kwargs):
        name = namefunc(*args, **kwargs)
        # Python 2's tzname() API expects bytes, so encode the unicode
        # result there; Python 3 keeps the string as-is.
        if name is not None and not PY3:
            name = name.encode()

        return name

    return adjust_encoding
def _validate_fromutc_inputs(f):
"""
The CPython version of ``fromutc`` checks that the input is a ``datetime``
object and that ``self`` is attached as its ``tzinfo``.
"""
@wraps(f)
def fromutc(self, dt):
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
return f(self, dt)
return fromutc |
def is_ambiguous(self, dt):
    """
    Whether or not the "wall time" of a given datetime is ambiguous in this
    zone.

    :param dt:
        A :py:class:`datetime.datetime`, naive or time zone aware.

    :return:
        Returns ``True`` if ambiguous, ``False`` otherwise.

    .. versionadded:: 2.6.0
    """
    # Attach this zone and inspect both sides of the fold: the wall time
    # is ambiguous iff the two folds describe the same naive time but map
    # to different UTC offsets.
    localized = dt.replace(tzinfo=self)

    first_side = enfold(localized, fold=0)
    second_side = enfold(localized, fold=1)

    walls_match = (first_side.replace(tzinfo=None) ==
                   second_side.replace(tzinfo=None))
    offsets_differ = first_side.utcoffset() != second_side.utcoffset()

    return walls_match and offsets_differ
def _fold_status(self, dt_utc, dt_wall):
"""
Determine the fold status of a "wall" datetime, given a representation
of the same datetime as a (naive) UTC datetime. This is calculated based
on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all
datetimes, and that this offset is the actual number of hours separating
``dt_utc`` and ``dt_wall``.
:param dt_utc:
Representation of the datetime as UTC
:param dt_wall:
Representation of the datetime as "wall time". This parameter must
either have a `fold` attribute or have a fold-naive
:class:`datetime.tzinfo` attached, otherwise the calculation may
fail.
"""
if self.is_ambiguous(dt_wall):
delta_wall = dt_wall - dt_utc
_fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst()))
else:
_fold = 0
return _fold |
def _fromutc(self, dt):
    """
    Given a timezone-aware datetime in a given timezone, calculates a
    timezone-aware datetime in a new timezone.

    Since this is the one time that we *know* we have an unambiguous
    datetime object, we take this opportunity to determine whether the
    datetime is ambiguous and in a "fold" state (e.g. if it's the first
    occurrence, chronologically, of the ambiguous datetime).

    :param dt:
        A timezone-aware :class:`datetime.datetime` object.
    """
    # Re-implementation of the conversion algorithm from CPython's
    # datetime.py.
    offset = dt.utcoffset()
    if offset is None:
        raise ValueError("fromutc() requires a non-None utcoffset() "
                         "result")

    dst_offset = dt.dst()
    if dst_offset is None:
        raise ValueError("fromutc() requires a non-None dst() result")

    # Move onto standard time first. datetime.py assumes dst() is zero
    # during ambiguous times, while PEP 495 inverts that presumption, so
    # probe the DST offset with fold=1 to stay correct on pre-PEP-495
    # interpreters.
    dt += offset - dst_offset

    dst_offset = enfold(dt, fold=1).dst()
    if dst_offset is None:
        raise ValueError("fromutc(): dt.dst gave inconsistent "
                         "results; cannot convert")

    return dt + dst_offset
def fromutc(self, dt):
    """
    Given a timezone-aware datetime in a given timezone, calculates a
    timezone-aware datetime in a new timezone.

    Since this is the one time that we *know* we have an unambiguous
    datetime object, we take this opportunity to determine whether the
    datetime is ambiguous and in a "fold" state (e.g. if it's the first
    occurrence, chronologically, of the ambiguous datetime).

    :param dt:
        A timezone-aware :class:`datetime.datetime` object.
    """
    # Convert first, then stamp the result with its fold status so that
    # ambiguous wall times carry the correct default fold.
    wall = self._fromutc(dt)
    return enfold(wall, fold=self._fold_status(dt, wall))
def fromutc(self, dt):
    """ Given a datetime in UTC, return local time """
    if not isinstance(dt, datetime):
        raise TypeError("fromutc() requires a datetime argument")
    if dt.tzinfo is not self:
        raise ValueError("dt.tzinfo is not self")

    # Fixed-offset zone: nothing to transition through.
    transitions = self.transitions(dt.year)
    if transitions is None:
        return dt + self.utcoffset(dt)

    # Express the DST-on/DST-off instants on the UTC side.
    dston, dstoff = (t - self._std_offset for t in transitions)

    naive_utc = dt.replace(tzinfo=None)
    isdst = self._naive_isdst(naive_utc, (dston, dstoff))

    offset = self._dst_offset if isdst else self._std_offset
    dt_wall = dt + offset

    # Wall times on the STD side that are ambiguous land in the fold.
    fold = int(not isdst and self.is_ambiguous(dt_wall))
    return enfold(dt_wall, fold=fold)
def is_ambiguous(self, dt):
    """
    Whether or not the "wall time" of a given datetime is ambiguous in this
    zone.

    :param dt:
        A :py:class:`datetime.datetime`, naive or time zone aware.

    :return:
        Returns ``True`` if ambiguous, ``False`` otherwise.

    .. versionadded:: 2.6.0
    """
    # Fixed-offset zones never produce ambiguous wall times.
    if not self.hasdst:
        return False

    # Wall times are ambiguous in the window right after the DST-to-STD
    # transition, when the clock is set back by the base DST offset.
    _, dst_end = self.transitions(dt.year)
    naive = dt.replace(tzinfo=None)
    return dst_end <= naive < dst_end + self._dst_base_offset
def strip_comment_line_with_symbol(line, start):
    """Strip comments from line string.

    The line is split on ``start`` and re-joined up to the first split
    point that lies outside a double-quoted section; an even running count
    of unescaped quotes means we are outside a string literal.
    """
    pieces = line.split(start)
    quote_counts = [len(findall(r'(?:^|[^"\\]|(?:\\\\|\\")+)(")', piece))
                    for piece in pieces]

    running_total = 0
    for idx, count in enumerate(quote_counts):
        running_total += count
        if running_total % 2 == 0:
            return start.join(pieces[:idx + 1]).rstrip()
    else:  # pragma: no cover
        # Unbalanced quotes throughout: leave the line intact.
        return line.rstrip()
def strip_comments(string, comment_symbols=frozenset(('#', '//'))):
    """Strip comments from json string.

    :param string: A string containing json with comments started by comment_symbols.
    :param comment_symbols: Iterable of symbols that start a line comment (default # or //).
    :return: The string with the comments removed.
    """
    stripped_lines = []
    for line in string.splitlines():
        # Apply every comment symbol in turn; each pass may shorten the line.
        for symbol in comment_symbols:
            line = strip_comment_line_with_symbol(line, start=symbol)
        stripped_lines.append(line)
    return '\n'.join(stripped_lines)
def rebuild(filename, tag=None, format="gz", zonegroups=(), metadata=None):
    """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*

    filename is the timezone tarball from ftp.iana.org/tz.

    :param filename: Path to the IANA timezone source tarball.
    :param tag: Unused; kept for backward compatibility.
    :param format: Compression format for the output tarball (e.g. "gz").
    :param zonegroups: Iterable of tarball member names to extract and
        compile.  The default used to be a mutable list (``[]``); an
        immutable tuple avoids the shared-mutable-default pitfall and is
        behaviorally identical since the value is only iterated.
    :param metadata: JSON-serializable metadata written next to the zones.
    """
    tmpdir = tempfile.mkdtemp()
    zonedir = os.path.join(tmpdir, "zoneinfo")
    moduledir = os.path.dirname(__file__)
    try:
        with tar_open(filename) as tf:
            for name in zonegroups:
                tf.extract(name, tmpdir)
            filepaths = [os.path.join(tmpdir, n) for n in zonegroups]
            try:
                # ``zic`` compiles the extracted sources into binary
                # tzfiles under ``zonedir``.
                check_call(["zic", "-d", zonedir] + filepaths)
            except OSError as e:
                _print_on_nosuchfile(e)
                raise
        # write metadata file
        with open(os.path.join(zonedir, METADATA_FN), 'w') as f:
            json.dump(metadata, f, indent=4, sort_keys=True)
        target = os.path.join(moduledir, ZONEFILENAME)
        with tar_open(target, "w:%s" % format) as tf:
            for entry in os.listdir(zonedir):
                entrypath = os.path.join(zonedir, entry)
                tf.add(entrypath, entry)
    finally:
        # Always clean up the scratch directory, even if zic or tar fails.
        shutil.rmtree(tmpdir)
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
    """ dayofweek == 0 means Sunday, whichweek 5 means last instance """
    first = datetime.datetime(year, month, 1, hour, minute)

    # Both ISO weekdays (1-7) and Microsoft-style ones (0-6) work here,
    # because 7 % 7 == 0.
    days_until = (dayofweek - first.isoweekday()) % 7
    weekdayone = first.replace(day=days_until + 1)

    candidate = weekdayone + (whichweek - 1) * ONEWEEK
    # Requesting the 5th instance can overshoot into the next month; step
    # back one week to land on the last instance within the month.
    if candidate.month != month:
        candidate -= ONEWEEK

    return candidate
def valuestodict(key):
    """Convert a registry key's values to a dictionary."""
    result = {}
    tz_res = None

    value_count = winreg.QueryInfoKey(key)[1]
    for index in range(value_count):
        key_name, value, dtype = winreg.EnumValue(key, index)

        if dtype in (winreg.REG_DWORD, winreg.REG_DWORD_LITTLE_ENDIAN):
            # DWORDs are stored unsigned; reinterpret as a signed 32-bit
            # integer.
            if value & (1 << 31):
                value -= 1 << 32
        elif dtype == winreg.REG_SZ:
            # References into the tzres DLL are resolved to actual strings.
            if value.startswith('@tzres'):
                tz_res = tz_res or tzres()
                value = tz_res.name_from_string(value)

            value = value.rstrip('\x00')    # Remove trailing nulls

        result[key_name] = value

    return result
def load_name(self, offset):
    """
    Load a timezone name from a DLL offset (integer).

    >>> from dateutil.tzwin import tzres
    >>> tzr = tzres()
    >>> print(tzr.load_name(112))
    'Eastern Standard Time'

    :param offset:
        A positive integer value referring to a string from the tzres dll.

    ..note:
        Offsets found in the registry are generally of the form
        `@tzres.dll,-114`. The offset in this case is 114, not -114.
    """
    # Calling LoadStringW with nBufferMax == 0 makes Windows write a
    # read-only pointer to the resource string through lpBuffer and return
    # its length in characters, so the name can be sliced out directly.
    resource = self.p_wchar()
    lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR)
    nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0)
    return resource[:nchar]
def name_from_string(self, tzname_str):
    """
    Parse strings as returned from the Windows registry into the time zone
    name as defined in the registry.

    >>> from dateutil.tzwin import tzres
    >>> tzr = tzres()
    >>> print(tzr.name_from_string('@tzres.dll,-251'))
    'Dateline Daylight Time'
    >>> print(tzr.name_from_string('Eastern Standard Time'))
    'Eastern Standard Time'

    :param tzname_str:
        A timezone name string as returned from a Windows registry key.

    :return:
        Returns the localized timezone string from tzres.dll if the string
        is of the form `@tzres.dll,-offset`, else returns the input string.

    :raises ValueError:
        If the string starts with ``@`` but the DLL offset is missing or
        not an integer.
    """
    if not tzname_str.startswith('@'):
        return tzname_str

    name_splt = tzname_str.split(',-')
    try:
        offset = int(name_splt[1])
    except (IndexError, ValueError):
        # The original bare ``except:`` swallowed everything, including
        # KeyboardInterrupt; only a missing or non-numeric offset makes
        # the string malformed.
        raise ValueError("Malformed timezone string.")

    return self.load_name(offset)
def transitions(self, year):
    """
    For a given year, get the DST on and off transition times, expressed
    always on the standard time side. For zones with no transitions, this
    function returns ``None``.

    :param year:
        The year whose transitions you would like to query.

    :return:
        Returns a :class:`tuple` of :class:`datetime.datetime` objects,
        ``(dston, dstoff)`` for zones with an annual DST transition, or
        ``None`` for fixed offset zones.
    """
    if not self.hasdst:
        return None

    dston = picknthweekday(year, self._dstmonth, self._dstdayofweek,
                           self._dsthour, self._dstminute,
                           self._dstweeknumber)

    dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek,
                            self._stdhour, self._stdminute,
                            self._stdweeknumber)

    # Shift the DST-off instant onto the standard-time side so that
    # ambiguous dates default to STD.
    return dston, dstoff - self._dst_base_offset
def get_zonefile_instance(new_instance=False):
    """
    This is a convenience function which provides a :class:`ZoneInfoFile`
    instance using the data provided by the ``dateutil`` package. By default, it
    caches a single instance of the ZoneInfoFile object and returns that.

    :param new_instance:
        If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and
        used as the cached instance for the next call. Otherwise, new instances
        are created only as necessary.

    :return:
        Returns a :class:`ZoneInfoFile` object.

    .. versionadded:: 2.6
    """
    # Ignore any cached instance when a fresh one was requested.
    cached = (None if new_instance
              else getattr(get_zonefile_instance, '_cached_instance', None))

    if cached is None:
        cached = ZoneInfoFile(getzoneinfofile_stream())
        # Store (or replace) the cache so subsequent calls reuse it.
        get_zonefile_instance._cached_instance = cached

    return cached
def gettz(name):
    """
    This retrieves a time zone from the local zoneinfo tarball that is packaged
    with dateutil.

    :param name:
        An IANA-style time zone name, as found in the zoneinfo file.

    :return:
        Returns a :class:`dateutil.tz.tzfile` time zone object.

    .. warning::
        It is generally inadvisable to use this function, and it is only
        provided for API compatibility with earlier versions. This is *not*
        equivalent to ``dateutil.tz.gettz()``, which selects an appropriate
        time zone based on the inputs, favoring system zoneinfo. This is ONLY
        for accessing the dateutil-specific zoneinfo (which may be out of
        date compared to the system zoneinfo).

    .. deprecated:: 2.6
        If you need to use a specific zoneinfofile over the system zoneinfo,
        instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call
        :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead.

        Use :func:`get_zonefile_instance` to retrieve an instance of the
        dateutil-provided zoneinfo.
    """
    warnings.warn("zoneinfo.gettz() will be removed in future versions, "
                  "to use the dateutil-provided zoneinfo files, instantiate a "
                  "ZoneInfoFile object and use ZoneInfoFile.zones.get() "
                  "instead. See the documentation for details.",
                  DeprecationWarning)

    # Lazily populate the module-level singleton on first use.
    if not _CLASS_ZONE_INSTANCE:
        _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
    return _CLASS_ZONE_INSTANCE[0].zones.get(name)
def gettz_db_metadata():
    """ Get the zonefile metadata

    See `zonefile_metadata`_

    :returns:
        A dictionary with the database metadata

    .. deprecated:: 2.6
        See deprecation warning in :func:`zoneinfo.gettz`. To get metadata,
        query the attribute ``zoneinfo.ZoneInfoFile.metadata``.
    """
    # The warning text previously read "...zoneinfo files, ZoneInfoFile
    # object and query..." - the missing "instantiate a" made the advice
    # ungrammatical.
    warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future "
                  "versions, to use the dateutil-provided zoneinfo files, "
                  "instantiate a ZoneInfoFile object and query the "
                  "'metadata' attribute instead. See the documentation for "
                  "details.",
                  DeprecationWarning)

    # Lazily populate the module-level singleton on first use.
    if len(_CLASS_ZONE_INSTANCE) == 0:
        _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
    return _CLASS_ZONE_INSTANCE[0].metadata
def get_config(jid):
    """Get the configuration for the given JID based on XMPP_HTTP_UPLOAD_ACCESS.

    If the JID does not match any rule, ``False`` is returned.
    """
    acls = getattr(settings, 'XMPP_HTTP_UPLOAD_ACCESS', (('.*', False), ))

    for regex, config in acls:
        # A bare string pattern is treated as a one-element pattern list.
        patterns = [regex] if isinstance(regex, six.string_types) else regex
        if any(re.search(pattern, jid) for pattern in patterns):
            return config

    return False
def datetime_exists(dt, tz=None):
    """
    Given a datetime and a time zone, determine whether or not a given datetime
    would fall in a gap.

    :param dt:
        A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
        is provided.)

    :param tz:
        A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
        ``None`` or not provided, the datetime's own time zone will be used.

    :return:
        Returns a boolean value whether or not the "wall time" exists in ``tz``.
    """
    if tz is None:
        if dt.tzinfo is None:
            raise ValueError('Datetime is naive and no time zone provided.')
        tz = dt.tzinfo
        dt = dt.replace(tzinfo=None)

    # A wall time exists iff it survives a round trip through UTC.
    round_trip = (dt.replace(tzinfo=tz)
                  .astimezone(tzutc())
                  .astimezone(tz)
                  .replace(tzinfo=None))
    return dt == round_trip
def datetime_ambiguous(dt, tz=None):
    """
    Given a datetime and a time zone, determine whether or not a given datetime
    is ambiguous (i.e if there are two times differentiated only by their DST
    status).

    :param dt:
        A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
        is provided.)

    :param tz:
        A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
        ``None`` or not provided, the datetime's own time zone will be used.

    :return:
        Returns a boolean value whether or not the "wall time" is ambiguous in
        ``tz``.

    :raises ValueError:
        Raised if ``dt`` is naive and no ``tz`` is provided.

    .. versionadded:: 2.6.0
    """
    if tz is None:
        if dt.tzinfo is None:
            raise ValueError('Datetime is naive and no time zone provided.')
        tz = dt.tzinfo

    # If a time zone defines its own "is_ambiguous" function, we'll use that.
    is_ambiguous_fn = getattr(tz, 'is_ambiguous', None)
    if is_ambiguous_fn is not None:
        try:
            return is_ambiguous_fn(dt)
        except Exception:
            # Fall back to the fold-based check below.  Narrowed from a bare
            # "except:", which would also swallow KeyboardInterrupt/SystemExit.
            pass

    # If it doesn't come out and tell us it's ambiguous, we'll just check if
    # the fold attribute has any effect on this particular date and time.
    dt = dt.replace(tzinfo=tz)
    wall_0 = enfold(dt, fold=0)
    wall_1 = enfold(dt, fold=1)

    same_offset = wall_0.utcoffset() == wall_1.utcoffset()
    same_dst = wall_0.dst() == wall_1.dst()

    return not (same_offset and same_dst)
def is_ambiguous(self, dt):
    """
    Whether or not the "wall time" of a given datetime is ambiguous in this
    zone.

    :param dt:
        A :py:class:`datetime.datetime`, naive or time zone aware.

    :return:
        Returns ``True`` if ambiguous, ``False`` otherwise.

    .. versionadded:: 2.6.0
    """
    currently_dst = self._naive_is_dst(dt)
    if currently_dst:
        # A wall time inside DST is never ambiguous in this model.
        return False

    # Ambiguous iff stepping back by the DST offset flips the DST verdict.
    return currently_dst != self._naive_is_dst(dt - self._dst_saved)
def _set_tzdata(self, tzobj):
    """ Set the time zone data of this object from a _tzfile object """
    # Mirror each _tzfile attribute onto this object under a private
    # (underscore-prefixed) name.
    for name in _tzfile.attrs:
        private_name = '_' + name
        setattr(self, private_name, getattr(tzobj, name))
def fromutc(self, dt):
    """
    The ``tzfile`` implementation of :py:func:`datetime.tzinfo.fromutc`.

    :param dt:
        A :py:class:`datetime.datetime` object.

    :raises TypeError:
        Raised if ``dt`` is not a :py:class:`datetime.datetime` object.

    :raises ValueError:
        Raised if this is called with a ``dt`` which does not have this
        ``tzinfo`` attached.

    :return:
        Returns a :py:class:`datetime.datetime` object representing the
        wall time in ``self``'s time zone.
    """
    # These isinstance checks are in datetime.tzinfo, so we'll preserve
    # them, even if we don't care about duck typing.
    if not isinstance(dt, datetime.datetime):
        raise TypeError("fromutc() requires a datetime argument")
    if dt.tzinfo is not self:
        raise ValueError("dt.tzinfo is not self")

    # Treat the UTC instant as wall time to locate the active transition,
    # then shift by that transition's offset to obtain the local wall time.
    transition_idx = self._find_last_transition(dt, in_utc=True)
    offset_info = self._get_ttinfo(transition_idx)
    wall_dt = dt + datetime.timedelta(seconds=offset_info.offset)

    # Record whether this wall time is the second occurrence via ``fold``.
    ambiguous = self.is_ambiguous(wall_dt, idx=transition_idx)
    return enfold(wall_dt, fold=int(ambiguous))
def is_ambiguous(self, dt, idx=None):
    """
    Whether or not the "wall time" of a given datetime is ambiguous in this
    zone.

    :param dt:
        A :py:class:`datetime.datetime`, naive or time zone aware.

    :param idx:
        Index of the transition to evaluate against; looked up from ``dt``
        when not provided.

    :return:
        Returns ``True`` if ambiguous, ``False`` otherwise.

    .. versionadded:: 2.6.0
    """
    if idx is None:
        idx = self._find_last_transition(dt)

    # Guard first: before (or at) the first transition nothing is ambiguous.
    # Checking here avoids computing the timestamp and calling
    # _get_ttinfo(None) only to discard the results.
    if idx is None or idx <= 0:
        return False

    # Calculate the difference in offsets from current to previous
    timestamp = _datetime_to_timestamp(dt)
    tti = self._get_ttinfo(idx)
    od = self._get_ttinfo(idx - 1).offset - tti.offset
    tt = self._trans_list[idx]          # Transition time

    # Ambiguous while the wall time is within the repeated interval.
    return timestamp < tt + od
def transitions(self, year):
    """
    For a given year, get the DST on and off transition times, expressed
    always on the standard time side. For zones with no transitions, this
    function returns ``None``.

    :param year:
        The year whose transitions you would like to query.

    :return:
        Returns a :class:`tuple` of :class:`datetime.datetime` objects,
        ``(dston, dstoff)`` for zones with an annual DST transition, or
        ``None`` for fixed offset zones.
    """
    if not self.hasdst:
        return None

    # Both transitions are expressed as deltas from January 1st of the year.
    anchor = datetime.datetime(year, 1, 1)
    return (anchor + self._start_delta, anchor + self._end_delta)
def get(self, tzid=None):
    """
    Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``.

    :param tzid:
        If there is exactly one time zone available, omitting ``tzid``
        or passing :py:const:`None` value returns it. Otherwise a valid
        key (which can be retrieved from :func:`keys`) is required.

    :raises ValueError:
        Raised if ``tzid`` is not specified but there are either more
        or fewer than 1 zone defined.

    :returns:
        Returns either a :py:class:`datetime.tzinfo` object representing
        the relevant time zone or :py:const:`None` if the ``tzid`` was
        not found.
    """
    if tzid is None:
        # Without an explicit tzid we can only answer when the choice
        # is unambiguous, i.e. exactly one zone is defined.
        zone_count = len(self._vtz)
        if zone_count == 0:
            raise ValueError("no timezones defined")
        if zone_count > 1:
            raise ValueError("more than one timezone available")
        tzid = next(iter(self._vtz))

    return self._vtz.get(tzid)
def normalized(self):
    """
    Return a version of this object represented entirely using integer
    values for the relative attributes.

    >>> relativedelta(days=1.5, hours=2).normalized()
    relativedelta(days=1, hours=14)

    :return:
        Returns a :class:`dateutil.relativedelta.relativedelta` object.
    """
    # Cascade each fractional remainder into the next-smaller unit,
    # rounding to roughly the nearest microsecond at every step.
    whole_days = int(self.days)
    frac_hours = round(self.hours + 24 * (self.days - whole_days), 11)
    whole_hours = int(frac_hours)
    frac_minutes = round(self.minutes + 60 * (frac_hours - whole_hours), 10)
    whole_minutes = int(frac_minutes)
    frac_seconds = round(self.seconds + 60 * (frac_minutes - whole_minutes), 8)
    whole_seconds = int(frac_seconds)
    whole_micros = round(self.microseconds + 1e6 * (frac_seconds - whole_seconds))

    # Constructor carries overflow back up with call to _fix()
    return self.__class__(
        years=self.years, months=self.months, days=whole_days,
        hours=whole_hours, minutes=whole_minutes, seconds=whole_seconds,
        microseconds=whole_micros, leapdays=self.leapdays,
        year=self.year, month=self.month, day=self.day,
        weekday=self.weekday, hour=self.hour, minute=self.minute,
        second=self.second, microsecond=self.microsecond)
def get_algorithm(alg: str) -> Callable:
    """
    Look up a hashing algorithm by its JWA name.

    :param alg: The name of the requested `JSON Web Algorithm <https://tools.ietf.org/html/rfc7519#ref-JWA>`_. `RFC7518 <https://tools.ietf.org/html/rfc7518#section-3.2>`_ is related.
    :type alg: str
    :return: The requested algorithm.
    :rtype: Callable
    :raises: ValueError
    """
    if alg in algorithms:
        return algorithms[alg]
    raise ValueError('Invalid algorithm: {:s}'.format(alg))
def _hash(secret: bytes, data: bytes, alg: str) -> bytes:
    """
    Create a new HMAC hash.

    :param secret: The secret used when hashing data.
    :type secret: bytes
    :param data: The data to hash.
    :type data: bytes
    :param alg: The algorithm to use when hashing `data`.
    :type alg: str
    :return: New HMAC hash.
    :rtype: bytes
    """
    digestmod = get_algorithm(alg)
    mac = hmac.new(secret, msg=data, digestmod=digestmod)
    return mac.digest()
def encode(secret: Union[str, bytes], payload: dict = None,
           alg: str = default_alg, header: dict = None) -> str:
    """
    Build a signed token from a header and payload.

    :param secret: The secret used to encode the token.
    :type secret: Union[str, bytes]
    :param payload: The payload to be encoded in the token.
    :type payload: dict
    :param alg: The algorithm used to hash the token.
    :type alg: str
    :param header: The header to be encoded in the token.
    :type header: dict
    :return: A new token
    :rtype: str
    """
    key = util.to_bytes(secret)

    # Serialize and base64-encode each segment; missing parts become {}.
    header_segment = util.b64_encode(util.to_bytes(json.dumps(header or {})))
    payload_segment = util.b64_encode(util.to_bytes(json.dumps(payload or {})))

    # Sign over "header.payload" and append the encoded signature.
    signing_input = util.join(header_segment, payload_segment)
    signature_segment = util.b64_encode(_hash(key, signing_input, alg))
    return util.from_bytes(util.join(signing_input, signature_segment))
def decode(secret: Union[str, bytes], token: Union[str, bytes],
           alg: str = default_alg) -> Tuple[dict, dict]:
    """
    Decodes the given token's header and payload and validates the signature.

    :param secret: The secret used to decode the token. Must match the
        secret used when creating the token.
    :type secret: Union[str, bytes]
    :param token: The token to decode.
    :type token: Union[str, bytes]
    :param alg: The algorithm used to decode the token. Must match the
        algorithm used when creating the token.
    :type alg: str
    :return: The decoded header and payload.
    :rtype: Tuple[dict, dict]
    """
    key = util.to_bytes(secret)
    token_bytes = util.to_bytes(token)

    # Everything before the final '.' is what was signed.
    signing_input, signature_segment = token_bytes.rsplit(b'.', 1)
    header_segment, payload_segment = signing_input.split(b'.')

    try:
        header = json.loads(util.from_bytes(util.b64_decode(header_segment)))
    except (json.decoder.JSONDecodeError, UnicodeDecodeError, ValueError):
        raise InvalidHeaderError('Invalid header')

    try:
        payload = json.loads(util.from_bytes(util.b64_decode(payload_segment)))
    except (json.decoder.JSONDecodeError, UnicodeDecodeError, ValueError):
        raise InvalidPayloadError('Invalid payload')

    # Both decoded segments must be JSON objects, not arbitrary JSON values.
    if not isinstance(header, dict):
        raise InvalidHeaderError('Invalid header: {}'.format(header))
    if not isinstance(payload, dict):
        raise InvalidPayloadError('Invalid payload: {}'.format(payload))

    expected_signature = _hash(key, signing_input, alg)
    if not compare_signature(util.b64_decode(signature_segment),
                             expected_signature):
        raise InvalidSignatureError('Invalid signature')

    return header, payload
def compare_signature(expected: Union[str, bytes],
                      actual: Union[str, bytes]) -> bool:
    """
    Compares the given signatures.

    :param expected: The expected signature.
    :type expected: Union[str, bytes]
    :param actual: The actual signature.
    :type actual: Union[str, bytes]
    :return: Do the signatures match?
    :rtype: bool
    """
    # hmac.compare_digest runs in constant time, resisting timing attacks.
    return hmac.compare_digest(util.to_bytes(expected),
                               util.to_bytes(actual))
def compare_token(expected: Union[str, bytes],
                  actual: Union[str, bytes]) -> bool:
    """
    Compares the given tokens.

    :param expected: The expected token.
    :type expected: Union[str, bytes]
    :param actual: The actual token.
    :type actual: Union[str, bytes]
    :return: Do the tokens match?
    :rtype: bool
    """
    # Only the signature segments (after the last '.') are compared.
    expected_sig_segment = util.to_bytes(expected).rsplit(b'.', 1)[-1]
    actual_sig_segment = util.to_bytes(actual).rsplit(b'.', 1)[-1]
    return compare_signature(util.b64_decode(expected_sig_segment),
                             util.b64_decode(actual_sig_segment))
def header(self) -> dict:
    """
    Build the token header: a copy of the user-supplied ``_header`` dict
    (if any) with the mandatory ``type`` and ``alg`` fields applied on top.

    :return: Token header.
    :rtype: dict
    """
    header = {}
    if isinstance(self._header, dict):
        # Copy so mutating the result never aliases self._header.
        header = self._header.copy()
    # Bug fix: the old unconditional header.update(self._header) here was
    # redundant when _header is a dict and raised TypeError when it was
    # None or any non-dict value.
    header.update({
        'type': 'JWT',
        'alg': self.alg
    })
    return header
def valid(self, time: int = None) -> bool:
    """
    Is the token valid? This method only checks the timestamps within the
    token and compares them against the current time if none is provided.

    :param time: The timestamp to validate against
    :type time: Union[int, None]
    :return: The validity of the token.
    :rtype: bool
    """
    if time is None:
        # Default to "now" as integer seconds since the Unix epoch.
        epoch = datetime(1970, 1, 1, 0, 0, 0)
        time = int((datetime.utcnow() - epoch).total_seconds())

    not_yet_valid = isinstance(self.valid_from, int) and time < self.valid_from
    expired = isinstance(self.valid_to, int) and time > self.valid_to
    return not (not_yet_valid or expired)
def _pop_claims_from_payload(self):
    """
    Check for registered claims in the payload and move them to the
    registered_claims property, overwriting any extant claims.
    """
    claim_names = set(registered_claims.values())
    # Snapshot the keys so we can pop from the payload while iterating.
    for key in list(self.payload.keys()):
        if key in claim_names:
            self.registered_claims[key] = self.payload.pop(key)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.