repository_name (string, 5–67 chars) | func_path_in_repository (string, 4–234 chars) | func_name (string, 0–314 chars) | whole_func_string (string, 52–3.87M chars) | language (string, 6 classes) | func_code_string (string, 52–3.87M chars) | func_documentation_string (string, 1–47.2k chars) | func_code_url (string, 85–339 chars) |
---|---|---|---|---|---|---|---|
galaxyproject/pulsar | pulsar/client/job_directory.py | RemoteJobDirectory.calculate_path | def calculate_path(self, remote_relative_path, input_type):
""" Only for used by Pulsar client, should override for managers to
enforce security and make the directory if needed.
"""
directory, allow_nested_files = self._directory_for_file_type(input_type)
return self.path_helper.remote_join(directory, remote_relative_path) | python | def calculate_path(self, remote_relative_path, input_type):
""" Only for used by Pulsar client, should override for managers to
enforce security and make the directory if needed.
"""
directory, allow_nested_files = self._directory_for_file_type(input_type)
return self.path_helper.remote_join(directory, remote_relative_path) | Only for use by the Pulsar client; managers should override to
enforce security and make the directory if needed. | https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/job_directory.py#L71-L76 |
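For reference, a minimal sketch of what `calculate_path` computes, assuming the client's `path_helper.remote_join` behaves like a POSIX join (the directory value and relative path below are hypothetical):

```python
# Illustrative only: posixpath stands in for path_helper.remote_join.
import posixpath

# Hypothetical result of _directory_for_file_type("input"):
inputs_directory = "/pulsar/staging/123/inputs"
remote_relative_path = "subdir/reads.fastq"

print(posixpath.join(inputs_directory, remote_relative_path))
# /pulsar/staging/123/inputs/subdir/reads.fastq
```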
galaxyproject/pulsar | pulsar/managers/stateful.py | StatefulManagerProxy.get_status | def get_status(self, job_id):
""" Compute status used proxied manager and handle state transitions
and track additional state information needed.
"""
job_directory = self._proxied_manager.job_directory(job_id)
with job_directory.lock("status"):
proxy_status, state_change = self.__proxy_status(job_directory, job_id)
if state_change == "to_complete":
self.__deactivate(job_id, proxy_status)
elif state_change == "to_running":
self.__state_change_callback(status.RUNNING, job_id)
return self.__status(job_directory, proxy_status) | python | def get_status(self, job_id):
""" Compute status used proxied manager and handle state transitions
and track additional state information needed.
"""
job_directory = self._proxied_manager.job_directory(job_id)
with job_directory.lock("status"):
proxy_status, state_change = self.__proxy_status(job_directory, job_id)
if state_change == "to_complete":
self.__deactivate(job_id, proxy_status)
elif state_change == "to_running":
self.__state_change_callback(status.RUNNING, job_id)
return self.__status(job_directory, proxy_status) | Compute status using the proxied manager and handle state transitions
and track additional state information needed. | https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/stateful.py#L137-L150 |
galaxyproject/pulsar | pulsar/managers/stateful.py | StatefulManagerProxy.__proxy_status | def __proxy_status(self, job_directory, job_id):
""" Determine state with proxied job manager and if this job needs
to be marked as deactivated (this occurs when job first returns a
complete status from proxy.
"""
state_change = None
if job_directory.has_metadata(JOB_FILE_PREPROCESSING_FAILED):
proxy_status = status.FAILED
job_directory.store_metadata(JOB_FILE_FINAL_STATUS, proxy_status)
state_change = "to_complete"
elif not job_directory.has_metadata(JOB_FILE_PREPROCESSED):
proxy_status = status.PREPROCESSING
elif job_directory.has_metadata(JOB_FILE_FINAL_STATUS):
proxy_status = job_directory.load_metadata(JOB_FILE_FINAL_STATUS)
else:
proxy_status = self._proxied_manager.get_status(job_id)
if proxy_status == status.RUNNING:
if not job_directory.has_metadata(JOB_METADATA_RUNNING):
job_directory.store_metadata(JOB_METADATA_RUNNING, True)
state_change = "to_running"
elif proxy_status in [status.COMPLETE, status.CANCELLED]:
job_directory.store_metadata(JOB_FILE_FINAL_STATUS, proxy_status)
state_change = "to_complete"
return proxy_status, state_change | python | def __proxy_status(self, job_directory, job_id):
""" Determine state with proxied job manager and if this job needs
to be marked as deactivated (this occurs when job first returns a
complete status from proxy.
"""
state_change = None
if job_directory.has_metadata(JOB_FILE_PREPROCESSING_FAILED):
proxy_status = status.FAILED
job_directory.store_metadata(JOB_FILE_FINAL_STATUS, proxy_status)
state_change = "to_complete"
elif not job_directory.has_metadata(JOB_FILE_PREPROCESSED):
proxy_status = status.PREPROCESSING
elif job_directory.has_metadata(JOB_FILE_FINAL_STATUS):
proxy_status = job_directory.load_metadata(JOB_FILE_FINAL_STATUS)
else:
proxy_status = self._proxied_manager.get_status(job_id)
if proxy_status == status.RUNNING:
if not job_directory.has_metadata(JOB_METADATA_RUNNING):
job_directory.store_metadata(JOB_METADATA_RUNNING, True)
state_change = "to_running"
elif proxy_status in [status.COMPLETE, status.CANCELLED]:
job_directory.store_metadata(JOB_FILE_FINAL_STATUS, proxy_status)
state_change = "to_complete"
return proxy_status, state_change | Determine state with proxied job manager and if this job needs
to be marked as deactivated (this occurs when the job first returns a
complete status from the proxy). | https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/stateful.py#L152-L175 |
galaxyproject/pulsar | pulsar/managers/stateful.py | StatefulManagerProxy.__status | def __status(self, job_directory, proxy_status):
""" Use proxied manager's status to compute the real
(stateful) status of the job.
"""
if proxy_status == status.COMPLETE:
if not job_directory.has_metadata(JOB_FILE_POSTPROCESSED):
job_status = status.POSTPROCESSING
else:
job_status = status.COMPLETE
else:
job_status = proxy_status
return job_status | python | def __status(self, job_directory, proxy_status):
""" Use proxied manager's status to compute the real
(stateful) status of the job.
"""
if proxy_status == status.COMPLETE:
if not job_directory.has_metadata(JOB_FILE_POSTPROCESSED):
job_status = status.POSTPROCESSING
else:
job_status = status.COMPLETE
else:
job_status = proxy_status
return job_status | Use proxied manager's status to compute the real
(stateful) status of the job. | https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/stateful.py#L177-L188 |
galaxyproject/pulsar | pulsar/client/staging/__init__.py | PulsarOutputs.output_extras | def output_extras(self, output_file):
"""
Returns dict mapping local path to remote name.
"""
output_directory = dirname(output_file)
def local_path(name):
return join(output_directory, self.path_helper.local_name(name))
files_directory = "%s_files%s" % (basename(output_file)[0:-len(".dat")], self.path_helper.separator)
names = filter(lambda o: o.startswith(files_directory), self.output_directory_contents)
return dict(map(lambda name: (local_path(name), name), names)) | python | def output_extras(self, output_file):
"""
Returns dict mapping local path to remote name.
"""
output_directory = dirname(output_file)
def local_path(name):
return join(output_directory, self.path_helper.local_name(name))
files_directory = "%s_files%s" % (basename(output_file)[0:-len(".dat")], self.path_helper.separator)
names = filter(lambda o: o.startswith(files_directory), self.output_directory_contents)
return dict(map(lambda name: (local_path(name), name), names)) | Returns dict mapping local path to remote name. | https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/staging/__init__.py#L185-L196 |
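A worked example of the mapping `output_extras` builds, with plain `os.path` calls standing in for the client's `path_helper` (an assumption; the real helper also translates remote separators):

```python
from os.path import basename, dirname, join

# Hypothetical inputs:
output_file = "/local/jobs/42/outputs/result.dat"
output_directory_contents = [
    "result_files/plot.png",
    "result_files/table.tsv",
    "other.dat",  # not under result_files/, so excluded
]

files_directory = "%s_files%s" % (basename(output_file)[:-len(".dat")], "/")
names = [o for o in output_directory_contents if o.startswith(files_directory)]
extras = {join(dirname(output_file), name): name for name in names}
# {'/local/jobs/42/outputs/result_files/plot.png': 'result_files/plot.png',
#  '/local/jobs/42/outputs/result_files/table.tsv': 'result_files/table.tsv'}
```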
galaxyproject/pulsar | pulsar/managers/util/sudo.py | sudo_popen | def sudo_popen(*args, **kwargs):
"""
Helper method for building and executing a Popen command. This is potentially
sensitive code so should probably be centralized.
"""
user = kwargs.get("user", None)
full_command = [SUDO_PATH, SUDO_PRESERVE_ENVIRONMENT_ARG]
if user:
full_command.extend([SUDO_USER_ARG, user])
full_command.extend(args)
log.info("About to execute the following sudo command - [%s]" % ' '.join(full_command))
p = Popen(full_command, shell=False, stdout=PIPE, stderr=PIPE)
return p | python | def sudo_popen(*args, **kwargs):
"""
Helper method for building and executing a Popen command. This is potentially
sensitive code so should probably be centralized.
"""
user = kwargs.get("user", None)
full_command = [SUDO_PATH, SUDO_PRESERVE_ENVIRONMENT_ARG]
if user:
full_command.extend([SUDO_USER_ARG, user])
full_command.extend(args)
log.info("About to execute the following sudo command - [%s]" % ' '.join(full_command))
p = Popen(full_command, shell=False, stdout=PIPE, stderr=PIPE)
return p | Helper method for building and executing a Popen command. This is potentially
sensitive code so should probably be centralized. | https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/util/sudo.py#L14-L26 |
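The argv `sudo_popen` assembles can be traced by hand; the constant values below are plausible assumptions (the real `SUDO_PATH` etc. are defined elsewhere in `pulsar.managers.util.sudo`):

```python
# Assumed values for the module-level constants:
SUDO_PATH, SUDO_PRESERVE_ENVIRONMENT_ARG, SUDO_USER_ARG = "/usr/bin/sudo", "-E", "-u"

args, user = ("ls", "/tmp"), "pulsar_user"
full_command = [SUDO_PATH, SUDO_PRESERVE_ENVIRONMENT_ARG]
if user:
    full_command.extend([SUDO_USER_ARG, user])
full_command.extend(args)
print(full_command)  # ['/usr/bin/sudo', '-E', '-u', 'pulsar_user', 'ls', '/tmp']
```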
bitprophet/releases | releases/line_manager.py | LineManager.add_family | def add_family(self, major_number):
"""
Expand to a new release line with given ``major_number``.
This will flesh out mandatory buckets like ``unreleased_bugfix`` and do
other necessary bookkeeping.
"""
# Normally, we have separate buckets for bugfixes vs features
keys = ['unreleased_bugfix', 'unreleased_feature']
# But unstable prehistorical releases roll all up into just
# 'unreleased'
if major_number == 0 and self.config.releases_unstable_prehistory:
keys = ['unreleased']
# Either way, the buckets default to an empty list
self[major_number] = {key: [] for key in keys} | python | def add_family(self, major_number):
"""
Expand to a new release line with given ``major_number``.
This will flesh out mandatory buckets like ``unreleased_bugfix`` and do
other necessary bookkeeping.
"""
# Normally, we have separate buckets for bugfixes vs features
keys = ['unreleased_bugfix', 'unreleased_feature']
# But unstable prehistorical releases roll all up into just
# 'unreleased'
if major_number == 0 and self.config.releases_unstable_prehistory:
keys = ['unreleased']
# Either way, the buckets default to an empty list
self[major_number] = {key: [] for key in keys} | Expand to a new release line with given ``major_number``.
This will flesh out mandatory buckets like ``unreleased_bugfix`` and do
other necessary bookkeeping. | https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/line_manager.py#L23-L37 |
bitprophet/releases | releases/line_manager.py | LineManager.has_stable_releases | def has_stable_releases(self):
"""
Returns whether stable (post-0.x) releases seem to exist.
"""
nonzeroes = self.stable_families
# Nothing but 0.x releases -> yup we're prehistory
if not nonzeroes:
return False
# Presumably, if there's >1 major family besides 0.x, we're at least
# one release into the 1.0 (or w/e) line.
if len(nonzeroes) > 1:
return True
# If there's only one, we may still be in the space before its N.0.0 as
# well; we can check by testing for existence of bugfix buckets
return any(
x for x in self[nonzeroes[0]] if not x.startswith('unreleased')
) | python | def has_stable_releases(self):
"""
Returns whether stable (post-0.x) releases seem to exist.
"""
nonzeroes = self.stable_families
# Nothing but 0.x releases -> yup we're prehistory
if not nonzeroes:
return False
# Presumably, if there's >1 major family besides 0.x, we're at least
# one release into the 1.0 (or w/e) line.
if len(nonzeroes) > 1:
return True
# If there's only one, we may still be in the space before its N.0.0 as
# well; we can check by testing for existence of bugfix buckets
return any(
x for x in self[nonzeroes[0]] if not x.startswith('unreleased')
) | Returns whether stable (post-0.x) releases seem to exist. | https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/line_manager.py#L59-L75 |
bitprophet/releases | releases/util.py | parse_changelog | def parse_changelog(path, **kwargs):
"""
Load and parse changelog file from ``path``, returning data structures.
This function does not alter any files on disk; it is solely for
introspecting a Releases ``changelog.rst`` and programmatically answering
questions like "are there any unreleased bugfixes for the 2.3 line?" or
"what was included in release 1.2.1?".
For example, answering the above questions is as simple as::
changelog = parse_changelog("/path/to/changelog")
print("Unreleased issues for 2.3.x: {}".format(changelog['2.3']))
print("Contents of v1.2.1: {}".format(changelog['1.2.1']))
Aside from the documented arguments, any additional keyword arguments are
passed unmodified into an internal `get_doctree` call (which then passes
them to `make_app`).
:param str path: A relative or absolute file path string.
:returns:
A dict whose keys map to lists of ``releases.models.Issue`` objects, as
follows:
- Actual releases are full version number keys, such as ``"1.2.1"`` or
``"2.0.0"``.
- Unreleased bugs (or bug-like issues; see the Releases docs) are
stored in minor-release buckets, e.g. ``"1.2"`` or ``"2.0"``.
- Unreleased features (or feature-like issues) are found in
``"unreleased_N_feature"``, where ``N`` is one of the major release
families (so, a changelog spanning only 1.x will only have
``unreleased_1_feature``, whereas one with 1.x and 2.x releases will
have ``unreleased_1_feature`` and ``unreleased_2_feature``, etc).
.. versionchanged:: 1.6
Added support for passing kwargs to `get_doctree`/`make_app`.
"""
app, doctree = get_doctree(path, **kwargs)
# Have to semi-reproduce the 'find first bullet list' bit from main code,
# which is unfortunately side-effect-heavy (thanks to Sphinx plugin
# design).
first_list = None
for node in doctree[0]:
if isinstance(node, bullet_list):
first_list = node
break
# Initial parse into the structures Releases finds useful internally
releases, manager = construct_releases(first_list.children, app)
ret = changelog2dict(releases)
# Stitch them together into something an end-user would find better:
# - nuke unreleased_N.N_Y as their contents will be represented in the
# per-line buckets
for key in ret.copy():
if key.startswith('unreleased'):
del ret[key]
for family in manager:
# - remove unreleased_bugfix, as they are accounted for in the per-line
# buckets too. No need to store anywhere.
manager[family].pop('unreleased_bugfix', None)
# - bring over each major family's unreleased_feature as
# unreleased_N_feature
unreleased = manager[family].pop('unreleased_feature', None)
if unreleased is not None:
ret['unreleased_{}_feature'.format(family)] = unreleased
# - bring over all per-line buckets from manager (flattening)
# Here, all that's left in the per-family bucket should be lines, not
# unreleased_*
ret.update(manager[family])
return ret | python | def parse_changelog(path, **kwargs):
"""
Load and parse changelog file from ``path``, returning data structures.
This function does not alter any files on disk; it is solely for
introspecting a Releases ``changelog.rst`` and programmatically answering
questions like "are there any unreleased bugfixes for the 2.3 line?" or
"what was included in release 1.2.1?".
For example, answering the above questions is as simple as::
changelog = parse_changelog("/path/to/changelog")
print("Unreleased issues for 2.3.x: {}".format(changelog['2.3']))
print("Contents of v1.2.1: {}".format(changelog['1.2.1']))
Aside from the documented arguments, any additional keyword arguments are
passed unmodified into an internal `get_doctree` call (which then passes
them to `make_app`).
:param str path: A relative or absolute file path string.
:returns:
A dict whose keys map to lists of ``releases.models.Issue`` objects, as
follows:
- Actual releases are full version number keys, such as ``"1.2.1"`` or
``"2.0.0"``.
- Unreleased bugs (or bug-like issues; see the Releases docs) are
stored in minor-release buckets, e.g. ``"1.2"`` or ``"2.0"``.
- Unreleased features (or feature-like issues) are found in
``"unreleased_N_feature"``, where ``N`` is one of the major release
families (so, a changelog spanning only 1.x will only have
``unreleased_1_feature``, whereas one with 1.x and 2.x releases will
have ``unreleased_1_feature`` and ``unreleased_2_feature``, etc).
.. versionchanged:: 1.6
Added support for passing kwargs to `get_doctree`/`make_app`.
"""
app, doctree = get_doctree(path, **kwargs)
# Have to semi-reproduce the 'find first bullet list' bit from main code,
# which is unfortunately side-effect-heavy (thanks to Sphinx plugin
# design).
first_list = None
for node in doctree[0]:
if isinstance(node, bullet_list):
first_list = node
break
# Initial parse into the structures Releases finds useful internally
releases, manager = construct_releases(first_list.children, app)
ret = changelog2dict(releases)
# Stitch them together into something an end-user would find better:
# - nuke unreleased_N.N_Y as their contents will be represented in the
# per-line buckets
for key in ret.copy():
if key.startswith('unreleased'):
del ret[key]
for family in manager:
# - remove unreleased_bugfix, as they are accounted for in the per-line
# buckets too. No need to store anywhere.
manager[family].pop('unreleased_bugfix', None)
# - bring over each major family's unreleased_feature as
# unreleased_N_feature
unreleased = manager[family].pop('unreleased_feature', None)
if unreleased is not None:
ret['unreleased_{}_feature'.format(family)] = unreleased
# - bring over all per-line buckets from manager (flattening)
# Here, all that's left in the per-family bucket should be lines, not
# unreleased_*
ret.update(manager[family])
return ret | Load and parse changelog file from ``path``, returning data structures.
This function does not alter any files on disk; it is solely for
introspecting a Releases ``changelog.rst`` and programmatically answering
questions like "are there any unreleased bugfixes for the 2.3 line?" or
"what was included in release 1.2.1?".
For example, answering the above questions is as simple as::
changelog = parse_changelog("/path/to/changelog")
print("Unreleased issues for 2.3.x: {}".format(changelog['2.3']))
print("Contents of v1.2.1: {}".format(changelog['1.2.1']))
Aside from the documented arguments, any additional keyword arguments are
passed unmodified into an internal `get_doctree` call (which then passes
them to `make_app`).
:param str path: A relative or absolute file path string.
:returns:
A dict whose keys map to lists of ``releases.models.Issue`` objects, as
follows:
- Actual releases are full version number keys, such as ``"1.2.1"`` or
``"2.0.0"``.
- Unreleased bugs (or bug-like issues; see the Releases docs) are
stored in minor-release buckets, e.g. ``"1.2"`` or ``"2.0"``.
- Unreleased features (or feature-like issues) are found in
``"unreleased_N_feature"``, where ``N`` is one of the major release
families (so, a changelog spanning only 1.x will only have
``unreleased_1_feature``, whereas one with 1.x and 2.x releases will
have ``unreleased_1_feature`` and ``unreleased_2_feature``, etc).
.. versionchanged:: 1.6
Added support for passing kwargs to `get_doctree`/`make_app`. | https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/util.py#L37-L106 |
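A hedged usage sketch showing the shape of the returned dict for a changelog spanning 1.x and 2.x (the path and keys below are illustrative, not output from a real run):

```python
from releases.util import parse_changelog

changelog = parse_changelog("docs/changelog.rst")  # hypothetical path
# Expected shape, per the docstring above:
# {
#     '1.2.1': [...],                 # issues in an actual release
#     '1.2': [...],                   # unreleased bugfixes for the 1.2 line
#     'unreleased_1_feature': [...],  # unreleased 1.x features
#     'unreleased_2_feature': [...],
# }
```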
bitprophet/releases | releases/util.py | get_doctree | def get_doctree(path, **kwargs):
"""
Obtain a Sphinx doctree from the RST file at ``path``.
Performs no Releases-specific processing; this code would, ideally, be in
Sphinx itself, but things there are pretty tightly coupled. So we wrote
this.
Any additional kwargs are passed unmodified into an internal `make_app`
call.
:param str path: A relative or absolute file path string.
:returns:
A two-tuple of the generated ``sphinx.application.Sphinx`` app and the
doctree (a ``docutils.document`` object).
.. versionchanged:: 1.6
Added support for passing kwargs to `make_app`.
"""
root, filename = os.path.split(path)
docname, _ = os.path.splitext(filename)
# TODO: this only works for top level changelog files (i.e. ones where
# their dirname is the project/doc root)
app = make_app(srcdir=root, **kwargs)
# Create & init a BuildEnvironment. Mm, tasty side effects.
app._init_env(freshenv=True)
env = app.env
# More arity/API changes: Sphinx 1.3/1.4-ish require one to pass in the app
# obj in BuildEnvironment.update(); modern Sphinx performs that inside
# Application._init_env() (which we just called above) and so that kwarg is
# removed from update(). EAFP.
kwargs = dict(
config=app.config,
srcdir=root,
doctreedir=app.doctreedir,
app=app,
)
try:
env.update(**kwargs)
except TypeError:
# Assume newer Sphinx w/o an app= kwarg
del kwargs['app']
env.update(**kwargs)
# Code taken from sphinx.environment.read_doc; easier to manually call
# it with a working Environment object, instead of doing more random crap
# to trick the higher up build system into thinking our single changelog
# document was "updated".
env.temp_data['docname'] = docname
env.app = app
# NOTE: SphinxStandaloneReader API changed in 1.4 :(
reader_kwargs = {
'app': app,
'parsers': env.config.source_parsers,
}
if sphinx.version_info[:2] < (1, 4):
del reader_kwargs['app']
# This monkeypatches (!!!) docutils to 'inject' all registered Sphinx
# domains' roles & so forth. Without this, rendering the doctree lacks
# almost all Sphinx magic, including things like :ref: and :doc:!
with sphinx_domains(env):
try:
reader = SphinxStandaloneReader(**reader_kwargs)
except TypeError:
# If we import from io, this happens automagically, not in API
del reader_kwargs['parsers']
reader = SphinxStandaloneReader(**reader_kwargs)
pub = Publisher(reader=reader,
writer=SphinxDummyWriter(),
destination_class=NullOutput)
pub.set_components(None, 'restructuredtext', None)
pub.process_programmatic_settings(None, env.settings, None)
# NOTE: docname derived higher up, from our given path
src_path = env.doc2path(docname)
source = SphinxFileInput(
app,
env,
source=None,
source_path=src_path,
encoding=env.config.source_encoding,
)
pub.source = source
pub.settings._source = src_path
pub.set_destination(None, None)
pub.publish()
return app, pub.document | python | def get_doctree(path, **kwargs):
"""
Obtain a Sphinx doctree from the RST file at ``path``.
Performs no Releases-specific processing; this code would, ideally, be in
Sphinx itself, but things there are pretty tightly coupled. So we wrote
this.
Any additional kwargs are passed unmodified into an internal `make_app`
call.
:param str path: A relative or absolute file path string.
:returns:
A two-tuple of the generated ``sphinx.application.Sphinx`` app and the
doctree (a ``docutils.document`` object).
.. versionchanged:: 1.6
Added support for passing kwargs to `make_app`.
"""
root, filename = os.path.split(path)
docname, _ = os.path.splitext(filename)
# TODO: this only works for top level changelog files (i.e. ones where
# their dirname is the project/doc root)
app = make_app(srcdir=root, **kwargs)
# Create & init a BuildEnvironment. Mm, tasty side effects.
app._init_env(freshenv=True)
env = app.env
# More arity/API changes: Sphinx 1.3/1.4-ish require one to pass in the app
# obj in BuildEnvironment.update(); modern Sphinx performs that inside
# Application._init_env() (which we just called above) and so that kwarg is
# removed from update(). EAFP.
kwargs = dict(
config=app.config,
srcdir=root,
doctreedir=app.doctreedir,
app=app,
)
try:
env.update(**kwargs)
except TypeError:
# Assume newer Sphinx w/o an app= kwarg
del kwargs['app']
env.update(**kwargs)
# Code taken from sphinx.environment.read_doc; easier to manually call
# it with a working Environment object, instead of doing more random crap
# to trick the higher up build system into thinking our single changelog
# document was "updated".
env.temp_data['docname'] = docname
env.app = app
# NOTE: SphinxStandaloneReader API changed in 1.4 :(
reader_kwargs = {
'app': app,
'parsers': env.config.source_parsers,
}
if sphinx.version_info[:2] < (1, 4):
del reader_kwargs['app']
# This monkeypatches (!!!) docutils to 'inject' all registered Sphinx
# domains' roles & so forth. Without this, rendering the doctree lacks
# almost all Sphinx magic, including things like :ref: and :doc:!
with sphinx_domains(env):
try:
reader = SphinxStandaloneReader(**reader_kwargs)
except TypeError:
# If we import from io, this happens automagically, not in API
del reader_kwargs['parsers']
reader = SphinxStandaloneReader(**reader_kwargs)
pub = Publisher(reader=reader,
writer=SphinxDummyWriter(),
destination_class=NullOutput)
pub.set_components(None, 'restructuredtext', None)
pub.process_programmatic_settings(None, env.settings, None)
# NOTE: docname derived higher up, from our given path
src_path = env.doc2path(docname)
source = SphinxFileInput(
app,
env,
source=None,
source_path=src_path,
encoding=env.config.source_encoding,
)
pub.source = source
pub.settings._source = src_path
pub.set_destination(None, None)
pub.publish()
return app, pub.document | Obtain a Sphinx doctree from the RST file at ``path``.
Performs no Releases-specific processing; this code would, ideally, be in
Sphinx itself, but things there are pretty tightly coupled. So we wrote
this.
Any additional kwargs are passed unmodified into an internal `make_app`
call.
:param str path: A relative or absolute file path string.
:returns:
A two-tuple of the generated ``sphinx.application.Sphinx`` app and the
doctree (a ``docutils.document`` object).
.. versionchanged:: 1.6
Added support for passing kwargs to `make_app`. | https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/util.py#L109-L194 |
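A minimal usage sketch (the path is hypothetical) mirroring how `parse_changelog` consumes this function, finding the first bullet list in the resulting doctree:

```python
from docutils.nodes import bullet_list
from releases.util import get_doctree

app, doctree = get_doctree("docs/changelog.rst")
first_list = next(
    (node for node in doctree[0] if isinstance(node, bullet_list)), None
)
```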
bitprophet/releases | releases/util.py | load_conf | def load_conf(srcdir):
"""
Load ``conf.py`` from given ``srcdir``.
:returns: Dictionary derived from the conf module.
"""
path = os.path.join(srcdir, 'conf.py')
mylocals = {'__file__': path}
with open(path) as fd:
exec(fd.read(), mylocals)
return mylocals | python | def load_conf(srcdir):
"""
Load ``conf.py`` from given ``srcdir``.
:returns: Dictionary derived from the conf module.
"""
path = os.path.join(srcdir, 'conf.py')
mylocals = {'__file__': path}
with open(path) as fd:
exec(fd.read(), mylocals)
return mylocals | Load ``conf.py`` from given ``srcdir``.
:returns: Dictionary derived from the conf module. | https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/util.py#L197-L207 |
bitprophet/releases | releases/util.py | make_app | def make_app(**kwargs):
"""
Create a dummy Sphinx app, filling in various hardcoded assumptions.
For example, Sphinx assumes the existence of various source/dest
directories, even if you're only calling internals that never generate (or
sometimes, even read!) on-disk files. This function creates safe temp
directories for these instances.
It also neuters Sphinx's internal logging, which otherwise causes verbosity
in one's own test output and/or debug logs.
Finally, it does load the given srcdir's ``conf.py``, but only to read
specific bits like ``extensions`` (if requested); most of it is ignored.
All args are stored in a single ``**kwargs``. Aside from the params listed
below (all of which are optional), all kwargs given are turned into
'releases_xxx' config settings; e.g. ``make_app(foo='bar')`` is like
setting ``releases_foo = 'bar'`` in ``conf.py``.
:param str docname:
Override the document name used (mostly for internal testing).
:param str srcdir:
Sphinx source directory path.
:param str dstdir:
Sphinx dest directory path.
:param str doctreedir:
Sphinx doctree directory path.
:param bool load_extensions:
Whether to load the real ``conf.py`` and setup any extensions it
configures. Default: ``False``.
:returns: A Sphinx ``Application`` instance.
.. versionchanged:: 1.6
Added the ``load_extensions`` kwarg.
"""
srcdir = kwargs.pop('srcdir', mkdtemp())
dstdir = kwargs.pop('dstdir', mkdtemp())
doctreedir = kwargs.pop('doctreedir', mkdtemp())
load_extensions = kwargs.pop('load_extensions', False)
real_conf = None
try:
# Sphinx <1.6ish
Sphinx._log = lambda self, message, wfile, nonl=False: None
# Sphinx >=1.6ish. Technically still lets Very Bad Things through,
# unlike the total muting above, but probably OK.
# NOTE: used to just do 'sphinx' but that stopped working, even on
# sphinx 1.6.x. Weird. Unsure why hierarchy not functioning.
for name in ('sphinx', 'sphinx.sphinx.application'):
logging.getLogger(name).setLevel(logging.ERROR)
# App API seems to work on all versions so far.
app = Sphinx(
srcdir=srcdir,
confdir=None,
outdir=dstdir,
doctreedir=doctreedir,
buildername='html',
)
# Might as well load the conf file here too.
if load_extensions:
real_conf = load_conf(srcdir)
finally:
for d in (srcdir, dstdir, doctreedir):
# Only remove empty dirs; non-empty dirs are implicitly something
# that existed before we ran, and should not be touched.
try:
os.rmdir(d)
except OSError:
pass
setup(app)
# Mock out the config within. More assumptions by Sphinx :(
# TODO: just use real config and overlay what truly needs changing? is that
# feasible given the rest of the weird ordering we have to do? If it is,
# maybe just literally slap this over the return value of load_conf()...
config = {
'releases_release_uri': 'foo_%s',
'releases_issue_uri': 'bar_%s',
'releases_debug': False,
'master_doc': 'index',
}
# Allow tinkering with document filename
if 'docname' in kwargs:
app.env.temp_data['docname'] = kwargs.pop('docname')
# Allow config overrides via kwargs
for name in kwargs:
config['releases_{}'.format(name)] = kwargs[name]
# Stitch together as the sphinx app init() usually does w/ real conf files
app.config._raw_config = config
# init_values() requires a 'warn' runner on Sphinx 1.3-1.6, so if we seem
# to be hitting arity errors, give it a dummy such callable. Hopefully
# calling twice doesn't introduce any wacko state issues :(
try:
app.config.init_values()
except TypeError: # boy I wish Python had an ArityError or w/e
app.config.init_values(lambda x: x)
# Initialize extensions (the internal call to this happens at init time,
# which of course had no valid config yet here...)
if load_extensions:
for extension in real_conf.get('extensions', []):
# But don't set up ourselves again, that causes errors
if extension == 'releases':
continue
app.setup_extension(extension)
return app | python | def make_app(**kwargs):
"""
Create a dummy Sphinx app, filling in various hardcoded assumptions.
For example, Sphinx assumes the existence of various source/dest
directories, even if you're only calling internals that never generate (or
sometimes, even read!) on-disk files. This function creates safe temp
directories for these instances.
It also neuters Sphinx's internal logging, which otherwise causes verbosity
in one's own test output and/or debug logs.
Finally, it does load the given srcdir's ``conf.py``, but only to read
specific bits like ``extensions`` (if requested); most of it is ignored.
All args are stored in a single ``**kwargs``. Aside from the params listed
below (all of which are optional), all kwargs given are turned into
'releases_xxx' config settings; e.g. ``make_app(foo='bar')`` is like
setting ``releases_foo = 'bar'`` in ``conf.py``.
:param str docname:
Override the document name used (mostly for internal testing).
:param str srcdir:
Sphinx source directory path.
:param str dstdir:
Sphinx dest directory path.
:param str doctreedir:
Sphinx doctree directory path.
:param bool load_extensions:
Whether to load the real ``conf.py`` and setup any extensions it
configures. Default: ``False``.
:returns: A Sphinx ``Application`` instance.
.. versionchanged:: 1.6
Added the ``load_extensions`` kwarg.
"""
srcdir = kwargs.pop('srcdir', mkdtemp())
dstdir = kwargs.pop('dstdir', mkdtemp())
doctreedir = kwargs.pop('doctreedir', mkdtemp())
load_extensions = kwargs.pop('load_extensions', False)
real_conf = None
try:
# Sphinx <1.6ish
Sphinx._log = lambda self, message, wfile, nonl=False: None
# Sphinx >=1.6ish. Technically still lets Very Bad Things through,
# unlike the total muting above, but probably OK.
# NOTE: used to just do 'sphinx' but that stopped working, even on
# sphinx 1.6.x. Weird. Unsure why hierarchy not functioning.
for name in ('sphinx', 'sphinx.sphinx.application'):
logging.getLogger(name).setLevel(logging.ERROR)
# App API seems to work on all versions so far.
app = Sphinx(
srcdir=srcdir,
confdir=None,
outdir=dstdir,
doctreedir=doctreedir,
buildername='html',
)
# Might as well load the conf file here too.
if load_extensions:
real_conf = load_conf(srcdir)
finally:
for d in (srcdir, dstdir, doctreedir):
# Only remove empty dirs; non-empty dirs are implicitly something
# that existed before we ran, and should not be touched.
try:
os.rmdir(d)
except OSError:
pass
setup(app)
# Mock out the config within. More assumptions by Sphinx :(
# TODO: just use real config and overlay what truly needs changing? is that
# feasible given the rest of the weird ordering we have to do? If it is,
# maybe just literally slap this over the return value of load_conf()...
config = {
'releases_release_uri': 'foo_%s',
'releases_issue_uri': 'bar_%s',
'releases_debug': False,
'master_doc': 'index',
}
# Allow tinkering with document filename
if 'docname' in kwargs:
app.env.temp_data['docname'] = kwargs.pop('docname')
# Allow config overrides via kwargs
for name in kwargs:
config['releases_{}'.format(name)] = kwargs[name]
# Stitch together as the sphinx app init() usually does w/ real conf files
app.config._raw_config = config
# init_values() requires a 'warn' runner on Sphinx 1.3-1.6, so if we seem
# to be hitting arity errors, give it a dummy such callable. Hopefully
# calling twice doesn't introduce any wacko state issues :(
try:
app.config.init_values()
except TypeError: # boy I wish Python had an ArityError or w/e
app.config.init_values(lambda x: x)
# Initialize extensions (the internal call to this happens at init time,
# which of course had no valid config yet here...)
if load_extensions:
for extension in real_conf.get('extensions', []):
# But don't set up ourselves again, that causes errors
if extension == 'releases':
continue
app.setup_extension(extension)
return app | Create a dummy Sphinx app, filling in various hardcoded assumptions.
For example, Sphinx assumes the existence of various source/dest
directories, even if you're only calling internals that never generate (or
sometimes, even read!) on-disk files. This function creates safe temp
directories for these instances.
It also neuters Sphinx's internal logging, which otherwise causes verbosity
in one's own test output and/or debug logs.
Finally, it does load the given srcdir's ``conf.py``, but only to read
specific bits like ``extensions`` (if requested); most of it is ignored.
All args are stored in a single ``**kwargs``. Aside from the params listed
below (all of which are optional), all kwargs given are turned into
'releases_xxx' config settings; e.g. ``make_app(foo='bar')`` is like
setting ``releases_foo = 'bar'`` in ``conf.py``.
:param str docname:
Override the document name used (mostly for internal testing).
:param str srcdir:
Sphinx source directory path.
:param str dstdir:
Sphinx dest directory path.
:param str doctreedir:
Sphinx doctree directory path.
:param bool load_extensions:
Whether to load the real ``conf.py`` and setup any extensions it
configures. Default: ``False``.
:returns: A Sphinx ``Application`` instance.
.. versionchanged:: 1.6
Added the ``load_extensions`` kwarg. | https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/util.py#L210-L318 |
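A sketch of the kwarg-to-config mapping described in the docstring: unrecognized kwargs become `releases_*` settings on the dummy app (values below are illustrative):

```python
from releases.util import make_app

app = make_app(
    srcdir="docs",                      # hypothetical docs dir
    unstable_prehistory=True,           # -> releases_unstable_prehistory
    github_path="bitprophet/releases",  # -> releases_github_path
)
```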
bitprophet/releases | releases/__init__.py | _log | def _log(txt, config):
"""
Log debug output if debug setting is on.
Intended to be partial'd w/ config at top of functions. Meh.
"""
if config.releases_debug:
sys.stderr.write(str(txt) + "\n")
sys.stderr.flush() | python | def _log(txt, config):
"""
Log debug output if debug setting is on.
Intended to be partial'd w/ config at top of functions. Meh.
"""
if config.releases_debug:
sys.stderr.write(str(txt) + "\n")
sys.stderr.flush() | Log debug output if debug setting is on.
Intended to be partial'd w/ config at top of functions. Meh. | https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/__init__.py#L15-L23 |
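The "partial'd w/ config" pattern the docstring mentions, sketched (the `app` object is assumed to come from a prior `make_app` call):

```python
from functools import partial

from releases import _log

log = partial(_log, config=app.config)
log("constructing releases...")  # writes to stderr only if releases_debug is set
```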
bitprophet/releases | releases/__init__.py | scan_for_spec | def scan_for_spec(keyword):
"""
Attempt to return some sort of Spec from given keyword value.
Returns None if one could not be derived.
"""
# Both 'spec' formats are wrapped in parens, discard
keyword = keyword.lstrip('(').rstrip(')')
# First, test for intermediate '1.2+' style
matches = release_line_re.findall(keyword)
if matches:
return Spec(">={}".format(matches[0]))
# Failing that, see if Spec can make sense of it
try:
return Spec(keyword)
# I've only ever seen Spec fail with ValueError.
except ValueError:
return None | python | def scan_for_spec(keyword):
"""
Attempt to return some sort of Spec from given keyword value.
Returns None if one could not be derived.
"""
# Both 'spec' formats are wrapped in parens, discard
keyword = keyword.lstrip('(').rstrip(')')
# First, test for intermediate '1.2+' style
matches = release_line_re.findall(keyword)
if matches:
return Spec(">={}".format(matches[0]))
# Failing that, see if Spec can make sense of it
try:
return Spec(keyword)
# I've only ever seen Spec fail with ValueError.
except ValueError:
return None | Attempt to return some sort of Spec from given keyword value.
Returns None if one could not be derived. | https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/__init__.py#L38-L55 |
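Input/output examples, assuming `Spec` is `semantic_version.Spec` and that `release_line_re` matches the `N.N+` shorthand (both assumptions consistent with the code above):

```python
from releases import scan_for_spec

scan_for_spec("(1.2+)")        # -> Spec('>=1.2'), via the release-line shorthand
scan_for_spec("(>=1.0,<2.0)")  # -> Spec('>=1.0,<2.0'), parsed directly by Spec
scan_for_spec("backported")    # -> None; Spec raises ValueError on keywords
```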
bitprophet/releases | releases/__init__.py | issues_role | def issues_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""
Use: :issue|bug|feature|support:`ticket_number`
When invoked as :issue:, turns into just a "#NN" hyperlink to
`releases_issue_uri`.
When invoked otherwise, turns into "[Type] <#NN hyperlink>: ".
Spaces present in the "ticket number" are used as fields for keywords
(major, backported) and/or specs (e.g. '>=1.0'). This data is removed &
used when constructing the object.
May give a 'ticket number' of ``-`` or ``0`` to generate no hyperlink.
"""
parts = utils.unescape(text).split()
issue_no = parts.pop(0)
# Lol @ access back to Sphinx
config = inliner.document.settings.env.app.config
if issue_no not in ('-', '0'):
ref = None
if config.releases_issue_uri:
# TODO: deal with % vs .format()
ref = config.releases_issue_uri % issue_no
elif config.releases_github_path:
ref = "https://github.com/{}/issues/{}".format(
config.releases_github_path, issue_no)
# Only generate a reference/link if we were able to make a URI
if ref:
identifier = nodes.reference(
rawtext, '#' + issue_no, refuri=ref, **options
)
# Otherwise, just make it regular text
else:
identifier = nodes.raw(
rawtext=rawtext, text='#' + issue_no, format='html',
**options
)
else:
identifier = None
issue_no = None # So it doesn't gum up dupe detection later
# Additional 'new-style changelog' stuff
if name in ISSUE_TYPES:
nodelist = issue_nodelist(name, identifier)
spec = None
keyword = None
# TODO: sanity checks re: e.g. >2 parts, >1 instance of keywords, >1
# instance of specs, etc.
for part in parts:
maybe_spec = scan_for_spec(part)
if maybe_spec:
spec = maybe_spec
else:
if part in ('backported', 'major'):
keyword = part
else:
err = "Gave unknown keyword {!r} for issue no. {}"
raise ValueError(err.format(part, issue_no))
# Create temporary node w/ data & final nodes to publish
node = Issue(
number=issue_no,
type_=name,
nodelist=nodelist,
backported=(keyword == 'backported'),
major=(keyword == 'major'),
spec=spec,
)
return [node], []
# Return old style info for 'issue' for older changelog entries
else:
return [identifier], [] | python | def issues_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""
Use: :issue|bug|feature|support:`ticket_number`
When invoked as :issue:, turns into just a "#NN" hyperlink to
`releases_issue_uri`.
When invoked otherwise, turns into "[Type] <#NN hyperlink>: ".
Spaces present in the "ticket number" are used as fields for keywords
(major, backported) and/or specs (e.g. '>=1.0'). This data is removed &
used when constructing the object.
May give a 'ticket number' of ``-`` or ``0`` to generate no hyperlink.
"""
parts = utils.unescape(text).split()
issue_no = parts.pop(0)
# Lol @ access back to Sphinx
config = inliner.document.settings.env.app.config
if issue_no not in ('-', '0'):
ref = None
if config.releases_issue_uri:
# TODO: deal with % vs .format()
ref = config.releases_issue_uri % issue_no
elif config.releases_github_path:
ref = "https://github.com/{}/issues/{}".format(
config.releases_github_path, issue_no)
# Only generate a reference/link if we were able to make a URI
if ref:
identifier = nodes.reference(
rawtext, '#' + issue_no, refuri=ref, **options
)
# Otherwise, just make it regular text
else:
identifier = nodes.raw(
rawtext=rawtext, text='#' + issue_no, format='html',
**options
)
else:
identifier = None
issue_no = None # So it doesn't gum up dupe detection later
# Additional 'new-style changelog' stuff
if name in ISSUE_TYPES:
nodelist = issue_nodelist(name, identifier)
spec = None
keyword = None
# TODO: sanity checks re: e.g. >2 parts, >1 instance of keywords, >1
# instance of specs, etc.
for part in parts:
maybe_spec = scan_for_spec(part)
if maybe_spec:
spec = maybe_spec
else:
if part in ('backported', 'major'):
keyword = part
else:
err = "Gave unknown keyword {!r} for issue no. {}"
raise ValueError(err.format(part, issue_no))
# Create temporary node w/ data & final nodes to publish
node = Issue(
number=issue_no,
type_=name,
nodelist=nodelist,
backported=(keyword == 'backported'),
major=(keyword == 'major'),
spec=spec,
)
return [node], []
# Return old style info for 'issue' for older changelog entries
else:
return [identifier], [] | Use: :issue|bug|feature|support:`ticket_number`
When invoked as :issue:, turns into just a "#NN" hyperlink to
`releases_issue_uri`.
When invoked otherwise, turns into "[Type] <#NN hyperlink>: ".
Spaces present in the "ticket number" are used as fields for keywords
(major, backported) and/or specs (e.g. '>=1.0'). This data is removed &
used when constructing the object.
May give a 'ticket number' of ``-`` or ``0`` to generate no hyperlink. | https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/__init__.py#L58-L128 |
bitprophet/releases | releases/__init__.py | release_role | def release_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""
Invoked as :release:`N.N.N <YYYY-MM-DD>`.
Turns into useful release header + link to GH tree for the tag.
"""
# Make sure year has been specified
match = year_arg_re.match(text)
if not match:
msg = inliner.reporter.error("Must specify release date!")
return [inliner.problematic(rawtext, rawtext, msg)], [msg]
number, date = match.group(1), match.group(2)
# Lol @ access back to Sphinx
config = inliner.document.settings.env.app.config
nodelist = [release_nodes(number, number, date, config)]
# Return intermediate node
node = Release(number=number, date=date, nodelist=nodelist)
return [node], [] | python | def release_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""
Invoked as :release:`N.N.N <YYYY-MM-DD>`.
Turns into useful release header + link to GH tree for the tag.
"""
# Make sure year has been specified
match = year_arg_re.match(text)
if not match:
msg = inliner.reporter.error("Must specify release date!")
return [inliner.problematic(rawtext, rawtext, msg)], [msg]
number, date = match.group(1), match.group(2)
# Lol @ access back to Sphinx
config = inliner.document.settings.env.app.config
nodelist = [release_nodes(number, number, date, config)]
# Return intermediate node
node = Release(number=number, date=date, nodelist=nodelist)
return [node], [] | Invoked as :release:`N.N.N <YYYY-MM-DD>`.
Turns into useful release header + link to GH tree for the tag. | https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/__init__.py#L164-L181 |
bitprophet/releases | releases/__init__.py | append_unreleased_entries | def append_unreleased_entries(app, manager, releases):
"""
Generate new abstract 'releases' for unreleased issues.
There's one for each combination of bug-vs-feature & major release line.
When only one major release line exists, that dimension is ignored.
"""
for family, lines in six.iteritems(manager):
for type_ in ('bugfix', 'feature'):
bucket = 'unreleased_{}'.format(type_)
if bucket not in lines: # Implies unstable prehistory + 0.x fam
continue
issues = lines[bucket]
fam_prefix = "{}.x ".format(family) if len(manager) > 1 else ""
header = "Next {}{} release".format(fam_prefix, type_)
line = "unreleased_{}.x_{}".format(family, type_)
releases.append(
generate_unreleased_entry(header, line, issues, manager, app)
) | python | def append_unreleased_entries(app, manager, releases):
"""
Generate new abstract 'releases' for unreleased issues.
There's one for each combination of bug-vs-feature & major release line.
When only one major release line exists, that dimension is ignored.
"""
for family, lines in six.iteritems(manager):
for type_ in ('bugfix', 'feature'):
bucket = 'unreleased_{}'.format(type_)
if bucket not in lines: # Implies unstable prehistory + 0.x fam
continue
issues = lines[bucket]
fam_prefix = "{}.x ".format(family) if len(manager) > 1 else ""
header = "Next {}{} release".format(fam_prefix, type_)
line = "unreleased_{}.x_{}".format(family, type_)
releases.append(
generate_unreleased_entry(header, line, issues, manager, app)
) | Generate new abstract 'releases' for unreleased issues.
There's one for each combination of bug-vs-feature & major release line.
When only one major release line exists, that dimension is ignored. | https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/__init__.py#L202-L221 |
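The header and line labels the loop above generates, traced by hand for a two-family manager with `type_ = 'bugfix'` (values illustrative):

```python
family, type_, n_families = 1, 'bugfix', 2
fam_prefix = "{}.x ".format(family) if n_families > 1 else ""
header = "Next {}{} release".format(fam_prefix, type_)
line = "unreleased_{}.x_{}".format(family, type_)
print(header, '|', line)  # Next 1.x bugfix release | unreleased_1.x_bugfix
```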
bitprophet/releases | releases/__init__.py | reorder_release_entries | def reorder_release_entries(releases):
"""
Mutate ``releases`` so the entrylist in each is ordered by feature/bug/etc.
"""
order = {'feature': 0, 'bug': 1, 'support': 2}
for release in releases:
entries = release['entries'][:]
release['entries'] = sorted(entries, key=lambda x: order[x.type]) | python | def reorder_release_entries(releases):
"""
Mutate ``releases`` so the entrylist in each is ordered by feature/bug/etc.
"""
order = {'feature': 0, 'bug': 1, 'support': 2}
for release in releases:
entries = release['entries'][:]
release['entries'] = sorted(entries, key=lambda x: order[x.type]) | Mutate ``releases`` so the entrylist in each is ordered by feature/bug/etc. | https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/__init__.py#L224-L231 |
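A demonstration of the ordering with stand-in entry objects; real entries are `Issue` instances, but only a `.type` attribute matters here:

```python
from collections import namedtuple

from releases import reorder_release_entries

Entry = namedtuple('Entry', 'type number')
release = {'entries': [Entry('bug', 1), Entry('support', 2), Entry('feature', 3)]}
reorder_release_entries([release])
print([e.type for e in release['entries']])  # ['feature', 'bug', 'support']
```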
bitprophet/releases | releases/__init__.py | construct_entry_with_release | def construct_entry_with_release(focus, issues, manager, log, releases, rest):
"""
Releases 'eat' the entries in their line's list and get added to the
final data structure. They also inform new release-line 'buffers'.
Release lines, once the release obj is removed, should be empty or a
comma-separated list of issue numbers.
"""
log("release for line %r" % focus.minor)
# Check for explicitly listed issues first
explicit = None
if rest[0].children:
explicit = [x.strip() for x in rest[0][0].split(',')]
# Do those by themselves since they override all other logic
if explicit:
log("Explicit issues requested: %r" % (explicit,))
# First scan global issue dict, dying if not found
missing = [i for i in explicit if i not in issues]
if missing:
raise ValueError(
"Couldn't find issue(s) #{} in the changelog!".format(
', '.join(missing)))
# Obtain the explicitly named issues from global list
entries = []
for i in explicit:
for flattened_issue_item in itertools.chain(issues[i]):
entries.append(flattened_issue_item)
# Create release
log("entries in this release: %r" % (entries,))
releases.append({
'obj': focus,
'entries': entries,
})
# Introspect these entries to determine which buckets they should get
# removed from (it's not "all of them"!)
for obj in entries:
if obj.type == 'bug':
# Major bugfix: remove from unreleased_feature
if obj.major:
log("Removing #%s from unreleased" % obj.number)
# TODO: consider making a LineManager method somehow
manager[focus.family]['unreleased_feature'].remove(obj)
# Regular bugfix: remove from bucket for this release's
# line + unreleased_bugfix
else:
if obj in manager[focus.family]['unreleased_bugfix']:
log("Removing #%s from unreleased" % obj.number)
manager[focus.family]['unreleased_bugfix'].remove(obj)
if obj in manager[focus.family][focus.minor]:
log("Removing #%s from %s" % (obj.number, focus.minor))
manager[focus.family][focus.minor].remove(obj)
# Regular feature/support: remove from unreleased_feature
# Backported feature/support: remove from bucket for this
# release's line (if applicable) + unreleased_feature
else:
log("Removing #%s from unreleased" % obj.number)
manager[focus.family]['unreleased_feature'].remove(obj)
if obj in manager[focus.family].get(focus.minor, []):
manager[focus.family][focus.minor].remove(obj)
# Implicit behavior otherwise
else:
# Unstable prehistory -> just dump 'unreleased' and continue
if manager.unstable_prehistory:
# TODO: need to continue making LineManager actually OO, i.e. do
# away with the subdicts + keys, move to sub-objects with methods
# answering questions like "what should I give you for a release"
# or whatever
log("in unstable prehistory, dumping 'unreleased'")
releases.append({
'obj': focus,
# NOTE: explicitly dumping 0, not focus.family, since this
# might be the last pre-historical release and thus not 0.x
'entries': manager[0]['unreleased'][:],
})
manager[0]['unreleased'] = []
# If this isn't a 0.x release, it signals end of prehistory, make a
# new release bucket (as is also done below in regular behavior).
# Also acts like a sentinel that prehistory is over.
if focus.family != 0:
manager[focus.family][focus.minor] = []
# Regular behavior from here
else:
# New release line/branch detected. Create it & dump unreleased
# features.
if focus.minor not in manager[focus.family]:
log("not seen prior, making feature release & bugfix bucket")
manager[focus.family][focus.minor] = []
# TODO: this used to explicitly say "go over everything in
# unreleased_feature and dump if it's feature, support or major
# bug". But what the hell else would BE in unreleased_feature?
# Why not just dump the whole thing??
#
# Dump only the items in the bucket whose family this release
# object belongs to, i.e. 1.5.0 should only nab the 1.0
# family's unreleased feature items.
releases.append({
'obj': focus,
'entries': manager[focus.family]['unreleased_feature'][:],
})
manager[focus.family]['unreleased_feature'] = []
# Existing line -> empty out its bucket into new release.
# Skip 'major' bugs as those "belong" to the next release (and will
# also be in 'unreleased_feature' - so safe to nuke the entire
# line)
else:
log("pre-existing, making bugfix release")
# TODO: as in other branch, I don't get why this wasn't just
# dumping the whole thing - why would major bugs be in the
# regular bugfix buckets?
entries = manager[focus.family][focus.minor][:]
releases.append({'obj': focus, 'entries': entries})
manager[focus.family][focus.minor] = []
# Clean out the items we just released from
# 'unreleased_bugfix'. (Can't nuke it because there might
# be some unreleased bugs for other release lines.)
for x in entries:
if x in manager[focus.family]['unreleased_bugfix']:
manager[focus.family]['unreleased_bugfix'].remove(x) | python | def construct_entry_with_release(focus, issues, manager, log, releases, rest):
"""
Releases 'eat' the entries in their line's list and get added to the
final data structure. They also inform new release-line 'buffers'.
Release lines, once the release obj is removed, should be empty or a
comma-separated list of issue numbers.
"""
log("release for line %r" % focus.minor)
# Check for explicitly listed issues first
explicit = None
if rest[0].children:
explicit = [x.strip() for x in rest[0][0].split(',')]
# Do those by themselves since they override all other logic
if explicit:
log("Explicit issues requested: %r" % (explicit,))
# First scan global issue dict, dying if not found
missing = [i for i in explicit if i not in issues]
if missing:
raise ValueError(
"Couldn't find issue(s) #{} in the changelog!".format(
', '.join(missing)))
# Obtain the explicitly named issues from global list
entries = []
for i in explicit:
for flattened_issue_item in itertools.chain(issues[i]):
entries.append(flattened_issue_item)
# Create release
log("entries in this release: %r" % (entries,))
releases.append({
'obj': focus,
'entries': entries,
})
# Introspect these entries to determine which buckets they should get
# removed from (it's not "all of them"!)
for obj in entries:
if obj.type == 'bug':
# Major bugfix: remove from unreleased_feature
if obj.major:
log("Removing #%s from unreleased" % obj.number)
# TODO: consider making a LineManager method somehow
manager[focus.family]['unreleased_feature'].remove(obj)
# Regular bugfix: remove from bucket for this release's
# line + unreleased_bugfix
else:
if obj in manager[focus.family]['unreleased_bugfix']:
log("Removing #%s from unreleased" % obj.number)
manager[focus.family]['unreleased_bugfix'].remove(obj)
if obj in manager[focus.family][focus.minor]:
log("Removing #%s from %s" % (obj.number, focus.minor))
manager[focus.family][focus.minor].remove(obj)
# Regular feature/support: remove from unreleased_feature
# Backported feature/support: remove from bucket for this
# release's line (if applicable) + unreleased_feature
else:
log("Removing #%s from unreleased" % obj.number)
manager[focus.family]['unreleased_feature'].remove(obj)
if obj in manager[focus.family].get(focus.minor, []):
manager[focus.family][focus.minor].remove(obj)
# Implicit behavior otherwise
else:
# Unstable prehistory -> just dump 'unreleased' and continue
if manager.unstable_prehistory:
# TODO: need to continue making LineManager actually OO, i.e. do
# away with the subdicts + keys, move to sub-objects with methods
# answering questions like "what should I give you for a release"
# or whatever
log("in unstable prehistory, dumping 'unreleased'")
releases.append({
'obj': focus,
# NOTE: explicitly dumping 0, not focus.family, since this
# might be the last pre-historical release and thus not 0.x
'entries': manager[0]['unreleased'][:],
})
manager[0]['unreleased'] = []
# If this isn't a 0.x release, it signals end of prehistory, make a
# new release bucket (as is also done below in regular behavior).
# Also acts like a sentinel that prehistory is over.
if focus.family != 0:
manager[focus.family][focus.minor] = []
# Regular behavior from here
else:
# New release line/branch detected. Create it & dump unreleased
# features.
if focus.minor not in manager[focus.family]:
log("not seen prior, making feature release & bugfix bucket")
manager[focus.family][focus.minor] = []
# TODO: this used to explicitly say "go over everything in
# unreleased_feature and dump if it's feature, support or major
# bug". But what the hell else would BE in unreleased_feature?
# Why not just dump the whole thing??
#
# Dump only the items in the bucket whose family this release
# object belongs to, i.e. 1.5.0 should only nab the 1.0
# family's unreleased feature items.
releases.append({
'obj': focus,
'entries': manager[focus.family]['unreleased_feature'][:],
})
manager[focus.family]['unreleased_feature'] = []
# Existing line -> empty out its bucket into new release.
# Skip 'major' bugs as those "belong" to the next release (and will
# also be in 'unreleased_feature' - so safe to nuke the entire
# line)
else:
log("pre-existing, making bugfix release")
# TODO: as in other branch, I don't get why this wasn't just
# dumping the whole thing - why would major bugs be in the
# regular bugfix buckets?
entries = manager[focus.family][focus.minor][:]
releases.append({'obj': focus, 'entries': entries})
manager[focus.family][focus.minor] = []
# Clean out the items we just released from
# 'unreleased_bugfix'. (Can't nuke it because there might
# be some unreleased bugs for other release lines.)
for x in entries:
if x in manager[focus.family]['unreleased_bugfix']:
manager[focus.family]['unreleased_bugfix'].remove(x) | Releases 'eat' the entries in their line's list and get added to the
final data structure. They also inform new release-line 'buffers'.
Release lines, once the release obj is removed, should be empty or a
comma-separated list of issue numbers. | https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/__init__.py#L234-L352 |
bitprophet/releases | releases/__init__.py | handle_first_release_line | def handle_first_release_line(entries, manager):
"""
Set up initial line-manager entry for first encountered release line.
To be called at start of overall process; afterwards, subsequent major
lines are generated by `handle_upcoming_major_release`.
"""
# It's remotely possible the changelog is totally empty...
if not entries:
return
# Obtain (short-circuiting) first Release obj.
first_release = None
for obj in entries:
if isinstance(obj, Release):
first_release = obj
break
# It's also possible it's non-empty but has no releases yet.
if first_release:
manager.add_family(first_release.family)
# If God did not exist, man would be forced to invent him.
else:
manager.add_family(0) | python | def handle_first_release_line(entries, manager):
"""
Set up initial line-manager entry for first encountered release line.
To be called at start of overall process; afterwards, subsequent major
lines are generated by `handle_upcoming_major_release`.
"""
# It's remotely possible the changelog is totally empty...
if not entries:
return
# Obtain (short-circuiting) first Release obj.
first_release = None
for obj in entries:
if isinstance(obj, Release):
first_release = obj
break
# It's also possible it's non-empty but has no releases yet.
if first_release:
manager.add_family(first_release.family)
# If God did not exist, man would be forced to invent him.
else:
manager.add_family(0) | Set up initial line-manager entry for first encountered release line.
To be called at start of overall process; afterwards, subsequent major
lines are generated by `handle_upcoming_major_release`. | https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/__init__.py#L434-L455 |
bitprophet/releases | releases/models.py | Issue.minor_releases | def minor_releases(self, manager):
"""
Return the release families in ``manager`` that contain at least one actual (non-'unreleased') release line.
"""
# TODO: yea deffo need a real object for 'manager', heh. E.g. we do a
# very similar test for "do you have any actual releases yet?"
# elsewhere. (This may be fodder for changing how we roll up
# pre-major-release features though...?)
return [
key for key, value in six.iteritems(manager)
if any(x for x in value if not x.startswith('unreleased'))
] | python | def minor_releases(self, manager):
"""
Return the release families in ``manager`` that contain at least one actual (non-'unreleased') release line.
"""
# TODO: yea deffo need a real object for 'manager', heh. E.g. we do a
# very similar test for "do you have any actual releases yet?"
# elsewhere. (This may be fodder for changing how we roll up
# pre-major-release features though...?)
return [
key for key, value in six.iteritems(manager)
if any(x for x in value if not x.startswith('unreleased'))
] | Return the release families in ``manager`` that contain at least one actual (non-'unreleased') release line. | https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/models.py#L69-L80 |
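Since the line manager is still a plain nested dict (as the TODO above concedes), the selection logic can be illustrated directly. A minimal sketch with hypothetical data, mirroring the list comprehension above:

# Hypothetical line-manager contents: family -> {bucket name -> issue list}
manager = {
    0: {'unreleased': []},                                  # prehistory only
    1: {'unreleased_feature': [], 'unreleased_bugfix': [],
        '1.0': ['#12'], '1.1': []},                         # has shipped releases
}
released = [
    key for key, value in manager.items()
    if any(x for x in value if not x.startswith('unreleased'))
]
assert released == [1]   # only family 1 has a non-'unreleased' bucket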
bitprophet/releases | releases/models.py | Issue.default_spec | def default_spec(self, manager):
"""
Given the current release-lines structure, return a default Spec.
Specifics:
* For feature-like issues, only the highest major release is used, so
given a ``manager`` with top level keys of ``[1, 2]``, this would
return ``Spec(">=2")``.
* When ``releases_always_forwardport_features`` is ``True``, that
behavior is nullified, and this function always returns the empty
``Spec`` (which matches any and all versions/lines).
* For bugfix-like issues, we only consider major release families which
have actual releases already.
* Thus the core difference here is that features are 'consumed' by
upcoming major releases, and bugfixes are not.
* When the ``unstable_prehistory`` setting is ``True``, the default
spec starts at the oldest non-zero release line. (Otherwise, issues
posted after prehistory ends would try being added to the 0.x part of
the tree, which makes no sense in unstable-prehistory mode.)
"""
# TODO: I feel like this + the surrounding bits in add_to_manager()
# could be consolidated & simplified...
specstr = ""
# Make sure truly-default spec skips 0.x if prehistory was unstable.
stable_families = manager.stable_families
if manager.config.releases_unstable_prehistory and stable_families:
specstr = ">={}".format(min(stable_families))
if self.is_featurelike:
# TODO: if app->config-><releases_always_forwardport_features or
# w/e
if True:
specstr = ">={}".format(max(manager.keys()))
else:
# Can only meaningfully limit to minor release buckets if they
# actually exist yet.
buckets = self.minor_releases(manager)
if buckets:
specstr = ">={}".format(max(buckets))
return Spec(specstr) if specstr else Spec() | python | def default_spec(self, manager):
"""
Given the current release-lines structure, return a default Spec.
Specifics:
* For feature-like issues, only the highest major release is used, so
given a ``manager`` with top level keys of ``[1, 2]``, this would
return ``Spec(">=2")``.
* When ``releases_always_forwardport_features`` is ``True``, that
behavior is nullified, and this function always returns the empty
``Spec`` (which matches any and all versions/lines).
* For bugfix-like issues, we only consider major release families which
have actual releases already.
* Thus the core difference here is that features are 'consumed' by
upcoming major releases, and bugfixes are not.
* When the ``unstable_prehistory`` setting is ``True``, the default
spec starts at the oldest non-zero release line. (Otherwise, issues
posted after prehistory ends would try being added to the 0.x part of
the tree, which makes no sense in unstable-prehistory mode.)
"""
# TODO: I feel like this + the surrounding bits in add_to_manager()
# could be consolidated & simplified...
specstr = ""
# Make sure truly-default spec skips 0.x if prehistory was unstable.
stable_families = manager.stable_families
if manager.config.releases_unstable_prehistory and stable_families:
specstr = ">={}".format(min(stable_families))
if self.is_featurelike:
# TODO: if app->config-><releases_always_forwardport_features or
# w/e
if True:
specstr = ">={}".format(max(manager.keys()))
else:
# Can only meaningfully limit to minor release buckets if they
# actually exist yet.
buckets = self.minor_releases(manager)
if buckets:
specstr = ">={}".format(max(buckets))
return Spec(specstr) if specstr else Spec() | Given the current release-lines structure, return a default Spec.
Specifics:
* For feature-like issues, only the highest major release is used, so
given a ``manager`` with top level keys of ``[1, 2]``, this would
return ``Spec(">=2")``.
* When ``releases_always_forwardport_features`` is ``True``, that
behavior is nullified, and this function always returns the empty
``Spec`` (which matches any and all versions/lines).
* For bugfix-like issues, we only consider major release families which
have actual releases already.
* Thus the core difference here is that features are 'consumed' by
upcoming major releases, and bugfixes are not.
* When the ``unstable_prehistory`` setting is ``True``, the default
spec starts at the oldest non-zero release line. (Otherwise, issues
posted after prehistory ends would try being added to the 0.x part of
the tree, which makes no sense in unstable-prehistory mode.) | https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/models.py#L82-L125 |
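A rough sketch of the two branches, using the semantic_version Spec/Version types this module already relies on (including its partial-spec support like ">=2"); the manager contents here are hypothetical:

from semantic_version import Spec, Version

families = [1, 2]      # top-level manager keys
shipped = [1]          # families with actual releases (cf. minor_releases)

feature_spec = Spec(">={}".format(max(families)))   # features chase the newest line
assert Version("2.0.0") in feature_spec
assert Version("1.9.9") not in feature_spec

bugfix_spec = Spec(">={}".format(max(shipped)))     # bugfixes stay on shipped lines
assert Version("1.2.0") in bugfix_spec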
bitprophet/releases | releases/models.py | Issue.add_to_manager | def add_to_manager(self, manager):
"""
Given a 'manager' structure, add self to one or more of its 'buckets'.
"""
# Derive version spec allowing us to filter against major/minor buckets
spec = self.spec or self.default_spec(manager)
# Only look in appropriate major version/family; if self is an issue
# declared as living in e.g. >=2, this means we don't even bother
# looking in the 1.x family.
families = [Version(str(x)) for x in manager]
versions = list(spec.filter(families))
for version in versions:
family = version.major
# Within each family, we further limit which bugfix lines match up
# to what self cares about (ignoring 'unreleased' until later)
candidates = [
Version(x)
for x in manager[family]
if not x.startswith('unreleased')
]
# Select matching release lines (& stringify)
buckets = []
bugfix_buckets = [str(x) for x in spec.filter(candidates)]
# Add back in unreleased_* as appropriate
# TODO: probably leverage Issue subclasses for this eventually?
if self.is_buglike:
buckets.extend(bugfix_buckets)
# Don't put into JUST unreleased_bugfix; it implies that this
# major release/family hasn't actually seen any releases yet
# and only exists for features to go into.
if bugfix_buckets:
buckets.append('unreleased_bugfix')
# Obtain list of minor releases to check for "haven't had ANY
# releases yet" corner case, in which case ALL issues get thrown in
# unreleased_feature for the first release to consume.
# NOTE: assumes first release is a minor or major one,
# but...really? why would your first release be a bugfix one??
no_releases = not self.minor_releases(manager)
if self.is_featurelike or self.backported or no_releases:
buckets.append('unreleased_feature')
# Now that we know which buckets are appropriate, add ourself to
# all of them. TODO: or just...do it above...instead...
for bucket in buckets:
manager[family][bucket].append(self) | python | def add_to_manager(self, manager):
"""
Given a 'manager' structure, add self to one or more of its 'buckets'.
"""
# Derive version spec allowing us to filter against major/minor buckets
spec = self.spec or self.default_spec(manager)
# Only look in appropriate major version/family; if self is an issue
# declared as living in e.g. >=2, this means we don't even bother
# looking in the 1.x family.
families = [Version(str(x)) for x in manager]
versions = list(spec.filter(families))
for version in versions:
family = version.major
# Within each family, we further limit which bugfix lines match up
# to what self cares about (ignoring 'unreleased' until later)
candidates = [
Version(x)
for x in manager[family]
if not x.startswith('unreleased')
]
# Select matching release lines (& stringify)
buckets = []
bugfix_buckets = [str(x) for x in spec.filter(candidates)]
# Add back in unreleased_* as appropriate
# TODO: probably leverage Issue subclasses for this eventually?
if self.is_buglike:
buckets.extend(bugfix_buckets)
# Don't put into JUST unreleased_bugfix; it implies that this
# major release/family hasn't actually seen any releases yet
# and only exists for features to go into.
if bugfix_buckets:
buckets.append('unreleased_bugfix')
# Obtain list of minor releases to check for "haven't had ANY
# releases yet" corner case, in which case ALL issues get thrown in
# unreleased_feature for the first release to consume.
# NOTE: assumes first release is a minor or major one,
# but...really? why would your first release be a bugfix one??
no_releases = not self.minor_releases(manager)
if self.is_featurelike or self.backported or no_releases:
buckets.append('unreleased_feature')
# Now that we know which buckets are appropriate, add ourself to
# all of them. TODO: or just...do it above...instead...
for bucket in buckets:
manager[family][bucket].append(self) | Given a 'manager' structure, add self to one or more of its 'buckets'. | https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/models.py#L127-L170 |
brendonh/pyth | pyth/plugins/rtf15/reader.py | Rtf15Reader.read | def read(self, source, errors='strict', clean_paragraphs=True):
"""
source: a file-like object containing the RTF document to read.
"""
reader = Rtf15Reader(source, errors, clean_paragraphs)
return reader.go() | python | def read(self, source, errors='strict', clean_paragraphs=True):
"""
source: a file-like object containing the RTF document to read.
"""
reader = Rtf15Reader(source, errors, clean_paragraphs)
return reader.go() | source: a file-like object containing the RTF document to read. | https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/rtf15/reader.py#L80-L86 |
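Typical usage, assuming an RTF file on disk (the filename is hypothetical):

from pyth.plugins.rtf15.reader import Rtf15Reader

with open("example.rtf", "rb") as f:
    doc = Rtf15Reader.read(f)   # returns a pyth document tree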
brendonh/pyth | pyth/plugins/rtf15/reader.py | DocBuilder.cleanParagraph | def cleanParagraph(self):
"""
Compress text runs, remove whitespace at start and end,
skip empty blocks, etc
"""
runs = self.block.content
if not runs:
self.block = None
return
if not self.clean_paragraphs:
return
joinedRuns = []
hasContent = False
for run in runs:
if run.content[0]:
hasContent = True
else:
continue
# For whitespace-only groups, remove any property stuff,
# to avoid extra markup in output
if not run.content[0].strip():
run.properties = {}
# Join runs only if their properties match
if joinedRuns and (run.properties == joinedRuns[-1].properties):
joinedRuns[-1].content[0] += run.content[0]
else:
joinedRuns.append(run)
if hasContent:
# Strip beginning of paragraph
joinedRuns[0].content[0] = joinedRuns[0].content[0].lstrip()
# And then strip the end
joinedRuns[-1].content[0] = joinedRuns[-1].content[0].rstrip()
self.block.content = joinedRuns
else:
self.block = None | python | def cleanParagraph(self):
"""
Compress text runs, remove whitespace at start and end,
skip empty blocks, etc
"""
runs = self.block.content
if not runs:
self.block = None
return
if not self.clean_paragraphs:
return
joinedRuns = []
hasContent = False
for run in runs:
if run.content[0]:
hasContent = True
else:
continue
# For whitespace-only groups, remove any property stuff,
# to avoid extra markup in output
if not run.content[0].strip():
run.properties = {}
# Join runs only if their properties match
if joinedRuns and (run.properties == joinedRuns[-1].properties):
joinedRuns[-1].content[0] += run.content[0]
else:
joinedRuns.append(run)
if hasContent:
# Strip beginning of paragraph
joinedRuns[0].content[0] = joinedRuns[0].content[0].lstrip()
# And then strip the end
joinedRuns[-1].content[0] = joinedRuns[-1].content[0].rstrip()
self.block.content = joinedRuns
else:
self.block = None | Compress text runs, remove whitespace at start and end,
skip empty blocks, etc | https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/rtf15/reader.py#L241-L284 |
brendonh/pyth | pyth/plugins/xhtml/css.py | CSS.parse_css | def parse_css(self, css):
"""
Parse a css style sheet into the CSS object.
For the moment this only works for very simple CSS
documents: it matches CSS syntax with regular expressions,
so it is not bulletproof.
"""
rulesets = self.ruleset_re.findall(css)
for (selector, declarations) in rulesets:
rule = Rule(self.parse_selector(selector))
rule.properties = self.parse_declarations(declarations)
self.rules.append(rule) | python | def parse_css(self, css):
"""
Parse a css style sheet into the CSS object.
For the moment this only works for very simple CSS
documents: it matches CSS syntax with regular expressions,
so it is not bulletproof.
"""
rulesets = self.ruleset_re.findall(css)
for (selector, declarations) in rulesets:
rule = Rule(self.parse_selector(selector))
rule.properties = self.parse_declarations(declarations)
self.rules.append(rule) | Parse a css style sheet into the CSS object.
For the moment this only works for very simple CSS
documents: it matches CSS syntax with regular expressions,
so it is not bulletproof. | https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/xhtml/css.py#L73-L85 |
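A small illustration of the regex-based approach, assuming CSS() can be constructed without arguments; the stylesheet string is hypothetical:

css = CSS()
css.parse_css("p.warning { font-weight: bold; color: red }")
rule = css.rules[-1]
# rule.selector should match <p class="warning"> nodes, and
# rule.properties should be {'font-weight': 'bold', 'color': 'red'}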
brendonh/pyth | pyth/plugins/xhtml/css.py | CSS.parse_declarations | def parse_declarations(self, declarations):
"""
parse a css declaration list
"""
declarations = self.declaration_re.findall(declarations)
return dict(declarations) | python | def parse_declarations(self, declarations):
"""
parse a css declaration list
"""
declarations = self.declaration_re.findall(declarations)
return dict(declarations) | parse a css declaration list | https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/xhtml/css.py#L87-L92 |
brendonh/pyth | pyth/plugins/xhtml/css.py | CSS.parse_selector | def parse_selector(self, selector):
"""
parse a css selector
"""
tag, klass = self.selector_re.match(selector).groups()
return Selector(tag, klass) | python | def parse_selector(self, selector):
"""
parse a css selector
"""
tag, klass = self.selector_re.match(selector).groups()
return Selector(tag, klass) | parse a css selector | https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/xhtml/css.py#L94-L99 |
brendonh/pyth | pyth/plugins/xhtml/css.py | CSS.get_properties | def get_properties(self, node):
"""
return a dict of all the properties of a given BeautifulSoup
node found by applying the css style.
"""
ret = {}
# Try all the rules one by one
for rule in self.rules:
if rule.selector(node):
ret.update(rule.properties)
# Also search for direct 'style' arguments in the html doc
for style_node in node.findParents(attrs={'style': True}):
style = style_node.get('style')
properties = self.parse_declarations(style)
ret.update(properties)
return ret | python | def get_properties(self, node):
"""
return a dict of all the properties of a given BeautifulSoup
node found by applying the css style.
"""
ret = {}
# Try all the rules one by one
for rule in self.rules:
if rule.selector(node):
ret.update(rule.properties)
# Also search for direct 'style' arguments in the html doc
for style_node in node.findParents(attrs={'style': True}):
style = style_node.get('style')
properties = self.parse_declarations(style)
ret.update(properties)
return ret | return a dict of all the properties of a given BeautifulSoup
node found by applying the css style. | https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/xhtml/css.py#L101-L116 |
brendonh/pyth | pyth/__init__.py | namedModule | def namedModule(name):
"""Return a module given its name."""
topLevel = __import__(name)
packages = name.split(".")[1:]
m = topLevel
for p in packages:
m = getattr(m, p)
return m | python | def namedModule(name):
"""Return a module given its name."""
topLevel = __import__(name)
packages = name.split(".")[1:]
m = topLevel
for p in packages:
m = getattr(m, p)
return m | Return a module given its name. | https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/__init__.py#L37-L44 |
brendonh/pyth | pyth/__init__.py | namedObject | def namedObject(name):
"""Get a fully named module-global object.
"""
classSplit = name.split('.')
module = namedModule('.'.join(classSplit[:-1]))
return getattr(module, classSplit[-1]) | python | def namedObject(name):
"""Get a fully named module-global object.
"""
classSplit = name.split('.')
module = namedModule('.'.join(classSplit[:-1]))
return getattr(module, classSplit[-1]) | Get a fully named module-global object. | https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/__init__.py#L47-L52 |
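Both helpers resolve dotted names with plain __import__/getattr, so standard-library names work directly:

import os.path

assert namedModule('os.path') is os.path
assert namedObject('os.path.join') is os.path.join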
brendonh/pyth | pyth/plugins/rst/writer.py | RSTWriter.text | def text(self, text):
"""
process a pyth text and return the formatted string
"""
ret = u"".join(text.content)
if 'url' in text.properties:
return u"`%s`_" % ret
if 'bold' in text.properties:
return u"**%s**" % ret
if 'italic' in text.properties:
return u"*%s*" % ret
if 'sub' in text.properties:
return ur"\ :sub:`%s`\ " % ret
if 'super' in text.properties:
return ur"\ :sup:`%s`\ " % ret
return ret | python | def text(self, text):
"""
process a pyth text and return the formatted string
"""
ret = u"".join(text.content)
if 'url' in text.properties:
return u"`%s`_" % ret
if 'bold' in text.properties:
return u"**%s**" % ret
if 'italic' in text.properties:
return u"*%s*" % ret
if 'sub' in text.properties:
return ur"\ :sub:`%s`\ " % ret
if 'super' in text.properties:
return ur"\ :sup:`%s`\ " % ret
return ret | process a pyth text and return the formatted string | https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/rst/writer.py#L40-L55 |
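The property checks above are first-match-wins, in the order url, bold, italic, sub, super. Expected outputs for a run whose content joins to u"hi" (data only, for illustration):

cases = [
    ({'url': 'http://example.com'},  u"`hi`_"),
    ({'bold': True},                 u"**hi**"),
    ({'italic': True},               u"*hi*"),
    ({'bold': True, 'italic': True}, u"**hi**"),  # bold wins; italic never checked
]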
brendonh/pyth | pyth/plugins/rst/writer.py | RSTWriter.paragraph | def paragraph(self, paragraph, prefix=""):
"""
process a pyth paragraph into the target
"""
content = []
for text in paragraph.content:
content.append(self.text(text))
content = u"".join(content).encode("utf-8")
for line in content.split("\n"):
self.target.write(" " * self.indent)
self.target.write(prefix)
self.target.write(line)
self.target.write("\n")
if prefix:
prefix = " "
# handle the links
if any('url' in text.properties for text in paragraph.content):
self.target.write("\n")
for text in paragraph.content:
if 'url' in text.properties:
string = u"".join(text.content)
url = text.properties['url']
self.target.write(".. _%s: %s\n" % (string, url)) | python | def paragraph(self, paragraph, prefix=""):
"""
process a pyth paragraph into the target
"""
content = []
for text in paragraph.content:
content.append(self.text(text))
content = u"".join(content).encode("utf-8")
for line in content.split("\n"):
self.target.write(" " * self.indent)
self.target.write(prefix)
self.target.write(line)
self.target.write("\n")
if prefix:
prefix = " "
# handle the links
if any('url' in text.properties for text in paragraph.content):
self.target.write("\n")
for text in paragraph.content:
if 'url' in text.properties:
string = u"".join(text.content)
url = text.properties['url']
self.target.write(".. _%s: %s\n" % (string, url)) | process a pyth paragraph into the target | https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/rst/writer.py#L57-L81 |
brendonh/pyth | pyth/plugins/rst/writer.py | RSTWriter.list | def list(self, list, prefix=None):
"""
Process a pyth list into the target
"""
self.indent += 1
for (i, entry) in enumerate(list.content):
for (j, paragraph) in enumerate(entry.content):
prefix = "- " if j == 0 else " "
handler = self.paragraphDispatch[paragraph.__class__]
handler(paragraph, prefix)
self.target.write("\n")
self.indent -= 1 | python | def list(self, list, prefix=None):
"""
Process a pyth list into the target
"""
self.indent += 1
for (i, entry) in enumerate(list.content):
for (j, paragraph) in enumerate(entry.content):
prefix = "- " if j == 0 else " "
handler = self.paragraphDispatch[paragraph.__class__]
handler(paragraph, prefix)
self.target.write("\n")
self.indent -= 1 | Process a pyth list into the target | https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/rst/writer.py#L83-L94 |
brendonh/pyth | pyth/plugins/xhtml/reader.py | XHTMLReader.format | def format(self, soup):
"""format a BeautifulSoup document
This transforms each block element's content from
multi-line text into a single line.
This lets us avoid having to deal with further text
rendering once this step has been done.
"""
# Remove all the newline characters before a closing tag.
for node in soup.findAll(text=True):
if node.rstrip(" ").endswith("\n"):
node.replaceWith(node.rstrip(" ").rstrip("\n"))
# Join the block elements lines into a single long line
for tag in ['p', 'li']:
for node in soup.findAll(tag):
text = unicode(node)
lines = [x.strip() for x in text.splitlines()]
text = ' '.join(lines)
node.replaceWith(BeautifulSoup.BeautifulSoup(text))
soup = BeautifulSoup.BeautifulSoup(unicode(soup))
# replace all <br/> tags with newline characters
for node in soup.findAll('br'):
node.replaceWith("\n")
soup = BeautifulSoup.BeautifulSoup(unicode(soup))
return soup | python | def format(self, soup):
"""format a BeautifulSoup document
This transforms each block element's content from
multi-line text into a single line.
This lets us avoid having to deal with further text
rendering once this step has been done.
"""
# Remove all the newline characters before a closing tag.
for node in soup.findAll(text=True):
if node.rstrip(" ").endswith("\n"):
node.replaceWith(node.rstrip(" ").rstrip("\n"))
# Join the block elements lines into a single long line
for tag in ['p', 'li']:
for node in soup.findAll(tag):
text = unicode(node)
lines = [x.strip() for x in text.splitlines()]
text = ' '.join(lines)
node.replaceWith(BeautifulSoup.BeautifulSoup(text))
soup = BeautifulSoup.BeautifulSoup(unicode(soup))
# replace all <br/> tags with newline characters
for node in soup.findAll('br'):
node.replaceWith("\n")
soup = BeautifulSoup.BeautifulSoup(unicode(soup))
return soup | format a BeautifulSoup document
This transforms each block element's content from
multi-line text into a single line.
This lets us avoid having to deal with further text
rendering once this step has been done. | https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/xhtml/reader.py#L40-L65 |
brendonh/pyth | pyth/plugins/xhtml/reader.py | XHTMLReader.url | def url(self, node):
"""
return the url of a BeautifulSoup node or None if there is no
url.
"""
a_node = node.findParent('a')
if not a_node:
return None
if self.link_callback is None:
return a_node.get('href')
else:
return self.link_callback(a_node.get('href')) | python | def url(self, node):
"""
return the url of a BeautifulSoup node or None if there is no
url.
"""
a_node = node.findParent('a')
if not a_node:
return None
if self.link_callback is None:
return a_node.get('href')
else:
return self.link_callback(a_node.get('href')) | return the url of a BeautifulSoup node or None if there is no
url. | https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/xhtml/reader.py#L99-L111 |
brendonh/pyth | pyth/plugins/xhtml/reader.py | XHTMLReader.process_text | def process_text(self, node):
"""
Return a pyth Text object from a BeautifulSoup node or None if
the text is empty.
"""
text = node.string.strip()
if not text:
return
# Set all the properties
properties = dict()
if self.is_bold(node):
properties['bold'] = True
if self.is_italic(node):
properties['italic'] = True
if self.url(node):
properties['url'] = self.url(node)
if self.is_sub(node):
properties['sub'] = True
if self.is_super(node):
properties['super'] = True
content = [node.string]
return document.Text(properties, content) | python | def process_text(self, node):
"""
Return a pyth Text object from a BeautifulSoup node or None if
the text is empty.
"""
text = node.string.strip()
if not text:
return
# Set all the properties
properties = dict()
if self.is_bold(node):
properties['bold'] = True
if self.is_italic(node):
properties['italic'] = True
if self.url(node):
properties['url'] = self.url(node)
if self.is_sub(node):
properties['sub'] = True
if self.is_super(node):
properties['super'] = True
content = [node.string]
return document.Text(properties, content) | Return a pyth Text object from a BeautifulSoup node or None if
the text is empty. | https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/xhtml/reader.py#L113-L137 |
brendonh/pyth | pyth/plugins/xhtml/reader.py | XHTMLReader.process_into | def process_into(self, node, obj):
"""
Process a BeautifulSoup node and fill its elements into a pyth
base object.
"""
if isinstance(node, BeautifulSoup.NavigableString):
text = self.process_text(node)
if text:
obj.append(text)
return
if node.name == 'p':
# add a new paragraph into the pyth object
new_obj = document.Paragraph()
obj.append(new_obj)
obj = new_obj
elif node.name == 'ul':
# add a new list
new_obj = document.List()
obj.append(new_obj)
obj = new_obj
elif node.name == 'li':
# add a new list entry
new_obj = document.ListEntry()
obj.append(new_obj)
obj = new_obj
for child in node:
self.process_into(child, obj) | python | def process_into(self, node, obj):
"""
Process a BeautifulSoup node and fill its elements into a pyth
base object.
"""
if isinstance(node, BeautifulSoup.NavigableString):
text = self.process_text(node)
if text:
obj.append(text)
return
if node.name == 'p':
# add a new paragraph into the pyth object
new_obj = document.Paragraph()
obj.append(new_obj)
obj = new_obj
elif node.name == 'ul':
# add a new list
new_obj = document.List()
obj.append(new_obj)
obj = new_obj
elif node.name == 'li':
# add a new list entry
new_obj = document.ListEntry()
obj.append(new_obj)
obj = new_obj
for child in node:
self.process_into(child, obj) | Process a BeautifulSoup node and fill its elements into a pyth
base object. | https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/xhtml/reader.py#L139-L165 |
brendonh/pyth | pyth/document.py | _PythBase.append | def append(self, item):
"""
Try to add an item to this element.
If the item is of the wrong type, and if this element has a sub-type,
then try to create such a sub-type and insert the item into that, instead.
This happens recursively, so (in python-markup):
L [ u'Foo' ]
actually creates:
L [ LE [ P [ T [ u'Foo' ] ] ] ]
If that doesn't work, raise a TypeError.
"""
okay = True
if not isinstance(item, self.contentType):
if hasattr(self.contentType, 'contentType'):
try:
item = self.contentType(content=[item])
except TypeError:
okay = False
else:
okay = False
if not okay:
raise TypeError("Wrong content type for %s: %s (%s)" % (
self.__class__.__name__, repr(type(item)), repr(item)))
self.content.append(item) | python | def append(self, item):
"""
Try to add an item to this element.
If the item is of the wrong type, and if this element has a sub-type,
then try to create such a sub-type and insert the item into that, instead.
This happens recursively, so (in python-markup):
L [ u'Foo' ]
actually creates:
L [ LE [ P [ T [ u'Foo' ] ] ] ]
If that doesn't work, raise a TypeError.
"""
okay = True
if not isinstance(item, self.contentType):
if hasattr(self.contentType, 'contentType'):
try:
item = self.contentType(content=[item])
except TypeError:
okay = False
else:
okay = False
if not okay:
raise TypeError("Wrong content type for %s: %s (%s)" % (
self.__class__.__name__, repr(type(item)), repr(item)))
self.content.append(item) | Try to add an item to this element.
If the item is of the wrong type, and if this element has a sub-type,
then try to create such a sub-type and insert the item into that, instead.
This happens recursively, so (in python-markup):
L [ u'Foo' ]
actually creates:
L [ LE [ P [ T [ u'Foo' ] ] ] ]
If that doesn't work, raise a TypeError. | https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/document.py#L30-L59 |
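The docstring's python-markup example can be spelled out with the concrete pyth classes; a sketch assuming the usual contentType chain List -> ListEntry -> Paragraph -> Text and default constructors:

from pyth import document

lst = document.List()
lst.append(u'Foo')   # coerced into List[ListEntry[Paragraph[Text[u'Foo']]]]

text = lst.content[0].content[0].content[0]   # the innermost Text
assert text.content == [u'Foo']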
brendonh/pyth | pyth/plugins/python/reader.py | _MetaPythonBase | def _MetaPythonBase():
"""
Return a metaclass which implements __getitem__,
allowing e.g. P[...] instead of P()[...]
"""
class MagicGetItem(type):
def __new__(mcs, name, bases, dict):
klass = type.__new__(mcs, name, bases, dict)
mcs.__getitem__ = lambda _, k: klass()[k]
return klass
return MagicGetItem | python | def _MetaPythonBase():
"""
Return a metaclass which implements __getitem__,
allowing e.g. P[...] instead of P()[...]
"""
class MagicGetItem(type):
def __new__(mcs, name, bases, dict):
klass = type.__new__(mcs, name, bases, dict)
mcs.__getitem__ = lambda _, k: klass()[k]
return klass
return MagicGetItem | Return a metaclass which implements __getitem__,
allowing e.g. P[...] instead of P()[...] | https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/python/reader.py#L40-L52 |
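A minimal self-contained demo of the trick, in the Python 2 style this codebase uses; P here is a hypothetical stand-in, not the real python-markup element. Note that each _MetaPythonBase() call returns a fresh metaclass, which is what keeps the injected __getitem__ scoped to one class:

class P(object):
    __metaclass__ = _MetaPythonBase()

    def __init__(self):
        self.content = []

    def __getitem__(self, k):
        self.content.append(k)
        return self

p = P[u'Foo']                  # sugar for P()[u'Foo']
assert p.content == [u'Foo']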
brendonh/pyth | pyth/plugins/latex/writer.py | LatexWriter.write | def write(klass, document, target=None, stylesheet=""):
"""
Convert a pyth document to a LaTeX document.
We can specify a stylesheet as a LaTeX document fragment that
will be inserted after the headers. This way we can override
the default style.
"""
writer = LatexWriter(document, target, stylesheet)
return writer.go() | python | def write(klass, document, target=None, stylesheet=""):
"""
convert a pyth document to a latex document
we can specify a stylesheet as a latex document fragment that
will be inserted after the headers. This way we can override
the default style.
"""
writer = LatexWriter(document, target, stylesheet)
return writer.go() | convert a pyth document to a latex document
we can specify a stylesheet as a latex document fragment that
will be inserted after the headers. This way we can override
the default style. | https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/latex/writer.py#L19-L28 |
brendonh/pyth | pyth/plugins/latex/writer.py | LatexWriter.full_stylesheet | def full_stylesheet(self):
"""
Return the style sheet that will ultimately be inserted into
the latex document.
This is the user given style sheet plus some additional parts
to add the meta data.
"""
latex_fragment = r"""
\usepackage[colorlinks=true,linkcolor=blue,urlcolor=blue]{hyperref}
\hypersetup{
pdftitle={%s},
pdfauthor={%s},
pdfsubject={%s}
}
""" % (self.document.properties.get("title"),
self.document.properties.get("author"),
self.document.properties.get("subject"))
return latex_fragment + self.stylesheet | python | def full_stylesheet(self):
"""
Return the style sheet that will ultimately be inserted into
the latex document.
This is the user given style sheet plus some additional parts
to add the meta data.
"""
latex_fragment = r"""
\usepackage[colorlinks=true,linkcolor=blue,urlcolor=blue]{hyperref}
\hypersetup{
pdftitle={%s},
pdfauthor={%s},
pdfsubject={%s}
}
""" % (self.document.properties.get("title"),
self.document.properties.get("author"),
self.document.properties.get("subject"))
return latex_fragment + self.stylesheet | Return the style sheet that will ultimately be inserted into
the latex document.
This is the user given style sheet plus some additional parts
to add the meta data. | https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/latex/writer.py#L42-L60 |
opentok/Opentok-Python-SDK | opentok/endpoints.py | Endpoints.get_stream_url | def get_stream_url(self, session_id, stream_id=None):
""" this method returns the url to get streams information """
url = self.api_url + '/v2/project/' + self.api_key + '/session/' + session_id + '/stream'
if stream_id:
url = url + '/' + stream_id
return url | python | def get_stream_url(self, session_id, stream_id=None):
""" this method returns the url to get streams information """
url = self.api_url + '/v2/project/' + self.api_key + '/session/' + session_id + '/stream'
if stream_id:
url = url + '/' + stream_id
return url | this method returns the url to get stream information | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/endpoints.py#L29-L34 |
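The endpoint helpers are plain string concatenation; with hypothetical values, the shapes are:

api_url, api_key = 'https://api.opentok.com', '45678'      # hypothetical
expected = api_url + '/v2/project/' + api_key + '/session/SESSIONID/stream'
# get_stream_url('SESSIONID')             -> expected
# get_stream_url('SESSIONID', 'STREAMID') -> expected + '/STREAMID'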
opentok/Opentok-Python-SDK | opentok/endpoints.py | Endpoints.force_disconnect_url | def force_disconnect_url(self, session_id, connection_id):
""" this method returns the force disconnect url endpoint """
url = (
self.api_url + '/v2/project/' + self.api_key + '/session/' +
session_id + '/connection/' + connection_id
)
return url | python | def force_disconnect_url(self, session_id, connection_id):
""" this method returns the force disconnect url endpoint """
url = (
self.api_url + '/v2/project/' + self.api_key + '/session/' +
session_id + '/connection/' + connection_id
)
return url | this method returns the force disconnect url endpoint | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/endpoints.py#L36-L42 |
opentok/Opentok-Python-SDK | opentok/endpoints.py | Endpoints.set_archive_layout_url | def set_archive_layout_url(self, archive_id):
""" this method returns the url to set the archive layout """
url = self.api_url + '/v2/project/' + self.api_key + '/archive/' + archive_id + '/layout'
return url | python | def set_archive_layout_url(self, archive_id):
""" this method returns the url to set the archive layout """
url = self.api_url + '/v2/project/' + self.api_key + '/archive/' + archive_id + '/layout'
return url | this method returns the url to set the archive layout | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/endpoints.py#L44-L47 |
opentok/Opentok-Python-SDK | opentok/endpoints.py | Endpoints.set_stream_class_lists_url | def set_stream_class_lists_url(self, session_id):
""" this method returns the url to set the stream class list """
url = self.api_url + '/v2/project/' + self.api_key + '/session/' + session_id + '/stream'
return url | python | def set_stream_class_lists_url(self, session_id):
""" this method returns the url to set the stream class list """
url = self.api_url + '/v2/project/' + self.api_key + '/session/' + session_id + '/stream'
return url | this method returns the url to set the stream class list | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/endpoints.py#L54-L57 |
opentok/Opentok-Python-SDK | opentok/endpoints.py | Endpoints.broadcast_url | def broadcast_url(self, broadcast_id=None, stop=False, layout=False):
""" this method returns urls for working with broadcast """
url = self.api_url + '/v2/project/' + self.api_key + '/broadcast'
if broadcast_id:
url = url + '/' + broadcast_id
if stop:
url = url + '/stop'
if layout:
url = url + '/layout'
return url | python | def broadcast_url(self, broadcast_id=None, stop=False, layout=False):
""" this method returns urls for working with broadcast """
url = self.api_url + '/v2/project/' + self.api_key + '/broadcast'
if broadcast_id:
url = url + '/' + broadcast_id
if stop:
url = url + '/stop'
if layout:
url = url + '/layout'
return url | this method returns urls for working with broadcast | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/endpoints.py#L59-L69 |
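With the same hypothetical base URL and key, the broadcast helper produces the variants below. Note that stop and layout are meant to be mutually exclusive; the code does not guard against passing both, which would naively yield '.../stop/layout':

# broadcast_url()                     -> .../v2/project/45678/broadcast
# broadcast_url('BCAST')              -> .../broadcast/BCAST
# broadcast_url('BCAST', stop=True)   -> .../broadcast/BCAST/stop
# broadcast_url('BCAST', layout=True) -> .../broadcast/BCAST/layout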
opentok/Opentok-Python-SDK | opentok/archives.py | Archive.stop | def stop(self):
"""
Stops an OpenTok archive that is being recorded.
Archives automatically stop recording after 120 minutes or when all clients have
disconnected from the session being archived.
"""
temp_archive = self.sdk.stop_archive(self.id)
for k,v in iteritems(temp_archive.attrs()):
setattr(self, k, v) | python | def stop(self):
"""
Stops an OpenTok archive that is being recorded.
Archives automatically stop recording after 120 minutes or when all clients have
disconnected from the session being archived.
"""
temp_archive = self.sdk.stop_archive(self.id)
for k,v in iteritems(temp_archive.attrs()):
setattr(self, k, v) | Stops an OpenTok archive that is being recorded.
Archives automatically stop recording after 120 minutes or when all clients have
disconnected from the session being archived. | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/archives.py#L107-L116 |
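Typical flow, assuming an OpenTok SDK instance and a routed session that is currently being recorded (all ids hypothetical):

# opentok = OpenTok(api_key, api_secret); session_id from a routed session
archive = opentok.start_archive(session_id, name=u'interview')
# ... recording happens ...
archive.stop()   # also refreshes this object's attributes from the API response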
opentok/Opentok-Python-SDK | opentok/archives.py | Archive.attrs | def attrs(self):
"""
Returns a dictionary of the archive's attributes.
"""
return dict((k, v) for k, v in iteritems(self.__dict__) if k != "sdk") | python | def attrs(self):
"""
Returns a dictionary of the archive's attributes.
"""
return dict((k, v) for k, v in iteritems(self.__dict__) if k != "sdk") | Returns a dictionary of the archive's attributes. | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/archives.py#L129-L133 |
opentok/Opentok-Python-SDK | opentok/opentok.py | OpenTok.generate_token | def generate_token(self, session_id, role=Roles.publisher, expire_time=None, data=None,
initial_layout_class_list=[]):
"""
Generates a token for a given session.
:param String session_id: The session ID of the session to be accessed by the client using
the token.
:param String role: The role for the token. Valid values are defined in the Roles
class:
* `Roles.subscriber` -- A subscriber can only subscribe to streams.
* `Roles.publisher` -- A publisher can publish streams, subscribe to
streams, and signal. (This is the default value if you do not specify a role.)
* `Roles.moderator` -- In addition to the privileges granted to a
publisher, in clients using the OpenTok.js 2.2 library, a moderator can call the
`forceUnpublish()` and `forceDisconnect()` methods of the
Session object.
:param int expire_time: The expiration time of the token, in seconds since the UNIX epoch.
The maximum expiration time is 30 days after the creation time. The default expiration
time is 24 hours after the token creation time.
:param String data: A string containing connection metadata describing the
end-user. For example, you can pass the user ID, name, or other data describing the
end-user. The length of the string is limited to 1000 characters. This data cannot be
updated once it is set.
:param list initial_layout_class_list: An array of class names (strings)
to be used as the initial layout classes for streams published by the client. Layout
classes are used in customizing the layout of videos in
`live streaming broadcasts <https://tokbox.com/developer/guides/broadcast/#live-streaming>`_ and
`composed archives <https://tokbox.com/developer/guides/archiving/layout-control.html>`_
:rtype:
The token string.
"""
# normalize
# expire_time can be an integer, a datetime object, or anything else that can be coerced into an integer
# after this block it will only be an integer
if expire_time is not None:
if isinstance(expire_time, datetime):
expire_time = calendar.timegm(expire_time.utctimetuple())
else:
try:
expire_time = int(expire_time)
except (ValueError, TypeError):
raise OpenTokException(u('Cannot generate token, invalid expire time {0}').format(expire_time))
else:
expire_time = int(time.time()) + (60*60*24) # 1 day
# validations
if not text_type(session_id):
raise OpenTokException(u('Cannot generate token, session_id was not valid {0}').format(session_id))
if not isinstance(role, Roles):
raise OpenTokException(u('Cannot generate token, {0} is not a valid role').format(role))
now = int(time.time())
if expire_time < now:
raise OpenTokException(u('Cannot generate token, expire_time is not in the future {0}').format(expire_time))
if expire_time > now + (60*60*24*30): # 30 days
raise OpenTokException(u('Cannot generate token, expire_time is not in the next 30 days {0}').format(expire_time))
if data and len(data) > 1000:
raise OpenTokException(u('Cannot generate token, data must be less than 1000 characters'))
if initial_layout_class_list and not all(text_type(c) for c in initial_layout_class_list):
raise OpenTokException(u('Cannot generate token, all items in initial_layout_class_list must be strings'))
initial_layout_class_list_serialized = u(' ').join(initial_layout_class_list)
if len(initial_layout_class_list_serialized) > 1000:
raise OpenTokException(u('Cannot generate token, initial_layout_class_list must be less than 1000 characters'))
# decode session id to verify api_key
sub_session_id = session_id[2:]
sub_session_id_bytes = sub_session_id.encode('utf-8')
sub_session_id_bytes_padded = sub_session_id_bytes + (b('=') * (-len(sub_session_id_bytes) % 4))
try:
decoded_session_id = base64.b64decode(sub_session_id_bytes_padded, b('-_'))
parts = decoded_session_id.decode('utf-8').split(u('~'))
except Exception as e:
raise OpenTokException(u('Cannot generate token, the session_id {0} was not valid').format(session_id))
if self.api_key not in parts:
raise OpenTokException(u('Cannot generate token, the session_id {0} does not belong to the api_key {1}').format(session_id, self.api_key))
data_params = dict(
session_id = session_id,
create_time = now,
expire_time = expire_time,
role = role.value,
nonce = random.randint(0,999999),
initial_layout_class_list = initial_layout_class_list_serialized
)
if data:
data_params['connection_data'] = data
data_string = urlencode(data_params, True)
sig = self._sign_string(data_string, self.api_secret)
decoded_base64_bytes = u('partner_id={api_key}&sig={sig}:{payload}').format(
api_key = self.api_key,
sig = sig,
payload = data_string
)
if PY3:
decoded_base64_bytes = decoded_base64_bytes.encode('utf-8')
token = u('{sentinel}{base64_data}').format(
sentinel = self.TOKEN_SENTINEL,
base64_data = base64.b64encode(decoded_base64_bytes).decode()
)
return token | python | def generate_token(self, session_id, role=Roles.publisher, expire_time=None, data=None,
initial_layout_class_list=[]):
"""
Generates a token for a given session.
:param String session_id: The session ID of the session to be accessed by the client using
the token.
:param String role: The role for the token. Valid values are defined in the Roles
class:
* `Roles.subscriber` -- A subscriber can only subscribe to streams.
* `Roles.publisher` -- A publisher can publish streams, subscribe to
streams, and signal. (This is the default value if you do not specify a role.)
* `Roles.moderator` -- In addition to the privileges granted to a
publisher, in clients using the OpenTok.js 2.2 library, a moderator can call the
`forceUnpublish()` and `forceDisconnect()` methods of the
Session object.
:param int expire_time: The expiration time of the token, in seconds since the UNIX epoch.
The maximum expiration time is 30 days after the creation time. The default expiration
time is 24 hours after the token creation time.
:param String data: A string containing connection metadata describing the
end-user. For example, you can pass the user ID, name, or other data describing the
end-user. The length of the string is limited to 1000 characters. This data cannot be
updated once it is set.
:param list initial_layout_class_list: An array of class names (strings)
to be used as the initial layout classes for streams published by the client. Layout
classes are used in customizing the layout of videos in
`live streaming broadcasts <https://tokbox.com/developer/guides/broadcast/#live-streaming>`_ and
`composed archives <https://tokbox.com/developer/guides/archiving/layout-control.html>`_
:rtype:
The token string.
"""
# normalize
# expire_time can be an integer, a datetime object, or anything else that can be coerced into an integer
# after this block it will only be an integer
if expire_time is not None:
if isinstance(expire_time, datetime):
expire_time = calendar.timegm(expire_time.utctimetuple())
else:
try:
expire_time = int(expire_time)
except (ValueError, TypeError):
raise OpenTokException(u('Cannot generate token, invalid expire time {0}').format(expire_time))
else:
expire_time = int(time.time()) + (60*60*24) # 1 day
# validations
if not text_type(session_id):
raise OpenTokException(u('Cannot generate token, session_id was not valid {0}').format(session_id))
if not isinstance(role, Roles):
raise OpenTokException(u('Cannot generate token, {0} is not a valid role').format(role))
now = int(time.time())
if expire_time < now:
raise OpenTokException(u('Cannot generate token, expire_time is not in the future {0}').format(expire_time))
if expire_time > now + (60*60*24*30): # 30 days
raise OpenTokException(u('Cannot generate token, expire_time is not in the next 30 days {0}').format(expire_time))
if data and len(data) > 1000:
raise OpenTokException(u('Cannot generate token, data must be less than 1000 characters'))
if initial_layout_class_list and not all(text_type(c) for c in initial_layout_class_list):
raise OpenTokException(u('Cannot generate token, all items in initial_layout_class_list must be strings'))
initial_layout_class_list_serialized = u(' ').join(initial_layout_class_list)
if len(initial_layout_class_list_serialized) > 1000:
raise OpenTokException(u('Cannot generate token, initial_layout_class_list must be less than 1000 characters'))
# decode session id to verify api_key
sub_session_id = session_id[2:]
sub_session_id_bytes = sub_session_id.encode('utf-8')
sub_session_id_bytes_padded = sub_session_id_bytes + (b('=') * (-len(sub_session_id_bytes) % 4))
try:
decoded_session_id = base64.b64decode(sub_session_id_bytes_padded, b('-_'))
parts = decoded_session_id.decode('utf-8').split(u('~'))
except Exception as e:
raise OpenTokException(u('Cannot generate token, the session_id {0} was not valid').format(session_id))
if self.api_key not in parts:
raise OpenTokException(u('Cannot generate token, the session_id {0} does not belong to the api_key {1}').format(session_id, self.api_key))
data_params = dict(
session_id = session_id,
create_time = now,
expire_time = expire_time,
role = role.value,
nonce = random.randint(0,999999),
initial_layout_class_list = initial_layout_class_list_serialized
)
if data:
data_params['connection_data'] = data
data_string = urlencode(data_params, True)
sig = self._sign_string(data_string, self.api_secret)
decoded_base64_bytes = u('partner_id={api_key}&sig={sig}:{payload}').format(
api_key = self.api_key,
sig = sig,
payload = data_string
)
if PY3:
decoded_base64_bytes = decoded_base64_bytes.encode('utf-8')
token = u('{sentinel}{base64_data}').format(
sentinel = self.TOKEN_SENTINEL,
base64_data = base64.b64encode(decoded_base64_bytes).decode()
)
return token | Generates a token for a given session.
:param String session_id: The session ID of the session to be accessed by the client using
the token.
:param String role: The role for the token. Valid values are defined in the Roles
class:
* `Roles.subscriber` -- A subscriber can only subscribe to streams.
* `Roles.publisher` -- A publisher can publish streams, subscribe to
streams, and signal. (This is the default value if you do not specify a role.)
* `Roles.moderator` -- In addition to the privileges granted to a
publisher, in clients using the OpenTok.js 2.2 library, a moderator can call the
`forceUnpublish()` and `forceDisconnect()` methods of the
Session object.
:param int expire_time: The expiration time of the token, in seconds since the UNIX epoch.
The maximum expiration time is 30 days after the creation time. The default expiration
time is 24 hours after the token creation time.
:param String data: A string containing connection metadata describing the
end-user. For example, you can pass the user ID, name, or other data describing the
end-user. The length of the string is limited to 1000 characters. This data cannot be
updated once it is set.
:param list initial_layout_class_list: An array of class names (strings)
to be used as the initial layout classes for streams published by the client. Layout
classes are used in customizing the layout of videos in
`live streaming broadcasts <https://tokbox.com/developer/guides/broadcast/#live-streaming>`_ and
`composed archives <https://tokbox.com/developer/guides/archiving/layout-control.html>`_
:rtype:
The token string. | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L94-L202 |
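Typical usage, assuming credentials and an existing session (all values hypothetical); note the validation above means the token is only valid for a session created under the same API key:

import time
from opentok import OpenTok, Roles

opentok = OpenTok('45678', 'API_SECRET')          # hypothetical credentials
session = opentok.create_session()
token = opentok.generate_token(
    session.session_id,
    role=Roles.moderator,
    expire_time=int(time.time()) + 3600,          # one hour from now
    data=u'name=alice',
)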
opentok/Opentok-Python-SDK | opentok/opentok.py | OpenTok.create_session | def create_session(self, location=None, media_mode=MediaModes.relayed, archive_mode=ArchiveModes.manual):
"""
Creates a new OpenTok session and returns the session ID, which uniquely identifies
the session.
For example, when using the OpenTok JavaScript library, use the session ID when calling the
OT.initSession() method (to initialize an OpenTok session).
OpenTok sessions do not expire. However, authentication tokens do expire (see the
generateToken() method). Also note that sessions cannot explicitly be destroyed.
A session ID string can be up to 255 characters long.
Calling this method results in an OpenTokException in the event of an error.
Check the error message for details.
You can also create a session using the OpenTok
`REST API <https://tokbox.com/opentok/api/#session_id_production>`_ or
`the OpenTok dashboard <https://dashboard.tokbox.com/projects>`_.
:param String media_mode: Determines whether the session will transmit streams using the
OpenTok Media Router (MediaMode.routed) or not (MediaMode.relayed). By default,
the setting is MediaMode.relayed.
With the media_mode property set to MediaMode.relayed, the session
will attempt to transmit streams directly between clients. If clients cannot connect
due to firewall restrictions, the session uses the OpenTok TURN server to relay
audio-video streams.
The `OpenTok Media
Router <https://tokbox.com/opentok/tutorials/create-session/#media-mode>`_
provides the following benefits:
* The OpenTok Media Router can decrease bandwidth usage in multiparty sessions.
(When the mediaMode property is set to MediaMode.relayed, each client must send
a separate audio-video stream to each client subscribing to it.)
* The OpenTok Media Router can improve the quality of the user experience through
audio fallback and video recovery (see https://tokbox.com/platform/fallback). With
these features, if a client's connectivity degrades to a degree that
it does not support video for a stream it's subscribing to, the video is dropped on
that client (without affecting other clients), and the client receives audio only.
If the client's connectivity improves, the video returns.
* The OpenTok Media Router supports the archiving feature, which lets
you record, save, and retrieve OpenTok sessions (see http://tokbox.com/platform/archiving).
:param String archive_mode: Whether the session is automatically archived
(ArchiveModes.always) or not (ArchiveModes.manual). By default,
the setting is ArchiveModes.manual, and you must call the
start_archive() method of the OpenTok object to start archiving. To archive the session
(either automatically or not), you must set the media_mode parameter to
MediaModes.routed.
:param String location: An IP address that the OpenTok servers will use to
situate the session in its global network. If you do not set a location hint,
the session will be situated based on the location of the first client connecting to it.
:rtype: The Session object. The session_id property of the object is the session ID.
"""
# build options
options = {}
if not isinstance(media_mode, MediaModes):
raise OpenTokException(u('Cannot create session, {0} is not a valid media mode').format(media_mode))
if not isinstance(archive_mode, ArchiveModes):
raise OpenTokException(u('Cannot create session, {0} is not a valid archive mode').format(archive_mode))
if archive_mode == ArchiveModes.always and media_mode != MediaModes.routed:
raise OpenTokException(u('A session with always archive mode must also have the routed media mode.'))
options[u('p2p.preference')] = media_mode.value
options[u('archiveMode')] = archive_mode.value
if location:
# validate IP address
try:
inet_aton(location)
except:
raise OpenTokException(u('Cannot create session. Location must be either None or a valid IPv4 address {0}').format(location))
options[u('location')] = location
try:
response = requests.post(self.endpoints.session_url(), data=options, headers=self.headers(), proxies=self.proxies, timeout=self.timeout)
response.encoding = 'utf-8'
if response.status_code == 403:
raise AuthError('Failed to create session, invalid credentials')
if not response.content:
raise RequestError()
dom = xmldom.parseString(response.content)
except Exception as e:
raise RequestError('Failed to create session: %s' % str(e))
try:
error = dom.getElementsByTagName('error')
if error:
error = error[0]
raise AuthError('Failed to create session (code=%s): %s' % (error.attributes['code'].value, error.firstChild.attributes['message'].value))
session_id = dom.getElementsByTagName('session_id')[0].childNodes[0].nodeValue
return Session(self, session_id, location=location, media_mode=media_mode, archive_mode=archive_mode)
except Exception as e:
raise OpenTokException('Failed to generate session: %s' % str(e)) | python | def create_session(self, location=None, media_mode=MediaModes.relayed, archive_mode=ArchiveModes.manual):
"""
Creates a new OpenTok session and returns the session ID, which uniquely identifies
the session.
For example, when using the OpenTok JavaScript library, use the session ID when calling the
OT.initSession() method (to initialize an OpenTok session).
OpenTok sessions do not expire. However, authentication tokens do expire (see the
generateToken() method). Also note that sessions cannot explicitly be destroyed.
A session ID string can be up to 255 characters long.
Calling this method results in an OpenTokException in the event of an error.
Check the error message for details.
You can also create a session using the OpenTok
`REST API <https://tokbox.com/opentok/api/#session_id_production>`_ or
`the OpenTok dashboard <https://dashboard.tokbox.com/projects>`_.
:param String media_mode: Determines whether the session will transmit streams using the
OpenTok Media Router (MediaMode.routed) or not (MediaMode.relayed). By default,
the setting is MediaMode.relayed.
With the media_mode property set to MediaMode.relayed, the session
will attempt to transmit streams directly between clients. If clients cannot connect
due to firewall restrictions, the session uses the OpenTok TURN server to relay
audio-video streams.
The `OpenTok Media
Router <https://tokbox.com/opentok/tutorials/create-session/#media-mode>`_
provides the following benefits:
* The OpenTok Media Router can decrease bandwidth usage in multiparty sessions.
(When the mediaMode property is set to MediaMode.relayed, each client must send
a separate audio-video stream to each client subscribing to it.)
* The OpenTok Media Router can improve the quality of the user experience through
audio fallback and video recovery (see https://tokbox.com/platform/fallback). With
these features, if a client's connectivity degrades to a degree that
it does not support video for a stream it's subscribing to, the video is dropped on
that client (without affecting other clients), and the client receives audio only.
If the client's connectivity improves, the video returns.
* The OpenTok Media Router supports the archiving feature, which lets
you record, save, and retrieve OpenTok sessions (see http://tokbox.com/platform/archiving).
:param String archive_mode: Whether the session is automatically archived
(ArchiveModes.always) or not (ArchiveModes.manual). By default,
the setting is ArchiveModes.manual, and you must call the
start_archive() method of the OpenTok object to start archiving. To archive the session
(either automatically or not), you must set the media_mode parameter to
MediaModes.routed.
:param String location: An IP address that the OpenTok servers will use to
situate the session in its global network. If you do not set a location hint,
the session will be located based on the IP address of the first client connecting to it.
:rtype: The Session object. The session_id property of the object is the session ID.
"""
# build options
options = {}
if not isinstance(media_mode, MediaModes):
raise OpenTokException(u('Cannot create session, {0} is not a valid media mode').format(media_mode))
if not isinstance(archive_mode, ArchiveModes):
raise OpenTokException(u('Cannot create session, {0} is not a valid archive mode').format(archive_mode))
if archive_mode == ArchiveModes.always and media_mode != MediaModes.routed:
raise OpenTokException(u('A session with always archive mode must also have the routed media mode.'))
options[u('p2p.preference')] = media_mode.value
options[u('archiveMode')] = archive_mode.value
if location:
# validate IP address
try:
inet_aton(location)
except Exception:
raise OpenTokException(u('Cannot create session. Location must be either None or a valid IPv4 address: {0}').format(location))
options[u('location')] = location
try:
response = requests.post(self.endpoints.session_url(), data=options, headers=self.headers(), proxies=self.proxies, timeout=self.timeout)
response.encoding = 'utf-8'
if response.status_code == 403:
raise AuthError('Failed to create session, invalid credentials')
if not response.content:
raise RequestError()
dom = xmldom.parseString(response.content)
except Exception as e:
raise RequestError('Failed to create session: %s' % str(e))
try:
error = dom.getElementsByTagName('error')
if error:
error = error[0]
raise AuthError('Failed to create session (code=%s): %s' % (error.attributes['code'].value, error.firstChild.attributes['message'].value))
session_id = dom.getElementsByTagName('session_id')[0].childNodes[0].nodeValue
return Session(self, session_id, location=location, media_mode=media_mode, archive_mode=archive_mode)
except Exception as e:
raise OpenTokException('Failed to generate session: %s' % str(e)) | Creates a new OpenTok session and returns the session ID, which uniquely identifies
the session.
For example, when using the OpenTok JavaScript library, use the session ID when calling the
OT.initSession() method (to initialize an OpenTok session).
OpenTok sessions do not expire. However, authentication tokens do expire (see the
generateToken() method). Also note that sessions cannot explicitly be destroyed.
A session ID string can be up to 255 characters long.
Calling this method results in an OpenTokException in the event of an error.
Check the error message for details.
You can also create a session using the OpenTok
`REST API <https://tokbox.com/opentok/api/#session_id_production>`_ or
`the OpenTok dashboard <https://dashboard.tokbox.com/projects>`_.
:param String media_mode: Determines whether the session will transmit streams using the
OpenTok Media Router (MediaMode.routed) or not (MediaMode.relayed). By default,
the setting is MediaMode.relayed.
With the media_mode property set to MediaMode.relayed, the session
will attempt to transmit streams directly between clients. If clients cannot connect
due to firewall restrictions, the session uses the OpenTok TURN server to relay
audio-video streams.
The `OpenTok Media
Router <https://tokbox.com/opentok/tutorials/create-session/#media-mode>`_
provides the following benefits:
* The OpenTok Media Router can decrease bandwidth usage in multiparty sessions.
(When the mediaMode property is set to MediaMode.relayed, each client must send
a separate audio-video stream to each client subscribing to it.)
* The OpenTok Media Router can improve the quality of the user experience through
audio fallback and video recovery (see https://tokbox.com/platform/fallback). With
these features, if a client's connectivity degrades to a degree that
it does not support video for a stream it's subscribing to, the video is dropped on
that client (without affecting other clients), and the client receives audio only.
If the client's connectivity improves, the video returns.
* The OpenTok Media Router supports the archiving feature, which lets
you record, save, and retrieve OpenTok sessions (see http://tokbox.com/platform/archiving).
:param String archive_mode: Whether the session is automatically archived
(ArchiveModes.always) or not (ArchiveModes.manual). By default,
the setting is ArchiveModes.manual, and you must call the
start_archive() method of the OpenTok object to start archiving. To archive the session
(either automatically or not), you must set the media_mode parameter to
MediaModes.routed.
:param String location: An IP address that the OpenTok servers will use to
situate the session in its global network. If you do not set a location hint,
the session will be located based on the IP address of the first client connecting to it.
:rtype: The Session object. The session_id property of the object is the session ID. | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L204-L304 |
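A minimal usage sketch for create_session (not from the source; the credential strings are placeholders for real project values):

from opentok import OpenTok, MediaModes, ArchiveModes

opentok = OpenTok('MY_API_KEY', 'MY_API_SECRET')  # placeholder credentials
# A routed session is required if the session will be archived or broadcast.
session = opentok.create_session(media_mode=MediaModes.routed,
                                 archive_mode=ArchiveModes.manual)
print(session.session_id)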
opentok/Opentok-Python-SDK | opentok/opentok.py | OpenTok.start_archive | def start_archive(self, session_id, has_audio=True, has_video=True, name=None, output_mode=OutputModes.composed, resolution=None):
"""
Starts archiving an OpenTok session.
Clients must be actively connected to the OpenTok session for you to successfully start
recording an archive.
You can only record one archive at a time for a given session. You can only record archives
of sessions that use the OpenTok Media Router (sessions with the media mode set to routed);
you cannot archive sessions with the media mode set to relayed.
For more information on archiving, see the
`OpenTok archiving <https://tokbox.com/opentok/tutorials/archiving/>`_ programming guide.
:param String session_id: The session ID of the OpenTok session to archive.
:param String name: This is the name of the archive. You can use this name
to identify the archive. It is a property of the Archive object, and it is a property
of archive-related events in the OpenTok.js library.
:param Boolean has_audio: if set to True, an audio track will be inserted into the archive.
has_audio is an optional parameter that is set to True by default. If you set both
has_audio and has_video to False, the call to the start_archive() method results in
an error.
:param Boolean has_video: if set to True, a video track will be inserted into the archive.
has_video is an optional parameter that is set to True by default.
:param OutputModes output_mode: Whether all streams in the archive are recorded
to a single file (OutputModes.composed, the default) or to individual files
(OutputModes.individual).
:param String resolution (Optional): The resolution of the archive, either "640x480" (the default)
or "1280x720". This parameter only applies to composed archives. If you set this
parameter and set the output_mode parameter to OutputModes.individual, the call to the
start_archive() method results in an error.
:rtype: The Archive object, which includes properties defining the archive,
including the archive ID.
"""
if not isinstance(output_mode, OutputModes):
raise OpenTokException(u('Cannot start archive, {0} is not a valid output mode').format(output_mode))
if resolution and output_mode == OutputModes.individual:
raise OpenTokException(u('Invalid parameters: Resolution cannot be supplied for individual output mode.'))
payload = {'name': name,
'sessionId': session_id,
'hasAudio': has_audio,
'hasVideo': has_video,
'outputMode': output_mode.value,
'resolution': resolution,
}
response = requests.post(self.endpoints.archive_url(), data=json.dumps(payload), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
return Archive(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 400:
"""
The HTTP response has a 400 status code in the following cases:
You do not pass in a session ID or you pass in an invalid session ID.
No clients are actively connected to the OpenTok session.
You specify an invalid resolution value.
The outputMode property is set to "individual" and you set the resolution property (which is not supported in individual stream archives).
"""
raise RequestError(response.json().get("message"))
elif response.status_code == 404:
raise NotFoundError("Session not found")
elif response.status_code == 409:
raise ArchiveError(response.json().get("message"))
else:
raise RequestError("An unexpected error occurred", response.status_code) | python | def start_archive(self, session_id, has_audio=True, has_video=True, name=None, output_mode=OutputModes.composed, resolution=None):
"""
Starts archiving an OpenTok session.
Clients must be actively connected to the OpenTok session for you to successfully start
recording an archive.
You can only record one archive at a time for a given session. You can only record archives
of sessions that use the OpenTok Media Router (sessions with the media mode set to routed);
you cannot archive sessions with the media mode set to relayed.
For more information on archiving, see the
`OpenTok archiving <https://tokbox.com/opentok/tutorials/archiving/>`_ programming guide.
:param String session_id: The session ID of the OpenTok session to archive.
:param String name: This is the name of the archive. You can use this name
to identify the archive. It is a property of the Archive object, and it is a property
of archive-related events in the OpenTok.js library.
:param Boolean has_audio: if set to True, an audio track will be inserted into the archive.
has_audio is an optional parameter that is set to True by default. If you set both
has_audio and has_video to False, the call to the start_archive() method results in
an error.
:param Boolean has_video: if set to True, a video track will be inserted into the archive.
has_video is an optional parameter that is set to True by default.
:param OutputModes output_mode: Whether all streams in the archive are recorded
to a single file (OutputModes.composed, the default) or to individual files
(OutputModes.individual).
:param String resolution (Optional): The resolution of the archive, either "640x480" (the default)
or "1280x720". This parameter only applies to composed archives. If you set this
parameter and set the output_mode parameter to OutputModes.individual, the call to the
start_archive() method results in an error.
:rtype: The Archive object, which includes properties defining the archive,
including the archive ID.
"""
if not isinstance(output_mode, OutputModes):
raise OpenTokException(u('Cannot start archive, {0} is not a valid output mode').format(output_mode))
if resolution and output_mode == OutputModes.individual:
raise OpenTokException(u('Invalid parameters: Resolution cannot be supplied for individual output mode.'))
payload = {'name': name,
'sessionId': session_id,
'hasAudio': has_audio,
'hasVideo': has_video,
'outputMode': output_mode.value,
'resolution': resolution,
}
response = requests.post(self.endpoints.archive_url(), data=json.dumps(payload), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
return Archive(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 400:
"""
The HTTP response has a 400 status code in the following cases:
You do not pass in a session ID or you pass in an invalid session ID.
No clients are actively connected to the OpenTok session.
You specify an invalid resolution value.
The outputMode property is set to "individual" and you set the resolution property (which is not supported in individual stream archives).
"""
raise RequestError(response.json().get("message"))
elif response.status_code == 404:
raise NotFoundError("Session not found")
elif response.status_code == 409:
raise ArchiveError(response.json().get("message"))
else:
raise RequestError("An unexpected error occurred", response.status_code) | Starts archiving an OpenTok session.
Clients must be actively connected to the OpenTok session for you to successfully start
recording an archive.
You can only record one archive at a time for a given session. You can only record archives
of sessions that use the OpenTok Media Router (sessions with the media mode set to routed);
you cannot archive sessions with the media mode set to relayed.
For more information on archiving, see the
`OpenTok archiving <https://tokbox.com/opentok/tutorials/archiving/>`_ programming guide.
:param String session_id: The session ID of the OpenTok session to archive.
:param String name: This is the name of the archive. You can use this name
to identify the archive. It is a property of the Archive object, and it is a property
of archive-related events in the OpenTok.js library.
:param Boolean has_audio: if set to True, an audio track will be inserted into the archive.
has_audio is an optional parameter that is set to True by default. If you set both
has_audio and has_video to False, the call to the start_archive() method results in
an error.
:param Boolean has_video: if set to True, a video track will be inserted into the archive.
has_video is an optional parameter that is set to True by default.
:param OutputModes output_mode: Whether all streams in the archive are recorded
to a single file (OutputModes.composed, the default) or to individual files
(OutputModes.individual).
:param String resolution (Optional): The resolution of the archive, either "640x480" (the default)
or "1280x720". This parameter only applies to composed archives. If you set this
parameter and set the output_mode parameter to OutputModes.individual, the call to the
start_archive() method results in an error.
:rtype: The Archive object, which includes properties defining the archive,
including the archive ID. | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L319-L389 |
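A usage sketch for start_archive, assuming a routed session with at least one connected client; the session ID and credentials are placeholders:

from opentok import OpenTok, OutputModes

opentok = OpenTok('MY_API_KEY', 'MY_API_SECRET')  # placeholder credentials
archive = opentok.start_archive('SESSION_ID', name='weekly standup',
                                output_mode=OutputModes.composed,
                                resolution='1280x720')
print(archive.id, archive.status)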
opentok/Opentok-Python-SDK | opentok/opentok.py | OpenTok.stop_archive | def stop_archive(self, archive_id):
"""
Stops an OpenTok archive that is being recorded.
Archives automatically stop recording after 90 minutes or when all clients have disconnected
from the session being archived.
:param String archive_id: The archive ID of the archive you want to stop recording.
:rtype: The Archive object corresponding to the archive being stopped.
"""
response = requests.post(self.endpoints.archive_url(archive_id) + '/stop', headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
return Archive(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
elif response.status_code == 409:
raise ArchiveError("Archive is not in started state")
else:
raise RequestError("An unexpected error occurred", response.status_code) | python | def stop_archive(self, archive_id):
"""
Stops an OpenTok archive that is being recorded.
Archives automatically stop recording after 90 minutes or when all clients have disconnected
from the session being archived.
:param String archive_id: The archive ID of the archive you want to stop recording.
:rtype: The Archive object corresponding to the archive being stopped.
"""
response = requests.post(self.endpoints.archive_url(archive_id) + '/stop', headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
return Archive(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
elif response.status_code == 409:
raise ArchiveError("Archive is not in started state")
else:
raise RequestError("An unexpected error occurred", response.status_code) | Stops an OpenTok archive that is being recorded.
Archives automatically stop recording after 90 minutes or when all clients have disconnected
from the session being archived.
:param String archive_id: The archive ID of the archive you want to stop recording.
:rtype: The Archive object corresponding to the archive being stopped. | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L391-L413 |
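A usage sketch for stop_archive; the archive ID is a placeholder for one returned by start_archive:

from opentok import OpenTok

opentok = OpenTok('MY_API_KEY', 'MY_API_SECRET')  # placeholder credentials
archive = opentok.stop_archive('ARCHIVE_ID')      # placeholder archive ID
print(archive.status)                             # 'stopped' once recording has ended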
opentok/Opentok-Python-SDK | opentok/opentok.py | OpenTok.delete_archive | def delete_archive(self, archive_id):
"""
Deletes an OpenTok archive.
You can only delete an archive which has a status of "available" or "uploaded". Deleting an
archive removes its record from the list of archives. For an "available" archive, it also
removes the archive file, making it unavailable for download.
:param String archive_id: The archive ID of the archive to be deleted.
"""
response = requests.delete(self.endpoints.archive_url(archive_id), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
pass
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
else:
raise RequestError("An unexpected error occurred", response.status_code) | python | def delete_archive(self, archive_id):
"""
Deletes an OpenTok archive.
You can only delete an archive which has a status of "available" or "uploaded". Deleting an
archive removes its record from the list of archives. For an "available" archive, it also
removes the archive file, making it unavailable for download.
:param String archive_id: The archive ID of the archive to be deleted.
"""
response = requests.delete(self.endpoints.archive_url(archive_id), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
pass
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
else:
raise RequestError("An unexpected error occurred", response.status_code) | Deletes an OpenTok archive.
You can only delete an archive which has a status of "available" or "uploaded". Deleting an
archive removes its record from the list of archives. For an "available" archive, it also
removes the archive file, making it unavailable for download.
:param String archive_id: The archive ID of the archive to be deleted. | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L415-L434 |
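A usage sketch for delete_archive; it returns nothing on success and raises one of the errors above otherwise:

from opentok import OpenTok

opentok = OpenTok('MY_API_KEY', 'MY_API_SECRET')  # placeholder credentials
# Placeholder ID of an archive with status 'available' or 'uploaded'.
opentok.delete_archive('ARCHIVE_ID')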
opentok/Opentok-Python-SDK | opentok/opentok.py | OpenTok.get_archive | def get_archive(self, archive_id):
"""Gets an Archive object for the given archive ID.
:param String archive_id: The archive ID.
:rtype: The Archive object.
"""
response = requests.get(self.endpoints.archive_url(archive_id), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
return Archive(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
else:
raise RequestError("An unexpected error occurred", response.status_code) | python | def get_archive(self, archive_id):
"""Gets an Archive object for the given archive ID.
:param String archive_id: The archive ID.
:rtype: The Archive object.
"""
response = requests.get(self.endpoints.archive_url(archive_id), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
return Archive(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
else:
raise RequestError("An unexpected error occurred", response.status_code) | Gets an Archive object for the given archive ID.
:param String archive_id: The archive ID.
:rtype: The Archive object. | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L436-L452 |
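A usage sketch for get_archive (the attributes printed follow the Archive object's documented JSON fields, so treat them as an assumption):

from opentok import OpenTok

opentok = OpenTok('MY_API_KEY', 'MY_API_SECRET')  # placeholder credentials
archive = opentok.get_archive('ARCHIVE_ID')       # placeholder archive ID
print(archive.status, archive.duration)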
opentok/Opentok-Python-SDK | opentok/opentok.py | OpenTok.get_archives | def get_archives(self, offset=None, count=None, session_id=None):
"""Returns an ArchiveList, which is an array of archives that are completed and in-progress,
for your API key.
:param int offset: Optional. The index offset of the first archive. 0 is the offset
of the most recently started archive. 1 is the offset of the archive that started prior to
the most recent archive. If you do not specify an offset, 0 is used.
:param int count: Optional. The number of archives to be returned. The maximum
number of archives returned is 1000.
:param String session_id: Optional. Used to list archives for a specific session ID.
:rtype: An ArchiveList object, which is an array of Archive objects.
"""
params = {}
if offset is not None:
params['offset'] = offset
if count is not None:
params['count'] = count
if session_id is not None:
params['sessionId'] = session_id
endpoint = self.endpoints.archive_url() + "?" + urlencode(params)
response = requests.get(
endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)
if response.status_code < 300:
return ArchiveList(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
else:
raise RequestError("An unexpected error occurred", response.status_code) | python | def get_archives(self, offset=None, count=None, session_id=None):
"""Returns an ArchiveList, which is an array of archives that are completed and in-progress,
for your API key.
:param int offset: Optional. The index offset of the first archive. 0 is the offset
of the most recently started archive. 1 is the offset of the archive that started prior to
the most recent archive. If you do not specify an offset, 0 is used.
:param int count: Optional. The number of archives to be returned. The maximum
number of archives returned is 1000.
:param String session_id: Optional. Used to list archives for a specific session ID.
:rtype: An ArchiveList object, which is an array of Archive objects.
"""
params = {}
if offset is not None:
params['offset'] = offset
if count is not None:
params['count'] = count
if session_id is not None:
params['sessionId'] = session_id
endpoint = self.endpoints.archive_url() + "?" + urlencode(params)
response = requests.get(
endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)
if response.status_code < 300:
return ArchiveList(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
else:
raise RequestError("An unexpected error occurred", response.status_code) | Returns an ArchiveList, which is an array of archives that are completed and in-progress,
for your API key.
:param int offset: Optional. The index offset of the first archive. 0 is the offset
of the most recently started archive. 1 is the offset of the archive that started prior to
the most recent archive. If you do not specify an offset, 0 is used.
:param int count: Optional. The number of archives to be returned. The maximum
number of archives returned is 1000.
:param String session_id: Optional. Used to list archives for a specific session ID.
:rtype: An ArchiveList object, which is an array of Archive objects. | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L454-L488 |
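A usage sketch for get_archives, paging from the most recent archive; the count/items attributes are assumed to mirror the StreamList convention used later in this file:

from opentok import OpenTok

opentok = OpenTok('MY_API_KEY', 'MY_API_SECRET')  # placeholder credentials
archive_list = opentok.get_archives(offset=0, count=50)
print(archive_list.count)
for archive in archive_list.items:
    print(archive.id, archive.status)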
opentok/Opentok-Python-SDK | opentok/opentok.py | OpenTok.list_archives | def list_archives(self, offset=None, count=None, session_id=None):
"""
New method to get the archive list; it is an alternative to 'get_archives()'.
Both methods exist for backwards compatibility.
"""
return self.get_archives(offset, count, session_id) | python | def list_archives(self, offset=None, count=None, session_id=None):
"""
New method to get the archive list; it is an alternative to 'get_archives()'.
Both methods exist for backwards compatibility.
"""
return self.get_archives(offset, count, session_id) | New method to get the archive list; it is an alternative to 'get_archives()'.
Both methods exist for backwards compatibility. | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L490-L495 |
opentok/Opentok-Python-SDK | opentok/opentok.py | OpenTok.signal | def signal(self, session_id, payload, connection_id=None):
"""
Send signals to all participants in an active OpenTok session or to a specific client
connected to that session.
:param String session_id: The session ID of the OpenTok session that receives the signal
:param Dictionary payload: Structure that contains both the type and data fields. These
correspond to the type and data parameters passed in the client signal received handlers
:param String connection_id: The connection_id parameter is an optional string used to
specify the connection ID of a client connected to the session. If you specify this value,
the signal is sent to the specified client. Otherwise, the signal is sent to all clients
connected to the session
"""
response = requests.post(
self.endpoints.signaling_url(session_id, connection_id),
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 204:
pass
elif response.status_code == 400:
raise SignalingError('One of the signal properties - data, type, sessionId or connectionId - is invalid.')
elif response.status_code == 403:
raise AuthError('You are not authorized to send the signal. Check your authentication credentials.')
elif response.status_code == 404:
raise SignalingError('The client specified by the connectionId property is not connected to the session.')
elif response.status_code == 413:
raise SignalingError('The type string exceeds the maximum length (128 bytes), or the data string exceeds the maximum size (8 kB).')
else:
raise RequestError('An unexpected error occurred', response.status_code) | python | def signal(self, session_id, payload, connection_id=None):
"""
Send signals to all participants in an active OpenTok session or to a specific client
connected to that session.
:param String session_id: The session ID of the OpenTok session that receives the signal
:param Dictionary payload: Structure that contains both the type and data fields. These
correspond to the type and data parameters passed in the client signal received handlers
:param String connection_id: The connection_id parameter is an optional string used to
specify the connection ID of a client connected to the session. If you specify this value,
the signal is sent to the specified client. Otherwise, the signal is sent to all clients
connected to the session
"""
response = requests.post(
self.endpoints.signaling_url(session_id, connection_id),
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 204:
pass
elif response.status_code == 400:
raise SignalingError('One of the signal properties - data, type, sessionId or connectionId - is invalid.')
elif response.status_code == 403:
raise AuthError('You are not authorized to send the signal. Check your authentication credentials.')
elif response.status_code == 404:
raise SignalingError('The client specified by the connectionId property is not connected to the session.')
elif response.status_code == 413:
raise SignalingError('The type string exceeds the maximum length (128 bytes), or the data string exceeds the maximum size (8 kB).')
else:
raise RequestError('An unexpected error occurred', response.status_code) | Send signals to all participants in an active OpenTok session or to a specific client
connected to that session.
:param String session_id: The session ID of the OpenTok session that receives the signal
:param Dictionary payload: Structure that contains both the type and data fields. These
correspond to the type and data parameters passed in the client signal received handlers
:param String connection_id: The connection_id parameter is an optional string used to
specify the connection ID of a client connected to the session. If you specify this value,
the signal is sent to the specified client. Otherwise, the signal is sent to all clients
connected to the session | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L497-L531 |
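A usage sketch for signal; the type and data keys mirror the client-side signal handlers described above, and all IDs are placeholders:

from opentok import OpenTok

opentok = OpenTok('MY_API_KEY', 'MY_API_SECRET')  # placeholder credentials
payload = {'type': 'chat', 'data': 'Hello from the server'}
opentok.signal('SESSION_ID', payload)                    # to every client in the session
opentok.signal('SESSION_ID', payload, 'CONNECTION_ID')   # or to a single client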
opentok/Opentok-Python-SDK | opentok/opentok.py | OpenTok.get_stream | def get_stream(self, session_id, stream_id):
"""
Returns a Stream object that contains information about an OpenTok stream:
-id: The stream ID
-videoType: "camera" or "screen"
-name: The stream name (if one was set when the client published the stream)
-layoutClassList: It's an array of the layout classes for the stream
"""
endpoint = self.endpoints.get_stream_url(session_id, stream_id)
response = requests.get(
endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)
if response.status_code == 200:
return Stream(response.json())
elif response.status_code == 400:
raise GetStreamError('Invalid request. This response may indicate that the data in your request is invalid JSON, that you did not pass in a session ID, or that you passed in an invalid stream ID.')
elif response.status_code == 403:
raise AuthError('You passed in an invalid OpenTok API key or JWT token.')
elif response.status_code == 408:
raise GetStreamError('You passed in an invalid stream ID.')
else:
raise RequestError('An unexpected error occurred', response.status_code) | python | def get_stream(self, session_id, stream_id):
"""
Returns a Stream object that contains information about an OpenTok stream:
-id: The stream ID
-videoType: "camera" or "screen"
-name: The stream name (if one was set when the client published the stream)
-layoutClassList: It's an array of the layout classes for the stream
"""
endpoint = self.endpoints.get_stream_url(session_id, stream_id)
response = requests.get(
endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)
if response.status_code == 200:
return Stream(response.json())
elif response.status_code == 400:
raise GetStreamError('Invalid request. This response may indicate that the data in your request is invalid JSON, that you did not pass in a session ID, or that you passed in an invalid stream ID.')
elif response.status_code == 403:
raise AuthError('You passed in an invalid OpenTok API key or JWT token.')
elif response.status_code == 408:
raise GetStreamError('You passed in an invalid stream ID.')
else:
raise RequestError('An unexpected error occurred', response.status_code) | Returns a Stream object that contains information about an OpenTok stream:
-id: The stream ID
-videoType: "camera" or "screen"
-name: The stream name (if one was set when the client published the stream)
-layoutClassList: It's an array of the layout classes for the stream | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L533-L556 |
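A usage sketch for get_stream, reading the Stream attributes the docstring lists:

from opentok import OpenTok

opentok = OpenTok('MY_API_KEY', 'MY_API_SECRET')  # placeholder credentials
stream = opentok.get_stream('SESSION_ID', 'STREAM_ID')   # placeholder IDs
print(stream.videoType, stream.layoutClassList)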
opentok/Opentok-Python-SDK | opentok/opentok.py | OpenTok.list_streams | def list_streams(self, session_id):
"""
Returns a StreamList object that contains information about all
the streams in an OpenTok session, with the following attributes:
-count: An integer that indicates the number of streams in the session
-items: List of the Stream objects
"""
endpoint = self.endpoints.get_stream_url(session_id)
response = requests.get(
endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)
if response.status_code == 200:
return StreamList(response.json())
elif response.status_code == 400:
raise GetStreamError('Invalid request. This response may indicate that the data in your request is invalid JSON, that you did not pass in a session ID, or that you passed in an invalid stream ID.')
elif response.status_code == 403:
raise AuthError('You passed in an invalid OpenTok API key or JWT token.')
else:
raise RequestError('An unexpected error occurred', response.status_code) | python | def list_streams(self, session_id):
"""
Returns a StreamList object that contains information about all
the streams in an OpenTok session, with the following attributes:
-count: An integer that indicates the number of streams in the session
-items: List of the Stream objects
"""
endpoint = self.endpoints.get_stream_url(session_id)
response = requests.get(
endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)
if response.status_code == 200:
return StreamList(response.json())
elif response.status_code == 400:
raise GetStreamError('Invalid request. This response may indicate that the data in your request is invalid JSON, that you did not pass in a session ID, or that you passed in an invalid stream ID.')
elif response.status_code == 403:
raise AuthError('You passed in an invalid OpenTok API key or JWT token.')
else:
raise RequestError('An unexpected error occurred', response.status_code) | Returns a StreamList object that contains information about all
the streams in an OpenTok session, with the following attributes:
-count: An integer that indicates the number of streams in the session
-items: List of the Stream objects | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L558-L579 |
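A usage sketch for list_streams, iterating the StreamList described above (the session ID is a placeholder):

from opentok import OpenTok

opentok = OpenTok('MY_API_KEY', 'MY_API_SECRET')  # placeholder credentials
stream_list = opentok.list_streams('SESSION_ID')
print(stream_list.count)
for stream in stream_list.items:
    print(stream.id, stream.videoType)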
opentok/Opentok-Python-SDK | opentok/opentok.py | OpenTok.force_disconnect | def force_disconnect(self, session_id, connection_id):
"""
Sends a request to disconnect a client from an OpenTok session
:param String session_id: The session ID of the OpenTok session from which the
client will be disconnected
:param String connection_id: The connection ID of the client that will be disconnected
"""
endpoint = self.endpoints.force_disconnect_url(session_id, connection_id)
response = requests.delete(
endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)
if response.status_code == 204:
pass
elif response.status_code == 400:
raise ForceDisconnectError('One of the arguments - sessionId or connectionId - is invalid.')
elif response.status_code == 403:
raise AuthError('You are not authorized to forceDisconnect, check your authentication credentials.')
elif response.status_code == 404:
raise ForceDisconnectError('The client specified by the connectionId property is not connected to the session.')
else:
raise RequestError('An unexpected error occurred', response.status_code) | python | def force_disconnect(self, session_id, connection_id):
"""
Sends a request to disconnect a client from an OpenTok session
:param String session_id: The session ID of the OpenTok session from which the
client will be disconnected
:param String connection_id: The connection ID of the client that will be disconnected
"""
endpoint = self.endpoints.force_disconnect_url(session_id, connection_id)
response = requests.delete(
endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)
if response.status_code == 204:
pass
elif response.status_code == 400:
raise ForceDisconnectError('One of the arguments - sessionId or connectionId - is invalid.')
elif response.status_code == 403:
raise AuthError('You are not authorized to forceDisconnect, check your authentication credentials.')
elif response.status_code == 404:
raise ForceDisconnectError('The client specified by the connectionId property is not connected to the session.')
else:
raise RequestError('An unexpected error occurred', response.status_code) | Sends a request to disconnect a client from an OpenTok session
:param String session_id: The session ID of the OpenTok session from which the
client will be disconnected
:param String connection_id: The connection ID of the client that will be disconnected | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L581-L604 |
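A usage sketch for force_disconnect; it returns nothing on success and raises one of the errors above otherwise:

from opentok import OpenTok

opentok = OpenTok('MY_API_KEY', 'MY_API_SECRET')  # placeholder credentials
opentok.force_disconnect('SESSION_ID', 'CONNECTION_ID')  # placeholder IDs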
opentok/Opentok-Python-SDK | opentok/opentok.py | OpenTok.set_archive_layout | def set_archive_layout(self, archive_id, layout_type, stylesheet=None):
"""
Use this method to change the layout of videos in an OpenTok archive
:param String archive_id: The ID of the archive that will be updated
:param String layout_type: The layout type for the archive. Valid values are:
'bestFit', 'custom', 'horizontalPresentation', 'pip' and 'verticalPresentation'
:param String stylesheet optional: CSS used to style the custom layout.
Specify this only if you set the type property to 'custom'
"""
payload = {
'type': layout_type,
}
if layout_type == 'custom':
if stylesheet is not None:
payload['stylesheet'] = stylesheet
endpoint = self.endpoints.set_archive_layout_url(archive_id)
response = requests.put(
endpoint,
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
pass
elif response.status_code == 400:
raise ArchiveError('Invalid request. This response may indicate that the data in your request is invalid JSON. It may also indicate that you passed in invalid layout options.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
else:
raise RequestError('OpenTok server error.', response.status_code) | python | def set_archive_layout(self, archive_id, layout_type, stylesheet=None):
"""
Use this method to change the layout of videos in an OpenTok archive
:param String archive_id: The ID of the archive that will be updated
:param String layout_type: The layout type for the archive. Valid values are:
'bestFit', 'custom', 'horizontalPresentation', 'pip' and 'verticalPresentation'
:param String stylesheet optional: CSS used to style the custom layout.
Specify this only if you set the type property to 'custom'
"""
payload = {
'type': layout_type,
}
if layout_type == 'custom':
if stylesheet is not None:
payload['stylesheet'] = stylesheet
endpoint = self.endpoints.set_archive_layout_url(archive_id)
response = requests.put(
endpoint,
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
pass
elif response.status_code == 400:
raise ArchiveError('Invalid request. This response may indicate that the data in your request is invalid JSON. It may also indicate that you passed in invalid layout options.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
else:
raise RequestError('OpenTok server error.', response.status_code) | Use this method to change the layout of videos in an OpenTok archive
:param String archive_id: The ID of the archive that will be updated
:param String layout_type: The layout type for the archive. Valid values are:
'bestFit', 'custom', 'horizontalPresentation', 'pip' and 'verticalPresentation'
:param String stylesheet optional: CSS used to style the custom layout.
Specify this only if you set the type property to 'custom' | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L606-L642 |
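A usage sketch for set_archive_layout, showing a predefined layout and a custom one (the archive ID and stylesheet are illustrative):

from opentok import OpenTok

opentok = OpenTok('MY_API_KEY', 'MY_API_SECRET')  # placeholder credentials
opentok.set_archive_layout('ARCHIVE_ID', 'horizontalPresentation')
opentok.set_archive_layout('ARCHIVE_ID', 'custom',
                           stylesheet='stream { float: left; }')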
opentok/Opentok-Python-SDK | opentok/opentok.py | OpenTok.dial | def dial(self, session_id, token, sip_uri, options={}):
"""
Use this method to connect a SIP platform to an OpenTok session. The audio from the end
of the SIP call is added to the OpenTok session as an audio-only stream. The OpenTok Media
Router mixes audio from other streams in the session and sends the mixed audio to the SIP
endpoint
:param String session_id: The OpenTok session ID for the SIP call to join
:param String token: The OpenTok token to be used for the participant being called
:param String sip_uri: The SIP URI to be used as destination of the SIP call initiated from
OpenTok to the SIP platform
:param Dictionary options optional: Additional options with the following properties:
String 'from': The number or string that will be sent to the final SIP number
as the caller
Dictionary 'headers': Defines custom headers to be added to the SIP INVITE request
initiated from OpenTok to the SIP platform. Each of the custom headers must
start with the "X-" prefix, or the call will result in a Bad Request (400) response
Dictionary 'auth': Contains the username and password to be used in the SIP
INVITE request for HTTP digest authentication, if it is required by the SIP platform
For example:
'auth': {
'username': 'username',
'password': 'password'
}
Boolean 'secure': A Boolean flag that indicates whether the media must be transmitted
encrypted (true) or not (false, the default)
:rtype: A SipCall object, which contains data of the SIP call: id, connectionId and streamId
"""
payload = {
'sessionId': session_id,
'token': token,
'sip': {
'uri': sip_uri
}
}
if 'from' in options:
payload['sip']['from'] = options['from']
if 'headers' in options:
payload['sip']['headers'] = options['headers']
if 'auth' in options:
payload['sip']['auth'] = options['auth']
if 'secure' in options:
payload['sip']['secure'] = options['secure']
endpoint = self.endpoints.dial_url()
response = requests.post(
endpoint,
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
return SipCall(response.json())
elif response.status_code == 400:
raise SipDialError('Invalid request. Invalid session ID.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
elif response.status_code == 404:
raise SipDialError('The session does not exist.')
elif response.status_code == 409:
raise SipDialError(
'You attempted to start a SIP call for a session that '
'does not use the OpenTok Media Router.')
else:
raise RequestError('OpenTok server error.', response.status_code) | python | def dial(self, session_id, token, sip_uri, options={}):
"""
Use this method to connect a SIP platform to an OpenTok session. The audio from the end
of the SIP call is added to the OpenTok session as an audio-only stream. The OpenTok Media
Router mixes audio from other streams in the session and sends the mixed audio to the SIP
endpoint
:param String session_id: The OpenTok session ID for the SIP call to join
:param String token: The OpenTok token to be used for the participant being called
:param String sip_uri: The SIP URI to be used as destination of the SIP call initiated from
OpenTok to the SIP platform
:param Dictionary options optional: Additional options with the following properties:
String 'from': The number or string that will be sent to the final SIP number
as the caller
Dictionary 'headers': Defines custom headers to be added to the SIP INVITE request
initiated from OpenTok to the SIP platform. Each of the custom headers must
start with the "X-" prefix, or the call will result in a Bad Request (400) response
Dictionary 'auth': Contains the username and password to be used in the SIP
INVITE request for HTTP digest authentication, if it is required by the SIP platform
For example:
'auth': {
'username': 'username',
'password': 'password'
}
Boolean 'secure': A Boolean flag that indicates whether the media must be transmitted
encrypted (true) or not (false, the default)
:rtype: A SipCall object, which contains data of the SIP call: id, connectionId and streamId
"""
payload = {
'sessionId': session_id,
'token': token,
'sip': {
'uri': sip_uri
}
}
if 'from' in options:
payload['sip']['from'] = options['from']
if 'headers' in options:
payload['sip']['headers'] = options['headers']
if 'auth' in options:
payload['sip']['auth'] = options['auth']
if 'secure' in options:
payload['sip']['secure'] = options['secure']
endpoint = self.endpoints.dial_url()
response = requests.post(
endpoint,
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
return SipCall(response.json())
elif response.status_code == 400:
raise SipDialError('Invalid request. Invalid session ID.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
elif response.status_code == 404:
raise SipDialError('The session does not exist.')
elif response.status_code == 409:
raise SipDialError(
'You attempted to start a SIP call for a session that '
'does not use the OpenTok Media Router.')
else:
raise RequestError('OpenTok server error.', response.status_code) | Use this method to connect a SIP platform to an OpenTok session. The audio from the end
of the SIP call is added to the OpenTok session as an audio-only stream. The OpenTok Media
Router mixes audio from other streams in the session and sends the mixed audio to the SIP
endpoint
:param String session_id: The OpenTok session ID for the SIP call to join
:param String token: The OpenTok token to be used for the participant being called
:param String sip_uri: The SIP URI to be used as destination of the SIP call initiated from
OpenTok to the SIP platform
:param Dictionary options optional: Additional options with the following properties:
String 'from': The number or string that will be sent to the final SIP number
as the caller
Dictionary 'headers': Defines custom headers to be added to the SIP INVITE request
initiated from OpenTok to the SIP platform. Each of the custom headers must
start with the "X-" prefix, or the call will result in a Bad Request (400) response
Dictionary 'auth': Contains the username and password to be used in the SIP
INVITE request for HTTP digest authentication, if it is required by the SIP platform
For example:
'auth': {
'username': 'username',
'password': 'password'
}
Boolean 'secure': A Boolean flag that indicates whether the media must be transmitted
encrypted (true) or not (false, the default)
:rtype: A SipCall object, which contains data of the SIP call: id, connectionId and streamId | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L644-L723 |
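A usage sketch for dial; the SIP URI, caller ID, and SIP credentials are placeholders, and the token comes from the SDK's generate_token method:

from opentok import OpenTok

opentok = OpenTok('MY_API_KEY', 'MY_API_SECRET')  # placeholder credentials
token = opentok.generate_token('SESSION_ID')      # placeholder session ID
options = {
    'from': '+15551234567',                       # caller ID shown to the SIP endpoint
    'headers': {'X-Custom-Header': 'value'},      # custom headers must start with "X-"
    'auth': {'username': 'user', 'password': 'secret'},
    'secure': True,
}
sip_call = opentok.dial('SESSION_ID', token, 'sip:user@sip.example.com', options)
print(sip_call.id, sip_call.connectionId, sip_call.streamId)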
opentok/Opentok-Python-SDK | opentok/opentok.py | OpenTok.set_stream_class_lists | def set_stream_class_lists(self, session_id, payload):
"""
Use this method to change layout classes for OpenTok streams. The layout classes
define how the streams are displayed in the layout of a composed OpenTok archive
:param String session_id: The ID of the session of the streams that will be updated
:param List payload: A list defining the class lists to apply to the streams.
Each element in the list is a dictionary with two properties: 'id' and 'layoutClassList'.
The 'id' property is the stream ID (a String), and the 'layoutClassList' is an array of
class names (Strings) to apply to the stream. For example:
payload = [
{'id': '7b09ec3c-26f9-43d7-8197-f608f13d4fb6', 'layoutClassList': ['focus']},
{'id': '567bc941-6ea0-4c69-97fc-70a740b68976', 'layoutClassList': ['top']},
{'id': '307dc941-0450-4c09-975c-705740d08970', 'layoutClassList': ['bottom']}
]
"""
items_payload = {'items': payload}
endpoint = self.endpoints.set_stream_class_lists_url(session_id)
response = requests.put(
endpoint,
data=json.dumps(items_payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
pass
elif response.status_code == 400:
raise SetStreamClassError(
'Invalid request. This response may indicate that the data in your request '
'is invalid JSON. It may also indicate that you passed in invalid layout options.'
)
elif response.status_code == 403:
raise AuthError('Authentication error.')
else:
raise RequestError('OpenTok server error.', response.status_code) | python | def set_stream_class_lists(self, session_id, payload):
"""
Use this method to change layout classes for OpenTok streams. The layout classes
define how the streams are displayed in the layout of a composed OpenTok archive
:param String session_id: The ID of the session of the streams that will be updated
:param List payload: A list defining the class lists to apply to the streams.
Each element in the list is a dictionary with two properties: 'id' and 'layoutClassList'.
The 'id' property is the stream ID (a String), and the 'layoutClassList' is an array of
class names (Strings) to apply to the stream. For example:
payload = [
{'id': '7b09ec3c-26f9-43d7-8197-f608f13d4fb6', 'layoutClassList': ['focus']},
{'id': '567bc941-6ea0-4c69-97fc-70a740b68976', 'layoutClassList': ['top']},
{'id': '307dc941-0450-4c09-975c-705740d08970', 'layoutClassList': ['bottom']}
]
"""
items_payload = {'items': payload}
endpoint = self.endpoints.set_stream_class_lists_url(session_id)
response = requests.put(
endpoint,
data=json.dumps(items_payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
pass
elif response.status_code == 400:
raise SetStreamClassError(
'Invalid request. This response may indicate that the data in your request '
'is invalid JSON. It may also indicate that you passed in invalid layout options.'
)
elif response.status_code == 403:
raise AuthError('Authentication error.')
else:
raise RequestError('OpenTok server error.', response.status_code) | Use this method to change layout classes for OpenTok streams. The layout classes
define how the streams are displayed in the layout of a composed OpenTok archive
:param String session_id: The ID of the session of the streams that will be updated
:param List payload: A list defining the class lists to apply to the streams.
Each element in the list is a dictionary with two properties: 'id' and 'layoutClassList'.
The 'id' property is the stream ID (a String), and the 'layoutClassList' is an array of
class names (Strings) to apply to the stream. For example:
payload = [
{'id': '7b09ec3c-26f9-43d7-8197-f608f13d4fb6', 'layoutClassList': ['focus']},
{'id': '567bc941-6ea0-4c69-97fc-70a740b68976', 'layoutClassList': ['top']},
{'id': '307dc941-0450-4c09-975c-705740d08970', 'layoutClassList': ['bottom']}
] | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L725-L764 |
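A usage sketch for set_stream_class_lists, reusing the payload shape from the docstring (stream and session IDs are placeholders):

from opentok import OpenTok

opentok = OpenTok('MY_API_KEY', 'MY_API_SECRET')  # placeholder credentials
payload = [
    {'id': 'STREAM_ID_1', 'layoutClassList': ['focus']},
    {'id': 'STREAM_ID_2', 'layoutClassList': ['top']},
]
opentok.set_stream_class_lists('SESSION_ID', payload)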
opentok/Opentok-Python-SDK | opentok/opentok.py | OpenTok.start_broadcast | def start_broadcast(self, session_id, options):
"""
Use this method to start live streaming for an OpenTok session. This broadcasts the
session to HLS (HTTP live streaming) or to RTMP streams. To successfully start
broadcasting a session, at least one client must be connected to the session. You can only
start live streaming for sessions that use the OpenTok Media Router (with the media mode set
to routed); you cannot use live streaming with sessions that have the media mode set to
relayed
:param String session_id: The session ID of the OpenTok session you want to broadcast
:param Dictionary options, with the following properties:
Dictionary 'layout' optional: Specify this to assign the initial layout type for the
broadcast. Valid values for the layout property are "bestFit", "custom",
"horizontalPresentation", "pip" and "verticalPresentation". If you specify a "custom"
layout type, set the stylesheet property of the layout object to the stylesheet.
If you do not specify an initial layout type, the broadcast stream uses the Best Fit
layout type
Integer 'maxDuration' optional: The maximum duration for the broadcast, in seconds.
The broadcast will automatically stop when the maximum duration is reached. You can
set the maximum duration to a value from 60 (60 seconds) to 36000 (10 hours). The
default maximum duration is 2 hours (7200 seconds)
Dictionary 'outputs': This object defines the types of broadcast streams you want to
start (both HLS and RTMP). You can include HLS, RTMP, or both as broadcast streams.
If you include RTMP streaming, you can specify up to five target RTMP streams. For
each RTMP stream, specify 'serverUrl' (the RTMP server URL), 'streamName' (the stream
name, such as the YouTube Live stream name or the Facebook stream key), and
(optionally) 'id' (a unique ID for the stream)
String 'resolution' optional: The resolution of the broadcast, either "640x480"
(SD, the default) or "1280x720" (HD)
:rtype: A Broadcast object, which contains information about the broadcast: id, sessionId,
projectId, createdAt, updatedAt, resolution, status and broadcastUrls
"""
payload = {
'sessionId': session_id
}
payload.update(options)
endpoint = self.endpoints.broadcast_url()
response = requests.post(
endpoint,
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
return Broadcast(response.json())
elif response.status_code == 400:
raise BroadcastError(
'Invalid request. This response may indicate that the data in your request is '
'invalid JSON. It may also indicate that you passed in invalid layout options, '
'that you have exceeded the limit of five simultaneous RTMP streams for an OpenTok '
'session, or that you specified an invalid resolution.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
elif response.status_code == 409:
raise BroadcastError('The broadcast has already started for the session.')
else:
raise RequestError('OpenTok server error.', response.status_code) | python | def start_broadcast(self, session_id, options):
"""
Use this method to start live streaming for an OpenTok session. This broadcasts the
session to HLS (HTTP live streaming) or to RTMP streams. To successfully start
broadcasting a session, at least one client must be connected to the session. You can only
start live streaming for sessions that use the OpenTok Media Router (with the media mode set
to routed); you cannot use live streaming with sessions that have the media mode set to
relayed
:param String session_id: The session ID of the OpenTok session you want to broadcast
:param Dictionary options, with the following properties:
Dictionary 'layout' optional: Specify this to assign the initial layout type for the
broadcast. Valid values for the layout property are "bestFit", "custom",
"horizontalPresentation", "pip" and "verticalPresentation". If you specify a "custom"
layout type, set the stylesheet property of the layout object to the stylesheet.
If you do not specify an initial layout type, the broadcast stream uses the Best Fit
layout type
Integer 'maxDuration' optional: The maximum duration for the broadcast, in seconds.
The broadcast will automatically stop when the maximum duration is reached. You can
set the maximum duration to a value from 60 (60 seconds) to 36000 (10 hours). The
default maximum duration is 2 hours (7200 seconds)
Dictionary 'outputs': This object defines the types of broadcast streams you want to
start (both HLS and RTMP). You can include HLS, RTMP, or both as broadcast streams.
If you include RTMP streaming, you can specify up to five target RTMP streams. For
each RTMP stream, specify 'serverUrl' (the RTMP server URL), 'streamName' (the stream
name, such as the YouTube Live stream name or the Facebook stream key), and
(optionally) 'id' (a unique ID for the stream)
String 'resolution' optional: The resolution of the broadcast, either "640x480"
(SD, the default) or "1280x720" (HD)
:rtype: A Broadcast object, which contains information about the broadcast: id, sessionId,
projectId, createdAt, updatedAt, resolution, status and broadcastUrls
"""
payload = {
'sessionId': session_id
}
payload.update(options)
endpoint = self.endpoints.broadcast_url()
response = requests.post(
endpoint,
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
return Broadcast(response.json())
elif response.status_code == 400:
raise BroadcastError(
'Invalid request. This response may indicate that the data in your request is '
'invalid JSON. It may also indicate that you passed in invalid layout options, '
'that you have exceeded the limit of five simultaneous RTMP streams for an OpenTok '
'session, or that you specified an invalid resolution.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
elif response.status_code == 409:
raise BroadcastError('The broadcast has already started for the session.')
else:
raise RequestError('OpenTok server error.', response.status_code) | Use this method to start live streaming for an OpenTok session. This broadcasts the
session to HLS (HTTP live streaming) or to RTMP streams. To successfully start
broadcasting a session, at least one client must be connected to the session. You can only
start live streaming for sessions that use the OpenTok Media Router (with the media mode set
to routed); you cannot use live streaming with sessions that have the media mode set to
relayed
:param String session_id: The session ID of the OpenTok session you want to broadcast
:param Dictionary options, with the following properties:
Dictionary 'layout' optional: Specify this to assign the initial layout type for the
broadcast. Valid values for the layout property are "bestFit", "custom",
"horizontalPresentation", "pip" and "verticalPresentation". If you specify a "custom"
layout type, set the stylesheet property of the layout object to the CSS stylesheet to use.
If you do not specify an initial layout type, the broadcast stream uses the Best Fit
layout type
Integer 'maxDuration' optional: The maximum duration for the broadcast, in seconds.
The broadcast will automatically stop when the maximum duration is reached. You can
set the maximum duration to a value from 60 (60 seconds) to 36000 (10 hours). The
default maximum duration is 2 hours (7200 seconds)
Dictionary 'outputs': This object defines the types of broadcast streams you want to
start (both HLS and RTMP). You can include HLS, RTMP, or both as broadcast streams.
If you include RTMP streaming, you can specify up to five target RTMP streams. For
each RTMP stream, specify 'serverUrl' (the RTMP server URL), 'streamName' (the stream
name, such as the YouTube Live stream name or the Facebook stream key), and
(optionally) 'id' (a unique ID for the stream)
String 'resolution' optional: The resolution of the broadcast, either "640x480"
(SD, the default) or "1280x720" (HD)
:rtype A Broadcast object, which contains information about the broadcast: id, sessionId,
projectId, createdAt, updatedAt, resolution, status and broadcastUrls | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L766-L832 |
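A minimal usage sketch for start_broadcast (the credentials, session ID, and RTMP details below are placeholders; the Broadcast attribute names follow the :rtype note above and are assumptions here):

    from opentok import OpenTok

    opentok = OpenTok('MY_API_KEY', 'MY_API_SECRET')  # hypothetical credentials
    options = {
        'layout': {'type': 'bestFit'},
        'maxDuration': 3600,  # stop automatically after one hour
        'outputs': {
            'hls': {},
            'rtmp': [{
                'id': 'my_rtmp',  # hypothetical stream id
                'serverUrl': 'rtmp://example.com/live',
                'streamName': 'MY_STREAM_NAME'
            }]
        },
        'resolution': '640x480'
    }
    broadcast = opentok.start_broadcast('SESSION_ID', options)
    print(broadcast.id, broadcast.status)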
opentok/Opentok-Python-SDK | opentok/opentok.py | OpenTok.stop_broadcast | def stop_broadcast(self, broadcast_id):
"""
Use this method to stop a live broadcast of an OpenTok session
:param String broadcast_id: The ID of the broadcast you want to stop
:rtype A Broadcast object, which contains information about the broadcast: id, sessionId,
projectId, createdAt, updatedAt and resolution
"""
endpoint = self.endpoints.broadcast_url(broadcast_id, stop=True)
response = requests.post(
endpoint,
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
return Broadcast(response.json())
elif response.status_code == 400:
raise BroadcastError(
'Invalid request. This response may indicate that the data in your '
'request is invalid JSON.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
elif response.status_code == 409:
raise BroadcastError(
'The broadcast (with the specified ID) was not found or it has already '
'stopped.')
else:
raise RequestError('OpenTok server error.', response.status_code) | python | def stop_broadcast(self, broadcast_id):
"""
Use this method to stop a live broadcast of an OpenTok session
:param String broadcast_id: The ID of the broadcast you want to stop
:rtype A Broadcast object, which contains information about the broadcast: id, sessionId,
projectId, createdAt, updatedAt and resolution
"""
endpoint = self.endpoints.broadcast_url(broadcast_id, stop=True)
response = requests.post(
endpoint,
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
return Broadcast(response.json())
elif response.status_code == 400:
raise BroadcastError(
'Invalid request. This response may indicate that the data in your '
'request is invalid JSON.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
elif response.status_code == 409:
raise BroadcastError(
'The broadcast (with the specified ID) was not found or it has already '
'stopped.')
else:
raise RequestError('OpenTok server error.', response.status_code) | Use this method to stop a live broadcast of an OpenTok session
:param String broadcast_id: The ID of the broadcast you want to stop
:rtype A Broadcast object, which contains information about the broadcast: id, sessionId,
projectId, createdAt, updatedAt and resolution | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L834-L864 |
opentok/Opentok-Python-SDK | opentok/opentok.py | OpenTok.get_broadcast | def get_broadcast(self, broadcast_id):
"""
Use this method to get details on a broadcast that is in progress.
:param String broadcast_id: The ID of the broadcast you want details about
:rtype A Broadcast object, which contains information about the broadcast: id, sessionId,
projectId, createdAt, updatedAt, resolution, broadcastUrls and status
"""
endpoint = self.endpoints.broadcast_url(broadcast_id)
response = requests.get(
endpoint,
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
return Broadcast(response.json())
elif response.status_code == 400:
raise BroadcastError(
'Invalid request. This response may indicate that the data in your '
'request is invalid JSON.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
elif response.status_code == 409:
raise BroadcastError('No matching broadcast found (with the specified ID).')
else:
raise RequestError('OpenTok server error.', response.status_code) | python | def get_broadcast(self, broadcast_id):
"""
Use this method to get details on a broadcast that is in progress.
:param String broadcast_id: The ID of the broadcast you want details about
:rtype A Broadcast object, which contains information about the broadcast: id, sessionId,
projectId, createdAt, updatedAt, resolution, broadcastUrls and status
"""
endpoint = self.endpoints.broadcast_url(broadcast_id)
response = requests.get(
endpoint,
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
return Broadcast(response.json())
elif response.status_code == 400:
raise BroadcastError(
'Invalid request. This response may indicate that the data in your '
'request is invalid JSON.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
elif response.status_code == 409:
raise BroadcastError('No matching broadcast found (with the specified ID).')
else:
raise RequestError('OpenTok server error.', response.status_code) | Use this method to get details on a broadcast that is in progress.
:param String broadcast_id: The ID of the broadcast you want details about
:rtype A Broadcast object, which contains information about the broadcast: id, sessionId,
projectId, createdAt, updatedAt, resolution, broadcastUrls and status | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L866-L894 |
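A sketch that polls with get_broadcast and then stops the broadcast (reuses the opentok client and broadcast from the start_broadcast sketch above; the attribute names are assumptions based on the :rtype notes):

    import time

    info = opentok.get_broadcast(broadcast.id)
    if info.status == 'started':
        time.sleep(60)  # let it run for a minute
        stopped = opentok.stop_broadcast(broadcast.id)
        print(stopped.status)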
opentok/Opentok-Python-SDK | opentok/opentok.py | OpenTok.set_broadcast_layout | def set_broadcast_layout(self, broadcast_id, layout_type, stylesheet=None):
"""
Use this method to change the layout type of a live streaming broadcast
:param String broadcast_id: The ID of the broadcast that will be updated
:param String layout_type: The layout type for the broadcast. Valid values are:
'bestFit', 'custom', 'horizontalPresentation', 'pip' and 'verticalPresentation'
:param String stylesheet optional: CSS used to style the custom layout.
Specify this only if you set the type property to 'custom'
"""
payload = {
'type': layout_type,
}
if layout_type == 'custom':
if stylesheet is not None:
payload['stylesheet'] = stylesheet
endpoint = self.endpoints.broadcast_url(broadcast_id, layout=True)
response = requests.put(
endpoint,
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
pass
elif response.status_code == 400:
raise BroadcastError(
'Invalid request. This response may indicate that the data in your request '
'is invalid JSON. It may also indicate that you passed in invalid layout options.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
else:
raise RequestError('OpenTok server error.', response.status_code) | python | def set_broadcast_layout(self, broadcast_id, layout_type, stylesheet=None):
"""
Use this method to change the layout type of a live streaming broadcast
:param String broadcast_id: The ID of the broadcast that will be updated
:param String layout_type: The layout type for the broadcast. Valid values are:
'bestFit', 'custom', 'horizontalPresentation', 'pip' and 'verticalPresentation'
:param String stylesheet optional: CSS used to style the custom layout.
Specify this only if you set the type property to 'custom'
"""
payload = {
'type': layout_type,
}
if layout_type == 'custom':
if stylesheet is not None:
payload['stylesheet'] = stylesheet
endpoint = self.endpoints.broadcast_url(broadcast_id, layout=True)
response = requests.put(
endpoint,
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
pass
elif response.status_code == 400:
raise BroadcastError(
'Invalid request. This response may indicate that the data in your request '
'is invalid JSON. It may also indicate that you passed in invalid layout options.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
else:
raise RequestError('OpenTok server error.', response.status_code) | Use this method to change the layout type of a live streaming broadcast
:param String broadcast_id: The ID of the broadcast that will be updated
:param String layout_type: The layout type for the broadcast. Valid values are:
'bestFit', 'custom', 'horizontalPresentation', 'pip' and 'verticalPresentation'
:param String stylesheet optional: CSS used to style the custom layout.
Specify this only if you set the type property to 'custom' | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L896-L934 |
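A sketch for set_broadcast_layout, first applying a custom layout and then switching back to a predefined one (the CSS string is illustrative):

    custom_css = 'stream { float: left; width: 50%; height: 100%; }'
    opentok.set_broadcast_layout(broadcast.id, 'custom', stylesheet=custom_css)

    # predefined layouts take no stylesheet
    opentok.set_broadcast_layout(broadcast.id, 'horizontalPresentation')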
mdorn/pyinstapaper | pyinstapaper/instapaper.py | Instapaper.login | def login(self, username, password):
'''Authenticate using XAuth variant of OAuth.
:param str username: Username or email address for the relevant account
:param str password: Password for the account
'''
response = self.request(
ACCESS_TOKEN,
{
'x_auth_mode': 'client_auth',
'x_auth_username': username,
'x_auth_password': password
},
returns_json=False
)
token = dict(parse_qsl(response['data'].decode()))
self.token = oauth.Token(
token['oauth_token'], token['oauth_token_secret'])
self.oauth_client = oauth.Client(self.consumer, self.token) | python | def login(self, username, password):
'''Authenticate using XAuth variant of OAuth.
:param str username: Username or email address for the relevant account
:param str password: Password for the account
'''
response = self.request(
ACCESS_TOKEN,
{
'x_auth_mode': 'client_auth',
'x_auth_username': username,
'x_auth_password': password
},
returns_json=False
)
token = dict(parse_qsl(response['data'].decode()))
self.token = oauth.Token(
token['oauth_token'], token['oauth_token_secret'])
self.oauth_client = oauth.Client(self.consumer, self.token) | Authenticate using XAuth variant of OAuth.
:param str username: Username or email address for the relevant account
:param str password: Password for the account | https://github.com/mdorn/pyinstapaper/blob/94f5f61ccd07079ba3967f788c555aea1a81cca5/pyinstapaper/instapaper.py#L34-L52 |
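A minimal login sketch, assuming an OAuth consumer key/secret registered with Instapaper (all values below are placeholders):

    from pyinstapaper.instapaper import Instapaper

    client = Instapaper('CONSUMER_KEY', 'CONSUMER_SECRET')
    client.login('user@example.com', 'password')  # XAuth: no browser redirect needed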
mdorn/pyinstapaper | pyinstapaper/instapaper.py | Instapaper.request | def request(self, path, params=None, returns_json=True,
method='POST', api_version=API_VERSION):
'''Process a request using the OAuth client's request method.
:param str path: Path fragment to the API endpoint, e.g. "resource/ID"
:param dict params: Parameters to pass to request
:param str method: Optional HTTP method, normally POST for Instapaper
:param str api_version: Optional alternative API version
:returns: response headers and body
:retval: dict
'''
time.sleep(REQUEST_DELAY_SECS)
full_path = '/'.join([BASE_URL, 'api/%s' % api_version, path])
params = urlencode(params) if params else None
log.debug('URL: %s', full_path)
request_kwargs = {'method': method}
if params:
request_kwargs['body'] = params
response, content = self.oauth_client.request(
full_path, **request_kwargs)
log.debug('CONTENT: %s ...', content[:50])
if returns_json:
try:
data = json.loads(content)
if isinstance(data, list) and len(data) == 1:
# ugly -- API always returns a list even when you expect
# only one item
if data[0]['type'] == 'error':
raise Exception('Instapaper error %d: %s' % (
data[0]['error_code'],
data[0]['message'])
)
# TODO: PyInstapaperException custom class?
except ValueError:
# Instapaper API can be unpredictable/inconsistent, e.g.
# bookmarks/get_text doesn't return JSON
data = content
else:
data = content
return {
'response': response,
'data': data
} | python | def request(self, path, params=None, returns_json=True,
method='POST', api_version=API_VERSION):
'''Process a request using the OAuth client's request method.
:param str path: Path fragment to the API endpoint, e.g. "resource/ID"
:param dict params: Parameters to pass to request
:param str method: Optional HTTP method, normally POST for Instapaper
:param str api_version: Optional alternative API version
:returns: response headers and body
:retval: dict
'''
time.sleep(REQUEST_DELAY_SECS)
full_path = '/'.join([BASE_URL, 'api/%s' % api_version, path])
params = urlencode(params) if params else None
log.debug('URL: %s', full_path)
request_kwargs = {'method': method}
if params:
request_kwargs['body'] = params
response, content = self.oauth_client.request(
full_path, **request_kwargs)
log.debug('CONTENT: %s ...', content[:50])
if returns_json:
try:
data = json.loads(content)
if isinstance(data, list) and len(data) == 1:
# ugly -- API always returns a list even when you expect
# only one item
if data[0]['type'] == 'error':
raise Exception('Instapaper error %d: %s' % (
data[0]['error_code'],
data[0]['message'])
)
# TODO: PyInstapaperException custom class?
except ValueError:
# Instapaper API can be unpredictable/inconsistent, e.g.
# bookmarks/get_text doesn't return JSON
data = content
else:
data = content
return {
'response': response,
'data': data
} | Process a request using the OAuth client's request method.
:param str path: Path fragment to the API endpoint, e.g. "resource/ID"
:param dict params: Parameters to pass to request
:param str method: Optional HTTP method, normally POST for Instapaper
:param str api_version: Optional alternative API version
:returns: response headers and body
:rtype: dict | https://github.com/mdorn/pyinstapaper/blob/94f5f61ccd07079ba3967f788c555aea1a81cca5/pyinstapaper/instapaper.py#L54-L96 |
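The helpers below cover the common endpoints, but request() can also be called directly for anything else in the Instapaper Full API; a sketch (assumes the client from the login sketch above; the endpoint path and bookmark ID are assumptions):

    result = client.request('bookmarks/archive', {'bookmark_id': 12345})
    print(result['response'].status, result['data'])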
mdorn/pyinstapaper | pyinstapaper/instapaper.py | Instapaper.get_bookmarks | def get_bookmarks(self, folder='unread', limit=25, have=None):
"""Return list of user's bookmarks.
:param str folder: Optional. Possible values are unread (default),
starred, archive, or a folder_id value.
:param int limit: Optional. A number between 1 and 500, default 25.
:param list have: Optional. A list of IDs to exclude from results
:returns: List of user's bookmarks
:rtype: list
"""
path = 'bookmarks/list'
params = {'folder_id': folder, 'limit': limit}
if have:
have_concat = ','.join(str(id_) for id_ in have)
params['have'] = have_concat
response = self.request(path, params)
items = response['data']
bookmarks = []
for item in items:
if item.get('type') == 'error':
raise Exception(item.get('message'))
elif item.get('type') == 'bookmark':
bookmarks.append(Bookmark(self, **item))
return bookmarks | python | def get_bookmarks(self, folder='unread', limit=25, have=None):
"""Return list of user's bookmarks.
:param str folder: Optional. Possible values are unread (default),
starred, archive, or a folder_id value.
:param int limit: Optional. A number between 1 and 500, default 25.
:param list have: Optional. A list of IDs to exclude from results
:returns: List of user's bookmarks
:rtype: list
"""
path = 'bookmarks/list'
params = {'folder_id': folder, 'limit': limit}
if have:
have_concat = ','.join(str(id_) for id_ in have)
params['have'] = have_concat
response = self.request(path, params)
items = response['data']
bookmarks = []
for item in items:
if item.get('type') == 'error':
raise Exception(item.get('message'))
elif item.get('type') == 'bookmark':
bookmarks.append(Bookmark(self, **item))
return bookmarks | Return list of user's bookmarks.
:param str folder: Optional. Possible values are unread (default),
starred, archive, or a folder_id value.
:param int limit: Optional. A number between 1 and 500, default 25.
:param list have: Optional. A list of IDs to exclude from results
:returns: List of user's bookmarks
:rtype: list | https://github.com/mdorn/pyinstapaper/blob/94f5f61ccd07079ba3967f788c555aea1a81cca5/pyinstapaper/instapaper.py#L98-L121 |
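Usage sketch, fetching unread bookmarks while excluding IDs already synced (the have list and the attribute names, which mirror the API's bookmark fields, are assumptions):

    bookmarks = client.get_bookmarks(folder='unread', limit=50, have=[12345, 67890])
    for bookmark in bookmarks:
        print(bookmark.bookmark_id, bookmark.title)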
mdorn/pyinstapaper | pyinstapaper/instapaper.py | Instapaper.get_folders | def get_folders(self):
"""Return list of user's folders.
:rtype: list
"""
path = 'folders/list'
response = self.request(path)
items = response['data']
folders = []
for item in items:
if item.get('type') == 'error':
raise Exception(item.get('message'))
elif item.get('type') == 'folder':
folders.append(Folder(self, **item))
return folders | python | def get_folders(self):
"""Return list of user's folders.
:rtype: list
"""
path = 'folders/list'
response = self.request(path)
items = response['data']
folders = []
for item in items:
if item.get('type') == 'error':
raise Exception(item.get('message'))
elif item.get('type') == 'folder':
folders.append(Folder(self, **item))
return folders | Return list of user's folders.
:rtype: list | https://github.com/mdorn/pyinstapaper/blob/94f5f61ccd07079ba3967f788c555aea1a81cca5/pyinstapaper/instapaper.py#L123-L137 |
mdorn/pyinstapaper | pyinstapaper/instapaper.py | InstapaperObject.add | def add(self):
'''Save an object to Instapaper after instantiating it.
Example::
folder = Folder(instapaper, title='stuff')
result = folder.add()
'''
# TODO validation per object type
submit_attribs = {}
for attrib in self.ATTRIBUTES:
val = getattr(self, attrib, None)
if val:
submit_attribs[attrib] = val
path = '/'.join([self.RESOURCE, 'add'])
result = self.client.request(path, submit_attribs)
return result | python | def add(self):
'''Save an object to Instapaper after instantiating it.
Example::
folder = Folder(instapaper, title='stuff')
result = folder.add()
'''
# TODO validation per object type
submit_attribs = {}
for attrib in self.ATTRIBUTES:
val = getattr(self, attrib, None)
if val:
submit_attribs[attrib] = val
path = '/'.join([self.RESOURCE, 'add'])
result = self.client.request(path, submit_attribs)
return result | Save an object to Instapaper after instantiating it.
Example::
folder = Folder(instapaper, title='stuff')
result = folder.add() | https://github.com/mdorn/pyinstapaper/blob/94f5f61ccd07079ba3967f788c555aea1a81cca5/pyinstapaper/instapaper.py#L173-L189 |
mdorn/pyinstapaper | pyinstapaper/instapaper.py | InstapaperObject._simple_action | def _simple_action(self, action=None):
'''Issue a request for an API method whose only param is the obj ID.
:param str action: The name of the action for the resource
:returns: Response from the API
:rtype: dict
'''
if not action:
raise Exception('No simple action defined')
path = "/".join([self.RESOURCE, action])
response = self.client.request(
path, {self.RESOURCE_ID_ATTRIBUTE: self.object_id}
)
return response | python | def _simple_action(self, action=None):
'''Issue a request for an API method whose only param is the obj ID.
:param str action: The name of the action for the resource
:returns: Response from the API
:rtype: dict
'''
if not action:
raise Exception('No simple action defined')
path = "/".join([self.RESOURCE, action])
response = self.client.request(
path, {self.RESOURCE_ID_ATTRIBUTE: self.object_id}
)
return response | Issue a request for an API method whose only param is the obj ID.
:param str action: The name of the action for the resource
:returns: Response from the API
:rtype: dict | https://github.com/mdorn/pyinstapaper/blob/94f5f61ccd07079ba3967f788c555aea1a81cca5/pyinstapaper/instapaper.py#L191-L204 |
mdorn/pyinstapaper | pyinstapaper/instapaper.py | Bookmark.get_highlights | def get_highlights(self):
'''Get highlights for Bookmark instance.
:return: list of ``Highlight`` objects
:rtype: list
'''
# NOTE: all Instapaper API methods use POST except this one!
path = '/'.join([self.RESOURCE, str(self.object_id), 'highlights'])
response = self.client.request(path, method='GET', api_version='1.1')
items = response['data']
highlights = []
for item in items:
if item.get('type') == 'error':
raise Exception(item.get('message'))
elif item.get('type') == 'highlight':
highlights.append(Highlight(self, **item))
return highlights | python | def get_highlights(self):
'''Get highlights for Bookmark instance.
:return: list of ``Highlight`` objects
:rtype: list
'''
# NOTE: all Instapaper API methods use POST except this one!
path = '/'.join([self.RESOURCE, str(self.object_id), 'highlights'])
response = self.client.request(path, method='GET', api_version='1.1')
items = response['data']
highlights = []
for item in items:
if item.get('type') == 'error':
raise Exception(item.get('message'))
elif item.get('type') == 'highlight':
highlights.append(Highlight(self, **item))
return highlights | Get highlights for Bookmark instance.
:return: list of ``Highlight`` objects
:rtype: list | https://github.com/mdorn/pyinstapaper/blob/94f5f61ccd07079ba3967f788c555aea1a81cca5/pyinstapaper/instapaper.py#L242-L258 |
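A sketch tying the pieces together: list a few bookmarks, then pull highlights for each (the 'text' attribute mirrors the API's highlight field and is an assumption here):

    for bookmark in client.get_bookmarks(limit=5):
        for highlight in bookmark.get_highlights():
            print(highlight.text)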
Jetsetter/graphyte | graphyte.py | Sender.stop | def stop(self):
"""Tell the sender thread to finish and wait for it to stop sending
(should be at most "timeout" seconds).
"""
if self.interval is not None:
self._queue.put_nowait(None)
self._thread.join()
self.interval = None | python | def stop(self):
"""Tell the sender thread to finish and wait for it to stop sending
(should be at most "timeout" seconds).
"""
if self.interval is not None:
self._queue.put_nowait(None)
self._thread.join()
self.interval = None | Tell the sender thread to finish and wait for it to stop sending
(should be at most "timeout" seconds). | https://github.com/Jetsetter/graphyte/blob/200781c5105140df32b8e18bbec497cc0be5d40e/graphyte.py#L58-L65 |
Jetsetter/graphyte | graphyte.py | Sender.build_message | def build_message(self, metric, value, timestamp, tags={}):
"""Build a Graphite message to send and return it as a byte string."""
if not metric or metric.split(None, 1)[0] != metric:
raise ValueError('"metric" must not have whitespace in it')
if not isinstance(value, (int, float)):
raise TypeError('"value" must be an int or a float, not a {}'.format(
type(value).__name__))
tags_suffix = ''.join(';{}={}'.format(x[0], x[1]) for x in sorted(tags.items()))
message = u'{}{}{} {} {}\n'.format(
self.prefix + '.' if self.prefix else '',
metric,
tags_suffix,
value,
int(round(timestamp))
)
message = message.encode('utf-8')
return message | python | def build_message(self, metric, value, timestamp, tags={}):
"""Build a Graphite message to send and return it as a byte string."""
if not metric or metric.split(None, 1)[0] != metric:
raise ValueError('"metric" must not have whitespace in it')
if not isinstance(value, (int, float)):
raise TypeError('"value" must be an int or a float, not a {}'.format(
type(value).__name__))
tags_suffix = ''.join(';{}={}'.format(x[0], x[1]) for x in sorted(tags.items()))
message = u'{}{}{} {} {}\n'.format(
self.prefix + '.' if self.prefix else '',
metric,
tags_suffix,
value,
int(round(timestamp))
)
message = message.encode('utf-8')
return message | Build a Graphite message to send and return it as a byte string. | https://github.com/Jetsetter/graphyte/blob/200781c5105140df32b8e18bbec497cc0be5d40e/graphyte.py#L67-L85 |
Jetsetter/graphyte | graphyte.py | Sender.send | def send(self, metric, value, timestamp=None, tags={}):
"""Send given metric and (int or float) value to Graphite host.
Performs send on background thread if "interval" was specified when
creating this Sender.
If a "tags" dict is specified, send the tags to the Graphite host along with the metric.
"""
if timestamp is None:
timestamp = time.time()
message = self.build_message(metric, value, timestamp, tags)
if self.interval is None:
self.send_socket(message)
else:
try:
self._queue.put_nowait(message)
except queue.Full:
logger.error('queue full when sending {!r}'.format(message)) | python | def send(self, metric, value, timestamp=None, tags={}):
"""Send given metric and (int or float) value to Graphite host.
Performs send on background thread if "interval" was specified when
creating this Sender.
If a "tags" dict is specified, send the tags to the Graphite host along with the metric.
"""
if timestamp is None:
timestamp = time.time()
message = self.build_message(metric, value, timestamp, tags)
if self.interval is None:
self.send_socket(message)
else:
try:
self._queue.put_nowait(message)
except queue.Full:
logger.error('queue full when sending {!r}'.format(message)) | Send given metric and (int or float) value to Graphite host.
Performs send on background thread if "interval" was specified when
creating this Sender.
If a "tags" dict is specified, send the tags to the Graphite host along with the metric. | https://github.com/Jetsetter/graphyte/blob/200781c5105140df32b8e18bbec497cc0be5d40e/graphyte.py#L87-L104 |
Jetsetter/graphyte | graphyte.py | Sender.send_socket | def send_socket(self, message):
"""Low-level function to send message bytes to this Sender's socket.
You should usually call send() instead of this function (unless you're
subclassing or writing unit tests).
"""
if self.log_sends:
start_time = time.time()
try:
self.send_message(message)
except Exception as error:
logger.error('error sending message {!r}: {}'.format(message, error))
else:
if self.log_sends:
elapsed_time = time.time() - start_time
logger.info('sent message {!r} to {}:{} in {:.03f} seconds'.format(
message, self.host, self.port, elapsed_time)) | python | def send_socket(self, message):
"""Low-level function to send message bytes to this Sender's socket.
You should usually call send() instead of this function (unless you're
subclassing or writing unit tests).
"""
if self.log_sends:
start_time = time.time()
try:
self.send_message(message)
except Exception as error:
logger.error('error sending message {!r}: {}'.format(message, error))
else:
if self.log_sends:
elapsed_time = time.time() - start_time
logger.info('sent message {!r} to {}:{} in {:.03f} seconds'.format(
message, self.host, self.port, elapsed_time)) | Low-level function to send message bytes to this Sender's socket.
You should usually call send() instead of this function (unless you're
subclassing or writing unit tests). | https://github.com/Jetsetter/graphyte/blob/200781c5105140df32b8e18bbec497cc0be5d40e/graphyte.py#L123-L138 |
Jetsetter/graphyte | graphyte.py | Sender._thread_loop | def _thread_loop(self):
"""Background thread used when Sender is in asynchronous/interval mode."""
last_check_time = time.time()
messages = []
while True:
# Get first message from queue, blocking until the next time we
# should be sending
time_since_last_check = time.time() - last_check_time
time_till_next_check = max(0, self.interval - time_since_last_check)
try:
message = self._queue.get(timeout=time_till_next_check)
except queue.Empty:
pass
else:
if message is None:
# None is the signal to stop this background thread
break
messages.append(message)
# Get any other messages currently on queue without blocking,
# paying attention to None ("stop thread" signal)
should_stop = False
while True:
try:
message = self._queue.get_nowait()
except queue.Empty:
break
if message is None:
should_stop = True
break
messages.append(message)
if should_stop:
break
# If it's time to send, send what we've collected
current_time = time.time()
if current_time - last_check_time >= self.interval:
last_check_time = current_time
for i in range(0, len(messages), self.batch_size):
batch = messages[i:i + self.batch_size]
self.send_socket(b''.join(batch))
messages = []
# Send any final messages before exiting thread
for i in range(0, len(messages), self.batch_size):
batch = messages[i:i + self.batch_size]
self.send_socket(b''.join(batch)) | python | def _thread_loop(self):
"""Background thread used when Sender is in asynchronous/interval mode."""
last_check_time = time.time()
messages = []
while True:
# Get first message from queue, blocking until the next time we
# should be sending
time_since_last_check = time.time() - last_check_time
time_till_next_check = max(0, self.interval - time_since_last_check)
try:
message = self._queue.get(timeout=time_till_next_check)
except queue.Empty:
pass
else:
if message is None:
# None is the signal to stop this background thread
break
messages.append(message)
# Get any other messages currently on queue without blocking,
# paying attention to None ("stop thread" signal)
should_stop = False
while True:
try:
message = self._queue.get_nowait()
except queue.Empty:
break
if message is None:
should_stop = True
break
messages.append(message)
if should_stop:
break
# If it's time to send, send what we've collected
current_time = time.time()
if current_time - last_check_time >= self.interval:
last_check_time = current_time
for i in range(0, len(messages), self.batch_size):
batch = messages[i:i + self.batch_size]
self.send_socket(b''.join(batch))
messages = []
# Send any final messages before exiting thread
for i in range(0, len(messages), self.batch_size):
batch = messages[i:i + self.batch_size]
self.send_socket(b''.join(batch)) | Background thread used when Sender is in asynchronous/interval mode. | https://github.com/Jetsetter/graphyte/blob/200781c5105140df32b8e18bbec497cc0be5d40e/graphyte.py#L140-L186 |
gbowerman/azurerm | examples/get_vmss_rolling_upgrade.py | main | def main():
'''Main routine.'''
# validate command line arguments
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
'--vmssname', '-n', required=True, action='store', help='VMSS Name')
arg_parser.add_argument('--rgname', '-g', required=True, action='store',
help='Resource Group Name')
arg_parser.add_argument('--details', '-a', required=False, action='store_true',
default=False, help='Print all details')
args = arg_parser.parse_args()
name = args.vmssname
rgname = args.rgname
details = args.details
# Load Azure app defaults
try:
with open('azurermconfig.json') as config_file:
config_data = json.load(config_file)
except FileNotFoundError:
print("Error: Expecting azurermconfig.json in current folder")
sys.exit()
tenant_id = config_data['tenantId']
app_id = config_data['appId']
app_secret = config_data['appSecret']
subscription_id = config_data['subscriptionId']
# authenticate
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
# get rolling upgrade latest status
upgrade_status = azurerm.get_vmss_rolling_upgrades(
access_token, subscription_id, rgname, name)
# print details: pretty-print everything with --details, otherwise a compact dump
if details is True:
print(json.dumps(upgrade_status, sort_keys=False,
indent=2, separators=(',', ': ')))
else:
print(json.dumps(upgrade_status))
'''Main routine.'''
# validate command line arguments
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
'--vmssname', '-n', required=True, action='store', help='VMSS Name')
arg_parser.add_argument('--rgname', '-g', required=True, action='store',
help='Resource Group Name')
arg_parser.add_argument('--details', '-a', required=False, action='store_true',
default=False, help='Print all details')
args = arg_parser.parse_args()
name = args.vmssname
rgname = args.rgname
details = args.details
# Load Azure app defaults
try:
with open('azurermconfig.json') as config_file:
config_data = json.load(config_file)
except FileNotFoundError:
print("Error: Expecting azurermconfig.json in current folder")
sys.exit()
tenant_id = config_data['tenantId']
app_id = config_data['appId']
app_secret = config_data['appSecret']
subscription_id = config_data['subscriptionId']
# authenticate
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
# get rolling upgrade latest status
upgrade_status = azurerm.get_vmss_rolling_upgrades(
access_token, subscription_id, rgname, name)
# print details: pretty-print everything with --details, otherwise a compact dump
if details is True:
print(json.dumps(upgrade_status, sort_keys=False,
indent=2, separators=(',', ': ')))
else:
print(json.dumps(upgrade_status)) | Main routine. | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/get_vmss_rolling_upgrade.py#L9-L50 |
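These example scripts all read an azurermconfig.json from the working directory; a sketch of its shape with placeholder values (the keys match what the code loads):

    {
        "tenantId": "00000000-0000-0000-0000-000000000000",
        "appId": "00000000-0000-0000-0000-000000000000",
        "appSecret": "SERVICE-PRINCIPAL-SECRET",
        "subscriptionId": "00000000-0000-0000-0000-000000000000"
    }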
gbowerman/azurerm | examples/get_vmss.py | main | def main():
'''Main routine.'''
# process arguments
if len(sys.argv) < 3:
usage()
rgname = sys.argv[1]
vmss_name = sys.argv[2]
# Load Azure app defaults
try:
with open('azurermconfig.json') as config_file:
config_data = json.load(config_file)
except FileNotFoundError:
sys.exit('Error: Expecting azurermconfig.json in current folder')
tenant_id = config_data['tenantId']
app_id = config_data['appId']
app_secret = config_data['appSecret']
subscription_id = config_data['subscriptionId']
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
print('Printing VMSS details\n')
vmssget = azurerm.get_vmss(
access_token, subscription_id, rgname, vmss_name)
name = vmssget['name']
capacity = vmssget['sku']['capacity']
location = vmssget['location']
offer = \
vmssget['properties']['virtualMachineProfile']['storageProfile']['imageReference']['offer']
sku = vmssget['properties']['virtualMachineProfile']['storageProfile']['imageReference']['sku']
print(json.dumps(vmssget, sort_keys=False, indent=2, separators=(',', ': ')))
print('Name: ' + name + ', capacity: ' + str(capacity) + ', ' + location + ', ' + offer + ', '
+ sku)
print('Printing VMSS instance view')
instance_view = azurerm.get_vmss_instance_view(
access_token, subscription_id, rgname, vmss_name)
print(json.dumps(instance_view, sort_keys=False,
indent=2, separators=(',', ': ')))
'''
print('Listing VMSS VMs')
vmss_vms = azurerm.list_vmss_vms(access_token, subscription_id, rgname, vmss_name)
print(json.dumps(vmss_vms, sort_keys=False, indent=2, separators=(',', ': ')))
for vm in vmss_vms['value']:
instanceId = vm['instanceId']
vminstance_view = azurerm.get_vmss_vm_instance_view(access_token, subscription_id, rgname,
vmss_name, instanceId)
print('VM ' + str(instanceId) + ' instance view')
print(json.dumps(vminstance_view, sort_keys=False, indent=2, separators=(',', ': ')))
''' | python | def main():
'''Main routine.'''
# process arguments
if len(sys.argv) < 3:
usage()
rgname = sys.argv[1]
vmss_name = sys.argv[2]
# Load Azure app defaults
try:
with open('azurermconfig.json') as config_file:
config_data = json.load(config_file)
except FileNotFoundError:
sys.exit('Error: Expecting azurermconfig.json in current folder')
tenant_id = config_data['tenantId']
app_id = config_data['appId']
app_secret = config_data['appSecret']
subscription_id = config_data['subscriptionId']
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
print('Printing VMSS details\n')
vmssget = azurerm.get_vmss(
access_token, subscription_id, rgname, vmss_name)
name = vmssget['name']
capacity = vmssget['sku']['capacity']
location = vmssget['location']
offer = \
vmssget['properties']['virtualMachineProfile']['storageProfile']['imageReference']['offer']
sku = vmssget['properties']['virtualMachineProfile']['storageProfile']['imageReference']['sku']
print(json.dumps(vmssget, sort_keys=False, indent=2, separators=(',', ': ')))
print('Name: ' + name + ', capacity: ' + str(capacity) + ', ' + location + ', ' + offer + ', '
+ sku)
print('Printing VMSS instance view')
instance_view = azurerm.get_vmss_instance_view(
access_token, subscription_id, rgname, vmss_name)
print(json.dumps(instance_view, sort_keys=False,
indent=2, separators=(',', ': ')))
'''
print('Listing VMSS VMs')
vmss_vms = azurerm.list_vmss_vms(access_token, subscription_id, rgname, vmss_name)
print(json.dumps(vmss_vms, sort_keys=False, indent=2, separators=(',', ': ')))
for vm in vmss_vms['value']:
instanceId = vm['instanceId']
vminstance_view = azurerm.get_vmss_vm_instance_view(access_token, subscription_id, rgname,
vmss_name, instanceId)
print('VM ' + str(instanceId) + ' instance view')
print(json.dumps(vminstance_view, sort_keys=False, indent=2, separators=(',', ': ')))
''' | Main routine. | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/get_vmss.py#L13-L63 |
gbowerman/azurerm | examples/list_vmss_nics.py | get_rg_from_id | def get_rg_from_id(vmss_id):
'''get a resource group name from a VMSS ID string'''
rgname = re.search('Groups/(.+?)/providers', vmss_id).group(1)
print('Resource group: ' + rgname)
return rgname | python | def get_rg_from_id(vmss_id):
'''get a resource group name from a VMSS ID string'''
rgname = re.search('Groups/(.+?)/providers', vmss_id).group(1)
print('Resource group: ' + rgname)
return rgname | get a resource group name from a VMSS ID string | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/list_vmss_nics.py#L9-L13 |
gbowerman/azurerm | examples/list_vmss_nics.py | main | def main():
'''main routine'''
# Load Azure app defaults
try:
with open('azurermconfig.json') as config_file:
config_data = json.load(config_file)
except FileNotFoundError:
sys.exit('Error: Expecting azurermconfig.json in current folder')
tenant_id = config_data['tenantId']
app_id = config_data['appId']
app_secret = config_data['appSecret']
subscription_id = config_data['subscriptionId']
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
vmsslist = azurerm.list_vmss_sub(access_token, subscription_id)
for vmss in vmsslist['value']:
name = vmss['name']
location = vmss['location']
capacity = vmss['sku']['capacity']
print(''.join(['Name: ', name, ', location: ',
location, ', Capacity: ', str(capacity)]))
print('VMSS NICs...')
rgname = get_rg_from_id(vmss['id'])
vmss_nics = azurerm.get_vmss_nics(
access_token, subscription_id, rgname, name)
print(json.dumps(vmss_nics, sort_keys=False,
indent=2, separators=(',', ': ')))
print('VMSS Virtual machines...')
vms = azurerm.list_vmss_vms(
access_token, subscription_id, rgname, name)
#print(json.dumps(vms, sort_keys=False, indent=2, separators=(',', ': ')))
for vmssvm in vms['value']:
vm_id = vmssvm['instanceId']
print(vm_id + ', ' + vmssvm['name'] + '\n')
print('VMSS VM NICs...')
vmnics = azurerm.get_vmss_vm_nics(access_token, subscription_id, rgname, name,
vm_id)
print(json.dumps(vmnics, sort_keys=False,
indent=2, separators=(',', ': '))) | python | def main():
'''main routine'''
# Load Azure app defaults
try:
with open('azurermconfig.json') as config_file:
config_data = json.load(config_file)
except FileNotFoundError:
sys.exit('Error: Expecting azurermconfig.json in current folder')
tenant_id = config_data['tenantId']
app_id = config_data['appId']
app_secret = config_data['appSecret']
subscription_id = config_data['subscriptionId']
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
vmsslist = azurerm.list_vmss_sub(access_token, subscription_id)
for vmss in vmsslist['value']:
name = vmss['name']
location = vmss['location']
capacity = vmss['sku']['capacity']
print(''.join(['Name: ', name, ', location: ',
location, ', Capacity: ', str(capacity)]))
print('VMSS NICs...')
rgname = get_rg_from_id(vmss['id'])
vmss_nics = azurerm.get_vmss_nics(
access_token, subscription_id, rgname, name)
print(json.dumps(vmss_nics, sort_keys=False,
indent=2, separators=(',', ': ')))
print('VMSS Virtual machines...')
vms = azurerm.list_vmss_vms(
access_token, subscription_id, rgname, name)
#print(json.dumps(vms, sort_keys=False, indent=2, separators=(',', ': ')))
for vmssvm in vms['value']:
vm_id = vmssvm['instanceId']
print(vm_id + ', ' + vmssvm['name'] + '\n')
print('VMSS VM NICs...')
vmnics = azurerm.get_vmss_vm_nics(access_token, subscription_id, rgname, name,
vm_id)
print(json.dumps(vmnics, sort_keys=False,
indent=2, separators=(',', ': '))) | main routine | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/list_vmss_nics.py#L16-L56 |
gbowerman/azurerm | azurerm/restfns.py | get_user_agent | def get_user_agent():
'''User-Agent Header. Sends library identification to Azure endpoint.
'''
version = pkg_resources.require("azurerm")[0].version
user_agent = "python/{} ({}) requests/{} azurerm/{}".format(
platform.python_version(),
platform.platform(),
requests.__version__,
version)
return user_agent | python | def get_user_agent():
'''User-Agent Header. Sends library identification to Azure endpoint.
'''
version = pkg_resources.require("azurerm")[0].version
user_agent = "python/{} ({}) requests/{} azurerm/{}".format(
platform.python_version(),
platform.platform(),
requests.__version__,
version)
return user_agent | User-Agent Header. Sends library identification to Azure endpoint. | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/restfns.py#L9-L18 |
gbowerman/azurerm | azurerm/restfns.py | do_get | def do_get(endpoint, access_token):
'''Do an HTTP GET request and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body.
'''
headers = {"Authorization": 'Bearer ' + access_token}
headers['User-Agent'] = get_user_agent()
return requests.get(endpoint, headers=headers).json() | python | def do_get(endpoint, access_token):
'''Do an HTTP GET request and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body.
'''
headers = {"Authorization": 'Bearer ' + access_token}
headers['User-Agent'] = get_user_agent()
return requests.get(endpoint, headers=headers).json() | Do an HTTP GET request and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body. | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/restfns.py#L20-L32 |
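A sketch calling do_get directly against the ARM REST API (azurerm's higher-level functions normally build these URLs for you; the api-version is illustrative, with access_token and subscription_id obtained as in the example scripts above):

    endpoint = ('https://management.azure.com/subscriptions/' + subscription_id +
                '/resourcegroups?api-version=2017-05-10')
    groups = do_get(endpoint, access_token)
    for group in groups['value']:
        print(group['name'], group['location'])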
gbowerman/azurerm | azurerm/restfns.py | do_get_next | def do_get_next(endpoint, access_token):
'''Do an HTTP GET request, follow the nextLink chain and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body.
'''
headers = {"Authorization": 'Bearer ' + access_token}
headers['User-Agent'] = get_user_agent()
looping = True
value_list = []
vm_dict = {}
while looping:
get_return = requests.get(endpoint, headers=headers).json()
if 'value' not in get_return:
return get_return
if 'nextLink' not in get_return:
looping = False
else:
endpoint = get_return['nextLink']
value_list += get_return['value']
vm_dict['value'] = value_list
return vm_dict | python | def do_get_next(endpoint, access_token):
'''Do an HTTP GET request, follow the nextLink chain and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body.
'''
headers = {"Authorization": 'Bearer ' + access_token}
headers['User-Agent'] = get_user_agent()
looping = True
value_list = []
vm_dict = {}
while looping:
get_return = requests.get(endpoint, headers=headers).json()
if 'value' not in get_return:
return get_return
if 'nextLink' not in get_return:
looping = False
else:
endpoint = get_return['nextLink']
value_list += get_return['value']
vm_dict['value'] = value_list
return vm_dict | Do an HTTP GET request, follow the nextLink chain and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body. | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/restfns.py#L35-L60 |
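A sketch of why do_get_next exists: ARM list endpoints page large results via nextLink, so listing every VM in a subscription goes through it (api-version illustrative):

    endpoint = ('https://management.azure.com/subscriptions/' + subscription_id +
                '/providers/Microsoft.Compute/virtualMachines?api-version=2017-12-01')
    all_vms = do_get_next(endpoint, access_token)  # follows nextLink until exhausted
    print(len(all_vms['value']), 'VMs in subscription')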
gbowerman/azurerm | azurerm/restfns.py | do_delete | def do_delete(endpoint, access_token):
'''Do an HTTP DELETE request and return the response.
Args:
endpoint (str): Azure Resource Manager management endpoint.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response.
'''
headers = {"Authorization": 'Bearer ' + access_token}
headers['User-Agent'] = get_user_agent()
return requests.delete(endpoint, headers=headers) | python | def do_delete(endpoint, access_token):
'''Do an HTTP DELETE request and return the response.
Args:
endpoint (str): Azure Resource Manager management endpoint.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response.
'''
headers = {"Authorization": 'Bearer ' + access_token}
headers['User-Agent'] = get_user_agent()
return requests.delete(endpoint, headers=headers) | Do an HTTP DELETE request and return the response.
Args:
endpoint (str): Azure Resource Manager management endpoint.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/restfns.py#L63-L75 |
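A sketch deleting a resource group with do_delete (placeholder group name and api-version):

    endpoint = ('https://management.azure.com/subscriptions/' + subscription_id +
                '/resourcegroups/myrg?api-version=2017-05-10')
    response = do_delete(endpoint, access_token)
    print(response.status_code)  # 202 means the deletion was accepted and runs asynchronously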
gbowerman/azurerm | azurerm/restfns.py | do_patch | def do_patch(endpoint, body, access_token):
'''Do an HTTP PATCH request and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
body (str): JSON body of information to patch.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body.
'''
headers = {"content-type": "application/json", "Authorization": 'Bearer ' + access_token}
headers['User-Agent'] = get_user_agent()
return requests.patch(endpoint, data=body, headers=headers) | python | def do_patch(endpoint, body, access_token):
'''Do an HTTP PATCH request and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
body (str): JSON body of information to patch.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body.
'''
headers = {"content-type": "application/json", "Authorization": 'Bearer ' + access_token}
headers['User-Agent'] = get_user_agent()
return requests.patch(endpoint, data=body, headers=headers) | Do an HTTP PATCH request and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
body (str): JSON body of information to patch.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body. | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/restfns.py#L78-L91 |
gbowerman/azurerm | azurerm/restfns.py | do_post | def do_post(endpoint, body, access_token):
'''Do an HTTP POST request and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
body (str): JSON body of information to post.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body.
'''
headers = {"content-type": "application/json", "Authorization": 'Bearer ' + access_token}
headers['User-Agent'] = get_user_agent()
return requests.post(endpoint, data=body, headers=headers) | python | def do_post(endpoint, body, access_token):
'''Do an HTTP POST request and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
body (str): JSON body of information to post.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body.
'''
headers = {"content-type": "application/json", "Authorization": 'Bearer ' + access_token}
headers['User-Agent'] = get_user_agent()
return requests.post(endpoint, data=body, headers=headers) | Do an HTTP POST request and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
body (str): JSON body of information to post.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body. | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/restfns.py#L94-L107 |
gbowerman/azurerm | azurerm/restfns.py | do_put | def do_put(endpoint, body, access_token):
'''Do an HTTP PUT request and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
body (str): JSON body of information to put.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body.
'''
headers = {"content-type": "application/json", "Authorization": 'Bearer ' + access_token}
headers['User-Agent'] = get_user_agent()
return requests.put(endpoint, data=body, headers=headers) | python | def do_put(endpoint, body, access_token):
'''Do an HTTP PUT request and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
body (str): JSON body of information to put.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body.
'''
headers = {"content-type": "application/json", "Authorization": 'Bearer ' + access_token}
headers['User-Agent'] = get_user_agent()
return requests.put(endpoint, data=body, headers=headers) | Do an HTTP PUT request and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
body (str): JSON body of information to put.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body. | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/restfns.py#L110-L123 |
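A sketch using do_put to create or update a resource group; the body is the JSON the ARM API expects (placeholder name, location, and api-version):

    import json

    endpoint = ('https://management.azure.com/subscriptions/' + subscription_id +
                '/resourcegroups/myrg?api-version=2017-05-10')
    body = json.dumps({'location': 'eastus'})
    response = do_put(endpoint, body, access_token)
    print(response.status_code)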
gbowerman/azurerm | azurerm/restfns.py | get_url | def get_url(access_token, endpoint=ams_rest_endpoint, flag=True):
'''Get Media Services Final Endpoint URL.
Args:
access_token (str): A valid Azure authentication token.
endpoint (str): Azure Media Services Initial Endpoint.
flag (bool): flag.
Returns:
HTTP response. JSON body.
'''
return do_ams_get_url(endpoint, access_token, flag) | python | def get_url(access_token, endpoint=ams_rest_endpoint, flag=True):
'''Get Media Services Final Endpoint URL.
Args:
access_token (str): A valid Azure authentication token.
endpoint (str): Azure Media Services Initial Endpoint.
flag (bol): flag.
Returns:
HTTP response. JSON body.
'''
return do_ams_get_url(endpoint, access_token, flag) | Get Media Services Final Endpoint URL.
Args:
access_token (str): A valid Azure authentication token.
endpoint (str): Azure Media Services Initial Endpoint.
flag (bool): flag.
Returns:
HTTP response. JSON body. | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/restfns.py#L126-L136 |
gbowerman/azurerm | azurerm/restfns.py | do_ams_auth | def do_ams_auth(endpoint, body):
'''Acquire Media Services Authentication Token.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
body (str): A Content Body.
Returns:
HTTP response. JSON body.
'''
headers = {"content-type": "application/x-www-form-urlencoded",
"Accept": json_acceptformat}
return requests.post(endpoint, data=body, headers=headers) | python | def do_ams_auth(endpoint, body):
'''Acquire Media Services Authentication Token.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
body (str): A Content Body.
Returns:
HTTP response. JSON body.
'''
headers = {"content-type": "application/x-www-form-urlencoded",
"Accept": json_acceptformat}
return requests.post(endpoint, data=body, headers=headers) | Acquire Media Services Authentication Token.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
body (str): A Content Body.
Returns:
HTTP response. JSON body. | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/restfns.py#L139-L150 |
gbowerman/azurerm | azurerm/restfns.py | do_ams_put | def do_ams_put(endpoint, path, body, access_token, rformat="json", ds_min_version="3.0;NetFx"):
'''Do an AMS HTTP PUT request and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
path (str): Azure Media Services Endpoint Path.
body (str): Azure Media Services Content Body.
access_token (str): A valid Azure authentication token.
rformat (str): A required JSON Accept Format.
ds_min_version (str): A required DS MIN Version.
Returns:
HTTP response. JSON body.
'''
min_ds = dsversion_min
content_acceptformat = json_acceptformat
if rformat == "json_only":
min_ds = ds_min_version
content_acceptformat = json_only_acceptformat
headers = {"Content-Type": content_acceptformat,
"DataServiceVersion": min_ds,
"MaxDataServiceVersion": dsversion_max,
"Accept": json_acceptformat,
"Accept-Charset" : charset,
"Authorization": "Bearer " + access_token,
"x-ms-version" : xmsversion}
response = requests.put(endpoint, data=body, headers=headers, allow_redirects=False)
# AMS response to the first call can be a redirect,
# so we handle it here to make it transparent for the caller...
if response.status_code == 301:
redirected_url = ''.join([response.headers['location'], path])
response = requests.put(redirected_url, data=body, headers=headers)
return response | python | def do_ams_put(endpoint, path, body, access_token, rformat="json", ds_min_version="3.0;NetFx"):
'''Do an AMS HTTP PUT request and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
path (str): Azure Media Services Endpoint Path.
body (str): Azure Media Services Content Body.
access_token (str): A valid Azure authentication token.
rformat (str): A required JSON Accept Format.
ds_min_version (str): A required DS MIN Version.
Returns:
HTTP response. JSON body.
'''
min_ds = dsversion_min
content_acceptformat = json_acceptformat
if rformat == "json_only":
min_ds = ds_min_version
content_acceptformat = json_only_acceptformat
headers = {"Content-Type": content_acceptformat,
"DataServiceVersion": min_ds,
"MaxDataServiceVersion": dsversion_max,
"Accept": json_acceptformat,
"Accept-Charset" : charset,
"Authorization": "Bearer " + access_token,
"x-ms-version" : xmsversion}
response = requests.put(endpoint, data=body, headers=headers, allow_redirects=False)
# AMS response to the first call can be a redirect,
# so we handle it here to make it transparent for the caller...
if response.status_code == 301:
redirected_url = ''.join([response.headers['location'], path])
response = requests.put(redirected_url, data=body, headers=headers)
return response | Do an AMS HTTP PUT request and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
path (str): Azure Media Services Endpoint Path.
body (str): Azure Media Services Content Body.
access_token (str): A valid Azure authentication token.
rformat (str): A required JSON Accept Format.
ds_min_version (str): A required DS MIN Version.
Returns:
HTTP response. JSON body. | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/restfns.py#L180-L211 |
gbowerman/azurerm | azurerm/restfns.py | do_ams_post | def do_ams_post(endpoint, path, body, access_token, rformat="json", ds_min_version="3.0;NetFx"):
'''Do an AMS HTTP POST request and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
path (str): Azure Media Services Endpoint Path.
body (str): Azure Media Services Content Body.
access_token (str): A valid Azure authentication token.
rformat (str): A required JSON Accept Format.
ds_min_version (str): A required DS MIN Version.
Returns:
HTTP response. JSON body.
'''
min_ds = dsversion_min
content_acceptformat = json_acceptformat
acceptformat = json_acceptformat
if rformat == "json_only":
min_ds = ds_min_version
content_acceptformat = json_only_acceptformat
if rformat == "xml":
content_acceptformat = xml_acceptformat
acceptformat = xml_acceptformat + ",application/xml"
headers = {"Content-Type": content_acceptformat,
"DataServiceVersion": min_ds,
"MaxDataServiceVersion": dsversion_max,
"Accept": acceptformat,
"Accept-Charset" : charset,
"Authorization": "Bearer " + access_token,
"x-ms-version" : xmsversion}
response = requests.post(endpoint, data=body, headers=headers, allow_redirects=False)
# AMS response to the first call can be a redirect,
# so we handle it here to make it transparent for the caller...
if response.status_code == 301:
redirected_url = ''.join([response.headers['location'], path])
response = requests.post(redirected_url, data=body, headers=headers)
return response | python | def do_ams_post(endpoint, path, body, access_token, rformat="json", ds_min_version="3.0;NetFx"):
'''Do an AMS HTTP POST request and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
path (str): Azure Media Services Endpoint Path.
body (str): Azure Media Services Content Body.
access_token (str): A valid Azure authentication token.
rformat (str): A required JSON Accept Format.
ds_min_version (str): A required DS MIN Version.
Returns:
HTTP response. JSON body.
'''
min_ds = dsversion_min
content_acceptformat = json_acceptformat
acceptformat = json_acceptformat
if rformat == "json_only":
min_ds = ds_min_version
content_acceptformat = json_only_acceptformat
if rformat == "xml":
content_acceptformat = xml_acceptformat
acceptformat = xml_acceptformat + ",application/xml"
headers = {"Content-Type": content_acceptformat,
"DataServiceVersion": min_ds,
"MaxDataServiceVersion": dsversion_max,
"Accept": acceptformat,
"Accept-Charset" : charset,
"Authorization": "Bearer " + access_token,
"x-ms-version" : xmsversion}
response = requests.post(endpoint, data=body, headers=headers, allow_redirects=False)
# AMS response to the first call can be a redirect,
# so we handle it here to make it transparent for the caller...
if response.status_code == 301:
redirected_url = ''.join([response.headers['location'], path])
response = requests.post(redirected_url, data=body, headers=headers)
return response | Do an AMS HTTP POST request and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
path (str): Azure Media Services Endpoint Path.
body (str): Azure Media Services Content Body.
access_token (str): A valid Azure authentication token.
rformat (str): The Accept format to request ("json", "json_only", or "xml").
ds_min_version (str): The minimum DataServiceVersion header value.
Returns:
HTTP response. JSON body. | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/restfns.py#L214-L249 |
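A hedged usage sketch for do_ams_post: the endpoint, path, token, and payload below are illustrative placeholders, not values confirmed by this file. It shows the default JSON format plus a check on the returned requests.Response:

import json

endpoint = 'https://media.windows.net/API/Assets'   # placeholder initial endpoint
path = '/Assets'                                    # re-appended on a 301 redirect
access_token = '<access-token>'                     # placeholder OAuth token

body = json.dumps({'Name': 'example-asset'})        # hypothetical request payload
response = do_ams_post(endpoint, path, body, access_token)
if response.status_code in (200, 201):
    print(response.json())
else:
    print('POST failed:', response.status_code, response.text)

Passing rformat="json_only" or rformat="xml" only changes the Content-Type/Accept headers the helper builds; the call shape stays the same.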
gbowerman/azurerm | azurerm/restfns.py | do_ams_patch | def do_ams_patch(endpoint, path, body, access_token):
'''Do an AMS PATCH request and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
path (str): Azure Media Services Endpoint Path.
body (str): Azure Media Services Content Body.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body.
'''
headers = {"Content-Type": json_acceptformat,
"DataServiceVersion": dsversion_min,
"MaxDataServiceVersion": dsversion_max,
"Accept": json_acceptformat,
"Accept-Charset" : charset,
"Authorization": "Bearer " + access_token,
"x-ms-version" : xmsversion}
response = requests.patch(endpoint, data=body, headers=headers, allow_redirects=False)
# AMS response to the first call can be a redirect,
# so we handle it here to make it transparent for the caller...
if response.status_code == 301:
redirected_url = ''.join([response.headers['location'], path])
response = requests.patch(redirected_url, data=body, headers=headers)
return response | python | def do_ams_patch(endpoint, path, body, access_token):
'''Do an AMS PATCH request and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
path (str): Azure Media Services Endpoint Path.
body (str): Azure Media Services Content Body.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body.
'''
headers = {"Content-Type": json_acceptformat,
"DataServiceVersion": dsversion_min,
"MaxDataServiceVersion": dsversion_max,
"Accept": json_acceptformat,
"Accept-Charset" : charset,
"Authorization": "Bearer " + access_token,
"x-ms-version" : xmsversion}
response = requests.patch(endpoint, data=body, headers=headers, allow_redirects=False)
# AMS response to the first call can be a redirect,
# so we handle it here to make it transparent for the caller...
if response.status_code == 301:
redirected_url = ''.join([response.headers['location'], path])
response = requests.patch(redirected_url, data=body, headers=headers)
return response | Do an AMS PATCH request and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
path (str): Azure Media Services Endpoint Path.
body (str): Azure Media Services Content Body.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body. | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/restfns.py#L252-L276 |
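A sketch of using do_ams_patch to rename an entity; the asset ID, URL shape, and expected 204 status are assumptions based on typical OData behavior, not on anything in this file:

import json

asset_id = "nb:cid:UUID:00000000-0000-0000-0000-000000000000"   # placeholder ID
path = "/Assets('{0}')".format(asset_id)
endpoint = 'https://media.windows.net/API' + path               # placeholder endpoint
body = json.dumps({'Name': 'renamed-asset'})

response = do_ams_patch(endpoint, path, body, '<access-token>')
# OData services usually answer a successful PATCH with 204 No Content.
print(response.status_code)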
gbowerman/azurerm | azurerm/restfns.py | do_ams_delete | def do_ams_delete(endpoint, path, access_token):
'''Do an AMS DELETE request and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
path (str): Azure Media Services Endpoint Path.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body.
'''
headers = {"DataServiceVersion": dsversion_min,
"MaxDataServiceVersion": dsversion_max,
"Accept": json_acceptformat,
"Accept-Charset" : charset,
"Authorization": 'Bearer ' + access_token,
"x-ms-version" : xmsversion}
response = requests.delete(endpoint, headers=headers, allow_redirects=False)
# AMS response to the first call can be a redirect,
# so we handle it here to make it transparent for the caller...
if response.status_code == 301:
redirected_url = ''.join([response.headers['location'], path])
response = requests.delete(redirected_url, headers=headers)
return response | python | def do_ams_delete(endpoint, path, access_token):
'''Do an AMS DELETE request and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
path (str): Azure Media Services Endpoint Path.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body.
'''
headers = {"DataServiceVersion": dsversion_min,
"MaxDataServiceVersion": dsversion_max,
"Accept": json_acceptformat,
"Accept-Charset" : charset,
"Authorization": 'Bearer ' + access_token,
"x-ms-version" : xmsversion}
response = requests.delete(endpoint, headers=headers, allow_redirects=False)
# AMS response to the first call can be a redirect,
# so we handle it here to make it transparent for the caller...
if response.status_code == 301:
redirected_url = ''.join([response.headers['location'], path])
response = requests.delete(redirected_url, headers=headers)
return response | Do an AMS DELETE request and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
path (str): Azure Media Services Endpoint Path.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body. | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/restfns.py#L279-L301 |
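The matching DELETE sketch, with the same placeholder caveats as the PATCH example above:

asset_id = "nb:cid:UUID:00000000-0000-0000-0000-000000000000"   # placeholder ID
path = "/Assets('{0}')".format(asset_id)
endpoint = 'https://media.windows.net/API' + path               # placeholder endpoint

response = do_ams_delete(endpoint, path, '<access-token>')
if response.status_code != 204:   # OData deletes usually return 204 No Content
    print('Delete failed:', response.status_code, response.text)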
gbowerman/azurerm | azurerm/restfns.py | do_ams_sto_put | def do_ams_sto_put(endpoint, body, content_length):
'''Do a PUT request to the Azure Storage API and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
body (str): Azure Media Services Content Body.
content_length (int): Length of the request body in bytes.
Returns:
HTTP response. JSON body.
'''
headers = {"Accept": json_acceptformat,
"Accept-Charset" : charset,
"x-ms-blob-type" : "BlockBlob",
"x-ms-meta-m1": "v1",
"x-ms-meta-m2": "v2",
"x-ms-version" : "2015-02-21",
"Content-Length" : str(content_length)}
return requests.put(endpoint, data=body, headers=headers) | python | def do_ams_sto_put(endpoint, body, content_length):
'''Do a PUT request to the Azure Storage API and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
body (str): Azure Media Services Content Body.
content_length (int): Length of the request body in bytes.
Returns:
HTTP response. JSON body.
'''
headers = {"Accept": json_acceptformat,
"Accept-Charset" : charset,
"x-ms-blob-type" : "BlockBlob",
"x-ms-meta-m1": "v1",
"x-ms-meta-m2": "v2",
"x-ms-version" : "2015-02-21",
"Content-Length" : str(content_length)}
return requests.put(endpoint, data=body, headers=headers) | Do a PUT request to the Azure Storage API and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
body (str): Azure Media Services Content Body.
content_length (int): Length of the request body in bytes.
Returns:
HTTP response. JSON body. | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/restfns.py#L304-L321 |
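do_ams_sto_put expects the caller to supply the raw bytes and their length, since it sets Content-Length and the BlockBlob headers itself. A sketch of uploading a local file to a SAS-signed blob URL (the URL is a placeholder; a real one would come from an AMS access policy and locator):

file_path = 'video.mp4'   # local file to upload (placeholder)
sas_url = ('https://<account>.blob.core.windows.net/'
           '<container>/video.mp4?<sas-token>')   # placeholder SAS URL

with open(file_path, 'rb') as upload_file:
    body = upload_file.read()
response = do_ams_sto_put(sas_url, body, len(body))
print(response.status_code)   # a successful blob PUT returns 201 Created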
gbowerman/azurerm | azurerm/restfns.py | do_ams_get_url | def do_ams_get_url(endpoint, access_token, flag=True):
'''Do an AMS GET request to retrieve the Final AMS Endpoint and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
access_token (str): A valid Azure authentication token.
flag (bool): Whether to follow the redirect automatically.
Returns:
HTTP response. JSON body.
'''
headers = {"Content-Type": json_acceptformat,
"DataServiceVersion": dsversion_min,
"MaxDataServiceVersion": dsversion_max,
"Accept": json_acceptformat,
"Accept-Charset" : charset,
"Authorization": "Bearer " + access_token,
"x-ms-version" : xmsversion}
body = ''
response = requests.get(endpoint, headers=headers, allow_redirects=flag)
if flag:
if response.status_code == 301:
response = requests.get(response.headers['location'], data=body, headers=headers)
return response | python | def do_ams_get_url(endpoint, access_token, flag=True):
'''Do an AMS GET request to retrieve the Final AMS Endpoint and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
access_token (str): A valid Azure authentication token.
flag (bool): Whether to follow the redirect automatically.
Returns:
HTTP response. JSON body.
'''
headers = {"Content-Type": json_acceptformat,
"DataServiceVersion": dsversion_min,
"MaxDataServiceVersion": dsversion_max,
"Accept": json_acceptformat,
"Accept-Charset" : charset,
"Authorization": "Bearer " + access_token,
"x-ms-version" : xmsversion}
body = ''
response = requests.get(endpoint, headers=headers, allow_redirects=flag)
if flag:
if response.status_code == 301:
response = requests.get(response.headers['location'], data=body, headers=headers)
return response | Do an AMS GET request to retrieve the Final AMS Endpoint and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
access_token (str): A valid Azure authentication token.
flag (bool): Whether to follow the redirect automatically.
Returns:
HTTP response. JSON body. | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/restfns.py#L324-L346 |
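Because the initial AMS endpoint 301-redirects to an account-specific host, a common first step is to resolve the final endpoint once with do_ams_get_url and reuse it for later calls. A sketch with placeholder values:

initial_endpoint = 'https://media.windows.net/API/'   # placeholder initial endpoint
access_token = '<access-token>'

# With flag=True, requests follows the redirect itself, and response.url
# holds the final, account-specific endpoint.
resolved = do_ams_get_url(initial_endpoint, access_token)
ams_endpoint = resolved.url.rstrip('/')

assets = do_ams_get_url(ams_endpoint + '/Assets', access_token)
print(assets.status_code)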
gbowerman/azurerm | examples/vip_swap.py | handle_bad_update | def handle_bad_update(operation, ret):
'''Report an error for a failed update and exit.'''
print("Error " + operation)
sys.exit('Return code: ' + str(ret.status_code) + ' Error: ' + ret.text) | python | def handle_bad_update(operation, ret):
'''Report an error for a failed update and exit.'''
print("Error " + operation)
sys.exit('Return code: ' + str(ret.status_code) + ' Error: ' + ret.text) | Report an error for a failed update and exit. | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/vip_swap.py#L12-L15