repository_name | func_path_in_repository | func_name | whole_func_string | language | func_code_string | func_documentation_string | func_code_url
---|---|---|---|---|---|---|---|
jart/fabulous | fabulous/rotating_cube.py | Frame.line | def line(self, x0, y0, x1, y1, c='*'):
    r"""Draws a line
    Who would have thought this would be so complicated? Thanks
    again Wikipedia_ <3
    .. _Wikipedia: http://en.wikipedia.org/wiki/Bresenham's_line_algorithm
    """
    steep = abs(y1 - y0) > abs(x1 - x0)
    if steep:
        (x0, y0) = (y0, x0)
        (x1, y1) = (y1, x1)
    if x0 > x1:
        (x0, x1) = (x1, x0)
        (y0, y1) = (y1, y0)
    deltax = x1 - x0
    deltay = abs(y1 - y0)
    error = deltax // 2
    y = y0
    if y0 < y1:
        ystep = 1
    else:
        ystep = -1
    for x in range(x0, x1 + 1):
        if steep:
            self[y, x] = c
        else:
            self[x, y] = c
        error = error - deltay
        if error < 0:
            y = y + ystep
            error = error + deltax | python | def line(self, x0, y0, x1, y1, c='*'):
    r"""Draws a line
    Who would have thought this would be so complicated? Thanks
    again Wikipedia_ <3
    .. _Wikipedia: http://en.wikipedia.org/wiki/Bresenham's_line_algorithm
    """
    steep = abs(y1 - y0) > abs(x1 - x0)
    if steep:
        (x0, y0) = (y0, x0)
        (x1, y1) = (y1, x1)
    if x0 > x1:
        (x0, x1) = (x1, x0)
        (y0, y1) = (y1, y0)
    deltax = x1 - x0
    deltay = abs(y1 - y0)
    error = deltax // 2
    y = y0
    if y0 < y1:
        ystep = 1
    else:
        ystep = -1
    for x in range(x0, x1 + 1):
        if steep:
            self[y, x] = c
        else:
            self[x, y] = c
        error = error - deltay
        if error < 0:
            y = y + ystep
            error = error + deltax | r"""Draws a line
Who would have thought this would be so complicated? Thanks
again Wikipedia_ <3
.. _Wikipedia: http://en.wikipedia.org/wiki/Bresenham's_line_algorithm | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/rotating_cube.py#L52-L83 |
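A quick way to exercise the routine above outside a terminal is to feed it a plain character grid. This is a minimal sketch, assuming `Frame` is importable from `fabulous.rotating_cube` as the row above suggests; `TextGrid` is hypothetical and only mimics the `self[x, y] = c` indexing that `line()` relies on:

```python
from fabulous.rotating_cube import Frame  # assumed import path, per the row above

class TextGrid:
    """Hypothetical stand-in supporting the grid[x, y] = c protocol."""
    def __init__(self, width, height):
        self.width, self.height = width, height
        self.cells = [[' '] * width for _ in range(height)]

    def __setitem__(self, pos, c):
        x, y = pos  # clip instead of raising, so lines may run off-grid
        if 0 <= x < self.width and 0 <= y < self.height:
            self.cells[y][x] = c

grid = TextGrid(20, 10)
# line() only ever assigns through self[...], so any such grid works here.
Frame.line(grid, 0, 0, 19, 9, c='#')
print('\n'.join(''.join(row) for row in grid.cells))
```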
taskcluster/taskcluster-client.py | taskcluster/ec2manager.py | EC2Manager.terminateWorkerType | def terminateWorkerType(self, *args, **kwargs):
    """
    Terminate all resources from a worker type
    Terminate all instances for this worker type
    This method is ``experimental``
    """
    return self._makeApiCall(self.funcinfo["terminateWorkerType"], *args, **kwargs) | python | def terminateWorkerType(self, *args, **kwargs):
    """
    Terminate all resources from a worker type
    Terminate all instances for this worker type
    This method is ``experimental``
    """
    return self._makeApiCall(self.funcinfo["terminateWorkerType"], *args, **kwargs) | Terminate all resources from a worker type
Terminate all instances for this worker type
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/ec2manager.py#L62-L71 |
taskcluster/taskcluster-client.py | taskcluster/ec2manager.py | EC2Manager.getPrices | def getPrices(self, *args, **kwargs):
    """
    Request prices for EC2
    Return a list of possible prices for EC2
    This method gives output: ``v1/prices.json#``
    This method is ``experimental``
    """
    return self._makeApiCall(self.funcinfo["getPrices"], *args, **kwargs) | python | def getPrices(self, *args, **kwargs):
    """
    Request prices for EC2
    Return a list of possible prices for EC2
    This method gives output: ``v1/prices.json#``
    This method is ``experimental``
    """
    return self._makeApiCall(self.funcinfo["getPrices"], *args, **kwargs) | Request prices for EC2
Return a list of possible prices for EC2
This method gives output: ``v1/prices.json#``
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/ec2manager.py#L160-L171 |
taskcluster/taskcluster-client.py | taskcluster/ec2manager.py | EC2Manager.allState | def allState(self, *args, **kwargs):
    """
    List out the entire internal state
    This method is only for debugging the ec2-manager
    This method is ``experimental``
    """
    return self._makeApiCall(self.funcinfo["allState"], *args, **kwargs) | python | def allState(self, *args, **kwargs):
    """
    List out the entire internal state
    This method is only for debugging the ec2-manager
    This method is ``experimental``
    """
    return self._makeApiCall(self.funcinfo["allState"], *args, **kwargs) | List out the entire internal state
This method is only for debugging the ec2-manager
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/ec2manager.py#L272-L281 |
taskcluster/taskcluster-client.py | taskcluster/queue.py | Queue.task | def task(self, *args, **kwargs):
    """
    Get Task Definition
    This end-point will return the task-definition. Notice that the task
    definition may have been modified by the queue; if an optional property is
    not specified, the queue may provide a default value.
    This method gives output: ``v1/task.json#``
    This method is ``stable``
    """
    return self._makeApiCall(self.funcinfo["task"], *args, **kwargs) | python | def task(self, *args, **kwargs):
    """
    Get Task Definition
    This end-point will return the task-definition. Notice that the task
    definition may have been modified by the queue; if an optional property is
    not specified, the queue may provide a default value.
    This method gives output: ``v1/task.json#``
    This method is ``stable``
    """
    return self._makeApiCall(self.funcinfo["task"], *args, **kwargs) | Get Task Definition
This end-point will return the task-definition. Notice that the task
definition may have been modified by the queue; if an optional property is
not specified, the queue may provide a default value.
This method gives output: ``v1/task.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queue.py#L44-L57 |
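As a usage sketch for these wrappers: route parameters are passed positionally and the decoded JSON body is returned. The `rootUrl` and `taskId` below are placeholders, and note that older releases of this client took a per-service `baseUrl` instead of a single `rootUrl`:

```python
import taskcluster

# Placeholder deployment URL and taskId (taskIds are 22-character slugs).
queue = taskcluster.Queue({'rootUrl': 'https://tc.example.com'})
task = queue.task('fN1SbArXTPSVFNUvaOlinQ')
print(task['metadata']['name'])
```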
taskcluster/taskcluster-client.py | taskcluster/queue.py | Queue.defineTask | def defineTask(self, *args, **kwargs):
    """
    Define Task
    **Deprecated**, this is the same as `createTask` with a **self-dependency**.
    It is only present for legacy reasons.
    This method takes input: ``v1/create-task-request.json#``
    This method gives output: ``v1/task-status-response.json#``
    This method is ``deprecated``
    """
    return self._makeApiCall(self.funcinfo["defineTask"], *args, **kwargs) | python | def defineTask(self, *args, **kwargs):
    """
    Define Task
    **Deprecated**, this is the same as `createTask` with a **self-dependency**.
    It is only present for legacy reasons.
    This method takes input: ``v1/create-task-request.json#``
    This method gives output: ``v1/task-status-response.json#``
    This method is ``deprecated``
    """
    return self._makeApiCall(self.funcinfo["defineTask"], *args, **kwargs) | Define Task
**Deprecated**, this is the same as `createTask` with a **self-dependency**.
It is only present for legacy reasons.
This method takes input: ``v1/create-task-request.json#``
This method gives output: ``v1/task-status-response.json#``
This method is ``deprecated`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queue.py#L172-L186 |
taskcluster/taskcluster-client.py | taskcluster/queue.py | Queue.scheduleTask | def scheduleTask(self, *args, **kwargs):
    """
    Schedule Defined Task
    scheduleTask will schedule a task to be executed, even if it has
    unresolved dependencies. A task would otherwise only be scheduled if
    its dependencies were resolved.
    This is useful if you have defined a task that depends on itself or on
    some other task that has not been resolved, but you wish the task to be
    scheduled immediately.
    This will announce the task as pending and workers will be allowed to
    claim it and resolve the task.
    **Note** this operation is **idempotent** and will not fail or complain
    if called with a `taskId` that is already scheduled, or even resolved.
    To reschedule a task previously resolved, use `rerunTask`.
    This method gives output: ``v1/task-status-response.json#``
    This method is ``stable``
    """
    return self._makeApiCall(self.funcinfo["scheduleTask"], *args, **kwargs) | python | def scheduleTask(self, *args, **kwargs):
    """
    Schedule Defined Task
    scheduleTask will schedule a task to be executed, even if it has
    unresolved dependencies. A task would otherwise only be scheduled if
    its dependencies were resolved.
    This is useful if you have defined a task that depends on itself or on
    some other task that has not been resolved, but you wish the task to be
    scheduled immediately.
    This will announce the task as pending and workers will be allowed to
    claim it and resolve the task.
    **Note** this operation is **idempotent** and will not fail or complain
    if called with a `taskId` that is already scheduled, or even resolved.
    To reschedule a task previously resolved, use `rerunTask`.
    This method gives output: ``v1/task-status-response.json#``
    This method is ``stable``
    """
    return self._makeApiCall(self.funcinfo["scheduleTask"], *args, **kwargs) | Schedule Defined Task
scheduleTask will schedule a task to be executed, even if it has
unresolved dependencies. A task would otherwise only be scheduled if
its dependencies were resolved.
This is useful if you have defined a task that depends on itself or on
some other task that has not been resolved, but you wish the task to be
scheduled immediately.
This will announce the task as pending and workers will be allowed to
claim it and resolve the task.
**Note** this operation is **idempotent** and will not fail or complain
if called with a `taskId` that is already scheduled, or even resolved.
To reschedule a task previously resolved, use `rerunTask`.
This method gives output: ``v1/task-status-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queue.py#L188-L212 |
taskcluster/taskcluster-client.py | taskcluster/queue.py | Queue.rerunTask | def rerunTask(self, *args, **kwargs):
    """
    Rerun a Resolved Task
    This method _reruns_ a previously resolved task, even if it was
    _completed_. This is useful if your task completes unsuccessfully, and
    you just want to run it from scratch again. This will also reset the
    number of `retries` allowed.
    This method is deprecated in favour of creating a new task with the same
    task definition (but with a new taskId).
    Remember that `retries` in the task status counts the number of runs that
    the queue has started because the worker stopped responding, for example
    because a spot node died.
    **Remark** this operation is idempotent; if you try to rerun a task that
    is not either `failed` or `completed`, this operation will just return
    the current task status.
    This method gives output: ``v1/task-status-response.json#``
    This method is ``deprecated``
    """
    return self._makeApiCall(self.funcinfo["rerunTask"], *args, **kwargs) | python | def rerunTask(self, *args, **kwargs):
    """
    Rerun a Resolved Task
    This method _reruns_ a previously resolved task, even if it was
    _completed_. This is useful if your task completes unsuccessfully, and
    you just want to run it from scratch again. This will also reset the
    number of `retries` allowed.
    This method is deprecated in favour of creating a new task with the same
    task definition (but with a new taskId).
    Remember that `retries` in the task status counts the number of runs that
    the queue has started because the worker stopped responding, for example
    because a spot node died.
    **Remark** this operation is idempotent; if you try to rerun a task that
    is not either `failed` or `completed`, this operation will just return
    the current task status.
    This method gives output: ``v1/task-status-response.json#``
    This method is ``deprecated``
    """
    return self._makeApiCall(self.funcinfo["rerunTask"], *args, **kwargs) | Rerun a Resolved Task
This method _reruns_ a previously resolved task, even if it was
_completed_. This is useful if your task completes unsuccessfully, and
you just want to run it from scratch again. This will also reset the
number of `retries` allowed.
This method is deprecated in favour of creating a new task with the same
task definition (but with a new taskId).
Remember that `retries` in the task status counts the number of runs that
the queue has started because the worker stopped responding, for example
because a spot node died.
**Remark** this operation is idempotent; if you try to rerun a task that
is not either `failed` or `completed`, this operation will just return
the current task status.
This method gives output: ``v1/task-status-response.json#``
This method is ``deprecated`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queue.py#L214-L239 |
taskcluster/taskcluster-client.py | taskcluster/queue.py | Queue.cancelTask | def cancelTask(self, *args, **kwargs):
    """
    Cancel Task
    This method will cancel a task that is either `unscheduled`, `pending` or
    `running`. It will resolve the current run as `exception` with
    `reasonResolved` set to `canceled`. If the task isn't scheduled yet, i.e.
    it doesn't have any runs, an initial run will be added and resolved as
    described above. Hence, after canceling a task, it cannot be scheduled
    with `queue.scheduleTask`, but a new run can be created with
    `queue.rerun`. These semantics are equivalent to calling
    `queue.scheduleTask` immediately followed by `queue.cancelTask`.
    **Remark** this operation is idempotent; if you try to cancel a task that
    isn't `unscheduled`, `pending` or `running`, this operation will just
    return the current task status.
    This method gives output: ``v1/task-status-response.json#``
    This method is ``stable``
    """
    return self._makeApiCall(self.funcinfo["cancelTask"], *args, **kwargs) | python | def cancelTask(self, *args, **kwargs):
    """
    Cancel Task
    This method will cancel a task that is either `unscheduled`, `pending` or
    `running`. It will resolve the current run as `exception` with
    `reasonResolved` set to `canceled`. If the task isn't scheduled yet, i.e.
    it doesn't have any runs, an initial run will be added and resolved as
    described above. Hence, after canceling a task, it cannot be scheduled
    with `queue.scheduleTask`, but a new run can be created with
    `queue.rerun`. These semantics are equivalent to calling
    `queue.scheduleTask` immediately followed by `queue.cancelTask`.
    **Remark** this operation is idempotent; if you try to cancel a task that
    isn't `unscheduled`, `pending` or `running`, this operation will just
    return the current task status.
    This method gives output: ``v1/task-status-response.json#``
    This method is ``stable``
    """
    return self._makeApiCall(self.funcinfo["cancelTask"], *args, **kwargs) | Cancel Task
This method will cancel a task that is either `unscheduled`, `pending` or
`running`. It will resolve the current run as `exception` with
`reasonResolved` set to `canceled`. If the task isn't scheduled yet, i.e.
it doesn't have any runs, an initial run will be added and resolved as
described above. Hence, after canceling a task, it cannot be scheduled
with `queue.scheduleTask`, but a new run can be created with
`queue.rerun`. These semantics are equivalent to calling
`queue.scheduleTask` immediately followed by `queue.cancelTask`.
**Remark** this operation is idempotent; if you try to cancel a task that
isn't `unscheduled`, `pending` or `running`, this operation will just
return the current task status.
This method gives output: ``v1/task-status-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queue.py#L241-L263 |
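Because the operation is idempotent, a caller never needs to check task state first. A sketch under the same client assumptions as above (`task_id` is a placeholder):

```python
def cancel_if_active(queue, task_id):
    # Safe even if the task already resolved: the current status comes back.
    status = queue.cancelTask(task_id)
    run = status['status']['runs'][-1]
    if run['state'] == 'exception' and run.get('reasonResolved') == 'canceled':
        print('run was canceled')
    else:
        print('task had already resolved as', run['state'])
```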
taskcluster/taskcluster-client.py | taskcluster/queue.py | Queue.reportFailed | def reportFailed(self, *args, **kwargs):
    """
    Report Run Failed
    Report a run failed, resolving the run as `failed`. Use this to resolve
    a run that failed because the task-specific code behaved unexpectedly.
    For example the task exited non-zero, or didn't produce the expected output.
    Do not use this if the task couldn't be run because of a malformed
    payload, or some other unexpected condition. In these cases we have a task
    exception, which should be reported with `reportException`.
    This method gives output: ``v1/task-status-response.json#``
    This method is ``stable``
    """
    return self._makeApiCall(self.funcinfo["reportFailed"], *args, **kwargs) | python | def reportFailed(self, *args, **kwargs):
    """
    Report Run Failed
    Report a run failed, resolving the run as `failed`. Use this to resolve
    a run that failed because the task-specific code behaved unexpectedly.
    For example the task exited non-zero, or didn't produce the expected output.
    Do not use this if the task couldn't be run because of a malformed
    payload, or some other unexpected condition. In these cases we have a task
    exception, which should be reported with `reportException`.
    This method gives output: ``v1/task-status-response.json#``
    This method is ``stable``
    """
    return self._makeApiCall(self.funcinfo["reportFailed"], *args, **kwargs) | Report Run Failed
Report a run failed, resolving the run as `failed`. Use this to resolve
a run that failed because the task-specific code behaved unexpectedly.
For example the task exited non-zero, or didn't produce the expected output.
Do not use this if the task couldn't be run because of a malformed
payload, or some other unexpected condition. In these cases we have a task
exception, which should be reported with `reportException`.
This method gives output: ``v1/task-status-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queue.py#L348-L365 |
taskcluster/taskcluster-client.py | taskcluster/queue.py | Queue.reportException | def reportException(self, *args, **kwargs):
    """
    Report Task Exception
    Resolve a run as _exception_. Generally, you will want to report tasks as
    failed instead of exception. You should `reportException` if,
    * The `task.payload` is invalid,
    * Non-existent resources are referenced,
    * Declared actions cannot be executed due to unavailable resources,
    * The worker had to shut down prematurely,
    * The worker experienced an unknown error, or,
    * The task explicitly requested a retry.
    Do not use this to signal that some user-specified code crashed for any
    reason specific to this code. If user-specific code hits a resource that
    is temporarily unavailable, the worker should report the task _failed_.
    This method takes input: ``v1/task-exception-request.json#``
    This method gives output: ``v1/task-status-response.json#``
    This method is ``stable``
    """
    return self._makeApiCall(self.funcinfo["reportException"], *args, **kwargs) | python | def reportException(self, *args, **kwargs):
    """
    Report Task Exception
    Resolve a run as _exception_. Generally, you will want to report tasks as
    failed instead of exception. You should `reportException` if,
    * The `task.payload` is invalid,
    * Non-existent resources are referenced,
    * Declared actions cannot be executed due to unavailable resources,
    * The worker had to shut down prematurely,
    * The worker experienced an unknown error, or,
    * The task explicitly requested a retry.
    Do not use this to signal that some user-specified code crashed for any
    reason specific to this code. If user-specific code hits a resource that
    is temporarily unavailable, the worker should report the task _failed_.
    This method takes input: ``v1/task-exception-request.json#``
    This method gives output: ``v1/task-status-response.json#``
    This method is ``stable``
    """
    return self._makeApiCall(self.funcinfo["reportException"], *args, **kwargs) | Report Task Exception
Resolve a run as _exception_. Generally, you will want to report tasks as
failed instead of exception. You should `reportException` if,
* The `task.payload` is invalid,
* Non-existent resources are referenced,
* Declared actions cannot be executed due to unavailable resources,
* The worker had to shut down prematurely,
* The worker experienced an unknown error, or,
* The task explicitly requested a retry.
Do not use this to signal that some user-specified code crashed for any
reason specific to this code. If user-specific code hits a resource that
is temporarily unavailable, the worker should report the task _failed_.
This method takes input: ``v1/task-exception-request.json#``
This method gives output: ``v1/task-status-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queue.py#L367-L392 |
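A worker-side sketch of the failed-versus-exception decision described above. The `reason` value follows `v1/task-exception-request.json#` (e.g. `malformed-payload`), `reportCompleted` is the matching success call on this client, and all IDs are placeholders:

```python
def resolve_run(queue, task_id, run_id, exit_code, payload_valid):
    if not payload_valid:
        # The task could not be run at all: report an exception, not a failure.
        return queue.reportException(task_id, run_id,
                                     {'reason': 'malformed-payload'})
    if exit_code != 0:
        # Task-specific code misbehaved: resolve the run as failed.
        return queue.reportFailed(task_id, run_id)
    return queue.reportCompleted(task_id, run_id)
```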
taskcluster/taskcluster-client.py | taskcluster/queue.py | Queue.createArtifact | def createArtifact(self, *args, **kwargs):
    """
    Create Artifact
    This API end-point creates an artifact for a specific run of a task. This
    should **only** be used by a worker currently operating on this task, or
    from a process running within the task (i.e. on the worker).
    All artifacts must specify an `expires` time; the queue will
    automatically take care of deleting artifacts past their
    expiration point. This feature makes it feasible to upload large
    intermediate artifacts from data processing applications, as the
    artifacts can be set to expire a few days later.
    We currently support 3 different `storageType`s; each storage type has
    slightly different features and in some cases different semantics.
    We also have 2 deprecated `storageType`s which are only maintained for
    backwards compatibility and should not be used in new implementations.
    **Blob artifacts** are useful for storing large files. Currently, these
    are all stored in S3 but there are facilities for adding support for other
    backends in the future. A call for this type of artifact must provide information
    about the file which will be uploaded. This includes sha256 sums and sizes.
    This method will return a list of general form HTTP requests which are signed
    by AWS S3 credentials managed by the Queue. Once these requests are completed,
    the list of `ETag` values returned by the requests must be passed to the
    queue `completeArtifact` method.
    **S3 artifacts** (DEPRECATED) are useful for static files which will be
    stored on S3. When creating an S3 artifact the queue will return a
    pre-signed URL to which you can do a `PUT` request to upload your
    artifact. Note that the `PUT` request **must** specify the `content-length`
    header and **must** give the `content-type` header the same value as in
    the request to `createArtifact`.
    **Azure artifacts** (DEPRECATED) are stored in the _Azure Blob Storage_ service,
    which, given the consistency guarantees and API interface offered by Azure,
    is more suitable for artifacts that will be modified during the execution
    of the task. For example docker-worker has a feature that persists the
    task log to Azure Blob Storage every few seconds, creating a somewhat
    live log. A request to create an Azure artifact will return a URL
    featuring a [Shared-Access-Signature](http://msdn.microsoft.com/en-us/library/azure/dn140256.aspx);
    refer to MSDN for further information on how to use these.
    **Warning: azure artifact is currently an experimental feature subject
    to changes and data-drops.**
    **Reference artifacts** only consist of meta-data which the queue will
    store for you. These artifacts really only have a `url` property and
    when the artifact is requested the client will be redirected to the URL
    provided with a `303` (See Other) redirect. Please note that we cannot
    delete artifacts you upload to other services; we can only delete the
    reference to the artifact, when it expires.
    **Error artifacts** only consist of meta-data which the queue will
    store for you. These artifacts are only meant to indicate that the
    worker or the task failed to generate a specific artifact that it
    would otherwise have uploaded. For example docker-worker will upload an
    error artifact if the file it was supposed to upload doesn't exist or
    turns out to be a directory. Clients requesting an error artifact will
    get a `424` (Failed Dependency) response. This is mainly designed to
    ensure that dependent tasks can distinguish between artifacts that were
    supposed to be generated and artifacts for which the name is misspelled.
    **Artifact immutability**: generally speaking, you cannot overwrite an
    artifact once created. But if you repeat the request with the same
    properties the request will succeed, as the operation is idempotent.
    This is useful if you need to refresh a signed URL while uploading.
    Do not abuse this to overwrite artifacts created by another entity,
    such as the worker-host overwriting an artifact created by worker-code!
    As a special case, the `url` property on _reference artifacts_ can be
    updated. You should only use this to update the `url` property for
    reference artifacts your process has created.
    This method takes input: ``v1/post-artifact-request.json#``
    This method gives output: ``v1/post-artifact-response.json#``
    This method is ``stable``
    """
    return self._makeApiCall(self.funcinfo["createArtifact"], *args, **kwargs) | python | def createArtifact(self, *args, **kwargs):
    """
    Create Artifact
    This API end-point creates an artifact for a specific run of a task. This
    should **only** be used by a worker currently operating on this task, or
    from a process running within the task (i.e. on the worker).
    All artifacts must specify an `expires` time; the queue will
    automatically take care of deleting artifacts past their
    expiration point. This feature makes it feasible to upload large
    intermediate artifacts from data processing applications, as the
    artifacts can be set to expire a few days later.
    We currently support 3 different `storageType`s; each storage type has
    slightly different features and in some cases different semantics.
    We also have 2 deprecated `storageType`s which are only maintained for
    backwards compatibility and should not be used in new implementations.
    **Blob artifacts** are useful for storing large files. Currently, these
    are all stored in S3 but there are facilities for adding support for other
    backends in the future. A call for this type of artifact must provide information
    about the file which will be uploaded. This includes sha256 sums and sizes.
    This method will return a list of general form HTTP requests which are signed
    by AWS S3 credentials managed by the Queue. Once these requests are completed,
    the list of `ETag` values returned by the requests must be passed to the
    queue `completeArtifact` method.
    **S3 artifacts** (DEPRECATED) are useful for static files which will be
    stored on S3. When creating an S3 artifact the queue will return a
    pre-signed URL to which you can do a `PUT` request to upload your
    artifact. Note that the `PUT` request **must** specify the `content-length`
    header and **must** give the `content-type` header the same value as in
    the request to `createArtifact`.
    **Azure artifacts** (DEPRECATED) are stored in the _Azure Blob Storage_ service,
    which, given the consistency guarantees and API interface offered by Azure,
    is more suitable for artifacts that will be modified during the execution
    of the task. For example docker-worker has a feature that persists the
    task log to Azure Blob Storage every few seconds, creating a somewhat
    live log. A request to create an Azure artifact will return a URL
    featuring a [Shared-Access-Signature](http://msdn.microsoft.com/en-us/library/azure/dn140256.aspx);
    refer to MSDN for further information on how to use these.
    **Warning: azure artifact is currently an experimental feature subject
    to changes and data-drops.**
    **Reference artifacts** only consist of meta-data which the queue will
    store for you. These artifacts really only have a `url` property and
    when the artifact is requested the client will be redirected to the URL
    provided with a `303` (See Other) redirect. Please note that we cannot
    delete artifacts you upload to other services; we can only delete the
    reference to the artifact, when it expires.
    **Error artifacts** only consist of meta-data which the queue will
    store for you. These artifacts are only meant to indicate that the
    worker or the task failed to generate a specific artifact that it
    would otherwise have uploaded. For example docker-worker will upload an
    error artifact if the file it was supposed to upload doesn't exist or
    turns out to be a directory. Clients requesting an error artifact will
    get a `424` (Failed Dependency) response. This is mainly designed to
    ensure that dependent tasks can distinguish between artifacts that were
    supposed to be generated and artifacts for which the name is misspelled.
    **Artifact immutability**: generally speaking, you cannot overwrite an
    artifact once created. But if you repeat the request with the same
    properties the request will succeed, as the operation is idempotent.
    This is useful if you need to refresh a signed URL while uploading.
    Do not abuse this to overwrite artifacts created by another entity,
    such as the worker-host overwriting an artifact created by worker-code!
    As a special case, the `url` property on _reference artifacts_ can be
    updated. You should only use this to update the `url` property for
    reference artifacts your process has created.
    This method takes input: ``v1/post-artifact-request.json#``
    This method gives output: ``v1/post-artifact-response.json#``
    This method is ``stable``
    """
    return self._makeApiCall(self.funcinfo["createArtifact"], *args, **kwargs) | Create Artifact
This API end-point creates an artifact for a specific run of a task. This
should **only** be used by a worker currently operating on this task, or
from a process running within the task (i.e. on the worker).
All artifacts must specify an `expires` time; the queue will
automatically take care of deleting artifacts past their
expiration point. This feature makes it feasible to upload large
intermediate artifacts from data processing applications, as the
artifacts can be set to expire a few days later.
We currently support 3 different `storageType`s; each storage type has
slightly different features and in some cases different semantics.
We also have 2 deprecated `storageType`s which are only maintained for
backwards compatibility and should not be used in new implementations.
**Blob artifacts** are useful for storing large files. Currently, these
are all stored in S3 but there are facilities for adding support for other
backends in the future. A call for this type of artifact must provide information
about the file which will be uploaded. This includes sha256 sums and sizes.
This method will return a list of general form HTTP requests which are signed
by AWS S3 credentials managed by the Queue. Once these requests are completed,
the list of `ETag` values returned by the requests must be passed to the
queue `completeArtifact` method.
**S3 artifacts** (DEPRECATED) are useful for static files which will be
stored on S3. When creating an S3 artifact the queue will return a
pre-signed URL to which you can do a `PUT` request to upload your
artifact. Note that the `PUT` request **must** specify the `content-length`
header and **must** give the `content-type` header the same value as in
the request to `createArtifact`.
**Azure artifacts** (DEPRECATED) are stored in the _Azure Blob Storage_ service,
which, given the consistency guarantees and API interface offered by Azure,
is more suitable for artifacts that will be modified during the execution
of the task. For example docker-worker has a feature that persists the
task log to Azure Blob Storage every few seconds, creating a somewhat
live log. A request to create an Azure artifact will return a URL
featuring a [Shared-Access-Signature](http://msdn.microsoft.com/en-us/library/azure/dn140256.aspx);
refer to MSDN for further information on how to use these.
**Warning: azure artifact is currently an experimental feature subject
to changes and data-drops.**
**Reference artifacts** only consist of meta-data which the queue will
store for you. These artifacts really only have a `url` property and
when the artifact is requested the client will be redirected to the URL
provided with a `303` (See Other) redirect. Please note that we cannot
delete artifacts you upload to other services; we can only delete the
reference to the artifact, when it expires.
**Error artifacts** only consist of meta-data which the queue will
store for you. These artifacts are only meant to indicate that the
worker or the task failed to generate a specific artifact that it
would otherwise have uploaded. For example docker-worker will upload an
error artifact if the file it was supposed to upload doesn't exist or
turns out to be a directory. Clients requesting an error artifact will
get a `424` (Failed Dependency) response. This is mainly designed to
ensure that dependent tasks can distinguish between artifacts that were
supposed to be generated and artifacts for which the name is misspelled.
**Artifact immutability**: generally speaking, you cannot overwrite an
artifact once created. But if you repeat the request with the same
properties the request will succeed, as the operation is idempotent.
This is useful if you need to refresh a signed URL while uploading.
Do not abuse this to overwrite artifacts created by another entity,
such as the worker-host overwriting an artifact created by worker-code!
As a special case, the `url` property on _reference artifacts_ can be
updated. You should only use this to update the `url` property for
reference artifacts your process has created.
This method takes input: ``v1/post-artifact-request.json#``
This method gives output: ``v1/post-artifact-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queue.py#L394-L475 |
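A sketch of the simplest (deprecated) `s3` flow under the same client assumptions as earlier: ask the queue for a pre-signed URL, then `PUT` the file with the same `content-type` that was declared. `taskcluster.fromNowJSON` is this client's helper for relative timestamps, `requests` performs the upload, and the task/run IDs and paths are placeholders:

```python
import requests
import taskcluster

queue = taskcluster.Queue({'rootUrl': 'https://tc.example.com'})
response = queue.createArtifact(task_id, run_id, 'public/logs/build.log', {
    'storageType': 's3',
    'contentType': 'text/plain',
    'expires': taskcluster.fromNowJSON('7 days'),
})
# The PUT must carry the content-type given to createArtifact; requests
# derives the required content-length from the file object.
with open('build.log', 'rb') as f:
    requests.put(response['putUrl'], data=f,
                 headers={'Content-Type': 'text/plain'})
```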
taskcluster/taskcluster-client.py | taskcluster/queue.py | Queue.completeArtifact | def completeArtifact(self, *args, **kwargs):
    """
    Complete Artifact
    This endpoint finalises an upload done through the blob `storageType`.
    The queue will ensure that the task/run is still allowing artifacts
    to be uploaded. For single-part S3 blob artifacts, this endpoint
    will simply ensure the artifact is present in S3. For multipart S3
    artifacts, the endpoint will perform the commit step of the multipart
    upload flow. As the final step for both multi and single part artifacts,
    the `present` entity field will be set to `true` to reflect that the
    artifact is now present and a message published to pulse. NOTE: This
    endpoint *must* be called for all artifacts of storageType 'blob'
    This method takes input: ``v1/put-artifact-request.json#``
    This method is ``experimental``
    """
    return self._makeApiCall(self.funcinfo["completeArtifact"], *args, **kwargs) | python | def completeArtifact(self, *args, **kwargs):
    """
    Complete Artifact
    This endpoint finalises an upload done through the blob `storageType`.
    The queue will ensure that the task/run is still allowing artifacts
    to be uploaded. For single-part S3 blob artifacts, this endpoint
    will simply ensure the artifact is present in S3. For multipart S3
    artifacts, the endpoint will perform the commit step of the multipart
    upload flow. As the final step for both multi and single part artifacts,
    the `present` entity field will be set to `true` to reflect that the
    artifact is now present and a message published to pulse. NOTE: This
    endpoint *must* be called for all artifacts of storageType 'blob'
    This method takes input: ``v1/put-artifact-request.json#``
    This method is ``experimental``
    """
    return self._makeApiCall(self.funcinfo["completeArtifact"], *args, **kwargs) | Complete Artifact
This endpoint finalises an upload done through the blob `storageType`.
The queue will ensure that the task/run is still allowing artifacts
to be uploaded. For single-part S3 blob artifacts, this endpoint
will simply ensure the artifact is present in S3. For multipart S3
artifacts, the endpoint will perform the commit step of the multipart
upload flow. As the final step for both multi and single part artifacts,
the `present` entity field will be set to `true` to reflect that the
artifact is now present and a message published to pulse. NOTE: This
endpoint *must* be called for all artifacts of storageType 'blob'
This method takes input: ``v1/put-artifact-request.json#``
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queue.py#L477-L496 |
taskcluster/taskcluster-client.py | taskcluster/queue.py | Queue.listProvisioners | def listProvisioners(self, *args, **kwargs):
    """
    Get a list of all active provisioners
    Get all active provisioners.
    The term "provisioner" is taken broadly to mean anything with a provisionerId.
    This does not necessarily mean there is an associated service performing any
    provisioning activity.
    The response is paged. If this end-point returns a `continuationToken`, you
    should call the end-point again with the `continuationToken` as a query-string
    option. By default this end-point will list up to 1000 provisioners in a single
    page. You may limit this with the query-string parameter `limit`.
    This method gives output: ``v1/list-provisioners-response.json#``
    This method is ``experimental``
    """
    return self._makeApiCall(self.funcinfo["listProvisioners"], *args, **kwargs) | python | def listProvisioners(self, *args, **kwargs):
    """
    Get a list of all active provisioners
    Get all active provisioners.
    The term "provisioner" is taken broadly to mean anything with a provisionerId.
    This does not necessarily mean there is an associated service performing any
    provisioning activity.
    The response is paged. If this end-point returns a `continuationToken`, you
    should call the end-point again with the `continuationToken` as a query-string
    option. By default this end-point will list up to 1000 provisioners in a single
    page. You may limit this with the query-string parameter `limit`.
    This method gives output: ``v1/list-provisioners-response.json#``
    This method is ``experimental``
    """
    return self._makeApiCall(self.funcinfo["listProvisioners"], *args, **kwargs) | Get a list of all active provisioners
Get all active provisioners.
The term "provisioner" is taken broadly to mean anything with a provisionerId.
This does not necessarily mean there is an associated service performing any
provisioning activity.
The response is paged. If this end-point returns a `continuationToken`, you
should call the end-point again with the `continuationToken` as a query-string
option. By default this end-point will list up to 1000 provisioners in a single
page. You may limit this with the query-string parameter `limit`.
This method gives output: ``v1/list-provisioners-response.json#``
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queue.py#L654-L674 |
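The paging contract (repeat the call, passing back `continuationToken` until it stops appearing) looks like this in practice; a sketch assuming the client forwards query-string options through a `query` keyword, as other methods in this library do:

```python
def all_provisioners(queue):
    query = {'limit': 100}
    while True:
        page = queue.listProvisioners(query=query)
        for provisioner in page['provisioners']:
            yield provisioner
        token = page.get('continuationToken')
        if not token:
            return
        query['continuationToken'] = token
```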
taskcluster/taskcluster-client.py | taskcluster/queue.py | Queue.getProvisioner | def getProvisioner(self, *args, **kwargs):
    """
    Get an active provisioner
    Get an active provisioner.
    The term "provisioner" is taken broadly to mean anything with a provisionerId.
    This does not necessarily mean there is an associated service performing any
    provisioning activity.
    This method gives output: ``v1/provisioner-response.json#``
    This method is ``experimental``
    """
    return self._makeApiCall(self.funcinfo["getProvisioner"], *args, **kwargs) | python | def getProvisioner(self, *args, **kwargs):
    """
    Get an active provisioner
    Get an active provisioner.
    The term "provisioner" is taken broadly to mean anything with a provisionerId.
    This does not necessarily mean there is an associated service performing any
    provisioning activity.
    This method gives output: ``v1/provisioner-response.json#``
    This method is ``experimental``
    """
    return self._makeApiCall(self.funcinfo["getProvisioner"], *args, **kwargs) | Get an active provisioner
Get an active provisioner.
The term "provisioner" is taken broadly to mean anything with a provisionerId.
This does not necessarily mean there is an associated service performing any
provisioning activity.
This method gives output: ``v1/provisioner-response.json#``
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queue.py#L676-L691 |
taskcluster/taskcluster-client.py | taskcluster/queue.py | Queue.declareProvisioner | def declareProvisioner(self, *args, **kwargs):
    """
    Update a provisioner
    Declare a provisioner, supplying some details about it.
    `declareProvisioner` allows updating one or more properties of a provisioner as long as the required scopes are
    possessed. For example, a request to update the `aws-provisioner-v1`
    provisioner with a body `{description: 'This provisioner is great'}` would require you to have the scope
    `queue:declare-provisioner:aws-provisioner-v1#description`.
    The term "provisioner" is taken broadly to mean anything with a provisionerId.
    This does not necessarily mean there is an associated service performing any
    provisioning activity.
    This method takes input: ``v1/update-provisioner-request.json#``
    This method gives output: ``v1/provisioner-response.json#``
    This method is ``experimental``
    """
    return self._makeApiCall(self.funcinfo["declareProvisioner"], *args, **kwargs) | python | def declareProvisioner(self, *args, **kwargs):
    """
    Update a provisioner
    Declare a provisioner, supplying some details about it.
    `declareProvisioner` allows updating one or more properties of a provisioner as long as the required scopes are
    possessed. For example, a request to update the `aws-provisioner-v1`
    provisioner with a body `{description: 'This provisioner is great'}` would require you to have the scope
    `queue:declare-provisioner:aws-provisioner-v1#description`.
    The term "provisioner" is taken broadly to mean anything with a provisionerId.
    This does not necessarily mean there is an associated service performing any
    provisioning activity.
    This method takes input: ``v1/update-provisioner-request.json#``
    This method gives output: ``v1/provisioner-response.json#``
    This method is ``experimental``
    """
    return self._makeApiCall(self.funcinfo["declareProvisioner"], *args, **kwargs) | Update a provisioner
Declare a provisioner, supplying some details about it.
`declareProvisioner` allows updating one or more properties of a provisioner as long as the required scopes are
possessed. For example, a request to update the `aws-provisioner-v1`
provisioner with a body `{description: 'This provisioner is great'}` would require you to have the scope
`queue:declare-provisioner:aws-provisioner-v1#description`.
The term "provisioner" is taken broadly to mean anything with a provisionerId.
This does not necessarily mean there is an associated service performing any
provisioning activity.
This method takes input: ``v1/update-provisioner-request.json#``
This method gives output: ``v1/provisioner-response.json#``
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queue.py#L693-L715 |
taskcluster/taskcluster-client.py | taskcluster/queue.py | Queue.pendingTasks | def pendingTasks(self, *args, **kwargs):
    """
    Get Number of Pending Tasks
    Get an approximate number of pending tasks for the given `provisionerId`
    and `workerType`.
    The underlying Azure Storage Queues only promise to give us an estimate.
    Furthermore, we cache the result in memory for 20 seconds, so consumers
    should by no means expect this to be an accurate number.
    It is, however, a solid estimate of the number of pending tasks.
    This method gives output: ``v1/pending-tasks-response.json#``
    This method is ``stable``
    """
    return self._makeApiCall(self.funcinfo["pendingTasks"], *args, **kwargs) | python | def pendingTasks(self, *args, **kwargs):
    """
    Get Number of Pending Tasks
    Get an approximate number of pending tasks for the given `provisionerId`
    and `workerType`.
    The underlying Azure Storage Queues only promise to give us an estimate.
    Furthermore, we cache the result in memory for 20 seconds, so consumers
    should by no means expect this to be an accurate number.
    It is, however, a solid estimate of the number of pending tasks.
    This method gives output: ``v1/pending-tasks-response.json#``
    This method is ``stable``
    """
    return self._makeApiCall(self.funcinfo["pendingTasks"], *args, **kwargs) | Get Number of Pending Tasks
Get an approximate number of pending tasks for the given `provisionerId`
and `workerType`.
The underlying Azure Storage Queues only promise to give us an estimate.
Furthermore, we cache the result in memory for 20 seconds, so consumers
should by no means expect this to be an accurate number.
It is, however, a solid estimate of the number of pending tasks.
This method gives output: ``v1/pending-tasks-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queue.py#L717-L734 |
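Calling it is a single lookup; a sketch with placeholder IDs, keeping the docstring's caveat in mind that the count is cached for ~20 seconds and only ever an estimate:

```python
import taskcluster

queue = taskcluster.Queue({'rootUrl': 'https://tc.example.com'})  # placeholder
counts = queue.pendingTasks('aws-provisioner-v1', 'tutorial')
print(counts['pendingTasks'])  # a solid estimate, never an exact number
```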
taskcluster/taskcluster-client.py | taskcluster/queue.py | Queue.quarantineWorker | def quarantineWorker(self, *args, **kwargs):
    """
    Quarantine a worker
    Quarantine a worker
    This method takes input: ``v1/quarantine-worker-request.json#``
    This method gives output: ``v1/worker-response.json#``
    This method is ``experimental``
    """
    return self._makeApiCall(self.funcinfo["quarantineWorker"], *args, **kwargs) | python | def quarantineWorker(self, *args, **kwargs):
    """
    Quarantine a worker
    Quarantine a worker
    This method takes input: ``v1/quarantine-worker-request.json#``
    This method gives output: ``v1/worker-response.json#``
    This method is ``experimental``
    """
    return self._makeApiCall(self.funcinfo["quarantineWorker"], *args, **kwargs) | Quarantine a worker
Quarantine a worker
This method takes input: ``v1/quarantine-worker-request.json#``
This method gives output: ``v1/worker-response.json#``
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queue.py#L822-L835 |
taskcluster/taskcluster-client.py | taskcluster/queue.py | Queue.declareWorker | def declareWorker(self, *args, **kwargs):
    """
    Declare a worker
    Declare a worker, supplying some details about it.
    `declareWorker` allows updating one or more properties of a worker as long as the required scopes are
    possessed.
    This method takes input: ``v1/update-worker-request.json#``
    This method gives output: ``v1/worker-response.json#``
    This method is ``experimental``
    """
    return self._makeApiCall(self.funcinfo["declareWorker"], *args, **kwargs) | python | def declareWorker(self, *args, **kwargs):
    """
    Declare a worker
    Declare a worker, supplying some details about it.
    `declareWorker` allows updating one or more properties of a worker as long as the required scopes are
    possessed.
    This method takes input: ``v1/update-worker-request.json#``
    This method gives output: ``v1/worker-response.json#``
    This method is ``experimental``
    """
    return self._makeApiCall(self.funcinfo["declareWorker"], *args, **kwargs) | Declare a worker
Declare a worker, supplying some details about it.
`declareWorker` allows updating one or more properties of a worker as long as the required scopes are
possessed.
This method takes input: ``v1/update-worker-request.json#``
This method gives output: ``v1/worker-response.json#``
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queue.py#L837-L853 |
taskcluster/taskcluster-client.py | taskcluster/aio/index.py | Index.findTask | async def findTask(self, *args, **kwargs):
    """
    Find Indexed Task
    Find a task by index path, returning the highest-rank task with that path. If no
    task exists for the given path, this API end-point will respond with a 404 status.
    This method gives output: ``v1/indexed-task-response.json#``
    This method is ``stable``
    """
    return await self._makeApiCall(self.funcinfo["findTask"], *args, **kwargs) | python | async def findTask(self, *args, **kwargs):
    """
    Find Indexed Task
    Find a task by index path, returning the highest-rank task with that path. If no
    task exists for the given path, this API end-point will respond with a 404 status.
    This method gives output: ``v1/indexed-task-response.json#``
    This method is ``stable``
    """
    return await self._makeApiCall(self.funcinfo["findTask"], *args, **kwargs) | Find Indexed Task
Find a task by index path, returning the highest-rank task with that path. If no
task exists for the given path, this API end-point will respond with a 404 status.
This method gives output: ``v1/indexed-task-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/index.py#L127-L139 |
taskcluster/taskcluster-client.py | taskcluster/aio/index.py | Index.listNamespaces | async def listNamespaces(self, *args, **kwargs):
    """
    List Namespaces
    List the namespaces immediately under a given namespace.
    This endpoint lists up to 1000 namespaces. If more namespaces are present, a
    `continuationToken` will be returned, which can be given in the next
    request. For the initial request, the payload should be an empty JSON
    object.
    This method gives output: ``v1/list-namespaces-response.json#``
    This method is ``stable``
    """
    return await self._makeApiCall(self.funcinfo["listNamespaces"], *args, **kwargs) | python | async def listNamespaces(self, *args, **kwargs):
    """
    List Namespaces
    List the namespaces immediately under a given namespace.
    This endpoint lists up to 1000 namespaces. If more namespaces are present, a
    `continuationToken` will be returned, which can be given in the next
    request. For the initial request, the payload should be an empty JSON
    object.
    This method gives output: ``v1/list-namespaces-response.json#``
    This method is ``stable``
    """
    return await self._makeApiCall(self.funcinfo["listNamespaces"], *args, **kwargs) | List Namespaces
List the namespaces immediately under a given namespace.
This endpoint lists up to 1000 namespaces. If more namespaces are present, a
`continuationToken` will be returned, which can be given in the next
request. For the initial request, the payload should be an empty JSON
object.
This method gives output: ``v1/list-namespaces-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/index.py#L141-L158 |
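These `aio` variants must be awaited from a coroutine. A sketch of walking one level of the index, assuming the `aio` package re-exports its clients like the synchronous one and that a default HTTP session is created when none is supplied; the root URL and namespace are placeholders:

```python
import asyncio
import taskcluster.aio

async def child_namespaces(namespace):
    index = taskcluster.aio.Index({'rootUrl': 'https://tc.example.com'})
    # Initial request: the payload is an empty JSON object, per the docstring.
    result = await index.listNamespaces(namespace, {})
    return [ns['namespace'] for ns in result['namespaces']]

print(asyncio.get_event_loop().run_until_complete(child_namespaces('project')))
```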
taskcluster/taskcluster-client.py | taskcluster/aio/index.py | Index.listTasks | async def listTasks(self, *args, **kwargs):
    """
    List Tasks
    List the tasks immediately under a given namespace.
    This endpoint lists up to 1000 tasks. If more tasks are present, a
    `continuationToken` will be returned, which can be given in the next
    request. For the initial request, the payload should be an empty JSON
    object.
    **Remark**, this end-point is designed for humans browsing for tasks, not
    services, as that makes little sense.
    This method gives output: ``v1/list-tasks-response.json#``
    This method is ``stable``
    """
    return await self._makeApiCall(self.funcinfo["listTasks"], *args, **kwargs) | python | async def listTasks(self, *args, **kwargs):
    """
    List Tasks
    List the tasks immediately under a given namespace.
    This endpoint lists up to 1000 tasks. If more tasks are present, a
    `continuationToken` will be returned, which can be given in the next
    request. For the initial request, the payload should be an empty JSON
    object.
    **Remark**, this end-point is designed for humans browsing for tasks, not
    services, as that makes little sense.
    This method gives output: ``v1/list-tasks-response.json#``
    This method is ``stable``
    """
    return await self._makeApiCall(self.funcinfo["listTasks"], *args, **kwargs) | List Tasks
List the tasks immediately under a given namespace.
This endpoint lists up to 1000 tasks. If more tasks are present, a
`continuationToken` will be returned, which can be given in the next
request. For the initial request, the payload should be an empty JSON
object.
**Remark**, this end-point is designed for humans browsing for tasks, not
services, as that makes little sense.
This method gives output: ``v1/list-tasks-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/index.py#L160-L180 |
taskcluster/taskcluster-client.py | taskcluster/aio/index.py | Index.insertTask | async def insertTask(self, *args, **kwargs):
    """
    Insert Task into Index
    Insert a task into the index. If the new rank is less than the existing rank
    at the given index path, the task is not indexed but the response is still 200 OK.
    Please see the introduction above for information
    about indexing successfully completed tasks automatically using custom routes.
    This method takes input: ``v1/insert-task-request.json#``
    This method gives output: ``v1/indexed-task-response.json#``
    This method is ``stable``
    """
    return await self._makeApiCall(self.funcinfo["insertTask"], *args, **kwargs) | python | async def insertTask(self, *args, **kwargs):
    """
    Insert Task into Index
    Insert a task into the index. If the new rank is less than the existing rank
    at the given index path, the task is not indexed but the response is still 200 OK.
    Please see the introduction above for information
    about indexing successfully completed tasks automatically using custom routes.
    This method takes input: ``v1/insert-task-request.json#``
    This method gives output: ``v1/indexed-task-response.json#``
    This method is ``stable``
    """
    return await self._makeApiCall(self.funcinfo["insertTask"], *args, **kwargs) | Insert Task into Index
Insert a task into the index. If the new rank is less than the existing rank
at the given index path, the task is not indexed but the response is still 200 OK.
Please see the introduction above for information
about indexing successfully completed tasks automatically using custom routes.
This method takes input: ``v1/insert-task-request.json#``
This method gives output: ``v1/indexed-task-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/index.py#L182-L199 |
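A sketch of manually indexing a task under the rank rule above (a lower-rank insert is accepted but silently ignored). Field names follow `v1/insert-task-request.json#`; the namespace, rank and data values are placeholders:

```python
import taskcluster

async def publish_latest(index, task_id):
    # index: a taskcluster.aio.Index client, as in the earlier sketch.
    return await index.insertTask('project.myapp.builds.latest', {
        'taskId': task_id,
        'rank': 1549302400,  # e.g. a push timestamp; the highest rank wins
        'data': {'revision': 'abc123'},
        'expires': taskcluster.fromNowJSON('1 year'),
    })
```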
taskcluster/taskcluster-client.py | taskcluster/aio/index.py | Index.findArtifactFromTask | async def findArtifactFromTask(self, *args, **kwargs):
"""
Get Artifact From Indexed Task
Find a task by index path and redirect to the artifact on the most recent
run with the given `name`.
Note that multiple calls to this endpoint may return artifacts from differen tasks
if a new task is inserted into the index between calls. Avoid using this method as
a stable link to multiple, connected files if the index path does not contain a
unique identifier. For example, the following two links may return unrelated files:
* https://tc.example.com/api/index/v1/task/some-app.win64.latest.installer/artifacts/public/installer.exe`
* https://tc.example.com/api/index/v1/task/some-app.win64.latest.installer/artifacts/public/debug-symbols.zip`
This problem be remedied by including the revision in the index path or by bundling both
installer and debug symbols into a single artifact.
If no task exists for the given index path, this API end-point responds with 404.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["findArtifactFromTask"], *args, **kwargs) | python | async def findArtifactFromTask(self, *args, **kwargs):
"""
Get Artifact From Indexed Task
Find a task by index path and redirect to the artifact on the most recent
run with the given `name`.
Note that multiple calls to this endpoint may return artifacts from differen tasks
if a new task is inserted into the index between calls. Avoid using this method as
a stable link to multiple, connected files if the index path does not contain a
unique identifier. For example, the following two links may return unrelated files:
* https://tc.example.com/api/index/v1/task/some-app.win64.latest.installer/artifacts/public/installer.exe`
* https://tc.example.com/api/index/v1/task/some-app.win64.latest.installer/artifacts/public/debug-symbols.zip`
This problem be remedied by including the revision in the index path or by bundling both
installer and debug symbols into a single artifact.
If no task exists for the given index path, this API end-point responds with 404.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["findArtifactFromTask"], *args, **kwargs) | Get Artifact From Indexed Task
Find a task by index path and redirect to the artifact on the most recent
run with the given `name`.
Note that multiple calls to this endpoint may return artifacts from differen tasks
if a new task is inserted into the index between calls. Avoid using this method as
a stable link to multiple, connected files if the index path does not contain a
unique identifier. For example, the following two links may return unrelated files:
* https://tc.example.com/api/index/v1/task/some-app.win64.latest.installer/artifacts/public/installer.exe`
* https://tc.example.com/api/index/v1/task/some-app.win64.latest.installer/artifacts/public/debug-symbols.zip`
This problem be remedied by including the revision in the index path or by bundling both
installer and debug symbols into a single artifact.
If no task exists for the given index path, this API end-point responds with 404.
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/index.py#L201-L223 |
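Since this endpoint answers with a redirect, a common pattern is to build the URL rather than call it directly; a sketch with the sync client, where the index path and artifact name are placeholders:
```
import taskcluster

index = taskcluster.Index({'rootUrl': 'https://tc.example.com'})
# Embedding a revision in the path sidesteps the unrelated-files pitfall above.
url = index.buildUrl(
    'findArtifactFromTask',
    'my-app.win64.rev-abc123.installer',   # hypothetical index path
    'public/installer.exe',                # artifact name
)
print(url)
```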
taskcluster/taskcluster-client.py | taskcluster/aio/purgecache.py | PurgeCache.allPurgeRequests | async def allPurgeRequests(self, *args, **kwargs):
"""
All Open Purge Requests
This is useful mostly for administrators to view
the set of open purge requests. It should not
be used by workers. They should use the purgeRequests
endpoint that is specific to their workerType and
provisionerId.
This method gives output: ``v1/all-purge-cache-request-list.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["allPurgeRequests"], *args, **kwargs) | python | async def allPurgeRequests(self, *args, **kwargs):
"""
All Open Purge Requests
This is useful mostly for administrators to view
the set of open purge requests. It should not
be used by workers. They should use the purgeRequests
endpoint that is specific to their workerType and
provisionerId.
This method gives output: ``v1/all-purge-cache-request-list.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["allPurgeRequests"], *args, **kwargs) | All Open Purge Requests
This is useful mostly for administrators to view
the set of open purge requests. It should not
be used by workers. They should use the purgeRequests
endpoint that is specific to their workerType and
provisionerId.
This method gives output: ``v1/all-purge-cache-request-list.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/purgecache.py#L55-L70 |
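A sketch of paging through all open purge requests with the async client; the ``continuationToken`` handling and the response field names are assumptions based on ``v1/all-purge-cache-request-list.json#``:
```
import asyncio

import taskcluster.aio

async def dump_purge_requests():
    pc = taskcluster.aio.PurgeCache({'rootUrl': 'https://tc.example.com'})
    query = {}
    while True:
        resp = await pc.allPurgeRequests(query=query)
        for req in resp.get('requests', []):
            print(req['provisionerId'], req['workerType'], req['cacheName'])
        if 'continuationToken' not in resp:
            break
        query = {'continuationToken': resp['continuationToken']}

asyncio.run(dump_purge_requests())
```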
taskcluster/taskcluster-client.py | taskcluster/aio/purgecache.py | PurgeCache.purgeRequests | async def purgeRequests(self, *args, **kwargs):
"""
Open Purge Requests for a provisionerId/workerType pair
List of caches that need to be purged if they are from before
a certain time. This is safe for workers to use in
automation.
This method gives output: ``v1/purge-cache-request-list.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["purgeRequests"], *args, **kwargs) | python | async def purgeRequests(self, *args, **kwargs):
"""
Open Purge Requests for a provisionerId/workerType pair
List of caches that need to be purged if they are from before
a certain time. This is safe for workers to use in
automation.
This method gives output: ``v1/purge-cache-request-list.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["purgeRequests"], *args, **kwargs) | Open Purge Requests for a provisionerId/workerType pair
List of caches that need to be purged if they are from before
a certain time. This is safe for workers to use in
automation.
This method gives output: ``v1/purge-cache-request-list.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/purgecache.py#L72-L85 |
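For the worker side, a sketch of acting on purge requests; the provisionerId/workerType, the cache bookkeeping, and the field names (from ``v1/purge-cache-request-list.json#``) are assumptions:
```
import asyncio

import taskcluster.aio

# Hypothetical record of when each local cache was created (ISO 8601, UTC).
cache_created = {'my-build-cache': '2019-01-01T00:00:00.000Z'}

async def maybe_purge():
    pc = taskcluster.aio.PurgeCache({'rootUrl': 'https://tc.example.com'})
    resp = await pc.purgeRequests('my-provisioner', 'my-worker-type')
    for req in resp.get('requests', []):
        created = cache_created.get(req['cacheName'])
        # Same-format ISO 8601 UTC strings compare correctly as plain strings.
        if created is not None and created < req['before']:
            print('would purge', req['cacheName'])  # a real worker deletes the cache here

asyncio.run(maybe_purge())
```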
jart/fabulous | fabulous/term.py | Term.write | def write(self, text):
"""Parses text and prints proper output to the terminal
This method will extract escape codes from the text and
handle them as well as possible for whichever platform
is being used. At the moment only the display escape codes
are supported.
"""
escape_parts = re.compile('\x01?\x1b\\[([0-9;]*)m\x02?')
chunks = escape_parts.split(text)
i = 0
for chunk in chunks:
if chunk != '':
if i % 2 == 0:
self.stream.write(chunk)
else:
c = chunk.split(';')
r = Magic.rdisplay(c)
self.display(**r) #see caveat 0
self.flush()
i += 1 | python | def write(self, text):
"""Parses text and prints proper output to the terminal
This method will extract escape codes from the text and
handle them as well as possible for whichever platform
is being used. At the moment only the display escape codes
are supported.
"""
escape_parts = re.compile('\x01?\x1b\\[([0-9;]*)m\x02?')
chunks = escape_parts.split(text)
i = 0
for chunk in chunks:
if chunk != '':
if i % 2 == 0:
self.stream.write(chunk)
else:
c = chunk.split(';')
r = Magic.rdisplay(c)
self.display(**r) #see caveat 0
self.flush()
i += 1 | Parses text and prints proper output to the terminal
This method will extract escape codes from the text and
handle them as well as possible for whichever platform
is being used. At the moment only the display escape codes
are supported. | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/term.py#L179-L199 |
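To see what the escape-splitting regex in ``write`` actually produces, here is a standalone snippet (plain ``re``, no fabulous import) showing the alternation between literal text and code chunks:
```
import re

escape_parts = re.compile('\x01?\x1b\\[([0-9;]*)m\x02?')
chunks = escape_parts.split('plain \x1b[1;34mbold blue\x1b[0m plain again')
print(chunks)
# ['plain ', '1;34', 'bold blue', '0', ' plain again']
# Even-indexed chunks go straight to the stream; odd-indexed chunks are
# split on ';' and decoded by Magic.rdisplay into display() arguments.
```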
jart/fabulous | fabulous/term.py | UnixTerm.getch | def getch(self):
"""Don't use this yet
It doesn't belong here but I haven't yet thought about a proper
way to implement this feature and the features that will depend on
it.
"""
return NotImplemented
fno = stdout.fileno()
mode = self.termios.tcgetattr(fno)
try:
self.tty.setraw(fno, self.termios.TCSANOW)
ch = self.read(1)
finally:
self.termios.tcsetattr(fno, self.termios.TCSANOW, mode)
return ch | python | def getch(self):
"""Don't use this yet
It doesn't belong here but I haven't yet thought about a proper
way to implement this feature and the features that will depend on
it.
"""
return NotImplemented
fno = stdout.fileno()
mode = self.termios.tcgetattr(fno)
try:
self.tty.setraw(fno, self.termios.TCSANOW)
ch = self.read(1)
finally:
self.termios.tcsetattr(fno, self.termios.TCSANOW, mode)
return ch | Don't use this yet
It doesn't belong here but I haven't yet thought about a proper
way to implement this feature and the features that will depend on
it. | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/term.py#L273-L288 |
jart/fabulous | fabulous/term.py | CursesTerm.display | def display(self, codes=[], fg=None, bg=None):
"""Displays the codes using ANSI escapes
"""
codes, fg, bg = Magic.displayformat(codes, fg, bg)
self.stream.write(Magic.display(codes, fg, bg))
self.flush() | python | def display(self, codes=[], fg=None, bg=None):
"""Displays the codes using ANSI escapes
"""
codes, fg, bg = Magic.displayformat(codes, fg, bg)
self.stream.write(Magic.display(codes, fg, bg))
self.flush() | Displays the codes using ANSI escapes | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/term.py#L302-L307 |
jart/fabulous | fabulous/term.py | CursesTerm.move | def move(self, place, distance = 1):
"""see doc in Term class"""
for d in range(distance):
self.stream.write(self._get_cap('move '+place))
self.flush() | python | def move(self, place, distance = 1):
"""see doc in Term class"""
for d in range(distance):
self.stream.write(self._get_cap('move '+place))
self.flush() | see doc in Term class | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/term.py#L309-L313 |
jart/fabulous | fabulous/term.py | CursesTerm.clear | def clear(self, scope = 'screen'):
"""see doc in Term class"""
if scope == 'line':
self.clear('beginning of line')
self.clear('end of line')
else: self.stream.write(self._get_cap('clear '+scope))
self.flush() | python | def clear(self, scope = 'screen'):
"""see doc in Term class"""
if scope == 'line':
self.clear('beginning of line')
self.clear('end of line')
else: self.stream.write(self._get_cap('clear '+scope))
self.flush() | see doc in Term class | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/term.py#L315-L321 |
jart/fabulous | fabulous/term.py | CursesTerm.get_size | def get_size(self):
"""see doc in Term class"""
self.curses.setupterm()
return self.curses.tigetnum('cols'), self.curses.tigetnum('lines') | python | def get_size(self):
"""see doc in Term class"""
self.curses.setupterm()
return self.curses.tigetnum('cols'), self.curses.tigetnum('lines') | see doc in Term class | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/term.py#L323-L326 |
jart/fabulous | fabulous/term.py | WinTerm.display | def display(self, codes=[], fg=None, bg=None):
"""Displays codes using Windows kernel calls
"""
codes, fg, bg = Magic.displayformat(codes, fg, bg)
color = 0
for c in codes:
try:
f = getattr(self, '_display_' + c)
out = f()
if out: color |= out
except AttributeError:
pass
cfg, cfgi, cbg, cbgi = self._split_attributes(
self._get_console_info()['attributes'])
if self.reverse_input:
cfg, cbg = (cbg // 0x10), (cfg * 0x10)
cfgi, cbgi = (cbgi // 0x10), (cfgi * 0x10)
if fg != None:
color |= self.FG[fg]
self.real_fg = self.FG[fg]
else: color |= cfg
if bg != None:
color |= self.BG[bg]
else: color |= cbg
color |= (cfgi | cbgi)
fg, fgi, bg, bgi = self._split_attributes(color)
if self.dim_output:
# intense black
fg = 0
fgi = self.FG_INTENSITY
if self.reverse_output:
fg, bg = (bg // 0x10), (fg * 0x10)
fgi, bgi = (bgi // 0x10), (fgi * 0x10)
self.reverse_input = True
if self.hidden_output:
fg = (bg // 0x10)
fgi = (bgi // 0x10)
self._set_attributes(fg | fgi | bg | bgi) | python | def display(self, codes=[], fg=None, bg=None):
"""Displays codes using Windows kernel calls
"""
codes, fg, bg = Magic.displayformat(codes, fg, bg)
color = 0
for c in codes:
try:
f = getattr(self, '_display_' + c)
out = f()
if out: color |= out
except AttributeError:
pass
cfg, cfgi, cbg, cbgi = self._split_attributes(
self._get_console_info()['attributes'])
if self.reverse_input:
cfg, cbg = (cbg // 0x10), (cfg * 0x10)
cfgi, cbgi = (cbgi // 0x10), (cfgi * 0x10)
if fg != None:
color |= self.FG[fg]
self.real_fg = self.FG[fg]
else: color |= cfg
if bg != None:
color |= self.BG[bg]
else: color |= cbg
color |= (cfgi | cbgi)
fg, fgi, bg, bgi = self._split_attributes(color)
if self.dim_output:
# intense black
fg = 0
fgi = self.FG_INTENSITY
if self.reverse_output:
fg, bg = (bg // 0x10), (fg * 0x10)
fgi, bgi = (bgi // 0x10), (fgi * 0x10)
self.reverse_input = True
if self.hidden_output:
fg = (bg // 0x10)
fgi = (bgi // 0x10)
self._set_attributes(fg | fgi | bg | bgi) | Displays codes using Windows kernel calls | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/term.py#L429-L466 |
jart/fabulous | fabulous/term.py | WinTerm.get_size | def get_size(self):
"""see doc in Term class"""
attr = self._get_console_info()
cols = attr['window']['right'] - attr['window']['left'] + 1
lines = attr['window']['bottom'] - attr['window']['top'] + 1
return cols, lines | python | def get_size(self):
"""see doc in Term class"""
attr = self._get_console_info()
cols = attr['window']['right'] - attr['window']['left'] + 1
lines = attr['window']['bottom'] - attr['window']['top'] + 1
return cols, lines | see doc in Term class | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/term.py#L468-L473 |
jart/fabulous | fabulous/term.py | WinTerm._split_attributes | def _split_attributes(self, attrs):
"""Split attribute code
Takes an attribute code and returns a tuple containing
foreground (fg), foreground intensity (fgi), background (bg), and
background intensity (bgi)
Attributes can be joined using ``fg | fgi | bg | bgi``
"""
fg = attrs & self.FG_ALL
fgi = attrs & self.FG_INTENSITY
bg = attrs & self.BG_ALL
bgi = attrs & self.BG_INTENSITY
return fg, fgi, bg, bgi | python | def _split_attributes(self, attrs):
"""Split attribute code
Takes an attribute code and returns a tuple containing
foreground (fg), foreground intensity (fgi), background (bg), and
background intensity (bgi)
Attributes can be joined using ``fg | fgi | bg | bgi``
"""
fg = attrs & self.FG_ALL
fgi = attrs & self.FG_INTENSITY
bg = attrs & self.BG_ALL
bgi = attrs & self.BG_INTENSITY
return fg, fgi, bg, bgi | Split attribute code
Takes an attribute code and returns a tuple containing
foreground (fg), foreground intensity (fgi), background (bg), and
background intensity (bgi)
Attributes can be joined using ``fg | fgi | bg | bgi`` | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/term.py#L517-L530 |
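A self-contained sketch of the same split, assuming the standard Win32 console attribute layout (``FOREGROUND_*`` in the low nibble, ``BACKGROUND_*`` in the next); the constant values are assumptions matching those flags:
```
FG_ALL, FG_INTENSITY = 0x0007, 0x0008   # red|green|blue, intensity bit
BG_ALL, BG_INTENSITY = 0x0070, 0x0080

def split_attributes(attrs):
    return (attrs & FG_ALL, attrs & FG_INTENSITY,
            attrs & BG_ALL, attrs & BG_INTENSITY)

# Bright white text on a blue background: 0x07 | 0x08 | 0x10 == 0x1F
fg, fgi, bg, bgi = split_attributes(0x1F)
assert (fg, fgi, bg, bgi) == (0x07, 0x08, 0x10, 0x00)
assert fg | fgi | bg | bgi == 0x1F      # the pieces re-join with bitwise OR
```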
jart/fabulous | fabulous/term.py | WinTerm.move | def move(self, place, distance = 1):
"""see doc in Term class"""
x, y = self._get_position()
if place == 'up':
y -= distance
elif place == 'down':
for i in range(distance): print
nx, ny = self._get_position()
y = ny
self.move('beginning of line')
elif place == 'left':
x -= distance
elif place == 'right':
x += distance
elif place == 'beginning of line':
x = 0
elif place == 'beginning of screen':
x = 0
y = self._get_console_info()['window']['top']
else:
raise ValueError("invalid place to move")
self._set_position((x, y)) | python | def move(self, place, distance = 1):
"""see doc in Term class"""
x, y = self._get_position()
if place == 'up':
y -= distance
elif place == 'down':
for i in range(distance): print
nx, ny = self._get_position()
y = ny
self.move('beginning of line')
elif place == 'left':
x -= distance
elif place == 'right':
x += distance
elif place == 'beginning of line':
x = 0
elif place == 'beginning of screen':
x = 0
y = self._get_console_info()['window']['top']
else:
raise ValueError("invalid place to move")
self._set_position((x, y)) | see doc in Term class | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/term.py#L578-L599 |
jart/fabulous | fabulous/term.py | WinTerm.clear | def clear(self, scope = 'screen'):
"""see doc in Term class
According to http://support.microsoft.com/kb/99261 the best way
to clear the console is to write out empty spaces
"""
#TODO: clear attributes too
if scope == 'screen':
bos = (0, self._get_console_info()['window']['top'])
cols, lines = self.get_size()
length = cols * lines
self._clear_console(length, bos)
self.move('beginning of screen')
elif scope == 'beginning of line':
curx, cury = self._get_position()
# clear from column 0 up to and including the cursor cell
self._clear_console(curx + 1, (0, cury))
self._set_position((curx, cury))
elif scope == 'end of line':
curx, cury = self._get_position()
cols, lines = self.get_size()
coord = (curx, cury)
length = cols - curx
self._clear_console(length, coord)
elif scope == 'end of screen':
curx, cury = self._get_position()
coord = (curx, cury)
cols, lines = self.get_size()
length = (lines - cury) * cols - curx
self._clear_console(length, coord)
elif scope == 'line':
curx, cury = self._get_position()
coord = (0, cury)
cols, lines = self.get_size()
self._clear_console(cols, coord)
self._set_position((curx, cury))
elif scope == 'left':
self.move('left')
self.write(' ')
elif scope == 'right':
self.write(' ')
self.move('left')
else:
raise ValueError("invalid scope to clear") | python | def clear(self, scope = 'screen'):
"""see doc in Term class
According to http://support.microsoft.com/kb/99261 the best way
to clear the console is to write out empty spaces
"""
#TODO: clear attributes too
if scope == 'screen':
bos = (0, self._get_console_info()['window']['top'])
cols, lines = self.get_size()
length = cols * lines
self._clear_console(length, bos)
self.move('beginning of screen')
elif scope == 'beginning of line':
curx, cury = self._get_position()
# clear from column 0 up to and including the cursor cell
self._clear_console(curx + 1, (0, cury))
self._set_position((curx, cury))
elif scope == 'end of line':
curx, cury = self._get_position()
cols, lines = self.get_size()
coord = (curx, cury)
length = cols - curx
self._clear_console(length, coord)
elif scope == 'end of screen':
curx, cury = self._get_position()
coord = (curx, cury)
cols, lines = self.get_size()
length = (lines - cury) * cols - curx
self._clear_console(length, coord)
elif scope == 'line':
curx, cury = self._get_position()
coord = (0, cury)
cols, lines = self.get_size()
self._clear_console(cols, coord)
self._set_position((curx, cury))
elif scope == 'left':
self.move('left')
self.write(' ')
elif scope == 'right':
self.write(' ')
self.move('left')
else:
raise ValueError("invalid scope to clear") | see doc in Term class
According to http://support.microsoft.com/kb/99261 the best way
to clear the console is to write out empty spaces | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/term.py#L601-L641 |
jart/fabulous | fabulous/term.py | WinCTypesTerm._get_title | def _get_title(self):
"""According to http://support.microsoft.com/kb/124103 the buffer
size is 1024
Does not support unicode, only ANSI"""
#TODO: unicode support
strbuffer = self.ctypes.create_string_buffer(1024)
size = self.ctypes.c_short(1024)
#unicode versions are (Get|Set)ConsoleTitleW
self.ctypes.windll.kernel32.GetConsoleTitleA(strbuffer, size)
return strbuffer.value | python | def _get_title(self):
"""According to http://support.microsoft.com/kb/124103 the buffer
size is 1024
Does not support unicode, only ANSI"""
#TODO: unicode support
strbuffer = self.ctypes.create_string_buffer(1024)
size = self.ctypes.c_short(1024)
#unicode versions are (Get|Set)ConsoleTitleW
self.ctypes.windll.kernel32.GetConsoleTitleA(strbuffer, size)
return strbuffer.value | According to http://support.microsoft.com/kb/124103 the buffer
size is 1024
Does not support unicode, only ANSI | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/term.py#L758-L768 |
jart/fabulous | fabulous/term.py | WinCTypesTerm._get_coord | def _get_coord(self, coord):
""" It's a hack, see fixcoord in pyreadline's console.py (revision
1289)
"""
x, y = coord
return self.ctypes.c_int(y << 16 | x) | python | def _get_coord(self, coord):
""" It's a hack, see fixcoord in pyreadline's console.py (revision
1289)
"""
x, y = coord
return self.ctypes.c_int(y << 16 | x) | It's a hack, see fixcoord in pyreadline's console.py (revision
1289) | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/term.py#L789-L794 |
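The packing trick is easy to verify in isolation with plain ctypes:
```
import ctypes

def pack_coord(x, y):
    # A COORD struct is two adjacent SHORTs, so packing y into the high
    # 16 bits and x into the low 16 bits matches its in-memory layout.
    return ctypes.c_int(y << 16 | x)

print(hex(pack_coord(5, 3).value))   # 0x30005 -> x=5, y=3
```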
jart/fabulous | fabulous/term.py | Magic.displayformat | def displayformat(codes=[], fg=None, bg=None):
"""Makes sure all arguments are valid"""
if isinstance(codes, basestring):
codes = [codes]
else:
codes = list(codes)
for code in codes:
if code not in Magic.DISPLAY.keys():
raise ValueError("'%s' not a valid display value" % code)
for color in (fg, bg):
if color != None:
if color not in Magic.COLORS.keys():
raise ValueError("'%s' not a valid color" % color)
return [codes, fg, bg] | python | def displayformat(codes=[], fg=None, bg=None):
"""Makes sure all arguments are valid"""
if isinstance(codes, basestring):
codes = [codes]
else:
codes = list(codes)
for code in codes:
if code not in Magic.DISPLAY.keys():
raise ValueError("'%s' not a valid display value" % code)
for color in (fg, bg):
if color != None:
if color not in Magic.COLORS.keys():
raise ValueError("'%s' not a valid color" % color)
return [codes, fg, bg] | Makes sure all arguments are valid | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/term.py#L825-L838 |
jart/fabulous | fabulous/term.py | Magic.rdisplay | def rdisplay(codes):
"""Reads a list of codes and generates dict
>>> Magic.rdisplay([])
{}
>>> result = Magic.rdisplay([1,2,34,46])
>>> sorted(result.keys())
['bg', 'codes', 'fg']
>>> sorted(result['codes'])
['bright', 'dim']
>>> result['bg']
'cyan'
>>> result['fg']
'blue'
"""
dcodes = []
fg = bg = None
for code in codes:
code = int(code)
offset = code // 10
decimal = code % 10
if offset == 3 and decimal in Magic.COLORS.values(): fg = decimal
elif offset == 4 and decimal in Magic.COLORS.values(): bg = decimal
elif code in Magic.DISPLAY.values(): dcodes.append(code)
else: pass # drop unhandled values
r = {}
if len(dcodes): r['codes'] = [Magic.rDISPLAY[c] for c in dcodes]
if fg != None: r['fg'] = Magic.rCOLORS[fg]
if bg != None: r['bg'] = Magic.rCOLORS[bg]
return r | python | def rdisplay(codes):
"""Reads a list of codes and generates dict
>>> Magic.rdisplay([])
{}
>>> result = Magic.rdisplay([1,2,34,46])
>>> sorted(result.keys())
['bg', 'codes', 'fg']
>>> sorted(result['codes'])
['bright', 'dim']
>>> result['bg']
'cyan'
>>> result['fg']
'blue'
"""
dcodes = []
fg = bg = None
for code in codes:
code = int(code)
offset = code // 10
decimal = code % 10
if offset == 3 and decimal in Magic.COLORS.values(): fg = decimal
elif offset == 4 and decimal in Magic.COLORS.values(): bg = decimal
elif code in Magic.DISPLAY.values(): dcodes.append(code)
else: pass # drop unhandled values
r = {}
if len(dcodes): r['codes'] = [Magic.rDISPLAY[c] for c in dcodes]
if fg != None: r['fg'] = Magic.rCOLORS[fg]
if bg != None: r['bg'] = Magic.rCOLORS[bg]
return r | Reads a list of codes and generates dict
>>> Magic.rdisplay([])
{}
>>> result = Magic.rdisplay([1,2,34,46])
>>> sorted(result.keys())
['bg', 'codes', 'fg']
>>> sorted(result['codes'])
['bright', 'dim']
>>> result['bg']
'cyan'
>>> result['fg']
'blue' | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/term.py#L841-L870 |
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.listClients | def listClients(self, *args, **kwargs):
"""
List Clients
Get a list of all clients. With `prefix`, only clients for which
it is a prefix of the clientId are returned.
By default this end-point will try to return up to 1000 clients in one
request. But it **may return fewer, even none**.
It may also return a `continuationToken` even though there are no more
results. However, you can only be sure to have seen all results if you
keep calling `listClients` with the last `continuationToken` until you
get a result without a `continuationToken`.
This method gives output: ``v1/list-clients-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listClients"], *args, **kwargs) | python | def listClients(self, *args, **kwargs):
"""
List Clients
Get a list of all clients. With `prefix`, only clients for which
it is a prefix of the clientId are returned.
By default this end-point will try to return up to 1000 clients in one
request. But it **may return fewer, even none**.
It may also return a `continuationToken` even though there are no more
results. However, you can only be sure to have seen all results if you
keep calling `listClients` with the last `continuationToken` until you
get a result without a `continuationToken`.
This method gives output: ``v1/list-clients-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listClients"], *args, **kwargs) | List Clients
Get a list of all clients. With `prefix`, only clients for which
it is a prefix of the clientId are returned.
By default this end-point will try to return up to 1000 clients in one
request. But it **may return fewer, even none**.
It may also return a `continuationToken` even though there are no more
results. However, you can only be sure to have seen all results if you
keep calling `listClients` with the last `continuationToken` until you
get a result without a `continuationToken`.
This method gives output: ``v1/list-clients-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L74-L93 |
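The continuation-token loop described above, sketched with the sync client; the prefix is a placeholder and the ``clients`` field name is an assumption from ``v1/list-clients-response.json#``:
```
import taskcluster

auth = taskcluster.Auth({'rootUrl': 'https://tc.example.com'})
query = {'prefix': 'project/my-project/'}   # hypothetical clientId prefix
while True:
    resp = auth.listClients(query=query)
    for client in resp.get('clients', []):
        print(client['clientId'])
    if 'continuationToken' not in resp:
        break                               # only now have we seen all results
    query['continuationToken'] = resp['continuationToken']
```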
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.client | def client(self, *args, **kwargs):
"""
Get Client
Get information about a single client.
This method gives output: ``v1/get-client-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["client"], *args, **kwargs) | python | def client(self, *args, **kwargs):
"""
Get Client
Get information about a single client.
This method gives output: ``v1/get-client-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["client"], *args, **kwargs) | Get Client
Get information about a single client.
This method gives output: ``v1/get-client-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L95-L106 |
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.createClient | def createClient(self, *args, **kwargs):
"""
Create Client
Create a new client and get the `accessToken` for this client.
You should store the `accessToken` from this API call as there is no
other way to retrieve it.
If you lose the `accessToken` you can call `resetAccessToken` to reset
it, and a new `accessToken` will be returned, but you cannot retrieve the
current `accessToken`.
If a client with the same `clientId` already exists this operation will
fail. Use `updateClient` if you wish to update an existing client.
The caller's scopes must satisfy `scopes`.
This method takes input: ``v1/create-client-request.json#``
This method gives output: ``v1/create-client-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["createClient"], *args, **kwargs) | python | def createClient(self, *args, **kwargs):
"""
Create Client
Create a new client and get the `accessToken` for this client.
You should store the `accessToken` from this API call as there is no
other way to retrieve it.
If you lose the `accessToken` you can call `resetAccessToken` to reset
it, and a new `accessToken` will be returned, but you cannot retrieve the
current `accessToken`.
If a client with the same `clientId` already exists this operation will
fail. Use `updateClient` if you wish to update an existing client.
The caller's scopes must satisfy `scopes`.
This method takes input: ``v1/create-client-request.json#``
This method gives output: ``v1/create-client-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["createClient"], *args, **kwargs) | Create Client
Create a new client and get the `accessToken` for this client.
You should store the `accessToken` from this API call as there is no
other way to retrieve it.
If you lose the `accessToken` you can call `resetAccessToken` to reset
it, and a new `accessToken` will be returned, but you cannot retrieve the
current `accessToken`.
If a client with the same `clientId` already exists this operation will
fail. Use `updateClient` if you wish to update an existing client.
The caller's scopes must satisfy `scopes`.
This method takes input: ``v1/create-client-request.json#``
This method gives output: ``v1/create-client-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L108-L132 |
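A sketch of creating a client and capturing the one-time ``accessToken``; the credentials, clientId, scopes, and ``save_secret`` helper are hypothetical, and the payload follows ``v1/create-client-request.json#``:
```
import taskcluster

auth = taskcluster.Auth({
    'rootUrl': 'https://tc.example.com',
    'credentials': {'clientId': 'my-admin-client', 'accessToken': 'snip'},  # placeholders
})
created = auth.createClient('project/my-project/ci', {   # hypothetical clientId
    'description': 'CI client for my-project',
    'expires': taskcluster.fromNowJSON('1 year'),
    'scopes': ['queue:create-task:my-provisioner/my-worker-type'],
})

def save_secret(name, value):   # hypothetical secret store
    print('storing', name)

# Store the token now: it cannot be retrieved later, only reset.
save_secret('ci-access-token', created['accessToken'])
```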
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.updateClient | def updateClient(self, *args, **kwargs):
"""
Update Client
Update an existing client. The `clientId` and `accessToken` cannot be
updated, but `scopes` can be modified. The caller's scopes must
satisfy all scopes being added to the client in the update operation.
If no scopes are given in the request, the client's scopes remain
unchanged.
This method takes input: ``v1/create-client-request.json#``
This method gives output: ``v1/get-client-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["updateClient"], *args, **kwargs) | python | def updateClient(self, *args, **kwargs):
"""
Update Client
Update an existing client. The `clientId` and `accessToken` cannot be
updated, but `scopes` can be modified. The caller's scopes must
satisfy all scopes being added to the client in the update operation.
If no scopes are given in the request, the client's scopes remain
unchanged.
This method takes input: ``v1/create-client-request.json#``
This method gives output: ``v1/get-client-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["updateClient"], *args, **kwargs) | Update Client
Update an existing client. The `clientId` and `accessToken` cannot be
updated, but `scopes` can be modified. The caller's scopes must
satisfy all scopes being added to the client in the update operation.
If no scopes are given in the request, the client's scopes remain
unchanged.
This method takes input: ``v1/create-client-request.json#``
This method gives output: ``v1/get-client-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L152-L169 |
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.enableClient | def enableClient(self, *args, **kwargs):
"""
Enable Client
Enable a client that was disabled with `disableClient`. If the client
is already enabled, this does nothing.
This is typically used by identity providers to re-enable clients that
had been disabled when the corresponding identity's scopes changed.
This method gives output: ``v1/get-client-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["enableClient"], *args, **kwargs) | python | def enableClient(self, *args, **kwargs):
"""
Enable Client
Enable a client that was disabled with `disableClient`. If the client
is already enabled, this does nothing.
This is typically used by identity providers to re-enable clients that
had been disabled when the corresponding identity's scopes changed.
This method gives output: ``v1/get-client-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["enableClient"], *args, **kwargs) | Enable Client
Enable a client that was disabled with `disableClient`. If the client
is already enabled, this does nothing.
This is typically used by identity providers to re-enable clients that
had been disabled when the corresponding identity's scopes changed.
This method gives output: ``v1/get-client-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L171-L186 |
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.disableClient | def disableClient(self, *args, **kwargs):
"""
Disable Client
Disable a client. If the client is already disabled, this does nothing.
This is typically used by identity providers to disable clients when the
corresponding identity's scopes no longer satisfy the client's scopes.
This method gives output: ``v1/get-client-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["disableClient"], *args, **kwargs) | python | def disableClient(self, *args, **kwargs):
"""
Disable Client
Disable a client. If the client is already disabled, this does nothing.
This is typically used by identity providers to disable clients when the
corresponding identity's scopes no longer satisfy the client's scopes.
This method gives output: ``v1/get-client-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["disableClient"], *args, **kwargs) | Disable Client
Disable a client. If the client is already disabled, this does nothing.
This is typically used by identity providers to disable clients when the
corresponding identity's scopes no longer satisfy the client's scopes.
This method gives output: ``v1/get-client-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L188-L202 |
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.deleteClient | def deleteClient(self, *args, **kwargs):
"""
Delete Client
Delete a client; please note that any roles related to this client must
be deleted independently.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["deleteClient"], *args, **kwargs) | python | def deleteClient(self, *args, **kwargs):
"""
Delete Client
Delete a client; please note that any roles related to this client must
be deleted independently.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["deleteClient"], *args, **kwargs) | Delete Client
Delete a client; please note that any roles related to this client must
be deleted independently.
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L204-L214 |
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.listRoles | def listRoles(self, *args, **kwargs):
"""
List Roles
Get a list of all roles; each role object also includes the list of
scopes it expands to.
This method gives output: ``v1/list-roles-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listRoles"], *args, **kwargs) | python | def listRoles(self, *args, **kwargs):
"""
List Roles
Get a list of all roles; each role object also includes the list of
scopes it expands to.
This method gives output: ``v1/list-roles-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listRoles"], *args, **kwargs) | List Roles
Get a list of all roles; each role object also includes the list of
scopes it expands to.
This method gives output: ``v1/list-roles-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L216-L228 |
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.listRoleIds | def listRoleIds(self, *args, **kwargs):
"""
List Role IDs
If no limit is given, the roleIds of all roles are returned. Since this
list may become long, callers can use the `limit` and `continuationToken`
query arguments to page through the responses.
This method gives output: ``v1/list-role-ids-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listRoleIds"], *args, **kwargs) | python | def listRoleIds(self, *args, **kwargs):
"""
List Role IDs
If no limit is given, the roleIds of all roles are returned. Since this
list may become long, callers can use the `limit` and `continuationToken`
query arguments to page through the responses.
This method gives output: ``v1/list-role-ids-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listRoleIds"], *args, **kwargs) | List Role IDs
If no limit is given, the roleIds of all roles are returned. Since this
list may become long, callers can use the `limit` and `continuationToken`
query arguments to page through the responses.
This method gives output: ``v1/list-role-ids-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L230-L243 |
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.listRoles2 | def listRoles2(self, *args, **kwargs):
"""
List Roles
If no limit is given, all roles are returned. Since this
list may become long, callers can use the `limit` and `continuationToken`
query arguments to page through the responses.
This method gives output: ``v1/list-roles2-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listRoles2"], *args, **kwargs) | python | def listRoles2(self, *args, **kwargs):
"""
List Roles
If no limit is given, all roles are returned. Since this
list may become long, callers can use the `limit` and `continuationToken`
query arguments to page through the responses.
This method gives output: ``v1/list-roles2-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listRoles2"], *args, **kwargs) | List Roles
If no limit is given, all roles are returned. Since this
list may become long, callers can use the `limit` and `continuationToken`
query arguments to page through the responses.
This method gives output: ``v1/list-roles2-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L245-L258 |
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.createRole | def createRole(self, *args, **kwargs):
"""
Create Role
Create a new role.
The caller's scopes must satisfy the new role's scopes.
If there already exists a role with the same `roleId` this operation
will fail. Use `updateRole` to modify an existing role.
Creation of a role that will generate an infinite expansion will result
in an error response.
This method takes input: ``v1/create-role-request.json#``
This method gives output: ``v1/get-role-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["createRole"], *args, **kwargs) | python | def createRole(self, *args, **kwargs):
"""
Create Role
Create a new role.
The caller's scopes must satisfy the new role's scopes.
If there already exists a role with the same `roleId` this operation
will fail. Use `updateRole` to modify an existing role.
Creation of a role that will generate an infinite expansion will result
in an error response.
This method takes input: ``v1/create-role-request.json#``
This method gives output: ``v1/get-role-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["createRole"], *args, **kwargs) | Create Role
Create a new role.
The caller's scopes must satisfy the new role's scopes.
If there already exists a role with the same `roleId` this operation
will fail. Use `updateRole` to modify an existing role.
Creation of a role that will generate an infinite expansion will result
in an error response.
This method takes input: ``v1/create-role-request.json#``
This method gives output: ``v1/get-role-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L274-L295 |
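A sketch of creating a role; the roleId and scopes are placeholders, and the payload follows ``v1/create-role-request.json#``:
```
import taskcluster

auth = taskcluster.Auth({
    'rootUrl': 'https://tc.example.com',
    'credentials': {'clientId': 'my-admin-client', 'accessToken': 'snip'},  # placeholders
})
role = auth.createRole('repo:github.com/my-org/my-repo:branch:master', {
    'description': 'Scopes granted to pushes on master',
    'scopes': ['secrets:get:project/my-org/ci'],
})
print(role['expandedScopes'])   # assumption: the role response carries its expansion
```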
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.updateRole | def updateRole(self, *args, **kwargs):
"""
Update Role
Update an existing role.
The caller's scopes must satisfy all of the new scopes being added, but
need not satisfy all of the role's existing scopes.
An update of a role that will generate an infinite expansion will result
in an error response.
This method takes input: ``v1/create-role-request.json#``
This method gives output: ``v1/get-role-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["updateRole"], *args, **kwargs) | python | def updateRole(self, *args, **kwargs):
"""
Update Role
Update an existing role.
The caller's scopes must satisfy all of the new scopes being added, but
need not satisfy all of the role's existing scopes.
An update of a role that will generate an infinite expansion will result
in an error response.
This method takes input: ``v1/create-role-request.json#``
This method gives output: ``v1/get-role-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["updateRole"], *args, **kwargs) | Update Role
Update an existing role.
The caller's scopes must satisfy all of the new scopes being added, but
need not satisfy all of the role's existing scopes.
An update of a role that will generate an infinite expansion will result
in an error response.
This method takes input: ``v1/create-role-request.json#``
This method gives output: ``v1/get-role-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L297-L316 |
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.deleteRole | def deleteRole(self, *args, **kwargs):
"""
Delete Role
Delete a role. This operation will succeed regardless of whether or not
the role exists.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["deleteRole"], *args, **kwargs) | python | def deleteRole(self, *args, **kwargs):
"""
Delete Role
Delete a role. This operation will succeed regardless of whether or not
the role exists.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["deleteRole"], *args, **kwargs) | Delete Role
Delete a role. This operation will succeed regardless of whether or not
the role exists.
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L318-L328 |
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.expandScopesGet | def expandScopesGet(self, *args, **kwargs):
"""
Expand Scopes
Return an expanded copy of the given scopeset, with scopes implied by any
roles included.
This call uses the GET method with an HTTP body. It remains only for
backward compatibility.
This method takes input: ``v1/scopeset.json#``
This method gives output: ``v1/scopeset.json#``
This method is ``deprecated``
"""
return self._makeApiCall(self.funcinfo["expandScopesGet"], *args, **kwargs) | python | def expandScopesGet(self, *args, **kwargs):
"""
Expand Scopes
Return an expanded copy of the given scopeset, with scopes implied by any
roles included.
This call uses the GET method with an HTTP body. It remains only for
backward compatibility.
This method takes input: ``v1/scopeset.json#``
This method gives output: ``v1/scopeset.json#``
This method is ``deprecated``
"""
return self._makeApiCall(self.funcinfo["expandScopesGet"], *args, **kwargs) | Expand Scopes
Return an expanded copy of the given scopeset, with scopes implied by any
roles included.
This call uses the GET method with an HTTP body. It remains only for
backward compatibility.
This method takes input: ``v1/scopeset.json#``
This method gives output: ``v1/scopeset.json#``
This method is ``deprecated`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L330-L347 |
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.expandScopes | def expandScopes(self, *args, **kwargs):
"""
Expand Scopes
Return an expanded copy of the given scopeset, with scopes implied by any
roles included.
This method takes input: ``v1/scopeset.json#``
This method gives output: ``v1/scopeset.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["expandScopes"], *args, **kwargs) | python | def expandScopes(self, *args, **kwargs):
"""
Expand Scopes
Return an expanded copy of the given scopeset, with scopes implied by any
roles included.
This method takes input: ``v1/scopeset.json#``
This method gives output: ``v1/scopeset.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["expandScopes"], *args, **kwargs) | Expand Scopes
Return an expanded copy of the given scopeset, with scopes implied by any
roles included.
This method takes input: ``v1/scopeset.json#``
This method gives output: ``v1/scopeset.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L349-L363 |
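A sketch of expanding a scopeset; input and output both follow ``v1/scopeset.json#``, assumed here to be an object with a ``scopes`` array:
```
import taskcluster

auth = taskcluster.Auth({'rootUrl': 'https://tc.example.com'})
expanded = auth.expandScopes({
    'scopes': ['assume:repo:github.com/my-org/my-repo:branch:master'],  # placeholder scope
})
print(expanded['scopes'])   # the assume:* scope plus everything the role implies
```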
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.currentScopes | def currentScopes(self, *args, **kwargs):
"""
Get Current Scopes
Return the expanded scopes available in the request, taking into account all sources
of scopes and scope restrictions (temporary credentials, assumeScopes, client scopes,
and roles).
This method gives output: ``v1/scopeset.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["currentScopes"], *args, **kwargs) | python | def currentScopes(self, *args, **kwargs):
"""
Get Current Scopes
Return the expanded scopes available in the request, taking into account all sources
of scopes and scope restrictions (temporary credentials, assumeScopes, client scopes,
and roles).
This method gives output: ``v1/scopeset.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["currentScopes"], *args, **kwargs) | Get Current Scopes
Return the expanded scopes available in the request, taking into account all sources
of scopes and scope restrictions (temporary credentials, assumeScopes, client scopes,
and roles).
This method gives output: ``v1/scopeset.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L365-L378 |
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.awsS3Credentials | def awsS3Credentials(self, *args, **kwargs):
"""
Get Temporary Read/Write Credentials S3
Get temporary AWS credentials for `read-write` or `read-only` access to
a given `bucket` and `prefix` within that bucket.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. Please note that the `level`
parameter is required in the scope guarding access. The bucket name must
not contain `.`, as recommended by Amazon.
This method can only allow access to a whitelisted set of buckets. To add
a bucket to that whitelist, contact the Taskcluster team, who will add it to
the appropriate IAM policy. If the bucket is in a different AWS account, you
will also need to add a bucket policy allowing access from the Taskcluster
account. That policy should look like this:
```
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "allow-taskcluster-auth-to-delegate-access",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::692406183521:root"
},
"Action": [
"s3:ListBucket",
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject",
"s3:GetBucketLocation"
],
"Resource": [
"arn:aws:s3:::<bucket>",
"arn:aws:s3:::<bucket>/*"
]
}
]
}
```
The credentials are set to expire after an hour, but this behavior is
subject to change. Hence, you should always read the `expires` property
from the response, if you intend to maintain active credentials in your
application.
Please note that your `prefix` may not start with slash `/`. Such a prefix
is allowed on S3, but we forbid it here to discourage bad behavior.
Also note that if your `prefix` doesn't end in a slash `/`, the STS
credentials may allow access to unexpected keys, as S3 does not treat
slashes specially. For example, a prefix of `my-folder` will allow
access to `my-folder/file.txt` as expected, but also to `my-folder.txt`,
which may not be intended.
Finally, note that the `PutObjectAcl` call is not allowed. Passing a canned
ACL other than `private` to `PutObject` is treated as a `PutObjectAcl` call, and
will result in an access-denied error from AWS. This limitation is due to a
security flaw in Amazon S3 which might otherwise allow indefinite access to
uploaded objects.
**EC2 metadata compatibility**, if the querystring parameter
`?format=iam-role-compat` is given, the response will be compatible
with the JSON exposed by the EC2 metadata service. This aims to ease
compatibility for libraries and tools built to auto-refresh credentials.
For details on the format returned by EC2 metadata service see:
[EC2 User Guide](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials).
This method gives output: ``v1/aws-s3-credentials-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["awsS3Credentials"], *args, **kwargs) | python | def awsS3Credentials(self, *args, **kwargs):
"""
Get Temporary Read/Write Credentials S3
Get temporary AWS credentials for `read-write` or `read-only` access to
a given `bucket` and `prefix` within that bucket.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. Please note that the `level`
parameter is required in the scope guarding access. The bucket name must
not contain `.`, as recommended by Amazon.
This method can only allow access to a whitelisted set of buckets. To add
a bucket to that whitelist, contact the Taskcluster team, who will add it to
the appropriate IAM policy. If the bucket is in a different AWS account, you
will also need to add a bucket policy allowing access from the Taskcluster
account. That policy should look like this:
```
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "allow-taskcluster-auth-to-delegate-access",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::692406183521:root"
},
"Action": [
"s3:ListBucket",
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject",
"s3:GetBucketLocation"
],
"Resource": [
"arn:aws:s3:::<bucket>",
"arn:aws:s3:::<bucket>/*"
]
}
]
}
```
The credentials are set to expire after an hour, but this behavior is
subject to change. Hence, you should always read the `expires` property
from the response, if you intend to maintain active credentials in your
application.
Please note that your `prefix` may not start with slash `/`. Such a prefix
is allowed on S3, but we forbid it here to discourage bad behavior.
Also note that if your `prefix` doesn't end in a slash `/`, the STS
credentials may allow access to unexpected keys, as S3 does not treat
slashes specially. For example, a prefix of `my-folder` will allow
access to `my-folder/file.txt` as expected, but also to `my-folder.txt`,
which may not be intended.
Finally, note that the `PutObjectAcl` call is not allowed. Passing a canned
ACL other than `private` to `PutObject` is treated as a `PutObjectAcl` call, and
will result in an access-denied error from AWS. This limitation is due to a
security flaw in Amazon S3 which might otherwise allow indefinite access to
uploaded objects.
**EC2 metadata compatibility**, if the querystring parameter
`?format=iam-role-compat` is given, the response will be compatible
with the JSON exposed by the EC2 metadata service. This aims to ease
compatibility for libraries and tools built to auto-refresh credentials.
For details on the format returned by EC2 metadata service see:
[EC2 User Guide](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials).
This method gives output: ``v1/aws-s3-credentials-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["awsS3Credentials"], *args, **kwargs) | Get Temporary Read/Write Credentials S3
Get temporary AWS credentials for `read-write` or `read-only` access to
a given `bucket` and `prefix` within that bucket.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. Please note that the `level`
parameter is required in the scope guarding access. The bucket name must
not contain `.`, as recommended by Amazon.
This method can only allow access to a whitelisted set of buckets. To add
a bucket to that whitelist, contact the Taskcluster team, who will add it to
the appropriate IAM policy. If the bucket is in a different AWS account, you
will also need to add a bucket policy allowing access from the Taskcluster
account. That policy should look like this:
```
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "allow-taskcluster-auth-to-delegate-access",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::692406183521:root"
},
"Action": [
"s3:ListBucket",
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject",
"s3:GetBucketLocation"
],
"Resource": [
"arn:aws:s3:::<bucket>",
"arn:aws:s3:::<bucket>/*"
]
}
]
}
```
The credentials are set to expire after an hour, but this behavior is
subject to change. Hence, you should always read the `expires` property
from the response, if you intend to maintain active credentials in your
application.
Please note that your `prefix` may not start with slash `/`. Such a prefix
is allowed on S3, but we forbid it here to discourage bad behavior.
Also note that if your `prefix` doesn't end in a slash `/`, the STS
credentials may allow access to unexpected keys, as S3 does not treat
slashes specially. For example, a prefix of `my-folder` will allow
access to `my-folder/file.txt` as expected, but also to `my-folder.txt`,
which may not be intended.
Finally, note that the `PutObjectAcl` call is not allowed. Passing a canned
ACL other than `private` to `PutObject` is treated as a `PutObjectAcl` call, and
will result in an access-denied error from AWS. This limitation is due to a
security flaw in Amazon S3 which might otherwise allow indefinite access to
uploaded objects.
**EC2 metadata compatibility**, if the querystring parameter
`?format=iam-role-compat` is given, the response will be compatible
with the JSON exposed by the EC2 metadata service. This aims to ease
compatibility for libraries and tools built to auto-refresh credentials.
For details on the format returned by EC2 metadata service see:
[EC2 User Guide](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials).
This method gives output: ``v1/aws-s3-credentials-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L380-L455 |
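A sketch of the whole flow with a trailing-slash prefix; the bucket and prefix are placeholders, the response fields follow ``v1/aws-s3-credentials-response.json#``, and boto3 stands in as one example consumer of STS-style credentials:
```
import boto3
import taskcluster

auth = taskcluster.Auth({
    'rootUrl': 'https://tc.example.com',
    'credentials': {'clientId': 'my-client', 'accessToken': 'snip'},  # placeholders
})
resp = auth.awsS3Credentials('read-write', 'my-bucket', 'my-folder/')  # note trailing slash
creds = resp['credentials']

s3 = boto3.client(
    's3',
    aws_access_key_id=creds['accessKeyId'],
    aws_secret_access_key=creds['secretAccessKey'],
    aws_session_token=creds['sessionToken'],
)
# No canned ACL: anything other than the default private would be rejected (see above).
s3.put_object(Bucket='my-bucket', Key='my-folder/file.txt', Body=b'data')
# Refresh before resp['expires'] if the credentials must stay live.
```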
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.azureTables | def azureTables(self, *args, **kwargs):
"""
List Tables in an Account Managed by Auth
Retrieve a list of all tables in an account.
This method gives output: ``v1/azure-table-list-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["azureTables"], *args, **kwargs) | python | def azureTables(self, *args, **kwargs):
"""
List Tables in an Account Managed by Auth
Retrieve a list of all tables in an account.
This method gives output: ``v1/azure-table-list-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["azureTables"], *args, **kwargs) | List Tables in an Account Managed by Auth
Retrieve a list of all tables in an account.
This method gives output: ``v1/azure-table-list-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L470-L481 |
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.azureTableSAS | def azureTableSAS(self, *args, **kwargs):
"""
Get Shared-Access-Signature for Azure Table
Get a shared access signature (SAS) string for use with a specific Azure
Table Storage table.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. If level is read-write, it will create the
table if it doesn't already exist.
This method gives output: ``v1/azure-table-access-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["azureTableSAS"], *args, **kwargs) | python | def azureTableSAS(self, *args, **kwargs):
"""
Get Shared-Access-Signature for Azure Table
Get a shared access signature (SAS) string for use with a specific Azure
Table Storage table.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. If level is read-write, it will create the
table if it doesn't already exist.
This method gives output: ``v1/azure-table-access-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["azureTableSAS"], *args, **kwargs) | Get Shared-Access-Signature for Azure Table
Get a shared access signature (SAS) string for use with a specific Azure
Table Storage table.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. If level is read-write, it will create the
table if it doesn't already exist.
This method gives output: ``v1/azure-table-access-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L483-L499 |
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.azureContainers | def azureContainers(self, *args, **kwargs):
"""
List containers in an Account Managed by Auth
Retrieve a list of all containers in an account.
This method gives output: ``v1/azure-container-list-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["azureContainers"], *args, **kwargs) | python | def azureContainers(self, *args, **kwargs):
"""
List containers in an Account Managed by Auth
Retrieve a list of all containers in an account.
This method gives output: ``v1/azure-container-list-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["azureContainers"], *args, **kwargs) | List containers in an Account Managed by Auth
Retrieve a list of all containers in an account.
This method gives output: ``v1/azure-container-list-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L501-L512 |
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.azureContainerSAS | def azureContainerSAS(self, *args, **kwargs):
"""
Get Shared-Access-Signature for Azure Container
Get a shared access signature (SAS) string for use with a specific Azure
Blob Storage container.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. If level is read-write, it will create the
container if it doesn't already exist.
This method gives output: ``v1/azure-container-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["azureContainerSAS"], *args, **kwargs) | python | def azureContainerSAS(self, *args, **kwargs):
"""
Get Shared-Access-Signature for Azure Container
Get a shared access signature (SAS) string for use with a specific Azure
Blob Storage container.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. If level is read-write, it will create the
container if it doesn't already exist.
This method gives output: ``v1/azure-container-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["azureContainerSAS"], *args, **kwargs) | Get Shared-Access-Signature for Azure Container
Get a shared access signature (SAS) string for use with a specific Azure
Blob Storage container.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. If level is read-write, it will create the
container if it doesn't already exist.
This method gives output: ``v1/azure-container-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L514-L530 |
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.sentryDSN | def sentryDSN(self, *args, **kwargs):
"""
Get DSN for Sentry Project
Get temporary DSN (access credentials) for a sentry project.
The credentials returned can be used with any Sentry client for up to
24 hours, after which the credentials will be automatically disabled.
If the project doesn't exist it will be created, and assigned to the
initial team configured for this component. Contact a Sentry admin
to have the project transferred to a team you have access to, if needed.
This method gives output: ``v1/sentry-dsn-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["sentryDSN"], *args, **kwargs) | python | def sentryDSN(self, *args, **kwargs):
"""
Get DSN for Sentry Project
Get temporary DSN (access credentials) for a sentry project.
The credentials returned can be used with any Sentry client for up to
24 hours, after which the credentials will be automatically disabled.
If the project doesn't exist it will be created, and assigned to the
initial team configured for this component. Contact a Sentry admin
to have the project transferred to a team you have access to, if needed.
This method gives output: ``v1/sentry-dsn-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["sentryDSN"], *args, **kwargs) | Get DSN for Sentry Project
Get temporary DSN (access credentials) for a sentry project.
The credentials returned can be used with any Sentry client for up to
24 hours, after which the credentials will be automatically disabled.
If the project doesn't exist it will be created, and assigned to the
initial team configured for this component. Contact a Sentry admin
to have the project transferred to a team you have access to, if needed.
This method gives output: ``v1/sentry-dsn-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L532-L549 |
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.statsumToken | def statsumToken(self, *args, **kwargs):
"""
Get Token for Statsum Project
Get temporary `token` and `baseUrl` for sending metrics to statsum.
The token is valid for 24 hours, clients should refresh after expiration.
This method gives output: ``v1/statsum-token-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["statsumToken"], *args, **kwargs) | python | def statsumToken(self, *args, **kwargs):
"""
Get Token for Statsum Project
Get temporary `token` and `baseUrl` for sending metrics to statsum.
The token is valid for 24 hours, clients should refresh after expiration.
This method gives output: ``v1/statsum-token-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["statsumToken"], *args, **kwargs) | Get Token for Statsum Project
Get temporary `token` and `baseUrl` for sending metrics to statsum.
The token is valid for 24 hours, clients should refresh after expiration.
This method gives output: ``v1/statsum-token-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L551-L564 |
taskcluster/taskcluster-client.py | taskcluster/auth.py | Auth.websocktunnelToken | def websocktunnelToken(self, *args, **kwargs):
"""
Get a client token for the Websocktunnel service
Get a temporary token suitable for use connecting to a
[websocktunnel](https://github.com/taskcluster/websocktunnel) server.
The resulting token will only be accepted by servers with a matching audience
value. Reaching such a server is the caller's responsibility. In general,
a server URL or set of URLs should be provided to the caller as configuration
along with the audience value.
The token is valid for a limited time (on the scale of hours). Callers should
refresh it before expiration.
This method gives output: ``v1/websocktunnel-token-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["websocktunnelToken"], *args, **kwargs) | python | def websocktunnelToken(self, *args, **kwargs):
"""
Get a client token for the Websocktunnel service
Get a temporary token suitable for use connecting to a
[websocktunnel](https://github.com/taskcluster/websocktunnel) server.
The resulting token will only be accepted by servers with a matching audience
value. Reaching such a server is the caller's responsibility. In general,
a server URL or set of URLs should be provided to the caller as configuration
along with the audience value.
The token is valid for a limited time (on the scale of hours). Callers should
refresh it before expiration.
This method gives output: ``v1/websocktunnel-token-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["websocktunnelToken"], *args, **kwargs) | Get a client token for the Websocktunnel service
Get a temporary token suitable for use connecting to a
[websocktunnel](https://github.com/taskcluster/websocktunnel) server.
The resulting token will only be accepted by servers with a matching audience
value. Reaching such a server is the caller's responsibility. In general,
a server URL or set of URLs should be provided to the caller as configuration
along with the audience value.
The token is valid for a limited time (on the scale of hours). Callers should
refresh it before expiration.
This method gives output: ``v1/websocktunnel-token-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L566-L586 |
jart/fabulous | fabulous/color.py | h1 | def h1(title, line=OVERLINE):
"""Prints bold text with line beneath it spanning width of terminal
"""
width = utils.term.width
printy(bold(title.center(width)).as_utf8)
printy(bold((line * width)[:width]).as_utf8) | python | def h1(title, line=OVERLINE):
"""Prints bold text with line beneath it spanning width of terminal
"""
width = utils.term.width
printy(bold(title.center(width)).as_utf8)
printy(bold((line * width)[:width]).as_utf8) | Prints bold text with a line beneath it spanning the width of the terminal | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/color.py#L832-L837 |
jart/fabulous | fabulous/color.py | parse_color | def parse_color(color):
r"""Turns a color into an (r, g, b) tuple
>>> parse_color('white')
(255, 255, 255)
>>> parse_color('#ff0000')
(255, 0, 0)
>>> parse_color('#f00')
(255, 0, 0)
>>> parse_color((255, 0, 0))
(255, 0, 0)
>>> from fabulous import grapefruit
>>> parse_color(grapefruit.Color((0.0, 1.0, 0.0)))
(0, 255, 0)
"""
if isinstance(color, basestring):
color = grapefruit.Color.NewFromHtml(color)
if isinstance(color, int):
(r, g, b) = xterm256.xterm_to_rgb(color)
elif hasattr(color, 'rgb'):
(r, g, b) = [int(c * 255.0) for c in color.rgb]
else:
(r, g, b) = color
assert isinstance(r, int) and 0 <= r <= 255
assert isinstance(g, int) and 0 <= g <= 255
assert isinstance(b, int) and 0 <= b <= 255
return (r, g, b) | python | def parse_color(color):
r"""Turns a color into an (r, g, b) tuple
>>> parse_color('white')
(255, 255, 255)
>>> parse_color('#ff0000')
(255, 0, 0)
>>> parse_color('#f00')
(255, 0, 0)
>>> parse_color((255, 0, 0))
(255, 0, 0)
>>> from fabulous import grapefruit
>>> parse_color(grapefruit.Color((0.0, 1.0, 0.0)))
(0, 255, 0)
"""
if isinstance(color, basestring):
color = grapefruit.Color.NewFromHtml(color)
if isinstance(color, int):
(r, g, b) = xterm256.xterm_to_rgb(color)
elif hasattr(color, 'rgb'):
(r, g, b) = [int(c * 255.0) for c in color.rgb]
else:
(r, g, b) = color
assert isinstance(r, int) and 0 <= r <= 255
assert isinstance(g, int) and 0 <= g <= 255
assert isinstance(b, int) and 0 <= b <= 255
return (r, g, b) | r"""Turns a color into an (r, g, b) tuple
>>> parse_color('white')
(255, 255, 255)
>>> parse_color('#ff0000')
(255, 0, 0)
>>> parse_color('#f00')
(255, 0, 0)
>>> parse_color((255, 0, 0))
(255, 0, 0)
>>> from fabulous import grapefruit
>>> parse_color(grapefruit.Color((0.0, 1.0, 0.0)))
(0, 255, 0) | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/color.py#L840-L866 |
jart/fabulous | fabulous/color.py | complement | def complement(color):
r"""Calculates polar opposite of color
This isn't guaranteed to look good >_> (especially with brighter, higher
intensity colors.) This will be replaced with a formula that produces
better looking colors in the future.
>>> complement('red')
(0, 255, 76)
>>> complement((0, 100, 175))
(175, 101, 0)
"""
(r, g, b) = parse_color(color)
gcolor = grapefruit.Color((r / 255.0, g / 255.0, b / 255.0))
complement = gcolor.ComplementaryColor()
(r, g, b) = [int(c * 255.0) for c in complement.rgb]
return (r, g, b) | python | def complement(color):
r"""Calculates polar opposite of color
This isn't guaranteed to look good >_> (especially with brighter, higher
intensity colors.) This will be replaced with a formula that produces
better looking colors in the future.
>>> complement('red')
(0, 255, 76)
>>> complement((0, 100, 175))
(175, 101, 0)
"""
(r, g, b) = parse_color(color)
gcolor = grapefruit.Color((r / 255.0, g / 255.0, b / 255.0))
complement = gcolor.ComplementaryColor()
(r, g, b) = [int(c * 255.0) for c in complement.rgb]
return (r, g, b) | r"""Calculates polar opposite of color
This isn't guaranteed to look good >_> (especially with brighter, higher
intensity colors.) This will be replaced with a formula that produces
better looking colors in the future.
>>> complement('red')
(0, 255, 76)
>>> complement((0, 100, 175))
(175, 101, 0) | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/color.py#L869-L886 |
jart/fabulous | fabulous/color.py | section | def section(title, bar=OVERLINE, strm=sys.stdout):
"""Helper function for testing demo routines
"""
width = utils.term.width
printy(bold(title.center(width)))
printy(bold((bar * width)[:width])) | python | def section(title, bar=OVERLINE, strm=sys.stdout):
"""Helper function for testing demo routines
"""
width = utils.term.width
printy(bold(title.center(width)))
printy(bold((bar * width)[:width])) | Helper function for testing demo routines | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/color.py#L889-L894 |
taskcluster/taskcluster-client.py | taskcluster/awsprovisionerevents.py | AwsProvisionerEvents.workerTypeCreated | def workerTypeCreated(self, *args, **kwargs):
"""
WorkerType Created Message
When a new `workerType` is created a message will be published to this
exchange.
This exchange outputs: ``http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#`` This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* workerType: WorkerType that this message concerns. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`, as is done automatically by our tooling if not specified.
"""
ref = {
'exchange': 'worker-type-created',
'name': 'workerTypeCreated',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs) | python | def workerTypeCreated(self, *args, **kwargs):
"""
WorkerType Created Message
When a new `workerType` is created a message will be published to this
exchange.
This exchange outputs: ``http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#`` This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* workerType: WorkerType that this message concerns. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`, as is done automatically by our tooling if not specified.
"""
ref = {
'exchange': 'worker-type-created',
'name': 'workerTypeCreated',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs) | WorkerType Created Message
When a new `workerType` is created a message will be published to this
exchange.
This exchange outputs: ``http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#`` This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* workerType: WorkerType that this message concerns. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`, as is done automatically by our tooling if not specified. | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/awsprovisionerevents.py#L24-L60 |
taskcluster/taskcluster-client.py | taskcluster/awsprovisionerevents.py | AwsProvisionerEvents.workerTypeUpdated | def workerTypeUpdated(self, *args, **kwargs):
"""
WorkerType Updated Message
When a `workerType` is updated a message will be published to this
exchange.
This exchange outputs: ``http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#`` This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* workerType: WorkerType that this message concerns. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`, as is done automatically by our tooling if not specified.
"""
ref = {
'exchange': 'worker-type-updated',
'name': 'workerTypeUpdated',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs) | python | def workerTypeUpdated(self, *args, **kwargs):
"""
WorkerType Updated Message
When a `workerType` is updated a message will be published to this
exchange.
This exchange outputs: ``http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#`` This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* workerType: WorkerType that this message concerns. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`, as is done automatically by our tooling if not specified.
"""
ref = {
'exchange': 'worker-type-updated',
'name': 'workerTypeUpdated',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs) | WorkerType Updated Message
When a `workerType` is updated a message will be published to this
exchange.
This exchange outputs: ``http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#`` This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* workerType: WorkerType that this message concerns. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`, as is done automatically by our tooling if not specified. | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/awsprovisionerevents.py#L62-L98 |
taskcluster/taskcluster-client.py | taskcluster/awsprovisionerevents.py | AwsProvisionerEvents.workerTypeRemoved | def workerTypeRemoved(self, *args, **kwargs):
"""
WorkerType Removed Message
When a `workerType` is removed a message will be published to this
exchange.
This exchange outputs: ``http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#`` This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* workerType: WorkerType that this message concerns. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`, as is done automatically by our tooling if not specified.
"""
ref = {
'exchange': 'worker-type-removed',
'name': 'workerTypeRemoved',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs) | python | def workerTypeRemoved(self, *args, **kwargs):
"""
WorkerType Removed Message
When a `workerType` is removed a message will be published to this
exchange.
This exchange outputs: ``http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#`` This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* workerType: WorkerType that this message concerns. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`, as is done automatically by our tooling if not specified.
"""
ref = {
'exchange': 'worker-type-removed',
'name': 'workerTypeRemoved',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs) | WorkerType Removed Message
When a `workerType` is removed a message will be published to this
exchange.
This exchange outputs: ``http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#`` This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* workerType: WorkerType that this message concerns. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`, as is done automatically by our tooling if not specified. | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/awsprovisionerevents.py#L100-L136 |
taskcluster/taskcluster-client.py | taskcluster/treeherderevents.py | TreeherderEvents.jobs | def jobs(self, *args, **kwargs):
"""
Job Messages
When a task run is scheduled or resolved, a message is posted to
this exchange in a Treeherder consumable format.
This exchange outputs: ``v1/pulse-job.json#`` This exchange takes the following keys:
* destination: destination (required)
* project: project (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`, as is done automatically by our tooling if not specified.
"""
ref = {
'exchange': 'jobs',
'name': 'jobs',
'routingKey': [
{
'multipleWords': False,
'name': 'destination',
},
{
'multipleWords': False,
'name': 'project',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/pulse-job.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs) | python | def jobs(self, *args, **kwargs):
"""
Job Messages
When a task run is scheduled or resolved, a message is posted to
this exchange in a Treeherder consumable format.
This exchange outputs: ``v1/pulse-job.json#`` This exchange takes the following keys:
* destination: destination (required)
* project: project (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`, as is done automatically by our tooling if not specified.
"""
ref = {
'exchange': 'jobs',
'name': 'jobs',
'routingKey': [
{
'multipleWords': False,
'name': 'destination',
},
{
'multipleWords': False,
'name': 'project',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/pulse-job.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs) | Job Messages
When a task run is scheduled or resolved, a message is posted to
this exchange in a Treeherder consumable format.
This exchange outputs: ``v1/pulse-job.json#`` This exchange takes the following keys:
* destination: destination (required)
* project: project (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`, as is done automatically by our tooling if not specified. | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/treeherderevents.py#L31-L66 |
taskcluster/taskcluster-client.py | taskcluster/client.py | createTemporaryCredentials | def createTemporaryCredentials(clientId, accessToken, start, expiry, scopes, name=None):
""" Create a set of temporary credentials
    Callers should not apply any clock skew; clock drift is accounted for by
    the auth service.
    clientId: the issuing clientId
    accessToken: the issuer's accessToken
    start: start time of credentials (datetime.datetime)
    expiry: expiration time of credentials (datetime.datetime)
    scopes: list of scopes granted
    name: credential name (optional)
    Returns a dictionary in the form:
    { 'clientId': str, 'accessToken': str, 'certificate': str}
"""
for scope in scopes:
if not isinstance(scope, six.string_types):
raise exceptions.TaskclusterFailure('Scope must be string')
# Credentials can only be valid for 31 days. I hope that
# this is validated on the server somehow...
if expiry - start > datetime.timedelta(days=31):
raise exceptions.TaskclusterFailure('Only 31 days allowed')
# We multiply times by 1000 because the auth service is JS and as a result
# uses milliseconds instead of seconds
cert = dict(
version=1,
scopes=scopes,
start=calendar.timegm(start.utctimetuple()) * 1000,
expiry=calendar.timegm(expiry.utctimetuple()) * 1000,
seed=utils.slugId().encode('ascii') + utils.slugId().encode('ascii'),
)
# if this is a named temporary credential, include the issuer in the certificate
if name:
cert['issuer'] = utils.toStr(clientId)
sig = ['version:' + utils.toStr(cert['version'])]
if name:
sig.extend([
'clientId:' + utils.toStr(name),
'issuer:' + utils.toStr(clientId),
])
sig.extend([
'seed:' + utils.toStr(cert['seed']),
'start:' + utils.toStr(cert['start']),
'expiry:' + utils.toStr(cert['expiry']),
'scopes:'
] + scopes)
sigStr = '\n'.join(sig).encode()
if isinstance(accessToken, six.text_type):
accessToken = accessToken.encode()
sig = hmac.new(accessToken, sigStr, hashlib.sha256).digest()
cert['signature'] = utils.encodeStringForB64Header(sig)
newToken = hmac.new(accessToken, cert['seed'], hashlib.sha256).digest()
newToken = utils.makeB64UrlSafe(utils.encodeStringForB64Header(newToken)).replace(b'=', b'')
return {
'clientId': name or clientId,
'accessToken': newToken,
'certificate': utils.dumpJson(cert),
} | python | def createTemporaryCredentials(clientId, accessToken, start, expiry, scopes, name=None):
""" Create a set of temporary credentials
    Callers should not apply any clock skew; clock drift is accounted for by
    the auth service.
    clientId: the issuing clientId
    accessToken: the issuer's accessToken
    start: start time of credentials (datetime.datetime)
    expiry: expiration time of credentials (datetime.datetime)
    scopes: list of scopes granted
    name: credential name (optional)
    Returns a dictionary in the form:
    { 'clientId': str, 'accessToken': str, 'certificate': str}
"""
for scope in scopes:
if not isinstance(scope, six.string_types):
raise exceptions.TaskclusterFailure('Scope must be string')
# Credentials can only be valid for 31 days. I hope that
# this is validated on the server somehow...
if expiry - start > datetime.timedelta(days=31):
raise exceptions.TaskclusterFailure('Only 31 days allowed')
# We multiply times by 1000 because the auth service is JS and as a result
# uses milliseconds instead of seconds
cert = dict(
version=1,
scopes=scopes,
start=calendar.timegm(start.utctimetuple()) * 1000,
expiry=calendar.timegm(expiry.utctimetuple()) * 1000,
seed=utils.slugId().encode('ascii') + utils.slugId().encode('ascii'),
)
# if this is a named temporary credential, include the issuer in the certificate
if name:
cert['issuer'] = utils.toStr(clientId)
sig = ['version:' + utils.toStr(cert['version'])]
if name:
sig.extend([
'clientId:' + utils.toStr(name),
'issuer:' + utils.toStr(clientId),
])
sig.extend([
'seed:' + utils.toStr(cert['seed']),
'start:' + utils.toStr(cert['start']),
'expiry:' + utils.toStr(cert['expiry']),
'scopes:'
] + scopes)
sigStr = '\n'.join(sig).encode()
if isinstance(accessToken, six.text_type):
accessToken = accessToken.encode()
sig = hmac.new(accessToken, sigStr, hashlib.sha256).digest()
cert['signature'] = utils.encodeStringForB64Header(sig)
newToken = hmac.new(accessToken, cert['seed'], hashlib.sha256).digest()
newToken = utils.makeB64UrlSafe(utils.encodeStringForB64Header(newToken)).replace(b'=', b'')
return {
'clientId': name or clientId,
'accessToken': newToken,
'certificate': utils.dumpJson(cert),
} | Create a set of temporary credentials
    Callers should not apply any clock skew; clock drift is accounted for by
    the auth service.
    clientId: the issuing clientId
    accessToken: the issuer's accessToken
    start: start time of credentials (datetime.datetime)
    expiry: expiration time of credentials (datetime.datetime)
    scopes: list of scopes granted
    name: credential name (optional)
    Returns a dictionary in the form:
    { 'clientId': str, 'accessToken': str, 'certificate': str} | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/client.py#L636-L704 |
taskcluster/taskcluster-client.py | taskcluster/client.py | BaseClient.makeHawkExt | def makeHawkExt(self):
""" Make an 'ext' for Hawk authentication """
o = self.options
c = o.get('credentials', {})
if c.get('clientId') and c.get('accessToken'):
ext = {}
cert = c.get('certificate')
if cert:
if six.PY3 and isinstance(cert, six.binary_type):
cert = cert.decode()
if isinstance(cert, six.string_types):
cert = json.loads(cert)
ext['certificate'] = cert
if 'authorizedScopes' in o:
ext['authorizedScopes'] = o['authorizedScopes']
# .encode('base64') inserts a newline, which hawk doesn't
# like but doesn't strip itself
return utils.makeB64UrlSafe(utils.encodeStringForB64Header(utils.dumpJson(ext)).strip())
else:
return {} | python | def makeHawkExt(self):
""" Make an 'ext' for Hawk authentication """
o = self.options
c = o.get('credentials', {})
if c.get('clientId') and c.get('accessToken'):
ext = {}
cert = c.get('certificate')
if cert:
if six.PY3 and isinstance(cert, six.binary_type):
cert = cert.decode()
if isinstance(cert, six.string_types):
cert = json.loads(cert)
ext['certificate'] = cert
if 'authorizedScopes' in o:
ext['authorizedScopes'] = o['authorizedScopes']
# .encode('base64') inserts a newline, which hawk doesn't
# like but doesn't strip itself
return utils.makeB64UrlSafe(utils.encodeStringForB64Header(utils.dumpJson(ext)).strip())
else:
return {} | Make an 'ext' for Hawk authentication | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/client.py#L93-L114 |
taskcluster/taskcluster-client.py | taskcluster/client.py | BaseClient.buildSignedUrl | def buildSignedUrl(self, methodName, *args, **kwargs):
""" Build a signed URL. This URL contains the credentials needed to access
a resource."""
if 'expiration' in kwargs:
expiration = kwargs['expiration']
del kwargs['expiration']
else:
expiration = self.options['signedUrlExpiration']
expiration = int(time.time() + expiration) # Mainly so that we throw if it's not a number
requestUrl = self.buildUrl(methodName, *args, **kwargs)
if not self._hasCredentials():
raise exceptions.TaskclusterAuthFailure('Invalid Hawk Credentials')
clientId = utils.toStr(self.options['credentials']['clientId'])
accessToken = utils.toStr(self.options['credentials']['accessToken'])
def genBewit():
# We need to fix the output of get_bewit. It returns a url-safe base64
# encoded string, which contains a list of tokens separated by '\'.
# The first one is the clientId, the second is an int, the third is
# url-safe base64 encoded MAC, the fourth is the ext param.
# The problem is that the nested url-safe base64 encoded MAC must be
# base64 (i.e. not url safe) or server-side will complain.
# id + '\\' + exp + '\\' + mac + '\\' + options.ext;
resource = mohawk.base.Resource(
credentials={
'id': clientId,
'key': accessToken,
'algorithm': 'sha256',
},
method='GET',
ext=utils.toStr(self.makeHawkExt()),
url=requestUrl,
timestamp=expiration,
nonce='',
# content='',
# content_type='',
)
bewit = mohawk.bewit.get_bewit(resource)
return bewit.rstrip('=')
bewit = genBewit()
if not bewit:
raise exceptions.TaskclusterFailure('Did not receive a bewit')
u = urllib.parse.urlparse(requestUrl)
qs = u.query
if qs:
qs += '&'
qs += 'bewit=%s' % bewit
return urllib.parse.urlunparse((
u.scheme,
u.netloc,
u.path,
u.params,
qs,
u.fragment,
)) | python | def buildSignedUrl(self, methodName, *args, **kwargs):
""" Build a signed URL. This URL contains the credentials needed to access
a resource."""
if 'expiration' in kwargs:
expiration = kwargs['expiration']
del kwargs['expiration']
else:
expiration = self.options['signedUrlExpiration']
expiration = int(time.time() + expiration) # Mainly so that we throw if it's not a number
requestUrl = self.buildUrl(methodName, *args, **kwargs)
if not self._hasCredentials():
raise exceptions.TaskclusterAuthFailure('Invalid Hawk Credentials')
clientId = utils.toStr(self.options['credentials']['clientId'])
accessToken = utils.toStr(self.options['credentials']['accessToken'])
def genBewit():
# We need to fix the output of get_bewit. It returns a url-safe base64
# encoded string, which contains a list of tokens separated by '\'.
# The first one is the clientId, the second is an int, the third is
# url-safe base64 encoded MAC, the fourth is the ext param.
# The problem is that the nested url-safe base64 encoded MAC must be
# base64 (i.e. not url safe) or server-side will complain.
# id + '\\' + exp + '\\' + mac + '\\' + options.ext;
resource = mohawk.base.Resource(
credentials={
'id': clientId,
'key': accessToken,
'algorithm': 'sha256',
},
method='GET',
ext=utils.toStr(self.makeHawkExt()),
url=requestUrl,
timestamp=expiration,
nonce='',
# content='',
# content_type='',
)
bewit = mohawk.bewit.get_bewit(resource)
return bewit.rstrip('=')
bewit = genBewit()
if not bewit:
raise exceptions.TaskclusterFailure('Did not receive a bewit')
u = urllib.parse.urlparse(requestUrl)
qs = u.query
if qs:
qs += '&'
qs += 'bewit=%s' % bewit
return urllib.parse.urlunparse((
u.scheme,
u.netloc,
u.path,
u.params,
qs,
u.fragment,
)) | Build a signed URL. This URL contains the credentials needed to access
a resource. | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/client.py#L179-L244 |
taskcluster/taskcluster-client.py | taskcluster/client.py | BaseClient._constructUrl | def _constructUrl(self, route):
"""Construct a URL for the given route on this service, based on the
rootUrl"""
return liburls.api(
self.options['rootUrl'],
self.serviceName,
self.apiVersion,
route.rstrip('/')) | python | def _constructUrl(self, route):
"""Construct a URL for the given route on this service, based on the
rootUrl"""
return liburls.api(
self.options['rootUrl'],
self.serviceName,
self.apiVersion,
route.rstrip('/')) | Construct a URL for the given route on this service, based on the
rootUrl | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/client.py#L246-L253 |
taskcluster/taskcluster-client.py | taskcluster/client.py | BaseClient._makeApiCall | def _makeApiCall(self, entry, *args, **kwargs):
""" This function is used to dispatch calls to other functions
for a given API Reference entry"""
x = self._processArgs(entry, *args, **kwargs)
routeParams, payload, query, paginationHandler, paginationLimit = x
route = self._subArgsInRoute(entry, routeParams)
# TODO: Check for limit being in the Query of the api ref
if paginationLimit and 'limit' in entry.get('query', []):
query['limit'] = paginationLimit
if query:
_route = route + '?' + urllib.parse.urlencode(query)
else:
_route = route
response = self._makeHttpRequest(entry['method'], _route, payload)
if paginationHandler:
paginationHandler(response)
while response.get('continuationToken'):
query['continuationToken'] = response['continuationToken']
_route = route + '?' + urllib.parse.urlencode(query)
response = self._makeHttpRequest(entry['method'], _route, payload)
paginationHandler(response)
else:
return response | python | def _makeApiCall(self, entry, *args, **kwargs):
""" This function is used to dispatch calls to other functions
for a given API Reference entry"""
x = self._processArgs(entry, *args, **kwargs)
routeParams, payload, query, paginationHandler, paginationLimit = x
route = self._subArgsInRoute(entry, routeParams)
# TODO: Check for limit being in the Query of the api ref
if paginationLimit and 'limit' in entry.get('query', []):
query['limit'] = paginationLimit
if query:
_route = route + '?' + urllib.parse.urlencode(query)
else:
_route = route
response = self._makeHttpRequest(entry['method'], _route, payload)
if paginationHandler:
paginationHandler(response)
while response.get('continuationToken'):
query['continuationToken'] = response['continuationToken']
_route = route + '?' + urllib.parse.urlencode(query)
response = self._makeHttpRequest(entry['method'], _route, payload)
paginationHandler(response)
else:
return response | This function is used to dispatch calls to other functions
for a given API Reference entry | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/client.py#L255-L281 |
taskcluster/taskcluster-client.py | taskcluster/client.py | BaseClient._processArgs | def _processArgs(self, entry, *_args, **_kwargs):
""" Given an entry, positional and keyword arguments, figure out what
the query-string options, payload and api arguments are.
"""
# We need the args to be a list so we can mutate them
args = list(_args)
kwargs = copy.deepcopy(_kwargs)
reqArgs = entry['args']
routeParams = {}
query = {}
payload = None
kwApiArgs = {}
paginationHandler = None
paginationLimit = None
# There are three formats for calling methods:
# 1. method(v1, v2, payload)
# 2. method(payload, k1=v1, k2=v2)
# 3. method(payload=payload, query=query, params={k1: v1, k2: v2})
if len(kwargs) == 0:
if 'input' in entry and len(args) == len(reqArgs) + 1:
payload = args.pop()
if len(args) != len(reqArgs):
log.debug(args)
log.debug(reqArgs)
raise exceptions.TaskclusterFailure('Incorrect number of positional arguments')
log.debug('Using method(v1, v2, payload) calling convention')
else:
# We're considering kwargs which are the api route parameters to be
# called 'flat' because they're top level keys. We're special
# casing calls which have only api-arg kwargs and possibly a payload
# value and handling them directly.
isFlatKwargs = True
if len(kwargs) == len(reqArgs):
for arg in reqArgs:
if not kwargs.get(arg, False):
isFlatKwargs = False
break
if 'input' in entry and len(args) != 1:
isFlatKwargs = False
if 'input' not in entry and len(args) != 0:
isFlatKwargs = False
else:
pass  # We're using payload=, query= and params=
else:
isFlatKwargs = False
# Now we're going to handle the two types of kwargs. The first is
# 'flat' ones, where the kwargs themselves are the api route parameters.
if isFlatKwargs:
if 'input' in entry:
payload = args.pop()
kwApiArgs = kwargs
log.debug('Using method(payload, k1=v1, k2=v2) calling convention')
warnings.warn(
"The method(payload, k1=v1, k2=v2) calling convention will soon be deprecated",
PendingDeprecationWarning
)
else:
kwApiArgs = kwargs.get('params', {})
payload = kwargs.get('payload', None)
query = kwargs.get('query', {})
paginationHandler = kwargs.get('paginationHandler', None)
paginationLimit = kwargs.get('paginationLimit', None)
log.debug('Using method(payload=payload, query=query, params={k1: v1, k2: v2}) calling convention')
if 'input' in entry and isinstance(payload, type(None)):
raise exceptions.TaskclusterFailure('Payload is required')
# These all need to be rendered down to a string, let's just check that
# they are up front and fail fast
for arg in args:
if not isinstance(arg, six.string_types) and not isinstance(arg, int):
raise exceptions.TaskclusterFailure(
'Positional arg "%s" to %s is not a string or int' % (arg, entry['name']))
for name, arg in six.iteritems(kwApiArgs):
if not isinstance(arg, six.string_types) and not isinstance(arg, int):
raise exceptions.TaskclusterFailure(
'KW arg "%s: %s" to %s is not a string or int' % (name, arg, entry['name']))
if len(args) > 0 and len(kwApiArgs) > 0:
raise exceptions.TaskclusterFailure('Specify either positional or key word arguments')
# We know for sure that if we don't give enough arguments the call
# should fail. We don't yet know if we should fail because of too many
# arguments because we might be overwriting positional ones with kw ones
if len(reqArgs) > len(args) + len(kwApiArgs):
raise exceptions.TaskclusterFailure(
'%s takes %d args, only %d were given' % (
entry['name'], len(reqArgs), len(args) + len(kwApiArgs)))
# We also need to error out when we have more positional args than required
# because we'll need to go through the lists of provided and required args
# at the same time. Not disqualifying early means we'll get IndexErrors if
# there are more positional arguments than required
if len(args) > len(reqArgs):
raise exceptions.TaskclusterFailure('%s called with too many positional args',
entry['name'])
i = 0
for arg in args:
log.debug('Found a positional argument: %s', arg)
routeParams[reqArgs[i]] = arg
i += 1
log.debug('After processing positional arguments, we have: %s', routeParams)
routeParams.update(kwApiArgs)
log.debug('After keyword arguments, we have: %s', routeParams)
if len(reqArgs) != len(routeParams):
errMsg = '%s takes %s args, %s given' % (
entry['name'],
','.join(reqArgs),
routeParams.keys())
log.error(errMsg)
raise exceptions.TaskclusterFailure(errMsg)
for reqArg in reqArgs:
if reqArg not in routeParams:
errMsg = '%s requires a "%s" argument which was not provided' % (
entry['name'], reqArg)
log.error(errMsg)
raise exceptions.TaskclusterFailure(errMsg)
return routeParams, payload, query, paginationHandler, paginationLimit | python | def _processArgs(self, entry, *_args, **_kwargs):
""" Given an entry, positional and keyword arguments, figure out what
the query-string options, payload and api arguments are.
"""
# We need the args to be a list so we can mutate them
args = list(_args)
kwargs = copy.deepcopy(_kwargs)
reqArgs = entry['args']
routeParams = {}
query = {}
payload = None
kwApiArgs = {}
paginationHandler = None
paginationLimit = None
# There are three formats for calling methods:
# 1. method(v1, v2, payload)
# 2. method(payload, k1=v1, k2=v2)
# 3. method(payload=payload, query=query, params={k1: v1, k2: v2})
if len(kwargs) == 0:
if 'input' in entry and len(args) == len(reqArgs) + 1:
payload = args.pop()
if len(args) != len(reqArgs):
log.debug(args)
log.debug(reqArgs)
raise exceptions.TaskclusterFailure('Incorrect number of positional arguments')
log.debug('Using method(v1, v2, payload) calling convention')
else:
# We're considering kwargs which are the api route parameters to be
# called 'flat' because they're top level keys. We're special
# casing calls which have only api-arg kwargs and possibly a payload
# value and handling them directly.
isFlatKwargs = True
if len(kwargs) == len(reqArgs):
for arg in reqArgs:
if not kwargs.get(arg, False):
isFlatKwargs = False
break
if 'input' in entry and len(args) != 1:
isFlatKwargs = False
if 'input' not in entry and len(args) != 0:
isFlatKwargs = False
else:
pass  # We're using payload=, query= and params=
else:
isFlatKwargs = False
# Now we're going to handle the two types of kwargs. The first is
# 'flat' ones, where the kwargs themselves are the api route parameters.
if isFlatKwargs:
if 'input' in entry:
payload = args.pop()
kwApiArgs = kwargs
log.debug('Using method(payload, k1=v1, k2=v2) calling convention')
warnings.warn(
"The method(payload, k1=v1, k2=v2) calling convention will soon be deprecated",
PendingDeprecationWarning
)
else:
kwApiArgs = kwargs.get('params', {})
payload = kwargs.get('payload', None)
query = kwargs.get('query', {})
paginationHandler = kwargs.get('paginationHandler', None)
paginationLimit = kwargs.get('paginationLimit', None)
log.debug('Using method(payload=payload, query=query, params={k1: v1, k2: v2}) calling convention')
if 'input' in entry and isinstance(payload, type(None)):
raise exceptions.TaskclusterFailure('Payload is required')
# These all need to be rendered down to a string, let's just check that
# they are up front and fail fast
for arg in args:
if not isinstance(arg, six.string_types) and not isinstance(arg, int):
raise exceptions.TaskclusterFailure(
'Positional arg "%s" to %s is not a string or int' % (arg, entry['name']))
for name, arg in six.iteritems(kwApiArgs):
if not isinstance(arg, six.string_types) and not isinstance(arg, int):
raise exceptions.TaskclusterFailure(
'KW arg "%s: %s" to %s is not a string or int' % (name, arg, entry['name']))
if len(args) > 0 and len(kwApiArgs) > 0:
raise exceptions.TaskclusterFailure('Specify either positional or key word arguments')
# We know for sure that if we don't give enough arguments the call
# should fail. We don't yet know if we should fail because of too many
# arguments because we might be overwriting positional ones with kw ones
if len(reqArgs) > len(args) + len(kwApiArgs):
raise exceptions.TaskclusterFailure(
'%s takes %d args, only %d were given' % (
entry['name'], len(reqArgs), len(args) + len(kwApiArgs)))
# We also need to error out when we have more positional args than required
# because we'll need to go through the lists of provided and required args
# at the same time. Not disqualifying early means we'll get IndexErrors if
# there are more positional arguments than required
if len(args) > len(reqArgs):
raise exceptions.TaskclusterFailure('%s called with too many positional args',
entry['name'])
i = 0
for arg in args:
log.debug('Found a positional argument: %s', arg)
routeParams[reqArgs[i]] = arg
i += 1
log.debug('After processing positional arguments, we have: %s', routeParams)
routeParams.update(kwApiArgs)
log.debug('After keyword arguments, we have: %s', routeParams)
if len(reqArgs) != len(routeParams):
errMsg = '%s takes %s args, %s given' % (
entry['name'],
','.join(reqArgs),
routeParams.keys())
log.error(errMsg)
raise exceptions.TaskclusterFailure(errMsg)
for reqArg in reqArgs:
if reqArg not in routeParams:
errMsg = '%s requires a "%s" argument which was not provided' % (
entry['name'], reqArg)
log.error(errMsg)
raise exceptions.TaskclusterFailure(errMsg)
return routeParams, payload, query, paginationHandler, paginationLimit | Given an entry, positional and keyword arguments, figure out what
the query-string options, payload and api arguments are. | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/client.py#L283-L414 |
taskcluster/taskcluster-client.py | taskcluster/client.py | BaseClient._subArgsInRoute | def _subArgsInRoute(self, entry, args):
""" Given a route like "/task/<taskId>/artifacts" and a mapping like
{"taskId": "12345"}, return a string like "/task/12345/artifacts"
"""
route = entry['route']
for arg, val in six.iteritems(args):
toReplace = "<%s>" % arg
if toReplace not in route:
raise exceptions.TaskclusterFailure(
'Arg %s not found in route for %s' % (arg, entry['name']))
val = urllib.parse.quote(str(val).encode("utf-8"), '')
route = route.replace("<%s>" % arg, val)
return route.lstrip('/') | python | def _subArgsInRoute(self, entry, args):
""" Given a route like "/task/<taskId>/artifacts" and a mapping like
{"taskId": "12345"}, return a string like "/task/12345/artifacts"
"""
route = entry['route']
for arg, val in six.iteritems(args):
toReplace = "<%s>" % arg
if toReplace not in route:
raise exceptions.TaskclusterFailure(
'Arg %s not found in route for %s' % (arg, entry['name']))
val = urllib.parse.quote(str(val).encode("utf-8"), '')
route = route.replace("<%s>" % arg, val)
return route.lstrip('/') | Given a route like "/task/<taskId>/artifacts" and a mapping like
{"taskId": "12345"}, return a string like "/task/12345/artifacts" | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/client.py#L416-L431 |
taskcluster/taskcluster-client.py | taskcluster/client.py | BaseClient._hasCredentials | def _hasCredentials(self):
""" Return True, if credentials is given """
cred = self.options.get('credentials')
return (
cred and
'clientId' in cred and
'accessToken' in cred and
cred['clientId'] and
cred['accessToken']
) | python | def _hasCredentials(self):
""" Return True, if credentials is given """
cred = self.options.get('credentials')
return (
cred and
'clientId' in cred and
'accessToken' in cred and
cred['clientId'] and
cred['accessToken']
) | Return True if credentials are given | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/client.py#L433-L442 |
taskcluster/taskcluster-client.py | taskcluster/client.py | BaseClient._makeHttpRequest | def _makeHttpRequest(self, method, route, payload):
""" Make an HTTP Request for the API endpoint. This method wraps
the logic about doing failure retry and passes off the actual work
of doing an HTTP request to another method."""
url = self._constructUrl(route)
log.debug('Full URL used is: %s', url)
hawkExt = self.makeHawkExt()
# Serialize payload if given
if payload is not None:
payload = utils.dumpJson(payload)
# Do a loop of retries
retry = -1  # incremented at the top of the loop, so attempt 1 is retry 0
retries = self.options['maxRetries']
while retry < retries:
retry += 1
# if this isn't the first retry then we sleep
if retry > 0:
time.sleep(utils.calculateSleepTime(retry))
# Construct header
if self._hasCredentials():
sender = mohawk.Sender(
credentials={
'id': self.options['credentials']['clientId'],
'key': self.options['credentials']['accessToken'],
'algorithm': 'sha256',
},
ext=hawkExt if hawkExt else {},
url=url,
content=payload if payload else '',
content_type='application/json' if payload else '',
method=method,
)
headers = {'Authorization': sender.request_header}
else:
log.debug('Not using hawk!')
headers = {}
if payload:
# Set header for JSON if payload is given, note that we serialize
# outside this loop.
headers['Content-Type'] = 'application/json'
log.debug('Making attempt %d', retry)
try:
response = utils.makeSingleHttpRequest(method, url, payload, headers)
except requests.exceptions.RequestException as rerr:
if retry < retries:
log.warn('Retrying because of: %s' % rerr)
continue
# raise a connection exception
raise exceptions.TaskclusterConnectionError(
"Failed to establish connection",
superExc=rerr
)
# Handle non 2xx status code and retry if possible
status = response.status_code
if status == 204:
return None
# Catch retryable errors and go to the beginning of the loop
# to do the retry
if 500 <= status and status < 600 and retry < retries:
log.warn('Retrying because of a %s status code' % status)
continue
# Throw errors for non-retryable errors
if status < 200 or status >= 300:
data = {}
try:
data = response.json()
except Exception:
pass # Ignore JSON errors in error messages
# Find error message
message = "Unknown Server Error"
if isinstance(data, dict):
message = data.get('message')
else:
if status == 401:
message = "Authentication Error"
elif status == 500:
message = "Internal Server Error"
# Raise TaskclusterAuthFailure if this is an auth issue
if status == 401:
raise exceptions.TaskclusterAuthFailure(
message,
status_code=status,
body=data,
superExc=None
)
# Raise TaskclusterRestFailure for all other issues
raise exceptions.TaskclusterRestFailure(
message,
status_code=status,
body=data,
superExc=None
)
# Try to load JSON
try:
return response.json()
except ValueError:
return {"response": response}
# This code-path should be unreachable
assert False, "Error from last retry should have been raised!" | python | def _makeHttpRequest(self, method, route, payload):
""" Make an HTTP Request for the API endpoint. This method wraps
the logic about doing failure retry and passes off the actual work
of doing an HTTP request to another method."""
url = self._constructUrl(route)
log.debug('Full URL used is: %s', url)
hawkExt = self.makeHawkExt()
# Serialize payload if given
if payload is not None:
payload = utils.dumpJson(payload)
# Do a loop of retries
retry = -1  # incremented at the top of the loop, so attempt 1 is retry 0
retries = self.options['maxRetries']
while retry < retries:
retry += 1
# if this isn't the first retry then we sleep
if retry > 0:
time.sleep(utils.calculateSleepTime(retry))
# Construct header
if self._hasCredentials():
sender = mohawk.Sender(
credentials={
'id': self.options['credentials']['clientId'],
'key': self.options['credentials']['accessToken'],
'algorithm': 'sha256',
},
ext=hawkExt if hawkExt else {},
url=url,
content=payload if payload else '',
content_type='application/json' if payload else '',
method=method,
)
headers = {'Authorization': sender.request_header}
else:
log.debug('Not using hawk!')
headers = {}
if payload:
# Set header for JSON if payload is given, note that we serialize
# outside this loop.
headers['Content-Type'] = 'application/json'
log.debug('Making attempt %d', retry)
try:
response = utils.makeSingleHttpRequest(method, url, payload, headers)
except requests.exceptions.RequestException as rerr:
if retry < retries:
log.warn('Retrying because of: %s' % rerr)
continue
# raise a connection exception
raise exceptions.TaskclusterConnectionError(
"Failed to establish connection",
superExc=rerr
)
# Handle non 2xx status code and retry if possible
status = response.status_code
if status == 204:
return None
# Catch retryable errors and go to the beginning of the loop
# to do the retry
if 500 <= status and status < 600 and retry < retries:
log.warn('Retrying because of a %s status code' % status)
continue
# Throw errors for non-retryable errors
if status < 200 or status >= 300:
data = {}
try:
data = response.json()
except Exception:
pass # Ignore JSON errors in error messages
# Find error message
message = "Unknown Server Error"
if isinstance(data, dict):
message = data.get('message')
else:
if status == 401:
message = "Authentication Error"
elif status == 500:
message = "Internal Server Error"
# Raise TaskclusterAuthFailure if this is an auth issue
if status == 401:
raise exceptions.TaskclusterAuthFailure(
message,
status_code=status,
body=data,
superExc=None
)
# Raise TaskclusterRestFailure for all other issues
raise exceptions.TaskclusterRestFailure(
message,
status_code=status,
body=data,
superExc=None
)
# Try to load JSON
try:
return response.json()
except ValueError:
return {"response": response}
# This code-path should be unreachable
assert False, "Error from last retry should have been raised!" | Make an HTTP Request for the API endpoint. This method wraps
the logic about doing failure retry and passes off the actual work
of doing an HTTP request to another method. | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/client.py#L444-L553 |
taskcluster/taskcluster-client.py | taskcluster/aio/asyncclient.py | AsyncBaseClient._makeHttpRequest | async def _makeHttpRequest(self, method, route, payload):
""" Make an HTTP Request for the API endpoint. This method wraps
the logic about doing failure retry and passes off the actual work
of doing an HTTP request to another method."""
url = self._constructUrl(route)
log.debug('Full URL used is: %s', url)
hawkExt = self.makeHawkExt()
# Serialize payload if given
if payload is not None:
payload = utils.dumpJson(payload)
# Do a loop of retries
retry = -1  # incremented at the top of the loop, so attempt 1 is retry 0
retries = self.options['maxRetries']
while retry < retries:
retry += 1
# if this isn't the first retry then we sleep
if retry > 0:
# Compute the delay once so the logged value matches the actual sleep.
snooze = utils.calculateSleepTime(retry)
log.info('Sleeping %0.2f seconds for exponential backoff', snooze)
await asyncio.sleep(snooze)
# Construct header
if self._hasCredentials():
sender = mohawk.Sender(
credentials={
'id': self.options['credentials']['clientId'],
'key': self.options['credentials']['accessToken'],
'algorithm': 'sha256',
},
ext=hawkExt if hawkExt else {},
url=url,
content=payload if payload else '',
content_type='application/json' if payload else '',
method=method,
)
headers = {'Authorization': sender.request_header}
else:
log.debug('Not using hawk!')
headers = {}
if payload:
# Set header for JSON if payload is given, note that we serialize
# outside this loop.
headers['Content-Type'] = 'application/json'
log.debug('Making attempt %d', retry)
try:
response = await asyncutils.makeSingleHttpRequest(
method, url, payload, headers, session=self.session
)
except aiohttp.ClientError as rerr:
if retry < retries:
log.warn('Retrying because of: %s' % rerr)
continue
# raise a connection exception
raise exceptions.TaskclusterConnectionError(
"Failed to establish connection",
superExc=rerr
)
status = response.status
if status == 204:
return None
# Catch retryable errors and go to the beginning of the loop
# to do the retry
if 500 <= status and status < 600 and retry < retries:
log.warn('Retrying because of a %s status code' % status)
continue
# Throw errors for non-retryable errors
if status < 200 or status >= 300:
# Parse messages from errors
data = {}
try:
data = await response.json()
except Exception:
pass # Ignore JSON errors in error messages
# Find error message
message = "Unknown Server Error"
if isinstance(data, dict):
message = data.get('message')
else:
if status == 401:
message = "Authentication Error"
elif status == 500:
message = "Internal Server Error"
else:
message = "Unknown Server Error %s\n%s" % (str(status), str(data)[:1024])
# Raise TaskclusterAuthFailure if this is an auth issue
if status == 401:
raise exceptions.TaskclusterAuthFailure(
message,
status_code=status,
body=data,
superExc=None
)
# Raise TaskclusterRestFailure for all other issues
raise exceptions.TaskclusterRestFailure(
message,
status_code=status,
body=data,
superExc=None
)
# Try to load JSON
try:
await response.release()
return await response.json()
except (ValueError, aiohttp.client_exceptions.ContentTypeError):
return {"response": response}
# This code-path should be unreachable
assert False, "Error from last retry should have been raised!" | python | async def _makeHttpRequest(self, method, route, payload):
""" Make an HTTP Request for the API endpoint. This method wraps
the logic about doing failure retry and passes off the actual work
of doing an HTTP request to another method."""
url = self._constructUrl(route)
log.debug('Full URL used is: %s', url)
hawkExt = self.makeHawkExt()
# Serialize payload if given
if payload is not None:
payload = utils.dumpJson(payload)
# Do a loop of retries
retry = -1  # incremented at the top of the loop, so attempt 1 is retry 0
retries = self.options['maxRetries']
while retry < retries:
retry += 1
# if this isn't the first retry then we sleep
if retry > 0:
snooze = utils.calculateSleepTime(retry)
log.info('Sleeping %0.2f seconds for exponential backoff', snooze)
await asyncio.sleep(snooze)
# Construct header
if self._hasCredentials():
sender = mohawk.Sender(
credentials={
'id': self.options['credentials']['clientId'],
'key': self.options['credentials']['accessToken'],
'algorithm': 'sha256',
},
ext=hawkExt if hawkExt else {},
url=url,
content=payload if payload else '',
content_type='application/json' if payload else '',
method=method,
)
headers = {'Authorization': sender.request_header}
else:
log.debug('Not using hawk!')
headers = {}
if payload:
# Set header for JSON if payload is given, note that we serialize
# outside this loop.
headers['Content-Type'] = 'application/json'
log.debug('Making attempt %d', retry)
try:
response = await asyncutils.makeSingleHttpRequest(
method, url, payload, headers, session=self.session
)
except aiohttp.ClientError as rerr:
if retry < retries:
log.warning('Retrying because of: %s', rerr)
continue
# raise a connection exception
raise exceptions.TaskclusterConnectionError(
"Failed to establish connection",
superExc=rerr
)
status = response.status
if status == 204:
return None
# Catch retryable errors and go to the beginning of the loop
# to do the retry
if 500 <= status < 600 and retry < retries:
log.warning('Retrying because of a %s status code', status)
continue
# Throw errors for non-retryable errors
if status < 200 or status >= 300:
# Parse messages from errors
data = {}
try:
data = await response.json()
except Exception:
pass # Ignore JSON errors in error messages
# Find error message
message = "Unknown Server Error"
if isinstance(data, dict):
message = data.get('message', message)
else:
if status == 401:
message = "Authentication Error"
elif status == 500:
message = "Internal Server Error"
else:
message = "Unknown Server Error %s\n%s" % (str(status), str(data)[:1024])
# Raise TaskclusterAuthFailure if this is an auth issue
if status == 401:
raise exceptions.TaskclusterAuthFailure(
message,
status_code=status,
body=data,
superExc=None
)
# Raise TaskclusterRestFailure for all other issues
raise exceptions.TaskclusterRestFailure(
message,
status_code=status,
body=data,
superExc=None
)
# Try to load JSON
try:
result = await response.json()
await response.release()
return result
except (ValueError, aiohttp.client_exceptions.ContentTypeError):
return {"response": response}
# This code-path should be unreachable
assert False, "Error from last retry should have been raised!" | Make an HTTP Request for the API endpoint. This method wraps
the retry-on-failure logic and passes off the actual work
of doing an HTTP request to another method. | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/asyncclient.py#L107-L223 |
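The loop above logs the delay it is about to take and then sleeps for that same delay. A minimal standalone sketch of the pattern follows; the calculate_sleep_time here is an illustrative stand-in for taskcluster.utils.calculateSleepTime, and its constants are assumptions, not the library's values:

import asyncio
import random

def calculate_sleep_time(attempt):
    # Illustrative exponential backoff with jitter and a cap; the real
    # taskcluster.utils.calculateSleepTime uses its own constants.
    if attempt <= 0:
        return 0.0
    delay = min(0.1 * (2 ** attempt), 30.0)
    return delay * random.uniform(0.75, 1.25)

async def with_retries(do_request, max_retries=5):
    retry = -1
    while retry < max_retries:
        retry += 1
        if retry > 0:
            await asyncio.sleep(calculate_sleep_time(retry))
        try:
            return await do_request()
        except ConnectionError:
            if retry < max_retries:
                continue  # retryable: go around the loop again
            raise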
taskcluster/taskcluster-client.py | taskcluster/awsprovisioner.py | AwsProvisioner.listWorkerTypeSummaries | def listWorkerTypeSummaries(self, *args, **kwargs):
"""
List worker types with details
Return a list of worker types, including some summary information about
current capacity for each. While this list includes all defined worker types,
there may be running EC2 instances for deleted worker types that are not
included here. The list is unordered.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/list-worker-types-summaries-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listWorkerTypeSummaries"], *args, **kwargs) | python | def listWorkerTypeSummaries(self, *args, **kwargs):
"""
List worker types with details
Return a list of worker types, including some summary information about
current capacity for each. While this list includes all defined worker types,
there may be running EC2 instances for deleted worker types that are not
included here. The list is unordered.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/list-worker-types-summaries-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listWorkerTypeSummaries"], *args, **kwargs) | List worker types with details
Return a list of worker types, including some summary information about
current capacity for each. While this list includes all defined worker types,
there may be running EC2 instances for deleted worker types that are not
included here. The list is unordered.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/list-worker-types-summaries-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/awsprovisioner.py#L51-L65 |
taskcluster/taskcluster-client.py | taskcluster/awsprovisioner.py | AwsProvisioner.workerTypeLastModified | def workerTypeLastModified(self, *args, **kwargs):
"""
Get Worker Type Last Modified Time
This method is provided to allow workers to see when they were
last modified. The value provided through UserData can be
compared against this value to see if changes have been made.
If the worker type definition has not been changed, the date
should be identical, as it is the same stored value.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-last-modified.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["workerTypeLastModified"], *args, **kwargs) | python | def workerTypeLastModified(self, *args, **kwargs):
"""
Get Worker Type Last Modified Time
This method is provided to allow workers to see when they were
last modified. The value provided through UserData can be
compared against this value to see if changes have been made.
If the worker type definition has not been changed, the date
should be identical, as it is the same stored value.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-last-modified.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["workerTypeLastModified"], *args, **kwargs) | Get Worker Type Last Modified Time
This method is provided to allow workers to see when they were
last modified. The value provided through UserData can be
compared against this value to see if changes have been made.
If the worker type definition has not been changed, the date
should be identical, as it is the same stored value.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-last-modified.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/awsprovisioner.py#L129-L144 |
taskcluster/taskcluster-client.py | taskcluster/awsprovisioner.py | AwsProvisioner.removeWorkerType | def removeWorkerType(self, *args, **kwargs):
"""
Delete Worker Type
Delete a worker type definition. This method will only delete
the worker type definition from the storage table. The actual
deletion will be handled by a background worker. As soon as this
method is called for a worker type, the background worker will
immediately submit requests to cancel all spot requests for this
worker type as well as killing all instances regardless of their
state. If you want to gracefully remove a worker type, you must
either ensure that no tasks are created with that worker type name
or you could theoretically set maxCapacity to 0, though this is
not a supported or tested action.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["removeWorkerType"], *args, **kwargs) | python | def removeWorkerType(self, *args, **kwargs):
"""
Delete Worker Type
Delete a worker type definition. This method will only delete
the worker type definition from the storage table. The actual
deletion will be handled by a background worker. As soon as this
method is called for a worker type, the background worker will
immediately submit requests to cancel all spot requests for this
worker type as well as killing all instances regardless of their
state. If you want to gracefully remove a worker type, you must
either ensure that no tasks are created with that worker type name
or you could theoretically set maxCapacity to 0, though this is
not a supported or tested action.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["removeWorkerType"], *args, **kwargs) | Delete Worker Type
Delete a worker type definition. This method will only delete
the worker type definition from the storage table. The actual
deletion will be handled by a background worker. As soon as this
method is called for a worker type, the background worker will
immediately submit requests to cancel all spot requests for this
worker type as well as killing all instances regardless of their
state. If you want to gracefully remove a worker type, you must
either ensure that no tasks are created with that worker type name
or you could theoretically set maxCapacity to 0, though this is
not a supported or tested action.
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/awsprovisioner.py#L163-L181 |
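A hedged usage sketch for the method above; the credentials and the worker type name are placeholders, and the construction follows the usual options-dict pattern of this client library:

import taskcluster

provisioner = taskcluster.AwsProvisioner({
    'credentials': {'clientId': 'my-client-id', 'accessToken': 'my-token'},  # placeholders
})

# Deletes only the stored definition; a background worker then cancels
# spot requests and kills instances for this worker type.
provisioner.removeWorkerType('my-worker-type')  # placeholder name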
taskcluster/taskcluster-client.py | taskcluster/awsprovisioner.py | AwsProvisioner.getSecret | def getSecret(self, *args, **kwargs):
"""
Get a Secret
Retrieve a secret from storage. The result contains any passwords or
other restricted information verbatim as well as a temporary credential
based on the scopes specified when the secret was created.
It is important that this secret is deleted by the consumer (`removeSecret`),
or else the secrets will be visible to any process which can access the
user data associated with the instance.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-secret-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["getSecret"], *args, **kwargs) | python | def getSecret(self, *args, **kwargs):
"""
Get a Secret
Retrieve a secret from storage. The result contains any passwords or
other restricted information verbatim as well as a temporary credential
based on the scopes specified when the secret was created.
It is important that this secret is deleted by the consumer (`removeSecret`),
or else the secrets will be visible to any process which can access the
user data associated with the instance.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-secret-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["getSecret"], *args, **kwargs) | Get a Secret
Retrieve a secret from storage. The result contains any passwords or
other restricted information verbatim as well as a temporary credential
based on the scopes specified when the secret was created.
It is important that this secret is deleted by the consumer (`removeSecret`),
or else the secrets will be visible to any process which can access the
user data associated with the instance.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-secret-response.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/awsprovisioner.py#L217-L234 |
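The delete-after-read discipline the docstring asks for might look like the following sketch; the token is a placeholder for the value baked into the instance's user data:

import taskcluster

provisioner = taskcluster.AwsProvisioner({
    'credentials': {'clientId': 'my-client-id', 'accessToken': 'my-token'},  # placeholders
})

token = 'secret-token-from-user-data'  # placeholder
secret = provisioner.getSecret(token)
try:
    pass  # ... use the secret contents here ...
finally:
    # Remove promptly so the secret stops being reachable via user data.
    provisioner.removeSecret(token)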
taskcluster/taskcluster-client.py | taskcluster/awsprovisioner.py | AwsProvisioner.instanceStarted | def instanceStarted(self, *args, **kwargs):
"""
Report an instance starting
An instance will report in by giving its instance id as well
as its security token. The token is checked to ensure that it
matches a real, existing token, so that random machines do not
check in. We could generate a different token,
but that seems like overkill.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["instanceStarted"], *args, **kwargs) | python | def instanceStarted(self, *args, **kwargs):
"""
Report an instance starting
An instance will report in by giving its instance id as well
as its security token. The token is checked to ensure that it
matches a real, existing token, so that random machines do not
check in. We could generate a different token,
but that seems like overkill.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["instanceStarted"], *args, **kwargs) | Report an instance starting
An instance will report in by giving its instance id as well
as its security token. The token is checked to ensure that it
matches a real, existing token, so that random machines do not
check in. We could generate a different token,
but that seems like overkill.
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/awsprovisioner.py#L236-L249 |
taskcluster/taskcluster-client.py | taskcluster/awsprovisioner.py | AwsProvisioner.state | def state(self, *args, **kwargs):
"""
Get AWS State for a worker type
Return the state of a given worker type as stored by the provisioner.
This state is stored as two lists: one for running instances and one for
pending requests. The `summary` property contains an updated summary
similar to that returned from `listWorkerTypeSummaries`.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["state"], *args, **kwargs) | python | def state(self, *args, **kwargs):
"""
Get AWS State for a worker type
Return the state of a given worker type as stored by the provisioner.
This state is stored as two lists: one for running instances and one for
pending requests. The `summary` property contains an updated summary
similar to that returned from `listWorkerTypeSummaries`.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["state"], *args, **kwargs) | Get AWS State for a worker type
Return the state of a given worker type as stored by the provisioner.
This state is stored as two lists: one for running instances and one for
pending requests. The `summary` property contains an updated summary
similar to that returned from `listWorkerTypeSummaries`.
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/awsprovisioner.py#L284-L296 |
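A small sketch of reading the state; per the docstring, the response carries a `summary` property shaped like a listWorkerTypeSummaries entry (credentials and the worker type name are placeholders):

import taskcluster

provisioner = taskcluster.AwsProvisioner({
    'credentials': {'clientId': 'my-client-id', 'accessToken': 'my-token'},  # placeholders
})

state = provisioner.state('my-worker-type')  # placeholder worker type
print(state['summary'])  # documented: mirrors a listWorkerTypeSummaries entry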
taskcluster/taskcluster-client.py | taskcluster/awsprovisioner.py | AwsProvisioner.backendStatus | def backendStatus(self, *args, **kwargs):
"""
Backend Status
This endpoint shows when the provisioner last checked in.
A check-in is done through the Dead Man's Snitch API at the
conclusion of each provisioning iteration, and is used to tell
whether the background provisioning process is still running.
**Warning** this API endpoint is **not stable**.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/backend-status-response.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["backendStatus"], *args, **kwargs) | python | def backendStatus(self, *args, **kwargs):
"""
Backend Status
This endpoint shows when the provisioner last checked in.
A check-in is done through the Dead Man's Snitch API at the
conclusion of each provisioning iteration, and is used to tell
whether the background provisioning process is still running.
**Warning** this API endpoint is **not stable**.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/backend-status-response.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["backendStatus"], *args, **kwargs) | Backend Status
This endpoint shows when the provisioner last checked in.
A check-in is done through the Dead Man's Snitch API at the
conclusion of each provisioning iteration, and is used to tell
whether the background provisioning process is still running.
**Warning** this API endpoint is **not stable**.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/backend-status-response.json#``
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/awsprovisioner.py#L298-L315 |
taskcluster/taskcluster-client.py | taskcluster/aio/secrets.py | Secrets.set | async def set(self, *args, **kwargs):
"""
Set Secret
Set the secret associated with some key. If the secret already exists, it is
updated instead.
This method takes input: ``v1/secret.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["set"], *args, **kwargs) | python | async def set(self, *args, **kwargs):
"""
Set Secret
Set the secret associated with some key. If the secret already exists, it is
updated instead.
This method takes input: ``v1/secret.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["set"], *args, **kwargs) | Set Secret
Set the secret associated with some key. If the secret already exists, it is
updated instead.
This method takes input: ``v1/secret.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/secrets.py#L42-L54 |
taskcluster/taskcluster-client.py | taskcluster/aio/secrets.py | Secrets.remove | async def remove(self, *args, **kwargs):
"""
Delete Secret
Delete the secret associated with some key.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["remove"], *args, **kwargs) | python | async def remove(self, *args, **kwargs):
"""
Delete Secret
Delete the secret associated with some key.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["remove"], *args, **kwargs) | Delete Secret
Delete the secret associated with some key.
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/secrets.py#L56-L65 |
taskcluster/taskcluster-client.py | taskcluster/aio/secrets.py | Secrets.get | async def get(self, *args, **kwargs):
"""
Read Secret
Read the secret associated with some key. If the secret has recently
expired, the response code 410 is returned. If the caller lacks the
scope necessary to get the secret, the call will fail with a 403 code
regardless of whether the secret exists.
This method gives output: ``v1/secret.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["get"], *args, **kwargs) | python | async def get(self, *args, **kwargs):
"""
Read Secret
Read the secret associated with some key. If the secret has recently
expired, the response code 410 is returned. If the caller lacks the
scope necessary to get the secret, the call will fail with a 403 code
regardless of whether the secret exists.
This method gives output: ``v1/secret.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["get"], *args, **kwargs) | Read Secret
Read the secret associated with some key. If the secret has recently
expired, the response code 410 is returned. If the caller lacks the
scope necessary to get the secret, the call will fail with a 403 code
regardless of whether the secret exists.
This method gives output: ``v1/secret.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/secrets.py#L67-L81 |
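A sketch of the set/get/remove round trip described by these methods, assuming the async client is importable as taskcluster.aio.Secrets and that the v1/secret.json# payload carries `secret` and `expires` fields (the credentials and secret name are placeholders):

import asyncio
import taskcluster.aio

async def main():
    secrets = taskcluster.aio.Secrets({
        'credentials': {'clientId': 'my-client-id', 'accessToken': 'my-token'},  # placeholders
    })
    name = 'garbage/example-secret'  # hypothetical secret name
    await secrets.set(name, {
        'secret': {'api-key': 'hunter2'},
        'expires': '3017-01-01T00:00:00.000Z',
    })
    stored = await secrets.get(name)
    print(stored['secret'])
    await secrets.remove(name)

asyncio.get_event_loop().run_until_complete(main())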
jart/fabulous | fabulous/prompt.py | input_object | def input_object(prompt_text, cast = None, default = None,
prompt_ext = ': ', castarg = [], castkwarg = {}):
"""Gets input from the command line and validates it.
prompt_text
A string. Used to prompt the user. Do not include a trailing
space.
prompt_ext
Added on to the prompt at the end. At the moment this must not
include any control characters because it is sent directly to
raw_input.
cast
This can be any callable object (class, function, type, etc). It
simply calls the cast with the given arguments and returns the
result. If a ValueError is raised, it
will output an error message and prompt the user again.
Because some builtin python objects don't do casting in the way
that we might like, you can easily write a wrapper function that
looks at the input and returns the appropriate object or exception.
Look in the cast submodule for examples.
If cast is None, then it will do nothing (and you will have a string)
default
The function returns this value if the user types nothing in. This
can be used to cancel the input, so to speak.
castarg, castkwarg
list and dictionary. Extra arguments passed on to the cast.
"""
while True:
stdout.write(prompt_text)
value = stdout.raw_input(prompt_ext)
if value == '': return default
try:
if cast is not None: value = cast(value, *castarg, **castkwarg)
except ValueError as details:
if cast in NICE_INPUT_ERRORS: # see comment above this constant
stderr.write(ERROR_MESSAGE % (NICE_INPUT_ERRORS[cast] % details))
else: stderr.write(ERROR_MESSAGE % (DEFAULT_INPUT_ERRORS % str(details)))
continue
return value | python | def input_object(prompt_text, cast = None, default = None,
prompt_ext = ': ', castarg = [], castkwarg = {}):
"""Gets input from the command line and validates it.
prompt_text
A string. Used to prompt the user. Do not include a trailing
space.
prompt_ext
Added on to the prompt at the end. At the moment this must not
include any control characters because it is sent directly to
raw_input.
cast
This can be any callable object (class, function, type, etc). It
simply calls the cast with the given arguments and returns the
result. If a ValueError is raised, it
will output an error message and prompt the user again.
Because some builtin python objects don't do casting in the way
that we might like, you can easily write a wrapper function that
looks at the input and returns the appropriate object or exception.
Look in the cast submodule for examples.
If cast is None, then it will do nothing (and you will have a string)
default
The function returns this value if the user types nothing in. This
can be used to cancel the input, so to speak.
castarg, castkwarg
list and dictionary. Extra arguments passed on to the cast.
"""
while True:
stdout.write(prompt_text)
value = stdout.raw_input(prompt_ext)
if value == '': return default
try:
if cast is not None: value = cast(value, *castarg, **castkwarg)
except ValueError as details:
if cast in NICE_INPUT_ERRORS: # see comment above this constant
stderr.write(ERROR_MESSAGE % (NICE_INPUT_ERRORS[cast] % details))
else: stderr.write(ERROR_MESSAGE % (DEFAULT_INPUT_ERRORS % str(details)))
continue
return value | Gets input from the command line and validates it.
prompt_text
A string. Used to prompt the user. Do not include a trailing
space.
prompt_ext
Added on to the prompt at the end. At the moment this must not
include any control characters because it is sent directly to
raw_input.
cast
This can be any callable object (class, function, type, etc). It
simply calls the cast with the given arguements and returns the
result. If a ValueError is raised, it
will output an error message and prompt the user again.
Because some builtin python objects don't do casting in the way
that we might like you can easily write a wrapper function that
looks and the input and returns the appropriate object or exception.
Look in the cast submodule for examples.
If cast is None, then it will do nothing (and you will have a string)
default
The function returns this value if the user types nothing in. This
can be used to cancel the input, so to speak.
castarg, castkwarg
list and dictionary. Extra arguments passed on to the cast. | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/prompt.py#L38-L82 |
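For example, int already raises ValueError on bad input, so it works directly as a cast; this hypothetical prompt keeps asking until it gets a valid integer and returns None on an empty line:

from fabulous.prompt import input_object

age = input_object('Enter your age', cast=int, default=None)
if age is None:
    print('no age given')
else:
    print('in ten years you will be', age + 10)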
jart/fabulous | fabulous/prompt.py | query | def query(question, values, default=None, list_values = False, ignorecase = True ):
"""Preset a few options
The question argument is a string, nothing magical.
The values argument accepts input in two different forms. The simpler form
(a tuple with strings) looks like:
.. code-block:: python
('Male','Female')
And it will pop up a question asking the user for a gender and requiring
the user to enter either 'male' or 'female' (case doesn't matter unless
you set the ``ignorecase`` argument to False).
The other form is something like:
.. code-block:: python
({'values':('Male','M'),'fg':'cyan'},
{'values':('Female','F'),'fg':'magenta'})
This will pop up a question with Male/Female (each with appropriate
colouring). Additionally, if the user types in just 'M', it will be
treated as if 'Male' was typed in. The first item in the 'values' tuple
is treated as default and is the one that is returned by the function
if the user chooses one in that group.
In addition the function can handle non-string objects quite fine. It
simply displays the output of object.__str__() and compares the user's input
against that. So the code
.. code-block:: python
query("Python rocks? ",(True, False))
will return a bool (True) when the user types in the string 'True' (Of
course there isn't any other reasonable answer than True anyways :P)
``default`` is the value the function returns if the user types nothing in. This
can be used to cancel the input, so to speak.
Using list_values = True will display a list, with descriptions printed out
from the 'desc' keyword.
"""
values = list(values)
for i in range(len(values)):
if not isinstance(values[i], dict):
values[i] = {'values': [values[i]]}
try:
import readline, rlcomplete
wordlist = [ str(v) for value in values
for v in value['values']]
completer = rlcomplete.ListCompleter(wordlist, ignorecase)
readline.parse_and_bind("tab: complete")
readline.set_completer(completer.complete)
except ImportError:
pass
valuelist = []
for item in values:
entry = ( display('bright', item.get('fg'), item.get('bg')) +
str(item['values'][0]) + display(['default']) )
if str(item['values'][0]) == str(default): entry = '['+entry+']'
if list_values: entry += ' : ' + item['desc']
valuelist.append(entry)
if list_values: question += os.linesep + os.linesep.join(valuelist) + os.linesep
else: question += ' (' + '/'.join(valuelist) + ')'
return input_object(question, cast = query_cast, default=default,
castarg=[values,ignorecase]) | python | def query(question, values, default=None, list_values = False, ignorecase = True ):
"""Preset a few options
The question argument is a string, nothing magical.
The values argument accepts input in two different forms. The simpler form
(a tuple with strings) looks like:
.. code-block:: python
('Male','Female')
And it will pop up a question asking the user for a gender and requiring
the user to enter either 'male' or 'female' (case doesn't matter unless
you set the ``ignorecase`` argument to False).
The other form is something like:
.. code-block:: python
({'values':('Male','M'),'fg':'cyan'},
{'values':('Female','F'),'fg':'magenta'})
This will pop up a question with Male/Female (each with appropriate
colouring). Additionally, if the user types in just 'M', it will be
treated as if 'Male' was typed in. The first item in the 'values' tuple
is treated as default and is the one that is returned by the function
if the user chooses one in that group.
In addition the function can handle non-string objects quite fine. It
simply displays the output of object.__str__() and compares the user's input
against that. So the code
.. code-block:: python
query("Python rocks? ",(True, False))
will return a bool (True) when the user types in the string 'True' (Of
course there isn't any other reasonable answer than True anyways :P)
``default`` is the value the function returns if the user types nothing in. This
can be used to cancel the input, so to speak.
Using list_values = True will display a list, with descriptions printed out
from the 'desc' keyword.
"""
values = list(values)
for i in range(len(values)):
if not isinstance(values[i], dict):
values[i] = {'values': [values[i]]}
try:
import readline, rlcomplete
wordlist = [ str(v) for value in values
for v in value['values']]
completer = rlcomplete.ListCompleter(wordlist, ignorecase)
readline.parse_and_bind("tab: complete")
readline.set_completer(completer.complete)
except ImportError:
pass
valuelist = []
for item in values:
entry = ( display('bright', item.get('fg'), item.get('bg')) +
str(item['values'][0]) + display(['default']) )
if str(item['values'][0]) == str(default): entry = '['+entry+']'
if list_values: entry += ' : ' + item['desc']
valuelist.append(entry)
if list_values: question += os.linesep + os.linesep.join(valuelist) + os.linesep
else: question += ' (' + '/'.join(valuelist) + ')'
return input_object(question, cast = query_cast, default=default,
castarg=[values,ignorecase]) | Preset a few options
The question argument is a string, nothing magical.
The values argument accepts input in two different forms. The simpler form
(a tuple with strings) looks like:
.. code-block:: python
('Male','Female')
And it will pop up a question asking the user for a gender and requiring
the user to enter either 'male' or 'female' (case doesn't matter unless
you set the ``ignorecase`` argument to False).
The other form is something like:
.. code-block:: python
({'values':('Male','M'),'fg':'cyan'},
{'values':('Female','F'),'fg':'magenta'})
This will pop up a question with Male/Female (each with appropriate
colouring). Additionally, if the user types in just 'M', it will be
treated as if 'Male' was typed in. The first item in the 'values' tuple
is treated as default and is the one that is returned by the function
if the user chooses one in that group.
In addition the function can handle non-string objects quite fine. It
simply displays the output of object.__str__() and compares the user's input
against that. So the code
.. code-block:: python
query("Python rocks? ",(True, False))
will return a bool (True) when the user types in the string 'True' (Of
course there isn't any other reasonable answer than True anyways :P)
``default`` is the value the function returns if the user types nothing in. This
can be used to cancel the input, so to speak.
Using list_values = True will display a list, with descriptions printed out
from the 'desc' keyword. | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/prompt.py#L84-L151 |
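The two calling conventions from the docstring, as a short sketch (the prompts and options are illustrative):

from fabulous.prompt import query

# Simple form: the user's (case-insensitive) answer is returned as-is.
likes = query('Python rocks?', (True, False), default=True)

# Dict form: typing 'M' or 'Male' both return 'Male', the group's first value.
gender = query('Gender', ({'values': ('Male', 'M'), 'fg': 'cyan'},
                          {'values': ('Female', 'F'), 'fg': 'magenta'}))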
jart/fabulous | fabulous/prompt.py | query_cast | def query_cast(value, answers, ignorecase = False):
"""A cast function for query
Answers should look something like they do in query
"""
if ignorecase: value = value.lower()
for item in answers:
for a in item['values']:
if ignorecase and (value == str(a).lower()):
return item['values'][0]
elif value == a:
return item['values'][0]
raise ValueError("Response '%s' not understood, please try again." % value) | python | def query_cast(value, answers, ignorecase = False):
"""A cast function for query
Answers should look something like they do in query
"""
if ignorecase: value = value.lower()
for item in answers:
for a in item['values']:
if ignorecase and (value == str(a).lower()):
return item['values'][0]
elif value == a:
return item['values'][0]
raise ValueError("Response '%s' not understood, please try again." % value) | A cast function for query
Answers should look something like they do in query | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/prompt.py#L153-L165 |
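Called directly, the cast resolves shorthand answers to the canonical first value of their group and raises ValueError for anything it cannot match:

from fabulous.prompt import query_cast

answers = [{'values': ('Male', 'M')}, {'values': ('Female', 'F')}]
print(query_cast('m', answers, ignorecase=True))  # -> 'Male'
print(query_cast('Female', answers))              # -> 'Female'
query_cast('x', answers)                          # raises ValueError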
jart/fabulous | fabulous/prompt.py | file_chooser | def file_chooser(prompt_text = "Enter File: ", default=None, filearg=[], filekwarg={}):
"""A simple tool to get a file from the user. Takes keyworded arguemnts
and passes them to open().
If the user enters nothing the function will return the ``default`` value.
Otherwise it continues to prompt the user until it gets a decent response.
filekwarg may contain arguments passed on to ``open()``.
"""
try:
import readline, rlcomplete
completer = rlcomplete.PathCompleter()
readline.set_completer_delims(completer.delims)
readline.parse_and_bind("tab: complete")
readline.set_completer(completer.complete)
except ImportError:
pass
while True:
f = raw_input(prompt_text)
if f == '': return default
f = os.path.expanduser(f)
if len(f) != 0 and f[0] == os.path.sep:
f = os.path.abspath(f)
try:
return open(f, *filearg, **filekwarg)
except IOError as e:
stderr.write(ERROR_MESSAGE % ("unable to open %s : %s" % (f, e))) | python | def file_chooser(prompt_text = "Enter File: ", default=None, filearg=[], filekwarg={}):
"""A simple tool to get a file from the user. Takes keyworded arguemnts
and passes them to open().
If the user enters nothing the function will return the ``default`` value.
Otherwise it continues to prompt the user until it gets a decent response.
filekwarg may contain arguments passed on to ``open()``.
"""
try:
import readline, rlcomplete
completer = rlcomplete.PathCompleter()
readline.set_completer_delims(completer.delims)
readline.parse_and_bind("tab: complete")
readline.set_completer(completer.complete)
except ImportError:
pass
while True:
f = raw_input(prompt_text)
if f == '': return default
f = os.path.expanduser(f)
if len(f) != 0 and f[0] == os.path.sep:
f = os.path.abspath(f)
try:
return open(f, *filearg, **filekwarg)
except IOError as e:
stderr.write(ERROR_MESSAGE % ("unable to open %s : %s" % (f, e))) | A simple tool to get a file from the user. Takes keyword arguments
and passes them to open().
If the user enters nothing the function will return the ``default`` value.
Otherwise it continues to prompt the user until it gets a decent response.
filekwarg may contain arguments passed on to ``open()``. | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/prompt.py#L167-L193 |
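A usage sketch; the prompt text is arbitrary, and because filekwarg is forwarded to open(), the mode can be set this way:

from fabulous.prompt import file_chooser

f = file_chooser('Log file: ', filekwarg={'mode': 'rb'})
if f is None:
    print('no file chosen')
else:
    print(f.read(16))
    f.close()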
taskcluster/taskcluster-client.py | taskcluster/hooks.py | Hooks.getHookStatus | def getHookStatus(self, *args, **kwargs):
"""
Get hook status
This endpoint will return the current status of the hook. This represents a
snapshot in time and may vary from one call to the next.
This method is deprecated in favor of listLastFires.
This method gives output: ``v1/hook-status.json#``
This method is ``deprecated``
"""
return self._makeApiCall(self.funcinfo["getHookStatus"], *args, **kwargs) | python | def getHookStatus(self, *args, **kwargs):
"""
Get hook status
This endpoint will return the current status of the hook. This represents a
snapshot in time and may vary from one call to the next.
This method is deprecated in favor of listLastFires.
This method gives output: ``v1/hook-status.json#``
This method is ``deprecated``
"""
return self._makeApiCall(self.funcinfo["getHookStatus"], *args, **kwargs) | Get hook status
This endpoint will return the current status of the hook. This represents a
snapshot in time and may vary from one call to the next.
This method is deprecated in favor of listLastFires.
This method gives output: ``v1/hook-status.json#``
This method is ``deprecated`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/hooks.py#L95-L109 |
taskcluster/taskcluster-client.py | taskcluster/hooks.py | Hooks.createHook | def createHook(self, *args, **kwargs):
"""
Create a hook
This endpoint will create a new hook.
The caller's credentials must include the role that will be used to
create the task. That role must satisfy task.scopes as well as the
necessary scopes to add the task to the queue.
This method takes input: ``v1/create-hook-request.json#``
This method gives output: ``v1/hook-definition.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["createHook"], *args, **kwargs) | python | def createHook(self, *args, **kwargs):
"""
Create a hook
This endpoint will create a new hook.
The caller's credentials must include the role that will be used to
create the task. That role must satisfy task.scopes as well as the
necessary scopes to add the task to the queue.
This method takes input: ``v1/create-hook-request.json#``
This method gives output: ``v1/hook-definition.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["createHook"], *args, **kwargs) | Create a hook
This endpoint will create a new hook.
The caller's credentials must include the role that will be used to
create the task. That role must satisfy task.scopes as well as the
necessary scopes to add the task to the queue.
This method takes input: ``v1/create-hook-request.json#``
This method gives output: ``v1/hook-definition.json#``
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/hooks.py#L111-L128 |
taskcluster/taskcluster-client.py | taskcluster/hooks.py | Hooks.listLastFires | def listLastFires(self, *args, **kwargs):
"""
Get information about recent hook fires
This endpoint will return information about the last few times this hook has been
fired, including whether the hook was fired successfully or not
This method gives output: ``v1/list-lastFires-response.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["listLastFires"], *args, **kwargs) | python | def listLastFires(self, *args, **kwargs):
"""
Get information about recent hook fires
This endpoint will return information about the last few times this hook has been
fired, including whether the hook was fired successfully or not
This method gives output: ``v1/list-lastFires-response.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["listLastFires"], *args, **kwargs) | Get information about recent hook fires
This endpoint will return information about the last few times this hook has been
fired, including whether the hook was fired successfully or not
This method gives output: ``v1/list-lastFires-response.json#``
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/hooks.py#L223-L235 |
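A hedged sketch of calling the method above; the hook identifiers and credentials are placeholders, and the assumption that the response wraps its entries in a `lastFires` list is inferred from the schema name, so check v1/list-lastFires-response.json# before relying on it:

import taskcluster

hooks = taskcluster.Hooks({
    'credentials': {'clientId': 'my-client-id', 'accessToken': 'my-token'},  # placeholders
})

fires = hooks.listLastFires('my-hook-group', 'my-hook')  # placeholder ids
for fire in fires.get('lastFires', []):  # assumed response key
    print(fire)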