| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q10200
|
rotating_cube
|
train
|
import time  # required by the frame-timing logic below

def rotating_cube(degree_change=3, frame_rate=3):
"""Rotating cube program
How it works:
1. Create two imaginary ellipses
2. Sized to fit in the top third and bottom third of screen
3. Create four imaginary points on each ellipse
4. Make those points the top and bottom corners of your cube
5. Connect the lines and render
6. Rotate the points on the ellipses and repeat
"""
degrees = 0
while True:
t1 = time.time()
with Frame() as frame:
oval_width = frame.width
oval_height = frame.height / 3.0
cube_height = int(oval_height * 2)
(p1_x, p1_y) = ellipse_point(degrees, oval_width, oval_height)
(p2_x, p2_y) = ellipse_point(degrees + 90, oval_width, oval_height)
(p3_x, p3_y) = ellipse_point(degrees + 180, oval_width, oval_height)
(p4_x, p4_y) = ellipse_point(degrees + 270, oval_width, oval_height)
degrees = (degrees + degree_change) % 360
# connect square thing at top
frame.line(p1_x, p1_y, p2_x, p2_y)
frame.line(p2_x, p2_y, p3_x, p3_y)
frame.line(p3_x, p3_y, p4_x, p4_y)
frame.line(p4_x, p4_y, p1_x, p1_y)
# connect top to bottom
frame.line(p1_x, p1_y, p1_x, p1_y + cube_height)
frame.line(p2_x, p2_y, p2_x, p2_y + cube_height)
frame.line(p3_x, p3_y, p3_x, p3_y + cube_height)
frame.line(p4_x, p4_y, p4_x, p4_y + cube_height)
# connect square thing at bottom
frame.line(p1_x, p1_y + cube_height, p2_x, p2_y + cube_height)
frame.line(p2_x, p2_y + cube_height, p3_x, p3_y + cube_height)
frame.line(p3_x, p3_y + cube_height, p4_x, p4_y + cube_height)
frame.line(p4_x, p4_y + cube_height, p1_x, p1_y + cube_height)
        elapsed = (time.time() - t1)
        # sleep off the rest of this frame's time budget, clamped at zero
        # so an overrunning render doesn't cause an extra-long sleep
        time.sleep(max(0.0, 1.0 / frame_rate - elapsed))
|
python
|
{
"resource": ""
}
|
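The row above calls an `ellipse_point` helper that this row does not include. A minimal sketch of what it plausibly looks like, assuming angles in degrees and an ellipse inscribed in the given width and height:

```python
import math

def ellipse_point(degrees, oval_width, oval_height):
    """Hypothetical helper: integer point on an ellipse centered at
    (oval_width / 2, oval_height / 2), for an angle given in degrees."""
    rad = math.radians(degrees)
    x = (oval_width / 2.0) * (1.0 + math.cos(rad))
    y = (oval_height / 2.0) * (1.0 + math.sin(rad))
    return (int(x), int(y))
```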
q10201
|
Frame.line
|
train
|
def line(self, x0, y0, x1, y1, c='*'):
r"""Draws a line
Who would have thought this would be so complicated? Thanks
again Wikipedia_ <3
.. _Wikipedia: http://en.wikipedia.org/wiki/Bresenham's_line_algorithm
"""
steep = abs(y1 - y0) > abs(x1 - x0)
if steep:
(x0, y0) = (y0, x0)
(x1, y1) = (y1, x1)
if x0 > x1:
(x0, x1) = (x1, x0)
(y0, y1) = (y1, y0)
deltax = x1 - x0
deltay = abs(y1 - y0)
    error = deltax // 2  # integer error term, per the integer-only algorithm
y = y0
if y0 < y1:
ystep = 1
else:
ystep = -1
    for x in range(x0, x1 + 1):  # x1 is inclusive per the algorithm
if steep:
self[y, x] = c
else:
self[x, y] = c
error = error - deltay
if error < 0:
y = y + ystep
error = error + deltax
|
python
|
{
"resource": ""
}
|
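The loop above follows the Wikipedia pseudocode it cites; as a self-contained check, here is the same integer Bresenham walk as a plain generator, with no `Frame` dependency:

```python
def bresenham(x0, y0, x1, y1):
    """Yield the integer points of a line from (x0, y0) to (x1, y1),
    mirroring the loop in Frame.line above (endpoint included)."""
    steep = abs(y1 - y0) > abs(x1 - x0)
    if steep:
        (x0, y0), (x1, y1) = (y0, x0), (y1, x1)
    if x0 > x1:
        (x0, x1), (y0, y1) = (x1, x0), (y1, y0)
    deltax, deltay = x1 - x0, abs(y1 - y0)
    error = deltax // 2
    y, ystep = y0, (1 if y0 < y1 else -1)
    for x in range(x0, x1 + 1):
        yield (y, x) if steep else (x, y)
        error -= deltay
        if error < 0:
            y += ystep
            error += deltax

print(list(bresenham(0, 0, 5, 3)))
# [(0, 0), (1, 1), (2, 1), (3, 2), (4, 2), (5, 3)]
```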
q10202
|
EC2Manager.terminateWorkerType
|
train
|
def terminateWorkerType(self, *args, **kwargs):
"""
Terminate all resources from a worker type
Terminate all instances for this worker type
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["terminateWorkerType"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10203
|
EC2Manager.allState
|
train
|
def allState(self, *args, **kwargs):
"""
List out the entire internal state
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["allState"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10204
|
Queue.task
|
train
|
def task(self, *args, **kwargs):
"""
Get Task Definition
This end-point will return the task-definition. Notice that the task
definition may have been modified by queue, if an optional property is
not specified the queue may provide a default value.
This method gives output: ``v1/task.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["task"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10205
|
Queue.scheduleTask
|
train
|
def scheduleTask(self, *args, **kwargs):
"""
Schedule Defined Task
scheduleTask will schedule a task to be executed, even if it has
unresolved dependencies. A task would otherwise only be scheduled if
its dependencies were resolved.
This is useful if you have defined a task that depends on itself or on
some other task that has not been resolved, but you wish the task to be
scheduled immediately.
This will announce the task as pending and workers will be allowed to
claim it and resolve the task.
**Note** this operation is **idempotent** and will not fail or complain
if called with a `taskId` that is already scheduled, or even resolved.
To reschedule a task previously resolved, use `rerunTask`.
This method gives output: ``v1/task-status-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["scheduleTask"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
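A usage sketch for these generated Queue wrappers; the rootUrl, credentials, and taskId below are placeholders, and the client construction follows the taskcluster Python client's options dict:

```python
from taskcluster import Queue

queue = Queue({
    'rootUrl': 'https://tc.example.com',  # placeholder deployment URL
    'credentials': {'clientId': '...', 'accessToken': '...'},
})
# Schedule a previously defined task; returns a task status structure.
status = queue.scheduleTask('fXcbozpTQHGxVPfAjFjSZQ')  # hypothetical taskId
print(status['status']['state'])
```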
q10206
|
Queue.rerunTask
|
train
|
def rerunTask(self, *args, **kwargs):
"""
Rerun a Resolved Task
This method _reruns_ a previously resolved task, even if it was
_completed_. This is useful if your task completes unsuccessfully, and
you just want to run it from scratch again. This will also reset the
number of `retries` allowed.
This method is deprecated in favour of creating a new task with the same
task definition (but with a new taskId).
Remember that `retries` in the task status counts the number of runs that
the queue has started because the worker stopped responding, for example
because a spot node died.
**Remark** this operation is idempotent; if you try to rerun a task that
is not either `failed` or `completed`, this operation will just return
the current task status.
This method gives output: ``v1/task-status-response.json#``
This method is ``deprecated``
"""
return self._makeApiCall(self.funcinfo["rerunTask"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10207
|
Queue.reportFailed
|
train
|
def reportFailed(self, *args, **kwargs):
"""
Report Run Failed
Report a run failed, resolving the run as `failed`. Use this to resolve
a run that failed because the task specific code behaved unexpectedly.
For example the task exited non-zero, or didn't produce expected output.
Do not use this if the task couldn't be run because of a malformed
payload, or some other unexpected condition. In these cases we have a task
exception, which should be reported with `reportException`.
This method gives output: ``v1/task-status-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["reportFailed"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10208
|
Queue.reportException
|
train
|
def reportException(self, *args, **kwargs):
"""
Report Task Exception
Resolve a run as _exception_. Generally, you will want to report tasks as
failed instead of exception. You should `reportException` if,
* The `task.payload` is invalid,
* Non-existent resources are referenced,
* Declared actions cannot be executed due to unavailable resources,
* The worker had to shutdown prematurely,
* The worker experienced an unknown error, or,
* The task explicitly requested a retry.
Do not use this to signal that some user-specified code crashed for any
reason specific to this code. If user-specific code hits a resource that
is temporarily unavailable, the worker should report the task _failed_.
This method takes input: ``v1/task-exception-request.json#``
This method gives output: ``v1/task-status-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["reportException"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
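A sketch of resolving a run per the bullet list above, continuing the hypothetical `queue` client from earlier; treat the exact `reason` strings as an assumption to verify against the task-exception-request schema:

```python
# taskId and runId identify the run being resolved; 'worker-shutdown'
# is one plausible reason value matching the bullet list above.
queue.reportException(taskId, runId, {'reason': 'worker-shutdown'})
```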
q10209
|
Queue.listProvisioners
|
train
|
def listProvisioners(self, *args, **kwargs):
"""
Get a list of all active provisioners
Get all active provisioners.
The term "provisioner" is taken broadly to mean anything with a provisionerId.
This does not necessarily mean there is an associated service performing any
provisioning activity.
The response is paged. If this end-point returns a `continuationToken`, you
should call the end-point again with the `continuationToken` as a query-string
option. By default this end-point will list up to 1000 provisioners in a single
page. You may limit this with the query-string parameter `limit`.
This method gives output: ``v1/list-provisioners-response.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["listProvisioners"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
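The continuation-token paging described above can be driven by hand; a sketch, assuming the generated wrappers accept a `query` keyword (the client's `_makeApiCall`, shown later in this dump, builds a query string from one) and reusing the hypothetical `queue` client:

```python
provisioners, query = [], {'limit': 100}
while True:
    resp = queue.listProvisioners(query=query)
    provisioners.extend(resp.get('provisioners', []))
    if 'continuationToken' not in resp:
        break
    # feed the token back in, exactly as the docstring describes
    query['continuationToken'] = resp['continuationToken']
print(len(provisioners))
```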
q10210
|
Queue.getProvisioner
|
train
|
def getProvisioner(self, *args, **kwargs):
"""
Get an active provisioner
Get an active provisioner.
The term "provisioner" is taken broadly to mean anything with a provisionerId.
This does not necessarily mean there is an associated service performing any
provisioning activity.
This method gives output: ``v1/provisioner-response.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["getProvisioner"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10211
|
Queue.declareProvisioner
|
train
|
def declareProvisioner(self, *args, **kwargs):
"""
Update a provisioner
Declare a provisioner, supplying some details about it.
`declareProvisioner` allows updating one or more properties of a provisioner as long as the required scopes are
possessed. For example, a request to update the `aws-provisioner-v1`
provisioner with a body `{description: 'This provisioner is great'}` would require you to have the scope
`queue:declare-provisioner:aws-provisioner-v1#description`.
The term "provisioner" is taken broadly to mean anything with a provisionerId.
This does not necessarily mean there is an associated service performing any
provisioning activity.
This method takes input: ``v1/update-provisioner-request.json#``
This method gives output: ``v1/provisioner-response.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["declareProvisioner"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10212
|
Queue.pendingTasks
|
train
|
def pendingTasks(self, *args, **kwargs):
"""
Get Number of Pending Tasks
Get an approximate number of pending tasks for the given `provisionerId`
and `workerType`.
The underlying Azure Storage Queues only promises to give us an estimate.
Furthermore, we cache the result in memory for 20 seconds. So consumers
should by no means expect this to be an accurate number.
It is, however, a solid estimate of the number of pending tasks.
This method gives output: ``v1/pending-tasks-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["pendingTasks"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10213
|
Queue.quarantineWorker
|
train
|
def quarantineWorker(self, *args, **kwargs):
"""
Quarantine a worker
Quarantine a worker
This method takes input: ``v1/quarantine-worker-request.json#``
This method gives output: ``v1/worker-response.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["quarantineWorker"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10214
|
Queue.declareWorker
|
train
|
def declareWorker(self, *args, **kwargs):
"""
Declare a worker
Declare a worker, supplying some details about it.
`declareWorker` allows updating one or more properties of a worker as long as the required scopes are
possessed.
This method takes input: ``v1/update-worker-request.json#``
This method gives output: ``v1/worker-response.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["declareWorker"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10215
|
Index.findTask
|
train
|
async def findTask(self, *args, **kwargs):
"""
Find Indexed Task
Find a task by index path, returning the highest-rank task with that path. If no
task exists for the given path, this API end-point will respond with a 404 status.
This method gives output: ``v1/indexed-task-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["findTask"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10216
|
Index.insertTask
|
train
|
async def insertTask(self, *args, **kwargs):
"""
Insert Task into Index
Insert a task into the index. If the new rank is less than the existing rank
at the given index path, the task is not indexed but the response is still 200 OK.
Please see the introduction above for information
about indexing successfully completed tasks automatically using custom routes.
This method takes input: ``v1/insert-task-request.json#``
This method gives output: ``v1/indexed-task-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["insertTask"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10217
|
Index.findArtifactFromTask
|
train
|
async def findArtifactFromTask(self, *args, **kwargs):
"""
Get Artifact From Indexed Task
Find a task by index path and redirect to the artifact on the most recent
run with the given `name`.
Note that multiple calls to this endpoint may return artifacts from different tasks
if a new task is inserted into the index between calls. Avoid using this method as
a stable link to multiple, connected files if the index path does not contain a
unique identifier. For example, the following two links may return unrelated files:
* https://tc.example.com/api/index/v1/task/some-app.win64.latest.installer/artifacts/public/installer.exe`
* https://tc.example.com/api/index/v1/task/some-app.win64.latest.installer/artifacts/public/debug-symbols.zip`
This problem can be remedied by including the revision in the index path or by bundling both
installer and debug symbols into a single artifact.
If no task exists for the given index path, this API end-point responds with 404.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["findArtifactFromTask"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10218
|
PurgeCache.allPurgeRequests
|
train
|
async def allPurgeRequests(self, *args, **kwargs):
"""
All Open Purge Requests
This is useful mostly for administrators to view
the set of open purge requests. It should not
be used by workers. They should use the purgeRequests
endpoint that is specific to their workerType and
provisionerId.
This method gives output: ``v1/all-purge-cache-request-list.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["allPurgeRequests"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10219
|
Term.write
|
train
|
def write(self, text):
"""Parses text and prints proper output to the terminal
This method will extract escape codes from the text and
handle them as well as possible for whichever platform
is being used. At the moment only the display escape codes
are supported.
"""
escape_parts = re.compile('\x01?\x1b\\[([0-9;]*)m\x02?')
chunks = escape_parts.split(text)
i = 0
for chunk in chunks:
if chunk != '':
if i % 2 == 0:
self.stream.write(chunk)
else:
c = chunk.split(';')
r = Magic.rdisplay(c)
self.display(**r) #see caveat 0
self.flush()
i += 1
|
python
|
{
"resource": ""
}
|
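The regex above splits input into alternating plain-text and escape-parameter chunks, which is what the `i % 2` test relies on; a standalone demonstration:

```python
import re

escape_parts = re.compile('\x01?\x1b\\[([0-9;]*)m\x02?')
sample = 'plain \x1b[1;31mbold red\x1b[0m plain again'
print(escape_parts.split(sample))
# ['plain ', '1;31', 'bold red', '0', ' plain again']
```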
q10220
|
UnixTerm.getch
|
train
|
def getch(self):
"""Don't use this yet
It doesn't belong here but I haven't yet thought about a proper
way to implement this feature and the features that will depend on
it.
"""
    return NotImplemented
    # NOTE: everything below is intentionally unreachable until this is ready
    fno = stdout.fileno()
mode = self.termios.tcgetattr(fno)
try:
self.tty.setraw(fno, self.termios.TCSANOW)
ch = self.read(1)
finally:
self.termios.tcsetattr(fno, self.termios.TCSANOW, mode)
return ch
|
python
|
{
"resource": ""
}
|
q10221
|
CursesTerm.display
|
train
|
def display(self, codes=[], fg=None, bg=None):
"""Displays the codes using ANSI escapes
"""
codes, fg, bg = Magic.displayformat(codes, fg, bg)
self.stream.write(Magic.display(codes, fg, bg))
self.flush()
|
python
|
{
"resource": ""
}
|
q10222
|
WinTerm.display
|
train
|
def display(self, codes=[], fg=None, bg=None):
"""Displays codes using Windows kernel calls
"""
codes, fg, bg = Magic.displayformat(codes, fg, bg)
color = 0
for c in codes:
try:
f = getattr(self, '_display_' + c)
out = f()
if out: color |= out
except AttributeError:
pass
cfg, cfgi, cbg, cbgi = self._split_attributes(
self._get_console_info()['attributes'])
if self.reverse_input:
cfg, cbg = (cbg // 0x10), (cfg * 0x10)
cfgi, cbgi = (cbgi // 0x10), (cfgi * 0x10)
if fg != None:
color |= self.FG[fg]
self.real_fg = self.FG[fg]
else: color |= cfg
if bg != None:
color |= self.BG[bg]
else: color |= cbg
color |= (cfgi | cbgi)
fg, fgi, bg, bgi = self._split_attributes(color)
if self.dim_output:
# intense black
fg = 0
fgi = self.FG_INTENSITY
if self.reverse_output:
fg, bg = (bg // 0x10), (fg * 0x10)
fgi, bgi = (bgi // 0x10), (fgi * 0x10)
self.reverse_input = True
if self.hidden_output:
fg = (bg // 0x10)
fgi = (bgi // 0x10)
self._set_attributes(fg | fgi | bg | bgi)
|
python
|
{
"resource": ""
}
|
q10223
|
Magic.displayformat
|
train
|
def displayformat(codes=[], fg=None, bg=None):
"""Makes sure all arguments are valid"""
if isinstance(codes, basestring):
codes = [codes]
else:
codes = list(codes)
for code in codes:
if code not in Magic.DISPLAY.keys():
raise ValueError("'%s' not a valid display value" % code)
for color in (fg, bg):
if color != None:
if color not in Magic.COLORS.keys():
raise ValueError("'%s' not a valid color" % color)
return [codes, fg, bg]
|
python
|
{
"resource": ""
}
|
q10224
|
Auth.listRoleIds
|
train
|
def listRoleIds(self, *args, **kwargs):
"""
List Role IDs
If no limit is given, the roleIds of all roles are returned. Since this
list may become long, callers can use the `limit` and `continuationToken`
query arguments to page through the responses.
This method gives output: ``v1/list-role-ids-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listRoleIds"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10225
|
Auth.currentScopes
|
train
|
def currentScopes(self, *args, **kwargs):
"""
Get Current Scopes
Return the expanded scopes available in the request, taking into account all sources
of scopes and scope restrictions (temporary credentials, assumeScopes, client scopes,
and roles).
This method gives output: ``v1/scopeset.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["currentScopes"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10226
|
Auth.azureTables
|
train
|
def azureTables(self, *args, **kwargs):
"""
List Tables in an Account Managed by Auth
Retrieve a list of all tables in an account.
This method gives output: ``v1/azure-table-list-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["azureTables"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10227
|
Auth.azureTableSAS
|
train
|
def azureTableSAS(self, *args, **kwargs):
"""
Get Shared-Access-Signature for Azure Table
Get a shared access signature (SAS) string for use with a specific Azure
Table Storage table.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. If level is read-write, it will create the
table if it doesn't already exist.
This method gives output: ``v1/azure-table-access-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["azureTableSAS"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10228
|
Auth.azureContainers
|
train
|
def azureContainers(self, *args, **kwargs):
"""
List containers in an Account Managed by Auth
Retrieve a list of all containers in an account.
This method gives output: ``v1/azure-container-list-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["azureContainers"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10229
|
Auth.azureContainerSAS
|
train
|
def azureContainerSAS(self, *args, **kwargs):
"""
Get Shared-Access-Signature for Azure Container
Get a shared access signature (SAS) string for use with a specific Azure
Blob Storage container.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. If level is read-write, it will create the
container if it doesn't already exist.
This method gives output: ``v1/azure-container-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["azureContainerSAS"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10230
|
Auth.sentryDSN
|
train
|
def sentryDSN(self, *args, **kwargs):
"""
Get DSN for Sentry Project
Get temporary DSN (access credentials) for a sentry project.
The credentials returned can be used with any Sentry client for up to
24 hours, after which the credentials will be automatically disabled.
If the project doesn't exist it will be created, and assigned to the
initial team configured for this component. Contact a Sentry admin
to have the project transferred to a team you have access to if needed.
This method gives output: ``v1/sentry-dsn-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["sentryDSN"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10231
|
Auth.statsumToken
|
train
|
def statsumToken(self, *args, **kwargs):
"""
Get Token for Statsum Project
Get temporary `token` and `baseUrl` for sending metrics to statsum.
The token is valid for 24 hours, clients should refresh after expiration.
This method gives output: ``v1/statsum-token-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["statsumToken"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10232
|
Auth.websocktunnelToken
|
train
|
def websocktunnelToken(self, *args, **kwargs):
"""
Get a client token for the Websocktunnel service
Get a temporary token suitable for use connecting to a
[websocktunnel](https://github.com/taskcluster/websocktunnel) server.
The resulting token will only be accepted by servers with a matching audience
value. Reaching such a server is the caller's responsibility. In general,
a server URL or set of URLs should be provided to the caller as configuration
along with the audience value.
The token is valid for a limited time (on the scale of hours). Callers should
refresh it before expiration.
This method gives output: ``v1/websocktunnel-token-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["websocktunnelToken"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10233
|
h1
|
train
|
def h1(title, line=OVERLINE):
"""Prints bold text with line beneath it spanning width of terminal
"""
width = utils.term.width
printy(bold(title.center(width)).as_utf8)
printy(bold((line * width)[:width]).as_utf8)
|
python
|
{
"resource": ""
}
|
q10234
|
complement
|
train
|
def complement(color):
r"""Calculates polar opposite of color
This isn't guaranteed to look good >_> (especially with brighter, higher
intensity colors.) This will be replaced with a formula that produces
better looking colors in the future.
>>> complement('red')
(0, 255, 76)
>>> complement((0, 100, 175))
(175, 101, 0)
"""
(r, g, b) = parse_color(color)
gcolor = grapefruit.Color((r / 255.0, g / 255.0, b / 255.0))
complement = gcolor.ComplementaryColor()
(r, g, b) = [int(c * 255.0) for c in complement.rgb]
return (r, g, b)
|
python
|
{
"resource": ""
}
|
q10235
|
section
|
train
|
def section(title, bar=OVERLINE, strm=sys.stdout):
"""Helper function for testing demo routines
"""
width = utils.term.width
printy(bold(title.center(width)))
printy(bold((bar * width)[:width]))
|
python
|
{
"resource": ""
}
|
q10236
|
AwsProvisionerEvents.workerTypeCreated
|
train
|
def workerTypeCreated(self, *args, **kwargs):
"""
WorkerType Created Message
When a new `workerType` is created a message will be published to this
exchange.
This exchange outputs: ``http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* workerType: WorkerType that this message concerns. (required)
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'worker-type-created',
'name': 'workerTypeCreated',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
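The `routingKey` list in the `ref` above describes an AMQP topic key of the form `<routingKeyKind>.<workerType>.<reserved>`. A sketch that derives a binding pattern from such a ref, matching every message for one workerType (the workerType value is illustrative):

```python
def binding_pattern(ref, **values):
    """Build an AMQP topic binding pattern from a routing-key ref:
    unspecified single words become '*', multi-word entries become '#'."""
    parts = []
    for entry in ref['routingKey']:
        if 'constant' in entry:
            parts.append(entry['constant'])
        elif entry['name'] in values:
            parts.append(values[entry['name']])
        else:
            parts.append('#' if entry['multipleWords'] else '*')
    return '.'.join(parts)

ref = {'routingKey': [
    {'constant': 'primary', 'multipleWords': False, 'name': 'routingKeyKind'},
    {'multipleWords': False, 'name': 'workerType'},
    {'multipleWords': True, 'name': 'reserved'},
]}
print(binding_pattern(ref, workerType='my-worker-type'))
# primary.my-worker-type.#
```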
q10237
|
AwsProvisionerEvents.workerTypeUpdated
|
train
|
def workerTypeUpdated(self, *args, **kwargs):
"""
WorkerType Updated Message
When a `workerType` is updated a message will be published to this
exchange.
This exchange outputs: ``http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* workerType: WorkerType that this message concerns. (required)
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'worker-type-updated',
'name': 'workerTypeUpdated',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10238
|
AwsProvisionerEvents.workerTypeRemoved
|
train
|
def workerTypeRemoved(self, *args, **kwargs):
"""
WorkerType Removed Message
When a `workerType` is removed a message will be published to this
exchange.
This exchange outputs: ``http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* workerType: WorkerType that this message concerns. (required)
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'worker-type-removed',
'name': 'workerTypeRemoved',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10239
|
createTemporaryCredentials
|
train
|
def createTemporaryCredentials(clientId, accessToken, start, expiry, scopes, name=None):
""" Create a set of temporary credentials
Callers should not apply any clock skew; clock drift is accounted for by
the auth service.
clientId: the issuing clientId
accessToken: the issuer's accessToken
start: start time of credentials (datetime.datetime)
expiry: expiration time of credentials, (datetime.datetime)
scopes: list of scopes granted
name: credential name (optional)
Returns a dictionary in the form:
{ 'clientId': str, 'accessToken': str, 'certificate': str}
"""
for scope in scopes:
if not isinstance(scope, six.string_types):
raise exceptions.TaskclusterFailure('Scope must be string')
# Credentials can only be valid for 31 days. I hope that
# this is validated on the server somehow...
if expiry - start > datetime.timedelta(days=31):
raise exceptions.TaskclusterFailure('Only 31 days allowed')
# We multiply times by 1000 because the auth service is JS and as a result
# uses milliseconds instead of seconds
cert = dict(
version=1,
scopes=scopes,
start=calendar.timegm(start.utctimetuple()) * 1000,
expiry=calendar.timegm(expiry.utctimetuple()) * 1000,
seed=utils.slugId().encode('ascii') + utils.slugId().encode('ascii'),
)
# if this is a named temporary credential, include the issuer in the certificate
if name:
cert['issuer'] = utils.toStr(clientId)
sig = ['version:' + utils.toStr(cert['version'])]
if name:
sig.extend([
'clientId:' + utils.toStr(name),
'issuer:' + utils.toStr(clientId),
])
sig.extend([
'seed:' + utils.toStr(cert['seed']),
'start:' + utils.toStr(cert['start']),
'expiry:' + utils.toStr(cert['expiry']),
'scopes:'
] + scopes)
sigStr = '\n'.join(sig).encode()
if isinstance(accessToken, six.text_type):
accessToken = accessToken.encode()
sig = hmac.new(accessToken, sigStr, hashlib.sha256).digest()
cert['signature'] = utils.encodeStringForB64Header(sig)
newToken = hmac.new(accessToken, cert['seed'], hashlib.sha256).digest()
newToken = utils.makeB64UrlSafe(utils.encodeStringForB64Header(newToken)).replace(b'=', b'')
return {
'clientId': name or clientId,
'accessToken': newToken,
'certificate': utils.dumpJson(cert),
}
|
python
|
{
"resource": ""
}
|
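A usage sketch for `createTemporaryCredentials` above; the clientId, accessToken, and scope are placeholders:

```python
import datetime

now = datetime.datetime.utcnow()
creds = createTemporaryCredentials(
    clientId='issuing-client',                 # placeholder issuer
    accessToken='...',                         # placeholder token
    start=now,
    expiry=now + datetime.timedelta(hours=1),  # well under the 31-day cap
    scopes=['queue:create-task:my-provisioner/my-worker-type'],
)
print(creds['clientId'])
print(creds['certificate'][:40], '...')
```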
q10240
|
BaseClient.makeHawkExt
|
train
|
def makeHawkExt(self):
""" Make an 'ext' for Hawk authentication """
o = self.options
c = o.get('credentials', {})
if c.get('clientId') and c.get('accessToken'):
ext = {}
cert = c.get('certificate')
if cert:
if six.PY3 and isinstance(cert, six.binary_type):
cert = cert.decode()
if isinstance(cert, six.string_types):
cert = json.loads(cert)
ext['certificate'] = cert
if 'authorizedScopes' in o:
ext['authorizedScopes'] = o['authorizedScopes']
# .encode('base64') inserts a newline, which hawk doesn't
# like but doesn't strip itself
return utils.makeB64UrlSafe(utils.encodeStringForB64Header(utils.dumpJson(ext)).strip())
else:
return {}
|
python
|
{
"resource": ""
}
|
q10241
|
BaseClient.buildSignedUrl
|
train
|
def buildSignedUrl(self, methodName, *args, **kwargs):
""" Build a signed URL. This URL contains the credentials needed to access
a resource."""
if 'expiration' in kwargs:
expiration = kwargs['expiration']
del kwargs['expiration']
else:
expiration = self.options['signedUrlExpiration']
expiration = int(time.time() + expiration) # Mainly so that we throw if it's not a number
requestUrl = self.buildUrl(methodName, *args, **kwargs)
if not self._hasCredentials():
raise exceptions.TaskclusterAuthFailure('Invalid Hawk Credentials')
clientId = utils.toStr(self.options['credentials']['clientId'])
accessToken = utils.toStr(self.options['credentials']['accessToken'])
def genBewit():
# We need to fix the output of get_bewit. It returns a url-safe base64
# encoded string, which contains a list of tokens separated by '\'.
# The first one is the clientId, the second is an int, the third is
# url-safe base64 encoded MAC, the fourth is the ext param.
# The problem is that the nested url-safe base64 encoded MAC must be
# base64 (i.e. not url safe) or server-side will complain.
# id + '\\' + exp + '\\' + mac + '\\' + options.ext;
resource = mohawk.base.Resource(
credentials={
'id': clientId,
'key': accessToken,
'algorithm': 'sha256',
},
method='GET',
ext=utils.toStr(self.makeHawkExt()),
url=requestUrl,
timestamp=expiration,
nonce='',
# content='',
# content_type='',
)
bewit = mohawk.bewit.get_bewit(resource)
return bewit.rstrip('=')
bewit = genBewit()
if not bewit:
raise exceptions.TaskclusterFailure('Did not receive a bewit')
u = urllib.parse.urlparse(requestUrl)
qs = u.query
if qs:
qs += '&'
qs += 'bewit=%s' % bewit
return urllib.parse.urlunparse((
u.scheme,
u.netloc,
u.path,
u.params,
qs,
u.fragment,
))
|
python
|
{
"resource": ""
}
|
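A sketch of minting a time-limited link with `buildSignedUrl`; the method name and route arguments are illustrative (`getLatestArtifact` and the artifact name are assumptions):

```python
# Continuing the hypothetical `queue` client: the bewit query parameter
# embeds clientId, expiry, MAC, and ext, per the comment in genBewit above.
url = queue.buildSignedUrl(
    'getLatestArtifact',
    'fXcbozpTQHGxVPfAjFjSZQ',   # hypothetical taskId
    'public/logs/live.log',     # hypothetical artifact name
    expiration=300,             # seconds until the bewit expires
)
print(url)
```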
q10242
|
BaseClient._constructUrl
|
train
|
def _constructUrl(self, route):
"""Construct a URL for the given route on this service, based on the
rootUrl"""
return liburls.api(
self.options['rootUrl'],
self.serviceName,
self.apiVersion,
route.rstrip('/'))
|
python
|
{
"resource": ""
}
|
q10243
|
BaseClient._makeApiCall
|
train
|
def _makeApiCall(self, entry, *args, **kwargs):
""" This function is used to dispatch calls to other functions
for a given API Reference entry"""
x = self._processArgs(entry, *args, **kwargs)
routeParams, payload, query, paginationHandler, paginationLimit = x
route = self._subArgsInRoute(entry, routeParams)
# TODO: Check for limit being in the Query of the api ref
if paginationLimit and 'limit' in entry.get('query', []):
query['limit'] = paginationLimit
if query:
_route = route + '?' + urllib.parse.urlencode(query)
else:
_route = route
response = self._makeHttpRequest(entry['method'], _route, payload)
if paginationHandler:
paginationHandler(response)
while response.get('continuationToken'):
query['continuationToken'] = response['continuationToken']
_route = route + '?' + urllib.parse.urlencode(query)
response = self._makeHttpRequest(entry['method'], _route, payload)
paginationHandler(response)
else:
return response
|
python
|
{
"resource": ""
}
|
q10244
|
BaseClient._hasCredentials
|
train
|
def _hasCredentials(self):
    """ Return True if credentials are given """
cred = self.options.get('credentials')
return (
cred and
'clientId' in cred and
'accessToken' in cred and
cred['clientId'] and
cred['accessToken']
)
|
python
|
{
"resource": ""
}
|
q10245
|
AwsProvisioner.listWorkerTypeSummaries
|
train
|
def listWorkerTypeSummaries(self, *args, **kwargs):
"""
List worker types with details
Return a list of worker types, including some summary information about
current capacity for each. While this list includes all defined worker types,
there may be running EC2 instances for deleted worker types that are not
included here. The list is unordered.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/list-worker-types-summaries-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listWorkerTypeSummaries"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10246
|
AwsProvisioner.workerTypeLastModified
|
train
|
def workerTypeLastModified(self, *args, **kwargs):
"""
Get Worker Type Last Modified Time
This method is provided to allow workers to see when they were
last modified. The value provided through UserData can be
compared against this value to see if changes have been made.
If the worker type definition has not been changed, the date
should be identical, as it is the same stored value.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-last-modified.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["workerTypeLastModified"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10247
|
AwsProvisioner.removeWorkerType
|
train
|
def removeWorkerType(self, *args, **kwargs):
"""
Delete Worker Type
Delete a worker type definition. This method will only delete
the worker type definition from the storage table. The actual
deletion will be handled by a background worker. As soon as this
method is called for a worker type, the background worker will
immediately submit requests to cancel all spot requests for this
worker type and to kill all instances regardless of their state. If
you want to gracefully remove a worker type, you must either ensure
that no tasks are created with that worker type name, or set
maxCapacity to 0, though the latter is not a supported or tested
action.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["removeWorkerType"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10248
|
AwsProvisioner.getSecret
|
train
|
def getSecret(self, *args, **kwargs):
"""
Get a Secret
Retrieve a secret from storage. The result contains any passwords or
other restricted information verbatim as well as a temporary credential
based on the scopes specified when the secret was created.
It is important that this secret is deleted by the consumer (`removeSecret`),
or else the secrets will be visible to any process which can access the
user data associated with the instance.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-secret-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["getSecret"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10249
|
AwsProvisioner.instanceStarted
|
train
|
def instanceStarted(self, *args, **kwargs):
"""
Report an instance starting
An instance will report in by giving its instance id as well
as its security token. The token is checked to ensure that it
matches a real, existing token, so that random machines cannot
check in. We could generate a different token but that seems like
overkill.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["instanceStarted"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10250
|
AwsProvisioner.state
|
train
|
def state(self, *args, **kwargs):
"""
Get AWS State for a worker type
Return the state of a given workertype as stored by the provisioner.
This state is stored as separate lists, one for running instances and
one for pending requests. The `summary` property contains an updated summary
similar to that returned from `listWorkerTypeSummaries`.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["state"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10251
|
query_cast
|
train
|
def query_cast(value, answers, ignorecase=False):
"""A cast function for query
Answers should look something like it does in query
"""
if ignorecase: value = value.lower()
for item in answers:
for a in item['values']:
if ignorecase and (value == str(a).lower()):
return item['values'][0]
elif value == a:
return item['values'][0]
raise ValueError("Response '%s' not understood, please try again." % value)
|
python
|
{
"resource": ""
}
|
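The `answers` structure the docstring alludes to is, per the matching logic above, a list of dicts whose `values` lists begin with the canonical answer; a sketch:

```python
answers = [
    {'values': ['yes', 'y']},
    {'values': ['no', 'n']},
]
print(query_cast('Y', answers, ignorecase=True))  # -> 'yes'
print(query_cast('n', answers))                   # -> 'no'
```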
q10252
|
Hooks.getHookStatus
|
train
|
def getHookStatus(self, *args, **kwargs):
"""
Get hook status
This endpoint will return the current status of the hook. This represents a
snapshot in time and may vary from one call to the next.
This method is deprecated in favor of listLastFires.
This method gives output: ``v1/hook-status.json#``
This method is ``deprecated``
"""
return self._makeApiCall(self.funcinfo["getHookStatus"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10253
|
Hooks.createHook
|
train
|
def createHook(self, *args, **kwargs):
"""
Create a hook
This endpoint will create a new hook.
The caller's credentials must include the role that will be used to
create the task. That role must satisfy task.scopes as well as the
necessary scopes to add the task to the queue.
This method takes input: ``v1/create-hook-request.json#``
This method gives output: ``v1/hook-definition.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["createHook"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10254
|
Hooks.listLastFires
|
train
|
def listLastFires(self, *args, **kwargs):
"""
Get information about recent hook fires
This endpoint will return information about the last few times this hook has been
fired, including whether the hook was fired successfully or not
This method gives output: ``v1/list-lastFires-response.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["listLastFires"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10255
|
QueueEvents.taskDefined
|
train
|
def taskDefined(self, *args, **kwargs):
"""
Task Defined Messages
When a task is created or just defined a message is posted to this
exchange.
This message exchange is mainly useful when tasks are scheduled by a
scheduler that uses `defineTask` as this does not make the task
`pending`. Thus, no `taskPending` message is published.
Please note that messages are also published on this exchange for tasks
defined using `createTask`.
This exchange outputs: ``v1/task-defined-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
* runId: `runId` of latest run for the task, `_` if no run exists for the task.
* workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
* workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'task-defined',
'name': 'taskDefined',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-defined-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10256
|
QueueEvents.taskPending
|
train
|
def taskPending(self, *args, **kwargs):
"""
Task Pending Messages
When a task becomes `pending` a message is posted to this exchange.
This is useful for workers who don't want to constantly poll the queue
for new tasks. The queue will still be the authority for task states and
claims, but using this exchange workers should be able to distribute work
efficiently, and they should be able to reduce their polling interval
significantly without affecting general responsiveness.
This exchange outputs: ``v1/task-pending-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
* runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
* workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
* workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'task-pending',
'name': 'taskPending',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-pending-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10257
|
QueueEvents.taskRunning
|
train
|
def taskRunning(self, *args, **kwargs):
"""
Task Running Messages
Whenever a task is claimed by a worker, a run is started on the worker,
and a message is posted on this exchange.
This exchange outputs: ``v1/task-running-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
* runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
* workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task. (required)
* workerId: `workerId` of latest run for the task, `_` if no run exists for the task. (required)
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'task-running',
'name': 'taskRunning',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-running-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10258
|
QueueEvents.artifactCreated
|
train
|
def artifactCreated(self, *args, **kwargs):
"""
Artifact Creation Messages
Whenever the `createArtifact` end-point is called, the queue will create
a record of the artifact and post a message on this exchange. All of this
happens before the queue returns a signed URL for the caller to upload
the actual artifact with (depending on `storageType`).
This means that the actual artifact is rarely available when this message
is posted. But it is not unreasonable to assume that the artifact will
become available at some point later. Most signatures will expire in
30 minutes or so, forcing the uploader to call `createArtifact` with
the same payload again in order to continue uploading the artifact.
However, in most cases (especially for small artifacts) it's very
reasonable to assume the artifact will be available within a few minutes.
This property means that this exchange is mostly useful for tools
monitoring task evaluation. One could also use it to count the number of
artifacts per task, or _index_ artifacts, though in most cases it'll be
smarter to index artifacts after the task in question has completed
successfully.
This exchange outputs: ``v1/artifact-created-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
* runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
* workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task. (required)
* workerId: `workerId` of latest run for the task, `_` if no run exists for the task. (required)
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'artifact-created',
'name': 'artifactCreated',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/artifact-created-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10259
|
QueueEvents.taskCompleted
|
train
|
def taskCompleted(self, *args, **kwargs):
"""
Task Completed Messages
When a task is successfully completed by a worker, a message is posted to
this exchange.
This message is routed using the `runId`, `workerGroup` and `workerId`
that completed the task. But information about additional runs is also
available from the task status structure.
This exchange outputs: ``v1/task-completed-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
* runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
* workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task. (required)
* workerId: `workerId` of latest run for the task, `_` if no run exists for the task. (required)
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'task-completed',
'name': 'taskCompleted',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-completed-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10260
|
QueueEvents.taskFailed
|
train
|
def taskFailed(self, *args, **kwargs):
"""
Task Failed Messages
When a task ran but failed to complete successfully, a message is posted
to this exchange. This means the worker ran the task-specific code, but
that code exited non-zero.
This exchange outputs: ``v1/task-failed-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
* runId: `runId` of latest run for the task, `_` if no run exists for the task.
* workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
* workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'task-failed',
'name': 'taskFailed',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-failed-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10261
|
QueueEvents.taskException
|
train
|
def taskException(self, *args, **kwargs):
"""
Task Exception Messages
Whenever Taskcluster fails to run a task, a message is posted to this
exchange. This happens if the task isn't completed before its `deadline`,
all retries failed (i.e. workers stopped responding), the task was
canceled by another entity, or the task carried a malformed payload.
The specific _reason_ is evident from the task status structure; refer
to the `reasonResolved` property for the last run.
This exchange outputs: ``v1/task-exception-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
* runId: `runId` of latest run for the task, `_` if no run exists for the task.
* workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
* workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'task-exception',
'name': 'taskException',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-exception-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10262
|
QueueEvents.taskGroupResolved
|
train
|
def taskGroupResolved(self, *args, **kwargs):
"""
Task Group Resolved Messages
A message is published on task-group-resolved whenever all submitted
tasks (whether scheduled or unscheduled) for a given task group have
been resolved, regardless of whether they resolved as successful or
not. A task group may be resolved multiple times, since new tasks may
be submitted against an already resolved task group.
This exchange outputs: ``v1/task-group-resolved.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskGroupId: `taskGroupId` for the task-group this message concerns (required)
* schedulerId: `schedulerId` for the task-group this message concerns (required)
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'task-group-resolved',
'name': 'taskGroupResolved',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-group-resolved.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10263
|
Completer.complete
|
train
|
def complete(self, text, state):
"""The actual completion method
This method is not meant to be overridden. Override the
completelist method instead. It will make your life much easier.
For more detail see documentation for readline.set_completer
"""
if text != self.text:
self.matches = self.completelist(text)
self.text = text
try:
return self.matches[state]
except IndexError:
return None
|
python
|
{
"resource": ""
}
|
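A sketch of the intended subclassing pattern, overriding `completelist` and wiring the completer into readline; this assumes `Completer`'s constructor initializes `self.text` and `self.matches`, which this row does not show:

```python
import readline

class ColorCompleter(Completer):
    def completelist(self, text):
        """Return every candidate completion that starts with `text`."""
        colors = ['red', 'green', 'grey', 'blue', 'black']
        return [c for c in colors if c.startswith(text)]

readline.set_completer(ColorCompleter().complete)
readline.parse_and_bind('tab: complete')
```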
q10264
|
Queue.status
|
train
|
async def status(self, *args, **kwargs):
"""
Get task status
Get task status structure from `taskId`
This method gives output: ``v1/task-status-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["status"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10265
|
Queue.listTaskGroup
|
train
|
async def listTaskGroup(self, *args, **kwargs):
"""
List Task Group
List tasks sharing the same `taskGroupId`.
As a task-group may contain an unbounded number of tasks, this end-point
may return a `continuationToken`. To continue listing tasks you must call
`listTaskGroup` again with the `continuationToken` as the
query-string option `continuationToken`.
By default this end-point will try to return up to 1000 members in one
request. But it **may return less**, even if more tasks are available.
It may also return a `continuationToken` even though there are no more
results. However, you can only be sure to have seen all results if you
keep calling `listTaskGroup` with the last `continuationToken` until you
get a result without a `continuationToken`.
If you are not interested in listing all the members at once, you may
use the query-string option `limit` to return fewer.
This method gives output: ``v1/list-task-group-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["listTaskGroup"], *args, **kwargs)
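
# Pagination sketch (not part of the original source; the `query=` calling
# convention is an assumption about this client). Keep following
# `continuationToken` until a page arrives without one, as required above.
async def list_whole_task_group(queue, task_group_id):
    tasks, token = [], None
    while True:
        query = {'limit': 1000}
        if token:
            query['continuationToken'] = token
        page = await queue.listTaskGroup(task_group_id, query=query)
        tasks.extend(page['tasks'])
        token = page.get('continuationToken')
        if not token:
            return tasks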
|
python
|
{
"resource": ""
}
|
q10266
|
Queue.listDependentTasks
|
train
|
async def listDependentTasks(self, *args, **kwargs):
"""
List Dependent Tasks
List tasks that depend on the given `taskId`.
As many tasks from different task-groups may depend on a single task,
this end-point may return a `continuationToken`. To continue listing
tasks you must call `listDependentTasks` again with the
`continuationToken` as the query-string option `continuationToken`.
By default this end-point will try to return up to 1000 tasks in one
request. But it **may return less**, even if more tasks are available.
It may also return a `continuationToken` even though there are no more
results. However, you can only be sure to have seen all results if you
keep calling `listDependentTasks` with the last `continuationToken` until
you get a result without a `continuationToken`.
If you are not interested in listing all the tasks at once, you may
use the query-string option `limit` to return fewer.
This method gives output: ``v1/list-dependent-tasks-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["listDependentTasks"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10267
|
Queue.createTask
|
train
|
async def createTask(self, *args, **kwargs):
"""
Create New Task
Create a new task, this is an **idempotent** operation, so repeat it if
you get an internal server error or network connection is dropped.
**Task `deadline`**: the deadline property can be no more than 5 days
into the future. This is to limit the amount of pending tasks not being
taken care of. Ideally, you should use a much shorter deadline.
**Task expiration**: the `expires` property must be greater than the
task `deadline`. If not provided it will default to `deadline` + one
year. Note that artifacts created by a task must expire before the task itself does.
**Task specific routing-keys**: using the `task.routes` property you may
define task specific routing-keys. If a task has a task specific
routing-key: `<route>`, then when the AMQP message about the task is
published, the message will be CC'ed with the routing-key:
`route.<route>`. This is useful if you want another component to listen
for completed tasks you have posted. The caller must have scope
`queue:route:<route>` for each route.
**Dependencies**: any tasks referenced in `task.dependencies` must have
already been created at the time of this call.
**Scopes**: Note that the scopes required to complete this API call depend
on the content of the `scopes`, `routes`, `schedulerId`, `priority`,
`provisionerId`, and `workerType` properties of the task definition.
**Legacy Scopes**: The `queue:create-task:..` scope without a priority and
the `queue:define-task:..` and `queue:task-group-id:..` scopes are considered
legacy and should not be used. Note that the new, non-legacy scopes require
a `queue:scheduler-id:..` scope as well as scopes for the proper priority.
This method takes input: ``v1/create-task-request.json#``
This method gives output: ``v1/task-status-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["createTask"], *args, **kwargs)
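
# Invocation sketch (not part of the original source). Because `createTask`
# is idempotent per taskId, retrying with the same slug after a network
# failure is safe.
async def submit_task(queue, task_def):
    import taskcluster
    task_id = taskcluster.slugId()   # random URL-safe id (assumed client helper)
    status = await queue.createTask(task_id, task_def)
    return task_id, status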
|
python
|
{
"resource": ""
}
|
q10268
|
Queue.reportCompleted
|
train
|
async def reportCompleted(self, *args, **kwargs):
"""
Report Run Completed
Report a task completed, resolving the run as `completed`.
This method gives output: ``v1/task-status-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["reportCompleted"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10269
|
Queue.getArtifact
|
train
|
async def getArtifact(self, *args, **kwargs):
"""
Get Artifact from Run
Get artifact by `<name>` from a specific run.
**Public Artifacts**: in order to get an artifact you need the scope
`queue:get-artifact:<name>`, where `<name>` is the name of the artifact.
But if the artifact `name` starts with `public/`, authentication and
authorization are not necessary to fetch the artifact.
**API Clients**: this method will redirect you to the artifact, if it is
stored externally. Either way, the response may not be JSON. So API
client users might want to generate a signed URL for this end-point and
use that URL with an HTTP client that can handle responses correctly.
**Downloading artifacts**
There are some special considerations for those http clients which download
artifacts. This api endpoint is designed to be compatible with an HTTP 1.1
compliant client, but has extra features to ensure the download is valid.
It is strongly recommended that consumers use either taskcluster-lib-artifact (JS),
taskcluster-lib-artifact-go (Go) or the CLI written in Go to interact with
artifacts.
In order to download an artifact the following must be done:
1. Obtain the queue URL. Building a signed URL with a taskcluster client is
recommended
1. Make a GET request which does not follow redirects
1. In all cases, if specified, the
x-taskcluster-location-{content,transfer}-{sha256,length} values must be
validated to be equal to the Content-Length and Sha256 checksum of the
final artifact downloaded, as well as any intermediate redirects
1. If this response is a 500-series error, retry using an exponential
backoff. No more than 5 retries should be attempted
1. If this response is a 400-series error, treat it appropriately for
your context. This might be an error in responding to this request or
an Error storage type body. This request should not be retried.
1. If this response is a 200-series response, the response body is the artifact.
If the x-taskcluster-location-{content,transfer}-{sha256,length} and
x-taskcluster-location-content-encoding are specified, they should match
this response body
1. If the response type is a 300-series redirect, the artifact will be at the
location specified by the `Location` header. There are multiple artifact storage
types which use a 300-series redirect.
1. For all redirects followed, the user must verify that the content-sha256, content-length,
transfer-sha256, transfer-length and content-encoding match every further request. The final
artifact must also be validated against the values specified in the original queue response
1. Caching of requests with an x-taskcluster-artifact-storage-type value of `reference`
must not occur
1. A request which has x-taskcluster-artifact-storage-type value of `blob` and does not
have x-taskcluster-location-content-sha256 or x-taskcluster-location-content-length
must be treated as an error
**Headers**
The following important headers are set on the response to this method:
* location: the url of the artifact if a redirect is to be performed
* x-taskcluster-artifact-storage-type: the storage type. Example: blob, s3, error
The following important headers are set on responses to this method for Blob artifacts
* x-taskcluster-location-content-sha256: the SHA256 of the artifact
*after* any content-encoding is undone. Sha256 is hex encoded (e.g. [0-9A-Fa-f]{64})
* x-taskcluster-location-content-length: the number of bytes *after* any content-encoding
is undone
* x-taskcluster-location-transfer-sha256: the SHA256 of the artifact
*before* any content-encoding is undone. This is the SHA256 of what is sent over
the wire. Sha256 is hex encoded (e.g. [0-9A-Fa-f]{64})
* x-taskcluster-location-transfer-length: the number of bytes *before* any content-encoding
is undone, i.e. the number of bytes sent over the wire
* x-taskcluster-location-content-encoding: the content-encoding used. It will either
be `gzip` or `identity` right now. This is hardcoded to a value set when the artifact
was created and no content-negotiation occurs
* x-taskcluster-location-content-type: the content-type of the artifact
**Caching**: artifacts may be cached in data centers closer to the
workers in order to reduce bandwidth costs. This can lead to longer
response times. Caching can be skipped by setting the header
`x-taskcluster-skip-cache: true`, this should only be used for resources
where request volume is known to be low, and caching not useful.
(This feature may be disabled in the future, use is sparingly!)
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["getArtifact"], *args, **kwargs)
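
# Download sketch (not part of the original source; `buildSignedUrl` and the
# aiohttp-style session are assumptions). It follows the rules above: signed
# URL, redirects handled manually, and bounded retries with exponential
# backoff on 500-series responses.
import asyncio

async def fetch_artifact(queue, task_id, run_id, name, session):
    url = queue.buildSignedUrl('getArtifact', task_id, run_id, name)
    resp = None
    for attempt in range(5):                       # no more than 5 retries
        resp = await session.get(url, allow_redirects=False)
        if resp.status < 500:
            return resp    # 2xx body, 3xx Location header, or 4xx error
        await asyncio.sleep(2 ** attempt)          # exponential backoff
    return resp            # still failing after 5 attempts; let caller decide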
|
python
|
{
"resource": ""
}
|
q10270
|
Queue.listArtifacts
|
train
|
async def listArtifacts(self, *args, **kwargs):
"""
Get Artifacts from Run
Returns a list of artifacts and associated meta-data for a given run.
As a task may have many artifacts, paging may be necessary. If this
end-point returns a `continuationToken`, you should call the end-point
again with the `continuationToken` as the query-string option:
`continuationToken`.
By default this end-point will list up to 1000 artifacts in a single page;
you may limit this with the query-string parameter `limit`.
This method gives output: ``v1/list-artifacts-response.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["listArtifacts"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10271
|
Queue.declareWorkerType
|
train
|
async def declareWorkerType(self, *args, **kwargs):
"""
Update a worker-type
Declare a workerType, supplying some details about it.
`declareWorkerType` allows updating one or more properties of a worker-type as long as the required scopes are
possessed. For example, a request to update the `gecko-b-1-w2008` worker-type within the `aws-provisioner-v1`
provisioner with a body `{description: 'This worker type is great'}` would require you to have the scope
`queue:declare-worker-type:aws-provisioner-v1/gecko-b-1-w2008#description`.
This method takes input: ``v1/update-workertype-request.json#``
This method gives output: ``v1/workertype-response.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["declareWorkerType"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10272
|
Queue.listWorkers
|
train
|
async def listWorkers(self, *args, **kwargs):
"""
Get a list of all active workers of a workerType
Get a list of all active workers of a workerType.
`listWorkers` allows a response to be filtered by quarantined and non-quarantined workers.
To filter the query, you should call the end-point with `quarantined` as a query-string option with a
true or false value.
The response is paged. If this end-point returns a `continuationToken`, you
should call the end-point again with the `continuationToken` as a query-string
option. By default this end-point will list up to 1000 workers in a single
page. You may limit this with the query-string parameter `limit`.
This method gives output: ``v1/list-workers-response.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["listWorkers"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10273
|
file
|
train
|
def file(value, **kwarg):
    """value should be a path to a file on the filesystem.

    Returns an open file object; raises ValueError if the file
    cannot be opened.
    """
    from os import path  # needed for the error message below
    # A bit weird, but this avoids hard-coding open()'s default arguments.
    try:
        f = open(value, **kwarg)
    except IOError as e:
        raise ValueError("unable to open %s : %s" % (path.abspath(value), e))
    return f
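
# Usage sketch (not part of the original source): raising ValueError makes
# this usable directly as an argparse `type=` validator.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=file)   # opens the file for reading
    print(parser.parse_args(['--config', __file__]).config.name)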
|
python
|
{
"resource": ""
}
|
q10274
|
fromNow
|
train
|
def fromNow(offset, dateObj=None):
"""
Generate a `datetime.datetime` instance which is offset using a string.
See the README.md for a full example, but offset could be '1 day' for
a datetime object one day in the future
"""
# We want to handle past dates as well as future
future = True
offset = offset.lstrip()
if offset.startswith('-'):
future = False
offset = offset[1:].lstrip()
if offset.startswith('+'):
offset = offset[1:].lstrip()
# Parse offset
m = r.match(offset)
if m is None:
raise ValueError("offset string: '%s' does not parse" % offset)
# In order to calculate years and months we need to calculate how many days
# to offset the offset by, since timedelta only goes as high as weeks
days = 0
hours = 0
minutes = 0
seconds = 0
if m.group('years'):
years = int(m.group('years'))
days += 365 * years
if m.group('months'):
months = int(m.group('months'))
days += 30 * months
days += int(m.group('days') or 0)
hours += int(m.group('hours') or 0)
minutes += int(m.group('minutes') or 0)
seconds += int(m.group('seconds') or 0)
# Offset datetime from utc
delta = datetime.timedelta(
weeks=int(m.group('weeks') or 0),
days=days,
hours=hours,
minutes=minutes,
seconds=seconds,
)
if not dateObj:
dateObj = datetime.datetime.utcnow()
return dateObj + delta if future else dateObj - delta
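
# Usage sketch (not part of the original source; assumes this module's
# context). Note the approximations used above: a year counts as 365 days
# and a month as 30 days.
if __name__ == '__main__':
    import datetime
    anchor = datetime.datetime(2020, 1, 1)
    assert fromNow('1 day', anchor) == datetime.datetime(2020, 1, 2)
    assert fromNow('-2 hours', anchor) == datetime.datetime(2019, 12, 31, 22)
    assert fromNow('1 month', anchor) == datetime.datetime(2020, 1, 31)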
|
python
|
{
"resource": ""
}
|
q10275
|
dumpJson
|
train
|
def dumpJson(obj, **kwargs):
""" Match JS's JSON.stringify. When using the default seperators,
base64 encoding JSON results in \n sequences in the output. Hawk
barfs in your face if you have that in the text"""
def handleDateAndBinaryForJs(x):
if six.PY3 and isinstance(x, six.binary_type):
x = x.decode()
if isinstance(x, datetime.datetime) or isinstance(x, datetime.date):
return stringDate(x)
else:
return x
d = json.dumps(obj, separators=(',', ':'), default=handleDateAndBinaryForJs, **kwargs)
assert '\n' not in d
return d
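
# Usage sketch (not part of the original source; assumes this module's
# stringDate). Compact separators keep the output free of spaces, so its
# base64 encoding contains no newline sequences.
if __name__ == '__main__':
    import datetime
    print(dumpJson({'deadline': datetime.datetime(2020, 1, 1), 'n': 1}))
    # -> {"deadline":"2020-01-01T00:00:00Z","n":1}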
|
python
|
{
"resource": ""
}
|
q10276
|
makeB64UrlSafe
|
train
|
def makeB64UrlSafe(b64str):
""" Make a base64 string URL Safe """
if isinstance(b64str, six.text_type):
b64str = b64str.encode()
# see RFC 4648, sec. 5
return b64str.replace(b'+', b'-').replace(b'/', b'_')
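
# Round-trip sketch (not part of the original source), per RFC 4648, sec. 5:
if __name__ == '__main__':
    import base64
    raw = base64.b64encode(b'\xfb\xff\xfe')   # -> b'+//+'
    assert makeB64UrlSafe(raw) == b'-__-'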
|
python
|
{
"resource": ""
}
|
q10277
|
encodeStringForB64Header
|
train
|
def encodeStringForB64Header(s):
""" HTTP Headers can't have new lines in them, let's """
if isinstance(s, six.text_type):
s = s.encode()
return base64.encodestring(s).strip().replace(b'\n', b'')
|
python
|
{
"resource": ""
}
|
q10278
|
scopeMatch
|
train
|
def scopeMatch(assumedScopes, requiredScopeSets):
"""
Take a list of a assumed scopes, and a list of required scope sets on
disjunctive normal form, and check if any of the required scope sets are
satisfied.
Example:
requiredScopeSets = [
["scopeA", "scopeB"],
["scopeC"]
]
In this case assumed_scopes must contain, either:
"scopeA" AND "scopeB", OR just "scopeC".
"""
for scopeSet in requiredScopeSets:
for requiredScope in scopeSet:
for scope in assumedScopes:
if scope == requiredScope:
                    # requiredScope satisfied, no need to check more scopes
break
if scope.endswith("*") and requiredScope.startswith(scope[:-1]):
                    # requiredScope satisfied, no need to check more scopes
break
else:
# requiredScope not satisfied, stop checking scopeSet
break
else:
# scopeSet satisfied, so we're happy
return True
# none of the requiredScopeSets were satisfied
return False
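
# Behavior sketch (not part of the original source): scopes within a set are
# AND-ed, the sets themselves are OR-ed, and a trailing '*' in an assumed
# scope acts as a prefix wildcard.
if __name__ == '__main__':
    required = [['scopeA', 'scopeB'], ['scopeC']]
    assert scopeMatch(['scopeA', 'scopeB'], required)   # first set satisfied
    assert scopeMatch(['scopeC'], required)             # second set satisfied
    assert scopeMatch(['scope*'], required)             # wildcard prefix
    assert not scopeMatch(['scopeA'], required)         # incomplete set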
|
python
|
{
"resource": ""
}
|
q10279
|
isExpired
|
train
|
def isExpired(certificate):
""" Check if certificate is expired """
if isinstance(certificate, six.string_types):
certificate = json.loads(certificate)
expiry = certificate.get('expiry', 0)
    return expiry < int(time.time() * 1000) + 20 * 60 * 1000  # 20-minute margin; expiry is epoch ms
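
# Behavior sketch (not part of the original source): `expiry` is epoch
# milliseconds, and a safety margin is applied ahead of the actual expiry.
if __name__ == '__main__':
    import json, time
    past = int(time.time() * 1000) - 1000                 # already expired
    assert isExpired(json.dumps({'expiry': past}))
    later = int(time.time() * 1000) + 24 * 3600 * 1000    # a day from now
    assert not isExpired({'expiry': later})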
|
python
|
{
"resource": ""
}
|
q10280
|
xterm_to_rgb
|
train
|
def xterm_to_rgb(xcolor):
"""Convert xterm Color ID to an RGB value
All 256 values are precalculated and stored in :data:`COLOR_TABLE`
"""
assert 0 <= xcolor <= 255
if xcolor < 16:
# basic colors
return BASIC16[xcolor]
elif 16 <= xcolor <= 231:
# color cube
xcolor -= 16
return (CUBE_STEPS[xcolor // 36 % 6],
CUBE_STEPS[xcolor // 6 % 6],
CUBE_STEPS[xcolor % 6])
elif 232 <= xcolor <= 255:
# gray tone
c = 8 + (xcolor - 232) * 0x0A
return (c, c, c)
|
python
|
{
"resource": ""
}
|
q10281
|
rgb_to_xterm
|
train
|
def rgb_to_xterm(r, g, b):
"""Quantize RGB values to an xterm 256-color ID
This works by envisioning the RGB values for all 256 xterm colors
as 3D euclidean space and brute-force searching for the nearest
neighbor.
This is very slow. If you're very lucky, :func:`compile_speedup`
will replace this function automatically with routines in
`_xterm256.c`.
"""
if r < 5 and g < 5 and b < 5:
return 16
best_match = 0
smallest_distance = 10000000000
for c in range(16, 256):
d = (COLOR_TABLE[c][0] - r) ** 2 + \
(COLOR_TABLE[c][1] - g) ** 2 + \
(COLOR_TABLE[c][2] - b) ** 2
if d < smallest_distance:
smallest_distance = d
best_match = c
return best_match
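
# Round-trip sketch (not part of the original source; assumes `xterm_to_rgb`
# from this module is in scope): quantizing an exact palette entry should
# recover its own color id.
if __name__ == '__main__':
    for xcolor in (16, 124, 255):
        assert rgb_to_xterm(*xterm_to_rgb(xcolor)) == xcolor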
|
python
|
{
"resource": ""
}
|
q10282
|
Image.resize
|
train
|
def resize(self, width=None):
"""Resizes image to fit inside terminal
Called by the constructor automatically.
"""
(iw, ih) = self.size
if width is None:
width = min(iw, utils.term.width)
        elif isinstance(width, basestring):
            # Interpret e.g. '50%' as that percentage of the terminal width.
            percents = dict([('%s%%' % (pct), pct) for pct in range(101)])
            width = percents[width] * utils.term.width // 100
height = int(float(ih) * (float(width) / float(iw)))
height //= 2
self.img = self.img.resize((width, height))
|
python
|
{
"resource": ""
}
|
q10283
|
Image.reduce
|
train
|
def reduce(self, colors):
"""Converts color codes into optimized text
This optimizer works by merging adjacent colors so we don't
have to repeat the same escape codes for each pixel. There is
no loss of information.
:param colors: Iterable yielding an xterm color code for each
pixel, None to indicate a transparent pixel, or
                       ``'EOL'`` to indicate the end of a line.
:return: Yields lines of optimized text.
"""
need_reset = False
line = []
for color, items in itertools.groupby(colors):
if color is None:
if need_reset:
line.append("\x1b[49m")
need_reset = False
line.append(self.pad * len(list(items)))
elif color == "EOL":
if need_reset:
line.append("\x1b[49m")
need_reset = False
yield "".join(line)
else:
line.pop()
yield "".join(line)
line = []
else:
need_reset = True
line.append("\x1b[48;5;%dm%s" % (
color, self.pad * len(list(items))))
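
# Behavior sketch (not part of the original source): adjacent equal colors
# share one escape sequence, and a trailing transparent pad is trimmed at EOL.
if __name__ == '__main__':
    import itertools                    # used by reduce() above
    from types import SimpleNamespace
    shim = SimpleNamespace(pad=' ')     # reduce() only touches self.pad
    out = list(reduce(shim, [4, 4, 4, None, 'EOL']))
    assert out == ['\x1b[48;5;4m   \x1b[49m']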
|
python
|
{
"resource": ""
}
|
q10284
|
Image.convert
|
train
|
def convert(self):
"""Yields xterm color codes for each pixel in image
"""
(width, height) = self.img.size
bgcolor = utils.term.bgcolor
self.img.load()
for y in range(height):
for x in range(width):
rgba = self.img.getpixel((x, y))
if len(rgba) == 4 and rgba[3] == 0:
yield None
elif len(rgba) == 3 or rgba[3] == 255:
yield xterm256.rgb_to_xterm(*rgba[:3])
else:
color = grapefruit.Color.NewFromRgb(
*[c / 255.0 for c in rgba])
rgba = grapefruit.Color.AlphaBlend(color, bgcolor).rgb
yield xterm256.rgb_to_xterm(
*[int(c * 255.0) for c in rgba])
yield "EOL"
|
python
|
{
"resource": ""
}
|
q10285
|
Login.oidcCredentials
|
train
|
def oidcCredentials(self, *args, **kwargs):
"""
Get Taskcluster credentials given a suitable `access_token`
Given an OIDC `access_token` from a trusted OpenID provider, return a
set of Taskcluster credentials for use on behalf of the identified
user.
This method is typically not called with a Taskcluster client library
and does not accept Hawk credentials. The `access_token` should be
given in an `Authorization` header:
```
Authorization: Bearer abc.xyz
```
The `access_token` is first verified against the named `:provider`, then
passed to the provider's APIBuilder to retrieve a user
profile. That profile is then used to generate Taskcluster credentials
appropriate to the user. Note that the resulting credentials may or may
not include a `certificate` property. Callers should be prepared for either
alternative.
The given credentials will expire in a relatively short time. Callers should
monitor this expiration and refresh the credentials, by calling this endpoint
again, once they have expired.
This method gives output: ``v1/oidc-credentials-response.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["oidcCredentials"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10286
|
Github.builds
|
train
|
async def builds(self, *args, **kwargs):
"""
List of Builds
A paginated list of builds that have been run in
Taskcluster. Can be filtered on various git-specific
fields.
This method gives output: ``v1/build-list.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["builds"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10287
|
Github.repository
|
train
|
async def repository(self, *args, **kwargs):
"""
Get Repository Info
Returns any repository metadata that is
useful within Taskcluster related services.
This method gives output: ``v1/repository.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["repository"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10288
|
Github.createStatus
|
train
|
async def createStatus(self, *args, **kwargs):
"""
Post a status against a given changeset
For a given changeset (SHA) of a repository, this will attach a "commit status"
on github. These statuses are links displayed next to each revision.
The status is either OK (green check) or FAILURE (red cross),
made of a custom title and link.
This method takes input: ``v1/create-status.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["createStatus"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10289
|
Notify.email
|
train
|
async def email(self, *args, **kwargs):
"""
Send an Email
Send an email to `address`. The content is markdown and will be rendered
to HTML, but both the HTML and raw markdown text will be sent in the
email. If a link is included, it will be rendered to a nice button in the
HTML version of the email.
This method takes input: ``v1/email-request.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["email"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10290
|
Notify.pulse
|
train
|
async def pulse(self, *args, **kwargs):
"""
Publish a Pulse Message
Publish a message on pulse with the given `routingKey`.
This method takes input: ``v1/pulse-request.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["pulse"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10291
|
basicConfig
|
train
|
def basicConfig(level=logging.WARNING, transient_level=logging.NOTSET):
"""Shortcut for setting up transient logging
I am a replica of ``logging.basicConfig`` which installs a
transient logging handler to stderr.
"""
fmt = "%(asctime)s [%(levelname)s] [%(name)s:%(lineno)d] %(message)s"
logging.root.setLevel(transient_level) # <--- IMPORTANT
hand = TransientStreamHandler(level=level)
hand.setFormatter(logging.Formatter(fmt))
logging.root.addHandler(hand)
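
# Usage sketch (not part of the original source): records at `level` and
# above persist on stderr; anything down to `transient_level` is shown
# transiently by the handler.
if __name__ == '__main__':
    basicConfig(level=logging.WARNING, transient_level=logging.DEBUG)
    logging.debug('shown briefly, then overwritten')
    logging.warning('printed permanently')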
|
python
|
{
"resource": ""
}
|
q10292
|
Auth.azureAccounts
|
train
|
async def azureAccounts(self, *args, **kwargs):
"""
List Accounts Managed by Auth
Retrieve a list of all Azure accounts managed by Taskcluster Auth.
This method gives output: ``v1/azure-account-list-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["azureAccounts"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10293
|
Auth.authenticateHawk
|
train
|
async def authenticateHawk(self, *args, **kwargs):
"""
Authenticate Hawk Request
Validate the request signature given on input and return list of scopes
that the authenticating client has.
This method is used by other services that wish to rely on Taskcluster
credentials for authentication. This way we can use Hawk without having
the secret credentials leave this service.
This method takes input: ``v1/authenticate-hawk-request.json#``
This method gives output: ``v1/authenticate-hawk-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["authenticateHawk"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10294
|
Hooks.listHookGroups
|
train
|
async def listHookGroups(self, *args, **kwargs):
"""
List hook groups
This endpoint will return a list of all hook groups with at least one hook.
This method gives output: ``v1/list-hook-groups-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["listHookGroups"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10295
|
Hooks.listHooks
|
train
|
async def listHooks(self, *args, **kwargs):
"""
List hooks in a given group
This endpoint will return a list of all the hook definitions within a
given hook group.
This method gives output: ``v1/list-hooks-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["listHooks"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10296
|
Hooks.hook
|
train
|
async def hook(self, *args, **kwargs):
"""
Get hook definition
This endpoint will return the hook definition for the given `hookGroupId`
and `hookId`.
This method gives output: ``v1/hook-definition.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["hook"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10297
|
Hooks.updateHook
|
train
|
async def updateHook(self, *args, **kwargs):
"""
Update a hook
This endpoint will update an existing hook. All fields except
`hookGroupId` and `hookId` can be modified.
This method takes input: ``v1/create-hook-request.json#``
This method gives output: ``v1/hook-definition.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["updateHook"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10298
|
Hooks.removeHook
|
train
|
async def removeHook(self, *args, **kwargs):
"""
Delete a hook
This endpoint will remove a hook definition.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["removeHook"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10299
|
Hooks.triggerHook
|
train
|
async def triggerHook(self, *args, **kwargs):
"""
Trigger a hook
This endpoint will trigger the creation of a task from a hook definition.
The HTTP payload must match the hook's `triggerSchema`. If it does, it is
provided as the `payload` property of the JSON-e context used to render the
task template.
This method takes input: ``v1/trigger-hook.json#``
This method gives output: ``v1/trigger-hook-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["triggerHook"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|