code                     string
signature                string
docstring                string
loss_without_docstring   float64
loss_with_docstring      float64
factor                   float64
return await self._makeApiCall(self.funcinfo["reclaimTask"], *args, **kwargs)
async def reclaimTask(self, *args, **kwargs)
Reclaim task Refresh the claim for a specific `runId` for given `taskId`. This updates the `takenUntil` property and returns a new set of temporary credentials for performing requests on behalf of the task. These credentials should be used in place of the credentials returned by `claimWork`. The `reclaimTask` request serves to: * Postpone `takenUntil`, preventing the queue from resolving `claim-expired`, * Refresh the temporary credentials used for processing the task, and * Abort execution if the task/run has been resolved. If the `takenUntil` timestamp is exceeded the queue will resolve the run as _exception_ with reason `claim-expired`, and proceed to retry the task. This ensures that tasks are retried, even if workers disappear without warning. If the task is resolved, this end-point will return `409` reporting `RequestConflict`. This typically happens if the task has been canceled or the `task.deadline` has been exceeded. If reclaiming fails, workers should abort the task and forget about the given `runId`. There is no need to resolve the run or upload artifacts. This method gives output: ``v1/task-reclaim-response.json#`` This method is ``stable``
12.269343
19.868475
0.617528
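The record above describes the worker-side contract. As a rough sketch (not the library's prescribed pattern), a worker might keep its claim alive like this, assuming `queue` is an async Queue client instance and `taskId`/`runId` came from an earlier `claimWork`; the fixed interval is a simplification of tracking `takenUntil`:

```
import asyncio

async def keep_claim_alive(queue, taskId, runId, interval=300):
    # Reclaim the run periodically until reclaiming fails.
    while True:
        try:
            await queue.reclaimTask(taskId, runId)
        except Exception:
            # A 409 RequestConflict means the task was resolved (canceled
            # or past its deadline): abort and forget the runId, as the
            # docstring above instructs.
            return
        await asyncio.sleep(interval)
```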
return await self._makeApiCall(self.funcinfo["reportCompleted"], *args, **kwargs)
async def reportCompleted(self, *args, **kwargs)
Report Run Completed Report a task completed, resolving the run as `completed`. This method gives output: ``v1/task-status-response.json#`` This method is ``stable``
15.186553
20.270594
0.749191
return await self._makeApiCall(self.funcinfo["listArtifacts"], *args, **kwargs)
async def listArtifacts(self, *args, **kwargs)
Get Artifacts from Run Returns a list of artifacts and associated meta-data for a given run. As a task may have many artifacts, paging may be necessary. If this end-point returns a `continuationToken`, you should call the end-point again with the `continuationToken` as the query-string option `continuationToken`. By default this end-point will list up to 1000 artifacts in a single page; you may limit this with the query-string parameter `limit`. This method gives output: ``v1/list-artifacts-response.json#`` This method is ``experimental``
13.501169
20.389608
0.662159
return await self._makeApiCall(self.funcinfo["getWorkerType"], *args, **kwargs)
async def getWorkerType(self, *args, **kwargs)
Get a worker-type Get a worker-type from a provisioner. This method gives output: ``v1/workertype-response.json#`` This method is ``experimental``
11.216018
16.441223
0.682189
return await self._makeApiCall(self.funcinfo["declareWorkerType"], *args, **kwargs)
async def declareWorkerType(self, *args, **kwargs)
Update a worker-type Declare a workerType, supplying some details about it. `declareWorkerType` allows updating one or more properties of a worker-type as long as the required scopes are possessed. For example, a request to update the `gecko-b-1-w2008` worker-type within the `aws-provisioner-v1` provisioner with a body `{description: 'This worker type is great'}` would require you to have the scope `queue:declare-worker-type:aws-provisioner-v1/gecko-b-1-w2008#description`. This method takes input: ``v1/update-workertype-request.json#`` This method gives output: ``v1/workertype-response.json#`` This method is ``experimental``
12.731367
18.912077
0.673187
return await self._makeApiCall(self.funcinfo["listWorkers"], *args, **kwargs)
async def listWorkers(self, *args, **kwargs)
Get a list of all active workers of a workerType Get a list of all active workers of a workerType. `listWorkers` allows a response to be filtered by quarantined and non-quarantined workers. To filter the query, you should call the end-point with `quarantined` as a query-string option with a true or false value. The response is paged. If this end-point returns a `continuationToken`, you should call the end-point again with the `continuationToken` as a query-string option. By default this end-point will list up to 1000 workers in a single page. You may limit this with the query-string parameter `limit`. This method gives output: ``v1/list-workers-response.json#`` This method is ``experimental``
12.702744
18.44626
0.688635
return await self._makeApiCall(self.funcinfo["getWorker"], *args, **kwargs)
async def getWorker(self, *args, **kwargs)
Get a worker Get a worker from a worker-type. This method gives output: ``v1/worker-response.json#`` This method is ``experimental``
13.900395
17.314308
0.802827
# a bit weird, but I don't want to hard code default values
try:
    f = open(value, **kwarg)
except IOError as e:
    raise ValueError("unable to open %s : %s" % (path.abspath(value), e))
return f
def file(value, **kwarg)
value should be a path to a file in the filesystem. Returns a file object
5.456868
5.120012
1.065792
if attempt <= 0:
    return 0
# We subtract one to get exponents: 1, 2, 3, 4, 5, ..
delay = float(2 ** (attempt - 1)) * float(DELAY_FACTOR)
# Apply randomization factor
delay = delay * (RANDOMIZATION_FACTOR * (random.random() * 2 - 1) + 1)
# Always limit with a maximum delay
return min(delay, MAX_DELAY)
def calculateSleepTime(attempt)
From the go client https://github.com/taskcluster/go-got/blob/031f55c/backoff.go#L24-L29
4.863484
4.861628
1.000382
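To see the backoff envelope, here is a self-contained version of the function with assumed values for the module constants (the real client defines its own `DELAY_FACTOR`, `RANDOMIZATION_FACTOR` and `MAX_DELAY`):

```
import random

DELAY_FACTOR = 0.1           # assumed constants, for illustration only
RANDOMIZATION_FACTOR = 0.25
MAX_DELAY = 30

def calculateSleepTime(attempt):
    if attempt <= 0:
        return 0
    delay = float(2 ** (attempt - 1)) * float(DELAY_FACTOR)
    delay = delay * (RANDOMIZATION_FACTOR * (random.random() * 2 - 1) + 1)
    return min(delay, MAX_DELAY)

for attempt in range(1, 6):
    print(attempt, round(calculateSleepTime(attempt), 3))
# Doubling envelope of ~0.1, 0.2, 0.4, 0.8, 1.6 seconds, each jittered
# by +/-25%, capped at MAX_DELAY
```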
# We want to handle past dates as well as future dates
future = True
offset = offset.lstrip()
if offset.startswith('-'):
    future = False
    offset = offset[1:].lstrip()
if offset.startswith('+'):
    offset = offset[1:].lstrip()

# Parse offset
m = r.match(offset)
if m is None:
    raise ValueError("offset string: '%s' does not parse" % offset)

# In order to handle years and months we convert them into days,
# since timedelta only goes as high as weeks
days = 0
hours = 0
minutes = 0
seconds = 0
if m.group('years'):
    years = int(m.group('years'))
    days += 365 * years
if m.group('months'):
    months = int(m.group('months'))
    days += 30 * months
days += int(m.group('days') or 0)
hours += int(m.group('hours') or 0)
minutes += int(m.group('minutes') or 0)
seconds += int(m.group('seconds') or 0)

# Offset datetime from utc
delta = datetime.timedelta(
    weeks=int(m.group('weeks') or 0),
    days=days,
    hours=hours,
    minutes=minutes,
    seconds=seconds,
)

if not dateObj:
    dateObj = datetime.datetime.utcnow()

return dateObj + delta if future else dateObj - delta
def fromNow(offset, dateObj=None)
Generate a `datetime.datetime` instance which is offset using a string. See the README.md for a full example, but offset could be '1 day' for a datetime object one day in the future
2.448269
2.45145
0.998702
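A few illustrative calls, assuming the regex `r` and the function above are in scope (the taskcluster client exposes this as `taskcluster.fromNow`):

```
print(fromNow('1 day'))               # this time tomorrow
print(fromNow('2 hours 30 minutes'))  # later today
print(fromNow('-1 week'))             # one week in the past
print(fromNow('1 year 2 months'))     # approximated as 365 + 2 * 30 days
```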
def handleDateAndBinaryForJs(x):
    if six.PY3 and isinstance(x, six.binary_type):
        x = x.decode()
    if isinstance(x, (datetime.datetime, datetime.date)):
        return stringDate(x)
    else:
        return x

d = json.dumps(obj, separators=(',', ':'), default=handleDateAndBinaryForJs, **kwargs)
assert '\n' not in d
return d
def dumpJson(obj, **kwargs)
Match JS's JSON.stringify. When using the default separators, base64-encoding JSON results in \n sequences in the output. Hawk barfs in your face if you have that in the text
3.554622
3.647536
0.974527
if isinstance(b64str, six.text_type):
    b64str = b64str.encode()
# see RFC 4648, sec. 5
return b64str.replace(b'+', b'-').replace(b'/', b'_')
def makeB64UrlSafe(b64str)
Make a base64 string URL Safe
2.550779
2.601624
0.980457
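A quick demonstration of the substitution, assuming the function above is in scope:

```
import base64

raw = base64.b64encode(b'\xfb\xef\xff')  # b'++//' -- contains '+' and '/'
print(makeB64UrlSafe(raw))               # b'--__' -- URL-safe alphabet
```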
if isinstance(s, six.text_type):
    s = s.encode()
return base64.encodestring(s).strip().replace(b'\n', b'')
def encodeStringForB64Header(s)
HTTP Headers can't have new lines in them, so let's base64-encode the string and strip the newlines out
2.820579
2.923428
0.964819
_cache = {}

def closure(name):
    if name not in _cache:
        _cache[name] = slugId()
    return _cache[name]

return closure
def stableSlugId()
Returns a closure which can be used to generate stable slugIds. Stable slugIds can be used in a graph to specify task IDs in multiple places without regenerating them, e.g. taskId, requires, etc.
4.411942
3.797499
1.161802
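Usage sketch, assuming `stableSlugId` and `slugId` above are in scope:

```
gen = stableSlugId()
taskA = gen("build")
assert gen("build") == taskA   # same name -> same slugId, every time
assert gen("test") != taskA    # different names get independent slugIds
```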
for scopeSet in requiredScopeSets:
    for requiredScope in scopeSet:
        for scope in assumedScopes:
            if scope == requiredScope:
                # requiredScope satisfied, no need to check more scopes
                break
            if scope.endswith("*") and requiredScope.startswith(scope[:-1]):
                # requiredScope satisfied, no need to check more scopes
                break
        else:
            # requiredScope not satisfied, stop checking scopeSet
            break
    else:
        # scopeSet satisfied, so we're happy
        return True
# none of the requiredScopeSets were satisfied
return False
def scopeMatch(assumedScopes, requiredScopeSets)
Take a list of assumed scopes and a list of required scope sets in disjunctive normal form, and check whether any of the required scope sets is satisfied. Example: requiredScopeSets = [ ["scopeA", "scopeB"], ["scopeC"] ] In this case assumed_scopes must contain either "scopeA" AND "scopeB", OR just "scopeC".
3.06938
3.340781
0.918761
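Worked examples of the matching rules, assuming the function above is in scope:

```
assumed = ["queue:create-task:aws-provisioner-v1/*"]

# A trailing '*' in an assumed scope matches any required scope it prefixes.
scopeMatch(assumed, [["queue:create-task:aws-provisioner-v1/gecko-b-1"]])  # True

# Neither required set is satisfied here.
scopeMatch(assumed, [["queue:create-task:other/worker"], ["scopeC"]])      # False

# Disjunctive normal form: the first set (scopeA AND scopeB) suffices.
scopeMatch(["scopeA", "scopeB"], [["scopeA", "scopeB"], ["scopeC"]])       # True
```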
retry = -1
response = None
while retry < retries:
    retry += 1
    # if this isn't the first retry then we sleep
    if retry > 0:
        snooze = float(retry * retry) / 10.0
        log.info('Sleeping %0.2f seconds for exponential backoff', snooze)
        time.sleep(snooze)
    # Seek payload to start, if it is a file
    if hasattr(payload, 'seek'):
        payload.seek(0)
    log.debug('Making attempt %d', retry)
    try:
        response = makeSingleHttpRequest(method, url, payload, headers, session)
    except requests.exceptions.RequestException as rerr:
        if retry < retries:
            log.warn('Retrying because of: %s' % rerr)
            continue
        # raise a connection exception
        raise rerr
    # Handle non 2xx status code and retry if possible
    try:
        response.raise_for_status()
    except requests.exceptions.RequestException:
        pass
    status = response.status_code
    if 500 <= status < 600:
        if retry < retries:
            log.warn('Retrying because of: %d status' % status)
            continue
        # out of retries: raise instead of silently returning the 5xx response
        raise exceptions.TaskclusterRestFailure("Unknown Server Error", superExc=None)
    return response
# This code-path should be unreachable
assert False, "Error from last retry should have been raised!"
def makeHttpRequest(method, url, payload, headers, retries=MAX_RETRIES, session=None)
Make an HTTP request and retry it until success, return request
4.490219
4.493875
0.999186
if isinstance(certificate, six.string_types):
    certificate = json.loads(certificate)
expiry = certificate.get('expiry', 0)
return expiry < int(time.time() * 1000) + 20 * 60
def isExpired(certificate)
Check if certificate is expired
3.082046
3.052674
1.009622
options = defaults or {}
credentials = options.get('credentials', {})

rootUrl = os.environ.get('TASKCLUSTER_ROOT_URL')
if rootUrl:
    options['rootUrl'] = rootUrl

clientId = os.environ.get('TASKCLUSTER_CLIENT_ID')
if clientId:
    credentials['clientId'] = clientId

accessToken = os.environ.get('TASKCLUSTER_ACCESS_TOKEN')
if accessToken:
    credentials['accessToken'] = accessToken

certificate = os.environ.get('TASKCLUSTER_CERTIFICATE')
if certificate:
    credentials['certificate'] = certificate

if credentials:
    options['credentials'] = credentials

return options
def optionsFromEnvironment(defaults=None)
Fetch root URL and credentials from the standard TASKCLUSTER_… environment variables and return them in a format suitable for passing to a client constructor.
2.098412
1.794677
1.169242
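For example, with the standard variables set (values here are made up):

```
import os

os.environ['TASKCLUSTER_ROOT_URL'] = 'https://tc.example.com'
os.environ['TASKCLUSTER_CLIENT_ID'] = 'my-client'
os.environ['TASKCLUSTER_ACCESS_TOKEN'] = 'secret'

print(optionsFromEnvironment())
# {'rootUrl': 'https://tc.example.com',
#  'credentials': {'clientId': 'my-client', 'accessToken': 'secret'}}
```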
assert 0 <= xcolor <= 255
if xcolor < 16:
    # basic colors
    return BASIC16[xcolor]
elif 16 <= xcolor <= 231:
    # color cube
    xcolor -= 16
    return (CUBE_STEPS[xcolor // 36 % 6],
            CUBE_STEPS[xcolor // 6 % 6],
            CUBE_STEPS[xcolor % 6])
elif 232 <= xcolor <= 255:
    # gray tone
    c = 8 + (xcolor - 232) * 0x0A
    return (c, c, c)
def xterm_to_rgb(xcolor)
Convert xterm Color ID to an RGB value All 256 values are precalculated and stored in :data:`COLOR_TABLE`
2.816671
2.899909
0.971297
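Two sample conversions, assuming `BASIC16` and `CUBE_STEPS` are the usual xterm tables:

```
xterm_to_rgb(196)  # (255, 0, 0) -- from the 6x6x6 color cube
xterm_to_rgb(244)  # (128, 128, 128) -- grayscale ramp: 8 + (244 - 232) * 10
```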
if r < 5 and g < 5 and b < 5:
    return 16
best_match = 0
smallest_distance = 10000000000
for c in range(16, 256):
    d = ((COLOR_TABLE[c][0] - r) ** 2 +
         (COLOR_TABLE[c][1] - g) ** 2 +
         (COLOR_TABLE[c][2] - b) ** 2)
    if d < smallest_distance:
        smallest_distance = d
        best_match = c
return best_match
def rgb_to_xterm(r, g, b)
Quantize RGB values to an xterm 256-color ID This works by envisioning the RGB values for all 256 xterm colors as 3D euclidean space and brute-force searching for the nearest neighbor. This is very slow. If you're very lucky, :func:`compile_speedup` will replace this function automatically with routines in `_xterm256.c`.
2.030586
2.026047
1.002241
import os
import ctypes
from os.path import join, dirname, getmtime, exists, expanduser

# library = join(dirname(__file__), '_xterm256.so')
library = expanduser('~/.xterm256.so')
sauce = join(dirname(__file__), '_xterm256.c')
if not exists(library) or getmtime(sauce) > getmtime(library):
    build = "gcc -fPIC -shared -o %s %s" % (library, sauce)
    if os.system(build + " >/dev/null 2>&1") != 0:
        raise OSError("GCC error")
xterm256_c = ctypes.cdll.LoadLibrary(library)
xterm256_c.init()

def xterm_to_rgb(xcolor):
    res = xterm256_c.xterm_to_rgb_i(xcolor)
    return ((res >> 16) & 0xFF, (res >> 8) & 0xFF, res & 0xFF)

return (xterm256_c.rgb_to_xterm, xterm_to_rgb)
def compile_speedup()
Tries to compile/link the C version of this module It really makes a huge difference. With a little bit of luck this should *just work* for you. You need: - Python >= 2.5 for the ctypes library - gcc (``sudo apt-get install gcc``)
2.644807
2.681431
0.986342
import optparse
parser = optparse.OptionParser()
parser.add_option(
    "-w", "--width", dest="width", type="int", default=None,
    help=("Width of printed image in characters. Default: %default"))
(options, args) = parser.parse_args(args=sys.argv[1:])
for imgpath in args:
    for line in Image(imgpath, options.width):
        printy(line)
def main()
Main function for :command:`fabulous-image`.
3.610135
3.583463
1.007443
(iw, ih) = self.size
if width is None:
    width = min(iw, utils.term.width)
elif isinstance(width, basestring):
    # width was given as a percentage string such as '50%'; the original
    # mapping was inverted (int keys looked up with a string key)
    percents = dict(('%s%%' % pct, pct) for pct in range(101))
    width = iw * percents[width] // 100
height = int(float(ih) * (float(width) / float(iw)))
height //= 2
self.img = self.img.resize((width, height))
def resize(self, width=None)
Resizes image to fit inside terminal Called by the constructor automatically.
3.730535
3.806764
0.979975
need_reset = False
line = []
for color, items in itertools.groupby(colors):
    if color is None:
        if need_reset:
            line.append("\x1b[49m")
            need_reset = False
        line.append(self.pad * len(list(items)))
    elif color == "EOL":
        if need_reset:
            line.append("\x1b[49m")
            need_reset = False
            yield "".join(line)
        else:
            line.pop()
            yield "".join(line)
        line = []
    else:
        need_reset = True
        line.append("\x1b[48;5;%dm%s" % (
            color, self.pad * len(list(items))))
def reduce(self, colors)
Converts color codes into optimized text This optimizer works by merging adjacent colors so we don't have to repeat the same escape codes for each pixel. There is no loss of information. :param colors: Iterable yielding an xterm color code for each pixel, None to indicate a transparent pixel, or ``'EOL'`` to indicate the end of a line. :return: Yields lines of optimized text.
2.881772
2.57143
1.120689
(width, height) = self.img.size
bgcolor = utils.term.bgcolor
self.img.load()
for y in range(height):
    for x in range(width):
        rgba = self.img.getpixel((x, y))
        if len(rgba) == 4 and rgba[3] == 0:
            yield None
        elif len(rgba) == 3 or rgba[3] == 255:
            yield xterm256.rgb_to_xterm(*rgba[:3])
        else:
            color = grapefruit.Color.NewFromRgb(
                *[c / 255.0 for c in rgba])
            rgba = grapefruit.Color.AlphaBlend(color, bgcolor).rgb
            yield xterm256.rgb_to_xterm(
                *[int(c * 255.0) for c in rgba])
    yield "EOL"
def convert(self)
Yields xterm color codes for each pixel in image
3.606108
3.160205
1.1411
return self._makeApiCall(self.funcinfo["oidcCredentials"], *args, **kwargs)
def oidcCredentials(self, *args, **kwargs)
Get Taskcluster credentials given a suitable `access_token` Given an OIDC `access_token` from a trusted OpenID provider, return a set of Taskcluster credentials for use on behalf of the identified user. This method is typically not called with a Taskcluster client library and does not accept Hawk credentials. The `access_token` should be given in an `Authorization` header: ``` Authorization: Bearer abc.xyz ``` The `access_token` is first verified against the named :provider, then passed to the provider's APIBuilder to retrieve a user profile. That profile is then used to generate Taskcluster credentials appropriate to the user. Note that the resulting credentials may or may not include a `certificate` property. Callers should be prepared for either alternative. The given credentials will expire in a relatively short time. Callers should monitor this expiration and refresh the credentials when they expire, by calling this endpoint again. This method gives output: ``v1/oidc-credentials-response.json#`` This method is ``experimental``
13.406669
20.965374
0.639467
return await self._makeApiCall(self.funcinfo["builds"], *args, **kwargs)
async def builds(self, *args, **kwargs)
List of Builds A paginated list of builds that have been run in Taskcluster. Can be filtered on various git-specific fields. This method gives output: ``v1/build-list.json#`` This method is ``experimental``
16.068943
29.375214
0.547024
return await self._makeApiCall(self.funcinfo["repository"], *args, **kwargs)
async def repository(self, *args, **kwargs)
Get Repository Info Returns any repository metadata that is useful within Taskcluster related services. This method gives output: ``v1/repository.json#`` This method is ``experimental``
16.427279
23.80781
0.689995
return await self._makeApiCall(self.funcinfo["createStatus"], *args, **kwargs)
async def createStatus(self, *args, **kwargs)
Post a status against a given changeset For a given changeset (SHA) of a repository, this will attach a "commit status" on github. These statuses are links displayed next to each revision. The status is either OK (green check) or FAILURE (red cross), made of a custom title and link. This method takes input: ``v1/create-status.json#`` This method is ``experimental``
12.719444
22.87215
0.556111
return await self._makeApiCall(self.funcinfo["email"], *args, **kwargs)
async def email(self, *args, **kwargs)
Send an Email Send an email to `address`. The content is markdown and will be rendered to HTML, but both the HTML and raw markdown text will be sent in the email. If a link is included, it will be rendered to a nice button in the HTML version of the email This method takes input: ``v1/email-request.json#`` This method is ``experimental``
15.70442
25.786541
0.609016
return await self._makeApiCall(self.funcinfo["pulse"], *args, **kwargs)
async def pulse(self, *args, **kwargs)
Publish a Pulse Message Publish a message on pulse with the given `routingKey`. This method takes input: ``v1/pulse-request.json#`` This method is ``experimental``
16.708496
24.44603
0.683485
fmt = "%(asctime)s [%(levelname)s] [%(name)s:%(lineno)d] %(message)s"
logging.root.setLevel(transient_level)  # <--- IMPORTANT
hand = TransientStreamHandler(level=level)
hand.setFormatter(logging.Formatter(fmt))
logging.root.addHandler(hand)
def basicConfig(level=logging.WARNING, transient_level=logging.NOTSET)
Shortcut for setting up transient logging I am a replica of ``logging.basicConfig`` which installs a transient logging handler to stderr.
3.29558
3.172818
1.038692
return await self._makeApiCall(self.funcinfo["resetAccessToken"], *args, **kwargs)
async def resetAccessToken(self, *args, **kwargs)
Reset `accessToken` Reset a client's `accessToken`; this will revoke the existing `accessToken`, generate a new `accessToken`, and return it from this call. There is no way to retrieve an existing `accessToken`, so if you lose it you must reset the accessToken to acquire it again. This method gives output: ``v1/create-client-response.json#`` This method is ``stable``
13.621654
21.175104
0.643286
return await self._makeApiCall(self.funcinfo["role"], *args, **kwargs)
async def role(self, *args, **kwargs)
Get Role Get information about a single role, including the set of scopes that the role expands to. This method gives output: ``v1/get-role-response.json#`` This method is ``stable``
15.958801
23.69137
0.673612
return await self._makeApiCall(self.funcinfo["azureAccounts"], *args, **kwargs)
async def azureAccounts(self, *args, **kwargs)
List Accounts Managed by Auth Retrieve a list of all Azure accounts managed by Taskcluster Auth. This method gives output: ``v1/azure-account-list-response.json#`` This method is ``stable``
14.342927
18.447577
0.777497
return await self._makeApiCall(self.funcinfo["authenticateHawk"], *args, **kwargs)
async def authenticateHawk(self, *args, **kwargs)
Authenticate Hawk Request Validate the request signature given on input and return the list of scopes that the authenticating client has. This method is used by other services that wish to rely on Taskcluster credentials for authentication. This way we can use Hawk without having the secret credentials leave this service. This method takes input: ``v1/authenticate-hawk-request.json#`` This method gives output: ``v1/authenticate-hawk-response.json#`` This method is ``stable``
13.321739
19.132671
0.696282
return await self._makeApiCall(self.funcinfo["listHookGroups"], *args, **kwargs)
async def listHookGroups(self, *args, **kwargs)
List hook groups This endpoint will return a list of all hook groups with at least one hook. This method gives output: ``v1/list-hook-groups-response.json#`` This method is ``stable``
11.836989
18.878677
0.627003
return await self._makeApiCall(self.funcinfo["listHooks"], *args, **kwargs)
async def listHooks(self, *args, **kwargs)
List hooks in a given group This endpoint will return a list of all the hook definitions within a given hook group. This method gives output: ``v1/list-hooks-response.json#`` This method is ``stable``
12.373603
22.934265
0.539525
return await self._makeApiCall(self.funcinfo["hook"], *args, **kwargs)
async def hook(self, *args, **kwargs)
Get hook definition This endpoint will return the hook definition for the given `hookGroupId` and `hookId`. This method gives output: ``v1/hook-definition.json#`` This method is ``stable``
18.909891
30.513102
0.61973
return await self._makeApiCall(self.funcinfo["updateHook"], *args, **kwargs)
async def updateHook(self, *args, **kwargs)
Update a hook This endpoint will update an existing hook. All fields except `hookGroupId` and `hookId` can be modified. This method takes input: ``v1/create-hook-request.json#`` This method gives output: ``v1/hook-definition.json#`` This method is ``stable``
13.097463
23.037073
0.568538
return await self._makeApiCall(self.funcinfo["removeHook"], *args, **kwargs)
async def removeHook(self, *args, **kwargs)
Delete a hook This endpoint will remove a hook definition. This method is ``stable``
13.458493
27.535398
0.488771
return await self._makeApiCall(self.funcinfo["triggerHook"], *args, **kwargs)
async def triggerHook(self, *args, **kwargs)
Trigger a hook This endpoint will trigger the creation of a task from a hook definition. The HTTP payload must match the hook's `triggerSchema`. If it does, it is provided as the `payload` property of the JSON-e context used to render the task template. This method takes input: ``v1/trigger-hook.json#`` This method gives output: ``v1/trigger-hook-response.json#`` This method is ``stable``
15.176603
21.969658
0.690798
return await self._makeApiCall(self.funcinfo["getTriggerToken"], *args, **kwargs)
async def getTriggerToken(self, *args, **kwargs)
Get a trigger token Retrieve a unique secret token for triggering the specified hook. This token can be deactivated with `resetTriggerToken`. This method gives output: ``v1/trigger-token-response.json#`` This method is ``stable``
11.410278
17.511395
0.651592
return await self._makeApiCall(self.funcinfo["resetTriggerToken"], *args, **kwargs)
async def resetTriggerToken(self, *args, **kwargs)
Reset a trigger token Reset the token for triggering a given hook. This invalidates any token that may have been issued via `getTriggerToken`, replacing it with a new token. This method gives output: ``v1/trigger-token-response.json#`` This method is ``stable``
12.126357
18.263283
0.663975
return await self._makeApiCall(self.funcinfo["triggerHookWithToken"], *args, **kwargs)
async def triggerHookWithToken(self, *args, **kwargs)
Trigger a hook with a token This endpoint triggers a defined hook with a valid token. The HTTP payload must match the hook's `triggerSchema`. If it does, it is provided as the `payload` property of the JSON-e context used to render the task template. This method takes input: ``v1/trigger-hook.json#`` This method gives output: ``v1/trigger-hook-response.json#`` This method is ``stable``
11.577814
18.574266
0.623326
return await self._makeApiCall(self.funcinfo["createWorkerType"], *args, **kwargs)
async def createWorkerType(self, *args, **kwargs)
Create new Worker Type Create a worker type. A worker type contains all the configuration needed for the provisioner to manage the instances. Each worker type knows which regions and which instance types are allowed for that worker type. Remember that Capacity is the number of concurrent tasks that can be run on a given EC2 resource and that Utility is the relative performance rate between different instance types. There is no way to configure different regions to have different sets of instance types, so ensure that all instance types are available in all regions. This function is idempotent. Once a worker type is in the provisioner, a background process will begin creating instances for it based on its capacity bounds and its pending task count from the Queue. It is the worker's responsibility to shut itself down. The provisioner has a limit (currently 96 hours) for all instances to prevent zombie instances from running indefinitely. The provisioner will ensure that all instances created are tagged with AWS resource tags containing the provisioner id and the worker type. If provided, the secrets in the global, region and instance type sections are available using the secrets API. If specified, the scopes provided will be used to generate a set of temporary credentials available with the other secrets. This method takes input: ``http://schemas.taskcluster.net/aws-provisioner/v1/create-worker-type-request.json#`` This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#`` This method is ``stable``
12.31419
17.231583
0.714629
return await self._makeApiCall(self.funcinfo["updateWorkerType"], *args, **kwargs)
async def updateWorkerType(self, *args, **kwargs)
Update Worker Type Provide a new copy of a worker type to replace the existing one. This will overwrite the existing worker type definition if there is already a worker type of that name. This method will return a 200 response along with a copy of the worker type definition created. Note that if you are using the result of a GET on the worker-type end-point, you will need to delete the `lastModified` and `workerType` keys from the returned object, since those fields are not allowed in the request body for this method. Otherwise, all input requirements and actions are the same as the create method. This method takes input: ``http://schemas.taskcluster.net/aws-provisioner/v1/create-worker-type-request.json#`` This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#`` This method is ``stable``
12.115457
18.765162
0.645636
return await self._makeApiCall(self.funcinfo["workerType"], *args, **kwargs)
async def workerType(self, *args, **kwargs)
Get Worker Type Retrieve a copy of the requested worker type definition. This copy contains a lastModified field as well as the worker type name. As such, it will require manipulation to be able to use the results of this method to submit data to the update method. This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#`` This method is ``stable``
14.14126
18.894693
0.748425
return await self._makeApiCall(self.funcinfo["createSecret"], *args, **kwargs)
async def createSecret(self, *args, **kwargs)
Create new Secret Insert a secret into the secret storage. The supplied secrets will be provided verbatim via `getSecret`, while the supplied scopes will be converted into credentials by `getSecret`. This method is not ordinarily used in production; instead, the provisioner creates a new secret directly for each spot bid. This method takes input: ``http://schemas.taskcluster.net/aws-provisioner/v1/create-secret-request.json#`` This method is ``stable``
13.787181
20.748293
0.664497
return await self._makeApiCall(self.funcinfo["removeSecret"], *args, **kwargs)
async def removeSecret(self, *args, **kwargs)
Remove a Secret Remove a secret. After this call, a call to `getSecret` with the given token will return no information. It is very important that the consumer of a secret delete the secret from storage before handing over control to untrusted processes to prevent credential and/or secret leakage. This method is ``stable``
13.315381
19.632704
0.678225
return await self._makeApiCall(self.funcinfo["getLaunchSpecs"], *args, **kwargs)
async def getLaunchSpecs(self, *args, **kwargs)
Get All Launch Specifications for WorkerType This method returns a preview of all possible launch specifications that this worker type definition could submit to EC2. It is used to test worker types, nothing more. **This API end-point is experimental and may be subject to change without warning.** This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-launch-specs-response.json#`` This method is ``experimental``
11.100817
16.609755
0.668331
retry = -1
response = None
implicit = False
if session is None:
    implicit = True
    session = aiohttp.ClientSession()
try:
    while True:
        retry += 1
        # if this isn't the first retry then we sleep
        if retry > 0:
            snooze = float(retry * retry) / 10.0
            log.info('Sleeping %0.2f seconds for exponential backoff', snooze)
            await asyncio.sleep(snooze)
        # Seek payload to start, if it is a file
        if hasattr(payload, 'seek'):
            payload.seek(0)
        log.debug('Making attempt %d', retry)
        try:
            with async_timeout.timeout(60):
                response = await makeSingleHttpRequest(method, url, payload, headers, session)
        except aiohttp.ClientError as rerr:
            if retry < retries:
                log.warn('Retrying because of: %s' % rerr)
                continue
            # raise a connection exception
            raise rerr
        except ValueError as rerr:
            log.warn('ValueError from aiohttp: redirect to non-http or https')
            raise rerr
        except RuntimeError as rerr:
            log.warn('RuntimeError from aiohttp: session closed')
            raise rerr
        # Handle non 2xx status code and retry if possible
        status = response.status
        if 500 <= status < 600:
            if retry < retries:
                log.warn('Retrying because of: %d status' % status)
                continue
            raise exceptions.TaskclusterRestFailure("Unknown Server Error", superExc=None)
        return response
finally:
    # Close an implicitly-created session with await; calling
    # loop.run_until_complete() from inside a running coroutine would fail.
    if implicit:
        await session.close()
async def makeHttpRequest(method, url, payload, headers, retries=utils.MAX_RETRIES, session=None)
Make an HTTP request and retry it until success, return request
4.313475
4.314608
0.999737
if os.path.exists(name): return os.path.abspath(name) fonts = get_font_files() if name in fonts: return fonts[name] raise FontNotFound("Can't find %r :'( Try adding it to ~/.fonts" % name)
def resolve_font(name)
Turns font names into absolute filenames This is case sensitive. The extension should be omitted. For example:: >>> path = resolve_font('NotoSans-Bold') >>> fontdir = os.path.join(os.path.dirname(__file__), 'fonts') >>> noto_path = os.path.join(fontdir, 'NotoSans-Bold.ttf') >>> noto_path = os.path.abspath(noto_path) >>> assert path == noto_path Absolute paths are allowed:: >>> resolve_font(noto_path) == noto_path True Raises :exc:`FontNotFound` on failure:: >>> try: ... resolve_font('blahahaha') ... assert False ... except FontNotFound: ... pass
4.548004
6.207115
0.732708
roots = [ '/usr/share/fonts/truetype', # where ubuntu puts fonts '/usr/share/fonts', # where fedora puts fonts os.path.expanduser('~/.fonts'), # custom user fonts os.path.abspath(os.path.join(os.path.dirname(__file__), 'fonts')), ] result = {} for root in roots: for path, dirs, names in os.walk(root): for name in names: if name.endswith(('.ttf', '.otf')): result[name[:-4]] = os.path.join(path, name) return result
def get_font_files()
Returns a dict of all font files we could find Returned as a dict mapping font names to absolute paths:: get_font_files() -> {'FontName': '/abs/FontName.ttf', ...} For example:: >>> fonts = get_font_files() >>> 'NotoSans-Bold' in fonts True >>> fonts['NotoSans-Bold'].endswith('/NotoSans-Bold.ttf') True
2.506333
2.686345
0.93299
import optparse
parser = optparse.OptionParser()
parser.add_option(
    "-l", "--list", dest="list", action="store_true", default=False,
    help=("List available fonts"))
parser.add_option(
    "-S", "--skew", dest="skew", type="int", default=None,
    help=("Apply skew effect (measured in pixels) to make it look "
          "extra cool. For example, Fabulous' logo is skewed "
          "by 5 pixels. Default: %default"))
parser.add_option(
    "-C", "--color", dest="color", default="#0099ff",
    help=("Color of your text. This can be specified as you would "
          "using HTML/CSS. Default: %default"))
parser.add_option(
    "-B", "--term-color", dest="term_color", default=None,
    help=("If your terminal background isn't black, please change "
          "this value to the proper background so semi-transparent "
          "pixels will blend properly."))
parser.add_option(
    "-F", "--font", dest="font", default='NotoSans-Bold',
    help=("Name of font file, or absolute path to one. Use the --list "
          "flag to see what fonts are available. Fabulous bundles the "
          "NotoSans-Bold and NotoEmoji-Regular fonts, which are guaranteed "
          "to work. Default: %default"))
parser.add_option(
    "-Z", "--size", dest="fsize", type="int", default=23,
    help=("Size of font in points. Default: %default"))
parser.add_option(
    "-s", "--shadow", dest="shadow", action="store_true", default=False,
    help=("Render a shadow behind the text. Default: %default"))
(options, args) = parser.parse_args(args=sys.argv[1:])
if options.list:
    print("\n".join(sorted(get_font_files())))
    return
if options.term_color:
    utils.term.bgcolor = options.term_color
text = " ".join(args)
if not isinstance(text, unicode):
    text = text.decode('utf-8')
for line in text.split("\n"):
    fab_text = Text(line, skew=options.skew, color=options.color,
                    font=options.font, fsize=options.fsize,
                    shadow=options.shadow)
    for chunk in fab_text:
        printy(chunk)
def main()
Main function for :command:`fabulous-text`.
3.405687
3.283761
1.03713
return self._makeApiCall(self.funcinfo["purgeCache"], *args, **kwargs)
def purgeCache(self, *args, **kwargs)
Purge Worker Cache Publish a purge-cache message to purge caches named `cacheName` with `provisionerId` and `workerType` in the routing-key. Workers should be listening for this message and purge caches when they see it. This method takes input: ``v1/purge-cache-request.json#`` This method is ``stable``
13.672346
19.821985
0.689757
reverse = raw[::-1]
asdf = ''.join(ASDF)
return raw in asdf or reverse in asdf
def is_asdf(raw)
If the password follows the order of keys on the keyboard.
10.127234
8.839581
1.145669
# guard against passwords too short to have a step pattern
if len(raw) < 2:
    return False
delta = ord(raw[1]) - ord(raw[0])
for i in range(2, len(raw)):
    if ord(raw[i]) - ord(raw[i - 1]) != delta:
        return False
return True
def is_by_step(raw)
If the password steps through the alphabet at a constant interval.
3.303839
2.923694
1.130022
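Some illustrative inputs, given the constant-step check above:

```
is_by_step('abcdef')  # True: every step is +1
is_by_step('13579')   # True: every step is +2
is_by_step('a1b2c3')  # False: the step alternates
```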
frequent = WORDS.get(raw, 0)
if freq:
    return frequent > freq
return bool(frequent)
def is_common_password(raw, freq=0)
If the password is commonly used. 10k top passwords: https://xato.net/passwords/more-top-worst-passwords/
7.937804
9.380358
0.846216
raw = to_unicode(raw)
if level > STRONG:
    level = STRONG
if len(raw) < length:
    return Strength(False, 'terrible', 'password is too short')
if is_asdf(raw) or is_by_step(raw):
    return Strength(False, 'simple', 'password has a pattern')
if is_common_password(raw, freq=freq):
    return Strength(False, 'simple', 'password is too common')
types = 0
if LOWER.search(raw):
    types += 1
if UPPER.search(raw):
    types += 1
if NUMBER.search(raw):
    types += 1
if MARKS.search(raw):
    types += 1
if types < 2:
    return Strength(level <= SIMPLE, 'simple', 'password is too simple')
if types < min_types:
    return Strength(level <= MEDIUM, 'medium', 'password is good enough, but not strong')
return Strength(True, 'strong', 'password is perfect')
def check(raw, length=8, freq=0, min_types=3, level=STRONG)
Check the safety level of the password. :param raw: raw text password. :param length: minimal length of the password. :param freq: minimum frequency. :param min_types: minimum character family. :param level: minimum level to validate a password.
3.206965
3.284211
0.97648
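A usage sketch, assuming the module-level constants (`STRONG`, `LOWER`, `WORDS`, ...) behave as the code above implies; the exact verdicts depend on the bundled word list:

```
check('abc123')       # Strength(False, 'terrible', 'password is too short')
check('abcdefgh')     # Strength(False, 'simple', 'password has a pattern')
check('Tr0ub4dor&3')  # likely 'strong': four character families, no pattern
```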
attempts = 0
# allow one retry after re-authenticating on a 403
while attempts < 2:
    # Authenticate first if not authenticated already
    if not self._is_authenticated:
        self._authenticate()
    # Make the request and check for authentication errors
    # This allows us to catch session timeouts for long standing connections
    try:
        return self._send_request(url, method, data, extra_headers)
    except HTTPError as e:
        if e.response.status_code == 403:
            logger.info("Authenticated session against NetMRI timed out. Retrying.")
            self._is_authenticated = False
            attempts += 1
        else:
            # re-raise other HTTP errors
            raise
def _make_request(self, url, method="get", data=None, extra_headers=None)
Prepares the request, checks for authentication and retries in case of issues Args: url (str): URL of the request method (str): Any of "get", "post", "delete" data (any): Possible extra data to send with the request extra_headers (dict): Possible extra headers to send along in the request Returns: dict
4.541664
4.681439
0.970143
headers = {'Content-type': 'application/json'}
if isinstance(extra_headers, dict):
    headers.update(extra_headers)
if not data or "password" not in data:
    logger.debug("Sending {method} request to {url} with data {data}".format(
        method=method.upper(), url=url, data=data))
r = self.session.request(method, url, headers=headers, data=data)
r.raise_for_status()
return r.json()
def _send_request(self, url, method="get", data=None, extra_headers=None)
Performs a given request and returns a json object Args: url (str): URL of the request method (str): Any of "get", "post", "delete" data (any): Possible extra data to send with the request extra_headers (dict): Possible extra headers to send along in the request Returns: dict
2.262286
2.6916
0.840499
url = "{base_url}/api/server_info".format(base_url=self._base_url())
server_info = self._make_request(url=url, method="get")
return server_info["latest_api_version"]
def _get_api_version(self)
Fetches the most recent API version Returns: str
4.073815
4.269607
0.954143
url = "{base_url}/api/authenticate".format(base_url=self._base_url())
data = json.dumps({'username': self.username, "password": self.password})
# Bypass authentication check in make_request by using _send_request
logger.debug("Authenticating against NetMRI")
self._send_request(url, method="post", data=data)
self._is_authenticated = True
def _authenticate(self)
Perform an authentication against NetMRI
5.180469
4.635582
1.117544
# would be better to use inflect.pluralize here, but would add a dependency
if objtype.endswith('y'):
    return objtype[:-1] + 'ies'
if objtype[-1] in 'sx' or objtype[-2:] in ['sh', 'ch']:
    return objtype + 'es'
if objtype.endswith('an'):
    return objtype[:-2] + 'en'
return objtype + 's'
def _controller_name(self, objtype)
Determines the controller name for the object's type Args: objtype (str): The object type Returns: A string with the controller name
3.254999
3.52736
0.922786
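The pluralization rules in action, assuming `client` is an instance of the wrapper class (hypothetical name):

```
client._controller_name('device')  # -> 'devices'
client._controller_name('policy')  # -> 'policies' ('y' -> 'ies')
client._controller_name('switch')  # -> 'switches' ('ch' -> 'es')
```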
return "{base_url}/api/{api_version}/{controller}/{obj_id}".format( base_url=self._base_url(), api_version=self.api_version, controller=self._controller_name(objtype), obj_id=objid )
def _object_url(self, objtype, objid)
Generate the URL for the specified object Args: objtype (str): The object's type objid (int): The object's ID Returns: A string containing the URL of the object
2.616094
3.281083
0.797326
return "{base_url}/api/{api}/{method}".format( base_url=self._base_url(), api=self.api_version, method=method_name )
def _method_url(self, method_name)
Generate the URL for the requested method Args: method_name (str): Name of the method Returns: A string containing the URL of the method
2.913581
3.846301
0.757502
url = self._method_url(method_name)
data = json.dumps(params)
return self._make_request(url=url, method="post", data=data)
def api_request(self, method_name, params)
Execute an arbitrary method. Args: method_name (str): include the controller name: 'devices/search' params (dict): the method parameters Returns: A dict with the response Raises: requests.exceptions.HTTPError
3.123602
3.81821
0.81808
url = self._object_url(objtype, int(objid))
return self._make_request(url, method="get")
def show(self, objtype, objid)
Query for a specific resource by ID Args: objtype (str): object type, e.g. 'device', 'interface' objid (int): object ID (DeviceID, etc.) Returns: A dict with that object Raises: requests.exceptions.HTTPError
5.047766
6.149272
0.820872
return self._makeApiCall(self.funcinfo["irc"], *args, **kwargs)
def irc(self, *args, **kwargs)
Post IRC Message Post a message on IRC to a specific channel or user, or a specific user on a specific channel. Success of this API method does not imply the message was successfully posted. This API method merely inserts the IRC message into a queue that will be processed by a background process. This allows us to re-send the message in the face of connection issues. However, if the user isn't online the message will be dropped without error. We may improve this behavior in the future. For now just keep in mind that IRC is a best-effort service. This method takes input: ``v1/irc-request.json#`` This method is ``experimental``
15.483356
25.44755
0.608442
return self._makeApiCall(self.funcinfo["addDenylistAddress"], *args, **kwargs)
def addDenylistAddress(self, *args, **kwargs)
Denylist Given Address Add the given address to the notification denylist. The address can be any of the three supported address types, namely pulse, email, or IRC (user or channel). Addresses in the denylist will be ignored by the notification service. This method takes input: ``v1/notification-address.json#`` This method is ``experimental``
11.642097
14.032081
0.829677
return self._makeApiCall(self.funcinfo["deleteDenylistAddress"], *args, **kwargs)
def deleteDenylistAddress(self, *args, **kwargs)
Delete Denylisted Address Delete the specified address from the notification denylist. This method takes input: ``v1/notification-address.json#`` This method is ``experimental``
10.250232
12.210891
0.839434
return self._makeApiCall(self.funcinfo["list"], *args, **kwargs)
def list(self, *args, **kwargs)
List Denylisted Notifications Lists all the denylisted addresses. By default this end-point will try to return up to 1000 addresses in one request. But it **may return less**, even if more addresses are available. It may also return a `continuationToken` even though there are no more results. However, you can only be sure to have seen all results if you keep calling `list` with the last `continuationToken` until you get a result without a `continuationToken`. If you are not interested in listing all the addresses at once, you may use the query-string option `limit` to return fewer. This method gives output: ``v1/notification-address-list.json#`` This method is ``experimental``
15.25342
22.842129
0.667776
if hasattr(s, 'as_utf8'):
    if hasattr(sys.stdout, 'buffer'):
        sys.stdout.buffer.write(s.as_utf8)
        sys.stdout.buffer.write(b"\n")
    else:
        sys.stdout.write(s.as_utf8)
        sys.stdout.write(b"\n")
else:
    print(s)
def printy(s)
Python 2/3 compatible print-like function
1.994303
2.00754
0.993407
ref = {
    'exchange': 'client-created',
    'name': 'clientCreated',
    'routingKey': [
        {
            'multipleWords': True,
            'name': 'reserved',
        },
    ],
    'schema': 'v1/client-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def clientCreated(self, *args, **kwargs)
Client Created Messages Message that a new client has been created. This exchange outputs: ``v1/client-message.json#`` This exchange takes the following keys: * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
10.441012
6.043386
1.727676
ref = {
    'exchange': 'client-updated',
    'name': 'clientUpdated',
    'routingKey': [
        {
            'multipleWords': True,
            'name': 'reserved',
        },
    ],
    'schema': 'v1/client-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def clientUpdated(self, *args, **kwargs)
Client Updated Messages Message that a client has been updated. This exchange outputs: ``v1/client-message.json#`` This exchange takes the following keys: * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
10.852952
6.176572
1.757116
ref = {
    'exchange': 'client-deleted',
    'name': 'clientDeleted',
    'routingKey': [
        {
            'multipleWords': True,
            'name': 'reserved',
        },
    ],
    'schema': 'v1/client-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def clientDeleted(self, *args, **kwargs)
Client Deleted Messages Message that a client has been deleted. This exchange outputs: ``v1/client-message.json#`` This exchange takes the following keys: * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
10.209622
5.921741
1.724091
ref = {
    'exchange': 'role-created',
    'name': 'roleCreated',
    'routingKey': [
        {
            'multipleWords': True,
            'name': 'reserved',
        },
    ],
    'schema': 'v1/role-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def roleCreated(self, *args, **kwargs)
Role Created Messages Message that a new role has been created. This exchange outputs: ``v1/role-message.json#`` This exchange takes the following keys: * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
10.077177
6.2336
1.61659
ref = {
    'exchange': 'role-updated',
    'name': 'roleUpdated',
    'routingKey': [
        {
            'multipleWords': True,
            'name': 'reserved',
        },
    ],
    'schema': 'v1/role-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def roleUpdated(self, *args, **kwargs)
Role Updated Messages Message that a role has been updated. This exchange outputs: ``v1/role-message.json#`` This exchange takes the following keys: * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
10.5795
6.297474
1.679959
ref = {
    'exchange': 'role-deleted',
    'name': 'roleDeleted',
    'routingKey': [
        {
            'multipleWords': True,
            'name': 'reserved',
        },
    ],
    'schema': 'v1/role-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def roleDeleted(self, *args, **kwargs)
Role Deleted Messages Message that a role has been deleted. This exchange outputs: ``v1/role-message.json#`` This exchange takes the following keys: * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
9.916456
5.968369
1.661502
cache = {}

@functools.wraps(function)
def _memoize(*args):
    if args in cache:
        return cache[args]
    result = function(*args)
    cache[args] = result
    return result

# return the wrapper, not the undecorated function
return _memoize
def memoize(function)
A very simple memoize decorator to optimize pure-ish functions. Don't use this unless you've examined the code and see the potential risks.
2.163085
2.266054
0.95456
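Usage sketch (arguments must be hashable, since they are used as dict keys):

```
@memoize
def slow_square(n):
    print('computing', n)
    return n * n

slow_square(4)  # prints 'computing 4', returns 16
slow_square(4)  # cache hit: returns 16 without printing
```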
try:
    call = fcntl.ioctl(self.termfd, termios.TIOCGWINSZ, "\000" * 8)
except IOError:
    return (79, 40)
else:
    height, width = struct.unpack("hhhh", call)[:2]
    return (width, height)
def dimensions(self)
Returns terminal dimensions Don't save this information for long periods of time because the user might resize their terminal. :return: Returns ``(width, height)``. If there's no terminal to be found, we'll just return ``(79, 40)``.
3.521562
2.563884
1.373527
msg = [SP_HEADER, cmd]
if pipe is not None:
    msg.append(pipe)
if data is not None:
    msg.append(data)
return msg
def sp_msg(cmd, pipe=None, data=None)
Produces skypipe protocol multipart message
2.495149
2.393579
1.042434
socket = ctx.socket(zmq.DEALER)
socket.linger = 0
socket.connect(endpoint)
socket.send_multipart(sp_msg(SP_CMD_HELLO))
timeout_time = time.time() + timeout
# initialize before the loop so a zero timeout can't leave `reply` unbound
reply = None
while time.time() < timeout_time:
    try:
        reply = socket.recv_multipart(zmq.NOBLOCK)
        break
    except zmq.ZMQError:
        time.sleep(0.1)
socket.close()
if reply:
    return str(reply.pop(0)) == SP_HEADER
def check_skypipe_endpoint(endpoint, timeout=10)
Skypipe endpoint checker -- pings endpoint Returns True if endpoint replies with valid header, Returns False if endpoint replies with invalid header, Returns None if endpoint does not reply within timeout
2.725746
2.86891
0.950098
name = name or ''
socket = ctx.socket(zmq.DEALER)
socket.connect(endpoint)
try:
    socket.send_multipart(sp_msg(SP_CMD_LISTEN, name))
    while True:
        msg = socket.recv_multipart()
        try:
            data = parse_skypipe_data_stream(msg, name)
            if data:
                yield data
        except EOFError:
            # under PEP 479 a StopIteration raised here would become a
            # RuntimeError; returning ends the generator cleanly
            return
finally:
    socket.send_multipart(sp_msg(SP_CMD_UNLISTEN, name))
    socket.close()
def stream_skypipe_output(endpoint, name=None)
Generator for reading skypipe data
3.137139
3.034799
1.033722
header = str(msg.pop(0))
command = str(msg.pop(0))
pipe_name = str(msg.pop(0))
data = str(msg.pop(0))
if header != SP_HEADER:
    return
if pipe_name != for_pipe:
    return
if command != SP_CMD_DATA:
    return
if data == SP_DATA_EOF:
    raise EOFError()
else:
    return data
def parse_skypipe_data_stream(msg, for_pipe)
May return data from skypipe message or raises EOFError
3.182549
2.979345
1.068204
name = name or ''

class context_manager(object):
    def __enter__(self):
        self.socket = ctx.socket(zmq.DEALER)
        self.socket.connect(endpoint)
        return self

    def send(self, data):
        data_msg = sp_msg(SP_CMD_DATA, name, data)
        self.socket.send_multipart(data_msg)

    def __exit__(self, *args, **kwargs):
        eof_msg = sp_msg(SP_CMD_DATA, name, SP_DATA_EOF)
        self.socket.send_multipart(eof_msg)
        self.socket.close()

return context_manager()
def skypipe_input_stream(endpoint, name=None)
Returns a context manager for streaming data into skypipe
2.747113
2.59581
1.058287
stdin = os.fdopen(sys.stdin.fileno(), 'r', 0)
while True:
    line = stdin.readline()
    if line:
        yield line
    else:
        break
def stream_stdin_lines()
Generator for unbuffered line reading from STDIN
2.230886
1.90478
1.171204
try:
    if os.isatty(0):
        # output mode
        for data in stream_skypipe_output(endpoint, name):
            sys.stdout.write(data)
            sys.stdout.flush()
    else:
        # input mode
        with skypipe_input_stream(endpoint, name) as stream:
            for line in stream_stdin_lines():
                stream.send(line)
except KeyboardInterrupt:
    pass
def run(endpoint, name=None)
Runs the skypipe client
4.465828
3.944343
1.132211
if not self.is_set():
    # for python2/3 compatibility
    raise TypeError
if self.start_datetime > self.end_datetime:
    raise ValueError(
        "time inversion found: {:s} > {:s}".format(
            str(self.start_datetime), str(self.end_datetime)
        )
    )
def validate_time_inversion(self)
Check time inversion of the time range. :raises ValueError: If |attr_start_datetime| is bigger than |attr_end_datetime|. :raises TypeError: If any one of |attr_start_datetime| and |attr_end_datetime|, or both, is an inappropriate datetime value. :Sample Code: .. code:: python from datetimerange import DateTimeRange time_range = DateTimeRange("2015-03-22T10:10:00+0900", "2015-03-22T10:00:00+0900") try: time_range.validate_time_inversion() except ValueError: print("time inversion") :Output: .. parsed-literal:: time inversion
4.868241
4.188008
1.162424
try:
    return self.start_datetime.strftime(self.start_time_format)
except AttributeError:
    return self.NOT_A_TIME_STR
def get_start_time_str(self)
:return: |attr_start_datetime| as |str| formatted with |attr_start_time_format|. Return |NaT| if the value or the format is invalid. :rtype: str :Sample Code: .. code:: python from datetimerange import DateTimeRange time_range = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900") print(time_range.get_start_time_str()) time_range.start_time_format = "%Y/%m/%d %H:%M:%S" print(time_range.get_start_time_str()) :Output: .. parsed-literal:: 2015-03-22T10:00:00+0900 2015/03/22 10:00:00
5.575269
4.858465
1.147537
try:
    return self.end_datetime.strftime(self.end_time_format)
except AttributeError:
    return self.NOT_A_TIME_STR
def get_end_time_str(self)
:return: |attr_end_datetime| as a |str| formatted with |attr_end_time_format|. Return |NaT| if invalid datetime or format. :rtype: str :Sample Code: .. code:: python from datetimerange import DateTimeRange time_range = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900") print(time_range.get_end_time_str()) time_range.end_time_format = "%Y/%m/%d %H:%M:%S" print(time_range.get_end_time_str()) :Output: .. parsed-literal:: 2015-03-22T10:10:00+0900 2015/03/22 10:10:00
5.759402
4.874859
1.18145
if value is None:
    self.__start_datetime = None
    return
try:
    self.__start_datetime = typepy.type.DateTime(
        value, strict_level=typepy.StrictLevel.MIN, timezone=timezone
    ).convert()
except typepy.TypeConversionError as e:
    raise ValueError(e)
def set_start_datetime(self, value, timezone=None)
Set the start time of the time range. :param value: |param_start_datetime| :type value: |datetime|/|str| :raises ValueError: If the value is invalid as a |datetime| value. :Sample Code: .. code:: python from datetimerange import DateTimeRange time_range = DateTimeRange() print(time_range) time_range.set_start_datetime("2015-03-22T10:00:00+0900") print(time_range) :Output: .. parsed-literal:: NaT - NaT 2015-03-22T10:00:00+0900 - NaT
3.850425
3.907577
0.985374
if value is None:
    self.__end_datetime = None
    return
try:
    self.__end_datetime = typepy.type.DateTime(
        value, strict_level=typepy.StrictLevel.MIN, timezone=timezone
    ).convert()
except typepy.TypeConversionError as e:
    raise ValueError(e)
def set_end_datetime(self, value, timezone=None)
Set the end time of the time range. :param datetime.datetime/str value: |param_end_datetime| :raises ValueError: If the value is invalid as a |datetime| value. :Sample Code: .. code:: python from datetimerange import DateTimeRange time_range = DateTimeRange() print(time_range) time_range.set_end_datetime("2015-03-22T10:10:00+0900") print(time_range) :Output: .. parsed-literal:: NaT - NaT NaT - 2015-03-22T10:10:00+0900
3.841513
3.866673
0.993493
self.set_start_datetime(start)
self.set_end_datetime(end)
def set_time_range(self, start, end)
:param datetime.datetime/str start: |param_start_datetime| :param datetime.datetime/str end: |param_end_datetime| :Sample Code: .. code:: python from datetimerange import DateTimeRange time_range = DateTimeRange() print(time_range) time_range.set_time_range("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900") print(time_range) :Output: .. parsed-literal:: NaT - NaT 2015-03-22T10:00:00+0900 - 2015-03-22T10:10:00+0900
3.834304
3.700062
1.036281
if self.__compare_timedelta(step, 0) == 0:
    raise ValueError("step must not be zero")
is_inversion = False
try:
    self.validate_time_inversion()
except ValueError:
    is_inversion = True
if not is_inversion:
    if self.__compare_timedelta(step, seconds=0) < 0:
        raise ValueError("invalid step: expect greater than 0, actual={}".format(step))
else:
    if self.__compare_timedelta(step, seconds=0) > 0:
        raise ValueError("invalid step: expect less than 0, actual={}".format(step))
current_datetime = self.start_datetime
while current_datetime <= self.end_datetime:
    yield current_datetime
    current_datetime = current_datetime + step
def range(self, step)
Return an iterator object. :param step: Step of iteration. :type step: |timedelta|/dateutil.relativedelta.relativedelta :return: iterator :rtype: iterator :Sample Code: .. code:: python import datetime from datetimerange import DateTimeRange time_range = DateTimeRange("2015-01-01T00:00:00+0900", "2015-01-04T00:00:00+0900") for value in time_range.range(datetime.timedelta(days=1)): print(value) :Output: .. parsed-literal:: 2015-01-01 00:00:00+09:00 2015-01-02 00:00:00+09:00 2015-01-03 00:00:00+09:00 2015-01-04 00:00:00+09:00
3.065198
2.823178
1.085726
self.validate_time_inversion()
x.validate_time_inversion()
if any([x.start_datetime in self, self.start_datetime in x]):
    start_datetime = max(self.start_datetime, x.start_datetime)
    end_datetime = min(self.end_datetime, x.end_datetime)
else:
    start_datetime = None
    end_datetime = None
return DateTimeRange(
    start_datetime=start_datetime,
    end_datetime=end_datetime,
    start_time_format=self.start_time_format,
    end_time_format=self.end_time_format,
)
def intersection(self, x)
Newly set a time range that overlaps the input and the current time range. :param DateTimeRange x: Value to compute intersection with the current time range. :Sample Code: .. code:: python from datetimerange import DateTimeRange dtr0 = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900") dtr1 = DateTimeRange("2015-03-22T10:05:00+0900", "2015-03-22T10:15:00+0900") dtr0.intersection(dtr1) :Output: .. parsed-literal:: 2015-03-22T10:05:00+0900 - 2015-03-22T10:10:00+0900
2.378105
2.160475
1.100733
self.validate_time_inversion()
x.validate_time_inversion()
return DateTimeRange(
    start_datetime=min(self.start_datetime, x.start_datetime),
    end_datetime=max(self.end_datetime, x.end_datetime),
    start_time_format=self.start_time_format,
    end_time_format=self.end_time_format,
)
def encompass(self, x)
Newly set a time range that encompasses the input and the current time range. :param DateTimeRange x: Value to compute encompass with the current time range. :Sample Code: .. code:: python from datetimerange import DateTimeRange dtr0 = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900") dtr1 = DateTimeRange("2015-03-22T10:05:00+0900", "2015-03-22T10:15:00+0900") dtr0.encompass(dtr1) :Output: .. parsed-literal:: 2015-03-22T10:00:00+0900 - 2015-03-22T10:15:00+0900
3.043765
2.471948
1.231322
self.validate_time_inversion()
if percentage < 0:
    raise ValueError("percentage must be greater than or equal to zero: " + str(percentage))
if percentage == 0:
    return
discard_time = self.timedelta // int(100) * int(percentage / 2)
self.__start_datetime += discard_time
self.__end_datetime -= discard_time
def truncate(self, percentage)
Truncate ``percentage`` / 2 [%] of the whole time from the first and the last time. :param float percentage: Percentage to truncate. :Sample Code: .. code:: python from datetimerange import DateTimeRange time_range = DateTimeRange( "2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900") time_range.is_output_elapse = True print(time_range) time_range.truncate(10) print(time_range) :Output: .. parsed-literal:: 2015-03-22T10:00:00+0900 - 2015-03-22T10:10:00+0900 (0:10:00) 2015-03-22T10:00:30+0900 - 2015-03-22T10:09:30+0900 (0:09:00)
5.675458
5.330087
1.064796
if finish:
    finish.set()
    time.sleep(0.1)  # threads, sigh
if not io:
    io = sys.stdout
finish = threading.Event()
io.write(text)

def _wait():
    while not finish.is_set():
        io.write('.')
        io.flush()
        finish.wait(timeout=1)
    io.write('\n')

threading.Thread(target=_wait).start()
return finish
def wait_for(text, finish=None, io=None)
Displays dots until returned event is set
3.373456
3.198261
1.054778
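A usage sketch of the ticker, assuming the function above is importable:

```
import time

finish = wait_for("Downloading")        # prints "Downloading" plus growing dots
time.sleep(3)                           # ... the slow work ...
finish = wait_for("Unpacking", finish)  # stops the first ticker, starts a new one
time.sleep(2)
finish.set()                            # stop the dots for good
```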