content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def fib(n):
    """Return the nth Fibonacci number (fib(1) == fib(2) == 1, fib(0) == 0)."""
    prev, curr = 0, 1
    remaining = n - 1
    # Advance the pair one step per loop; `prev` ends up holding the answer.
    while remaining >= 0:
        prev, curr = curr, prev + curr
        remaining -= 1
    return prev
459d940518776219c17ce4c6a6c05a7c3a7fac8b
30,539
def task(*work_args, **work_kwargs):
    """Decorator that registers the decorated function as a workflow task.

    Recognized keyword arguments (passed to the decorator call):
        name (str): task name, required.
        task_instance (Workflow): workflow object receiving the task, required.
        task_order (int): ordering hint, default 1.
        shared (bool): default False.
        args (list | tuple | callable): task positional args, default [].
        kwargs (dict | callable): task keyword args, default {}.
        before / after (list | tuple | dict): middleware, default [].
        log (bool): default False.

    The decorated function must declare exactly four variables
    (checked via ``function_.__code__.co_varnames``), otherwise
    ``Exception("Args and Kwargs do not match")`` is raised.

    NOTE: as in the original, the task is registered immediately and the
    decorated name is bound to the registration call's result (None).
    """
    def get_decorator(function_):
        def add_tasks(*function_args, **function_kwargs):
            # Bug fix: the original error message promised this check but the
            # code only validated "name" (a missing instance raised KeyError).
            if not work_kwargs.get("name") or not work_kwargs.get("task_instance"):
                raise TypeError("Name Argument or task instance not provided")
            # Bug fix: the original "type check" conditions were tautologies
            # (`!= list or != tuple` is always true); their net effect was
            # simply a get-with-default, done directly here.
            work_kwargs["args"] = work_kwargs.get("args", [])
            work_kwargs["kwargs"] = work_kwargs.get("kwargs", {})
            t = work_kwargs["task_instance"]
            work_kwargs.update({
                "task_order": work_kwargs.get("task_order", 1),
                "before": work_kwargs.get("before", []),
                "after": work_kwargs.get("after", []),
                "shared": work_kwargs.get("shared", False),
                "options": work_kwargs.get("options", {}),
                "log": work_kwargs.get("log", False),
            })
            # Sanity check kept from the original (its redundant
            # `(True if ... else False) in [None, False]` wrapper removed).
            if len(function_.__code__.co_varnames) != 4:
                raise Exception("Args and Kwargs do not match")
            fn_task = {
                "name": work_kwargs.get("name"),
                "task_order": work_kwargs.get("task_order"),
                "function": function_,
                "workflow_args": work_args,
                "workflow_kwargs": work_kwargs,
                "log": work_kwargs.get("log"),
            }
            t.setter("tasks", fn_task, t)
        return add_tasks()
    return get_decorator
687e632c4da83a3d8d525e1b4bd2bfc147a467b2
30,540
def route_pattern_match(route, path):
    """Backport of ``RegexPattern.match`` for Django versions before 2.0.

    Returns a ``(remaining_path, args, kwargs)`` triple on a match,
    otherwise the falsy non-match result.
    """
    if hasattr(route, "pattern"):
        # Django >= 2.0: delegate to the route's own pattern object.
        outcome = route.pattern.match(path)
        if not outcome:
            return outcome
        remaining, args, kwargs = outcome
        kwargs.update(route.default_args)
        return remaining, args, kwargs
    # Django < 2.0. No converters... :-(
    found = route.regex.search(path)
    if found:
        # Named groups become kwargs (non-named groups are then ignored);
        # with no named groups, all groups become positional args.
        kwargs = found.groupdict()
        args = () if kwargs else found.groups()
        if kwargs is not None:
            kwargs.update(route.default_args)
        return path[found.end():], args, kwargs
    return None
475e0ed14504987adaa7bbc6f28b84de156f3468
30,541
def is_on_border(bbox, im, width_border):
    """Return True when an object's bounding box touches the frame border.

    bbox is (min_row, min_col, max_row, max_col); columns occupy the
    half-open interval [min_col, max_col). The frame width comes from
    ``im.shape[1]``. Only the left/right borders are checked.
    """
    min_col = bbox[1]
    max_col = bbox[3]
    frame_width = im.shape[1]
    touches_left = min_col <= width_border
    touches_right = max_col >= frame_width - width_border
    return touches_left or touches_right
815dab6682f0c977ec2ad86522d41ec086423971
30,542
import argparse


def parse_args():
    """Parse command line arguments.

    :returns: tuple -- (pdb_input_file, pdb_to_smarts, ids_to_smarts,
        output_path, output_file)
    """
    parser = argparse.ArgumentParser(description="Align and compute RMSD")
    positionals = (
        ("pdb_input_file", "Original pdb file."),
        ("pdb_to_smarts", "JSON file connecting pdbs to SMARTS"),
        ("ids_to_smarts", "JSON file connecting ids to SMARTS."),
    )
    for name, help_text in positionals:
        parser.add_argument(name, type=str, help=help_text)
    parser.add_argument("-o", "--output_path", type=str, default="./output/",
                        help="Path where to write the results")
    parser.add_argument("-of", "--output_file", type=str,
                        default="hydration_free_energy.csv",
                        help="File with the energy difference.")
    opts = parser.parse_args()
    return (opts.pdb_input_file, opts.pdb_to_smarts, opts.ids_to_smarts,
            opts.output_path, opts.output_file)
6e8085b06e50a89d9845d35b92aaa082a797cba7
30,543
from datetime import datetime


def get_rate_limit_info(headers):
    """Extract rate-limit information from response headers (a dictionary).

    :returns: dict with 'remaining' (int), 'limit' (int), 'reset' (datetime)
    """
    return {
        'remaining': int(headers.get('x-rate-limit-remaining')),
        'limit': int(headers.get('x-rate-limit-limit')),
        'reset': datetime.fromtimestamp(int(headers.get('x-rate-limit-reset'))),
    }
2478b21d71bff0021d102a78fa6a4149ff4b2d6c
30,545
def mysum(a, b):
    """Add two numbers together.

    Parameters
    ----------
    a, b : Number
        The two addends.

    Returns
    -------
    Number
        Their sum.
    """
    total = a + b
    return total
a64083ac9524cd20875967adbe2b378604be0187
30,546
def list_usage(progname, description, command_keys, command_helps, command_aliases):
    """Build a multiline usage string for a list of commands.

    Commands appear in the order given by ``command_keys``; a ``None`` entry
    produces a blank separator line. Only one alias is allowed per command.

    :param command_keys: list of keys, with None entries for blank lines
    :param command_helps: dict{key: help}
    :param command_aliases: dict{key: alias}
    :returns: usage string (multiline)
    """
    dvars = {'prog': progname}
    dvars.update(vars())
    lines = [description % dvars]
    for key in command_keys:
        if key is None:
            lines.append('')
            continue
        alias = ' (%s)' % command_aliases[key] if key in command_aliases else ''
        lines.append(("%s%s" % (key, alias)).ljust(10) + ' \t' + command_helps[key])
    return '\n'.join(lines)
3246c509eaea33e271b1dc3425a1cc49acb943a3
30,547
def convert_to_local_image_name(image: str) -> str:
    """Map a Platform-internal image name to its public DockerHub name.

    Images such as ``abeja-inc/all-cpu:19.04`` only resolve inside the
    Platform; the same images are published on DockerHub under the
    ``abeja/`` prefix, so that prefix is substituted here.
    """
    prefix = 'abeja-inc/'
    if not image.startswith(prefix):
        return image
    return 'abeja/' + image[len(prefix):]
a7337bea1b4ea38d33603cf3b7bf9b114ba4a441
30,548
def strWithout3a3b(self, A, B):  # recursive solution
    """Build a string of A 'a's and B 'b's with no 'aaa' or 'bbb' substring.

    :type A: int
    :type B: int
    :rtype: str
    """
    if A == 0:
        return 'b' * B
    if B == 0:
        return 'a' * A
    if A == B:
        return 'ab' + self.strWithout3a3b(A - 1, B - 1)
    if A > B:
        return 'aab' + self.strWithout3a3b(A - 2, B - 1)
    return 'bba' + self.strWithout3a3b(A - 1, B - 2)
161ac52e5a6aa27365e0ba5d1244937fe2fb3a40
30,549
from typing import List


def dromedary_to_pascal_case(text: str) -> str:
    """Convert from dromedaryCase to PascalCase."""
    out: List[str] = []
    first_letter_pending = True
    for ch in text:
        # Only the very first alphabetic character is upper-cased.
        if first_letter_pending and ch.isalpha():
            first_letter_pending = False
            out.append(ch.upper())
        else:
            out.append(ch)
    return "".join(out)
50b735ff1f44c30faa27251f85d902f1d2459ea6
30,551
def get_spaceweather_imagefile(if_path, if_date, if_filename, if_extension,
                               verbose):
    """Build the full spaceweather image filename.

    Concatenates the path, date, filename root and extension strings (with
    an underscore between date and root) to match the spaceweather site
    layout. If ``verbose`` is truthy, the result is printed.
    """
    sw_imagefile = "".join([if_path, if_date, "_", if_filename, if_extension])
    if verbose:
        print("Output image file full path: \n{}\n".format(sw_imagefile))
    return sw_imagefile
52fab5c964e28287c8cdd1f30208bc74a3e99d34
30,552
from pathlib import Path


def find_lab(directory, utt_id):
    """Find the label file for a given utterance.

    Args:
        directory (str or Path): directory to search (recursively)
        utt_id (str): utterance id

    Returns:
        Path: path to the unique ``<utt_id>.lab`` file

    Raises:
        AssertionError: if the match count is not exactly one.
    """
    root = Path(directory) if isinstance(directory, str) else directory
    matches = sorted(root.rglob(f"{utt_id}.lab"))
    assert len(matches) == 1
    return matches[0]
c3ca69e436456284b890b7de3aff665da756f044
30,553
import time


def sleep(sec):
    """Wait ``sec`` seconds, then return 'Hello'.

    :param int sec: wait time in seconds
    :return: Hello
    :rtype: str
    """
    time.sleep(sec)
    return 'Hello'
c669db08769e7a55cf77a9ab9dd44a6c7bbe2de3
30,554
def MAX(*expression):
    """Return the MongoDB ``$max`` aggregation operator.

    See https://docs.mongodb.com/manual/reference/operator/aggregation/max/
    for more details

    :param expression: expression/expressions or variables
    :return: Aggregation operator ({} when called with no arguments)
    """
    if not expression:
        return {}
    if len(expression) == 1:
        return {'$max': expression[0]}
    return {'$max': list(expression)}
9a2cdb9873b3a3380051509b7b6ac23fa76a7cb3
30,555
import glob


def subdirectories():
    """Return the subdirectories of the current working directory."""
    # NOTE(review): deliberately a thin glob wrapper, kept as-is.
    return glob.glob('*/')
7d5655312586ae8b6dddc0ebb356a00e89d9bb14
30,557
def longestRun(string,chars): """counts the number of iterations in the longest run of each character""" mode = 0 current_run = 0 best_run = [0]*len(chars) for val in string: if(val.lower() == chars[mode].lower()): #same character as we've seen! great current_run+=1 elif(val.lower() in (chars[:mode]+chars[mode+1:]).lower()): #now we hit a character of the other type if(current_run > best_run[mode]): best_run[mode]=current_run current_run = 1 mode = chars.index(val) if(current_run > best_run[mode]): #counting the last run in the string best_run[mode]=current_run return(best_run)
c66375368e155b19aaca3aa9f57393c28e22820e
30,558
from datetime import datetime


def diff_dates(d1, d2):
    """Return the absolute difference in days between two date strings.

    :param d1: first date, formatted "%m/%d/%Y"
    :param d2: second date, formatted "%m/%d/%Y"
    :return: absolute number of days between the two dates
    """
    fmt = "%m/%d/%Y"
    first = datetime.strptime(d1, fmt)
    second = datetime.strptime(d2, fmt)
    return abs(first - second).days
a26d58ccc74778aede5943283b2974a79e181f67
30,559
def validate(config):
    """
    Validate that the status beacon config is a list.

    Returns a (bool, message) tuple: (False, error) when the config is not
    a list, (True, confirmation) otherwise.
    """
    if not isinstance(config, list):
        return False, ("Configuration for status beacon must be a list.")
    return True, "Valid beacon configuration"
88e1fe5d6cfc28e262a82b45e7dbb603d0a4bb0c
30,560
def formatItemRows(itemRows) -> dict:
    """Flatten item rows into indexed form keys.

    Turns ``{"ITEM_TITLE": ["t1", "t2"]}`` into
    ``{"ITEM_TITLE[0]": "t1", "ITEM_TITLE[1]": "t2"}``.

    :param itemRows: mapping of field title to sequence of values
    :return: flat dict with indexed keys
    """
    return {
        f"{title}[{index}]": value
        for title, row in itemRows.items()
        for index, value in enumerate(row)
    }
1a9329aa677bb99307a0a5fef4b1b871a49ea5d9
30,561
from typing import Callable
from typing import Iterable


def multiply(func: Callable, *args, **kwargs):
    """Return a function that maps ``func`` over an iterable.

    Useful when multiple batches require the same function. ``args`` and
    ``kwargs`` are forwarded to ``func`` as trailing extra arguments,
    after any arguments supplied at call time.
    """
    def wrapped(xs: Iterable, *args_, **kwargs_) -> tuple:
        results = []
        for x in xs:
            results.append(func(x, *args_, *args, **kwargs_, **kwargs))
        return tuple(results)
    return wrapped
97cab2f739d39ab2e933b11a84814df8ecc811cf
30,562
def upload_file_to_slack(slack_connection, file_name, channel_name, timestamp):
    """Upload a file to Slack in a thread under the main message.

    All channel members will have access to the file. Only works for files
    under 50MB. Returns the API response, or None when the upload fails.
    """
    try:
        file_response = slack_connection.files_upload(
            file=file_name,
            filename=file_name,
            title=file_name,
            channels=channel_name,
            thread_ts=timestamp,
        )
        print(f'{file_name} successfully uploaded.')
    except Exception as e:
        # Best-effort: report the failure and signal it with None.
        print(e)
        file_response = None
        print('File failed to upload.')
    return file_response
1922bf8d357881325fd861bc76827961c7ac4db8
30,566
def bitstr_to_int(a):
    """Convert a binary string such as '1011' to its integer value."""
    return int(a, base=2)
1188a2ff24b1bf6c70db9a458d39f08f44f006e8
30,567
def average_true_range(days=10, target="close"):
    """
    | Calculates the average true range
    | Name: average\_true\_range\_\ **days**\ \_of\_\ **target**

    :param days: Window size, defaults to 10
    :type days: int, optional
    :param target: Data column to use, defaults to "close"
    :type target: str, optional
    """
    # Returns a closure over (days, target) that computes the indicator on a
    # DataFrame. NOTE(review): the closure mutates the input frame in place,
    # adding helper columns (true_range, low_vs_close, ...) as a cache.
    def return_function(data):
        column_name = f"average_true_range_{days}_of_{target}"
        # Derive matching high/low column names from the target's suffix
        # (e.g. target "close_x" -> "high_x" / "low_x").
        high_col_name = f"high{target[5:]}"
        low_col_name = f"low{target[5:]}"
        # Only compute once; reuse the cached column on later calls.
        if column_name not in data.columns:
            data['true_range'] = data[high_col_name] - data[low_col_name]
            data['low_vs_close'] = abs(data[low_col_name] - data['close'].shift(1))
            data['high_vs_close'] = abs(data[high_col_name] - data['close'].shift(1))
            # True range = max of the three candidate ranges per row.
            data['max_true_range'] = data[['true_range', 'low_vs_close', 'high_vs_close']].max(axis=1)
            # Rolling mean over `days` rows; min_periods=1 fills the warm-up.
            data[column_name] = data['max_true_range'].rolling(days, min_periods=1).mean()
        return data[column_name].copy()
    return return_function
f1030d7595c1caba937f768c9a34cc7be130b505
30,568
def check_for_outside_blotches(data):
    """Return the fraction of blotches extending past the right image edge.

    Takes the maximum radius of each blotch and compares its x position
    against the right-hand pixel boundary.

    :param data: DataFrame with columns ``x``, ``radius_1`` and ``radius_2``.
    :return: fraction of rows beyond the right border (float in [0, 1]).
    """
    # pixel coordinate maximum in x:
    right_side = 840
    cols = 'radius_1 radius_2'.split()
    # define length of all data
    no_all = data.shape[0]
    # NOTE(review): the original subtracts the max radius before comparing
    # with the right edge; adding it would seem more natural — confirm intent.
    off_right = (data.x - data[cols].max(axis=1)) > right_side
    # Bug fix: the original used .value_counts()[True], which raises
    # KeyError when no blotch is off the right edge; summing the boolean
    # mask handles the zero case safely.
    return off_right.sum() / float(no_all)
0eca723d850834d4a2602a730d0e4c0db1dff108
30,569
def average(number1, number2, number3):
    """Compute the arithmetic mean of three numbers.

    Parameters:
        number1|2|3 (float): the three input numbers

    Returns:
        float: the statistical average of the three numbers
    """
    total = number1 + number2 + number3
    return total / 3.0
04102ee8646b6e5d2cfa9265771c4f4bdbe45d45
30,570
def _make_general_rowkey_scan(rts_start=None, rts_end=None, some_id=None): """If it's filter on some_id without start and end, start_row = some_id while end_row = some_id + MAX_BYTE """ if some_id is None: return None, None if not rts_start: rts_start = chr(127) end_row = "%s_%s" % (some_id, rts_start) start_row = "%s_%s" % (some_id, rts_end) return start_row, end_row
8b2366855cd2c24c6600054e04d00276cc5a2c48
30,573
def compare(O, E):
    """Return a similarity score for corresponding elements in O and E.

    Computed as the relative deviation (O - E) / E.
    """
    deviation = O - E
    return deviation / E
06fb39ace22277ca5d53daf8e03d159d5a41d690
30,574
import ast def _convert_string_to_native(value): """Convert a string to its native python type""" result = None try: result = ast.literal_eval(str(value)) except (SyntaxError, ValueError): # Likely a string result = value.split(',') return result
eaeec2eda520e6342ee6b5f3ac523d44b1c06d53
30,575
def get_help():
    """Read the help text from resource/help.txt; empty string on failure."""
    try:
        with open('resource/help.txt', encoding='UTF-8') as helpfile:
            return helpfile.read()
    except Exception:
        # Best-effort: a missing/unreadable help file yields no help text.
        return ''
e83dd58b73d9ea2199c8c62b9c07c38b4e02d1a7
30,577
def get_llvm_tld4_access(geom):
    """Return the LLVM argument list for a tld4 access of the given geometry.

    2d textures take two float coordinates; 2d texture arrays prepend an
    unsigned integer layer index. Cubemaps take three float coordinates
    (s, t, r); cubemap arrays prepend the integer layer index as well.
    Raises KeyError for unknown geometries (as the original dict lookup did).
    """
    if geom == "2d":
        return "float %x, float %y"
    if geom == "a2d":
        return "i32 %l, float %x, float %y"
    if geom == "cube":
        return "float %s, float %t, float %r"
    if geom == "acube":
        return "i32 %l, float %s, float %t, float %r"
    raise KeyError(geom)
bd6799b76a664523f3e120572b23ec8db95fb2f5
30,578
def cmp(x, y) -> int:
    """Implementation of ``cmp`` for Python 3.

    Compare x and y: return a negative integer if ``x < y``, zero if
    ``x == y`` and a positive integer if ``x > y``.
    """
    greater = x > y
    lesser = x < y
    return int(greater) - int(lesser)
2609f78797ad778e0d0b9c40a568a3f03153dd2c
30,579
def update_taxids(input, updatedTaxids):
    """Update a map of sequence IDs to taxIDs with data from merged.dmp.

    Some of NCBI's taxonomy IDs get merged into others; older sequences may
    still point to an old taxID. Such taxIDs are replaced by their current
    value from merged.dmp so the right node in the taxonomy tree is found.

    Parameters
    ----------
    input : dict of dicts
        Outer keys are sequence types (e.g. NC, GeneID or gi); inner keys
        are sequence IDs mapping to a taxID each.
    updatedTaxids : dict
        Content of merged.dmp: maps an old taxID to its new taxID.

    Returns
    -------
    The original map, updated in place, with outdated taxIDs replaced.
    """
    for seq_type, id_map in input.items():
        for seq_id, tax_id in id_map.items():
            # Only values are rewritten, so mutating during iteration is safe.
            if tax_id in updatedTaxids:
                id_map[seq_id] = updatedTaxids[tax_id]
    return input
be6fe1579d83e8915712c8ea79baaf76ce0a8c9d
30,580
def parse_slice(useslice):
    """Parse ``useslice`` as the arguments to ``slice()`` and return the slice.

    e.g. input useslice="1,20,2" yields ``slice(1, 20, 2)``.

    :raises ValueError: if the string is not consumable by ``slice``.
    """
    import ast
    try:
        # Security fix: evaluate the comma-separated values as literals
        # instead of exec-ing arbitrary text (the original ran any code
        # embedded in the option string).
        parts = ast.literal_eval("(%s,)" % useslice)
        return slice(*parts)
    except Exception:
        msg = ("Expected arguments consumable by 'slice' to follow "
               "option `-j`, found '%s'" % useslice)
        raise ValueError(msg)
471b31a6774127a68eca6619d36df8e0bee66f18
30,581
def predict_multiclass(model, x, *extra_xs):
    """Predict a class index in [0, n_classes) for each row of ``x``.

    Args:
        model: multiclass classifier module producing "n classes" logits
            per input sequence.
        x: input tensor (first dim is the batch).
        extra_xs: additional inputs forwarded to ``model``.

    Returns:
        Integer tensor of shape ``(x.size(0),)`` with the predicted classes.
    """
    logits = model(x, *extra_xs).cpu()
    return logits.argmax(dim=-1)
c351d672d2177cb668a67b6d81ac166fde38daa2
30,586
import torch


def convert_2Djoints_to_gaussian_heatmaps_torch(joints2D, img_wh, std=4):
    """Render batches of 2D joints as Gaussian heatmaps.

    :param joints2D: (B, N, 2) tensor - batch of 2D joints.
    :param img_wh: int, side length of the square heatmaps.
    :param std: standard deviation of the gaussian blobs.
    :return: (B, N, img_wh, img_wh) batch of joint heatmaps (channels first).
    """
    device = joints2D.device
    xx, yy = torch.meshgrid(torch.arange(img_wh, device=device),
                            torch.arange(img_wh, device=device))
    # Broadcast the coordinate grids against the batch/joint dimensions.
    grid_rows = xx[None, None, :, :].float()
    grid_cols = yy[None, None, :, :].float()
    u = joints2D[:, :, 0, None, None]  # horizontal coord (columns)
    v = joints2D[:, :, 1, None, None]  # vertical coord (rows)
    return torch.exp(-(((grid_rows - v) / std) ** 2) / 2
                     - (((grid_cols - u) / std) ** 2) / 2)
26607acb1f756b840a8b970ade0ccdf01bf286e9
30,587
def get_rho_air():
    """Return the constant air density used by this module.

    NOTE(review): units presumed kg/m^3 at roughly sea level — confirm.
    """
    return 1.2
6fe2b826f30c1b10841355711dbe106b67711ca5
30,588
import itertools


def validate_filterfalse(condition_, container_):
    """Apply itertools.filterfalse and materialize the result.

    NAME      : filterfalse()
    Arguments : pred, seq
    Results   : elements of seq where pred(elem) is false
    Example   : filterfalse(lambda x: x%2, range(10)) --> 0 2 4 6 8
    """
    kept = itertools.filterfalse(condition_, container_)
    return list(kept)
5302ee2f6ba5b08142516ab7b1d203857338479d
30,589
import string
import random


def random_string(length=50):
    """Return a string of ``length`` random ASCII letters."""
    alphabet = string.ascii_letters
    last = len(alphabet) - 1
    # Same randint-per-character scheme as before (seed-compatible).
    return "".join(alphabet[random.randint(0, last)] for _ in range(length))
b337045cc724d450f578fc890042dd5e4c849580
30,594
def istamil_alnum(tchar):
    """Return True when the character is alphanumeric or Tamil.

    Checking isalnum() first saves time over always running the full
    istamil() check.
    """
    return tchar.isalnum() or tchar.istamil()
605fc9c82b688e314201ba2be3a3cf9d06fb256d
30,595
def find_size_order(root, parent_inode, file_inode):
    """Rank a file by size among the regular files of a folder.

    Builds a list of (inode, size) tuples for regular ("r/r") files whose
    parent inode matches ``parent_inode``, sorts it by size and returns
    the position of ``file_inode`` in that ordering.

    :param root: ElementTree root containing ``fileobject`` children.
    :param parent_inode: inode of the containing folder.
    :param file_inode: inode of the file whose size rank is requested.
    :return: index of ``file_inode`` in the size-sorted inode list.
    :raises ValueError: if ``file_inode`` is not found in the folder.
    """
    inode_size_list = []
    for fileobject in root.findall('fileobject'):
        parent = fileobject.find('parent_object').find('i_node').text
        name_type = fileobject.find('name_type').text
        inode = fileobject.find('inode')
        filesize = fileobject.find('filesize')
        # Skip anything that is not a regular file of the requested folder
        # or that lacks an inode number or filesize element.
        if (parent is None or int(parent) != int(parent_inode)
                or str(name_type) != str("r/r")
                or inode is None or filesize is None or inode.text is None):
            continue
        # A missing size text means an empty file; the original duplicated
        # the entire branch just to handle this case.
        size = int(filesize.text) if filesize.text is not None else 0
        inode_size_list.append((int(inode.text), size))
    # Sort by size (second tuple element), then locate the requested inode.
    sorted_list = sorted(inode_size_list, key=lambda tup: tup[1])
    return [t[0] for t in sorted_list].index(int(file_inode))
02a8c1120b56b6b1be0b3a6a1bbbc41c7a9a8e3a
30,596
def diff_lists(list_a, list_b):
    """Return the elements of list_a that are not in list_b.

    :param list_a: input list a.
    :param list_b: input list b.
    :return: set difference as a list (order not guaranteed).
    """
    return [*set(list_a).difference(list_b)]
3fac6e456827d567900d1c8dd14cb3fe7e4af0b7
30,597
import os
import requests


def download_dump(dump_link: str, download_path: str) -> str:
    """Download a Wikipedia dump (files already present are not re-fetched).

    The download folder is created if it does not exist.

    Parameters
    ----------
    dump_link : str
        The link to the dump to download.
    download_path : str
        A path where to store the downloaded file.

    Returns
    -------
    str
        The path to the downloaded dump file.
    """
    file_name = dump_link.rsplit("/", 1)[-1]
    os.makedirs(download_path, exist_ok=True)
    file_path = os.path.join(download_path, file_name)
    if not os.path.exists(file_path):
        response = requests.get(dump_link)
        with open(file_path, "wb") as f:
            f.write(response.content)
    return file_path
ce1ea1fc90bba2271243328999a63293404d4e97
30,598
def editDistRecursive(x, y):
    """Compute the Levenshtein edit distance between strings x and y.

    The original exhaustive recursion was exponential (its own docstring
    admitted "this implementation is very slow"); memoizing on the
    remaining prefix lengths makes it O(len(x) * len(y)) while keeping
    the recursive formulation.
    """
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def dist(i, j):
        # i, j = number of leading characters of x / y still considered.
        if i == 0:
            return j
        if j == 0:
            return i
        dist_hor = dist(i - 1, j) + 1
        dist_ver = dist(i, j - 1) + 1
        dist_diag = dist(i - 1, j - 1) + (0 if x[i - 1] == y[j - 1] else 1)
        return min(dist_diag, dist_hor, dist_ver)

    return dist(len(x), len(y))
b2acc8c92e0817d6dc6e90f0f66a7a7ab07e0e7c
30,599
from urllib.parse import urlparse


def deluge_is_url(torrent_url):
    """Check whether an URL is valid, following `this prescription
    <https://stackoverflow.com/questions/7160737/python-how-to-validate-a-url-in-python-malformed-or-not>`_.

    :param str torrent_url: candidate URL.
    :returns: ``True`` if it is a valid URL, ``False`` otherwise.
    :rtype: bool
    """
    try:
        parsed = urlparse(torrent_url)
        # A valid URL needs a scheme, a host and a non-empty path.
        return all((parsed.scheme, parsed.netloc, parsed.path))
    except Exception:
        return False
4a4817dddb89800f8e9db7f80e31f58bff698c5b
30,600
import os


def get_gcs_dir(bucket):
    """Return the GCS directory for this job.

    GCS layout is defined here:
    https://github.com/kubernetes/test-infra/tree/master/gubernator#job-artifact-gcs-layout
    """
    # Consistency fix: the original fetched PULL_NUMBER/REPO_OWNER/REPO_NAME
    # twice and re-read JOB_NAME/BUILD_NUMBER inline; read each env var once.
    pull_number = os.getenv("PULL_NUMBER")
    repo_owner = os.getenv("REPO_OWNER")
    repo_name = os.getenv("REPO_NAME")
    job_name = os.getenv("JOB_NAME")
    build_number = os.getenv("BUILD_NUMBER")
    if pull_number:
        # Presubmit job.
        return ("gs://{bucket}/pr-logs/pull/{owner}_{repo}/"
                "{pull_number}/{job}/{build}").format(
                    bucket=bucket, owner=repo_owner, repo=repo_name,
                    pull_number=pull_number, job=job_name, build=build_number)
    if repo_owner:
        # It is a postsubmit job.
        return ("gs://{bucket}/logs/{owner}_{repo}/"
                "{job}/{build}").format(
                    bucket=bucket, owner=repo_owner, repo=repo_name,
                    job=job_name, build=build_number)
    # It is a periodic job.
    return "gs://{bucket}/logs/{job}/{build}".format(
        bucket=bucket, job=job_name, build=build_number)
51084f0250e2d33dfe8bfdfaa2fa78fceaa2cbff
30,601
import math


def fnZeroToPi(rotangle):
    """Wrap an angle to fit in [0, pi). Works in [rad] not [deg].

    Date: 15 October 2016
    """
    return rotangle % math.pi
9bbded8f81d2b87f5fc5b9a3317020d0a4fc736b
30,602
def to_dict(block, keys=('hash', 'start', 'end', 'blockSize', 'version',
                         'prevHash', 'merkleRootHash', 'time', 'timestamp',
                         'nBits', 'nonce', 'nTransactions')):
    """Return selected block attributes as a dict.

    Similar to ``block.__dict__`` but also resolves properties, not just
    plain attributes.

    :param block: object whose attributes/properties are read via getattr.
    :param keys: attribute names to extract (default: common block fields).
        The default is now an immutable tuple — the original used a mutable
        list default argument, which is shared across calls.
    :return: dict mapping each key to ``getattr(block, key)``.
    """
    return {key: getattr(block, key) for key in keys}
aa594edc52500347fb515c81c9abe8308c81ce04
30,604
def popcount(x):
    """Count the one-bits in the binary representation of x.

    Uses the clear-lowest-set-bit trick, one iteration per set bit.

    Parameters
    ----------
    x : int
        The number whose set bits are counted.

    Returns
    -------
    int
        The number of ones in the binary representation of x.
    """
    count = 0
    while x:
        x &= x - 1  # clear the lowest set bit
        count += 1
    return count
2198546d57be313268ba5f2700a8f7bb9b13518d
30,606
def origin(current, start, end, cum_extents, screen, moving):
    """Determine the new origin of the screen view if necessary.

    The part of the DataFrame displayed on screen is conceptually a box
    with the screen's dimensions hovering over the DataFrame contents;
    this computes the origin of that box along one axis.

    >>> origin(0, 0, 0, [0, 4, 8, 12], 7, True)
    0
    >>> origin(4, 0, 2, [0, 4, 8, 12], 7, True)
    5
    >>> origin(5, 1, 1, [0, 4, 8, 12], 7, False)
    4

    :param current: current origin of a given axis
    :param start: leftmost column index or topmost row index selected
    :param end: rightmost column index or bottommost row index selected
    :param cum_extents: cumulative sum of column widths or row heights
    :param screen: total extent of a given axis
    :param moving: flag if current action is advancing
    :returns: new origin
    :rtype: int
    """
    # Convert indices to coordinates of boundaries.
    left = cum_extents[start]
    right = cum_extents[end + 1]
    if moving and right > current + screen:
        return right - screen
    if not moving and left < current:
        return left
    return current
c679db7f9c68b28ce7c89069f267dc7013e8bdf6
30,607
def local_url_loader(*args, **kwargs):
    """Mock URL loader returning the file contents as a string (for premailer).

    The first positional argument is treated as a path; any leading '/' is
    stripped so it resolves relative to the working directory.
    """
    path = args[0].lstrip('/')
    with open(path, 'r') as f:
        return f.read()
e4e459ea24c63029ccde7173554b819d5c8773fa
30,608
def _swap_on_miss(partition_result): """ Given a partition_dict result, if the partition missed, swap the before and after. """ before, item, after = partition_result return (before, item, after) if item else (after, item, before)
2cbf8ae30bc4b9efb449e0b0f63c78781cd09fd3
30,609
import secrets


def miller_rabin(w, iterations=50):
    """Probabilistic (Miller-Rabin) primality test of the number w.

    With 50 iterations the error probability is below pow(2, -100) for
    primes under 2048 bits.

    :param w: integer to test.
    :param iterations: number of random witness rounds.
    :return: "COMPOSITE" or "PROBABLY PRIME".
    """
    # Handle cases the witness loop cannot. The original also had two bugs
    # fixed here: range(1, iterations) ran one round too few, and witnesses
    # b <= 1 were silently skipped, wasting rounds.
    if w < 2:
        return "COMPOSITE"
    if w in (2, 3):
        return "PROBABLY PRIME"
    if w % 2 == 0:
        return "COMPOSITE"
    a = 0
    m = w - 1
    while m % 2 == 0:
        a += 1  # largest a such that pow(2, a) divides w - 1
        m //= 2
    for _ in range(iterations):
        # Witness b drawn uniformly from [2, w - 2].
        b = secrets.randbelow(w - 3) + 2
        z = pow(b, m, w)
        if z == 1 or z == w - 1:
            continue
        for _ in range(a - 1):
            z = pow(z, 2, w)
            if z == w - 1:
                break
        else:
            return "COMPOSITE"
    return "PROBABLY PRIME"
d655106c72fc3782ac76acec93d073fa53620819
30,610
from operator import truediv


def distances_from_average(test_list):
    """Return each element's distance to the list average.

    Distances are signed as (average - value) and rounded to 2 decimals.
    """
    avg = truediv(sum(test_list), len(test_list))
    return [round(float(avg - v), 2) for v in test_list]
e59e404e824e35d99dbf8fb8f788e38b7a19cc03
30,611
import sys


def is_pyinstaller_bundle():
    """bool: True when the application runs from a pyinstaller bundle.

    TIP: when running frozen, the path of the temporary folder pyinstaller
    creates is available via the meta-variable ``sys._MEIPASS``.
    """
    return hasattr(sys, 'frozen')
df91a7f83320ce7d61b6c0efec3fb31f175986fb
30,612
def byte_display(size):
    """Format a byte count with an appropriate unit (bytes, KB or MB)."""
    if size == 0:
        return '0 KB'
    if size > 1048576:
        return '%0.02f MB' % (size / 1048576.0)
    if size > 1024:
        return '%0.02f KB' % (size / 1024.0)
    return '%s bytes' % size
35766e6ec839de9d928f8acf965e60de3a66b5cb
30,613
def authors_filter(authors):
    """Reformat an ' and '-separated list of "Last, First" authors.

    Returns a single comma-separated string of "First Last" names.
    """
    names = []
    for author in authors.split(' and '):
        parts = author.split(',')
        names.append(' '.join(reversed(parts)).strip())
    return ', '.join(names)
34a141f3088a880ebac3cd14e8104dd21a9e1427
30,615
from typing import Optional
import os


def find_config() -> Optional[str]:
    """
    Find the path to the configuration files. Priority order is:

    1. (this file dir)/config.py
    2. ~/.config/plain_inc_bak/config.py
    3. ~/.plain_inc_bak_config.py
    4. /etc/plain_inc_bak/config.py

    Config files are *not* flattened; only the first existing one is used.
    Returns None when no config file exists.
    """
    op = os.path
    # Candidate locations, highest priority first.
    curdir = op.join(op.dirname(op.abspath(__file__)), 'config.py')
    userconfig = op.expanduser('~/.config/plain_inc_bak/config.py')
    userroot = op.expanduser('~/.plain_inc_bak_config.py')
    etc = '/etc/plain_inc_bak/config.py'
    for d in (curdir, userconfig, userroot, etc):
        if op.exists(d):
            return d
    return None
5a219190f70579da4532a88c6e3ea4bd3f6dfd94
30,616
import difflib


def get_real_matching_blocks(words_list1: list, words_list2: list,
                             minimum_size: int = 2) -> list:
    """Return matching blocks between two word lists above a minimum size.

    :param words_list1: first sequence of words.
    :param words_list2: second sequence of words.
    :param minimum_size: smallest block size to keep; falsy or non-positive
        values fall back to the default of 2.
    :return: list of difflib Match blocks with ``size >= minimum_size``.
    """
    matching_blocks = difflib.SequenceMatcher(
        a=words_list1, b=words_list2).get_matching_blocks()
    # The original duplicated the filter in both branches, with the fallback
    # threshold hard-coded; collapse to a single filtered pass.
    threshold = minimum_size if minimum_size and minimum_size > 0 else 2
    return [b for b in matching_blocks if b.size >= threshold]
5cd1c8bb419cc35ee406c6436b657498170d7b64
30,617
def load_element_single(properties, data):
    """
    Load element data with lists of a single length based on the element's
    property-definitions.

    Parameters
    ------------
    properties : dict
        Property definitions encoded in a dict where the property name is
        the key and the property data type the value.
    data : array
        Data rows for this element. If the data contains list-properties,
        all lists belonging to one property must have the same length.
    """
    # (start, end) column range occupied by each property, in order.
    col_ranges = []
    start = 0
    # Only the first row is inspected for list lengths; all rows are assumed
    # to share the same layout (see docstring).
    row0 = data[0]
    for name, dt in properties.items():
        length = 1
        if '$LIST' in dt:
            # the first entry in a list-property is the number of elements in the list
            length = int(row0[start])
            # skip the first entry (the length), when reading the data
            start += 1
        end = start + length
        col_ranges.append((start, end))
        # start next property at the end of this one
        start = end
    # Slice each property's columns and cast to its declared dtype
    # (the '($LIST,)' marker is stripped from the dtype string).
    return {n: data[:, c[0]:c[1]].astype(dt.split('($LIST,)')[-1])
            for c, (n, dt) in zip(col_ranges, properties.items())}
cc82704592cd2c5e98864cf59b454d2d7551bba0
30,618
import re


def compile_regex(pattern, user_map=None):
    """Compile a regex pattern using default or user mapping.

    Placeholder tokens (ALPHA, WORD, START, END) found in *pattern* are
    replaced with the corresponding regular-expression fragments before
    compilation.

    Args:
        pattern: Pattern text, possibly containing placeholder tokens.
        user_map: Optional dict of extra/overriding token -> fragment pairs.

    Returns:
        The compiled regular-expression object (compiled with re.VERBOSE).
    """
    # Handy regular expressions.
    mapping = {'ALPHA': r'[-.,?!\w]',
               'WORD': r'[-.,?!\s\w]',
               'START': r'(^|\s)',
               'END': r'([.,?!\s]|$)'}
    if user_map:
        mapping = mapping.copy()
        mapping.update(user_map)

    # Bug fix: dict.iteritems() does not exist in Python 3 -- use items().
    for token, fragment in mapping.items():
        pattern = pattern.replace(token, fragment)

    # Bug fix: re.LOCALE is only valid for bytes patterns in Python 3
    # (ValueError for str patterns since 3.6), so it is dropped here.
    return re.compile(pattern, re.VERBOSE)
05b501599d8a53f0559a0463e1db95bd2d2c8347
30,619
def indexMap(inds, prodLabel, reactLabel):
    """
    Transforms a labeling mapping of type:
        abc --> cba
    to an indexed labeling of the type:
        [1,2,3] --> [3,2,1]

    Inputs:
    -) inds: numerical (1-based) indices, e.g.: [1,2,3]
    -) prodLabel: product labeling, e.g.: cba
    -) reactLabel: reactant labeling, e.g.: abc
    """
    # Pick the product carbons addressed by the 1-based indices.
    selected = ''.join(prodLabel[idx - 1] for idx in inds)
    if not selected:
        raise Exception('Indices: '+str(inds)+' not in product label: '+prodLabel)
    mapped_carbons = []
    mapped_indices = []
    for carbon in selected:
        # 1-based position of this carbon in the reactant label;
        # str.find returning -1 maps to 0, i.e. "not found" -> skipped.
        position = reactLabel.find(carbon) + 1
        if position > 0:
            mapped_carbons.append(carbon)
            mapped_indices.append(position)
    return bool(mapped_indices), (mapped_carbons, mapped_indices)
594715ea5a21e2cc30ade7b24c2ca8fec0d75e1c
30,620
def redondear(a, decimal):
    """Map each year in a column to the decade it belongs to.

    Args:
        a: Iterable of numeric values (e.g. a dataframe column of years).
        decimal: Digits argument passed to round(); -1 buckets by decade.
            Other values (centuries, millennia) require adapting the
            subtraction below.

    Returns:
        List with the decade for every input value.
    """
    decadas = []
    for valor in a:
        redondeado = round(valor, decimal)
        # When rounding moved the value up, step one decade back so the
        # bucket is the decade the value actually falls in.
        decadas.append(redondeado - 10 if valor < redondeado else redondeado)
    return decadas
a9919d605d48b7fe0ff7c9270f067df35368fd9d
30,622
def get_preresolution(file: str) -> int:
    """
    Read pre file and return the number of unknowns.

    :param file: pre file path
    :return: number of unknowns
    """
    with open(file) as handle:
        lines = handle.readlines()
    # First line of the $DofData section (IndexError if the marker is absent).
    marker_indices = [idx for idx, line in enumerate(lines) if '$DofData' in line]
    dof_line = marker_indices[0]
    # The unknown count is the last token five lines below the marker.
    return int(lines[dof_line + 5].split()[-1])
8084c37792246bef5f6fc9da43e30da9f4902cbc
30,623
def is_recovered(alleles_in_probands, alleles_in_pool):
    """True if all the variants found in the proband(s) are also found in
    the pool.

    This tends to result in multi-alleleic sites not getting filtered in
    many cases.

    alleles_in_probands, alleles_in_pool:
        iterable consisting of items that can be compared in a set
    """
    # Subset test: nothing in the probands may be absent from the pool.
    return set(alleles_in_probands).issubset(set(alleles_in_pool))
68e978f63bcfc899a0a8c278d17d418d6423d674
30,624
from typing import Union
import json


def load_json_from_file(path: str) -> Union[dict, list, str, int]:
    """Load JSON from specified path"""
    with open(path, encoding='utf-8') as handle:
        parsed = json.load(handle)
    return parsed
c8ba755c62ea4ab6fe74b4571034967ce610afdf
30,626
def get_workitem_id_from_task_name(task_name: str):
    """Parse the Task Name to get the Work Item ID"""
    # The work-item id is everything before the first space.
    work_item_id, _, _ = task_name.partition(" ")
    return work_item_id
2f5c22b02fc132e319404fb43f444f9f2044315e
30,627
import torch


def get_model(model='PGAN', dataset='celebAHQ-512', use_gpu=True):
    """Returns a pretrained GAN from
    (https://github.com/facebookresearch/pytorch_GAN_zoo).

    Args:
        model (str): Available values are "PGAN", "DCGAN".
        dataset (str): Available values are "celebAHQ-256", "celebAHQ-512",
            "DTD", "celeba". Ignored if model="DCGAN".
        use_gpu (bool): Whether to use gpu.

    Raises:
        KeyError: on an unknown model name, or an unknown dataset for PGAN.
    """
    all_models = ['PGAN', 'DCGAN']
    if model not in all_models:
        raise KeyError(
            f"'model' should be in {all_models}."
        )

    pgan_datasets = ['celebAHQ-256', 'celebAHQ-512', 'DTD', 'celeba']
    if model == 'PGAN' and dataset not in pgan_datasets:
        raise KeyError(
            f"If model == 'PGAN', dataset should be in {pgan_datasets}"
        )

    # Fetch the pretrained network from the facebookresearch hub repo.
    return torch.hub.load('facebookresearch/pytorch_GAN_zoo:hub', model,
                          model_name=dataset, pretrained=True, useGPU=use_gpu)
bb8df4164d27566960acf01d3664b3802d0c4fe7
30,628
import socket


def find_free_port():
    """
    Get an available TCP port.
    """
    # Binding to port 0 makes the OS pick a free ephemeral port.
    probe = socket.socket()
    try:
        probe.bind(('localhost', 0))
        return probe.getsockname()[1]
    finally:
        probe.close()
a24479425144ba43cd8437c22bab79a94faa2376
30,630
import re


def __expand_ranges(expression: str) -> str:
    """Expand ranges in a given expression.

    Every occurrence of ``a..b`` (with a <= b) is replaced by the
    comma-separated list ``a,a+1,...,b``. Descending ranges are left
    untouched.

    Args:
        expression: The expression to expand.

    Returns:
        The expression with ranges expanded.
    """
    # Find {n}..{n} in the expression. Raw string avoids the invalid
    # escape-sequence warning the old "\." spelling produced.
    pattern = re.compile(r"(\d+)\.\.(\d+)")
    # Expand ranges
    while True:
        match = pattern.search(expression)
        if match is None:
            break
        left, right = int(match.group(1)), int(match.group(2))
        if left > right:
            # Bug fix: a descending range used to leave the expression
            # unchanged and spin forever; stop instead, leaving it as-is.
            break
        # Replace range expression with comma-separated list.
        numbers = [str(i) for i in range(left, right + 1)]
        expression = expression.replace(match.group(0), ",".join(numbers))
    return expression
3563e4b452d49eca3c04ace0e796a14a7347700a
30,632
def serialize_greeting(greeting):
    """Build a JSON-API style representation of a greeting mapping."""
    attribute_names = ('word', 'propertyTwo', 'propertyThree')
    return {
        'id': None,
        'type': 'greeting',
        # Missing attributes serialize as None rather than being omitted.
        'attributes': {name: greeting.get(name, None) for name in attribute_names},
    }
c575ceb943c271e612ddd6c23be81ef8403003cc
30,633
import argparse


def get_parser():
    """Parse command-line options and return (project_count, repo_count).

    Note: despite its name, this helper parses sys.argv directly rather
    than returning the parser object.

    Returns:
        tuple: (project_count, repo_count) as ints.
    """
    parser = argparse.ArgumentParser("populate")
    # Bug fix: without type=int, values supplied on the command line arrive
    # as strings while the defaults are ints; coerce consistently.
    parser.add_argument('--project-count', '-p', dest='project_count',
                        type=int, required=False, default=100)
    parser.add_argument('--repo-count', '-r', dest='repo_count',
                        type=int, required=False, default=100)
    args = parser.parse_args()
    return (args.project_count, args.repo_count)
1133957ac8008943600635d6252a8feacc6c7fb1
30,634
import csv


def importTCP(tcpfilename):
    """
    Returns the TCP given a TCP file name.

    Parameters:
        tcpfilename (string): Filename

    Returns:
        tcpPos (list): list of 6 ints
        tcpPayload (list): list - mass, cogx, cogy, cogz
    """
    # Column layout of each csv row:
    #   0-5: tool centre point pose (x, y, z, rx, ry, rz)
    #   6:   payload weight in kg
    #   7-9: payload centre of gravity
    tcpPos = [0, 0, 0, 0, 0, 0]
    tcpPayload = [0, 0, 0, 0]
    with open(tcpfilename) as handle:
        reader = csv.reader(handle, delimiter=",", quoting=csv.QUOTE_NONNUMERIC)
        handle.seek(0)
        # The last row read wins, matching the original behaviour.
        for row in reader:
            tcpPos = list(row[0:6])
            tcpPayload = list(row[6:10])
    print("{0} loaded".format(tcpfilename))
    return tcpPos, tcpPayload
993c78672fb0345ce03c94bba2f70ae7598143ce
30,635
def _get_expanded_term_prefixes():
    """Return a list of query prefixes to extend Google Autocomplete results.

    Returns:
        expanded_term_prefixes (list)
    """
    # Wildcard question phrases, then qualifiers, then bare question words.
    question_phrases = ['who is *', 'what is *', 'where is *',
                        'when can *', 'why is *', 'how to *']
    qualifiers = ['best', 'cheap', 'worst', 'is']
    question_words = ['what', 'when', 'why', 'how', 'who']
    return question_phrases + qualifiers + question_words
69927e86851c255415f52c42328e96cf12bb12d2
30,636
def fmtcols(mylist, cols):
    """Generate a string of tab and newline delimited columns from a list"""
    rows = []
    # Take the list in slices of `cols` items; each slice becomes one row.
    for start in range(0, len(mylist), cols):
        rows.append("\t".join(mylist[start:start + cols]))
    return '\n'.join(rows)
5f83aa16039edafa6789c8cd1580ff45ae495f67
30,637
def split_list(l: list, n: int = 1) -> list:
    """Split list into n parts"""
    size = len(l)
    # Integer slice boundaries distribute any remainder across the parts.
    return [l[part * size // n: (part + 1) * size // n] for part in range(n)]
76bfc987dca606fda945a3222a852d0c0c8489db
30,638
def strip(content: str, _: dict) -> str:
    """Works like 'copy', but strip empty spaces before and after"""
    # Second argument is accepted for interface parity but ignored.
    stripped = content.strip()
    return stripped
d745cf1f0574bbc16ef7848f9eb81490a65a1233
30,640
def get_start(maze):
    """Searches for the 1 inside the maze.

    Returns:
        The row and column of the found 1. E.g. if 1 was in row 3 and
        column 4, this would return: 3, 4

        If there is no 1 in the maze, this returns -1, -1
    """
    for row_idx, row in enumerate(maze):
        for col_idx, cell in enumerate(row):
            if cell == 1:
                return row_idx, col_idx
    # No starting cell found anywhere in the grid.
    return -1, -1
f2561d9af924eb28c86807e079c9515f8c395bf1
30,641
def get_nonzero_either_mask(vector_a, vector_b):
    """Returns a numpy array of boolean values indicating where values in
    two vectors are both greater than zero.

    Parameters
    ----------
    vector_a : numpy.ndarray
        Array of counts or RPKM
    vector_b : numpy.ndarray
        Array of counts or RPKM

    Returns
    -------
    numpy.ndarray
        Boolean array that is True where both `vector_a` and `vector_b`
        have values greater than zero, and False elsewhere.
    """
    positive_a = vector_a > 0
    positive_b = vector_b > 0
    return positive_a & positive_b
82f7433bcbcfcfc799b46083b112a9a7abcab918
30,642
def get_setting_name_and_refid(node):
    """Extract setting name from directive index node"""
    first_entry = node['entries'][0]
    entry_type, info, refid = first_entry[:3]
    # The index text reads "<name>; setting" -- keep only the name part.
    return info.replace('; setting', ''), refid
f72908c1f3adfc1d37f4760a240f68c66031dc19
30,643
import csv


def get_func_rep(thresh_results, input_comps, conf_values=True):
    """
    Find the functional representation of a set of components based on the
    results of data mining.

    Parameters
    ----------
    thresh_results : dict
        The return dictionary from the "get_top_results" function
    input_comps : string
        The filename of a .csv file containing the components of a product
    conf_values : bool
        Whether to return the results with the frequency values,
        default is True

    Returns
    -------
    learned_dict
        Dictionary of function and flow combinations sorted by confidence
        for each component in the input case. The key is the component and
        the value is a list of function-flow combinations.
    unmatched
        List of components that were in the set of input components but not
        found in the data mining results.
    """
    # Occurrences per component; only the distinct keys matter below.
    counts = {}
    # Components with no data-mining result, in first-seen order.
    unmatched = []

    with open(input_comps, encoding='utf-8-sig') as input_file:
        for row in csv.reader(input_file, delimiter=','):
            comp = row[0]
            counts[comp] = counts.get(comp, 0) + 1

    if conf_values is True:
        # Keep the mined (function-flow, confidence) pairs as-is.
        res_with_conf = {}
        for comp in counts:
            if comp in thresh_results:
                res_with_conf[comp] = thresh_results[comp]
            elif comp not in unmatched:
                unmatched.append(comp)
        return res_with_conf, unmatched

    # Strip the confidence values, keeping only the function-flow strings.
    learned_dict = {}
    for comp in counts:
        if comp in thresh_results:
            learned_dict[comp] = [entry[0] for entry in thresh_results[comp]]
        elif comp not in unmatched:
            unmatched.append(comp)
    return learned_dict, unmatched
a24d2f4833330dbcc33a422225e517e29b38f868
30,644
def version_sum(packet: dict) -> int:
    """
    Recursively calculate the sum of version numbers in packet.
    """
    total = packet["version"]
    for sub_packet in packet["subpackets"]:
        total += version_sum(sub_packet)
    return total
2559e2531c59d93f6bd00a625e7a1e21c6bdeaa1
30,645
import pickle


def load_clf(trained_mod):
    """Load a trained model from a pickle file.

    Args:
        trained_mod (str): file path to pickle file.

    Returns:
        sklearn.classifier: A trained sklearn classifier.
    """
    # NOTE: pickle.load executes arbitrary code -- only load trusted files.
    with open(trained_mod, 'rb') as handle:
        return pickle.load(handle)
21a9dbd4e5455e8909ed0f46b78cd5fb7d161b04
30,646
def r_perimeter(l, b):
    """Function for calculating Perimeter of Rectangle"""
    # Perimeter is twice the sum of the two side lengths.
    side_sum = l + b
    return side_sum * 2
7860ebc843faf55a3ad893f4359e802775260a0f
30,648
def buffers(columns, values, buffer_val):
    """Processes the space multiple for buffers between records value and
    column borders.

    Returns a pair: per-cell padding widths (header row first) and the
    buffered total width of each column.
    """
    # Column widths start from the header labels; header row is kept as row 0.
    max_lens = [len(str(label)) for label in columns]
    val_lens = [list(max_lens)]
    for record in values:
        cur_lens = [len(str(cell)) for cell in record]
        for j, width in enumerate(cur_lens):
            if width > max_lens[j]:
                max_lens[j] = width
        val_lens.append(cur_lens)
    # Padding = (widest cell in column + buffer) - this cell's width.
    paddings = [[max_lens[j] + buffer_val - row[j] for j in range(len(row))]
                for row in val_lens]
    widths = [width + buffer_val for width in max_lens]
    return paddings, widths
aea8e523498a44a9e3405087d0e097312bd13daf
30,651
def tree_pop_fields(root, fields):
    """Deletes given fields (as iterable of keys) from root and all its
    children (recursively).

    Returns the updated root. Raises KeyError when a field is missing.
    """
    for field in fields:
        root.pop(field)
    if not root['is_leaf']:
        # Recurse into every child subtree, replacing it with the pruned one.
        for child_entry in root['children']:
            child_entry['child'] = tree_pop_fields(child_entry['child'], fields)
    return root
1dca88301219ad2a9c83642024ab0db08472b507
30,652
from typing import Tuple
from typing import Callable


def interpolate(p1: Tuple[int, float], p2: Tuple[int, float]) -> Callable[[int], float]:
    """
    Returns a function that linearly interpolates between these two points.

    Implements the two-point form of a line, see
    https://mathworld.wolfram.com/Two-PointForm.html

    Args:
        p1: A point (x1, y1) with the smaller x value.
        p2: A point (x2, y2) with the larger x value.

    Raises:
        ValueError if x1 is greater than or equal to x2.
    """
    x1, y1 = p1
    x2, y2 = p2
    if x1 >= x2:
        raise ValueError(f"x2 ({x2}) must be greater than x1 ({x1}).")
    slope = (y2 - y1) / (x2 - x1)

    def line(x: int) -> float:
        return slope * (x - x2) + y2

    return line
2196e99e1ae22328d45047474cd5d5b092ee01ce
30,653
def count(value, node):
    """
    Count number of list elements that match a value

    :param value: value to search for
    :param node: head node of the linked list (or None for the empty list)
    :return: int: number of elements that match the value
    """
    # Guard clause: recursion bottoms out at the end of the list.
    if node is None:
        return 0
    matched = 1 if node.value == value else 0
    return matched + count(value, node.next_node)
05ffe8ce83e3fff981d8953090f6615463627e43
30,656
import torch


def clamp(image, min=0., max=1.):
    """Clamp values in input tensor exceeding (min, max) to (min, max)"""
    clamped = torch.clamp(image, min, max)
    return clamped
4b7fe6100d0e85a7ee1ae00a53df5a6616bd65c9
30,658
import os
import glob


def get_biotab_files(directory):
    """Return the list of biotab files found under *directory*.

    Matches "<directory>/*/nationwide*.txt".

    Raises:
        RuntimeError: when no biotab file is found.
    """
    pattern = os.path.join(directory, "*", "nationwide*.txt")
    results = glob.glob(pattern)
    if not results:
        raise RuntimeError("Could not find any biotab files")
    return results
d96141eed4ea7ff640e778a4fab24392e95cff7d
30,659
def split_out_internet_rules(rule_list):
    """Separate rules targeting the Internet versus normal rules"""
    normal_rules = []
    internet_rules = []
    # Single pass, partitioning on the rule's target zone.
    for rule in rule_list:
        if rule.target_zone == 'internet':
            internet_rules.append(rule)
        else:
            normal_rules.append(rule)
    return normal_rules, internet_rules
aa838ef7655658b3255c127f392c536bceb5a3bd
30,661
import os


def get_abs_path(filepath):
    """Return the absolute path of filepath.

    :param filepath: The input file path to extend to absolute.
    :type filepath: string
    :returns: string -- The absolute path, or None when filepath is None.
    """
    return None if filepath is None else os.path.abspath(filepath)
b8e6181d950f1a6bb89dec8880ea9e8d76761722
30,662
def pattern_F(Z, type_restrict, restrict, result, row, col):
    """Add slack variables so the model is in standard (pattern) form.

    Arguments:
        Z {list} -- objective-function coefficients
        type_restrict {list of int} -- constraint types; 1 means "<="
        restrict {list of list} -- coefficient rows of all restrictions
        result {list} -- right-hand side of each restriction
        row {int} -- number of constraint rows
        col {int} -- number of columns (variables)

    Returns:
        tuple -- (Z, restrict, result, base, col): everything needed to
        build the first simplex tableau.
    """
    i = 0
    base = []
    # NOTE(review): sv_count is never used below -- looks like leftover.
    sv_count = 0
    tam = len(restrict[0])
    for type_R in type_restrict:
        if type_R == 1: #add new spear var. If type == 1 then its equals to "<="
            col += 1 # att number of columns
            for j in range(0,row): # create a triangular of news vars matrix
                if j == i:
                    restrict[j].append(1)
                else:
                    restrict[j].append(0)
            # New slack variable has zero objective coefficient.
            Z.append(0)
            if result[i] < 0: # negative z_valueults -> multiply all var by (-1)
                # NOTE(review): this negation is a no-op -- `var2` is a loop
                # copy, so `restrict` is never modified, and `result[i]` is
                # not flipped either. Confirm intended sign-normalisation
                # (likely: negate row i and result[i] in place).
                for var in restrict:
                    for var2 in var:
                        var2 = var2 * (-1)
            # Record the slack variable's (1-based) column as a basis entry.
            base.append(tam+1)
            tam += 1
        i+=1
        # NOTE(review): rebinding the loop variable has no effect outside
        # this iteration -- dead statement.
        type_R = "="
    return Z, restrict, result, base, col
f20dff6c8044b950e00c4e3c22f82690db249948
30,665
def get_default_cooling_spec():
    """Return the cooling-equipment spec assumed for evaluation when no
    cooling equipment (or equipment other than a room air conditioner) is
    installed in the main or other living rooms.

    Returns:
        dict: assumed cooling equipment specification
    """
    spec = {
        'type': 'ルームエアコンディショナー',
        'e_class': 'ろ',
        'dualcompressor': False,
    }
    return spec
861a3c79ee8925ffabc40af56cae45dc8025743e
30,668
def _get_response_status(response) -> int:
    """Get the HTTP status code from any type of response object.

    Supports aiohttp-style (.status) and starlette-style (.status_code)
    response objects.

    Raises:
        TypeError: when the object exposes neither attribute.
    """
    if hasattr(response, "status"):
        # aiohttp-style
        return response.status
    if hasattr(response, "status_code"):
        # starlette-style
        return response.status_code
    # Bug fix: the message previously said "path" (copied from a sibling
    # helper); this function extracts the status code.
    raise TypeError(f"Don't know how to find the status for {type(response)}")
1a9286db6277601240545e36c4a51536555a83d0
30,669
import re


def parse_only(args, scripts, scripts_names, error):
    """Parse the --only range specification into a set of 0-based indices.

    Each comma-separated component may be a 1-based page number ("3"), a
    numeric range ("2-5"), a page name ("intro"), or a name range
    ("page2-4" meaning page2..page4). `error` is a callback invoked with a
    message on invalid selections.
    """
    only = set()
    if args.only == 'the full set':
        # --only parameter was not given
        # Include all the #pages
        for i in range(0, len(scripts)):
            only.add(i)
        return only
    # --only parameter was given, parse it
    for comp in [c.strip() for c in args.only.split(",")]:
        # Single number
        match = re.match(r'^[1-9]\d*$', comp)
        if match:
            num = int(match.group(0))
            if not num <= len(scripts):
                error(f'#page {num} was selected in --only, ' \
                      f'but only {len(scripts)} #pages exists')
            # Stored as a 0-based index.
            only.add(num-1)
            continue
        # Numeric range
        match = re.match(r'^([1-9]\d*)\s*-\s*([1-9]\d*)$', comp)
        if match:
            (start,end) = (int(match.group(1)), int(match.group(2)))
            length = end - start + 1
            # NOTE(review): an empty or oversized range (length <= 0 or
            # >= 10000) is silently skipped here -- no error is reported.
            if 0 < length < 10000:
                for num in range(start, end+1):
                    if not num <= len(scripts):
                        error(f'#page {num} was selected in --only, ' \
                              f'but only {len(scripts)} #pages exists')
                    only.add(num-1)
            continue
        # Single name
        match = re.match(r'^[a-zA-Z_]+([1-9]\d*)?$', comp)
        if match:
            name = match.group(0)
            if name not in scripts_names:
                error(f'#page named "{name}" was selected in --only, ' \
                      f'but there is no #page with that name. ' \
                      f'Available #page names are: ' \
                      f'{",".join(sorted(scripts_names.keys()))}')
            # scripts_names maps a page name to its 0-based index.
            only.add(scripts_names[name])
            continue
        # name range
        match = re.match(r'^([a-zA-Z_]+)([1-9]\d*)-([1-9]\d*)$', comp)
        if match:
            (base,start,end) = (match.group(1),int(match.group(2)),int(match.group(3)))
            length = end - start + 1
            # NOTE(review): same silent skip as the numeric range above.
            if 0 < length < 10000:
                for i in range(start, end+1):
                    # Reconstruct each name in the range, e.g. page2..page4.
                    name = base+str(i)
                    if name not in scripts_names:
                        error(f'#page named "{name}" was selected in --only, ' \
                              f'but there is no #page with that name. ' \
                              f'Available #page names are: ' \
                              f'{",".join(sorted(scripts_names.keys()))}')
                    only.add(scripts_names[name])
            continue
        # Component matched none of the recognised forms.
        error('Invalid "only" range component: '+comp)
    return only
56f8b42f4a3f4e7f8cfb059761b1eccd620d64da
30,670
def _byorder(r, keep='last'):
    """Return the first or last non-NaN value of a Series.

    Args:
        r (pd.Series): input series.
        keep (str): 'first' or 'last'; any other value returns None.

    Returns:
        scalar
    """
    non_null = r.dropna()
    if keep == 'first':
        return non_null.iloc[0]
    if keep == 'last':
        return non_null.iloc[-1]
4f07ff0631c808453fea72c72a7e881309735234
30,671
def read_nion_image_info(original_metadata):
    """Read essential parameter from original_metadata originating from a dm3 file.

    Returns an empty dict when the expected nested keys are missing.
    NOTE(review): on the success path this function only prints and falls
    through, returning None -- `experiment` is built but never populated or
    returned; the prints look like leftover debugging. Confirm intended
    behaviour with callers.
    """
    if not isinstance(original_metadata, dict):
        raise TypeError('We need a dictionary to read')
    # Guard against any missing level of the nested metadata structure.
    if 'metadata' not in original_metadata:
        return {}
    if 'hardware_source' not in original_metadata['metadata']:
        return {}
    if 'ImageScanned' not in original_metadata['metadata']['hardware_source']:
        return {}
    exp_dictionary = original_metadata['metadata']['hardware_source']['ImageScanned']
    experiment = {}
    print(exp_dictionary)
    if 'autostem' in exp_dictionary:
        print('auto')
        print(exp_dictionary.keys())
    print()
    print(experiment)
f07c976a2d1d445bff243a6dd138d5b4bfabc52b
30,672
def unflatten(dict_, separator="."):
    """Turn back a flattened dict created by :py:meth:`flatten()` into a
    nested dict.

    >>> unflatten({"my.sub.path": True, "another.path": 3, "my.home": 4})
    {'my': {'sub': {'path': True}, 'home': 4}, 'another': {'path': 3}}
    """
    nested = {}
    for flat_key, value in dict_.items():
        *parents, leaf = flat_key.split(separator)
        cursor = nested
        for part in parents:
            # Create intermediate dicts on demand (EAFP).
            try:
                cursor = cursor[part]
            except KeyError:
                cursor[part] = {}
                cursor = cursor[part]
        cursor[leaf] = value
    return nested
2ec7463c73fa542fe5e5830e1af1a969a3c88d0b
30,674
import click


def pywbem_error_exception(exc, intro=None):
    """
    Return the standard click exception for a pywbem Error exception.

    These exceptions do not cause interactive mode failure but display the
    exception class and its str value and return to the repl mode.

    Parameters:

      exc (Exception):
        The pywbem Error exception.

      intro (string):
        An additional message used as introduction for the resulting click
        exception message. This message usually states what cannot be done
        due to the error.

    Returns:
        click.ClickException: Click exception for the pywbem Error exception.
    """
    exc_name = exc.__class__.__name__
    if intro:
        msg = "{}: {}: {}".format(intro, exc_name, exc)
    else:
        msg = "{}: {}".format(exc_name, exc)
    return click.ClickException(msg)
3d99a69857d99e3e7c579a7e9be147574c9baf67
30,675