content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def get_historical_time(data_point):
    """Return the HH:MM portion of a data point's 'begins_at' timestamp.

    Args:
        data_point: mapping with a 'begins_at' ISO-8601 string
            (e.g. "2020-01-02T13:45:00Z" -> characters 11:16 are "13:45").

    Returns:
        str or None: the "HH:MM" slice, or None when the key is missing or
        the value is not sliceable.
    """
    try:
        return data_point['begins_at'][11:16]
    # BUG FIX: narrowed from a bare `except:`, which also swallowed
    # SystemExit/KeyboardInterrupt and hid genuine programming errors.
    except (KeyError, TypeError, IndexError):
        return None
412ed21e7e2591075cc71f911304faa4af959176
53,582
def _extract_problem_info(source): """Split the logpath to identify test problem, data set, etc. Args: source (Cockpit or str): ``Cockpit`` instance, or string containing the path to a .json log produced with ``Cockpit.write``, where information will be fetched from. Returns: [dict]: Dictioniary of logpath, testproblem, optimizer, etc. """ if isinstance(source, str): # Split logpath if possible try: dicty = { "logpath": source + ".json", "optimizer": source.split("/")[-3], "testproblem": source.split("/")[-4], "dataset": source.split("/")[-4].split("_", 1)[0], "model": source.split("/")[-4].split("_", 1)[1], } except Exception: dicty = { "logpath": source + ".json", "optimizer": "", "testproblem": "", "dataset": "", "model": "", } else: # Source is Cockpit instance dicty = { "logpath": "", "optimizer": source._optimizer_name, "testproblem": "", "dataset": "", "model": "", } return dicty
a88c8dbc7a1175957c3c57a12b780892f36eea46
53,583
def BHS_standard(err):
    """Compute the BHS (British Hypertension Society) standard metric.

    Arguments:
        err {array} -- array of absolute error

    Returns:
        tuple -- percentages of samples with <=5 mmHg, <=10 mmHg and
        <=15 mmHg error
    """
    total = len(err)
    magnitudes = [abs(e) for e in err]
    # The three buckets are cumulative: an error within 5 also counts for
    # the 10 and 15 mmHg thresholds.
    leq5 = sum(1 for m in magnitudes if m <= 5)
    leq10 = sum(1 for m in magnitudes if m <= 10)
    leq15 = sum(1 for m in magnitudes if m <= 15)
    return (leq5 * 100.0 / total, leq10 * 100.0 / total, leq15 * 100.0 / total)
a2a71a5fea63c7bc6965d54a7d7dcae7333e1838
53,596
def find_biggest_box(length: int, width: int) -> int:
    """Return the largest square size that tiles a length-by-width rectangle
    exactly (the greatest common divisor, via the iterative Euclidean
    algorithm).

    >>> find_biggest_box(1, 1)
    1
    >>> find_biggest_box(1680, 640)
    80
    >>> find_biggest_box(640, 1680)
    80
    """
    larger, smaller = max(length, width), min(length, width)
    while larger % smaller:
        larger, smaller = smaller, larger % smaller
    return smaller
316eec7579436188127d83a5bd2bf8b66ab684b9
53,605
from typing import List


def extract_masks(mask) -> List:
    """Split a monolithic grayscale mask into per-class binary masks.

    :param mask: 2-D grayscale mask in which each pixel value encodes a class
        id (0 is background); assumed to support numpy-style ``copy()`` and
        boolean indexing.
    :return: list of binary masks (255 where the class is present, 0
        elsewhere), one per non-background class.
    """
    # Collect the distinct pixel values in one pass.
    classes = {value for row in mask for value in row}
    # BUG FIX: remove(0) raised KeyError when the mask contained no
    # background pixels at all; discard(0) is a no-op in that case.
    classes.discard(0)

    masks = []
    for class_id in classes:
        binary = mask.copy()
        binary[binary != class_id] = 0
        binary[binary == class_id] = 255
        masks.append(binary)
    return masks
316fcc118d5732aae37d24a16c89d0adc09c96fc
53,606
from typing import Iterable
from typing import List
import re


def filter_filenames(filenames: Iterable[str], pattern: str) -> List[str]:
    """Return only the filenames matching the given regex pattern."""
    matcher = re.compile(pattern)
    return [name for name in filenames if matcher.search(name)]
45787d263c8869a1fdc99273f535257ec3d513fd
53,608
def asProj4Str(epsg):
    """Convert an EPSG code to *proj4* ``+init=epsg:<code>`` notation."""
    code = int(epsg)
    return f"+init=epsg:{code}"
1e6d9066a245389e7ee61abade3889e55a62aa93
53,611
async def enclose_tags_in_spoilers(tags):
    """Wrap each comma-separated tag in Discord spoiler bars and return the
    joined result string.
    """
    cleaned = [raw.strip("[]'") for raw in tags.split(", ")]
    spoilered = ["||" + tag + "||" for tag in cleaned]
    return ",".join(spoilered)
3e8f3e0a2a92ec1518635cd6bb39d082bc924c8d
53,613
def cli(ctx, name, synopsis, description="", type="unrestricted", remote_repository_url="", homepage_url="", category_ids=""):
    """Create a new repository in a Tool Shed.

    Output:

        a dictionary containing information about the new repository.
        For example::

            {"deleted": false,
             "deprecated": false,
             "description": "new_synopsis",
             "homepage_url": "https://github.com/galaxyproject/",
             "id": "8cf91205f2f737f4",
             "long_description": "this is some repository",
             "model_class": "Repository",
             "name": "new_repo_17",
             "owner": "qqqqqq",
             "private": false,
             "remote_repository_url": "https://github.com/galaxyproject/tools-devteam",
             "times_downloaded": 0,
             "type": "unrestricted",
             "user_id": "adb5f5c93f827949"}
    """
    # Forward everything to the bioblend repositories client; `type` keeps
    # its builtin-shadowing name because it is part of the CLI interface.
    optional = {
        "description": description,
        "type": type,
        "remote_repository_url": remote_repository_url,
        "homepage_url": homepage_url,
        "category_ids": category_ids,
    }
    return ctx.gi.repositories.create_repository(name, synopsis, **optional)
bc5e5da0a72faf28e47d880c1d6485bbc91b698c
53,616
def extract(v): """ 提取张量(Tensor)存储在 storage 中的数据部分,并将数据存储到列表(list)中 :param v: 张量 :return: 将数据存储到列表(list),并返回 """ # .storage(): 返回底层数据 # .tolist(): 将 torch.Tensor 转换为 python 的 list(列表) 类型 return v.data.storage().tolist()
d9cb4d68c201c6582b3fe0b429e23a36487623b7
53,624
def time_feature_engineering_function(dataframe, time_column):
    """Add 8 calendar-based feature columns derived from a datetime column.

    Args:
        dataframe: pd.DataFrame containing `time_column`.
        time_column: name of a datetime64 column (assumed to already carry
            hour/day/month/year information).

    Returns:
        tuple: (the dataframe with the new feature columns added in place,
        list of the new column names)
    """
    # Split the timestamp into the calendar components in which there
    # might be seasonality.
    dataframe['hour'] = dataframe[time_column].dt.hour
    dataframe['dayofweek'] = dataframe[time_column].dt.dayofweek
    dataframe['dayofmonth'] = dataframe[time_column].dt.day
    dataframe['dayofyear'] = dataframe[time_column].dt.dayofyear
    # BUG FIX: Series.dt.weekofyear was deprecated and removed in pandas 2.0;
    # isocalendar().week is the supported equivalent.  Cast back to int to
    # match the old int64 dtype instead of isocalendar's nullable UInt32.
    dataframe['weekofyear'] = dataframe[time_column].dt.isocalendar().week.astype(int)
    dataframe['quarter'] = dataframe[time_column].dt.quarter
    dataframe['month'] = dataframe[time_column].dt.month
    dataframe['year'] = dataframe[time_column].dt.year

    new_col_name_list = ['hour', 'dayofweek', 'dayofmonth', 'dayofyear',
                         'weekofyear', 'quarter', 'month', 'year']

    return dataframe, new_col_name_list
8407a6f22583a2a02e225493ab358dd3eb355a9e
53,627
from typing import Optional


def form_profile_value(accesskey: str, url: Optional[str] = None) -> str:
    """Build the profile value from an accesskey and an optional URL.

    Arguments:
        accesskey: The accesskey to TensorBay.
        url: The TensorBay url.

    Returns:
        The profile value: an empty leading segment, the accesskey, and the
        url (when given), joined by newlines.
    """
    parts = ["", accesskey]
    if url:
        parts.append(url)
    return "\n".join(parts)
c12c8d29f607d0ad00948f1b797dd0dc509a8ff7
53,634
def _bits_to_float(bits, lower=-90.0, middle=0.0, upper=90.0): """Convert GeoHash bits to a float.""" for i in bits: if i: lower = middle else: upper = middle middle = (upper + lower) / 2 return middle
f059e217f86c23485c58e24ca2328a26e371ad53
53,638
import re


def sourceSplit(source):
    """sourceSplit(source) -> str, str, str

    Split an IRC source of the form nick!ident@host into its parts.
    If `source` does not match that form, (source, None, None) is returned.

    Example:
        >>> nick, ident, host = sourceSplit("Joe!Blogs@localhost")
    """
    match = re.match("(?P<nick>[^!].*)!(?P<ident>.*)@(?P<host>.*)", source)
    if match is None:
        return source, None, None
    return match.group("nick"), match.group("ident"), match.group("host")
3ee0a816085ad5d568800262ab05f0b08ef15d6f
53,643
def has_path(data_product):
    """Return True if this data product's mission supports cloud access.

    Given a data product in the form of an `~astropy.table.Row`, checks the
    lowercased dataURI prefix against the supported missions.
    """
    uri = data_product['dataURI'].lower()
    return uri.startswith(("mast:hst/product", "mast:tess/product", "mast:kepler"))
d46e73f3fa825bdda43d9608d20e480f386df8b7
53,649
def from_c_bytes(c_data, c_data_len): """Takes a byte pointer and a length and converts it into a python bytes value. """ # pylint: disable=invalid-slice-index return bytes(c_data[:c_data_len.value])
34ebb3043cb0b16673d60ccf2009733931e32581
53,652
def create_processor_uid_mapping(mapping_cache):
    """Create a dictionary that maps processor name to uid.

    :param mapping_cache: mapping_cache built in LqnBuilder
    :return: mapping for processor to uid
    """
    processor_uid_mapping = {}
    # Merge the per-branch processor dicts first, then the loop and
    # composite uid maps (later updates win on key collisions).
    for processor_dict in mapping_cache["branch_mapping"].values():
        processor_uid_mapping.update(processor_dict)
    processor_uid_mapping.update(mapping_cache["loop_uid_mapping"])
    processor_uid_mapping.update(mapping_cache["composite_uid_mapping"])
    return processor_uid_mapping
d9773db091fc8b68b9dcf422ad7a03b2ede34243
53,654
def print_multi_list(list_ch_info=None, sep=";"):
    """Print the summary of downloaded claims from multiple channels.

    This is meant to be used with the returned list from
    `ch_download_latest_multi`.

    Parameters
    ----------
    list of lists of dicts
        A list of lists, where each internal list represents one channel,
        and this internal list has as many dictionaries as downloaded claims.
        The information in each dictionary represents the standard output
        of the `lbrynet_get` command for each downloaded claim.

        If the download fails, then the corresponding item in the list
        may be `False`, in which case no claim information is printed.
    sep: str, optional
        It defaults to `;`.
        It is the separator character between the data fields in the
        printed summary. Since the claim name can have commas,
        a semicolon `;` is used by default.

    Returns
    -------
    bool
        It returns `True` if the information was read and printed
        without problems.
        If there is a problem or no list of items, it will return `False`.
    """
    if not list_ch_info or not isinstance(list_ch_info, (list, tuple)):
        print("Print information from a list of lists from multiple "
              "channels obtained from `ch_download_latest_multi`.")
        return False

    if len(list_ch_info) < 1:
        print("Empty list.")
        return False

    # Flatten the per-channel sublists, keeping a None placeholder for every
    # falsy sublist or falsy item so the numbering still reflects failures.
    # flat_list = [item for sublist in list_ch_info for item in sublist]
    flat_list = []
    for sublist in list_ch_info:
        if not sublist:
            flat_list.append(None)
            continue
        for item in sublist:
            if not item:
                flat_list.append(None)
                continue
            flat_list.append(item)

    n_items = len(flat_list)

    print("Summary of downloads")
    out_list = []
    for it, item in enumerate(flat_list, start=1):
        # Each row starts with "index/total" followed by the separator.
        out = "{:2d}/{:2d}".format(it, n_items) + f"{sep} "

        if not item:
            out += "empty item. Failure establishing server connection?"
            out_list.append(out)
            continue

        if "claim_id" in item:
            # Successful download: claim id, blob progress, channel, name.
            out += "{}".format(item["claim_id"]) + f"{sep} "
            out += "{:3d}/{:3d}".format(item["blobs_completed"],
                                        item["blobs_in_stream"]) + f"{sep} "
            out += '"{}"'.format(item["channel_name"])
            out += f"{sep} "
            out += '"{}"'.format(item["claim_name"])
            out_list.append(out)
        elif "error" in item:
            out_list.append(out + '"{}"'.format(item["error"]))
        else:
            out_list.append(out + "not downloaded")

    print("\n".join(out_list))

    return True
51aefe169399561e5d0e0f6c21576c9ec8e4813f
53,655
def fix(series):
    """Reshape a pandas Series into the (n_samples, 1) layout the model API
    expects.

    Args:
        series (pd.Series): the series to reshape

    Return:
        numpy array with shape (n_samples, 1)
    """
    values = series.values
    return values.reshape(-1, 1)
e61392f0bcfcec59ff52f96cd65e2627b35ab8dd
53,660
def division(numerator, denominator):
    """Return numerator/denominator, or 0 when the denominator is falsy."""
    if not denominator:
        return 0
    return numerator / denominator
7d46e6a0c58373df5475f4a4a7aaaa7bfa5b9d4d
53,661
def string_to_elements(string):
    """Split a colon-separated hex string (s1:s2:s3) into a list of bytes
    objects, skipping empty segments.
    """
    return [bytes.fromhex(chunk) for chunk in string.split(':') if chunk]
e1fe186d0e4b8f4b5ebc7fcc548a24be734189d9
53,663
def psa_want_symbol(name: str) -> str:
    """Return the PSA_WANT_xxx symbol associated with a PSA crypto feature."""
    prefix = 'PSA_'
    if not name.startswith(prefix):
        raise ValueError('Unable to determine the PSA_WANT_ symbol for ' + name)
    return prefix + 'WANT_' + name[len(prefix):]
dfee9a4d1ad2f38fd9e9927050545af6b144591e
53,665
def uses_only(word, letters):
    """Return True if `word` uses only characters from `letters`
    (case-insensitive)."""
    allowed = letters.lower()
    return all(ch.lower() in allowed for ch in word)
c0118b10e90bb61186419c68fa710353425c081c
53,673
def enter_qty(oFieldsReporter, FieldQuantityString):
    """
    Enter a field quantity into the Fields Calculator.

    Parameters
    ----------
    oFieldsReporter : pywin32 COMObject
        An HFSS "FieldsReporter" module
    FieldQuantityString : str
        The field quantity to be entered onto the stack.

    Returns
    -------
    None
    """
    # Thin wrapper over the HFSS COM API: EnterQty pushes the named field
    # quantity onto the Fields Calculator stack; its return value (normally
    # None) is passed straight through.
    return oFieldsReporter.EnterQty(FieldQuantityString)
91ae74757d9832e0deffcb1e19df81711ea27697
53,674
def column(results):
    """Get a list of values from a single-column ResultProxy."""
    values = []
    # Strict one-element unpacking: raises if a row has more than one column.
    for (value,) in results:
        values.append(value)
    return values
352a604eb9bb863c8bf71c7c96df6c2cbcd226c8
53,677
def int_to_gast(node):
    """Convert a parsed int literal node to a gast num node, honouring a
    leading unary minus prefix operator.
    """
    magnitude = int(node.value)
    prefixes = node.prefix_operators
    if len(prefixes) > 0 and prefixes[0] == "-":
        return {"type": "num", "value": -magnitude}
    return {"type": "num", "value": magnitude}
f304b36a78b3a705322f7247a57473ee5d396ab7
53,683
def ScaleData(data, old_min, old_max, new_min, new_max):
    """Scale the input data so that the range old_min-old_max maps to
    new_min-new_max.  None entries pass through unchanged.
    """
    # Degenerate input range: fall back to a pure translation.
    if old_min == old_max:
        scale = 1
    else:
        scale = (new_max - new_min) / float(old_max - old_min)
    translate = new_min - scale * old_min
    return [None if x is None else scale * x + translate for x in data]
3d9748fc18ba9fd34e3f018c8f717ca4267d486b
53,686
def to_base36(value):
    """Return the base36 representation of an integer.

    Args:
        value: integer to encode (negative values get a leading '-').

    Returns:
        str: base36 digits, '0' for zero.
    """
    if value < 0:
        # BUG FIX: the divmod loop below never terminates for negative input
        # (divmod(-1, 36) == (-1, 35) forever); encode the magnitude and
        # prefix the sign instead.
        return u'-' + to_base36(-value)
    digits = u'0123456789abcdefghijklmnopqrstuvwxyz'
    buf = []
    while value:
        value, i = divmod(value, 36)
        buf.append(digits[i])
    return u''.join(reversed(buf)) or u'0'
f2b8d35b486a7356027d858f43bfc22bcdad7936
53,690
def rgb_to_tk(rgb):
    """Convert an (r, g, b) tuple of ints to a tkinter color code string.

    :param rgb: Tuple of 3 ints.
    :return: tk color code string
    """
    red, green, blue = rgb
    return "#{:02x}{:02x}{:02x}".format(red, green, blue)
3e04c03427cdc495ef45fb44865fac35a0fbd991
53,693
def match_feature(pairs, feature_mapping):
    """Match each pair of cards to its feature group id.

    ex. match 'a' -> [{'a', 'A', '@'}, {'h', 'H', '#'}, {'s', 'S', '$'}]
    'a' is found in the first set, so its group id is 0.

    :params: pairs - list of (color, feature) cards
    :params: feature_mapping - either symbol_mapping or shades_mapping
    :return: match_result - list of int group ids
    """
    match_result = []
    for _color, feature in pairs:
        key = feature[0]
        for group_id, members in enumerate(feature_mapping):
            if key in members:
                match_result.append(group_id)
                break
    return match_result
3ee0113ed1be58b2d0d41d1680b41fbb143b733f
53,695
from pathlib import Path


def file_extension(path: str) -> str:
    """Extract the canonical file extension from a path (no leading dot,
    all lowercase), e.g. mp4, avi, jpeg, ts.
    """
    suffix = Path(path).suffix.lower()
    return suffix[1:]
a085044a19de507844412ea4027fda32c0f2f979
53,696
def return_mock_token(_, _a):
    """Return a fixed mock token string (test double)."""
    token = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
    return token
54343f92d8439ab0a0b7cdfafe110949f82cc74e
53,701
import re


def endsWith(self, field, term):
    """endsWith returns true if the value of the identifier ends with the
    string argument; otherwise, it returns false.
    """
    # Anchor the term at the end of the value.
    # NOTE(review): `re.match` anchors at the START of the string, so
    # "term$" effectively requires the whole value to equal `term` rather
    # than merely end with it — a true ends-with test would normally use
    # re.search.  Confirm how self.compareBase applies `func` before
    # changing this.
    # NOTE(review): `term` is interpolated unescaped, so regex
    # metacharacters in it alter the match semantics (consider re.escape).
    expression = "%s$" % term
    return self.compareBase(field=field, expression=expression, func=re.match)
12a050492952964f5455c82dd86347ef651bc0bd
53,702
def parse_imsi(imsi):
    """Verify that the input is a valid IMSI, i.e. 14 or 15 decimal digits,
    stripping an optional leading "IMSI" prefix.

    Args:
        imsi: IMSI string, optionally prefixed with "IMSI".

    Returns:
        str: the IMSI without the prefix.

    Raises:
        TypeError: if the remaining characters are not all decimal digits.
        ValueError: if the length is not 14 or 15.
    """
    # BUG FIX: the old check `'IMSI' in imsi` stripped the FIRST four
    # characters whenever "IMSI" appeared anywhere in the string; only a
    # true prefix should be removed.
    if imsi.startswith('IMSI'):
        imsi = imsi[4:]
    if not str(imsi).isdecimal():
        raise TypeError('IMSI not decimal: %s' % imsi)
    if len(str(imsi)) not in [14, 15]:
        raise ValueError('len(IMSI) invalid')
    return imsi
8b6044727f04e680de7e0521f222714e9fd53e69
53,706
import sqlite3


def open_db(database):
    """Helper function to open a SQLite database connection.

    :param database: path to the database file (or ":memory:")
    :return: tuple of (connection object, cursor)
    """
    # Make connection to storage database
    conn = sqlite3.connect(database)
    c = conn.cursor()
    return conn, c
5718a9c1b74382aa7412067ebd0e8c2387be1a09
53,708
def BinaryTree(r):
    """Create a new binary tree as [root value, left subtree, right subtree]."""
    left = []
    right = []
    return [r, left, right]
409e19ec24576db6b97bee5bac8d65f727c6c1b7
53,712
import random


def create_random_datetime(from_date, to_date, rand_type="uniform"):
    """Create a random date within the given timeframe.

    Parameters
    ----------
    from_date : datetime object
    to_date : datetime object
    rand_type : {'uniform'}

    Examples
    --------
    >>> random.seed(28041990)
    >>> create_random_datetime(datetime(1990, 4, 28), datetime(2000, 12, 31))
    datetime.datetime(1998, 12, 13, 23, 38, 0, 121628)
    """
    if rand_type != "uniform":
        raise NotImplementedError(f"Unknown random mode '{rand_type}'")
    fraction = random.random()
    return from_date + fraction * (to_date - from_date)
ec80ce4498f70c4682e0210db8b4ffab09296a5e
53,713
def slicex(df, values, keep_margins=True): """ Return an index-wise slice of df, keeping margins if desired. Assuming a Quantipy-style view result this function takes an index slice of df as indicated by values and returns the result. Parameters ---------- df : pandas.DataFrame The dataframe that should be sliced along the index. values : list-like A list of index values that should be sliced from df. keep_margins : bool, default=True If True and the margins index row exists, it will be kept. Returns ------- df : list The sliced dataframe. """ # If the index is from a frequency then the rule # should be skipped if df.index.levels[1][0]=='@': return df name_x = df.index.levels[0][0] slicer = [(name_x, value) for value in values] if keep_margins and (name_x, 'All') in df.index: slicer = [(name_x, 'All')] + slicer df = df.loc[slicer] return df
6719eba7824b04b2e1d5735fd699412190a8811e
53,718
import re


def GuessSlugFromTitle(title):
    """Return an automatically generated slug from title: lowercase, spaces
    become dashes, other non-alphanumeric characters are dropped, and runs
    of dashes are collapsed.
    """
    dashed = title.lower().replace(' ', '-')
    kept = ''.join(c for c in dashed if c.isalnum() or c == '-')
    return re.sub("-+", "-", kept)
6073e087c8d200034f099335b8e9b3468e165a1d
53,725
import itertools


def flatten(list_of_list):
    """Flatten one layer of nesting, returning a lazy iterator.

    Usage::

        >>> list(flatten([[0, 1], [2, 3]]))
        [0, 1, 2, 3]
    """
    flat = itertools.chain.from_iterable(list_of_list)
    return flat
9a46469ae3e1ed99584c0da1994fa2c6cd56b6d5
53,726
def colorToTagIndex(col):
    """Convert an MColor value to a tag index."""
    # Scale the red channel to 0-255 and take its hex string; the
    # reverse/pad/reverse dance left-pads the "0x.." string to length 4
    # (e.g. '0x5' -> '00x5') so the digits after 'x' can be sliced off.
    r = hex(int(round(col.r*255.0)))[::-1]
    while len(r) < 4:
        r += '0'
    r = r[::-1]
    # Same treatment for the green channel.
    g = hex(int(round(col.g*255.0)))[::-1]
    while len(g) < 4:
        g += '0'
    g = g[::-1]
    # Pack as 0x<gg><rr> and parse back to an int.
    # NOTE(review): for channel values below 0x10 the sliced digit string is
    # a single character (the padding lands before the 'x'), so e.g.
    # g=0xff, r=0x05 yields 0xff5 rather than 0xff05 — confirm whether
    # sub-16 channel values can occur and what encoding the tag consumer
    # expects before relying on this.
    return int('0x%s%s'%(g[g.find('x')+1::], r[r.find('x')+1::]), 16)
6fbbe61316dd22f4abca3b9225276bb22008e18c
53,727
from typing import List


def tokenize(equation_string: str) -> List[str]:
    """Tokenize an equation string, padding each token with spaces so the
    original string can be reconstructed by concatenation.
    """
    spaced = equation_string.replace("(", " ( ").replace(")", " ) ")
    return [f" {token} " for token in spaced.split()]
0c2f09488358980a25ad27f6b9555f3e2113173d
53,728
def rev_bits(x: int, width: int = 0) -> int:
    """Return the integer with its low `width` bits reversed.

    Params:
        x - integer to reverse
        width - width of the integer (in bits); defaults to x.bit_length()

    Returns:
        The reversed integer.
    """
    width = width or x.bit_length()
    result = 0
    remaining = x
    # Peel bits off the low end of x and push them onto the low end of
    # result, which mirrors them across the width.
    for _ in range(width):
        result = (result << 1) | (remaining & 1)
        remaining >>= 1
    return result
1650abb6be76aefd293e8801b8a0398947da78d9
53,733
def is_sum_of_extended_pdfs(model) -> bool:
    """Check whether the input model is a sum of extended models.

    Args:
        model: the input model/pdf

    Returns:
        True if the model is a sum of extended models, False if not.
    """
    get_models = getattr(model, "get_models", None)
    if get_models is None:
        return False
    return all(component.is_extended for component in get_models())
a3f6e7ce9db495296b01c0d350e63a5ae448e334
53,734
import torch


def nms(boxes: torch.Tensor, scores: torch.Tensor, iou_threshold: float) -> torch.Tensor:
    """Perform non-maxima suppression (NMS) on a given tensor of bounding
    boxes according to the intersection-over-union (IoU).

    Args:
        boxes: tensor containing the encoded bounding boxes with the shape
            :math:`(N, (x_1, y_1, x_2, y_2))`.
        scores: tensor containing the scores associated to each bounding box
            with shape :math:`(N,)`.
        iou_threshold: the threshold to discard the overlapping boxes.

    Return:
        A tensor mask with the indices to keep from the input set of boxes
        and scores.

    Example:
        >>> boxes = torch.tensor([
        ...     [10., 10., 20., 20.],
        ...     [15., 5., 15., 25.],
        ...     [100., 100., 200., 200.],
        ...     [100., 100., 200., 200.]])
        >>> scores = torch.tensor([0.9, 0.8, 0.7, 0.9])
        >>> nms(boxes, scores, iou_threshold=0.8)
        tensor([0, 3, 1])
    """
    # BUG FIX: the shape check used `and`, so e.g. a (N, 5) 2-D tensor or a
    # 1-D tensor with last dim 4 slipped through; reject anything not Nx4.
    if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
        raise ValueError(f"boxes expected as Nx4. Got: {boxes.shape}.")
    if len(scores.shape) != 1:
        raise ValueError(f"scores expected as N. Got: {scores.shape}.")
    if boxes.shape[0] != scores.shape[0]:
        raise ValueError(f"boxes and scores mus have same shape. Got: {boxes.shape, scores.shape}.")

    x1, y1, x2, y2 = boxes.unbind(-1)
    areas = (x2 - x1) * (y2 - y1)
    _, order = scores.sort(descending=True)

    keep = []
    while order.shape[0] > 0:
        # Keep the highest-scoring remaining box...
        i = order[0]
        keep.append(i)
        # ...and compute its IoU against all remaining candidates.
        xx1 = torch.max(x1[i], x1[order[1:]])
        yy1 = torch.max(y1[i], y1[order[1:]])
        xx2 = torch.min(x2[i], x2[order[1:]])
        yy2 = torch.min(y2[i], y2[order[1:]])

        w = torch.clamp(xx2 - xx1, min=0.)
        h = torch.clamp(yy2 - yy1, min=0.)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)

        # Drop candidates overlapping the kept box above the threshold.
        inds = torch.where(ovr <= iou_threshold)[0]
        order = order[inds + 1]

    if len(keep) > 0:
        return torch.stack(keep)
    return torch.tensor(keep)
e4f61faf23959d9b72cd425c1603117d7b297c23
53,737
def padl(l):
    """Return the amount of padding (0-3) needed to round `l` up to a
    4-byte multiple.
    """
    return (-l) % 4
264cb5a0776d4f314711d539ad786a3ae9b35221
53,739
def filter_bonds(bonds, pocket_indices):
    """Get the bond list of bonds between atoms in the target's pocket.

    Keeps only bonds whose two endpoint atoms both lie in the pocket, and
    remaps the endpoint indices to the atoms' positions among the kept
    atoms (1-based, in the order of `pocket_indices`).
    """
    index_mapping = {
        original: position
        for position, original in enumerate(pocket_indices, start=1)
    }
    remapped = []
    for entry in bonds:
        atom_a, atom_b = entry[0], entry[2]
        if atom_a in index_mapping and atom_b in index_mapping:
            remapped.append([index_mapping[atom_a], entry[1], index_mapping[atom_b]])
    return remapped
eed3b2a545a3e2a17df3d491b9c0b93e5f7d818b
53,741
def is_content_authored_by(content, user):
    """Return True if the author of this content is the passed user,
    else False (including when the stored user_id is missing or not
    convertible to int).
    """
    raw_id = content.get('user_id')
    try:
        return user.id == int(raw_id)
    except (ValueError, TypeError):
        return False
9a6846b04bce0066973aac3e3a5dfda2db2b1663
53,742
def prefix_handlers(endpoint, handlers):
    """Prepend each handler's route with the given endpoint.

    eg. Given:
        endpoint = /sweet+service/1.0
        handlers[0] = (r"/people", PeopleHandler)
    Result:
        handlers[0] = (r"/sweet+service/1.0/people", PeopleHandler)

    The `handlers` list is modified in place and also returned.
    """
    if not endpoint:
        endpoint = '/'
    # BUG FIX: without a trailing slash the endpoint and path were fused
    # ("/sweet+service/1.0" + "people" -> "/sweet+service/1.0people"),
    # contradicting the documented result above.
    if not endpoint.endswith('/'):
        endpoint += '/'
    for i, handler in enumerate(handlers):
        path = handler[0]
        if path.startswith('/'):
            path = path[1:]
        # Preserve any extra tuple elements (handler class, kwargs, ...).
        handlers[i] = (endpoint + path,) + handler[1:]
    return handlers
6694dfebafd87b0bc26730ac8c021ca8096cd020
53,743
import functools
import time


def python_profile(func):
    """Decorator for profiling MXNet operation.

    Uses Python's time module to collect execution time information
    of the operation.

    Parameters
    ----------
    func: Operation to be executed and timed.

    Returns
    -------
    res, timing output. res being result returned after operator execution.
    profiler output is a dictionary with summary of operation execution.
    Example output : { "add": [{"avg_time_add": 0.4053089120425284,
                                'inputs': {'lhs': (1024, 1024),
                                           'rhs': (1024,1024)}]}
    """
    @functools.wraps(func)
    def python_profile_it(*args, **kwargs):
        start_time = time.perf_counter()  # 1: wall-clock start
        res = func(*args, **kwargs)
        end_time = time.perf_counter()    # 2: wall-clock end
        run_time = end_time - start_time  # 3: elapsed seconds

        # NOTE : same as cpp_profile_it — recover the operator name either
        # from the first positional arg (assumed callable) or from a Gluon
        # block passed via the 'block' kwarg.
        if len(args) > 0:
            operator_name = args[0].__name__
        elif 'block' in kwargs:
            operator_name = kwargs['block']._op_name
        else:
            raise ValueError("Unable to identify operator name to extract profiler output!")

        profiler_output = {'avg_time_'+str(operator_name): run_time}
        return res, profiler_output
    return python_profile_it
373ef98127e20074cf72c2b49a7c40175fceec15
53,745
def random_integer_list(
    lowest_number: int,
    highest_number: int,
    numbers_required: int,
    no_repetitions: bool = False,
    seed : int = 0,
    slope : int = 11,
    y_intercept : int = 13,
) -> list:
    """Generate pseudo-random numbers via linear congruential generation.

    See https://en.wikipedia.org/wiki/Linear_congruential_generator

    Args:
        lowest_number: lower-bound offset applied to the generated values.
        highest_number: the LCG modulus (upper bound on raw values).
        numbers_required: how many numbers to generate.
        no_repetitions: if True, a raw seed value already present in the
            output list is skipped.
        seed: initial seed of the generator.
        slope: the LCG multiplier.
        y_intercept: the LCG increment.

    Returns:
        List of random numbers.
    """
    assert slope > 0 and slope < highest_number, "The multiplier `slope` must be positive and less than the `modulus`"
    assert y_intercept > 0 and y_intercept <= highest_number, ""
    assert seed >= 0 and seed < highest_number
    seed %= numbers_required

    results: list = []
    while len(results) < numbers_required:
        # Advance the LCG state.
        seed = (seed * slope + y_intercept) % highest_number
        duplicate = no_repetitions and seed in results
        if not duplicate:
            results.append(seed + lowest_number - 1)
    return results
c94ac98fb6ab5fd4312032fe4e63424cd71adf81
53,747
def construct_dict_from_source(fields, source):
    """Construct a new dict from a source dict, silently skipping any field
    whose getter raises KeyError.

    :param fields: dict mapping target field names to functions that extract
        the desired value from the source dict
    :type fields: dict
    :param source: source dictionary
    :type source: dict
    :return: target dictionary
    :rtype: dict
    """
    target = {}
    for field_name, getter in fields.items():
        try:
            value = getter(source)
        except KeyError:
            continue
        target[field_name] = value
    return target
5e98709090a22dc69a4fb2e003b2e628fc39bf32
53,749
def invalid_name(entity):
    """Generate an invalid-name message template for the given entity kind.

    The %(value)s and %(expected)s placeholders are left for the caller to
    fill in later.
    """
    base = "is an invalid {} name.".format(entity)
    return "%(value)s " + base + " Expected %(expected)s."
f24d7bcd1b59d3d5f05dd49a7b440cf208094233
53,752
def dd2dms(dd):
    """Convert a decimal degree value to degrees, minutes, seconds.

    Parameters
    ----------
    dd : numeric
        The decimal to convert

    Returns
    -------
    d, m, s : tuple
        Integer degrees, integer minutes, and float seconds
    """
    degrees = int(dd)
    total_minutes = (dd - degrees) * 60
    minutes = int(total_minutes)
    seconds = float((total_minutes - minutes) * 60)
    return degrees, minutes, seconds
58aea6935bd37520a274b99c1e142333d82bd393
53,754
def sanitize_sample_id(sample_id):
    """Replace slashes with underscores, so KBase refs can be used as
    sample IDs."""
    return '_'.join(sample_id.split('/'))
f3e81531b1e8d9fddb61c1a901a0652e082bedda
53,755
import torch


def onehot(size, target):
    """Translate a scalar target into a one-hot float32 vector of length
    `size`."""
    encoded = torch.zeros(size, dtype=torch.float32)
    encoded[target] = 1.0
    return encoded
2bfdb8437011e7cd8c291cc59564da0ffce3f7d3
53,767
def eqvalue(x):
    """Check that all values of the iterable `x` are identical and return
    that value; raise ValueError otherwise.

    Raises:
        ValueError: if the values differ, or if `x` is empty.

    >>> eqvalue([1, 1, 1])
    1
    """
    items = iter(x)
    try:
        first = next(items)
    except StopIteration:
        # BUG FIX: previously an empty iterable leaked StopIteration, which
        # silently terminates an enclosing generator (PEP 479) instead of
        # signalling the error.
        raise ValueError("eqvalue() arg is an empty iterable") from None
    for item in items:
        if item != first:
            raise ValueError("Values are not identical: {}, {}".format(first, item))
    return first
15f4b6fa61bb23400345fd15217626338c030066
53,769
import torch


def find_best_match_indices(search_for, search_in, less_or_equal=False, greater_or_equal=False):
    """For each element of `search_for`, find the index of closest element in `search_in`.

    Parameters
    ----------
    search_for: torch.tensor
        Scalar or 1-D tensor of reference values.
    search_in: torch.tensor
        1-D tensor of candidate values.
    less_or_equal: bool
        If True, searches for an element which is less or equal than the
        reference from `search_for`
    greater_or_equal: bool
        If True, searches for an element which is greater or equal than the
        reference from `search_for`

    Returns
    -------
    indices: torch.Tensor
        Shape: `search_for.shape`
        If the required neighbour was not found, -1 value is used instead of
        true indices.
    """
    assert search_for.ndim <= 1, "search_for should be a scalar or 1D tensor."
    assert search_in.ndim == 1, "search_in should be a 1D tensor."
    # Pairwise differences: row i is search_for[i] - search_in (broadcast).
    diff = search_for.float().view(-1, 1) - search_in.float().view(1, -1)
    # Mask candidates on the wrong side by sending them to +inf so they can
    # never win the argmin below.
    if less_or_equal:
        diff[diff < 0] = torch.tensor(float('inf')).to(search_in.device)  # pylint: disable=not-callable
    if greater_or_equal:
        diff[diff > 0] = torch.tensor(float('inf')).to(search_in.device)  # pylint: disable=not-callable
    diff = torch.abs(diff)
    res = torch.argmin(diff, dim=1)
    if less_or_equal or greater_or_equal:
        # Rows where every candidate was masked have no valid neighbour.
        res[torch.all(diff == float('inf'), dim=1)] = -1
    # Preserve the input's dimensionality: scalar in -> scalar out.
    return res if search_for.ndim else res.squeeze()
a3ca4a5a3b31e04331bcd33d80552948da5b50b7
53,774
def find_beamwidth(the_wavel, dish_size):
    """Find the beamwidth using Stull eq. 8.13.

    Parameters
    ----------
    the_wavel : float
        wavelength (cm)
    dish_size : float
        antenna dish diameter (m)

    Returns
    -------
    beamwidth : float
        beamwidth angle (degrees)
    """
    # Stull eq. 8.13: beamwidth = a * wavelength / dish diameter
    a = 71.6  # constant (degrees)
    the_wavel = the_wavel / 100.  # convert cm to m
    return a * the_wavel / dish_size
06096b78eab965625120050ba1e205dc99ae170f
53,776
def _GetFailedRevisionFromResultsDict(results_dict): """Finds the failed revision from the given dict of revisions. Args: results_dict: (dict) A dict that maps revisions to their results. For example: { 'rev1': 'passed', 'rev2': 'passed', 'rev3': 'failed', } Note results_dict is expected only to have one failed revision which will be the one to be returned. Returns: The revision corresponding to a failed result, if any. """ for revision, result in results_dict.iteritems(): if result.lower() == 'failed': return revision return None
cecdf11c21b0851d0b61e08983dfe3eb3fed1c7a
53,779
def create_node(kind=None, args=None):
    """Create a {kind, args} Celery Script node."""
    return {'kind': kind, 'args': args}
84bd6de0182f16b93e04a76769f88896db3a657b
53,789
def pair_wise_flip_flopping(W): """Determine if a unique walk matrix demonstrates pair-wise flip-flopping. Pair-wise flip-flopping is defined as: For every pair of classes C[i] and C[j], there exist walk lengths L[x] and L[y], such that C[i] has more closed walks of length L[x] than C[j] and vice versa. Parameters ---------- W : Numpy Matrix Unique walk matrix as returned by `walk_classes` Returns ------- boolean True if pair-wise flip-flopping holds for `W` """ # Work with an ndarray representation of W w = W.getA() # Get the number of rows and columns in W num_rows, num_cols = w.shape # Generate all possible pairs of classes and k-walks # A single pair is held as a tuple (x, y) classes, walks = ([ (i, j) for i in range(0, num) for j in range(i + 1, num) ] for num in [num_rows, num_cols]) # Iterate over all pairs of classes for cls1, cls2 in classes: # Whether these two classes flip-flop at any point flip_flops = False # Iterate over all pairs of walks for w1, w2 in walks: # Comparison of classes for k-walk 1 n1 = w[cls1][w1] - w[cls2][w1] # Comparison of classes for k-walk 2 n2 = w[cls1][w2] - w[cls2][w2] # Check to see if the classes flip-flopped if (n1 > 0 and n2 < 0) or (n1 < 0 and n2 > 0): flip_flops = True break # If the classes don't flip-flop, return false immediately if not flip_flops: return False # Return true if no counter examples are found return True
e40216b665beceb88bfd9149f296cfbf2d0d1c9f
53,790
from typing import List
from typing import Dict
import requests


def fetch_stories(story_count: int) -> List[Dict[str, str]]:
    """Fetch summaries and links to Spotlight PA stories from the Spotlight
    PA story feed.

    Args:
        story_count (int): Number of stories to return

    Returns:
        List[Dict[str, str]]: Spotlight PA stories
    """
    feed_url = "https://www.spotlightpa.org/news/feed-summary.json"
    payload = requests.get(feed_url).json()
    return payload["items"][:story_count]
78df5872f9cbbd4f032aec750c335e8d84a23533
53,792
def sort_diclist(undecorated, sort_on):
    """Sort a list of dicts by each dict's value for the key `sort_on`.

    Values that have a length (strings, lists, ...) are compared by
    their length; all other values are compared directly. Ties keep
    their original relative order (stable sort).

    Parameters
    ----------
    undecorated : list of dicts
    sort_on : str, numeric
        key that is present in all dicts to sort on

    Returns
    -------
    ordered list of dicts
    """
    def _sort_key(entry):
        value = entry[sort_on]
        return len(value) if hasattr(value, '__len__') else value

    return sorted(undecorated, key=_sort_key)
678d818d5eed43d14d922b0a727e90ec71d0e352
53,798
import copy
import random


def generate_training_sets(dataset, percentage, copies):
    """Generate `copies` resampled training sets from `dataset`.

    Each new set is built by deep-copying the population, shuffling it,
    and keeping the first ``len(dataset) * percentage // 100`` instances,
    so no instance appears twice within one set.

    Args:
        dataset (list): List of vectors (features + label).
        percentage: Size of each new set relative to the population (%).
        copies (int): Number of new training sets required.

    Returns:
        list: List of new training sets (each a list of vectors).
    """
    training_sets = []
    for _ in range(copies):
        population = copy.deepcopy(dataset)
        random.shuffle(population)
        cutoff = len(population) * percentage // 100
        training_sets.append(population[:cutoff])
    return training_sets
129fe8231e28dfc1c3352f5466f2667c2a204508
53,800
import importlib


def has_module(name):
    """Test if given module can be imported.

    @return: flag if import is successful
    @rtype: bool
    """
    try:
        # Some modules (for example HTMLtidy) raise OSError on import,
        # so it is treated the same as ImportError.
        importlib.import_module(name)
    except (ImportError, OSError):
        return False
    return True
dc0ca9fd3cc5c162625625d5656bf704c907ceb3
53,808
def analyse_storage(cid):
    """Parse num_blocks and content size from ``<cid>_storage.txt``.

    The file is expected to contain output of ``ipfs dag stat <cid>``
    in the form ``"Size: 152361, NumBlocks: 1\\n"``.

    :param cid: cid of the object
    :return: (number of blocks, size of the content) as strings
    """
    size = 0
    num_blocks = -1
    with open(f'{cid}_storage.txt', 'r') as report:
        for line in report.readlines():
            if "Size" in line:
                fields = line.split(",")
                # "Size: 152361" -> "152361"
                size = fields[0].split(" ")[1]
                # " NumBlocks: 1\n" -> "1"
                num_blocks = fields[1].split(" ")[2]
                num_blocks = num_blocks.split("\n")[0]
    return num_blocks, size
d1fdacdd926458c8b2a0710651a6c649a2ac0b68
53,814
from xml.sax.saxutils import quoteattr


def xmlattrstr(attrs):
    """Construct an XML-safe attribute string from the given attributes.

    "attrs" is a dictionary of attributes.

    The returned attribute string includes a leading space, if necessary,
    so it is safe to use the string right after a tag name.
    """
    # Fix: dict.keys() returns a view in Python 3 and has no .sort();
    # sorted() gives the same stable, key-ordered output.
    parts = [' %s=%s' % (name, quoteattr(str(attrs[name])))
             for name in sorted(attrs)]
    return ''.join(parts)
4b341f6b3b8b80162440f8562cb54ae32bb56c76
53,817
import random


def owo(chat, match):
    """Respond to OwO appropriately, with a randomly chosen reply."""
    replies = ['whats this??', '*notices ur buldge*', '>//w//<',
               '*pounces on u*', '*sneaks in your bed and cuddles u*',
               '*nozzles u*',
               '*pounces on u and sinks his nozzle inside your fluffy fur*',
               '*scratches ur ears* x3']
    return chat.send_text(random.choice(replies))
b1bc21f1e95e64ac5a58f1356be6b05e57b3de09
53,821
def append_tag(image_tag, append_str):
    """Append `append_str` to `image_tag` separated by a hyphen.

    :param image_tag: str, original image tag
    :param append_str: str, string to be appended
    """
    return "{}-{}".format(image_tag, append_str)
b35ad32be77d473237ff6c47afba253400a031b2
53,826
from datetime import datetime


def get_entry_date(entry):
    """Return the entry's last-modified time (file metadata) as a datetime."""
    mtime = entry.stat().st_mtime
    return datetime.fromtimestamp(mtime)
c14973154b9814faffe07dd62d9119543bb72a8c
53,828
def generate_parentheses(n):
    """Generate all distinct well-formed strings of n parenthesis pairs.

    Replaces the previous implementation, which relied on mutable module
    globals (`output`, `required_length`), filtered duplicates through a
    set, and carried dead/commented-out code paths. Standard backtracking
    enumerates every valid combination exactly once: at each step we may
    open a parenthesis (while fewer than n are open) or close one (while
    more are open than closed).

    Args:
        n (int): number of parenthesis pairs; assumed non-negative.

    Returns:
        list[str]: every valid combination; empty list for n <= 0.
    """
    combos = []

    def _backtrack(prefix, opened, closed):
        # prefix holds the characters chosen so far.
        if len(prefix) == 2 * n:
            combos.append(''.join(prefix))
            return
        if opened < n:
            prefix.append('(')
            _backtrack(prefix, opened + 1, closed)
            prefix.pop()
        if closed < opened:
            prefix.append(')')
            _backtrack(prefix, opened, closed + 1)
            prefix.pop()

    if n > 0:
        _backtrack([], 0, 0)
    return combos
ff602cbf278e92d16cb5f338257e0a4373660a7f
53,829
def bytes_fmt(value):
    """Format a byte value human readable.

    Args:
        value (float): value of bytes

    Returns:
        str: formatted value with a binary-prefix unit (B/KB/MB/GB/TB)
    """
    for unit in ['', 'K', 'M', 'G']:
        if abs(value) < 1024.0:
            # Fix: the conversion type 'f' belongs inside the braces;
            # '{:3.2}f' printed a 2-significant-digit number followed by
            # a literal 'f'.
            return '{:3.2f}{}B'.format(value, unit)
        value /= 1024.0
    # Anything >= 1024 GB is reported in terabytes (spurious 'G' argument
    # removed from the original format call).
    return '{:3.2f}TB'.format(value)
37be4030ccc7699d657c4fc908547241698b3e5f
53,831
def get_length(string):
    """Return the number of whitespace-separated words in `string`.

    Note: despite the original docstring ("length of a string"), this
    counts words via str.split(), not characters — callers rely on the
    word count, so that behaviour is kept.

    Returns None (after printing a message) if the argument does not
    support split(), preserving the original best-effort behaviour while
    narrowing the previous bare ``except:``.
    """
    try:
        words = string.split()
    except AttributeError:
        print("Error encountered!")
        return None
    return len(words)
1a63adda111891aa76f9189645699a4f9c51af9d
53,834
import torch


def scale_oodom(x: torch.Tensor) -> torch.Tensor:
    """
    Scales the given input with a constant of 255 such that it can be
    considered out-of-domain.
    """
    return 255 * x
441abb095ea9d45ca6b1647b0bc05e8285b93677
53,838
def _get_vm_prop(vm, attributes):
    """Walk an attribute path on `vm`, returning None if any step is missing."""
    current = vm
    for attribute in attributes:
        try:
            current = getattr(current, attribute)
        except (AttributeError, IndexError):
            return None
    return current
d563e7cf677c9668566b0dd5ebf9224a3d2af1c4
53,840
def calculate_precision(total_correct: int, total_found: int) -> float:
    """
    Calculate precision as the ratio of correct acronyms to the found acronyms.

    :param total_correct: number of correct acronyms
    :param total_found: number of found acronyms
    :return: precision, or 0 when nothing was found
    """
    if total_found == 0:
        return 0
    return total_correct / total_found
c512e2d3d2ae964b76cc1fba836f05f8070f42af
53,843
from typing import FrozenSet
from typing import Set


def remove_incomplete(schedules: Set[FrozenSet[str]]) -> Set[FrozenSet[str]]:
    """
    Removes all incomplete schedules from schedules, returning the
    remaining set of schedules.

    NOTE(review): elements are typed as frozensets, which can never
    compare equal to the string "incomplete" — confirm whether membership
    ('"incomplete" in schedule') was intended.
    """
    return {schedule for schedule in schedules if schedule != "incomplete"}
03a042578a6986706e0fd60747ed2ca36e70217b
53,849
def prefix_matcher(*prefixes):
    """Return a module match func that matches any of the given prefixes."""
    assert prefixes
    prefix_tuple = tuple(prefixes)

    def match(name, module):
        # str.startswith accepts a tuple of candidate prefixes.
        return name.startswith(prefix_tuple)

    return match
90266470befc923aeb0020af32dd3a0dc636e9e1
53,851
def format_duration(secs):
    """
    Format a duration in seconds as minutes and seconds.

    Negative durations are prefixed with '-'.

    Fixes two bugs in the original:
    - the sign test ran on the already-abs()-ed remainder, so negative
      durations never received the '-' prefix;
    - Python 3 true division made `mins` a float (90s -> '1m 00s'
      instead of '1m 30s'); divmod yields the correct integer split.
    """
    secs = int(secs)
    sign = '-' if secs < 0 else ''
    magnitude = abs(secs)
    if magnitude > 60:
        mins, rem = divmod(magnitude, 60)
        return '%s%im %02is' % (sign, mins, rem)
    return '%s%is' % (sign, magnitude)
1e228c7ce20b2598531dcb72213641736c26d882
53,854
def _escape(s):
    """
    Escape HTML characters and return an escaped string.
    """
    # '&' must be replaced first so entities are not double-escaped.
    replacements = (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'))
    for target, entity in replacements:
        s = s.replace(target, entity)
    return s
7c89520512deca71c9615274dff69db88f5ee250
53,855
def crop(img, box):
    """
    Crop region of box in image.

    Input:
        img: np.array
        box: (left, top, width, height), int
    Return:
        croped_img: np.array
    """
    left, top, width, height = box
    return img[top:top + height, left:left + width]
56e1497245c265d2bdca76ed683414996cbf41eb
53,860
def inside_circle(cx, cy, r):
    """
    Return a function that checks whether a point is in a circle with
    center (*cx*, *cy*) and radius *r*.

    The returned function has the signature::

        f(xy: Tuple[float, float]) -> bool
    """
    r_squared = r ** 2

    def _contains(xy):
        px, py = xy
        # Strictly inside: boundary points are excluded.
        return (px - cx) ** 2 + (py - cy) ** 2 < r_squared

    return _contains
aea8b7c852c8f103c796137f0cbc3be7a06fa9cc
53,861
import itertools


def build_full_factorial(factor_count):
    """Builds a full 2^K factorial design.

    The resulting design contains every combination of -1 and +1 for
    the number of factors given.
    """
    return [list(run)
            for run in itertools.product([-1, 1], repeat=factor_count)]
ade9b7d5f834e8f8065610284de80332e7db0117
53,862
def barabasi_diff(k, t, m=2):
    """
    Returns the differential value of a node of degree value k at time t.

    Parameters:
        k : float = Current degree of the node
        t : int = Current time
        m : int = Attachment parameter (default 2)

    Returns:
        float = Difference
    """
    denominator = m * t
    return k / denominator
43579b65783df89f6ecf9bfcfa3159efac4845b8
53,863
import string


def remove_stop_words(word_list):
    """
    Takes a list of words and returns a copy with all stop words removed.

    Stop words are read from ../stop_words.txt (comma-separated), with
    all single ASCII letters appended.
    """
    with open('../stop_words.txt') as handle:
        stop_words = handle.read().split(',')
    # Treat single-letter words as stop words too.
    stop_words.extend(list(string.ascii_lowercase))
    return [word for word in word_list if word not in stop_words]
e516c9b0c0175b3486de817f497ebb201d00da61
53,864
from typing import Dict


def get_input_data(input_section: Dict) -> str:
    """Gets playbook single input item - support simple and complex input.

    Args:
        input_section (dict): playbook input item.

    Returns:
        (str): The playbook input item's value.
    """
    value = input_section.get('value')
    if isinstance(value, str):
        return value
    if not value:
        return ''
    complex_field = value.get('complex')
    if complex_field:
        return f"{complex_field.get('root')}.{complex_field.get('accessor')}"
    return value.get('simple')
a1d7d3fb4d5d3cd0e5ce46f7d1c685495c71981f
53,868
def retrieve_train_test_instances(cases, train_indices, test_indices):
    """Retrieves training and test cases from indices for hpo.

    Parameters
    ----------
    cases : list of dicts, where single dict represents a case
        Cases of the training set of the event log.
    train_indices : list of ints
        Indices of training cases for hpo.
    test_indices : list of ints
        Indices of test cases for hpo.

    Returns
    -------
    train_cases : list of dicts, where single dict represents a case
        Training cases for hpo.
    test_cases : list of dicts, where single dict represents a case
        Test cases for hpo.
    """
    train_cases = [cases[idx] for idx in train_indices]
    test_cases = [cases[idx] for idx in test_indices]
    return train_cases, test_cases
e4309a9c50afa2ac76a9e9439f52af95fccb0c82
53,871
def google_map_view(context):
    """
    Static dependencies (CSS/JS) for the google_map_view gizmo.
    """
    dependencies = (
        'tethys_gizmos/vendor/farbtastic/farbtastic.css',
        'tethys_gizmos/vendor/farbtastic/farbtastic.js',
        'tethys_gizmos/js/tethys_google_map_view.js',
    )
    return dependencies
73a9633938ee2c20d8a0cfbcfe30ab6305e47882
53,873
def create_section_data_block(data_length):
    """Create a block of data to represent a section.

    Creates a list of the given size with every byte set to 0xff,
    because reserved bits are normally set to 1.

    Arguments:
    data_length -- amount of bytes needed for the data block

    Returns:
    A list of bytes of the desired length with 0xff in every byte.
    (The original docstring incorrectly said "zero in every byte".)
    """
    return [0xff] * data_length
cffd8e28e7ad2d3ffd60901493f03fc7ef07741d
53,882
import re


def parse_hgvc_c(hgvs_c):
    """
    Parses HGVS like c.4146T>A and returns a dictionary with the keys
    pos, type, ref, alt.
    """
    # Only coding ("c.") variants with a known change are handled.
    if not hgvs_c.startswith("c.") or "?" in hgvs_c:
        return {}

    # Tokenise into runs of bases/underscore/dot, digit runs, and the
    # literal del/ins markers; separators such as ">" fall between tokens.
    tokens = re.split(r"([[_\.ACGT]+|[0-9]+|del|ins])", hgvs_c[2:])

    position = change_type = ref_base = alt_base = None
    if tokens[4] == ">":
        # Substitution, e.g. 4146T>A.
        change_type = "sub"
        position = tokens[1]
        ref_base = tokens[3]
        alt_base = tokens[5]

    return {
        "pos": position,
        "type": change_type,
        "ref": ref_base or '',
        "alt": alt_base or ''
    }
2938bd1a0e98dd6e6c6bca4b6de132b86ad65a90
53,883
def is_internal_function_name(name: str) -> bool:
    """If the function name is internal.

    Skips underscored functions but keeps special functions such as
    __init__.
    """
    if not name.startswith('_'):
        return False
    is_dunder = name.startswith('__') and name.endswith('__')
    return not is_dunder
e243027dba0651472ea7d498105ff10b22d842cc
53,884
def merge(s, t):
    """Merge dictionary t into s (recursing into dict values); returns s."""
    for key, value in t.items():
        if isinstance(value, dict) and key in s:
            # Both sides have this key and the new value is a dict:
            # merge recursively instead of overwriting.
            s[key] = merge(s[key], value)
        else:
            s[key] = value
    return s
16d5f0e877cb68709ee296bd96aa39afd52580ba
53,886
import errno


def readlocal(path, offset=None, size=-1):
    """
    Wrapper around open(path, 'rb') that returns the contents of the
    file as bytes. Seeks to `offset` first when given (truthy) and reads
    at most `size` bytes (-1 means the whole file). Will rethrow
    FileNotFoundError if it receives an IOError with ENOENT.
    """
    try:
        with open(path, 'rb') as handle:
            if offset:
                handle.seek(offset)
            return handle.read(size)
    except IOError as exc:
        if exc.errno == errno.ENOENT:
            raise FileNotFoundError(exc)
        raise
b4cf8912699fd158b910f0d45c492ae1221fc8f3
53,890
def format_hugo_date(dt):
    """Generate a Hugo formatted datestring from a datetime object.

    The UTC offset is hard-coded to +00:00.
    """
    return dt.strftime("%Y-%m-%dT%H:%M:%S+00:00")
7da182f0187cffaf43a584215b093232c7d0bf17
53,895
def inner(x, y):
    """
    Returns the inner product of two equal-length vectors.
    """
    assert len(y) == len(x)
    return sum(a * b for a, b in zip(x, y))
36567d82c7c482311a40d9dab29b3ec80c772037
53,896
def line_as_comment(line):
    """Change a C code line to a comment by adding the prefix "// ".

    If the last character of the line is a backslash, certain characters
    are appended. This silences some compiler warnings.
    """
    commented = "// " + line
    if commented.endswith("\\\n"):
        commented = commented[:-2] + "\\ //\n"
    return commented
285581c1cf2e6e7762a2227a4fc3a61f91b388bf
53,899
def get_comp_render_range(comp):
    """Return comp's (start, end) render range.

    Whenever the render range is undefined (sentinel value -1000000000)
    fall back to the comp's global start and end.
    """
    comp_attrs = comp.GetAttrs()
    start = comp_attrs["COMPN_RenderStart"]
    end = comp_attrs["COMPN_RenderEnd"]

    # Bug fix: the fallbacks were swapped — start fell back to
    # COMPN_GlobalEnd and end to COMPN_GlobalStart, inverting the range.
    if start == -1000000000:
        start = comp_attrs["COMPN_GlobalStart"]
    if end == -1000000000:
        end = comp_attrs["COMPN_GlobalEnd"]

    return start, end
fe9f6c7522da941932d1957a65d2c8a6241ff1e9
53,902
def censys_ipv4_meta_extraction(raw: dict) -> dict:
    """
    Extracts metadata from Censys IPv4 dicts.

    :param raw: Censys IPv4 dict
    :return: Metadata part of common format dict
    """
    # Fall back to an empty dict when the AS info is missing or None.
    autonomous = raw.get("autonomous_system") or {}
    return {
        "ip": raw["ip"],  # Program should fail if IP is not given
        "as": {
            "number": autonomous.get("asn"),
            "name": autonomous.get("name"),
            "location": autonomous.get("country_code"),
            "prefix": autonomous.get("routed_prefix"),
        },
    }
1760282c9dee363968973e54c9b76410865e1c93
53,906
import collections
import six


def parseColumnDefs(defs, keyOrder=None):
    """
    Translate a few different forms of column definitions into a single common format.

    These formats are accepted for all methods which request column definitions
    (createTable, checkTable, etc)

        list of tuples:  [(name, type, <constraints>), ...]
        dict of strings: {name: type, ...}
        dict of tuples:  {name: (type, <constraints>), ...}
        dict of dicts:   {name: {'Type': type, ...}, ...}

    Returns dict of dicts as the common format.

    Raises Exception on any dict value that is not a sequence, dict, or
    string.
    """
    if keyOrder is None:
        # Positional fields in tuple form map onto these keys, in order.
        keyOrder = ['Type', 'Constraints']

    def isSequence(x):
        # list/tuple check used to distinguish tuple-style definitions.
        return isinstance(x, list) or isinstance(x, tuple)

    def toDict(args):
        # Zip positional args onto keyOrder; extra args beyond keyOrder
        # are silently dropped (the break fires once keyOrder is used up).
        d = collections.OrderedDict()
        for i,v in enumerate(args):
            d[keyOrder[i]] = v
            if i >= len(keyOrder) - 1:
                break
        return d

    # Case 1: list of tuples — first element is the column name, the
    # rest map onto keyOrder.
    if isSequence(defs) and all(map(isSequence, defs)):
        return collections.OrderedDict([(c[0], toDict(c[1:])) for c in defs])

    if isinstance(defs, dict):
        ret = collections.OrderedDict()
        for k, v in defs.items():
            if isSequence(v):
                # dict of tuples: positional fields map onto keyOrder.
                ret[k] = toDict(v)
            elif isinstance(v, dict):
                # dict of dicts: already in common format.
                ret[k] = v
            elif isinstance(v, six.string_types):
                # dict of strings: the string is the column type.
                ret[k] = {'Type': v}
            else:
                raise Exception("Invalid column-list specification: %s" % str(defs))
        return ret
    else:
        raise Exception("Invalid column-list specification: %s" % str(defs))
62625a9b8df80818b6d7596c87130b3a59e0a352
53,909