content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import multiprocessing


def get_n_jobs(n_jobs):
    """Resolve the effective number of parallel jobs.

    Mirrors joblib's PoolManager backend semantics: ``None`` means one
    job, a negative value counts back from the CPU count (so -1 means
    all CPUs), and 0 is rejected as meaningless.
    """
    if n_jobs == 0:
        raise ValueError('n_jobs == 0 in Parallel has no meaning')
    if n_jobs is None:
        return 1
    if n_jobs > 0:
        return n_jobs
    return max(multiprocessing.cpu_count() + 1 + n_jobs, 1)
53fe32e94c771095bdab2601abba1761548f2d4c
48,875
def labels2one_hot(label_list, num_classes):
    """Convert multi-label targets to one-hot vectors.

    ``label_list`` maps an index to either a list of class ids or a
    string such as ``"[1,2]"``; the result maps each index to a
    length-``num_classes`` 0/1 list.
    """
    encoded = {}
    for key, targets in label_list.items():
        if isinstance(targets, str):
            targets = [int(tok) for tok in targets.strip('[]').split(',')]
        encoded[key] = [1 if cls in targets else 0 for cls in range(num_classes)]
    return encoded
45f3b557a5156c8afbb71ffeb113eabbb38da1c8
48,876
import random


def get_split(k, num, X, Y):
    """Randomly split (X, Y) into train and test parts.

    The data is conceptually divided into ``num`` equal batches and
    ``k`` batches' worth of rows are sampled (without replacement) for
    training; all remaining indices form the test set.

    :param k: number of batches used for training
    :param num: total number of batches
    :param X: full feature array (must support fancy indexing)
    :param Y: full label array (must support fancy indexing)
    :return: train_X, train_Y, test_X, test_Y
    """
    batch = int(1.0 * len(X) / num)
    train_idx = random.sample(range(0, len(X)), k * batch)
    test_idx = list(set(range(len(X))) - set(train_idx))
    return X[train_idx], Y[train_idx], X[test_idx], Y[test_idx]
e123847757b5b54b805608e031b5cc8bb6533ec1
48,877
import hashlib


def md5hash(s):
    """Return the hex MD5 digest of the UTF-8 encoding of ``s``.

    Parameters:
    -----------
    s : str
        a string

    Returns:
    --------
    h : str
        hex MD5 hash of 's'
    """
    return hashlib.md5(s.encode("utf-8")).hexdigest()
e0ec8a7c8ed7b9c99cce34a7e08fa5dcadec432a
48,880
def extract_position_relations(qdmr_step):
    """Extract a relation regarding entity positions in a QDMR step.

    Relevant for VQA data.

    Parameters
    ----------
    qdmr_step : str
        String of the QDMR step containing relative position knowledge.
        Either a FILTER or a BOOLEAN step.

    Returns
    -------
    str or None
        String of the positional relation, or None when no positional
        keyword is found.
    """
    # Fix: removed the unreachable `return None` that followed the
    # else-branch; otherwise the keyword chain is unchanged.
    if ' left ' in qdmr_step:
        return 'POS_LEFT_OF'
    elif ' right ' in qdmr_step:
        return 'POS_RIGHT_OF'
    elif (' between ' in qdmr_step) or (' middle of ' in qdmr_step):
        return 'POS_BETWEEN'
    elif (' behind ' in qdmr_step) or (' rear of ' in qdmr_step):
        return 'POS_BEHIND_OF'
    elif (' in ' in qdmr_step and ' front ' in qdmr_step) or \
            (' infront ' in qdmr_step):
        return 'POS_IN_FRONT_OF'
    elif ' touch' in qdmr_step:
        return 'POS_TOUCHING'
    elif ' reflect' in qdmr_step:
        return 'POS_REFLECTING'
    elif (' cover' in qdmr_step) or (' obscur' in qdmr_step) or \
            (' blocking' in qdmr_step) or (' blocked' in qdmr_step) or \
            (' hidden' in qdmr_step) or (' obstruct' in qdmr_step):
        return 'POS_COVERS'
    elif (' near' in qdmr_step) or (' close ' in qdmr_step) or \
            (' closer ' in qdmr_step) or (' closest ' in qdmr_step) or \
            (' next to ' in qdmr_step) or (' adjacent ' in qdmr_step):
        return 'POS_NEAR'
    else:
        return None
ca3ca61ac66419c0fb48ad784a6397a67ba3d653
48,881
def get_module_properties(properties):
    """Collect output-module property names.

    Args:
        properties (list): list of property name strings

    Returns:
        dict: keys ``property``, ``derivative`` and ``contributions``,
        each holding the last matching entry from ``properties``
        (or None when no entry matches).
    """
    module_props = {"property": None, "derivative": None, "contributions": None}
    for name in properties:
        for key in module_props:
            if name.startswith(key):
                module_props[key] = name
    return module_props
8f69335538b738a4a6523e6919b8e5080ab045af
48,885
def get_overlap_ratio(r1, r2):
    """Return the IoU (intersection over union) of two axis-aligned
    rectangles given as dicts with keys 'x', 'y', 'w', 'h'."""
    x_overlap = max(0, min(r1['x'] + r1['w'], r2['x'] + r2['w']) - max(r1['x'], r2['x']))
    y_overlap = max(0, min(r1['y'] + r1['h'], r2['y'] + r2['h']) - max(r1['y'], r2['y']))
    intersection = x_overlap * y_overlap
    union = r1['w'] * r1['h'] + r2['w'] * r2['h'] - intersection
    return float(intersection) / float(union)
3af3c4a7745c602a7e83db233d04441a3c6bcb29
48,886
import argparse


def parse_args():
    """Parse the input arguments, use '-h' for help"""
    parser = argparse.ArgumentParser(
        description='Estimate allele lengths and find outliers at STR loci.')
    parser.add_argument(
        '--pvalfile', type=str, required=True,
        help='.pval for all STRs in a sample. Contains STR locus and its corresponding pval.')
    # The --out option is currently disabled:
    # parser.add_argument(
    #     '--out', type=str, required=True,
    #     help='Prefix for output files (suffix will be STRs.tsv) (default: %(default)s)')
    return parser.parse_args()
7ec3dea5326703b18f93e8b0e50bd48d17d4a9bf
48,887
import os


def scriptname(path):
    """Derive a script name from a module file path: the basename
    without its extension, with underscores turned into dashes."""
    base, _ext = os.path.splitext(os.path.basename(path))
    return base.replace('_', '-')
edf8b22a1e9a39c830a8cece2cc9487172fed484
48,888
def to_int_float_or_string(s):
    """Coerce a string to int or float when possible.

    Integer-looking strings come back as ``int``; numeric strings
    containing a literal ``.`` (or exponent notation) come back as
    ``float``; anything else is returned unchanged.

    >>> to_int_float_or_string("3")
    3
    >>> to_int_float_or_string("-10.0")
    -10.0
    >>> to_int_float_or_string("1.4e7")
    14000000.0
    >>> to_int_float_or_string("A3")
    'A3'
    """
    try:
        as_int = int(s)
        # A literal '.' means the caller wrote a float; fall through.
        if "." not in s:
            return as_int
    except (TypeError, ValueError):
        pass
    try:
        return float(s)
    except (TypeError, ValueError):
        return s
18fbc417edb85219f3fbd2ae8e5eb8aa4df61d87
48,889
def get_urs_pass_user(netrc_file):
    """Retrieve the URS username and password from a .netrc file.

    :param netrc_file: path to a .netrc-format file containing an
        entry for urs.earthdata.nasa.gov
    :return: [tuple] (user_name, password)
    """
    with open(netrc_file, 'r') as f:
        words = f.read().split()
    # Locate the urs.earthdata.nasa.gov machine entry.
    url = 'urs.earthdata.nasa.gov'
    url_loc = words.index(url)
    # Bug fix: `words[url_loc:].index(...)` is relative to the slice,
    # so it must be offset by url_loc before indexing `words`. The old
    # `+ 2` only worked when the URS entry was the first machine in
    # the file.
    user_name = words[url_loc + words[url_loc:].index('login') + 1]
    pword = words[url_loc + words[url_loc:].index('password') + 1]
    return user_name, pword
a72938a04c5d0b50608446d87bbbd819cd9c9607
48,890
def is_new_install(current_install):
    """Check to see if this is a new install or an upgrade.

    A falsy ProductInstanceID in the install config means nothing was
    installed before.
    """
    return not current_install.config["ProductInstanceID"]
b9a9783fcf0a36ac1ba7f553e2005e7df401691c
48,891
def merge_left(b):
    """Merge the board left (2048-style).

    Args:
        b (list): two dimensional board to merge

    Returns:
        list: a new board where each row has its non-zero values
        collapsed toward the left (equal adjacent pairs combined once)
        and is zero-padded back to its original width.
    """
    def _merge_row(values):
        # Iteratively collapse equal adjacent pairs, scanning left to
        # right; each value participates in at most one merge.
        out = []
        i = 0
        while i < len(values):
            if i + 1 < len(values) and values[i] == values[i + 1]:
                out.append(2 * values[i])
                i += 2
            else:
                out.append(values[i])
                i += 1
        return out

    board = []
    for row in b:
        merged = _merge_row([v for v in row if v != 0])
        board.append(merged + [0] * (len(row) - len(merged)))
    return board
96af9931dffee9193c565efbe89b463e93921151
48,892
def getROPFlux(spc_rop_dict, species_string, rxn_index):
    """Return the flux array for a given species and reaction.

    ``spc_rop_dict`` maps a species name to a list of (header, flux)
    tuples, where the header embeds the reaction number as
    ``...Rxn#<n>_...``. Returns [] when nothing matches.
    """
    for header, flux in spc_rop_dict.get(species_string, []):
        rxn_num = int(header.split("Rxn#")[1].split('_')[0])
        if rxn_num == rxn_index:
            return flux
    return []
6a5c44c0bfeca44aaf42fbe600a768e55a261d85
48,893
import os


def pad_lines(lines, length):
    """Left pad set of lines with spaces.

    Splits on the platform line separator and prefixes every line
    (including the first) with a newline plus ``length`` spaces.
    """
    indent = os.linesep + ' ' * int(length)
    return indent + indent.join(lines.split(os.linesep))
a2daedddcaad1eee887077d8a7607046760fabf8
48,894
import typing


def filesDiffer(a: typing.List[str], b: typing.List[str]) -> bool:
    """
    Compares two files for meaningful differences.

    Traffic Ops headers ("# DO NOT EDIT" / "# TRAFFIC OPS NOTE:") and
    blank lines are stripped before comparison; trailing whitespace is
    ignored.

    :param a: The contents of the first file, as a list of its lines
    :param b: The contents of the second file, as a list of its lines
    :returns: :const:`True` if the files have any differences,
        :const:`False` otherwise
    """
    def _clean(lines):
        kept = []
        for line in lines:
            stripped = line.rstrip()
            if stripped and not line.startswith("# DO NOT EDIT") \
                    and not line.startswith("# TRAFFIC OPS NOTE:"):
                kept.append(stripped)
        return kept

    return _clean(a) != _clean(b)
41f01627e0b51841a116f65c1cc3fc2fed14e91d
48,895
def multiply_1(factor: int):
    """Example of an annotated argument with casting and early checks.

    Prints the incoming data type, casts the value to ``int``,
    validates the cast with an assert, then scales by 23.233223 and
    truncates the product back to an integer.

    :param factor: value convertible to int
    :return: int(factor * 23.233223)
    """
    # Show the runtime data type of the incoming value.
    print(type(factor))
    # Casting converts the value from one data type to another.
    cast_value = int(factor)
    # Fail early if the cast did not produce an int.
    assert type(cast_value) == int
    # `return` stops the function and hands the value back to the caller.
    return int(cast_value * 23.233223)
5bbd5ec6c7636775855a9e3c63a1ee30d6eabb95
48,896
def get_util_shape(row):
    """Get utility term shape based on ROI.

    Parameters
    ----------
    row : pandas.core.series.Series
        Row of func_df DataFrame.

    Returns
    -------
    str
        'linear' when row['Roi'] mentions chest or rib (case
        insensitive), otherwise 'linear_quadratic'.
    """
    roi = row['Roi'].lower()
    if 'chest' in roi or 'rib' in roi:
        return 'linear'
    return 'linear_quadratic'
4ebb50dd0991f3edda7f33ad17fb3a3dfaf39de3
48,897
from typing import Optional from typing import Match import re def _replace_html_saving_export_path_by_doc_path(code: str) -> str: """ Replace html saving interace's export path argument value in the code by document path. Parameters ---------- code : str Target Python code. Returns ------- code : str Replaced code. html saving interface argument, for example, `save_overall_html` `dest_dir_path` will be replaced by './docs_src/_static/<original_path>/'. """ match: Optional[Match] = re.search( pattern=( r"save_overall_html\(.*?dest_dir_path='(.+?)'\)" ), string=code, flags=re.MULTILINE | re.DOTALL) if match is None: return code original_path: str = match.group(1) while original_path.startswith('.'): original_path = original_path.replace('.', '', 1) if original_path.startswith('/'): original_path = original_path.replace('/', '', 1) if not original_path.endswith('/'): original_path += '/' code = re.sub( pattern=( r"(save_overall_html\(.*?dest_dir_path=).+?\)" ), repl=rf"\1'./docs_src/source/_static/{original_path}')", string=code, count=1, flags=re.MULTILINE | re.DOTALL) return code
0bfcfdd234ae72c70feafb1efd9799387b75c8f1
48,900
from typing import List


def body_25_to_coco_18(keypoints: List[float]) -> List[float]:
    """Convert OpenPose BODY_25 keypoints to COCO_18 ordering.

    Mapping per
    https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/doc/output.md
    — COCO_18 lacks the MidHip (#8) and the toe/heel/background points
    (#19-#25), so those (x, y, score) triplets are dropped.
    """
    # 8 * 3 == 24, 9 * 3 == 27, 19 * 3 == 57
    return keypoints[:24] + keypoints[27:57]
6945a1057e94fbe306810e6b1aa3f7a5dd1ad6df
48,901
def factory_register(SuperClass, ClassName):
    """Register a concrete class under ``ClassName`` in the factory of
    ``SuperClass``; intended to be used as a class decorator."""
    def decorator(Class):
        # Lazily create the factory dict on the superclass.
        if not hasattr(SuperClass, "_factory"):
            SuperClass._factory = {}
        SuperClass._factory[ClassName] = Class
        Class._factory_type = ClassName
        return Class
    return decorator
891fd2419fbb545507f3fe79ad1ad27e28cdf2d0
48,903
def _split_theta(theta, ltot): """split pairwise interaction parameter matrix into components""" mat_q = theta[:ltot, :ltot] mat_r = theta[ltot:, :ltot] mat_lbda = -theta[ltot:, ltot:] return mat_q, mat_r, mat_lbda
fd09647ccce7939d214caa3d0c7baa77577d3b39
48,904
import sys


def is_not_run_from_api():
    """Check whether the framework was started from the API.

    Returns:
        False when '--start-api' was passed on the command line,
        otherwise True (to prevent any alert when run from the API).
    """
    return '--start-api' not in sys.argv
d1a8cd8250decbe8ad0b75dc749ff0d5b2ae7466
48,905
def _type_error(actual, expected): """Returns a ValueError that the actual type is not the expected type.""" msg = 'invalid type: %s is not in %s' % (actual.__name__, [t.__name__ for t in expected]) return ValueError(msg)
fc9b09c6bd684a1132ca7c0a792aca9da37be2e9
48,906
def extract_callback_args(args, kwargs, name, type_):
    """Extract arguments for a callback from a name and type.

    A named keyword wins; a single unwrapped named value is returned
    wrapped in a list. Otherwise leading positional arguments of
    ``type_`` are popped off ``args`` (mutating it in place).
    """
    found = kwargs.get(name, [])
    if found:
        if not isinstance(found, (list, tuple)):
            # accept a single item, not wrapped in a list, for any of
            # the categories as a named arg
            return [found]
    else:
        # consume matching leading positionals, mutating args in place
        while args and isinstance(args[0], type_):
            found.append(args.pop(0))
    return found
9ed14a40dee7799d49f0001818c20e9e2935c69b
48,908
def truncate_line(line: str, length: int = 80) -> str:
    """Truncate ``line`` to ``length`` characters (ellipsis-terminated
    when cut) and strip any newline characters."""
    if len(line) > length:
        line = line[: length - 3] + "..."
    return line.replace("\n", "")
1bc7e34b85afc59ff5afcfa9a57b575aa3d032fe
48,909
import torch


def dd_qq(qn, zs, zp):
    """Compute charge separations from Slater exponents.

    qn: principal quantum number for the valence shell, shape (n_atoms,)
    zs: zeta_s, shape (n_atoms,)
    zp: zeta_p, shape (n_atoms,)

    Returns:
        dd: dipole charge separation
        qq: quadrupole charge separation
    """
    sqrt3 = torch.sqrt(torch.tensor(3.0, dtype=zs.dtype, device=zs.device))
    dd = (2.0 * qn + 1.0) * (4.0 * zs * zp) ** (qn + 0.5) \
        / (zs + zp) ** (2.0 * qn + 2.0) / sqrt3
    qq = torch.sqrt((4.0 * qn ** 2 + 6.0 * qn + 2.0) / 20.0) / zp
    return dd, qq
a474b6bfa5fc485289f7e102b7800359c603796c
48,911
def track_point(start, direction, z):
    """Return the segment coordinates for a point along the `z` axis.

    Args:
        start (tuple): start coordinates
        direction (tuple): direction coordinates
        z (float): `z` coordinate corresponding to the `x`, `y`
            coordinates

    Returns:
        tuple: the (x, y) pair of coordinates for the segment at `z`
    """
    scale = (z - start[2]) / direction[2]
    return start[0] + scale * direction[0], start[1] + scale * direction[1]
72313aa592d8014641d8533acbe40661142df1c0
48,912
def mod(n, size):
    """Wrap a cell index to the opposite edge of a grid of ``size``
    cells (toroidal neighbourhood lookup)."""
    last = size - 1
    if n < 0:
        return last
    if n > last:
        return 0
    return n
d035bb05f5553671ac35abfcfdc1d9914482721d
48,913
def _parse_answers(answers): """Parse the raw answers to readable strings. The reason this exists is because of YAML's ambiguous syntax. For example, if the answer to a question in YAML is ``yes``, YAML will load it as the boolean value ``True``, which is not necessarily the desired answer. This function aims to undo that for bools, and possibly for numbers in the future too. Parameters ---------- answers : `iterable` of `str` The raw answers loaded from YAML. Returns ------- `tuple` of `str` The answers in readable/ guessable strings. """ ret = [] for answer in answers: if isinstance(answer, bool): if answer is True: ret.extend(["True", "Yes", "On"]) else: ret.extend(["False", "No", "Off"]) else: ret.append(str(answer)) # Uniquify list seen = set() return tuple(x for x in ret if not (x in seen or seen.add(x)))
bd6b3986ebc50244c9db93b190e2c1fe62454070
48,915
import os


def findFirst(thisDir, targetFile, trace=False):
    """
    Depth-first search at and below ``thisDir`` for a file or dir
    named ``targetFile``; returns the full path of the first match, or
    None. Like find.find in the standard lib, but without name
    patterns, following Unix links, stopping at the first match.
    ``targetFile`` must be a simple base name, not a dir path.
    """
    if trace:
        print('Scanning', thisDir)
    for name in os.listdir(thisDir):
        if name in (os.curdir, os.pardir):
            continue  # skip . and .., just in case
        if name == targetFile:
            return os.path.join(thisDir, targetFile)  # stop at this one
        full = os.path.join(thisDir, name)
        if os.path.isdir(full):  # recur into subdirs
            found = findFirst(full, targetFile, trace)
            if found:
                return found
20604e7e63bd146e17c649e6f255278b08915adc
48,917
import json def _load_query(query_file: str) -> str: """ Load the advanced query from a json file. :param query_file: path to the json file. :return: advanced query in json string format (single line). """ with open(query_file, 'r', encoding='utf-8') as file_pointer: query = json.load(file_pointer) # return single line json string return json.dumps(query)
eebc23785499010b780c1a671ef12fad12fe3c37
48,918
def create_scatter_legend(axi, color_labels, class_names, show=False, **kwargs):
    """Generate legend handles and labels for a class-colored scatter.

    Parameters
    ----------
    axi : object like :class:`matplotlib.axes.Axes`
        The axes we will add the legend for.
    color_labels : dict of objects like :class:`numpy.ndarray`
        Colors for the different classes.
    class_names : dict of strings or None
        Names for the classes; keys fall back to themselves.
    show : boolean, optional
        If True, attach the legend to ``axi`` here.
    kwargs : dict, optional
        Additional arguments passed to the scatter method, for a
        consistent styling.

    Returns
    -------
    patches : list of objects like :class:`matplotlib.artist.Artist`
        The items we will create a legend for.
    labels : list of strings
        The labels for the legend.
    """
    patches = []
    labels = []
    for key, color in color_labels.items():
        # Empty scatter: produces a handle with the right styling only.
        patches.append(axi.scatter([], [], color=color, **kwargs))
        labels.append(class_names.get(key, key) if class_names is not None else key)
    if show:
        axi.legend(patches, labels, ncol=1)
    return patches, labels
3e7551f5b9f3b7f74181aa75adfe092d5389ddbb
48,919
import re


def IgnoreVariables(name):
    """Strip a trailing gyp variable expansion from a name.

    E.g. map ppapi_cpp<(nacl_ppapi_library_suffix) to ppapi_cpp.
    """
    # Fix: the old pattern was a non-raw string containing '\<', an
    # invalid escape sequence (SyntaxWarning on modern Python). '<'
    # needs no escaping in a regex; use a raw string.
    return re.sub(r'<\([^)]*\)$', '', name)
795399da9fe06c112349d49f2205ee7b20245fad
48,920
import requests
import pandas


def xbrl_query(access_token, queryparameters, baseapiurl='https://api.xbrl.us/api/v1/report/search?'):
    """Query the XBRL US API.

    See https://xbrl.us/home/use/xbrl-api/ and
    http://files.xbrl.us/documents/XBRL-API-V1.4.pdf.

    Inputs:
        access_token: Access token string generated by the
            xbrl_apikey function.
        queryparameters: Dict mapping API parameter names to values,
            e.g. {'report.entity-name': "APPLE INC.",
                  'fields': "report.id,report.entity-name,..."}.
        baseapiurl: API request URL up to and including the "?", e.g.
            - 'https://api.xbrl.us/api/v1/report/search?'
            - 'https://api.xbrl.us/api/v1/fact/search?'

    Outputs:
        xbrl_queryoutput: pandas DataFrame built from the 'data' field
        of the JSON response.

    Raises:
        ValueError: when the HTTP status is not 200, or when the
        response JSON carries no 'data' field.
    """
    # Fix: build 'key=value&key=value' with a comprehension instead of
    # renaming the keys of the caller's dict in place (the previous
    # implementation mutated `queryparameters` as a side effect).
    queryurl = '&'.join('{}={}'.format(key, value)
                        for key, value in queryparameters.items())
    dataquery = baseapiurl + queryurl
    # Bearer-token authentication header.
    headers = {"Authorization": "Bearer " + access_token}
    dataresponse = requests.get(url=dataquery, headers=headers)
    if dataresponse.status_code == 200:
        try:
            return pandas.DataFrame.from_dict(dataresponse.json()['data'])
        except Exception:
            raise ValueError(str(dataresponse.json()))
    print(dataresponse.text)
    raise ValueError(str(dataresponse.status_code) + ": Error in Response")
d0cdc733381f9f957060c2285012e7763d85cae0
48,921
def is_float(potential_float: str) -> bool:
    """
    Check if potential_float parses as a valid float.

    Returns
    -------
    is_float : bool

    Examples
    --------
    >>> is_float('123')
    True
    >>> is_float('-10.0')
    True
    >>> is_float('1.4e7')
    True
    >>> is_float('a')
    False
    >>> is_float('0x8')
    False
    """
    try:
        float(potential_float)
    except ValueError:
        return False
    return True
19d907f3cba743f3b6a9867c7c29ad505f6a26e4
48,923
import types


def tramp(gen, *args, **kwargs):
    """Trampoline a chain of generators: keep advancing while the
    current value is itself a generator, returning the final value.

    Copyright, 2012, Alex Beal
    """
    current = gen(*args, **kwargs)
    while isinstance(current, types.GeneratorType):
        current = next(current)
    return current
85c55eb2bfefe725e1286c99720a129aabfafb35
48,924
def styblinski_tang(ind):
    r"""Styblinski-Tang function defined as:
    $$ f(x) = 1/2 \sum_{i=1}^{n} x_i^4 - 16 x_i^2 + 5 x_i $$
    with a search domain of $-5 < x_i < 5, 1 \leq i \leq n$.

    Returns a one-element tuple (DEAP fitness convention).
    """
    total = sum(x ** 4. - 16. * x ** 2. + 5. * x for x in ind)
    return total / 2.,
9e060448b02558e33e7b6b2aa74ce1a2806f3c0b
48,925
from typing import Counter


def count_fields(targets, field):
    """Count occurrences of each value under ``field`` across a list
    of dicts; missing or falsy values are bucketed under 'None'."""
    return Counter(target.get(field) or 'None' for target in targets)
753ce59bb12b37994f76f69bca3b93671e0e0aca
48,926
import tempfile


def mkdtemp():
    """
    Create a fresh (empty) temporary directory and return its path.

    Fix: the previous implementation listed the new directory and
    called ``os.remove`` on the bare file names — which would have
    targeted the current working directory, not the temp dir — and was
    dead code anyway, since ``tempfile.mkdtemp`` always returns a
    newly created, empty directory.
    """
    return tempfile.mkdtemp()
279a3e314b1f629906b11ea499416cc00864f9f0
48,927
def get_contact_line(contact_point, elipse_axies):
    """Return a short tangent-line segment at a contact point on an
    ellipse.

    Args:
        contact_point (tuple): (x, y) contact point on the ellipse.
        elipse_axies (tuple): (top, right, bottom, left) axis points
            of the ellipse, each an (x, y) pair.

    Returns:
        tuple: ((x1, y1), (x2, y2)) endpoints of the tangent segment,
        sampled 50 pixels before and after the contact x coordinate.
    """
    # bottom/left axis points are not needed by the tangent formula
    # (the original bound them to unused locals x2/y2).
    circle_top, circle_right, _circle_bottom, _circle_left = elipse_axies

    # With the contact point written as (x4, -y4), the tangent line is
    #   (x4 - x3)(x - x3)/(x1 - x3)^2 + (-y4 + y1)(y + y1)/(-y3 + y1)^2 = 1
    # which, solved for y, gives y = left_eq(x) * right_eq - y1.
    x1 = circle_right[0]
    y1 = -circle_right[1]
    x3 = circle_top[0]
    y3 = -circle_top[1]
    x4 = contact_point[0]
    y4 = -contact_point[1]

    def tangent_y(x):
        # y on the tangent line at horizontal position x.
        left_eq = 1 - (x4 - x3) * (x - x3) / (x1 - x3) ** 2
        right_eq = (-y3 + y1) ** 2 / (-y4 + y1)
        return int(left_eq * right_eq - y1)

    contact_x = contact_point[0]
    before_x = contact_x - 50  # arbitrary sample, 50 px before contact
    after_x = contact_x + 50   # arbitrary sample, 50 px after contact
    return (before_x, tangent_y(before_x)), (after_x, tangent_y(after_x))
447716988786182031e462ab41a73e4c94c15692
48,928
from io import StringIO


def encode_multipart(data):
    """Encode the given data to be used in a multipart HTTP POST.

    ``data`` maps field names to either strings or (filename, content)
    tuples for file uploads; a list groups several entries under one
    name. Returns ``(body, boundary)``.

    This code is based on distutils.command.upload.

    Fixes: isinstance() instead of ``type(x) != type([])`` /
    ``type(x) is tuple`` checks, and no more shadowing of the list by
    its own loop variable (``for value in value``).
    """
    # Build up the MIME payload for the POST data
    boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
    sep_boundary = '\r\n--' + boundary
    end_boundary = sep_boundary + '--'
    body = StringIO()
    for key, values in data.items():
        # handle multiple entries for the same name
        if not isinstance(values, list):
            values = [values]
        for value in values:
            if isinstance(value, tuple):
                filename_part = '; filename="%s"' % value[0]
                value = value[1]
            else:
                filename_part = ""
            body.write(sep_boundary)
            body.write('\r\nContent-Disposition: form-data; name="%s"' % key)
            body.write(filename_part)
            body.write("\r\n\r\n")
            body.write(value)
    body.write(end_boundary)
    body.write("\r\n")
    return body.getvalue(), boundary
f9c5d6518776117eddd2250fbcc21c5617650ef4
48,929
def clean_path(path):
    """Normalize a site path: drop a trailing index.html and make sure
    the path both starts and ends with '/'."""
    path = path.split("index.html")[0]
    if not path.startswith("/"):
        path = "/" + path
    return path if path.endswith("/") else path + "/"
55360f0e0729b9cb0510308f5e5be32a404f1e70
48,930
from typing import Dict


def get_violation_details_consent_table(row: Dict) -> Dict:
    """Build the json-file entry for a violation when only the consent
    table is used, renaming consent-table columns to output keys."""
    field_map = [
        ("visit_id", "visit_id"),
        ("site_url", "site_url"),
        ("cmp_type", "cmp_type"),
        ("name", "consent_name"),
        ("domain", "consent_domain"),
        ("purpose", "purpose"),
        ("label", "cat_id"),
        ("cat_name", "cat_name"),
        ("cookiebot_type_id", "type_id"),
        ("cookiebot_type_name", "type_name"),
        ("expiry", "consent_expiry"),
    ]
    return {out_key: row[src_key] for out_key, src_key in field_map}
6399605d5a00a6a6825b4a545950afdb9e1f0903
48,933
import re def _extract_subcats(pattern, categs): """Extract the category names matching `pattern`.""" return {cat for cat in categs if re.match(pattern, cat)}
1092e247b2fdd3290759ceedb309691ef640058b
48,934
def is_lineage_match(lin_a, lin_b, rank):
    """
    Check whether two lineages match down to the given rank.

    Returns 1 when the entries at ``rank`` are equal, 0 otherwise
    (including when ``rank`` is never reached).
    """
    for entry_a, entry_b in zip(lin_a, lin_b):
        # The lineages must be aligned rank-for-rank.
        assert entry_a.rank == entry_b.rank
        if entry_a.rank == rank:
            return 1 if entry_a == entry_b else 0
    return 0
b05833b8ef74fce3ac9f065ef605e3ca5a8f2696
48,935
def my_key(t):
    """
    Sort key for (song, artist) tuples: by artist name, then by song.
    Comment/uncomment the alternative return statements to test a
    different ordering logic.
    """
    # return t[1]          # sort by artist names of the songs
    return t[1], t[0]      # sort by artist names, then by the songs
    # return len(t[0])     # sort by length of the songs
    # return t[0][-1]      # sort by last character of the songs
ae416506de1820290d97d558ff899ba930d68705
48,937
def _json_add_classification_filter(json, classification, equality="equals"): """ Add classification Filter element and return """ limits = 'Classification[{0}:{0}]'.format(classification) if equality == 'max': limits = 'Classification[:{0}]'.format(classification) json['pipeline'].insert(0, { 'type': 'filters.range', 'limits': limits }) return json
31c1123e4b3dc15d907f711aa4575ee001ed1975
48,938
import sys


def pypdfChr(c):
    """
    Abstracts the conversion from a single byte to the corresponding
    ASCII character across Python 2.7.x and 3: on Python 2 indexing a
    byte string already yields a 1-char string, on Python 3 it yields
    an int that must go through chr().
    """
    return c if sys.version_info[0] < 3 else chr(c)
26fb5f99c40d68b2035ab005f5f1c1cb1a6ee7ab
48,939
def reshape_matrix(array, y, x):
    """Reshape a matrix whose rows are vectorized frames into a cube
    of shape (n_frames, y, x)."""
    n_frames = array.shape[0]
    return array.reshape(n_frames, y, x)
46e112ed891f2afced6340f38945a8c50ab77c52
48,940
def parse_query(query_to_parse):
    """
    Normalize a comma- or space-separated query into filter terms.

    Positional CLI arguments arrive as a list (joined and re-split to
    avoid formatting issues); strings passed from pssh.py are simply
    split. Any '--region=' terms are pulled out and returned
    separately as a comma-joined string.

    Returns (parsed_query, parsed_regions).
    """
    if isinstance(query_to_parse, list):
        raw = ','.join(query_to_parse)
    else:
        raw = query_to_parse.replace(' ', ',')
    terms = raw.split(',')
    # Pick up any passed --region query from pssh.py
    parsed_query = [t for t in terms if not t.startswith('--region=')]
    regions = [t.split('=')[1] for t in terms if t.startswith('--region')]
    return parsed_query, ','.join(regions)
edd1cb6b42cd895a2eeb5f4f024dc1f2c92743af
48,941
def str_has_one_word(search_str):
    """
    Return True when the search string consists of exactly one
    whitespace-delimited word.

    >>> str_has_one_word(r"test* 12?")
    False
    >>> str_has_one_word(r"test**")
    True
    """
    return len(search_str.split()) == 1
203d6468d01b17216ffdc89dcf6e40d44b8a3b77
48,942
import pickle


def read(in_dir):
    """Load and return a pickled object from the file at ``in_dir``.

    Fix: a context manager guarantees the file handle is closed even
    when unpickling raises (the original leaked the handle on error).
    """
    with open(in_dir, 'rb') as pickle_in:
        return pickle.load(pickle_in)
1e4281c90dd9a3409d8b26339e6282d80d12d3b4
48,944
from typing import Union


def tab(text: Union[list, str], pad=" ") -> Union[list, str]:
    """Prefix every line with ``pad``. str in, str out; list in,
    list out."""
    if isinstance(text, str):
        return "\n".join(pad + line for line in text.split("\n"))
    return [pad + line for line in text]
bc6e20dfa9ed51b0e8058020feafe92cd1bdcbe8
48,946
def is_final_id(id_path, pda, acceptance):
    """Helper for final_id --- decide whether an ID is accepting.

    ACCEPT_F: the input is consumed and the state is final.
    ACCEPT_S: the input is consumed and the stack is empty.
    """
    (state, remaining_input, stack) = id_path[0]
    if acceptance == "ACCEPT_F":
        return remaining_input == "" and state in pda["F"]
    assert acceptance == "ACCEPT_S"
    return remaining_input == "" and stack == ""
8b93568a61f0470813a237f6cde17ff15f701f9d
48,948
import re


def find_text(text):
    """Return the first substring enclosed in double quotes.

    Raises IndexError when ``text`` contains no quoted substring.
    """
    return re.findall(r'\"(.+?)\"', text)[0]
55238d27db4c3a8b682e4616b253d63db1af38fd
48,949
def ser(predicted, ground_truth, pass_ids):
    """Segment error rate.

    A segment starts at every ground-truth tag not in ``pass_ids``
    (i.e. a tag that opens a new span) and extends over the following
    "inside" tags. A segment is correct only when the prediction
    matched the ground truth from its start until the next segment
    begins.

    Args:
        predicted (list): predicted tag ids, aligned with ground_truth
        ground_truth (list): reference tag ids
        pass_ids (list): list of ids which do not start a new tag
            (inside)

    Returns:
        correct (int): number of correctly predicted segments
        count (int): total number of ground-truth segments
    """
    count = 0
    correct = 0
    # Whether the segment currently being scanned is still an exact
    # match; a finished segment is credited when the NEXT one starts.
    is_correct = False
    for i, _pr in enumerate(predicted):
        _gt = ground_truth[i]
        if _gt not in pass_ids:
            # A new ground-truth segment starts here.
            count += 1
            if _pr not in pass_ids:
                # The previous segment ended cleanly at a predicted
                # boundary; credit it if it matched throughout.
                correct += 1 if is_correct else 0
            is_correct = _pr == _gt
        if _gt != _pr:
            # Any mismatch invalidates the current segment.
            is_correct = False
    if is_correct:
        # Credit the final, still-open segment.
        correct += 1
    return correct, count
88b1ecd2d2a396be5fc78463b6f266573122697f
48,950
import torch


def to_one_hot(labels, num_sample, num_classes):
    """Convert a (num_sample, 1) label tensor to one-hot encoding."""
    one_hot = torch.zeros(num_sample, num_classes)
    return one_hot.scatter_(1, labels.long(), 1)
e9f60cd6ad91c744e77b88f430c11bc5513ceb8a
48,951
import uuid


def uuid_unparse(uuid_t):
    """Convert raw bytes from a uuid_t to a uuid.UUID.

    uuid_t must be a sequence of 16 single-byte c_char values.

    Fix: each byte must render as exactly two hex digits ('%02x');
    the old '%x' dropped leading zeros for bytes < 0x10, producing a
    malformed (too short) UUID string. Also dropped the pointless
    ``str.format(...)`` wrapper around the %-formatted string.
    """
    data = ["%02x" % ord(c) for c in uuid_t]
    return uuid.UUID("".join(data))
6cef476573a883e0b59c90c8d8960fe96efaaf1f
48,953
def index(i: int) -> str:
    """Render ``i`` as a bracketed index, or '' when i is -1."""
    if i == -1:
        return ""
    return "[{}]".format(i)
8d602550214cf15b836e791d5785a65707340c35
48,955
from pathlib import Path def _project_root(): """ Returns module root """ return Path(__file__).parent.parent
f60fbe39426743fdf513e8531c898d36083c3ccf
48,956
def user_exist(username):
    """
    Check whether a user exists in the "db" file.

    Each line of the file is '@'-separated with the username first.

    Fix: the original returned False inside the loop as soon as the
    FIRST line did not match, so only the first user in the file could
    ever be found; now every line is scanned before giving up.

    :param username: user name to look up
    :return: True if the user exists, otherwise False
    """
    with open("db", "r", encoding="utf-8") as f:
        for line in f:
            if username == line.strip().split("@")[0]:
                return True
    return False
0040f75562103397f7d5ad9e0e4d3ba3e2d7b11b
48,957
import os


def move_files_to_folders(files, CWD):
    """Move each file into its date-named subfolder of *CWD*.

    Args:
        files: sequence of (filename, date_string) tuples, as produced
            by create_file_list(); the date folder must already exist.
        CWD: directory containing both the files and the date folders.

    Returns:
        The number of entries in *files*.

    Fix: the original wrapped os.rename in ``try/except Exception:
    raise``, which re-raised unchanged and added nothing; any OSError
    now simply propagates to the caller exactly as before.
    """
    for name, date in files:
        os.rename(os.path.join(CWD, name), os.path.join(CWD, date, name))
    return len(files)
78658879b1e6499a1dfc8972c7bc4e8fd5a39793
48,958
def iou(box1, box2):
    """Intersection-over-union of two (x_min, y_min, width, height) boxes.

    Both boxes must have strictly positive area; returns 0 when the
    boxes do not overlap.
    """
    x1, y1, w1, h1 = box1
    x2, y2, w2, h2 = box2
    assert w1 * h1 > 0
    assert w2 * h2 > 0
    left = max([x1, x2])
    right = min([x1 + w1, x2 + w2])
    top = max([y1, y2])
    bottom = min([y1 + h1, y2 + h2])
    if right <= left or bottom <= top:
        return 0
    inter = (right - left) * (bottom - top)
    union = w1 * h1 + w2 * h2 - inter
    return inter / union
4de8ce8314c1833d56fcd81a254f65d2d8a02df9
48,959
import torch


def extract_tensors(from_this):
    """Split the dict *from_this* into (tensor pairs, everything else).

    Returns two dicts: the first holds every key whose value is exactly
    a torch.Tensor (subclasses excluded, matching ``type(...) is``),
    the second holds all remaining key/value pairs.
    """
    tensors = {}
    others = {}
    for key, value in from_this.items():
        if type(value) is torch.Tensor:
            tensors[key] = value
        else:
            others[key] = value
    return tensors, others
d557ab786e6af935a1b6f093d2257e32dae2357c
48,960
from datetime import datetime


def Date(year, month, day):
    """Construct an object holding a date value (DB-API factory style)."""
    value = datetime(year, month, day)
    return value
1994d9f8720bf19c9b7493ec2164bc9e74d1a72a
48,961
import six
import re


def isidentifier(s):
    """
    Check whether the given string can be used as a Python identifier.
    """
    # Python 2 has no str.isidentifier(); approximate it with the
    # classic [A-Za-z_][A-Za-z0-9_]* pattern.  Note this returns the
    # (truthy) match object rather than a strict bool.
    if six.PY2:
        return re.match("[_A-Za-z][_a-zA-Z0-9]*$", s)
    # Python 3: defer to the built-in check (a real bool, and it also
    # accepts non-ASCII identifier characters, unlike the regex above).
    return s.isidentifier()
7769d6b93cd2a49bbd104558c354c60d8d27530d
48,962
def L_shaped_context(image, y, x):
    """Collect the L-shaped causal context around pixel (y, x).

    Returns [up-right, up-left, up, left].  Out-of-bounds neighbours
    default to 0xFFFFFFFF, except the two diagonals, which fall back to
    0 when their column is out of range.  The most important context
    symbol, 'left', deliberately comes last.
    """
    OUT_OF_BOUNDS = 0xFFFFFFFF
    up_right = OUT_OF_BOUNDS
    up_left = OUT_OF_BOUNDS
    up = OUT_OF_BOUNDS
    left = OUT_OF_BOUNDS
    if y > 0:
        up = image[y - 1][x]
        up_left = image[y - 1][x - 1] if x > 0 else 0
        up_right = image[y - 1][x + 1] if x < image.shape[1] - 1 else 0
    if x > 0:
        left = image[y][x - 1]
    return [up_right, up_left, up, left]
84a7a298b43ffddf697986f2f9af6402990b832c
48,963
import os


def get_luminosity_overview(command):
    """Fetch luminosity information from the DB.

    Retrieves luminosity information from the LumiDB, as requested by
    the user.  Only works in overview mode, since the totals are always
    extracted from the LAST line of the command's output.

    Args:
        command: the command line to execute to retrieve information
            from the DB.

    Returns:
        A tuple of floats (delivered_luminosity, recorded_luminosity)
        in pb-1.
    """
    # NOTE(review): os.popen + eval executes the command and evaluates
    # whatever its last output line contains as a Python literal tuple.
    # Safe only if `command` and its output are fully trusted — never
    # pass user-controlled input here.
    pipe = os.popen(command)
    lumi_line = eval(pipe.readlines()[-1])
    # Fields 2 and 4 hold delivered/recorded luminosity; dividing by
    # 1e6 presumably converts ub-1 to pb-1 — confirm against LumiDB docs.
    return (float(lumi_line[2])/10.**6, float(lumi_line[4])/10.**6)
7282a20d824b9e9daea824a430edb8e4deb425ef
48,964
import copy


def merge_attributes(a, b):
    """Merge values of editable attributes.

    The values of the *b* attributes take precedence over the values of
    the *a* attributes: a deep copy of *b* is made and then, for each
    section also present in *a* (except "repo_setup"), every non-
    "metadata" key already known to *a* gets its "value" overwritten
    with *a*'s value.

    Neither input dict is mutated.

    Fix: six.iteritems() was replaced with dict.items(), which behaves
    identically on both Python 2 and 3, so this function no longer
    needs the six dependency.
    """
    attrs = copy.deepcopy(b)
    for section, pairs in attrs.items():
        if section == "repo_setup" or section not in a:
            continue
        a_values = a[section]
        for key, values in pairs.items():
            if key != "metadata" and key in a_values:
                values["value"] = a_values[key]["value"]
    return attrs
06b9d31ad73e4e24531961cacd6831241e559b85
48,965
def fahrenheit_to_celcius(x):
    """Convert a temperature from degrees Fahrenheit to degrees Celsius.

    Accepts either a plain number (converted value is returned) or a
    data object exposing a ``props`` dict and a mutable ``data``
    attribute, which is converted in place (its unit set to 'deg C')
    and returned.

    Fixes: the docstring wrongly said "Convert cfs to cms", and the
    bare ``except:`` (which even swallowed KeyboardInterrupt) is now
    limited to the errors a plain-number argument actually raises.
    """
    try:
        x.props['unit'] = 'deg C'
        x.data -= 32.
        x.data *= 5. / 9.
        return x
    except (AttributeError, TypeError, KeyError):
        return (x - 32.) * 5. / 9.
721b3d050c8aca8086927ffb8e632463243aca3b
48,966
def wrap_fn(newname: str, doc: str):
    """Decorator factory that renames a function and replaces its docstring.

    Parameters
    ----------
    newname: str
        New ``__name__`` for the decorated function.
    doc: str
        New ``__doc__`` for the decorated function.
    """
    def rename(func):
        func.__name__ = newname
        func.__doc__ = doc
        return func
    return rename
248999597b06665d52246656cb8ee8f3322b4338
48,967
def hsl_to_hsv(h :float, s :float, l :float) -> tuple:
    """HSL to HSV color conversion (all components normalised to [0, 1]).

    https://en.wikipedia.org/wiki/HSL_and_HSV#Interconversion

    Args:
        h: hue in [0.0, 1.0].
        s: saturation in [0.0, 1.0].
        l: lightness in [0.0, 1.0].

    Returns:
        The corresponding normalised (h, s, v) tuple.
    """
    value = l + s * min(l, 1 - l)
    if value == 0:
        saturation = 0
    else:
        saturation = 2 * (1 - l / value)
    return (h, saturation, value)
77b7613f276cac51bf5f851e175fa42790084036
48,968
def rescale(value, in_min, in_max, out_min, out_max):
    """Linearly map *value* from [in_min, in_max] onto [out_min, out_max].

    usage:
        >>> rescale(20, 10, 30, 0, 100)
        50.0
    """
    span_in = in_max - in_min
    span_out = out_max - out_min
    scaled = (value - in_min) * (span_out / span_in)
    return scaled + out_min
69444386da45b2747cea3b304bba0f2fcc4a8978
48,969
def ip_to_hex(ip_addr, reverse=False):
    """Return the hex form of an IPv4 address; *reverse* flips octet order.

    >>> ip_to_hex('10.1.2.23')
    '0A010217'
    >>> ip_to_hex('10.1.2.23', reverse=True)
    '1702010A'
    """
    octets = ip_addr.split('.')
    if reverse:
        octets.reverse()
    values = map(int, octets)
    return '{:02X}{:02X}{:02X}{:02X}'.format(*values)
88dba2075b2c6507016783e987f193a54aaa38f1
48,971
def get_bbox_from_gt(gt):
    """Return the bounding box of the ground-truth image *gt*.

    The image is converted to RGB first so getbbox() measures the
    non-zero content consistently.
    """
    rgb_image = gt.convert('RGB')
    return rgb_image.getbbox()
d7b7bcf9c5963393c04ae6f355981fdecae72088
48,973
def get_next_list_elem(list: list, elem):
    """Returns the element that immediately follows the specified one.

    None is returned when *elem* is absent or is the last element.
    """
    if elem not in list:
        return None
    position = list.index(elem)
    if position + 1 < len(list):
        return list[position + 1]
    return None
dfbdfca8924aff6cdba5eddebdfb95e777ed967a
48,974
def _make_sparse_feature_vector(instance, transformers):
    """Make sparse feature vector with tuple(value, weight) as elements.

    Args:
        instance: raw input record handed to each transformer.
        transformers: transformer objects exposing
            get_value_and_transform(), feature_type ('dense' or not),
            feature_size, and transformed_value_is_tuple.

    Returns:
        An iterable of (feature_index, weight) pairs — except in the
        single sparse-transformer case, where the transformer's plain
        index list is passed through unchanged.
    """
    if len(transformers) == 1:
        # The feature has only one source column.
        transformer = transformers[0]
        transformed_value = transformer.get_value_and_transform(instance)
        if transformer.feature_type == 'dense':
            # Dense output: position within the vector is the feature index.
            return enumerate(transformed_value)
        else:
            # If this column is not getting combined with any other column (as
            # is usually the case for 'target' features), we just return the
            # transformed_value directly since it's of the form
            # [idx1, idx2, ...]
            return transformed_value
    else:
        feature_vector = []
        # Running offset: each transformer owns a contiguous index range
        # of width transformer.feature_size within the combined vector.
        total_feature_size = 0
        for transformer in transformers:
            transformed_value = transformer.get_value_and_transform(instance)
            # Transformed value is always a sequence either dense or sparse.
            # So checking for 'if transformed_value' is a valid check.
            if transformed_value:
                if transformer.feature_type == 'dense':
                    tuple_list = enumerate(transformed_value, start=total_feature_size)
                else:
                    # Categorical features can be represented as either [(idx1, count),
                    # (idx2, count), ...] or as just [idx1, idx2]
                    if transformer.transformed_value_is_tuple:
                        tuple_list = [(idx + total_feature_size, value)
                                      for (idx, value) in transformed_value]
                    else:
                        # Bare indices get an implicit weight of 1.
                        tuple_list = [(idx + total_feature_size, 1)
                                      for idx in transformed_value]
            else:
                tuple_list = []
            total_feature_size += transformer.feature_size
            feature_vector.extend(tuple_list)
        return feature_vector
b18209bf2f83a2bd0e71efa0dac8c02f5738def3
48,975
def generate_date(date):
    """Format a 'MM/DD/YYYY' date with URL-encoded slashes.

    Returns:
        str: the date as 'MM%2fDD%2fYYYY'.

    Fix: removed the dead ``date = date`` assignment and the needless
    multi-step rebinding of the input; behavior is unchanged.
    """
    parts = date.replace('/', '\n').split()
    month, day, year = parts[0], parts[1], parts[2]
    return month + '%2f' + day + '%2f' + year
d0d136987452f718a3c9043184f55b7331d09181
48,976
def titlecase_keys(d):
    """Return a copy of dict *d* with every key passed through str.title()."""
    result = {}
    for key, value in d.items():
        result[key.title()] = value
    return result
8c5d2cbdc8bc28e384798c3e5b0f096068926945
48,979
def _generate_fortran_bool(pybool):
    """Render a Python truth value as a Fortran logical literal string
    for the namelist generator ('.true.' or '.false.')."""
    return '.true.' if pybool else '.false.'
e945364845ccaae0da91d81262558a6165075071
48,980
def minmax_normalisation(dataframe):
    """Return [min, max] pairs for every column of a row-major dataset.

    *dataframe* is a sequence of equal-length rows; the output has one
    [min, max] list per column.
    """
    stats = []
    for col in range(len(dataframe[0])):
        column = [row[col] for row in dataframe]
        stats.append([min(column), max(column)])
    return stats
b9e0aec76d1e3578288e1744d3b1208d5fc08169
48,981
def printable_device(device):
    """Returns a printable form of a device record.

    Emits "<device NULL>" for None, otherwise "<device S0 d0, S1 d1, …>"
    pairing each size (from StartSize upward) with its delta value.
    """
    if device is None:
        return "<device NULL>"
    deltas = device.DeltaValue
    assert device.StartSize + len(deltas) - 1 == device.EndSize
    pairs = [
        "%d %d" % (device.StartSize + offset, delta)
        for offset, delta in enumerate(deltas)
    ]
    return "<device %s>" % (", ".join(pairs))
2b3bdea992402188687e7270fca47bf612d76e13
48,983
def parse_date(date):
    """Parse a date string into an int, mapping None to 0 (no-op parse)."""
    return 0 if date is None else int(date)
46cba9bcc9a0ba90028579994df4e92b9165a1b8
48,984
import math


def giantstep_babyStep(m, c, n, phi, group) :
    """
    With c = m^e % n
    given m, c, n, 𝜙(𝑛), and the target group
    This function will find out e % group in time O(√group)
    via Shanks' baby-step/giant-step (meet-in-the-middle).

    Give :
        @m : plaintxt data
        @c : cipher data
        @n : module
        @phi : 𝜙(𝑛)
        @group : A factor in phi

    Return :
        @e : what exponent this m is taken in group field
             (None if no collision is found).
    """
    # Raising to subgroup
    assert phi % group == 0, f"This phi didn't make {group} group"
    e = phi // group
    sqf = math.ceil(math.sqrt(group))
    # gf generates the order-`group` subgroup; gsqf = gf^sqf is the giant stride.
    gf = pow(m, e, n)
    gsqf = pow(gf, sqf, n)
    table = {}
    # Giant step: record c_sub * gf^(a*sqf) -> a, where c_sub = c^e mod n.
    ygna = pow(c, e, n)
    for a in range(sqf):
        table[ygna] = a
        ygna = (ygna * gsqf) % n
    # Baby step: walk gf^b until it collides with a recorded giant step.
    gb = 1
    for b in range(sqf):
        if gb in table :
            a = table[gb]
            # gf^b == c_sub * gf^(a*sqf)  =>  exponent of c is (b - a*sqf) mod group.
            ki = (b-a*sqf) % group
            return ki
        gb = (gb*gf)%n
9afaeef7ad647d8c1cd3bfeac01711642282f7f7
48,985
def flip(Series):
    """Return *Series* ordered with descending index.

    Folk and Ward datasets must have size limits descending; the series
    is reversed only when its first index is below its last.
    """
    ordered = Series
    if Series.index[0] < Series.index[-1]:
        ordered = Series.iloc[::-1]
    return (ordered)
26fa0f4c02f8c1e5b0dd2653eb894a99a9e24aa8
48,986
def human_readable_timedelta(duration):
    """Timedelta dict as a human readable string.

    :param duration: timedelta values from timedelta_to_dict()
    :type duration: dict
    :returns: e.g. "1 days 2 hr 3 min"; seconds appear only for
        durations under one hour, with precision growing as the value
        shrinks; an empty/falsy input yields "".
    """
    if not duration:
        return ""
    assert isinstance(duration, dict)
    days = duration["days"]
    hours = duration["hours"]
    minutes = duration["minutes"]
    seconds = duration["seconds"]
    parts = []
    if days:
        parts.append(f"{days:d} days")
    if parts or hours:
        parts.append(f"{hours:d} hr")
    if parts or minutes:
        parts.append(f"{minutes:d} min")
    # Seconds are shown only when the total duration is under one hour.
    if not days and not hours:
        if minutes or seconds >= 30:
            parts.append(f"{seconds} sec")
        elif seconds >= 10:
            parts.append(f"{seconds:.1f} sec")
        elif seconds >= 1:
            parts.append(f"{seconds:.2f} sec")
        else:
            parts.append(f"{seconds:.3f} sec")
    return str(" ".join(parts))
e2c69a459296f79b984cf35993c96a46e8ea75a3
48,987
def isTapeRSE(rseName):
    """Return True if *rseName* names a Tape RSE (rse_type=TAPE).

    :param rseName: string with the RSE name
    :return: True or False

    NOTE: a more reliable - but more expensive - way to know that would
    be to query `get_rse` and evaluate the rse_type parameter; here we
    rely on the naming convention instead.
    """
    suffix = "_Tape"
    return rseName.endswith(suffix)
f380660cb307689c5300db10e00e1a1df73f263e
48,988
async def root():
    """
    Simple welcome message for GETs to the root, pointing callers at
    the interactive documentation.
    """
    payload = {"message": "Welcome! Check out the interactive documentation at /docs"}
    return payload
08482501d5a5a45bdcf53b52be1929d431f9c9c7
48,989
def isRetrovirus(lineage):
    """Determine whether a lineage corresponds to a retrovirus.

    @param lineage: An iterable of C{LineageElement} instances.
    @return: C{True} if some element has rank 'family' and name
        'Retroviridae', C{False} otherwise.
    """
    return any(
        element.rank == 'family' and element.name == 'Retroviridae'
        for element in lineage
    )
e4f0aa37a673e9640ca6c6ed0c76d993d6eefb82
48,990
import glob
import string


def set_grams(data_path,top=100):
    """ read a dict with 'terms frequency' in gn

    Scans every "*word*.txt" file one level below *data_path*; each
    line is either "<term> <count>" (unigram) or "<t0> <t1> <count>"
    (bigram).  Terms are stripped of punctuation and kept only when
    they are at least 3 chars, non-numeric and occur at least *top*
    times.  The surviving unique terms are also written, one per line,
    to "<data_path>.txt" as a side effect, and returned as a list.
    """
    files = glob.glob(data_path + "/*/*word*.txt") # txt files in subfolders
    ngram = []
    # Translation table that deletes all ASCII punctuation from terms.
    table = str.maketrans("","",string.punctuation)
    for f_in in files:
        with open(f_in, 'r') as fi:
            for lines in fi:
                item = lines.replace("\n","").split()
                term = ""
                count = 0
                if len(item)==3: # bigrams
                    term0 = str(item[0]).translate(table).strip()
                    term1 = str(item[1]).translate(table).strip()
                    # Keep "t0,t1" when both parts qualify, else fall back to
                    # whichever single part qualifies, else drop the line.
                    term = "{},{}".format(term0,term1) if (len(term0)>2 and len(term1)>2 and not term0.isnumeric() and not term1.isnumeric()) else (term0 if (len(term0)>2 and not term0.isnumeric()) else (term1 if (len(term1)>2 and not term1.isnumeric()) else "")) # comma(,) for OR in Twitter
                    count = int(item[2])
                elif len(item)==2: # unigrams
                    term = str(item[0]).translate(table).strip()
                    count = int(item[1])
                if count>=top and str(term) != 'nan' and len(term)>=3: # ignore term freq minor than top and term length than 3
                    ngram.append(term)
            fi.close()
    gn_set = set(ngram)
    print(len(gn_set))
    # Side effect: persist the unique terms next to the scanned folder.
    f = open(data_path+".txt", 'w')
    for w in gn_set:
        f.write('{}\n'.format(w))
    f.close()
    return list(gn_set)
f1700eddba54e23d59507a7183fd52b8c811051f
48,992
def cf(x_coord, y_coord, sample_image):
    """Format cursor coordinates (plus pixel value when in bounds).

    Args:
        x_coord (int): X coordinate
        y_coord (int): y coordinate
        sample_image (numpy.array): 2-D image supplying the z value

    Returns:
        str: 'x=…, y=…, z=…' when the rounded position lies inside the
        image, otherwise 'x=…, y=…'.
    """
    numrows, numcols = sample_image.shape
    col = int(x_coord + 0.5)
    row = int(y_coord + 0.5)
    inside = 0 <= col < numcols and 0 <= row < numrows
    if not inside:
        return 'x=%1.4f, y=%1.4f' % (x_coord, y_coord)
    z_coord = sample_image[row, col]
    return 'x=%1.4f, y=%1.4f, z=%1.4f' % (x_coord, y_coord, z_coord)
edcf2b1f7a053ce4a0f6e50a7af0ae31521593e5
48,994
import time
import hmac
import base64


def generate_token(key, expire=60 * 60 * 24):
    """Create an expiring, HMAC-SHA1-signed token.

    @Args:
        key: str, secret the caller must keep in order to verify the
            token later (use the same key every time).
        expire: int, validity window in seconds.
    @Return:
        str: urlsafe-base64 of "<expiry_timestamp>:<hmac_sha1_hex>".
    """
    expiry = str(time.time() + expire)
    signature = hmac.new(key.encode("utf-8"), expiry.encode("utf-8"), 'sha1').hexdigest()
    raw_token = expiry + ':' + signature
    encoded = base64.urlsafe_b64encode(raw_token.encode("utf-8"))
    return encoded.decode("utf-8")
060dc1a5e85a66eb48c938138517bc5b281648f0
48,995
def compute_transitions(bands,in_list,fin_list):
    """Compute the (positive) transition energies for the bands (on a
    single kpoint).  The `fast` index runs over *fin_list*.

    Args:
        bands (list): list with the energies.
        in_list (list): indexes of the bands used as starting points.
        fin_list (list): indexes of the bands used as final points.

    Returns:
        :py:class:`list`: bands[c] - bands[v] for every pair with c > v.
    """
    return [
        bands[c] - bands[v]
        for v in in_list
        for c in fin_list
        if c > v
    ]
09bc38199f7f36aa80cc7834e13f3e5f55546099
48,997
def contains(first, second):
    """Returns True if any item of `second` is contained in `first`.

    Membership uses the ``in`` operator, so `first` may be any
    container; note that when `first` is a string this is a substring
    test, not element equality.
    """
    return any(i in first for i in second)
0157fd1c9ad3e48f9a6e9aef39e58240dbcb64e8
48,998
def load(obj, cls, default_factory):
    """Create or load an object if necessary.

    Parameters
    ----------
    obj : `object` or `dict` or `None`
        Already-built object, serialized dict, or nothing.
    cls : `type`
        Class providing a ``load`` method for dict input.
    default_factory : `function`
        Called to build a fresh object when *obj* is None.

    Returns
    -------
    `object`
    """
    if obj is None:
        result = default_factory()
    elif isinstance(obj, dict):
        result = cls.load(obj)
    else:
        result = obj
    return result
2dfc09558fffde079810c295e39cc0443bc65116
48,999
def clean_data(df):
    """
    clean dataframe's data by dropping duplicate rows
    :param df: dataframe
    :return: deduplicated dataframe (the input is left untouched)
    """
    deduplicated = df.drop_duplicates()
    return deduplicated
41531f5a610cf9cc7ef1b6da8504fb79484c25fb
49,000
def extract_tags(data, keys):
    """Helper function to extract tags out of data dict.

    *data* is mutated: each found key is popped out of it.  Keys absent
    from *data* are treated as optional and silently skipped.
    """
    tags = {}
    for key in keys:
        if key in data:
            tags[key] = data.pop(key)
    return tags
25bcf2fa99ed1a6d1850b0fbbdda3f7b5c8357eb
49,001
def _uprank(a):
    """Get `a` as a rank-two tensor, adding a trailing axis when `a`
    is rank one.

    Args:
        a (tensor): Tensor to get as a rank-two tensor.

    Returns:
        tensor: `a` itself when already rank >= 2, else ``a[:, None]``.
    """
    return a[:, None] if a.ndim == 1 else a
f2aa483234d4dbe98989984eb28b3011ad7a4194
49,002
def unflat_len(obj):
    """Return the number of non-list/tuple leaf elements in *obj*."""
    if isinstance(obj, (list, tuple)):
        return sum(unflat_len(item) for item in obj)
    return 1
d11c1c05b3f80f166781006ad573176dbf4752db
49,003