content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import torch


def flatten_array_w_0bias(X):
    """Flatten a list of parameter tensors into a single 1D tensor.

    Scalar (0-dim) entries are replaced by a zero tensor of the same
    shape before flattening; every other entry is flattened as-is, and
    all pieces are concatenated in order.
    """
    pieces = []
    for param in X:
        if len(param.shape) == 0:
            # Zero out scalar ("bias"-like) entries.
            pieces.append(torch.zeros(param.shape).flatten())
        else:
            pieces.append(param.flatten())
    return torch.cat(pieces)
f55ecac2295dfe22e2d28efab8d3a6ac51c4725b
42,204
import math


def drop_dead_intensity(t):
    """Intensity that can be maintained for a given time.

    All athletes assumed to exhibit identical behavior; originally
    based on elite athletes.

    Args:
        t (float): Time to exhaustion in minutes.

    Returns:
        float: maximum sustainable intensity for this time, defined as
        a ratio of max sustainable VO2 to VO2max.
    """
    # Two exponential decay terms settling toward the asymptote c.
    a1, tau1 = 0.2989558, 1 / 0.1932605
    a2, tau2 = 0.1894393, 1 / 0.012778
    c = 0.8
    return a1 * math.exp(-t / tau1) + a2 * math.exp(-t / tau2) + c
b5048e8907c0147aa5a8390221e2c64eb7663546
42,206
import sys


def calcTolerance(termUnits, timestep, termName):
    """
    Finds the tolerance above which we consider the budgets do not balance.
    Which one is chosen is dependant on a terms units.

    Input:
        termUnits: String containting the units of the variable in question.
        timestep: Integer of real time in seconds between model iterations
        termName: Name of variable being looked at. Used for finding
                  tolerance for special cases
    """
    # These were copied from constants_clubb.F90
    w_tol = 2e-2    # m/s
    thl_tol = 1e-2  # K
    rt_tol = 1e-8   # kg/kg
    Nr_tol = 1e-5   # num/kg

    # Special per-variable cases take precedence over unit-based lookup.
    special_cases = {"Ncm": 2e2, "Nrm": 5e3}
    # Error tolerance keyed by the variable's units.
    unit_tolerances = {
        "(num/kg)/s": Nr_tol,                              # num/kg
        "(kg/kg)/s": rt_tol,                               # kg/kg
        "kg kg^{-1} s^{-1}": rt_tol,                       # kg/kg
        # Multiply by 100 because otherwise it's too small for tests to pass
        "(kg^2)/(kg^2 s)": rt_tol * rt_tol * 100,          # kg^2/kg^2
        "m s^{-2}": w_tol,                                 # m/s
        "m^2/s^3": w_tol * w_tol,                          # m^2/s^2
        "m^{3} s^{-4}": w_tol * w_tol * w_tol,             # m^3/s^3
        "K s^{-1}": thl_tol,                               # K
        "(K^2)/s": thl_tol * thl_tol,                      # K^2
        "(m kg)/(s^2 kg)": w_tol * rt_tol,                 # m/s kg/kg
        "(kg K)/(kg s)": thl_tol * rt_tol,                 # K kg/kg
        "(m K)/s^2": thl_tol * w_tol,                      # K m/s
        "kg^{3} kg^{-3} s^{-1}": rt_tol * rt_tol * rt_tol, # kg^3/kg^3
        "K^{3} s^{-1}": thl_tol * thl_tol * thl_tol,       # K^3
    }

    if termName in special_cases:
        tol = special_cases[termName]
    elif termUnits in unit_tolerances:
        tol = unit_tolerances[termUnits]
    else:
        sys.stderr.write("Error parsing units: " + termUnits + "\nCheck this script's calcTolerance method")
        sys.exit(1)
    return tol / timestep
e8827f283fd163889fab2dd27f01de0fb6076454
42,208
def missing_no(nums: list) -> int:
    """Return the number in 0..100 that is absent from *nums*."""
    # The symmetric difference holds exactly the values present on only
    # one side — for a complete-but-one input, that is the missing number.
    leftover = set(nums).symmetric_difference(range(0, 101))
    return next(iter(leftover))
97193273c3144a473f3924988dec8dd39ae72383
42,209
import logging


def fetch_relationships(bucket, dataset, relation_types):
    """Get a listing of relationship files on S3.

    Parameters
    ----------
    bucket : str
    dataset : str
    relation_types : List[str]

    Returns
    -------
    {str: (str, str)}
        A dict keyed by relation type whose values are (bucket, key)
        pairs locating the corresponding relationship file.
    """
    locations = dict()
    for rel_type in relation_types:
        key = "relationships/{}.csv".format(rel_type)
        logging.info("Fetching relationship {}".format(key))
        locations[rel_type] = (bucket, key)
    return locations
4b4e158506aed62c20906212390d34ff1c033d32
42,210
def is_poweroftwo(x):
    """Is the number x a power of two (greater than 1)?

    >>> assert vipy.linalg.is_poweroftwo(4) == True
    >>> assert vipy.linalg.is_poweroftwo(3) == False
    """
    if x <= 1:
        return False
    # A power of two has a single set bit, so x & (x-1) clears it to 0.
    return (x & (x - 1)) == 0
f12747f1198e5f650f14cefdfeb2450efa89adf2
42,211
def cria_matriz(num_linhas, num_colunas, valor):
    """(int, int, value) -> matrix (list of lists)

    Create and return a matrix with num_linhas rows and num_colunas
    columns in which every element equals the given value.
    """
    # Each row is a fresh list so rows do not alias each other.
    return [[valor] * num_colunas for _ in range(num_linhas)]
a6e92131cd9af347b3a36d7bd651a7c3fb5347e3
42,212
def minimumAbsoluteDifference(arr, n):
    """Return the minimum absolute difference between any two elements.

    Greedy/sorted approach, O(n log n): after sorting, the closest pair
    must be adjacent, so a single scan of adjacent differences suffices.

    :param arr: sequence of at least two numbers
    :param n: number of elements in arr (kept for interface compatibility)
    :return: smallest absolute pairwise difference
    """
    ordered = sorted(arr)
    # Fix: the original printed debug output (print(idx)) on every loop
    # iteration; removed. The manual index bookkeeping over a sliced copy
    # is replaced by a direct scan over adjacent pairs.
    return min(abs(a - b) for a, b in zip(ordered, ordered[1:]))
be17c89482901fbe2d2c1885f1eeeba40dda4ffc
42,213
def has_outstanding_labels(byte_labels):
    """Whether any label still awaits its definition.

    If a label is used once, it means it has to be called or specified
    later; such a label has "definition" == False in its entry.
    """
    # == False (not `not ...`) mirrors the original exact comparison.
    return any(info["definition"] == False for info in byte_labels.values())
1e66d43cda695a301af4b492f67b25fbaf785d53
42,214
def extract_first_cell_text(notebook_data):
    """Extract MarkDown data from the first cell.

    Returns "" when the notebook has no cells, lacks a "cells" key, is
    not subscriptable, or when the first cell is not a markdown cell.
    """
    try:
        first_cell = notebook_data["cells"][0]
    # Fix: a dict missing the "cells" key raises KeyError and a
    # non-subscriptable value (e.g. None) raises TypeError — neither was
    # caught by the original (AttributeError, IndexError) clause.
    except (AttributeError, IndexError, KeyError, TypeError):
        return ""
    if first_cell["cell_type"] != "markdown":
        return ""
    return first_cell["source"]
d2cde5cd116f22e2e6b230e262a4daf0dd8f9b74
42,217
def fix_spio_issues(mcn_net):
    """Modify data structures that have been loaded using scipy.io.loadmat.

    loadmat does not always produce consistent nesting for the final
    element of a structure array, so every entry of the 'inputs' and
    'outputs' lists is normalized to be a list (bare elements get
    wrapped). The network dict is fixed in place and returned.
    """
    for state in ('inputs', 'outputs'):
        entries = mcn_net['layers'][state]
        for i, elem in enumerate(entries):
            if not isinstance(elem, list):
                entries[i] = [elem]  # wrap bare element to match nesting
        mcn_net['layers'][state] = entries
    return mcn_net
44c466ecade86cff6d425b041606b67392f5eb14
42,218
def buildKey(ids, dataLine):
    """Concatenate a set of fields together to build an overall key.

    This is a simple approach to determining k-anonymity, in which all
    of the fields of interest are concatenated as a single key. The ids
    coming in should be a list of indexes into the fields in the
    dataLine, concatenated in that order to form the key.

    Note that this currently assumes that all of the data fields are
    strings.
    """
    # Idiom fix: str.join replaces the quadratic `retKey +=` loop.
    return ''.join(dataLine[i] for i in ids)
b37d96b46916cb7db751b49d7351b1a2e0d9f8ae
42,219
def scan_deployed_policies(org_client):
    """Return the list of Service Control Policies deployed in the Organization."""
    response = org_client.list_policies(Filter='SERVICE_CONTROL_POLICY')
    return response['Policies']
53b96af745cd0f782158a6600bb7d6bedb836747
42,220
def get_zhang_aspect_ratio(aspect_ratio):
    """Compute an equivalent aspect ratio according to Zhang.

    Parameters
    ----------
    aspect_ratio : float
        Aspect ratio of a cylindrical fiber.

    Returns
    -------
    float
        Equivalent aspect ratio for an ellipsoid.

    References
    ----------
    .. [1] Zhang, D.; Smith, D.E.; Jack, D.A.; Montgomery-Smith, S.,
       'Numerical Evaluation of Single Fiber Motion for
       Short-Fiber-Reinforced Composite Materials Processing.'
       J. Manuf. Sci. Eng. 2011, 133, 51002.
       https://doi.org/10.1115/1.4004831
    """
    ar = aspect_ratio
    # Cubic polynomial fit from Zhang et al. (2011); same term order as
    # published to keep floating-point results bit-identical.
    return 0.000035 * ar ** 3 - 0.00467 * ar ** 2 + 0.764 * ar + 0.404
e83b751179c566683d8667b2682d4589d8f3f540
42,221
def _extend_env(env, defaults={}, overrides={}): """ Create a new ``dict`` from fabric's ``env``, first adding defaults specified via ``defaults`` (if available). Finally, override anything in env, with values specified by ``overrides``. """ new_env = {} for key, value in defaults.iteritems(): new_env[key] = value for key, value in env.iteritems(): new_env[key] = value for key, value in overrides.iteritems(): new_env[key] = value return new_env
d2de346ce961eeea96613ad8b55ff131fa493be1
42,223
def app_label(value):
    """Template tag that returns the name of the app an item belongs to."""
    meta = value._meta
    return meta.app_label
40bb07baef805ba5ae7f4d59834d70ec3121310a
42,224
def simplify_header(str_in):
    """Simplify strings to match col headers more robustly.

    Lowercases, removes spaces, and maps square brackets to parentheses.
    Use on both sides of the '==' operator.
    """
    # One-pass translation: drop spaces, [ -> (, ] -> ).
    table = str.maketrans({' ': None, '[': '(', ']': ')'})
    return str_in.lower().translate(table)
3faf822fd9b2d48005630ffe9c778381ba6f847c
42,225
def spanif(value, arg):
    """Conditionally wrap some text in a span if it matches a condition.

    ``arg`` has the form "<css-class> <op> <n>" where op is one of
    <, =, >. When the condition does not parse or does not hold, the
    value is returned unchanged.
    """
    parts = arg.split()
    if len(parts) != 3:
        return value
    css_class, op, threshold = parts[0], parts[1], int(parts[2])
    matched = (
        (op == '<' and value < threshold)
        or (op == '=' and value == threshold)
        or (op == '>' and value > threshold)
    )
    if matched:
        return '<span class="%s">%s</span>' % (css_class, value)
    return value
42fcc29ff1760388c9ef3b71441488ca4c054dbc
42,226
def get_context_attribute(self, request, context_name, namespace_name = None):
    """Retrieve the value of the context attribute with the provided name.

    In case no attribute is found a none value is returned.

    :type request: Request
    :param request: The request to be used.
    :type context_name: String
    :param context_name: The name of the context attribute to retrieve.
    :type namespace_name: String
    :param namespace_name: The name of the namespace to be used for the
        context (session) variable to be retrieved.
    :rtype: Object
    :return: The value of the requested context attribute.
    """
    # The context map lives in the session under "_context"; fall back
    # to an empty map when the session attribute is missing.
    context = self.get_session_attribute(request, "_context", namespace_name)
    if context is None:
        context = {}
    return context.get(context_name, None)
06241ff83389fdbcc61fc9c9b419cfab95106a6f
42,227
def _buckets_for_length(bucket_length, batch_size, max_eval_length, n_devices, training):
  """Creates heuristically a set of bucket boundaries and sizes.

  The middle boundary is set to `bucket_length` and the corresponding batch
  size is set to `batch_size`. We also create buckets of 1/2 and 1/4 length
  with 2x and 4x batch size, and buckets of 2x and 4x and larger length with
  1/2 and 1/4 batch size respectively, and batch size 1 for the final one.

  Args:
    bucket_length: the length of the middle bucket.
    batch_size: the batch size for the middle bucket.
    max_eval_length: the longest bucket length if training=False.
    n_devices: number of devices, batch sizes are divisible by that.
    training: bool, whether we are training or evaluating.

  Returns:
    a pair of lists of integers, (bucket_boundaries, bucket_batch_sizes).
  """
  # Geometric ladder of boundaries around the middle bucket_length.
  bucket_boundaries = [bucket_length // 4, bucket_length // 2,
                       bucket_length, bucket_length * 2,
                       bucket_length * 4, bucket_length * 8,
                       bucket_length * 16]
  if not training:
    max_eval_length = max_eval_length or bucket_length * 32
    # Set last bucket boundary to be max_eval_length, cut off boundaries
    # that are larger than this.
    bucket_boundaries = (
        [b for b in bucket_boundaries if b < max_eval_length] +
        [max_eval_length]
    )
    # NOTE(review): max_eval_length is appended a second time here, so
    # the final two boundaries are identical in eval mode. Looks
    # suspicious — confirm whether the duplicate bucket is intentional.
    bucket_boundaries.append(max_eval_length)
  # Batch sizes mirror the boundary ladder: bigger batches for shorter
  # buckets, ending with batch size 1 for the longest.
  bucket_batch_sizes = [batch_size * 4, batch_size * 2,
                        batch_size, batch_size // 2,
                        batch_size // 4, batch_size // 8,
                        batch_size // 16, 1]
  if not training:
    # The last bucket batch size is always 1, but the one-but-last is
    # sized to accommodate the final length = bucket_boundaries[-1], which
    # we changed for eval above -- so adjusting here too.
    # Resize if needed, since bucket_batch_sizes may not be the same size
    # anymore.
    bucket_batch_sizes = bucket_batch_sizes[:len(bucket_boundaries)] + [1]
    # NOTE(review): batch_size // max_eval_length is 0 whenever
    # max_eval_length > batch_size; the max(..., 1) below rescues it to
    # n_devices — confirm that is the intended eval batch size.
    bucket_batch_sizes[-2] = batch_size // max_eval_length
  # Make batch sizes divisible by n_devices.
  bucket_batch_sizes = [max(b // n_devices, 1) * n_devices
                        for b in bucket_batch_sizes]
  return (bucket_boundaries, bucket_batch_sizes)
4381af811f6f5d530806de49e217de014b6c9395
42,231
def _boolify_envvar(val): """Interpret boolean environment variables. True whenever set/exported, even if value is an empty string, "null", or "none". """ falsey = ("false", "nil", "no", "off", "0") return (val if val is not None else "false").lower() not in falsey
caae6f51cc99ef82f6bdccf1fc398f0c79a57035
42,232
from pathlib import Path


def file_ext(f):
    """Return the file extension of *f* without the leading dot ('' if none)."""
    suffix = Path(f).suffix
    # Path.suffix is either '' or starts with exactly one '.'.
    return suffix[1:] if suffix.startswith('.') else suffix
3e556bcd5d20727c2699da3d5b4e5785a4dcc8f1
42,233
from math import degrees
from numpy import arctan


def get_object_target_lock_control_angle(center, frame):
    """Retrieves an angle between the center of an object in the camera's
    view and the (approximate) robot chassis center.

    This can be used as input to a PID loop so the object is "target
    locked" - the robot drives to align itself with the object, i.e. aim
    to minimize difference between chassis angle and object angle.

    :param tuple center: (x, y) coordinates of the object in the camera's
        view where the center of the frame is (0, 0). Please note, this
        is different from the OpenCV convention where the top left of
        the frame is (0, 0).
    :param frame: OpenCV frame that has the same scale used for center
        parameter - this function uses the dimensions of the frame.
    :return float angle: Angle in degrees to 1 decimal place, or None
        when no object center is given.
    """
    if center is None:
        return None
    # Physically, this approximates the offset between the chassis
    # rotation center and the camera: with (0, 0) in the middle of the
    # frame, the chassis center is taken to be half the frame height
    # below the bottom edge, i.e. one full frame height below center.
    # Fix: frame.shape[0] is the height (rows) in the numpy/OpenCV
    # convention; the original used shape[1] (width), which only matched
    # the stated intent for square frames.
    chassis_center_y = -int(frame.shape[0])
    # Clockwise is positive angle
    delta_y = abs(center[1] - chassis_center_y)
    return round(degrees(arctan(center[0] / delta_y)), 1)
2876aba79fb69c5e60856e8096f43bc8a8de00b1
42,234
def _EscapePosixShellArgument(arg): """Escapes a shell command line argument so that it is interpreted literally. Args: arg: The shell argument to escape. Returns: The escaped string. """ return "'%s'" % arg.replace("'", "'\\''")
c0a46321337621e8f8e5f949783fac3bdf2fc193
42,235
from typing import Dict
from typing import List
from typing import Tuple
from bs4 import BeautifulSoup
from typing import OrderedDict


def load_docs_from_sgml(
    file_path: str, encoding='utf-8'
) -> Dict[str, List[Tuple[str, str]]]:
    """
    Loads documents from the given SGML file.

    Returns a dict mapping document ids to lists of segments, where each
    segment is a (segment id, segment text) tuple. Document and segment
    order follows the file.
    """
    with open(file_path, encoding=encoding) as sgml_file:
        soup = BeautifulSoup(sgml_file, features='lxml')

    all_docs: Dict[str, List[Tuple[str, str]]] = OrderedDict()
    for doc_tag in soup.find_all('doc'):
        doc_id = doc_tag.attrs['docid']
        segments = all_docs.setdefault(doc_id, [])
        for seg_tag in doc_tag.find_all('seg'):
            segments.append((seg_tag.attrs['id'], seg_tag.get_text()))
    return all_docs
99074d183c2f66839db50394528453f4517685c9
42,236
from typing import List
from typing import Dict
import os
import json


def load_texts(fp, text_type: str, max_samples=None) -> List[Dict]:
    """
    :param fp: File Path to the questions/captions file
    :param text_type: questions or captions
    :param max_samples: optional cap on the number of items returned
    :return: list containing the requested text objects
    """
    if text_type not in ('questions', 'captions'):
        raise ValueError("Invalid text type, must be either 'questions' or 'captions'")
    if not os.path.exists(fp):
        raise FileNotFoundError(f"{fp} does not exist")
    with open(fp, 'r') as handle:
        texts = json.load(handle)[text_type]
    assert len(texts) > 1
    return texts[0:max_samples] if max_samples else texts
5ea38d8535fad67e539876a0e700ac3d70a5e074
42,238
def endianSwapU16(bytes):
    """Swaps pairs of bytes (16-bit words) in the given bytearray in place.

    The same bytearray is returned for convenience. Assumes an even
    number of bytes.
    """
    for lo in range(0, len(bytes), 2):
        hi = lo + 1
        bytes[lo], bytes[hi] = bytes[hi], bytes[lo]
    return bytes
3a3db1e49de0a171aba856629e06b7084ae702bd
42,240
import asyncio


async def reCreate(task_ref, cofunc, *args, **kwargs):
    """Task book-keeping and management: create a task.

    :param task_ref: a reference to the previous task, or None for a
        brand-new task
    :param cofunc: cofunction representing the task's functionality
    :param args: passed on to the cofunction
    :param kwargs: passed on to the cofunction
    :return: the newly created task (None if task_ref is not a task)

    If task_ref refers to a live task it is cancelled and awaited (with
    a 15 second timeout) before the replacement task is created.
    """
    if task_ref is not None:
        if not asyncio.isfuture(task_ref):
            print("reCreate: WARNING:", task_ref, "is not a task")
            return None
        task_ref.cancel()
        try:
            await asyncio.wait_for(task_ref, timeout=15)
        except Exception as exc:
            print("WARNING: reCreate failed with", exc, "for task", str(task_ref))
    return asyncio.get_event_loop().create_task(cofunc(*args, **kwargs))
eb32fe6078f30e4b7520ce1116c7c1d2efbf52ee
42,241
def calc_consensus(input_alignment):
    """
    Determines the character counts for each position in an MSA.

    :param input_alignment: dict of sequences (all assumed to have the
        same length as the first one)
    :return: list of per-position base-count dicts
    """
    first_seq = next(iter(input_alignment.values()))
    seq_len = len(first_seq)
    # One counter dict per alignment column.
    counts = [{'A': 0, 'T': 0, 'C': 0, 'G': 0, 'N': 0, '-': 0}
              for _ in range(seq_len)]
    for seq in input_alignment.values():
        for pos in range(seq_len):
            base = seq[pos].upper()
            if base not in counts[pos]:
                base = 'N'  # any non-ACGT/gap symbol is tallied as N
            counts[pos][base] += 1
    return counts
9ebd0336b3ceec76053e1d32459b5b3612ebb328
42,243
def _parse_html_table(table): """Return list of lists with cell texts. :param table: beautifulsoup tag with table element """ rows = table.findAll("tr") data = [] for row in rows: tds = row.findAll("td") ths = row.findAll("th") if len(ths) > 0: tmp = [th.text.strip() for th in ths] else: tmp = [td.text.strip() for td in tds] data.append(tmp) return data
073449286be9d2a91028b80bad1a73a475b06994
42,244
import os def _is_dev2_environment(): """Indicates whether this code is being run in devappserver2.""" return os.environ.get('SERVER_SOFTWARE', '') == 'Development/2.0'
64bf29628bca77722d499474d69dd82e429621c0
42,246
import torch import math def _entropy(tensor, low=0, high=0): """ entropy of a tensor Sum(-plog(p)) base of log length of tensor, number of events """ _t = tensor.view(-1) _n = len(_t) _th = torch.histc(_t, _n, low, high)/_n _th = _th[_th > 0] return torch.sum(-1*_th*torch.log(_th)).div(math.log(_n))
b59a8712ad3dbecb90f8b0cb5d9e95c094397218
42,247
import os
import errno


def file_path_with_mkdirs(path_with_filen_name):
    """Create every directory needed to save a file at the given path.

    :param path_with_filen_name: full path including the file name
    :return: the unchanged path, for convenient chaining

    Fixes: a bare file name (empty dirname) previously made the code
    call os.makedirs('') and raise; it is now a no-op. The racy
    isdir-check + EEXIST guard is replaced by exist_ok=True, which is
    atomic with respect to concurrent creators.
    """
    directory = os.path.dirname(path_with_filen_name)
    if directory:
        os.makedirs(directory, exist_ok=True)
    return path_with_filen_name
a892d2724016599ea5059c401d4569df6abdb9ab
42,248
def max_allowed_ds_steps(shape, factor):
    """How often can a shape be down-sampled by a given factor such that
    none of the divisions will give non-integers.

    Args:
        shape (listlike): tensor shape
        factor (integer): downsample factor

    Returns:
        int: maximum allowed downsample operations
    """
    def steps_for(size):
        # Count successive exact divisions by `factor` while >= 1.
        current = float(size)
        count = 0
        while True:
            nxt = current / float(factor)
            if nxt >= 1 and nxt.is_integer():
                current = nxt
                count += 1
            else:
                return count

    min_steps = float('inf')
    for extent in shape:
        min_steps = int(min(min_steps, steps_for(extent)))
    return min_steps
4839c99f8715cb183ea80389b65ae30b6e806c89
42,251
import subprocess
import sys


def collect_output(cmd, cwd=None, stderr=subprocess.PIPE, check=True) -> str:
    """Collect stdout and return it as a str.

    On failure the command's captured output is echoed (stdout to
    stdout, stderr to stderr) before the CalledProcessError is re-raised.
    """
    try:
        completed = subprocess.run(
            cmd, cwd=cwd, check=check, stdout=subprocess.PIPE, stderr=stderr
        )
    except subprocess.CalledProcessError as exc:
        print(f"Command {exc.cmd} failed")
        if exc.output:
            print(exc.output.decode("utf-8"))
        if exc.stderr:
            print(exc.stderr.decode("utf-8"), file=sys.stderr)
        raise
    return completed.stdout.decode("utf-8")
042da4b5fe0601d74f32d277c57fdb522d096b7d
42,252
def get_eye_model_3d(x, y):
    """
    Using the calibration model return theta and phi as a function of x and y
    :param x: centre x coordinate of the eye ball in 2d
    :param y: centre y coordinate of the eye ball in 2d
    :return:
    """
    # NOTE(review): placeholder only — returns the NotImplemented
    # singleton (it does not raise NotImplementedError), so callers
    # receive a truthy sentinel rather than an exception. Confirm this
    # is the intended stub behavior.
    return NotImplemented
42cb89f6e585047639d8d2857396af35d3b331de
42,253
def to_map(labelset, map_unlabeled=True):
    """map set of labels to series of consecutive integers from 0 to n
    inclusive, where n is the number of labels in the set.

    This 'labelmap' is used when mapping labels from annotations of a
    vocalization into a label for every time bin in a spectrogram of
    that vocalization. If map_unlabeled is True, 'unlabeled' will be
    added to labelset, and will map to 0, so the total number of
    classes is n + 1.

    Parameters
    ----------
    labelset : set
        of labels used to annotate a Dataset.
    map_unlabeled : bool
        if True, include key 'unlabeled' in mapping. Any time bins in a
        spectrogram that do not have a label associated with them will
        be assigned the integer that the 'unlabeled' key maps to.

    Returns
    -------
    labelmap : dict
        maps labels to integers
    """
    # Fix: isinstance replaces the exact `type(...) != set` comparison,
    # so set subclasses are accepted too (idiomatic type check).
    if not isinstance(labelset, set):
        raise TypeError(f"type of labelset must be set, got type {type(labelset)}")
    labellist = ["unlabeled"] if map_unlabeled is True else []
    labellist.extend(sorted(labelset))  # sorted() accepts sets directly
    return dict(zip(labellist, range(len(labellist))))
0cca033beea95b9eddd084875ed08b149f62f36b
42,254
def get_indice(self, coord=None):
    """Return the coordinates of node(s).

    Parameters
    ----------
    self : NodeMat
        an NodeMat object
    coord : ndarray
        a node coordinate

    Returns
    -------
    coord: np.array
        an array of node coordinates
    """
    # No coordinate given: return the full indice array.
    if coord is None:
        return self.indice
    else:
        # NOTE(review): the coord-lookup branch is unimplemented and
        # falls through, implicitly returning None — confirm whether
        # this is a stub awaiting implementation.
        pass
ad7251fd7ef27138f95bded196884293d62e6843
42,255
def peek(func, *args):
    """Calls func with the given arguments and returns _the first argument_."""
    if len(args) == 0:
        raise TypeError('Need at least 2 arguments; got 1')
    func(*args)
    return args[0]
6b1b8066b223745bae0683f9019e472e15c3b4d4
42,256
from typing import List


def read_umls_file_headers(meta_path: str, filename: str) -> List[str]:
    """
    Read the file descriptor MRFILES.RRF from a UMLS release and get
    column headers (names) for the given file

    MRFILES.RRF file format: a pipe-separated values
    Useful columns:
        column 0: name of one of the files in the META directory
        column 2: column names of that file

    Args:
        meta_path: path to the META directory of an UMLS release
        filename: name of the file to get its column headers
    Returns:
        a list of column names
    """
    file_descriptors = f"{meta_path}/MRFILES.RRF"  # to get column names
    with open(file_descriptors) as fin:
        for line in fin:
            splits = line.split("|")
            found_filename = splits[0]
            column_names = (splits[2] + ",").split(
                ","
            )  # ugly hack because all files end with an empty column
            # NOTE(review): this is a substring containment test
            # (found_filename in filename), not equality — a short
            # descriptor name could match the wrong file; confirm '=='
            # was not intended.
            if found_filename in filename:
                return column_names
    # NOTE(review): this f-string has no placeholder (the file name is
    # not interpolated), and assert statements are stripped under -O;
    # consider raising an exception instead.
    assert False, f"Couldn't find column names for file (unknown)"
    return None
788bca5a94e2f7c40f09bc1804d7c2e1e31f2c2b
42,257
def cate_init(*arg, **kwargs):
    """
    No actual use, just demonstrates the signature of an Cate entry
    point callable.

    :param arg: any arguments (not used)
    :param kwargs: any keyword arguments (not used)
    :return: the (args, kwargs) pair, purely for inspection
    """
    result = (arg, kwargs)
    return result
b7b81c9c4c32a59acb3931c9f57cad8137f7a4ec
42,258
from datetime import datetime


def year_fraction(date):
    """Obtain the fraction of the year that a given date represents.

    Args:
        date (datetime): a datetime object.

    Returns:
        float representing the fraction of the year.
    """
    start_of_year = datetime(year=date.year, month=1, day=1)
    start_of_next = datetime(year=date.year + 1, month=1, day=1)
    # Mid-day convention: subtract half a day so Jan 1st maps to half a
    # day's worth of the year rather than exactly zero.
    elapsed = date.timetuple().tm_yday - 0.5
    return elapsed / (start_of_next - start_of_year).days
7b600f38ad0862eff2568ed5f460487971010efb
42,259
from pathlib import Path import os def _expand(self): """ Fully expand and resolve the Path with the given environment variables. Example ------- >>> Path('$HOME').expand() >>> PosixPath('/p/home/blaylock') """ return Path(os.path.expandvars(self)).expanduser().resolve()
34c2f63c66658ac12d7f8b98fba1b4d8bbb5412b
42,260
def xywh2rect(xywh, indices=(0, 1, 2, 3)):
    """Convert center-size boxes (x, y, w, h) into corner boxes
    (x1, y1, x2, y2) along the last dimension.

    :param xywh: tensor of boxes in center-size format
    :param indices: positions of x, y, w, h within the last dimension
    :return: a new tensor holding top-left and bottom-right corners
    """
    xi, yi, wi, hi = indices
    half_w = xywh[..., wi] / 2
    half_h = xywh[..., hi] / 2
    rect = xywh.clone()
    rect[..., xi] = xywh[..., xi] - half_w
    rect[..., yi] = xywh[..., yi] - half_h
    rect[..., wi] = xywh[..., xi] + half_w
    rect[..., hi] = xywh[..., yi] + half_h
    return rect
232300c157027a2028a90b4594d83d13fce9f447
42,261
import struct
import math


def vector_value_comparison(out, expected):
    """
    Compare a v128 runtime value against an expected value.

    out likes "<number number>:v128"
    expected likes "[number number]:lane_type:v128"

    Integer lane types are compared bit-exactly. Float lane types are
    compared per lane: two NaNs match, exact equality matches, and
    otherwise a 7-significant-digit textual comparison is the fallback.
    """
    out_val, out_type = out.split(':')
    # <number number> => number number
    out_val = out_val[1:-1]
    expected_val, lane_type, expected_type = expected.split(':')
    # [number number] => number number
    expected_val = expected_val[1:-1]

    assert "v128" == out_type, "out_type should be v128"
    assert "v128" == expected_type, "expected_type should be v128"

    if out_type != expected_type:
        return False
    if out_val == expected_val:
        return True

    out_val = out_val.split(" ")
    expected_val = expected_val.split(" ")

    # Both sides are stored as two 64-bit lanes and reinterpreted below.
    out_packed = struct.pack("QQ", int(out_val[0], 16), int(out_val[1], 16))
    expected_packed = struct.pack(
        "QQ",
        int(expected_val[0], 16) if "0x" in expected_val[0] else int(expected_val[0]),
        int(expected_val[1], 16) if "0x" in expected_val[1] else int(expected_val[1]),
    )

    if lane_type in ["i8x16", "i16x8", "i32x4", "i64x2"]:
        return out_packed == expected_packed

    assert lane_type in ["f32x4", "f64x2"], "unexpected lane_type"
    fmt = "ffff" if "f32x4" == lane_type else "dd"
    out_lanes = struct.unpack(fmt, out_packed)
    expected_lanes = struct.unpack(fmt, expected_packed)

    for o, e in zip(out_lanes, expected_lanes):
        # Fix: the original tested `if out_is_nan and expected_is_nan`
        # on two non-empty *lists* of booleans, which is always truthy,
        # so every float comparison returned True. Compare lane by lane
        # instead, treating NaN == NaN as a match.
        if math.isnan(o) and math.isnan(e):
            continue
        if math.isnan(o) or math.isnan(e):
            return False
        if o == e:
            continue
        # Fix: the textual fallback zipped against expected_packed (the
        # raw bytes) instead of the unpacked lane values.
        if "{:.7g}".format(o) != "{:.7g}".format(e):
            return False
    return True
5b81391fce8165375cbea64c9c9c63333c730b0f
42,264
def dec_to_bit(value, bits=8):
    """Little-endian bit list of *value*, in groups of *bits*.

    bits=8: 42 -> [False, True, False, True, False, True, False, False]

    Values that do not fit in *bits* bits recurse: the higher bit
    groups are appended after the low group (least-significant first).
    """
    low = value % 2 ** bits
    # LSB-first extraction of the low `bits` bits.
    result = [bool((low >> i) & 1) for i in range(bits)]
    if value - low > 0:
        result = result + dec_to_bit(value // 2 ** bits)
    return result
cf58da8af8af8af15aa046207eaa4f6c93438918
42,265
def _GetHandlerFromRequest(request): """Safely extracts a request handler from a Request. Args: request: A webapp2.Request instance. Returns: The handler that corresponds to the given Request (which can be a class or method), or None if there is no such handler (e.g. 404's). """ route = getattr(request, 'route', None) if route is not None: return getattr(route, 'handler', None)
8fe0b6d2f68931cba6ab618b38704d2f21455eea
42,266
def templates(x1='', x2='', x3=''):
    """Match the given search terms against a template of the same arity.

    Keyword arguments:
    x1 -- first search term
    x2 -- second search term
    x3 -- third search term
    Return:
    (search string, template number)

    Raises an Exception when no first term was supplied.
    """
    if x1 and x2 and x3:
        return ' '.join((x1, x2, x3)), 3
    if x1 and x2:
        return ' '.join((x1, x2)), 2
    if x1:
        return x1, 1
    raise Exception('未正确输入查询词')
a2e06eef6aa7f395487c9afffb67a902a3151982
42,267
def input_parameter_name(name, var_pos):
    """Generate parameter name for using as template input parameter
    names in Argo YAML.

    For example, the parameter name "message" in the container template
    print-message in
    https://github.com/argoproj/argo/tree/master/examples#output-parameters.
    """
    # str() mirrors the %s conversion of the original format string.
    return "-".join(("para", str(name), str(var_pos)))
40a3d29274b141294e4b9cae83ddb84ae8e44188
42,270
def subscript(text: str) -> str:
    """
    Return the *text* surrounded by subscript HTML tags.

    Subscript text appears half a character below the normal line, and
    is sometimes rendered in a smaller font. Subscript text can be used
    for chemical formulas.

    >>> subscript("foo")
    '<sub>foo</sub>'
    """
    return "<sub>" + text + "</sub>"
48cfc245c863b569aef83743ca7d39a1e02878da
42,272
def confirm(s: str = ''):
    """
    Ask yes/no, retry if invalid.

    :param str s: prompt
    :return: bool
    """
    # Loop until the user types something matching a yes or no pattern.
    while True:
        value = input('> {} [y/n]: '.format(s)).lower()
        if value:
            # NOTE(review): these are substring checks, not membership
            # in a set of words — e.g. 'es' or 'rt' is inside 'yesrtui'
            # and counts as yes; 'vb' counts as no. The extra letters
            # look like keyboard-adjacent aliases for y/n; confirm this
            # loose matching is intended.
            if value in 'yesrtui':
                return True
            elif value in 'novbm,':
                return False
32d7d8b8ccef3516a76492bde7038db84a17c67a
42,273
def _get_valid_embedding(Er, len_q, len_k): """ ---------- Author: Damon Gwinn ---------- Gets valid embeddings based on max length of RPR attention ---------- """ len_e = Er.shape[0] start = max(0, len_e - len_q) return Er[start:, :]
3a6f4e20de4ce9c5abc4aacbdba97874b590f429
42,275
def is_point_in_rect2(point, rect_center, rect_w, rect_h):
    """Checks whether is coordinate point inside the rectangle or not.

    Rectangle is defined by center and linear sizes.

    :type point: list
    :param point: testing coordinate point
    :type rect_center: list
    :param rect_center: point, center of rectangle
    :type rect_w: float
    :param rect_w: rectangle width
    :type rect_h: float
    :param rect_h: rectangle height
    :rtype: boolean
    :return: boolean check result
    """
    px, py = point
    cx, cy = rect_center
    # Inside iff within half-width and half-height of the center.
    return abs(px - cx) <= rect_w / 2.0 and abs(py - cy) <= rect_h / 2.0
8ced860e0bee60d287aba796414d3b3f46a436b5
42,276
from typing import Tuple
from typing import List
from typing import Set


def get_neighbours(p: Tuple[int], offsets: List[tuple]) -> Set[tuple]:
    """Get all of the neighbours of a point.

    One neighbour per offset, formed by element-wise addition of the
    offset onto the point.
    """
    return {
        tuple(p[dim] + offset[dim] for dim in range(len(p)))
        for offset in offsets
    }
24bbcde11212d2cd86cd0ebe93a343de9c06e1a3
42,277
def retireDoublon(texte):
    """
    parameter texte : (str)
    return value : (str) the text with consecutive identical letters
    collapsed to a single occurrence

    >>> retireDoublon('passionnant')
    'pasionant'
    >>> retireDoublon('cocorico')
    'cocorico'
    >>> retireDoublon('')
    ''
    """
    # Fix: the original indexed texte[0] unconditionally and raised
    # IndexError on an empty string.
    if not texte:
        return texte
    sortie = texte[0]
    for caractere in texte[1:]:
        if sortie[-1] != caractere:
            sortie += caractere
    return sortie
04f573d8dfdf0f02e9ca68c00ca2a65c877ec3fc
42,278
def createKey(source, dest):
    """
    Convenience function for creating a substitution key from two alphabets.

    The key will substitute symbols from the source alphabet with the
    symbol at the corresponding positon form the destination alphabet.
    """
    if len(source) != len(dest):
        raise Exception("The alphabets are not of equal length")
    mapping = {}
    for src_sym, dst_sym in zip(source, dest):
        if src_sym in mapping:
            raise Exception("Duplicate key " + src_sym)
        mapping[src_sym] = dst_sym
    return mapping
f31b2d27ac88911955ecd5aba38745a21274a12d
42,279
def mock_dataset_with_config(mocker, mock_dataset, dataset_config_response):
    """Returns an example dataset, mocked to return a configuration."""
    # Patch .get() so any fetch returns the canned configuration payload.
    mocker.patch.object(
        mock_dataset, 'get', return_value=dataset_config_response)
    # NOTE(review): the bare attribute access below discards its value —
    # presumably it triggers lazy loading of .attributes before the
    # dataset is handed to the test; confirm it is not a leftover.
    mock_dataset.attributes
    return mock_dataset
0b902a1447b460e0acf2ae5cac7d2223ae6e2b42
42,280
def list_multiply(LIST_A, LIST_B):
    """Sum two lists of integers and multiply the sums together.

    >>> list_multiply([3,4],[3,4])
    49
    >>> list_multiply([1,2,3,4],[10,20])
    300
    """
    # sum() replaces the manual accumulator loop and the while/break
    # counter loop of the original; empty lists sum to 0.
    return sum(LIST_A) * sum(LIST_B)
8a20f2cbcaba8087882e8ecb1f4d60b8f1afa72f
42,281
import re


def reg_exp(regex, str):
    """Match *regex* against the beginning of *str*.

    Returns the match object, or None when the pattern does not match.
    (The second parameter shadows the builtin ``str``; the name is kept
    for interface compatibility with existing callers.)
    """
    # re.compile(...).match(...) is equivalent to re.match(...).
    return re.compile(regex).match(str)
121e8e5d96a180e0d5f3919a12fb5d5e232d4e25
42,282
from unittest.mock import Mock


def mock_pipelines(*statuses):
    """Fake implementation for ``merge_request.pipelines()``.

    Returns a Mock whose call yields one ``{'status': ...}`` dict per
    given status.
    """
    pipelines = [{"status": s} for s in statuses]
    return Mock(return_value=pipelines)
b827489c083710ced16364fb8fd6f5799433850d
42,283
import string


def get_filter(cutter: dict) -> list:
    """Build filter groups from ``cutter["filter"]``.

    Groups are separated by ``'||'`` and alternatives inside a group by
    ``'|'``. Star names such as ``*digits`` expand to the characters of
    the matching ``string`` module constant.

    :param cutter: dict with a ``"filter"`` key holding the raw spec
    :return: list of sets, one per group
    """
    groups = [set(raw.split("|")) for raw in cutter["filter"].split("||")]
    star_names = ("*ascii_letters", "*ascii_lowercase", "*ascii_uppercase",
                  "*digits", "*punctuation")
    for group in groups:
        for star in star_names:
            if star in group:
                group.discard(star)
                # Expand to the individual characters of the constant.
                group.update(getattr(string, star[1:]))
    return groups
695a3b121537e92bebced613e2f75a702827b8dd
42,285
def readonly_iam_policy():
    """Generate a readonly policy.

    Returns an AWS IAM policy document (as a JSON string) that allows
    only the read-only actions ``iam:Get*`` and ``iam:List*`` on all
    resources.
    """
    return '''{
    "Version": "2012-10-17",
    "Statement": {
        "Effect": "Allow",
        "Action": [
            "iam:Get*",
            "iam:List*"
        ],
        "Resource": "*"
    }
}
'''
8269c25ff18937f3fa4b0ca0b31c0cf7b53c6488
42,286
import subprocess


def _run_gn_format(build_gn: str) -> str:
    """Formats the input GN file contents, and returns the formatted output.

    Runs ``gn format --stdin`` with *build_gn* on stdin. On failure the
    captured stdout/stderr are printed before the error is re-raised.
    """
    try:
        return subprocess.check_output(
            ["gn", "format", "--stdin"],
            stderr=subprocess.PIPE,
            text=True,
            input=build_gn,
        )
    except subprocess.CalledProcessError as err:
        for label, stream in (("stdout", err.stdout), ("stderr", err.stderr)):
            print()
            print(label)
            print(stream)
        raise err
760e9b63c886504a5aadd9786045f1b3b9077a77
42,287
def force_delegate(func):
    """Decorator marking *func* so delegation applies even when
    ``cls.delegate`` is False.

    Sets the ``_force_delegate`` flag read elsewhere and returns the
    function unchanged.
    """
    setattr(func, "_force_delegate", True)
    return func
4718310ccae730ef3c8b9aac32c6a60f63a0afe4
42,288
def txt_cln(s):
    """Prepare a string for processing in the JTMS.

    Replaces apostrophes with ``prime-`` and collapses ``--`` into ``-``.
    """
    cleaned = s.replace("'", "prime-")
    cleaned = cleaned.replace("--", "-")
    return cleaned
decd097dcd5dad4ffcb4af5bbe862da830ed4c9f
42,289
from datetime import datetime


def change_datetime_format(datetime_value: str, old: str, new: str):
    """Re-format a date/time string from format *old* to format *new*."""
    parsed = datetime.strptime(datetime_value, old)
    return parsed.strftime(new)
cbc6ea7a11607b39d1bf222c6bb120c5772ea8c0
42,290
import grp


def parse_gid(gid):
    """Parse group id.

    Arguments:
        gid (str, int): Actual gid, or the name of a group.

    Returns:
        int: The actual gid of the group.

    Raises:
        KeyError: If the named group does not exist.
    """
    try:
        return int(gid)
    except ValueError:
        # Not numeric — fall through to a name lookup.
        pass
    try:
        return grp.getgrnam(gid).gr_gid
    except (AttributeError, KeyError):
        raise KeyError(f'Group does not exist: {gid}')
d80c9f7e41b084449f3d1604e513936fecb2335e
42,292
import re


def old_str_format_to_new(string):
    """Convert printf-style ``%d`` placeholders to ``str.format`` style.

    Works for digits only: ``%05d`` is converted to ``{:05d}``.

    :param string: text containing old-style placeholders
    :return: text with new-style placeholders
    """
    pattern = re.compile(r"%(\d*d)")
    return pattern.sub(r"{:\1}", string)
000cd7d471b0b211fcd10ef73e4b9f4eb7f14d61
42,293
def _collapse_consecutive_gaps(a, b): """Collapse consecutive gaps in an alignment between two sequences. For example, the alignment ATC----GA ATCATCGGA would become ATC-GA ATCAGA Args: a, b: two aligned sequences Returns: tuple (a', b') where a' and b' represents an alignment of a and b, with gaps collapsed """ assert len(a) == len(b) a_ccg, b_ccg = a[0], b[0] for i in range(1, len(a)): if a[i-1] == '-': if a[i] == '-': # Skip this position; we are already in a gap continue else: # We are out of the gap for a; be sure to include # this position a_ccg += a[i] b_ccg += b[i] elif b[i-1] == '-': if b[i] == '-': # Skip this position; we are already in a gap continue else: # We are out of the gap for b; be sure to include # this position a_ccg += a[i] b_ccg += b[i] else: a_ccg += a[i] b_ccg += b[i] return (a_ccg, b_ccg)
bd5f550edd95b63a70c7bd30d9cb2270919cc1b5
42,295
def variable_om_cost_rule(mod, prj, tmp):
    """Expression rule: variable O&M cost of project *prj* at timepoint *tmp*.

    The cost is the power consumed for fuel production multiplied by the
    project's per-MWh variable O&M cost parameter.

    NOTE(review): assumes ``mod`` is a Pyomo-style model exposing
    ``Fuel_Prod_Consume_Power_PowerUnit`` and ``variable_om_cost_per_mwh``
    — confirm against the module that registers this rule.
    """
    return (
        mod.Fuel_Prod_Consume_Power_PowerUnit[prj, tmp]
        * mod.variable_om_cost_per_mwh[prj]
    )
264881eb0dac6b2044ff5c937f35b8ae96cf9308
42,297
import json


def json_to_dict(col):
    """Given a json object as bytes, convert it to a Python dictionary.

    Dicts pass through unchanged; bytes are decoded as UTF-8 before
    parsing.

    :param col:
    :type col: bytes
    :rtype: dict
    """
    if isinstance(col, dict):
        return col
    payload = col.decode("utf-8") if isinstance(col, bytes) else col
    return json.loads(payload)
920a28a6070998d12c176dd27be2ede263c265d0
42,298
import subprocess


def get_running_container_names():
    """
    get the names of all currently running containers

    Shells out to ``docker ps`` and returns the lower-cased container
    names, with surrounding quotes stripped and empty lines dropped.
    Requires the docker CLI to be on PATH.
    """
    return [
        # Strip the quotes added by the --format template below.
        f.replace('"', "")
        for f in (
            subprocess.run(
                ["docker", "ps", "--format", '"{{.Names}}"'], stdout=subprocess.PIPE
            )
            .stdout.decode("utf-8")
            .lower()
            .split("\n")
        )
        if len(f) > 0
    ]
c6f357a12444b3816228f6d226d61a27b9cbaafc
42,299
def two_oldest_ages(ages):
    """Return the two highest numbers of *ages*.

    The result is ``[second oldest age, oldest age]``. The input may be
    in any order and always contains at least two items.

    :param ages: an array of numbers.
    :return: the highest two values within the array.
    """
    ordered = sorted(ages)
    return ordered[-2:]
d7362b9fdd3e266521453446abe30c6c48be41b6
42,301
import re


def get_numbers(address_string_no_postcode):
    """Retrieve all numbers in an address that are not part of the postcode.

    :param address_string_no_postcode: address text with the postcode removed
    :return: list of digit-run strings, in order of appearance
    """
    # Raw string: "\d" is an invalid escape sequence (DeprecationWarning,
    # eventually an error) on modern Python.
    num_list = re.findall(r"\d+", address_string_no_postcode)
    return num_list
d64d05541c423ab391ca3fcdd58471039b5bc481
42,302
import fnmatch


def find_file(contents, pattern):
    """
    Find the file matching the given filename pattern.

    Searches the dictionary of Debian package archive entries reported by
    :func:`deb_pkg_tools.package.inspect_package()`.

    :param contents: The dictionary of package archive entries.
    :param pattern: The filename pattern to match (:mod:`fnmatch` syntax).
    :returns: The metadata of the matched file.
    :raises: :exc:`exceptions.AssertionError` if zero or more than one
             archive entry is found.
    """
    matches = [metadata for filename, metadata in contents.items()
               if fnmatch.fnmatch(filename, pattern)]
    assert len(matches) == 1, "Expected to match exactly one archive entry!"
    return matches[0]
efd0a5e718e8a487f653247b1dba06b2f39e3292
42,304
import struct


def is_native_endian_big():
    """
    Determine whether the host machine's native endian is big or not.
    """
    # Compare the native byte order packing against explicit big-endian.
    native = struct.pack('@H', 0x00FF)
    big = struct.pack('>H', 0x00FF)
    return native == big
7ec5c538baaae6e1a54ac797c566c431b8403610
42,307
from typing import List
from typing import Dict


def remove_duplicate(lst: List[Dict]) -> List[Dict]:
    """Return a copy of *lst* keeping only the first dict per ``id``.

    Order of first occurrences is preserved.

    :param lst: dicts, each carrying an ``'id'`` key
    :return: list with one entry per distinct id
    """
    # A set gives O(1) membership tests; the original used a list (O(n)
    # per lookup, O(n^2) overall).
    seen_ids = set()
    result = []
    for item in lst:
        if item['id'] not in seen_ids:
            result.append(item)
            seen_ids.add(item['id'])
    return result
d921d1a7d257b99690bb9522db67de273b35dcd6
42,308
def linear(stimulus):
    """Identity (linear) activation: return the stimulus unchanged."""
    response = stimulus
    return response
e4bc3d7cc01d47e33ba5ddab2f5696a9010a438a
42,309
def _pipe_separated(val): """ Returns *val* split on the ``'|'`` character. >>> _pipe_separated("a|b|c") ['a', 'b', 'c] """ return [s.strip() for s in val.split('|') if s.strip()]
9412e939ab8cc095cbaac9fcee58a765a7557efd
42,310
import socket


def to_address(hostname, family=socket.AF_UNSPEC, socktype=socket.SOCK_STREAM):
    """
    Resolve a hostname to an address, preferring IPv4 addresses.

    Given a string containing a DNS hostname, this function resolves the
    hostname to an address. The function prefers IPv4 addresses, but will
    return IPv6 addresses if no IPv4 addresses are present in the result
    from getaddrinfo. If the hostname does not resolve, the function
    returns None rather than raise an exception.

    NOTE(review): the original docstring mentions an LRU cache, but none
    is visible here — presumably caching is applied by a decorator at the
    call site; confirm.

    :param str hostname: The hostname to resolve to an address
    :returns: The resolved address, or None on resolution failure
    """
    result = None
    try:
        # NOTE(review): the loop variable reuses the name of the `family`
        # parameter — confirm the shadowing is intentional.
        for (family, _, _, _, sockaddr) in socket.getaddrinfo(
                hostname, None, family, socktype):
            if family == socket.AF_INET:
                # First IPv4 hit wins immediately.
                result = sockaddr[0]
                break
            elif family == socket.AF_INET6 and not result:
                # Remember the first IPv6 address only as a fallback.
                result = sockaddr[0]
    # XXX Workaround LP #1154599
    # This should be socket.gaierror instead of socket.error
    except socket.error:
        pass
    return result
a164fc21e2efa143137b07c63acbb3ba8062ede7
42,311
import os


def dirtool(operation, directory):
    """Tools for directories: existence check, creation, and deletion.

    :param operation: one of ``'exists'``, ``'create'`` or ``'delete'``
        (case-insensitive)
    :param directory: path of the directory to operate on
    :return: bool for ``'exists'``; None for the other operations
    :raises ValueError: if an unknown operation is provided.
    """
    operation = operation.lower()
    if operation == 'exists':
        # os.path.exists already returns a bool; the original wrapped it
        # in a redundant bool() call.
        return os.path.exists(directory)
    if operation == 'create':
        os.makedirs(directory)
    elif operation == 'delete':
        os.rmdir(directory)
    else:
        raise ValueError('Invalid operation provided.')
50d0ff27213b884d8105536f5071d7fbe7053755
42,312
def sanatize(input):
    """
    Convert input command line arguments into the upper-case format
    contained by documents.
    """
    normalized = input.upper()
    return normalized
5815ed7c7c1b42f9be5dca7772a8e6fe48d5b576
42,313
def paginator(context, adjacent_pages=2, extra_class=""):
    """
    To be used with a Django paginator. Must be stored in the context, as
    either 'objects', or 'page'. Adds pagination context variables for use
    in displaying first, adjacent and last page links in addition to those
    created by the object_list generic view.

    :param context: template context containing ``objects``/``page`` and
        ``request``
    :param adjacent_pages: how many page links to show on each side of the
        current page
    :param extra_class: extra CSS class passed through to the template
    """
    # The paginated page may be stored under either key.
    objects = context.get('objects') or context.get('page')
    paginator = objects.paginator
    # Window of page links around the current page, clamped to >= 1;
    # snap to 1 when the window would start within the first 3 pages.
    startPage = max(objects.number - adjacent_pages, 1)
    if startPage <= 3:
        startPage = 1
    # End of the window (exclusive); snap to the last page when close.
    endPage = objects.number + adjacent_pages + 1
    if endPage >= paginator.num_pages - 1:
        endPage = paginator.num_pages + 1
    page_numbers = [n for n in range(startPage, endPage) \
            if n > 0 and n <= paginator.num_pages]
    # Preserve the other GET parameters (minus 'page') so page links keep
    # the current filters.
    query = ''
    get_params = context.get('request').GET.copy()
    if get_params:
        if 'page' in get_params:
            del get_params['page']
        query = "&" + get_params.urlencode()
    return {
        'query': query,
        'objects': objects,
        'page': objects.number,
        # 'pages': paginator.num_pages,
        'page_numbers': page_numbers,
        # Show explicit first/last links only when outside the window.
        'show_first': 1 not in page_numbers,
        'show_last': paginator.num_pages not in page_numbers,
        'extra_class': extra_class
    }
a01f0a10aba07bfea1b8e7c60174c740dce35300
42,315
def encode_incarnation(incarnation: int, *, signed: bool = False) -> bytes:
    """
    Encode the given incarnation integer as 8-byte BigEndian buffer.
    """
    return incarnation.to_bytes(8, byteorder='big', signed=signed)
cfbac4fc7ee53047b8f6a1f6875dd35dc065eadb
42,316
def update_variables_momentum(alpha, beta1, var, grad, v):
    """
    Update a variable using gradient descent with momentum.

    alpha is the learning rate
    beta1 is the momentum weight
    var is a numpy.ndarray containing the variable to be updated
    grad is a numpy.ndarray containing the gradient of var
    v is the previous first moment of var
    Returns: the updated variable and the new moment, respectively
    """
    new_moment = beta1 * v + (1 - beta1) * grad
    updated = var - alpha * new_moment
    return updated, new_moment
c52c936914829ee115b5f493c8a96fd3088d5a23
42,317
import random import logging def test_sub_port(testbed_params): """Select a test sub port.""" test_sub_port = random.choice(testbed_params.keys()) logging.info("Select test sub port %s", test_sub_port) return test_sub_port
8a5fa38498ec4055d910034355b3b6feb6db3861
42,318
def positionVectorFromModel(model):
    """Return a tuple of (token, value) pairs from *model*.

    Used to match a token with a position in a vector.
    """
    pairs = model.items()
    return tuple(pairs)
b0c3e6c8df7a2a8e11120eff1f30712f05bf195a
42,319
def graph_to_string(graph):
    """Render a sokoban puzzle grid dict as a string.

    Requires:
        graph is a dict whose keys are (column, row) integer tuples and
        whose values are one-character strings covering every cell of the
        rectangle.
    Ensures:
        a string representation with one text line per row (top to
        bottom), each terminated by a newline.
    """
    # Generator expressions avoid the original max(list(map(lambda ...)))
    # round-trips; str.join avoids quadratic string concatenation.
    max_x = max(key[0] for key in graph)
    max_y = max(key[1] for key in graph)
    lines = []
    for r in range(max_y + 1):
        lines.append(''.join(graph[(c, r)] for c in range(max_x + 1)))
    return '\n'.join(lines) + '\n'
f153c8067eb7b67aedc5cb63f7991cc65defb089
42,320
def merge_headers(event):
    """
    Merge the values of headers and multiValueHeaders into a single dict.
    Opens up support for multivalue headers via API Gateway and ALB.
    See: https://github.com/Miserlou/Zappa/pull/1756
    """
    single = event.get('headers') or {}
    merged = (event.get('multiValueHeaders') or {}).copy()
    for name in set(merged) | set(single):
        if name not in merged:
            # Header only in the single-value dict: wrap it in a list.
            merged[name] = [single[name]]
        elif name in single:
            # Present in both: the single value joins the multi list.
            merged[name].append(single[name])
        merged[name] = ', '.join(merged[name])
    return merged
89368eb67a0c74cc048a5b1fd5787ba366f53996
42,321
def round_if_int(val):
    """Convert an integer-valued float to int; leave anything else as-is."""
    if isinstance(val, float) and val.is_integer():
        return int(val)
    return val
b0d50f8aec6ab7781b390faaf8c68733992dc2c8
42,322
def get_region():
    """Returns the default GCP region for running a job."""
    # Hard-coded default; callers needing a different region must override
    # at the call site.
    return "us-central1"
50721885bdadc9b701533a7932a8d02fb524160c
42,323
import os


def get_dataset_file_path(date, filename):
    """Produce the filepath for a dataset file.

    The data directory is resolved two levels above the current working
    directory, e.g. when called from ``results/<date>/runall.py`` it
    yields ``<project_root>/data/<date>/<filename>``.

    :param date: date folder name, e.g. "2020-04-13"
    :param filename: csv filename
    :return: absolute path string
    """
    here = os.path.abspath('')
    project_root = os.path.abspath(os.path.join(here, "..", ".."))
    return project_root + "/data/" + date + "/" + filename
c109e110748879ebf2835a24a76f1de447020b4b
42,325
def omit_falsy(collection: list):
    """
    Remove falsy entries from a list, returning None if no entries remain.
    """
    # filter(None, ...) is the idiomatic way to keep only truthy entries
    # (replaces the original identity lambda).
    new_list = list(filter(None, collection))
    return new_list or None
8f60e6de4671f2029e3c1870ee05849501eb988d
42,326
import re


def replace_links(doc, module, nested):
    """Replace cross reference links ``@@package.foo.bar`` with markdown
    links relative to *module*.

    Falsy *doc* values are returned unchanged.
    """
    if not doc:
        return doc

    def build_link(match):
        target = match.group(1)
        tail = target.split('.')[1:]
        if nested:
            levels_up = len(module.split('.')) - 2
            prefix = '/'.join(['.'] + ['..'] * levels_up)
            rel_path = '/'.join([prefix, '/'.join(tail)])
        else:
            rel_path = './' + '.'.join(tail)
        return '[{}]({}.md)'.format(target, rel_path)

    return re.sub(r'@@([^\s]*)', build_link, doc)
6a517ad7b0d9d3f9895fdebe128a7776bae21da1
42,327
def contributors():
    """
    Add your name in the list below, in the same PR as the signed CLA.
    If your contribution is sponsored by an organization, you can also
    add the following entry: "organization=[YOUR ORG]".
    """
    # Keys are sequential entry numbers; values are "key=value" strings.
    return {
        "1": ["name=Mihai Bojin", "email=mihai.bojin@gmail.com"],
    }
78a63a5f4aa15b3c26ec61bf422549999ac83bd2
42,328
import argparse


def parse_arguments():
    """Parses command line arguments.

    Returns:
      Parsed arguments as an object.
    """
    parser = argparse.ArgumentParser()
    required = parser.add_argument_group("required arguments")
    for flags, help_text in (
        (("--build-events", "-b"), "Path to JSON Build Events File"),
        (("--destination", "-d"), "Path to a destination directory for artifacts"),
    ):
        required.add_argument(*flags, action="store", type=str,
                              help=help_text, required=True)
    return parser.parse_args()
a5308eaa924bb73cb351729638e69904a7089ec4
42,329
def remove_g_tags(path):
    """Return the contents of the file at *path* with every ``<g>`` and
    ``</g>`` tag removed.

    :param path: path of the file being corrected
    :return: corrected file contents as a single string
    :raises OSError: if the file cannot be opened.
    """
    # The context manager fixes the original's file-handle leak, and a
    # plain str.replace removes every tag — including the back-to-back
    # and end-of-line cases the original manual scanner mishandled (it
    # could raise IndexError when a tag ended a line).
    with open(path, 'r') as file:
        content = file.read()
    return content.replace("<g>", "").replace("</g>", "")
d25f343d9bf7e493d6c0a12994e93abb50d05eaf
42,330
def validate_sortkind(kind):
    """Validate a sorting-algorithm name.

    :param kind: name of the algorithm; must be one of "quicksort",
        "mergesort", "heapsort" or "stable".
    :raises TypeError: if *kind* is not a string.
    :raises ValueError: if the name is not a recognised algorithm.
    :return: None
    """
    allowed = ["quicksort", "mergesort", "heapsort", "stable"]
    if not isinstance(kind, str):
        raise TypeError(
            "Kind: Sorting name must be a string. "
            "Got instead type {}".format(type(kind))
        )
    if kind not in allowed:
        raise ValueError(
            "Kind: Got an invalid name: '{}'. "
            "Options are: {}".format(kind, allowed)
        )
    return None
a1e18b6f1855b3e2745351acb2ba4152cfa405ed
42,331
import os


def get_path_from_project_sub_folder(sub_folder, filename='') -> str:
    """
    Return a file path (or a folder path when *filename* is empty) rooted
    at this module's directory.

    :param sub_folder: sub-folder name under the module directory
    :param filename: optional file name inside the sub-folder
    :return: path string joined with ``os.sep``
    """
    module_dir = os.path.join(os.path.dirname(__file__))
    return os.sep.join([module_dir, sub_folder, filename])
9b8518870f3b0853c8d1524a55fa00bcd6542907
42,332
from typing import Any
from pydantic import BaseModel # noqa: E0611
import importlib
def load_model(model: Any, path: str, backend: str) -> BaseModel:
    """
    Load a pre-trained model given the backend
    :param model: any neural network models, may be None
    :param path: string, folder under which the old model is saved
    :param backend: string, name of the backend
    :return: Model (from nnimgproc.model)
    """
    # Resolve the backend module dynamically, e.g. backend="keras" imports
    # nnimgproc.backend.keras; that module must expose load(model, path).
    lib = importlib.import_module('nnimgproc.backend.%s' % backend)
    return lib.load(model, path)
3ada11bfbaecce713250c14c3dff6dcfb9222266
42,333