content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def images_rescale(arFrames):
    """Rescale an array of RGB images from [0, 255] to [-1.0, 1.0].

    :param arFrames: Array of frames with values in [0, 255].
    :return: Array of rescaled frames in [-1.0, 1.0].
    """
    scaled = arFrames / 127.5
    scaled -= 1.
    return scaled
63f823b529a39300e15245e2326edf3b1497edb5
15,279
def model_ids(models):
    """Generates a list of model ids from a list of model objects."""
    ids = []
    for model in models:
        ids.append(model.pk)
    return ids
a938434df3805c77c0446ab3f4fec4161fbaa39e
15,282
import functools def _expand_variable_args(func): """ Expand single positional argument into multiple positional arguments with optional keyword dicts. Permits e.g. `get(('t', {'lat': 'mean'}))` tuple pairs. """ @functools.wraps(func) def _wrapper(self, *keys, **kwargs): args = [] kwargs = kwargs.copy() def _iter_args(*iargs): # noqa: E306 for arg in iargs: if isinstance(arg, (tuple, list)): _iter_args(*arg) elif isinstance(arg, str): args.append(arg) elif isinstance(arg, dict): kwargs.update(arg) else: raise ValueError(f'Invalid variable spec {arg!r}.') _iter_args(*keys) return func(self, *args, **kwargs) return _wrapper
afe9604e888636704cdeb6cb7ad545d2cff5138e
15,283
def ignore_dt(dt_anno, index, difficulty):
    """
    Indicates whether to ignore detection

    Args:
        dt_anno [dict]: Detection annotation
        index [int]: Detection index
        difficulty [int]: Difficulty index

    Returns
        ignore [bool]: Ignore flag
    """
    # Placeholder policy: never ignore a detection.
    return False
28fa46defcf3effa49cf0eb575c0da7deeeb53e4
15,285
def _v_str_ ( self , fmt = ' %g' ) : """Self-printout of SVectors: (...) """ index = 0 result = '' while index < self.kSize : if 0 != index : result += ', ' result += fmt % self.At( index ) index += 1 return "( " + result + ' )'
b983051bb6a097b8cbdffffab53f6b133117fe74
15,286
def get_parameters(item: str, block: list) -> list:
    """
    evaluate a class of items to download and return the list of actual
    objects to download.

    :return: list
    """
    if item != "all":
        return [item]
    # "all" expands to the remaining entries; note the marker is removed
    # from the caller's list in place.
    block.remove("all")
    return block
5acdd48956678d5407d9a9fae8945aeabb10d684
15,288
def is_numeric(obj: str) -> bool:
    """ Returns True if argument can be coerced to type 'float' """
    try:
        float(obj)
    except ValueError:
        return False
    return True
b75ac54e7cd29406f6ef02882b89d1ab884a09c0
15,289
import string


def char_input():
    """Choose password character options and return list of choices."""
    char_list = []
    # Candidate character pools, aligned index-for-index with input_list.
    chars = [
        string.ascii_uppercase,
        string.ascii_lowercase,
        string.digits,
        "!@#$%^&*"
    ]
    input_list = [
        "uppercase letters (A-Z)",
        "lowercase letters (a-z)",
        "numbers (0-9)",
        "special characters (!@#$%^&*)"
    ]
    for i in range(4):
        # Re-prompt until the user answers exactly 'yes' or 'no'
        # (case-insensitive).
        while True:
            choice = input(
                "Include {}? 'yes' or 'no': "
                .format(input_list[i])
            ).lower()
            if choice == "yes":
                char_list.append(chars[i])
                break
            elif choice == "no":
                break
            else:
                print("Ah ah ah, you didn't say the magic word!")
    return char_list
418e1a118135039130ef4eb0f40f0d4fdad96b45
15,290
def get_or_make_group(ec2, name, vpc_id=None, quiet=False):
    """
    Get the EC2 security group of the given name, creating it if it
    doesn't exist
    """
    # NOTE(review): iterates every security group in the account; assumes
    # `ec2` is a boto3-style EC2 service resource — confirm with callers.
    groups = ec2.security_groups.all()
    # Match on both name and VPC so same-named groups in other VPCs are ignored.
    groups = [g for g in groups if g.group_name == name and g.vpc_id == vpc_id]
    if len(groups) > 0:
        return groups[0]
    else:
        if not quiet:
            print("Creating security group " + name)
        # Empty VpcId falls back to the account's default VPC.
        vpc_id = vpc_id if vpc_id is not None else ''
        sg = ec2.create_security_group(
            GroupName=name,
            Description='AbStar cluster group',
            VpcId=vpc_id)
        return sg
48b9c0a3fef99763e05e60bc6f2186f52f841031
15,292
import inspect


def kwargs_sep(fcn, kwargs):
    """Used to separate kwargs for multiple different functions

    Args:
        fcn: function
        kwargs: dict of keyword args

    Returns:
        dict for fcn keywords contained in kwargs"""
    # Parameter names that fcn's signature accepts.
    accepted = set(inspect.signature(fcn).parameters)
    # Keep only the entries whose key is an accepted parameter name.
    return {name: value for name, value in kwargs.items() if name in accepted}
016e8ab7e423e9c8f896bfd18213d323d58a129b
15,294
def mod_crop(img, scale):
    """Mod crop images, used during testing.

    Args:
        img (ndarray): Input image.
        scale (int): Scale factor.

    Returns:
        ndarray: Result image cropped so both spatial dims divide `scale`.
    """
    if img.ndim not in (2, 3):
        raise ValueError(f'Wrong img ndim: {img.ndim}.')
    cropped = img.copy()
    h, w = cropped.shape[0], cropped.shape[1]
    # Drop the trailing rows/columns that don't fit a whole scale block.
    return cropped[:h - h % scale, :w - w % scale, ...]
825f70fde2dd19ea069c14be77fdf8c6e8cc0d82
15,295
def guess_extension_from_headers(h):
    """
    Given headers from an ArXiV e-print response, try and guess what the
    file extension should be.

    Based on: https://arxiv.org/help/mimetypes
    """
    ctype = h.get("content-type")
    if ctype == "application/pdf":
        return ".pdf"
    if h.get("content-encoding") == "x-gzip":
        # All remaining known content types arrive gzip-compressed.
        gz_extensions = {
            "application/postscript": ".ps.gz",
            "application/x-eprint-tar": ".tar.gz",
            "application/x-eprint": ".tex.gz",
            "application/x-dvi": ".dvi.gz",
        }
        return gz_extensions.get(ctype)
    return None
5f9fabaf20311f6f2c564b613d50d28d07b3dfdd
15,297
def fast_exponentiation(base, exp, n):
    """
    Iteratively finds the result of the expression (base**exp) mod n
    via binary (square-and-multiply) exponentiation.
    """
    result = 1
    # Walk the exponent's bits from most to least significant.
    for bit in bin(exp)[2:]:
        result = (result * result) % n
        if bit == "1":
            result = (result * base) % n
    return result
c646a9226487fcac947160ee6d56a6a51b3106a9
15,299
def average_bounding_box(box):
    """Average a bounding box's coordinates to a midpoint.

    Args:
        box: sequence whose first element is a list of (lng, lat)
            coordinate pairs (typically the 4 corners of a bounding box).

    Returns:
        [lng, lat] midpoint of the coordinates.
    """
    points = box[0]
    # FIX: divide by the actual number of points instead of a hard-coded 4,
    # so non-quadrilateral rings are averaged correctly; for the documented
    # 4-point case the result is unchanged.
    count = len(points)
    lng = sum(p[0] for p in points) / count
    lat = sum(p[1] for p in points) / count
    return [lng, lat]
ca934edcab7b658aaeb263509c5dfe048e912873
15,300
def align_by_eq(lines):
    """make a nice aligned unit file """
    # `max_len` is unused; `out` collects finished lines; `kvs` buffers the
    # current run of (key, value) tuples and passthrough strings.
    max_len, out, kvs = 0, [], []

    def add_kvs(out, kvs, m=0):
        # Flush a pending run into `out`, padding keys to the longest key
        # length in the run.
        if not kvs:
            return
        kls = [len(kv[0]) for kv in kvs if isinstance(kv, tuple)]
        if kls:
            m = max(kls)
        # Passthrough lines are indented to line up with the value column
        # (key width + " = ").
        ind = ' ' * (m + 3)
        for kv in kvs:
            if not isinstance(kv, tuple):
                out.append(ind + kv)
            else:
                out.append('%s = %s' % (kv[0].ljust(m), kv[1]))

    # Consume the input destructively, one line at a time.
    while lines:
        line = lines.pop(0)
        if not line:
            # Blank lines travel with the current run untouched.
            kvs.append(line)
        elif line[0] == '[':
            # Section header: flush the previous run, then start a new one.
            add_kvs(out, kvs)
            out.append(line)
            kvs = []
        else:
            kv = line.split('=', 1)
            if len(kv) != 2:
                kvs.append(line)
            else:
                kvs.append((kv[0].rstrip(), kv[1].lstrip()))
                # Pull in backslash-continued lines following this value.
                while lines:
                    if not line.endswith('\\'):
                        break
                    line = lines.pop(0)
                    kvs.append(line)
    if kvs:
        add_kvs(out, kvs)
    return '\n'.join(out)
bfc51541b2522411a45e9c7626122a25a371e8fb
15,302
def parse_value(s):
    """
    Quick-and-dirty way to parse C-style float literal strings into
    d-attribute compatible strings.
    """
    # Strip a single trailing 'f' suffix (e.g. "1.5f" -> "1.5").
    return s[:-1] if s.endswith("f") else s
4464cc49137a198c8ac0707f6a5d0cafc82b758f
15,305
def evalRec(env, rec):
    """Quality check"""
    # Guard clauses: fail fast on genotype quality, strand bias, and
    # quality-by-depth thresholds.
    if rec.Proband_GQ < env.gq:
        return False
    if rec.FS > env.fs:
        return False
    if rec.QD < env.qd:
        return False
    return True
b76525a4078d77db28edd3e8623b9641fabc96bb
15,306
import statistics def _skew(values): """ This function calculates the skew value of the list of values which represents the difference between the mean and median which also corresponds to the skewness. Using the following formula , (1/((n-1)*(n-2)))*(sum over i { ((values[i]-mean(values))/(std_dev))**3) } n -> number of values std_dev -> standard deviation of all values For documentation of this function refer to SKEW function available in Google Sheets Args: values : Type-list of numbers could be floating points Returns: floating point number represeting the skew value of values """ std_dev = statistics.pstdev(values) mean = statistics.mean(values) size = len(values) # If there is no deviation we assume to not have any skewness if std_dev == 0: return 0 # If there are <=2 entries we assume to not have any skewness if size <= 2: return 0 # Summation of skewness of each element skew_value = 0 for x in values: skew_of_x = (x - mean) / std_dev skew_of_x = (skew_of_x)**3 skew_value += skew_of_x #Normalizing skewness with the size of data skew_value = (skew_value * size) / ((size - 1) * (size - 2)) return skew_value
bf564cc138a9e7a2196793fb4b27b00ed6ab4599
15,308
def decode(value, encoding='utf-8'):
    """Decode given bytes value to unicode with given encoding

    :param bytes value: the value to decode
    :param str encoding: selected encoding
    :return: str; value decoded to unicode string if input is a bytes,
        original value otherwise

    >>> from pyams_utils.unicode import decode
    >>> decode(b'Cha\\xc3\\xaene accentu\\xc3\\xa9e')
    'Chaîne accentuée'
    >>> decode(b'Cha\\xeene accentu\\xe9e', 'latin1')
    'Chaîne accentuée'
    """
    if isinstance(value, bytes):
        return value.decode(encoding)
    return value
8cef7b6d0367d5bff48f5c9b9cb2b5d7c4a883d9
15,309
from typing import Dict


def values_from_bucket(bucket: Dict[str, float]) -> set:
    """Get set of price formatted values specified by min, max and interval.

    Args:
        bucket: dict containing min, max and interval values

    Returns:
        Formatted set of values from min to max by interval
    """
    # Work in integer cents to avoid floating-point stepping errors.
    start = int(100 * bucket['min'])
    stop = int(100 * bucket['max'])
    step = int(100 * bucket['interval'])
    # Extend stop by one step so the max value itself is included.
    return {cents / 100 for cents in range(start, stop + step, step)}
848af64d3396cc77c3fa109821d72eb80961eec0
15,310
import json


def arg_string_from_dict(arg_dict, **kwds):
    """
    This function takes a series of ditionaries and creates an argument
    string for a graphql query
    """
    # Keyword overrides win over entries in arg_dict.
    merged = {**arg_dict, **kwds}
    parts = []
    for key, value in merged.items():
        parts.append("{}: {}".format(key, json.dumps(value)))
    return ", ".join(parts)
ea5d2a393e55cc104311e657758076587c78373e
15,311
def resolve(obj):
    """
    Helper function. Check whether the given object is callable. If yes,
    return its return value, otherwise return the object itself.
    """
    if callable(obj):
        return obj()
    return obj
80172ffce9c5f5fe8699980ad86a98794b4d436c
15,314
def get_si_factor(ci, li, conc='ini19'):
    """S_i factor defined in FONDECYT Regular or Iniciacion"""
    # Only the 2019 Iniciacion/Regular formulas are supported.
    if conc not in ('ini19', 'reg19'):
        raise ValueError('Not implemented for this concurso: {}.'.format(conc))
    return li*((1 + ci)**0.5)
95e0b8f932d14ae6212b299e2331881877f425c6
15,315
def check_class_method(obj, name):
    """
    Check whether an object has the given attribute/method.

    :param obj: object to inspect
    :param name: attribute name (coerced to str)
    :return: the attribute if it exists, otherwise None
    """
    try:
        return getattr(obj, str(name))
    except AttributeError:
        # FIX: only a missing attribute means "not found"; the previous
        # blanket `except Exception` also swallowed errors raised by
        # property getters, hiding real bugs.
        print(f"对象{obj} 没有方法{str(name)}")
        return None
45c323db327f08a7c70fb4935aad3524fe491a0a
15,317
def get_player_input(player_char, char_list):
    """Get a players move until it is a valid move on the board with no
    piece currently there.

    Args:
        player_char: symbol shown in the prompt for the current player.
        char_list: board as a list of 9 characters; '_' marks an empty spot.

    Returns:
        int: the chosen 1-based board position.
    """
    while True:
        # Get user input
        raw = input(player_char + ": Where would you like to place your piece (1-9): ")
        # FIX: non-numeric input used to crash int(); re-prompt instead.
        try:
            player_move = int(raw)
        except ValueError:
            print("That is not a spot on the board. Try again.")
            continue
        # Move is on board
        if player_move > 0 and player_move < 10:
            # Move is an empty spot
            if char_list[player_move - 1] == '_':
                return player_move
            else:
                print("That spot has already been chosen. Try again.")
        else:
            print("That is not a spot on the board. Try again.")
2e2d1ac0b8e4fe6a2467a378dfb9e1821525cddb
15,319
import hashlib


def get_checksum(requirements: str):
    """Determine the MD5 hex digest of the requirements text."""
    digest = hashlib.md5(requirements.encode("utf-8"))
    return digest.hexdigest()
59896edc566725cc1ced4b9e247c5949f36f0fd8
15,321
import torch
import math


def rand_perlin_2d(shape, res, fade=lambda t: 6 * t**5 - 15 * t**4 + 10 * t**3):
    """Generate a random image containing Perlin noise. PyTorch version.

    Args:
        shape: (height, width) of the output; each entry is assumed to be a
            multiple of the matching entry of ``res`` — TODO confirm.
        res: number of noise periods along each axis.
        fade: interpolation curve applied to the lattice fractions
            (default is Perlin's quintic 6t^5 - 15t^4 + 10t^3).

    Returns:
        2-D tensor of Perlin noise values.
    """
    # Lattice spacing in "period" units and pixels per period.
    delta = (res[0] / shape[0], res[1] / shape[1])
    d = (shape[0] // res[0], shape[1] // res[1])
    # Fractional coordinates of every pixel within its lattice cell.
    grid = torch.stack(torch.meshgrid(torch.arange(0, res[0], delta[0]), torch.arange(0, res[1], delta[1])), dim=-1) % 1
    # Random unit gradient vector at each lattice corner.
    angles = 2 * math.pi * torch.rand(res[0] + 1, res[1] + 1)
    gradients = torch.stack((torch.cos(angles), torch.sin(angles)), dim=-1)

    def tile_grads(slice1, slice2):
        # Broadcast the per-corner gradients over every pixel of each cell.
        return (
            gradients[slice1[0] : slice1[1], slice2[0] : slice2[1]]
            .repeat_interleave(d[0], 0)
            .repeat_interleave(d[1], 1)
        )

    def dot(grad, shift):
        # Dot product between each pixel's offset from the shifted corner
        # and that corner's gradient.
        return (
            torch.stack(
                (grid[: shape[0], : shape[1], 0] + shift[0], grid[: shape[0], : shape[1], 1] + shift[1]), dim=-1
            )
            * grad[: shape[0], : shape[1]]
        ).sum(dim=-1)

    # Contributions from the four cell corners (n<row><col>).
    n00 = dot(tile_grads([0, -1], [0, -1]), [0, 0])
    n10 = dot(tile_grads([1, None], [0, -1]), [-1, 0])
    n01 = dot(tile_grads([0, -1], [1, None]), [0, -1])
    n11 = dot(tile_grads([1, None], [1, None]), [-1, -1])
    t = fade(grid[: shape[0], : shape[1]])
    # Smooth bilinear interpolation of the corners, scaled by sqrt(2).
    return math.sqrt(2) * torch.lerp(torch.lerp(n00, n10, t[..., 0]), torch.lerp(n01, n11, t[..., 0]), t[..., 1])
5bfceb8b23558ebac9debccbe88a0f955911dd16
15,322
import os


def filter_files():
    """filter_files filter the qualified files.

    Returns:
        list: A list contains all filtered file names.
    """
    # Keep .ipynb files, then drop Chinese versions (*zh.ipynb) and
    # solution notebooks (*solution.ipynb).
    selected = []
    for name in os.listdir():
        if not name.endswith('.ipynb'):
            continue
        if name.endswith("zh.ipynb") or name.endswith("solution.ipynb"):
            continue
        selected.append(name)
    return selected
5a8cbc67f367462a2cdb76e3226b54d596f0a047
15,324
def next_departure(bus_id: int, min_time: int) -> int:
    """Compute the next departure of ``bus_id`` leaving earliest at ``min_time``.

    Buses on route ``bus_id`` depart at every multiple of ``bus_id``; the
    result is the first departure time >= ``min_time``.
    """
    # FIX: the old code computed ``bus_id % min_time``, which is the wrong
    # modulus (and raised ZeroDivisionError when min_time == 0, as noted).
    # The wait until the next multiple of bus_id is (-min_time) % bus_id.
    wait_time = (-min_time) % bus_id
    return min_time + wait_time
a6d850b1f8a297603e93ae9083c854c066d1e709
15,325
def getfilename(path):
    """This function extracts the file name without file path or extension

    Args:
        path (file): full path and file (including extension of file)

    Returns:
        name of file as string
    """
    # Take the component after the last separator (either '\\' or '/'),
    # then drop the final extension.
    tail = path.split('\\')[-1].split('/')[-1]
    return tail.rsplit('.', 1)[0]
a2df0c39836f4f04003e412e9b6a8b8f4e1c7081
15,327
import functools


def mutual_exclusive(lock):
    """ Mutex decorator.

    Args:
        lock: a lock supporting the context-manager protocol
            (e.g. threading.Lock).

    Returns:
        Decorator serializing calls to the wrapped function under `lock`.
    """
    def wrap(f):
        """Decorator generator."""
        @functools.wraps(f)
        def function(*args, **kw):
            """Decorated functionality, mutexing wrapped function."""
            # `with` is equivalent to acquire/try/finally-release but can't
            # leak the lock; wraps() preserves f's name/docstring.
            with lock:
                return f(*args, **kw)
        return function
    return wrap
d94bb7fba63bcec6a2e4e49206c15c8254f84bb6
15,328
import re


def reactants(reac):
    """Extract reactants from a reaction string.

    The expected format is colon-separated ``<count>.<[compound]>`` terms,
    e.g. ``2.[H2O]:1.[CO2]``; terms without a leading integer are skipped.

    :param reac: reaction specification string
    :return: dict mapping compound name (brackets stripped) to its count
    """
    rl = {}
    for term in reac.split(':'):
        fields = term.split('.')
        try:
            count = int(fields[0])
        except ValueError:
            # FIX: narrow the previous bare `except BaseException` (which
            # even swallowed KeyboardInterrupt) to the count-parse failure
            # it is meant to skip.
            continue
        # Strip the surrounding square brackets from the compound name
        # (raw-string pattern avoids invalid-escape warnings).
        compound = re.sub(r'[\[\]]', '', fields[1])
        rl[compound] = count
    return rl
4c9f650e4d9ec4339a8a70d1be83ce81533af122
15,329
def changed(old,new,delta,relative=True):
    """
    Tests if a number changed significantly

       -) delta is the maximum change allowed
       -) relative decides if the delta given indicates relative
          changes (if True) or absolute change (if False)
    """
    delta = abs(delta)
    # Values of `old` within [-1, 1] are treated as "near zero": relative
    # comparison is unstable there, so the final branch uses an absolute
    # epsilon band instead.
    epsilon = 1.0
    if old > epsilon:
        if relative:
            notChanged = (new <= (1+delta)*old) and (new >= (1-delta)*old)
        else:
            notChanged = (new <= old+delta) and (new >= old-delta)
    elif old < -epsilon:
        # For negative `old` the relative bounds swap sides, because
        # multiplying by a negative value reverses the inequalities.
        if relative:
            notChanged = (new >= (1+delta)*old) and (new <= (1-delta)*old)
        else:
            notChanged = (new >= old-delta) and (new <= old+delta)
    else:
        # |old| <= epsilon: accept `new` within +/- epsilon of `old`.
        notChanged = (new >= old-epsilon) and (new <= epsilon+old)
    return not notChanged
50399926dbf3a652b73ab2045e55da6e8f9a14e5
15,330
import os
import argparse


def nonempty_file(file_path):
    """
    Validate that ``file_path`` names an existing, non-empty file.

    :param file_path: str - filename to check
    :return: str - filename
    :raises argparse.ArgumentTypeError: if the file is missing or empty
    """
    if not os.path.exists(file_path):
        raise argparse.ArgumentTypeError("File %s does not exist" % file_path)
    if os.path.getsize(file_path) == 0:
        raise argparse.ArgumentTypeError("File %s is empty" % file_path)
    return file_path
613a897656b84ae47073570c5388da33d5ddda65
15,331
import gzip
import json


def tp53_pleiotropic_pair_json(infile):
    """Read a gzipped genotype JSON file and return the indices of the
    'Cycle' and 'Damage' entries in its 'pathway' list."""
    with gzip.open(infile, 'r') as fin:
        pathway = json.load(fin)['pathway']
    return (pathway.index('Cycle'), pathway.index('Damage'))
444bdd4fbaa1cf4785e4391f101cd9094e015e38
15,332
from pathlib import Path


def is_relative_to(self:Path, *other):
    """Return True if the path is relative to another path or False.
    """
    # Path.relative_to raises ValueError when the path is not inside
    # the given ancestor.
    try:
        self.relative_to(*other)
    except ValueError:
        return False
    return True
6715c8e8fa9fcd71f74c40be61e5681c4e95cf73
15,333
import socket
from contextlib import closing


def check_socket(host: str, port: int) -> bool:
    """Check if given port can be listened to"""
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        sock.settimeout(2)
        try:
            sock.bind((host, port))
            sock.listen(1)
        except socket.error:
            # Port already bound or not permitted.
            return False
        return True
3683f22bfdb4a548e38c389f8fa21dbd07851745
15,334
def process_wav(wav_rxfilename, process):
    """Return preprocessed wav_rxfilename.

    Args:
        wav_rxfilename: input
        process: command which can be connected via pipe, use stdin and stdout

    Returns:
        wav_rxfilename: output piped command
    """
    if not wav_rxfilename.endswith("|"):
        # stdin "-" or normal file: feed it through cat first.
        return "cat {} | {} |".format(wav_rxfilename, process)
    # Already a piped command: append the new stage.
    return wav_rxfilename + process + "|"
bc80531f6e3d09f75fd12953ca1a29270b668ba2
15,335
def _is_header_line(line): """Determines if line represents the headers for the nodetool status output. """ return line.startswith('-- ')
9cd4d5b3180266f5dc5f9048678b6e9637efa516
15,337
from typing import Any
import subprocess


def subprocess_run(*args: str, **kwargs: Any) -> subprocess.CompletedProcess:
    """
    Run a subprocess with captured text output, asserting success.
    """
    completed = subprocess.run(args, text=True, capture_output=True, **kwargs)
    assert completed.returncode == 0
    return completed
b60de38e1b4106ec1fef09141f266643349768e0
15,339
def _get_fetch_names(fetches): """Get a flattened list of the names in run() call fetches. Args: fetches: Fetches of the `Session.run()` call. It maybe a Tensor, an Operation or a Variable. It may also be nested lists, tuples or dicts. See doc of `Session.run()` for more details. Returns: (list of str) A flattened list of fetch names from `fetches`. """ lines = [] if isinstance(fetches, (list, tuple)): for fetch in fetches: lines.extend(_get_fetch_names(fetch)) elif isinstance(fetches, dict): for key in fetches: lines.extend(_get_fetch_names(fetches[key])) else: # This ought to be a Tensor, an Operation or a Variable, for which the name # attribute should be available. (Bottom-out condition of the recursion.) lines.append(fetches.name) return lines
c2e8a08ec059cf3e0e7fda89b16a1d4e5aeb7dae
15,340
def scale(x, a, b, c, d):
    """Scales a value from one range to another range, inclusive.

    Args:
        x (numeric): value to be transformed
        a (numeric): minimum of input range
        b (numeric): maximum of input range
        c (numeric): minimum of output range
        d (numeric): maximum of output range

    Returns:
        numeric: The equivalent value of the input value within the new
        target range
    """
    # Normalize x into [0, 1] over the input range, then expand into the
    # output range.
    fraction = (x - a) / (b - a)
    return fraction * (d - c) + c
e90c528d62d62c28415836497d10b0c61b94ccd8
15,342
import math


def distance_two_points(point_a, point_b):
    """Calculate the Euclidean distance between two points."""
    dx = point_a.pos_x - point_b.pos_x
    dy = point_a.pos_y - point_b.pos_y
    return math.sqrt(pow(dx, 2) + pow(dy, 2))
01b6357315b90bf9d6141547776f33b48294bfa2
15,343
def command_arg(*args, **kwargs):
    """
    Defines a CLI argument for the particular command. This takes the
    exact parameters that `argparse`'s `add_argument` takes
    """
    def decorator(fn):
        # Accumulate (args, kwargs) pairs on the function object itself.
        existing = getattr(fn, '_cliargs', [])
        existing.append((args, kwargs))
        fn._cliargs = existing
        return fn
    return decorator
e5f517936e3e7e7be040ceb7f4d2768317f54c9a
15,346
def init(input_mgr, user_data, logger): """Initialize the example source tool.""" # Get the selected value from the GUI and save it for later use in the user_data user_data.server_url = input_mgr.workflow_config["serverURL"] if 'serverURL' in input_mgr.workflow_config else None user_data.site_name = input_mgr.workflow_config["siteName"] if 'siteName' in input_mgr.workflow_config else None user_data.username = input_mgr.workflow_config["username"] if 'username' in input_mgr.workflow_config else None user_data.password = input_mgr.workflow_config["password"] if 'password' in input_mgr.workflow_config else None user_data.data_source_name = input_mgr.workflow_config["dataSourceName"] if 'dataSourceName' in input_mgr.workflow_config else None user_data.download_location = input_mgr.workflow_config["downloadLocation"] if 'downloadLocation' in input_mgr.workflow_config else None if user_data.server_url == None: logger.display_error_msg("Enter server URL") return False if user_data.site_name == None: logger.display_error_msg("Enter site name") return False if user_data.username == None: logger.display_error_msg("Enter username") return False if user_data.password == None: logger.display_error_msg("Enter password") return False else: user_data.password = input_mgr._plugin._engine_vars.alteryx_engine.decrypt_password(input_mgr.workflow_config["password"]) if user_data.data_source_name == None: logger.display_error_msg("Enter data source name") return False if user_data.download_location == None: logger.display_error_msg("Enter location to save data source") return False return True
edc8ea4492d7b488729755e48529757ece050c7f
15,347
def split_img(image, grid_size):
    """Split an image into a grid of equally-sized tiles.

    Args:
        image: PIL-style image exposing ``size`` and ``crop``.
        grid_size: (rows, cols) tuple.

    Returns:
        List of cropped tiles in row-major order.
    """
    rows, cols = grid_size
    tile_w = image.size[0] // cols
    tile_h = image.size[1] // rows
    tiles = []
    for row in range(rows):
        for col in range(cols):
            box = (col * tile_w, row * tile_h, (col + 1) * tile_w, (row + 1) * tile_h)
            tiles.append(image.crop(box))
    return tiles
da152055afe90ed8ebc9424498f3fd3deed82d5e
15,348
def hex_to_RGB(hexstr):
    """ Convert hex to rgb. """
    digits = hexstr.strip('#')
    # Two hex digits per channel; blue takes the remainder of the string.
    return (int(digits[:2], 16), int(digits[2:4], 16), int(digits[4:], 16))
7084907ff084229fd2b4656aa301ca72a80d2fab
15,350
def assert_interrupt_signal(library, session, mode, status_id):
    """Asserts the specified interrupt or signal.

    Corresponds to viAssertIntrSignal function of the VISA library.

    Parameters
    ----------
    library : ctypes.WinDLL or ctypes.CDLL
        ctypes wrapped library.
    session : VISASession
        Unique logical identifier to a session.
    mode : constants.AssertSignalInterrupt
        How to assert the interrupt.
    status_id : int
        Status value to be presented during an interrupt acknowledge cycle.

    Returns
    -------
    constants.StatusCode
        Return value of the library call.
    """
    status = library.viAssertIntrSignal(session, mode, status_id)
    return status
c6035238e75364f591364b49f89bc4204d62200c
15,351
import asyncio


async def sleep(msecs: int) -> None:
    """Creates an asynchronous computation that will sleep for the given
    time (in milliseconds). The operation will not block operating system
    threads for the duration of the wait."""
    seconds = msecs / 1000.0
    return await asyncio.sleep(seconds)
65fd36f342388309ef996031e3e2bdafa5b8c045
15,352
def _guess_concat(data): """ Guess concat function from given data """ return { type(u''): u''.join, type(b''): bytes, }.get(type(data), list)
193130eea686e5b91db39f5db7a9977b6b91dd32
15,354
import argparse


def parse_args():
    """Parse command line arguments.

    Returns:
        argparse.Namespace with the training configuration; most flags
        override values from the YAML config for HPO/PBT runs.
    """
    hpo_warning = 'Flag overwrites config value if set, used for HPO and PBT runs primarily'
    parser = argparse.ArgumentParser('train.py')
    add_arg = parser.add_argument
    # Positional config file (optional, defaults to the hello example).
    add_arg('config', nargs='?', default='configs/hello.yaml')
    # Distributed-training backend selection.
    add_arg('-d', '--distributed', choices=['ddp-file', 'ddp-mpi', 'cray'])
    add_arg('-v', '--verbose', action='store_true')
    add_arg('--ranks-per-node', default=8)
    add_arg('--gpu', type=int)
    add_arg('--rank-gpu', action='store_true')
    add_arg('--resume', action='store_true', help='Resume from last checkpoint')
    add_arg('--show-config', action='store_true')
    add_arg('--interactive', action='store_true')
    add_arg('--output-dir', help='override output_dir setting')
    add_arg('--seed', type=int, default=0, help='random seed')
    add_arg('--fom', default=None, choices=['last', 'best'],
            help='Print figure of merit for HPO/PBT')
    # Dataset-size and hyperparameter overrides (mostly for HPO/PBT).
    add_arg('--n-train', type=int, help='Override number of training samples')
    add_arg('--n-valid', type=int, help='Override number of validation samples')
    add_arg('--batch-size', type=int, help='Override batch size. %s' % hpo_warning)
    add_arg('--n-epochs', type=int, help='Specify subset of total epochs to run')
    add_arg('--real-weight', type=float, default=None,
            help='class weight of real to fake edges for the loss. %s' % hpo_warning)
    add_arg('--lr', type=float, default=None, help='Learning rate. %s' % hpo_warning)
    add_arg('--hidden-dim', type=int, default=None,
            help='Hidden layer dimension size. %s' % hpo_warning)
    add_arg('--n-graph-iters', type=int, default=None,
            help='Number of graph iterations. %s' % hpo_warning)
    add_arg('--weight-decay', type=float)
    return parser.parse_args()
517d0c6a05a513b8eafca0d272584c2f1c58ec34
15,356
def get_method_attr(method, cls, attr_name, default = False):
    """Look up an attribute on a method/ function.
    If the attribute isn't found there, looking it up in the
    method's class, if any.
    """
    # Sentinel distinguishes "attribute absent" from any real value.
    missing = object()
    value = getattr(method, attr_name, missing)
    if value is missing and cls is not None:
        value = getattr(cls, attr_name, missing)
    return default if value is missing else value
4c5bb4383e22f6b63d78f4b0b5266225b0e7c1d2
15,357
def type_length(expr_type):
    """
    Counts the number of parameters of a predicate. E.g.
    type_length(e) = 1
    type_length(<e, t>) = 2
    type_length(<e, <e,t>>) = 3
    """
    if expr_type is None:
        return 0
    attrs = expr_type.__dict__
    if 'first' not in attrs and 'second' not in attrs:
        # Atomic type: counts as a single parameter.
        return 1
    total = 0
    if 'first' in attrs:
        total += type_length(expr_type.first)
    if 'second' in attrs:
        total += type_length(expr_type.second)
    return total
50c92a5079ff922e806c343d116145bd01c9b57e
15,358
def permutationinverse(perm):
    """
    Function generating inverse of the permutation

    Parameters
    ----------
    perm : 1D array

    Returns
    -------
    inverse : 1D array
        permutation inverse of the input
    """
    inverse = [0] * len(perm)
    # Element `target` sits at `position`, so the inverse maps it back.
    for position, target in enumerate(perm):
        inverse[target] = position
    return inverse
c5a62a7e3acbebaebebbcf0f23dc1989a17f1c75
15,360
import os


def update_c_H046_no_verbose_cmake_file(main, file):
    """
    The CMake file in test_packages should not use CMAKE_VERBOSE_MAKEFILE TRUE
    """
    base_dir = os.path.dirname(file)
    candidates = {
        "CMakeLists.txt": os.path.join(base_dir, "CMakeLists.txt"),
        "test_package/CMakeLists.txt": os.path.join(base_dir, "test_package", "CMakeLists.txt"),
    }
    for label, cmake_path in candidates.items():
        # Stop at the first file that actually contained the verbose flag.
        if main.replace_in_file(cmake_path, "set(CMAKE_VERBOSE_MAKEFILE TRUE)\n", ""):
            main.output_result_update(title="H046: Removed set(CMAKE_VERBOSE_MAKEFILE TRUE) from {}".format(label))
            return True
    return False
2d744e064e9ced9e2297218d4bcf38f5dd476fde
15,361
def _zeros_like_scala(x): """Returns 0 which has the same dtype as x where x is a scalar.""" return 0
405cd960b68b0794bfd813a4e3bd59890f05b315
15,363
import collections


def month_results(exercises):
    """Return per-month totals as an ordered mapping of
    'month/year' -> (total distance, total time in hours)."""
    distance_by_month = collections.OrderedDict()
    time_by_month = collections.OrderedDict()
    for exercise in exercises:
        key = str(exercise.date.month) + '/' + str(exercise.date.year)
        distance_by_month[key] = distance_by_month.get(key, 0) + exercise.distance
        time_by_month[key] = time_by_month.get(key, 0) + exercise.time_as_hours
    # Zip the two per-month totals into (distance, time) tuples, keeping
    # first-seen month order.
    combined = collections.OrderedDict()
    for key in distance_by_month:
        combined[key] = (distance_by_month[key], time_by_month[key])
    return combined
d0a10c0e8721e96aac5db595d77ff0af60ea29fc
15,364
def extract_arg_default(arg_str):
    """
    Default strategy for extracting raw argument value.
    """
    # Split off the first space-delimited token; a missing remainder
    # becomes the empty string.
    head, _sep, rest = arg_str.partition(" ")
    return [head, rest]
44f139591695ebf015b88cf343c90f7b2d29aff0
15,365
def namespace_edits_rev_query(users, project, args):
    """ Obtain revisions by namespace """
    # Not implemented yet; callers receive an empty result set.
    return list()
8ba25c138e4c807a3c9bb4934eb1ddeb2cf140e5
15,367
def group_words(words, size=2, empty=""):
    """Generate tuples of ``size`` consecutive words.

    Returns a generator of tuples. If the length of the input is not
    divisible by ``size``, the last tuple has its remaining spots filled
    with the value of ``empty``.

    Args:
        words: iterable of items to group.
        size: tuple length; integer-like value larger than 1.
        empty: filler used to pad the final, short tuple.

    Raises:
        ValueError: if size < 2.
    """
    # FIX: use the validated integer `n` consistently; the old code
    # compared len(grp) against the raw `size` argument, so a non-int
    # size (e.g. the string "3") passed validation but never matched,
    # then crashed when padding the final tuple. The docstring also
    # said "divisible by three" regardless of `size`.
    n = int(size)
    if n < 2:
        raise ValueError("size must be larger than 1.")
    it = iter(words)
    try:
        grp = [next(it)]
    except StopIteration:
        return
    for w in it:
        grp.append(w)
        if len(grp) == n:
            yield tuple(grp)
            grp = []
    if grp:
        grp += [empty] * (n - len(grp))
        yield tuple(grp)
b484915e84796621a8aba8ac9d8060a33ef05805
15,368
def readFromFile(filePath):
    """
    Read the entire contents of the file at the given path.

    Parameters
    ----------
    filePath : str
        The path to the file to be read from.

    Returns
    -------
    contents : str
        The contents of the read file.
    """
    with open(filePath, "r") as source:
        return source.read()
4d07a67f1378fe7bc7e061c17c56677cdadce107
15,371
def output_params(workflow):
    """ Generate params """
    # Emit one --outfile/--outdir fragment per declared output, with a
    # globally increasing prominenceout index across all jobs.
    fragments = []
    index = 0
    for job in workflow['jobs']:
        for filename in job.get('outputFiles', []):
            fragments.append(' --outfile %s=$(prominenceout%d) ' % (filename, index))
            index += 1
        for dirname in job.get('outputDirs', []):
            fragments.append(' --outdir %s=$(prominenceout%d) ' % (dirname, index))
            index += 1
    return ''.join(fragments)
f539f17e13136dd173dd96e5f38a2ed270e91a4a
15,373
def get_num_bits_different(hash1, hash2):
    """Calculate number of bits different between two hashes.

    >>> get_num_bits_different(0x4bd1, 0x4bd1)
    0
    >>> get_num_bits_different(0x4bd1, 0x5bd2)
    3
    >>> get_num_bits_different(0x0000, 0xffff)
    16
    """
    # XOR leaves a 1 exactly where the two hashes disagree.
    diff = hash1 ^ hash2
    return bin(diff).count('1')
9023085206e3601c0be9a5cf5531024a4415fe97
15,374
import os


def file_exists(path):
    """
    Check file existence on given path

    Parameters
    -------
    path : string
        Path of the file to check existence

    Returns
    -------
    file_existence : bool
        True if file exists otherwise False
    """
    exists = os.path.exists(path)
    return exists
2e4d98092375f75b79f8a4e431654ddc10df4cd0
15,375
import math


def calculate_Resolution_based_MZ(mz):
    """ based on MZ calculates resolution provided by a LR """
    exponent = 5.847 + math.log10(mz) * (-0.546)
    return math.pow(10, exponent)
c81f788e5ab3767ee3ae1223054ab186b171eeb7
15,376
import csv


def read_headers(file_name):
    """Return the header row (first line) of a CSV file as a list."""
    with open(file_name) as handle:
        rows = csv.reader(handle, delimiter=',', quotechar='"')
        return next(rows)
852c5ced337dd00c38a6fc191f58450b41018e52
15,377
import numpy


def rix2losses(n, wl):
    """Return real(n), imag(n), alpha, alpha_cm1, alpha_dBcm1, given a
    complex refractive index. Power goes as: P = P0 exp(-alpha*z)."""
    re_part = numpy.real(n)
    im_part = numpy.imag(n)
    # Absorption coefficient per metre, per cm, and in dB/cm.
    attenuation = 4 * numpy.pi * im_part / wl
    attenuation_cm1 = attenuation / 100.
    attenuation_dBcm1 = 10 * numpy.log10(numpy.exp(1)) * attenuation_cm1
    return re_part, im_part, attenuation, attenuation_cm1, attenuation_dBcm1
0b7b82ebb5dddcd89f58a15086b3ac9f23e31048
15,378
def get_final_dx_groups(final_exam):
    """Split final-exam rows into nine progression groups.

    Groups are keyed by (baseline diagnosis DX_bl2, final diagnosis DX),
    following each patient's progression during the study; e.g. cn_cn was
    'CN' at baseline and at the final exam, mci_ad went 'MCI' -> 'AD'.

    Returns a tuple in the order:
    (cn_cn_f, cn_mci_f, cn_ad_f, mci_cn_f, mci_mci_f, mci_ad_f,
     ad_cn_f, ad_mci_f, ad_ad_f)
    """
    def subset(baseline, final):
        # rows whose baseline diagnosis is `baseline` and final diagnosis is `final`
        return final_exam[(final_exam.DX == final) & (final_exam.DX_bl2 == baseline)]

    return (subset('CN', 'CN'), subset('CN', 'MCI'), subset('CN', 'AD'),
            subset('MCI', 'CN'), subset('MCI', 'MCI'), subset('MCI', 'AD'),
            subset('AD', 'CN'), subset('AD', 'MCI'), subset('AD', 'AD'))
d8fd5c4d4903b9e4618de75ff734d9037b15b2da
15,379
from typing import List
from typing import Tuple


def tabulate(rows: List[Tuple[str, str]]) -> List[str]:
    """Render two-column rows as padded text lines.

    Args:
        rows: (left cell, right cell) tuples.

    Returns:
        One string per row, with the right cell aligned one column past
        the widest left cell.
    """
    widest = max(len(left) for left, _ in rows)
    return [left + " " * (widest + 1 - len(left)) + right for left, right in rows]
838540161e06544f81bdf4ea78b0973f75949fb3
15,381
def hit(row, column, fleet):
    """Register a shot at (row, column) against the fleet.

    Each ship is a list [row, col, horizontal, length, hits_set]. When the
    square belongs to a ship, (row, column) is added to that ship's hit set.

    :param row: int
    :param column: int
    :param fleet: list of ships
    :returns: (fleet, ship) for the ship that was hit, or None (implicitly)
        when no ship occupies the square -- original behavior preserved.
    """
    shot = (row, column)
    for ship in fleet:
        horizontal = ship[2]
        for offset in range(ship[3]):
            if horizontal:
                square = (ship[0], ship[1] + offset)
            else:
                square = (ship[0] + offset, ship[1])
            if square == shot:
                ship[4].add(shot)
                return fleet, ship
673994265cec6e0167b764b25eba1006e16c7257
15,382
import pickle


def load_vocab(load_vocab_model_path):
    """Deserialize and return the pickled vocabulary model at the given path.

    NOTE(review): pickle is unsafe on untrusted data -- only load files
    produced by this project.
    """
    with open(load_vocab_model_path, 'rb') as handle:
        return pickle.load(handle)
ccbb1313a28c3f54147fbc7bad899ac9774f91af
15,383
def find_link_to_join(model, link_name):
    """Finds a link in the model to move the contents of the provided link.

    This is done by first finding the joint that attaches this link to
    another link in the model.

    :param model: XML element for the model (calls ``.xpath`` -- presumably
        an lxml element rather than plain ElementTree; TODO confirm)
    :param link_name: name of the link whose contents should be moved
    :return: tuple (joint element, link element to attach to), or
        (None, None) when no suitable joint/link pair is found
    """
    for el in model.findall('joint'):
        # Joining is only possible if the joint is a fixed joint. Sometimes a
        # revolute joint with 0 upper and lower limits is used as a fixed
        # joint. Handle that case as well.
        if el.get('type') != 'fixed':
            if el.get('type') == 'revolute':
                limit = el.find('limit')
                if limit is not None:
                    lower = limit.find('lower')
                    upper = limit.find('upper')
                    # NOTE(review): raises AttributeError when <limit> lacks
                    # <lower>/<upper> children -- confirm inputs always have both.
                    if float(lower.text) != 0 or float(upper.text) != 0:
                        continue
            else:
                # any other non-fixed joint type cannot be joined
                continue
        parent = el.find('parent')
        child = el.find('child')
        if parent is not None and child is not None:
            # link_name is the joint's parent: attach to the child link
            if parent.text == link_name:
                attach_links = model.xpath(
                    './link[@name="{}"]'.format(child.text))
                if len(attach_links) > 0:
                    return el, attach_links[0]
        if child is not None:
            # link_name is the joint's child: attach to the parent link
            # NOTE(review): parent may be None on this path -- would raise
            # AttributeError; verify against expected model structure.
            if child.text == link_name:
                attach_links = model.xpath(
                    './link[@name="{}"]'.format(parent.text))
                if len(attach_links) > 0:
                    return el, attach_links[0]
    return None, None
68600b70e6a6d29cceeae6e365ae4dfeacfd69f2
15,384
def __lop__(op):
    """Binary left-operator decorator factory.

    Returns a method that forwards ``(self, right, op)`` to the instance's
    ``_run_op`` dispatcher.

    NOTE(review): assumes the host class defines ``_run_op`` accepting
    (self, left, right, op) -- confirm against the class this is used on.
    """
    def tmpop(self, right):
        # delegate to the class-level dispatcher with the captured operation
        return self._run_op(self, right, op)
    return tmpop
5e84a705a2f1901a1545ce75b139ea4cb6342c1a
15,387
def get_instance_name(inst):
    """Return the instance's name, or None if it doesn't have one.

    The name is the value of the metadata tag "Name", if present.
    """
    tags = inst.get('Tags') or []
    return next((tag['Value'] for tag in tags if tag['Key'] == 'Name'), None)
1fa24ac813f0245a08585c0a776a9ada27099393
15,388
def filter_ann_and_variants(annotations_w_aa_variants):
    """Normalize amino-acid variants to length-1 events and drop bad entries.

    SUBs and DELs spanning several residues are split into one event per
    residue. Variants with a NULL start coordinate, and substituted residues
    whose alternative is 'X' (aligner error), are discarded. Annotations are
    kept only when of type 'gene' or when they actually code for amino acids.
    """
    def _expand(variant):
        # Yield zero or more single-residue variants for one raw variant.
        gene, prot_name, prot_code, pos, ref, alt, kind = variant
        if pos is None:
            return
        if kind == 'DEL':
            for offset, residue in enumerate(ref):
                yield (gene, prot_name, prot_code, pos + offset, residue, '-', kind)
        elif kind == 'SUB':
            for offset, residue in enumerate(ref):
                if alt[offset] != 'X':
                    yield (gene, prot_name, prot_code, pos + offset, residue, alt[offset], kind)
        else:
            yield variant

    result = []
    for ann in annotations_w_aa_variants:
        (gene_name, product, protein_id, feature_type, start, stop,
         nuc_seq, amino_acid_seq, aa_variants) = ann
        # keep annotations of type gene plus the ones coding for amino acids
        if feature_type == 'gene' or amino_acid_seq is not None:
            flat_variants = [v for raw in aa_variants for v in _expand(raw)]
            result.append((gene_name, product, protein_id, feature_type,
                           start, stop, nuc_seq, amino_acid_seq, flat_variants))
    return result
d1b3a67ea23f33d68ef81ebd9f79ef5b3e422c66
15,389
import os
import sys


def get_workspaces_dir():
    """Return (creating if needed) the directory for development files.

    On cygwin the real directory is ``c:\\workspaces`` and ``~/workspaces``
    is a symlink to it; other platforms are unsupported when the directory
    does not already exist.

    Returns:
        The path to the workspaces directory.

    Raises:
        OSError: if the directory is missing and the platform is not cygwin.
    """
    workspaces_dir = os.path.join(os.path.expanduser('~'), 'workspaces')
    if os.path.isdir(workspaces_dir):
        return workspaces_dir
    if sys.platform != 'cygwin':
        raise OSError('Unsupported platform: ' + sys.platform)
    # Create `c:\workspaces\` if it doesn't exist, then symlink it at
    # `~/workspaces`.
    root_workspaces_dir = os.path.join('/cygdrive', 'c', 'workspaces')
    os.makedirs(root_workspaces_dir, exist_ok=True)
    os.symlink(root_workspaces_dir, workspaces_dir)
    return workspaces_dir
0a3837fdb9f6f9864b8e93a36c4f2533d9043581
15,390
import json


def get_extracted_hl_features(ofile):
    """Return the extracted high-level features from a JSON file as a dict.

    Maps each name under the file's top-level 'highlevel' object to that
    entry's 'value' field.
    """
    with open(ofile, 'r') as handle:
        highlevel = json.load(handle)['highlevel']
    return {name: entry['value'] for name, entry in highlevel.items()}
02ffb2e3881c1293d696e19969cf9246a0835403
15,391
def get_value(config_map, key):
    """Look up *key* in *config_map*, returning None when the lookup fails.

    This does not truly validate the config value; it only guards against a
    missing key or a non-subscriptable/None config_map.

    Parameters
    ----------
    config_map : Mapping
        Configuration mapping to read from.
    key
        Key to look up.

    Returns
    -------
    The stored value, or None when the lookup fails.
    """
    try:
        return config_map[key]
    except (KeyError, TypeError, IndexError):
        # was a bare `except:` -- narrowed so unrelated errors (and
        # KeyboardInterrupt/SystemExit) propagate instead of being swallowed
        return None
0b5e344de48100882f4d9c6c5d441b75f6900f41
15,392
def cec_module_params():
    """Define some CEC module parameters for testing.

    Builds a fresh dict on every call so tests may modify parameters
    without affecting other tests.
    """
    return dict(
        Name='Example Module',
        BIPV='Y',
        Date='4/28/2008',
        T_NOCT=65,
        A_c=0.67,
        N_s=18,
        I_sc_ref=7.5,
        V_oc_ref=10.4,
        I_mp_ref=6.6,
        V_mp_ref=8.4,
        alpha_sc=0.003,
        beta_oc=-0.04,
        a_ref=0.473,
        I_L_ref=7.545,
        I_o_ref=1.94e-09,
        R_s=0.094,
        R_sh_ref=15.72,
        Adjust=10.6,
        gamma_r=-0.5,
        Version='MM105',
        PTC=48.9,
        Technology='Multi-c-Si',
    )
27da851a8253d4309c8efa9f4c8903820531f872
15,393
def J2Btu(x):
    """Convert energy from joules (J) to British thermal units (Btu).

    Computed as x [J] * 3412 [Btu/kWh] / 3.6e6 [J/kWh].
    NOTE(review): 3412 is the rounded Btu-per-kWh constant (3412.14...) --
    low precision; confirm acceptable for callers.
    """
    return x*3412./1000./3600.
8136dbb87424e1e7cfabbc98e0b5a33ddbd813a2
15,394
import dis


def _instructions_up_to_offset(x, lasti: int) -> list:
    """Return the disassembled instructions of *x* up to and including the
    instruction at byte offset *lasti* (all instructions when no offset
    matches)."""
    collected = []
    for instr in dis.get_instructions(x):
        collected.append(instr)
        if instr.offset == lasti:
            return collected
    return collected
a38ad93350146fb98e559522d86d3674de0539fd
15,395
from typing import Iterable
from typing import List
from typing import Tuple


def get_eigenvalues_with_degeneracies(evals: Iterable[float]) -> List[Tuple[float, int]]:
    """Collapse sorted (possibly degenerate) eigenvalues into
    (eigenvalue, degeneracy) pairs, values rounded to 3 decimal places.
    """
    pairs: List[Tuple[float, int]] = []
    previous = None
    run_length = 0
    for value in (round(e, 3) for e in evals):
        if value == previous:
            run_length += 1
            continue
        # close the previous run before starting a new one
        if previous is not None:
            pairs.append((previous, run_length))
        previous, run_length = value, 1
    if run_length > 0:
        pairs.append((previous, run_length))
    return pairs
37b0244b15eda2159f79e4de8da8a86e2b7e6352
15,397
def directly_follows(trace):
    """Get the directly-follows relations of a trace.

    Parameters
    --------------
    trace
        List of activities

    Returns
    --------------
    rel
        Set of (a, b) pairs where b immediately follows a in the trace
    """
    return set(zip(trace, trace[1:]))
1e93d16a3bd002aa29b43309dc0a32360e2046db
15,399
import os
import time


def retreivePDFLocation():
    """Pop the cached PDF location, if any.

    Reads a path and timestamp from the ``pdflocation`` cache file in the
    working directory, removes the file so it is not reused, and discards
    the path when it was cached more than a minute ago (such a stale cache
    is probably unintentional).

    Returns the cached path string, or None when absent or stale.
    """
    try:
        with open('pdflocation', 'r') as cache:
            pdfpath = cache.readline().rstrip()  # strip trailing newline
            timestamp = float(cache.readline())
        # consume the cache so it's not used again later
        os.remove('pdflocation')
        if time.time() - timestamp > 60.0:
            pdfpath = None
    except IOError:
        # no cached file
        pdfpath = None
    return pdfpath
d48b2c6ed600373db7d663be107958d06d1279dd
15,400
import time


def wait(sentry, timeout=900):
    """Block until every unit of every service is active/idle.

    Polls *sentry* once per second. Returns None when all units report
    agent-status 'idle' and workload-status 'active'; raises TimeoutError
    after *timeout* seconds otherwise.
    """
    def _all_ready():
        status = sentry.get_status()
        for service_name in sentry.service_names:
            for unit in status.get(service_name, {}).values():
                agent = unit['agent-status']
                workload = unit['workload-status']
                # a missing/falsy status block counts as not ready
                if not agent or not workload:
                    return False
                if agent.get('current') != 'idle':
                    return False
                if workload.get('current') != 'active':
                    return False
        return True

    deadline = time.time() + timeout
    while time.time() < deadline:
        if _all_ready():
            return
        time.sleep(1)
    raise TimeoutError()
a0072f863f5afbde2c0b3b749974fa38f2cf5c55
15,401
def coin_piles(t: tuple) -> str:
    """[Medium] https://cses.fi/problemset/task/1754

    Two piles hold a and b coins. Each move removes (1, 2) or (2, 1) coins
    from the (left, right) piles. Both piles can be emptied exactly when the
    total is divisible by 3 and neither pile exceeds twice the other.

    :param t: (a, b) pile sizes
    :return: 'YES' if both piles can be emptied, else 'NO'
    """
    total_divisible = sum(t) % 3 == 0
    balanced = max(t) <= 2 * min(t)
    if total_divisible and balanced:
        return 'YES'
    return 'NO'
5bb8ee989377807bfea48d0685e1ffa66bc58a4b
15,402
def findClosestElements(arr, k, x):
    """Return the k elements of sorted *arr* closest to *x*.

    Binary-searches the left boundary of the best k-wide window.

    :type arr: List[int]
    :type k: int
    :type x: int
    :rtype: List[int]
    """
    left, right = 0, len(arr) - k
    while left < right:
        middle = (left + right) // 2
        # if arr[middle+k] is strictly closer to x than arr[middle],
        # the window must shift right
        if x - arr[middle] > arr[middle + k] - x:
            left = middle + 1
        else:
            right = middle
    return arr[left:left + k]
066a40467b293a57852f0bbbce9415dc387e414e
15,403
def scrubbing(time_series, FD, thres=0.2):
    """Simple scrubbing strategy: drop high-motion timepoints.

    Keeps only the rows of *time_series* whose framewise displacement is
    strictly below *thres*.
    """
    keep = FD < thres
    scrubbed = time_series.T[:, keep]
    return scrubbed.T
beacdde75494ad8e4a346657a18ca48bf5990996
15,404
def make_tokens(binary_parse):
    """Strip all parentheses from a binary parse and return its tokens.

    '( ( The men ) ( ( are fighting ) . ) )' -> ('The', 'men', 'are', ...)

    @returns tuple
    """
    without_parens = binary_parse.replace("(", "").replace(")", "")
    return tuple(without_parens.split())
a76346b7df225969d2859f792dfe9131c92be533
15,406
def _get2DOverlapBox(box1, box2):
    """Intersect two 2D boxes given as dicts with left/top/right/bottom.

    The result is clamped so that right >= left and bottom >= top: an empty
    overlap collapses to a degenerate box instead of inverting.
    """
    left = max(box1['left'], box2['left'])
    top = max(box1['top'], box2['top'])
    right = max(left, min(box1['right'], box2['right']))
    bottom = max(top, min(box1['bottom'], box2['bottom']))
    return {'left': left, 'top': top, 'right': right, 'bottom': bottom}
5c382ee3a651c24e812f43fdac9f82949361cf96
15,407
def get_dump_period():
    """Return the dump interval in seconds (once per day)."""
    return 24 * 60 * 60
df0053dd149e30e5d25522747e1ffb7bd98695a8
15,408
def squares_won(microboard, player):
    """Return the number of squares on the microboard won by *player*."""
    grid = microboard.export_grid()
    return sum(line.count(player) for line in grid)
ecbef6669badfb9ef597194892126ae452f01b2c
15,409
def least_distance_only(annotation, new_annotations):
    """Condition function keeping only the smallest-distance annotation per image.

    Args:
        annotation: Candidate annotation with 'image_name' and 'distance'.
        new_annotations: Best-so-far annotations keyed by image name.

    Returns:
        True when the image is unseen, or the candidate's distance is
        strictly smaller than the stored one. False otherwise.
    """
    image_name = annotation["image_name"]
    if image_name not in new_annotations:
        return True
    return annotation["distance"] < new_annotations[image_name]["distance"]
d9feeaac699a2527f43c96c65a6d61f4bfe1a2a9
15,410
import requests


def get_lessons_from_n_to_m(from_n, to_m, current_token):
    """Fetch every existing Stepik lesson with an id in [from_n, to_m].

    :param from_n: starting lesson id (inclusive)
    :param to_m: finishing lesson id (inclusive)
    :param current_token: OAuth bearer token given by the API
    :return: list of lesson JSON objects for the ids that exist
    """
    api_url = 'https://stepik.org/api/lessons/'
    headers = {'Authorization': 'Bearer ' + current_token}
    json_of_n_lessons = []
    for n in range(from_n, to_m + 1):
        try:
            current_answer = requests.get(api_url + str(n), headers=headers).json()
            # the API reports a missing lesson via a top-level "detail" field
            if "detail" not in current_answer:
                json_of_n_lessons.append(current_answer['lessons'][0])
        except Exception:
            # was a bare `except:` -- narrowed so KeyboardInterrupt/SystemExit
            # still propagate while per-id failures are reported and skipped
            print("Failure on id {}".format(n))
    return json_of_n_lessons
194f55806b3b1d252f0fb0d556032b916969523b
15,413
def snak(datatype='', value='', prop='', snaktype='value'):
    """Create and return a snak (dict).

    Builds the Wikibase snak structure for one claim: a ``datavalue`` whose
    shape depends on *datatype*, plus the owning property and snak type.

    :param datatype: Wikibase datatype name ('' is treated like 'string')
    :param value: payload; a plain string for string-like and entity-id
        datatypes, or a tuple for time / monolingualtext / quantity /
        globe-coordinate (see the unpacking below for each tuple's fields)
    :param prop: property id the snak belongs to (e.g. 'P31')
    :param snaktype: 'value', 'novalue' or 'somevalue'
    :raises ValueError: on an unknown datatype or snaktype
    """
    # string-like datatypes all share the plain string datavalue shape
    if datatype in ['', 'string', 'math', 'external-id', 'url', 'commonsMedia', 'localMedia', 'geo-shape', 'musical-notation', 'tabular-data']:
        datavalue = {
            'value': value,
            'type': 'string'
        }
    elif datatype == 'wikibase-item':
        # value is an item id like 'Q42'
        # NOTE(review): numeric-id is stored as the string slice value[1:],
        # not an int -- confirm consumers accept this
        datavalue = {
            'value': {
                'entity-type': 'item',
                'numeric-id': value[1:],
                'id': value
            },
            'type': 'wikibase-entityid'
        }
    elif datatype == 'wikibase-property':
        # value is a property id like 'P31'
        datavalue = {
            'value': {
                'entity-type': 'property',
                'numeric-id': value[1:],
                'id': value
            },
            'type': 'wikibase-entityid'
        }
    elif datatype == 'time':
        time, timezone, precision, calendarmodel = value
        datavalue = {
            'value': {
                'time': time,
                'timezone': timezone,
                'before': 0,
                'after': 0,
                'precision': precision,
                'calendarmodel': calendarmodel  # http://www.wikidata.org/entity/Q1985727
            },
            'type': 'time'
        }
    elif datatype == 'monolingualtext':
        val, language = value
        datavalue = {
            'value': {
                'text': val,
                'language': language
            },
            'type': 'monolingualtext'
        }
    elif datatype == 'quantity':
        val, unit, upper_bound, lower_bound = value
        datavalue = {
            'value': {
                'amount': val,
                'unit': unit,
                'upperBound': upper_bound,
                'lowerBound': lower_bound
            },
            'type': 'quantity'
        }
    elif datatype == 'globe-coordinate':
        latitude, longitude, precision, globe = value
        datavalue = {
            'value': {
                'latitude': latitude,
                'longitude': longitude,
                'precision': precision,
                'globe': globe
            },
            'type': 'globecoordinate'
        }
    elif datatype == 'wikibase-lexeme':
        datavalue = {
            'value': {
                'entity-type': 'lexeme',
                'numeric-id': value[1:],
                'id': value
            },
            'type': 'wikibase-entityid'
        }
    elif datatype == 'wikibase-form':
        datavalue = {
            'value': {
                'entity-type': 'form',
                'id': value
            },
            'type': 'wikibase-entityid'
        }
    elif datatype == 'wikibase-sense':
        datavalue = {
            'value': {
                'entity-type': 'sense',
                'id': value
            },
            'type': 'wikibase-entityid'
        }
    else:
        raise ValueError('{} is not a valid datatype'.format(datatype))
    if snaktype in ['value', 'novalue', 'somevalue']:
        snak = {'snaktype': snaktype, 'property': prop, 'datavalue': datavalue, 'datatype': datatype}
    else:
        raise ValueError("""{} is not a valid snaktype. Use "value, "novalue" or "somevalue".""".format(snaktype))
    return snak
c4378c165836e76f5339a5e76bb9a5e80b1a2860
15,414
def __read_class_labels(classes_file):
    """Parse a whitespace-delimited "function-name class-label" file.

    Only single class labels per function are supported; for duplicated
    function names the first occurrence wins and later ones are ignored.

    :param classes_file: path to a file with lines "function-name class-label"
    :return: dict of the form {fn: class}
    """
    labels = {}
    with open(classes_file, 'r') as handle:
        for raw_line in handle:
            function, class_label = raw_line.strip().split()
            # first occurrence wins; duplicates are silently skipped
            labels.setdefault(function, class_label)
    return labels
0962f499c1f75d4049c16aafe4922f4d2f843bc7
15,415
def function_sum(function_1, function_2):
    """Return the pointwise addition of two callables.

    The returned function forwards all positional and keyword arguments to
    both inputs and combines their results with ``+``.
    """
    def summed_function(*args, **kwargs):
        first = function_1(*args, **kwargs)
        second = function_2(*args, **kwargs)
        return first + second
    return summed_function
72b1f45694291b3aefc3f68df124359d10d018ff
15,417
def get_user_adjacency(user_index, adjacency_matrix):
    """Return the set of user indices adjacent to *user_index*.

    A user j is adjacent when there is a positive weight on the outgoing
    edge (user_index -> j) or on the incoming edge (j -> user_index).

    Parameters
    ----------
    user_index : int
        Row/column index of the user in the adjacency matrix.
    adjacency_matrix : sequence of sequences of numbers
        Square matrix of edge weights.

    Returns
    -------
    set of int
        Indices of the adjacent users.
    """
    adjacent_users = set()
    # outgoing edges: positive entries in this user's row
    for neighbor, weight in enumerate(adjacency_matrix[user_index]):
        if weight > 0:
            adjacent_users.add(neighbor)
    # incoming edges: positive entries in this user's column
    # (was an O(n^2) scan over every cell just to find column user_index;
    # indexing the column directly is O(n) with identical results on a
    # square matrix)
    for neighbor, row in enumerate(adjacency_matrix):
        if row[user_index] > 0:
            adjacent_users.add(neighbor)
    return adjacent_users
5a7d03dc028e507c194f551c84301d09b38c25e6
15,419
import torch


def make_audio(data, vocab):
    """Batch variable-length spectrograms into one zero-padded tensor.

    :param data: list of (nfft, t_i) spectrogram tensors
    :param vocab: unused here; kept for interface parity with other batchers
    :return: tensor of shape (batch, 1, nfft, max_t), zero-padded on the right
    """
    freq_bins = data[0].size(0)
    max_len = max(spect.size(1) for spect in data)
    batch = torch.zeros(len(data), 1, freq_bins, max_len)
    for idx, spect in enumerate(data):
        batch[idx, 0, :, :spect.size(1)] = spect
    return batch
715fa328d76c080bcfca365a276adc678ec50af4
15,421
def getnodes(o, attr=None, graph=None):
    """Get destination nodes from graph bindings of *o*.

    With a (truthy) *attr*, look the attribute up in ``o.__schema__`` then
    ``o.__class_schema__`` and return the attribute named by the third field
    of the schema entry (the product). With a (truthy) *graph*, simply
    return ``graph[o]``.

    :raises AttributeError: *attr* given but found in neither schema
    :raises ValueError: neither *attr* nor *graph* given
    """
    if attr:
        # instance schema takes precedence over the class schema
        for schema_name in ('__schema__', '__class_schema__'):
            if hasattr(o, schema_name):
                schema = getattr(o, schema_name)
                if attr in schema:
                    # RETURN THE PRODUCT
                    return getattr(o, schema[attr][2])
        raise AttributeError('No attribute named %s in object %s' % (attr, o))
    elif graph:
        # just look up the graph trivially
        return graph[o]
    else:
        raise ValueError('You must pass an attribute name or graph')
2e852643d2fd53f6078949a40401ca3c2b3388bd
15,422