content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def air2vacMortonIAU(wl_air):
    """Convert an air wavelength (Angstroms) to the vacuum wavelength.

    Uses the Morton (IAU) refractive-index formula, see
    https://www.astro.uu.se/valdwiki/Air-to-vacuum%20conversion
    """
    s = 1e4 / wl_air
    refractive_index = (
        1
        + 0.00008336624212083
        + (0.02408926869968 / (130.1065924522 - s**2))
        + (0.0001599740894897 / (38.92568793293 - s**2))
    )
    return wl_air * refractive_index
66823170493c3b7d8f63bd7d2e32364aa5efa78e
702,513
def instanceType(ty):
    """Return the name of an object's type as a string."""
    return type(ty).__name__
8a259626163100ac5e85f1bf26ea5f531405e6da
148,843
def on_off(tag):
    """Return an ON/OFF string for a 1/0 input (simple utility)."""
    states = ('OFF', 'ON')
    return states[tag]
9b21150c4ff8c1533f536d9b95ee467c11f45c89
455,644
def extract(values, label, raise_error=True, default_value=None):
    """Flatten a nested dictionary by pulling one labelled entry per key.

    The result keeps the outer keys; each value is the nested value stored
    under ``label``. When a nested dict lacks ``label``, a KeyError is
    raised if ``raise_error`` is True, otherwise ``default_value`` is used.

    Parameters
    ----------
    values: dict
        Nested dictionary to extract from.
    label: string
        Label to pull out of each nested dictionary.
    raise_error: bool, default=True
        Raise KeyError when a nested dict lacks ``label``.
    default_value: any, default=None
        Fallback value when ``label`` is missing and errors are disabled.

    Returns
    -------
    dict

    Raises
    ------
    KeyError
    """
    flat = {}
    for outer_key, nested in values.items():
        if label not in nested and raise_error:
            raise KeyError('missing label for {}'.format(outer_key))
        flat[outer_key] = nested.get(label, default_value)
    return flat
90716781be11e8b4a2db4b23df734cbe12186fec
283,741
def exists(env):
    """Return env['GTAGS'] when the `gtags` module has been loaded, else None."""
    if 'GTAGS' in env:
        return env['GTAGS']
    return None
8feb1b08e24cd77c8f954c113f9fa6241496318d
646,801
def effort_std(df):
    """Return the standard deviation of the effort column ``e2``."""
    effort = df["e2"]
    return effort.std()
033eed35c4bb7612f99f745ffa6fa17601a79df2
454,399
from typing import Dict


def freq_dict_to_lowercase(input_dict: Dict[str, int]) -> Dict[str, int]:
    """Fold a frequency dictionary onto lowercase keys.

    Keys are lowercased; counts of keys that collide after lowercasing
    are summed.
    """
    folded: Dict[str, int] = {}
    for key, count in input_dict.items():
        lowered = key.lower()
        folded[lowered] = folded.get(lowered, 0) + count
    return folded
0ce5348e32104c2ff5e258798b4a22312dc6192d
498,628
def force_to_energy_grad(dataset):
    """Convert 'forces' to 'energy_grad' in a dataset, in place.

    Follows the convention that a ``_grad`` key is the gradient of the
    preceding property, i.e. energy_grad = -forces.

    Args:
        dataset (nff.data.Dataset): dataset whose props may hold 'forces'.

    Returns:
        bool: True if 'forces' was removed and 'energy_grad' added,
        False when there was no 'forces' key.
    """
    if 'forces' not in dataset.props.keys():
        return False
    forces = dataset.props.pop('forces')
    dataset.props['energy_grad'] = [-f for f in forces]
    return True
584095bea241bd2a08b8b99824d5ef9aa2b1499e
90,883
def format_error(string: str, is_warning: bool) -> str:
    """Wrap *string* in ANSI color escapes: yellow for warnings, red otherwise.

    (see https://stackoverflow.com/a/287944/5299750 and
    https://stackoverflow.com/a/33206814/5299750)

    :param string: to be printed
    :param is_warning: determines the color of the error
    """
    ansi_start = '\033[93m' if is_warning else '\033[91m'
    ansi_end = '\033[0m'
    return ansi_start + string + ansi_end
b1bd11cbb24c0b511656336b4c4b8ec510671b74
193,657
import json


def handle_exception(e):
    """Return JSON instead of HTML for HTTP errors."""
    # Keep the error's own headers and status code; only replace the body.
    response = e.get_response()
    payload = {"code": e.code, "name": e.name, "description": e.description}
    response.data = json.dumps(payload)
    response.content_type = "application/json"
    return response
5ea827253deba3d36f4902d556511ccc99431646
429,602
def force_unicode(value, encoding='utf-8', errors='strict'):
    """Convert bytes or any other Python instance to str.

    Args:
        value: str is returned unchanged, bytes are decoded, anything
            else goes through str().
        encoding (str): codec used when decoding bytes.
        errors (str): decode error policy ('strict', 'ignore', ...).

    Returns:
        str: string representation of *value*.
    """
    if isinstance(value, str):
        return value
    if isinstance(value, bytes):
        return value.decode(encoding, errors)
    # Bug fix: the docstring promises "any other Python instance", but
    # calling .decode() on e.g. an int raised AttributeError.
    return str(value)
b681d5b331c5cda6192c38ebd931c5ffec591692
339,000
def _recvall(sock, size): """ Read exactly the specified amount of data from a socket. If a ``recv()`` call returns less than the requested amount, the ``recv()`` is retried until it either returns EOF or all the requested data has been read. :param sock: A socket from which bytes may be read. :param int size: The amount of data to read from the socket. :returns: The requested data. """ data = b'' while len(data) < size: buf = sock.recv(size - len(data)) # Break out if we got an EOF if not buf: # Return None for an EOF return data or None data += buf return data
6a456ee4925b7faa25646e9bc6e702848d047783
13,354
def unwrap_links(soup):
    """Unwrap <a> elements whose targets point at removed directories.

    Links whose href contains players/, leagues/, coaches/ or teams/
    are unwrapped (tag removed, contents kept).

    returns a tuple: (soup, n_unwrapped_links)
    """
    removed_dirs = ('players/', 'coaches/', 'leagues/', 'teams/')
    n_unwrapped = 0
    for anchor in soup.body.find_all('a'):
        target = anchor['href']
        if any(directory in target for directory in removed_dirs):
            anchor.unwrap()
            n_unwrapped += 1
    return (soup, n_unwrapped)
76c20988294f984aa7fb396538b2ad58dee911c0
168,382
from typing import List
import yaml


def assemble_container_names(validated_compose_content: str) -> List[str]:
    """Return the container names from a validated compose spec."""
    spec = yaml.safe_load(validated_compose_content)
    services = spec["services"].values()
    return [service["container_name"] for service in services]
ea213cc4dc54d4da1fb50c9a855818db2c142d98
383,566
def get_confirmation(warningtext):
    """Print a warning message and ask for user confirmation.

    Return Values:
        True: User selected Yes
        False: User selected No
    """
    print()
    print(warningtext)
    while True:
        try:
            user_input = input("Please confirm (Y/N): ")
        except EOFError:
            # Bug fix: the original bare except also swallowed
            # KeyboardInterrupt/SystemExit, making the prompt impossible
            # to interrupt with Ctrl-C. Only EOF means "no answer".
            user_input = ''
        answer = user_input.lower()
        if answer in ('y', 'yes'):
            return True
        if answer in ('n', 'no'):
            return False
        print("The option: " + str(user_input) + " : is not a valid input")
        print("Valid options: [Y]es or [N]o")
d34f66935d199b5cf230691bb197f0d9f4b78370
661,847
def is_enum_namestack(nameStack):
    """Determines if a namestack is an enum namestack"""
    if not nameStack:
        return False
    head = nameStack[0]
    if head == "enum":
        return True
    return head == "typedef" and len(nameStack) > 1 and nameStack[1] == "enum"
e5d909ad2380dcc5186c377d0440c34159c0b628
468,676
from typing import Union
import pathlib


def get_gecko_version_from_path_gecko_msi_filename(path_gecko_msi_filename: Union[str, pathlib.Path]) -> str:
    """Extract the gecko version from an msi filename.

    wine_gecko-2.47-x86.msi --> 2.47

    >>> assert get_gecko_version_from_path_gecko_msi_filename(pathlib.Path('wine_gecko-2.47-x86.msi')) == '2.47'
    """
    name_parts = str(path_gecko_msi_filename).split('-')
    return name_parts[1]
7e0c9aa13450d4aeb9b277d01b06023ae0a6454f
441,079
def get_header_tokens(headers, key):
    """Retrieve all tokens for a header key.

    Several headers follow the pattern where a header line contains
    comma-separated tokens; this splits them and strips whitespace.
    Returns an empty list when the key is absent.
    """
    if key not in headers:
        return []
    return [token.strip() for token in headers[key].split(",")]
9a89d77543fa58d6841e7a48b3a08b2f5cb1b7e4
408,599
def _get_identifier(obj): """ Gets the name of the identifier defined by an object with either an 'identifier' attribute or a 'typed_identifier' attribute. """ if hasattr(obj, 'identifier'): return obj.identifier if hasattr(obj, 'typed_identifier'): return obj.typed_identifier raise AttributeError( f"Object of type '{type(obj).__name__}' has no 'identifier' or 'typed_identifier'.")
dee249361e96e784047df57efb07e18688abaadf
70,700
def s2ms(t):
    """Convert seconds to milliseconds."""
    milliseconds = t * 1000
    return milliseconds
f2cc135c59d2694a08abc4f8c4088cabcd12664b
150,161
def signabs(x):
    """Split x into its sign and absolute value.

    Returns a tuple (sign(x), abs(x)). Note: sign(0) = 1, unlike numpy.sign.
    """
    sgn = -1 if x < 0 else 1
    return sgn, abs(x)
9a12328734a3952bfcb18239e4700b5bffe323f1
193,644
def format_plaintext_email(message):
    """Replace newlines by <br> so plain text displays as HTML."""
    return '<br>'.join(message.split('\n'))
88a753dd33dcf9a1432547ab0c9d27edd546e9db
376,168
import re


def id_validator(s):
    """Validate a UID.

    A valid UID must:
      - contain at least 2 uppercase English letters,
      - contain at least 3 digits (0-9),
      - contain only alphanumeric characters (a-z, A-Z & 0-9),
      - have no repeated character,
      - be exactly 10 characters long.

    Returns "Valid" or "Invalid".
    """
    try:
        # Sorting groups equal characters together, so repeats become
        # adjacent and character classes become contiguous runs.
        ordered = ''.join(sorted(s))
        ok = (len(ordered) == 10
              and re.search(r"[A-Z]{2}", ordered) is not None
              and re.search(r"\d\d\d", ordered) is not None
              and re.search(r"[^a-zA-Z0-9]", ordered) is None
              and re.search(r"(.)\1", ordered) is None)
    except Exception:
        return "Invalid"
    return "Valid" if ok else "Invalid"
c601dd2cd2bd5a2e5f616151c03b51580be5632b
512,574
import math


def transmon_perturbative(tmax, omega_q, eta, g):
    """Produce transmon energies and couplings perturbatively.

    Uses an anharmonic approximation of the transmon to get the energies.

    Args:
        tmax (int): maximum number of transmon levels.
        omega_q (float): qubit frequency.
        eta (float): transmon anharmonicity (defined positive).
        g (float): coupling normalization; value of the coupling between
            levels 0 and 1.

    Returns:
        [energies, couplings]: energies[q] is the transmon energy of
        level q; couplings[q] is the charge matrix element between
        levels q+1 and q.
    """
    energies = []
    for q in range(tmax):
        energies.append(omega_q*q - q*(q-1)/2*eta - q*(q-1)*(q-2)/4*eta**2/omega_q)
    couplings = []
    for q in range(tmax - 1):
        couplings.append(g*math.sqrt(q+1)*(1-q/2*eta/omega_q))
    return [energies, couplings]
bd15dfff9e391b8455a608bb2f0397f8ecb438f1
307,245
def Strip(txt):
    """Return txt.strip(); returns None for None or anything without .strip().

    Args:
        txt: usually a string, possibly None.

    Returns:
        str or None: stripped string, or None when txt has no .strip().
    """
    try:
        return txt.strip()
    except AttributeError:
        # Narrowed from a bare except: only absorb "no .strip() method"
        # (e.g. None), not KeyboardInterrupt/SystemExit.
        return None
44edfab97b1cdbfec4174cf554c78077fb31cfd7
693,601
def is_a(cls, x):  # noqa: F811
    """Check whether *x* is an instance of *cls*.

    Equivalent to isinstance, but auto-curried and with the order of
    arguments flipped.

    Args:
        cls: Type or tuple of types to test for.
        x: Instance.

    Examples:
        >>> is_int = sk.is_a(int)
        >>> is_int(42), is_int(42.0)
        (True, False)
    """
    return isinstance(x, cls)
1af08973716c16d6ee673904af5cbeddff0ef386
138,713
def align_up(offset, align):
    """Align ``offset`` up to ``align`` boundary.

    Args:
        offset (int): value to be aligned.
        align (int): alignment boundary.

    Returns:
        int: aligned offset.

    >>> align_up(3, 2)
    4
    >>> align_up(3, 1)
    3
    """
    # (-offset % align) is the distance to the next multiple, 0 if aligned.
    return offset + (-offset % align)
f74c2c3fba775d6bb15b7faf369e8706ecccc610
645,556
def flatten(tensor):
    """Return a copy of *tensor* flattened into one dimension.

    Args:
        tensor (ndarray): tensor to flatten.

    Returns:
        ndarray: flattened copy.
    """
    flat_copy = tensor.flatten()
    return flat_copy
bc632301578df0896d98574175358f8cc8057613
241,532
def get_factory_name(factory):
    """Return the factory's display name.

    Falls back to factory.id when the stripped name is empty, so that
    something is always displayed.
    """
    stripped = factory.name.strip()
    return stripped if stripped else factory.id
e3c626097c77f34dacab947072186582596bab0a
82,973
import functools


def has_default_decorator(decorator_factory):
    """A meta-decorator for decorator factories.

    Allows a decorator factory to be used as a normal decorator (without
    calling syntax). A single non-keyword argument (with no keyword
    arguments) is considered the object to be decorated. If more than one
    argument, or any keyword arguments, are passed, everything is passed
    to the decorator factory instead. To treat a single argument as a
    factory argument, pass the keyword-only argument
    "lonely_argument=True" (defaults to False).
    """
    @functools.wraps(decorator_factory)
    def wrapper(*args, **kwargs):
        single_argument = kwargs.pop("lonely_argument", False)
        # Bug fix: a lone positional argument only counts as "the object
        # to decorate" when there are NO keyword arguments, as documented;
        # the original ignored kwargs here and silently dropped them.
        if not single_argument and len(args) == 1 and not kwargs:
            return decorator_factory()(*args)
        return decorator_factory(*args, **kwargs)
    return wrapper
0a20270c02880445e880f395c9422ac5c6ab25d3
120,957
def get_coord_axes(path):
    """Return the number of atoms and the axes holding coordinates.

    The `path` is a :class:`numpy.ndarray` whose 0th axis indexes frames
    (snapshots of coordinates). The 3N Cartesian coordinates are either:

    1. all in the 1st axis, as x1,y1,z1,...,xN,yN,zN; or
    2. split across the 1st axis (atom index) and 2nd axis (x,y,z).

    Parameters
    ----------
    path : numpy.ndarray representing a path

    Returns
    -------
    (int, (int, ...))
        the number of atoms and the axes containing coordinates

    Raises
    ------
    ValueError
        if the path is not 2- or 3-dimensional.
    """
    path_dimensions = len(path.shape)
    if path_dimensions == 3:
        N = path.shape[1]
        axis = (1, 2)  # 1st axis: atoms, 2nd axis: x,y,z coords
    elif path_dimensions == 2:
        # Bug fix: use floor division so N is an int as documented
        # (true division produced a float like 3.0).
        N = path.shape[1] // 3
        axis = (1,)  # 1st axis: 3N structural coords (x1,y1,z1,...,xN,yN,zN)
    else:
        raise ValueError("Path must have 2 or 3 dimensions; the first "
                         "dimensions (axis 0) must correspond to frames, "
                         "axis 1 (and axis 2, if present) must contain atomic "
                         "coordinates.")
    return N, axis
1d8e3239c0361e7671ada05ebc28948ec2334d5f
401,767
def _transform_hostname_to_http_endpoint(hostnames): """Convert hostnames to status page endpoint. Args: hostnames: List of server names Returns a list of urls in the format "http://<hostname>/status" """ return ["http://{}/status".format(server) for server in hostnames]
e0921161f25b98c62199bb7e0da33d07ee3da9c1
614,878
def filters_from_args(request_args):
    """Helper to centralize reading filters from url params."""
    timespans_id = request_args.get('timespanId')
    snapshots_id = request_args.get('snapshotId')
    foci_id = request_args.get('focusId')
    q = request_args.get('q')
    if q == 'undefined':
        # The frontend sends the literal string 'undefined' for "no query".
        q = None
    return snapshots_id, timespans_id, foci_id, q
9bfee40d13c086f7114c9322ede4fb51a1226ee3
327,295
import torch


def hard_example_mining(distance_matrix, pos_idxs, neg_idxs):
    """For each anchor, find the hardest positive and negative distance.

    Args:
        distance_matrix: pairwise distances between samples, shape [N, M].
        pos_idxs: positive mask with shape [N, M].
        neg_idxs: negative mask with shape [N, M].

    Returns:
        dist_ap: distance(anchor, hardest positive); shape [N].
        dist_an: distance(anchor, hardest negative); shape [N].

    Note:
        Only considers the case where all targets have the same number of
        samples, so all anchors can be processed in parallel.
    """
    assert len(distance_matrix.size()) == 2  # noqa: S101
    # Hardest positive: the maximum masked distance per row.
    dist_ap = torch.max(distance_matrix * pos_idxs, dim=1)[0]
    # Hardest negative: the minimum per row; positives are pushed out of
    # contention by adding a huge constant under the positive mask.
    dist_an = torch.min(distance_matrix * neg_idxs + pos_idxs * 99999999.0, dim=1)[0]
    return dist_ap, dist_an
00066433a8eb1f5fff5efc3266bfc7a5e91242ca
425,316
import torch


def interp(x: torch.Tensor, xp: torch.Tensor, fp: torch.Tensor) -> torch.Tensor:
    """Interpolate ``x`` according to ``xp`` and ``fp`` as in ``np.interp``.

    This implementation cannot reproduce numpy results identically, but is
    reasonable. Code referred to `here
    <https://github.com/pytorch/pytorch/issues/1552#issuecomment-926972915>`_.

    Args:
        x: the input tensor that needs to be interpolated.
        xp: the x-coordinates of the referred data points (1D, ascending).
        fp: the y-coordinates of the referred data points, same length as ``xp``.

    Returns:
        The interpolated values, same shape as ``x``.

    Raises:
        ValueError: if any of x, xp, fp is not a 1D tensor.
    """
    # Bug fix: the original chained comparison (a != b != c != 1) only
    # checked pairwise inequality, not that each tensor is 1D.
    if x.dim() != 1 or xp.dim() != 1 or fp.dim() != 1:
        raise ValueError(
            f"Required 1D vector across ``x``, ``xp``, ``fp``. Got {x.dim()}, {xp.dim()}, {fp.dim()}."
        )
    slopes = (fp[1:] - fp[:-1]) / (xp[1:] - xp[:-1])
    locs = torch.searchsorted(xp, x)
    locs = locs.clip(1, len(xp) - 1) - 1
    # Bug fix: the intercept must come from fp (function values), not xp.
    return slopes[locs] * (x - xp[locs]) + fp[locs]
b18c373d8d1bfc8da736a0322d6abcc41af3cde0
27,279
import random


def partitionByState(ser, holdouts=1):
    """Create train and test indexes by randomly sampling holdouts per class.

    :param pd.Series ser: Classes for instances.
    :param int holdouts: number of holdouts for test.
    :return list-object, list-object: train indices, test indices.
    :raises ValueError: if a class has too few members for the holdouts.
    """
    classes = ser.unique().tolist()
    classes.sort()
    test_idxs = []
    for cls in classes:
        ser_cls = ser[ser == cls]
        if len(ser_cls) <= holdouts:
            raise ValueError(
                "Class %s has fewer than %d holdouts" % (cls, holdouts))
        idxs = random.sample(ser_cls.index.tolist(), holdouts)
        test_idxs.extend(idxs)
    # Bug fix: this line was commented out, so the function raised
    # NameError on train_idxs at the return statement.
    train_idxs = list(set(ser.index).difference(test_idxs))
    return train_idxs, test_idxs
c98d7ee7d7ddeafa97285db9df339db1a01a188f
33,872
def _convert_read_lat_to_system_dat(lattice): """This method converts the lattice_data dictionary returned by io_utils.read_lattice to the dictionary structure of io_utils.read_enum_out. Args: attice (dict): The io_utils.read_lattice style dictionary with keys: "sizes": the range of cell sizes, "lat_vecs": lattice vectors of the parent cell, "nspecies": the number of atomic species in the enumeration, "basis_vecs": basis vectors of the parent cell, "is_crestricted": logical that indicates if the concentrations will be restricted, "arrows": logical that indicates if arrows are present, "concs": array of the concentrations in format [1,3]. Returns: system_data (dict): A dictionary of the system data with keys: "title": The system title. "bulksulf": Is this a surface or bulk system. "plattice": The parent lattice vectors as rows of a matrix. "nD": The number of atoms in the system. "dvecs": The atomic basis vectors. "k": The number of atomic species in the system. "eps": Finite precision tolerance. """ system_data = { "title": "HNF plotter", "bulksurf": lattice["bulk"], "plattice": lattice["lat_vecs"], "nD": 0, "dvecs": lattice["basis_vecs"], "k": lattice["nspecies"], "eps": 1E-10 } return system_data
639fe625e309986cef5107e8e3c4bb31d61f8bff
415,002
import hashlib


def create_hash(string, hash_func=hashlib.sha256):
    """Create a 10-character hex hash of *string*.

    Args:
        string: text (str) or bytes to hash; str input is UTF-8 encoded
            first, since hashlib constructors only accept bytes.
        hash_func: hashlib constructor to use (default: sha256).

    Returns:
        str: first 10 hex characters of the digest.
    """
    if isinstance(string, str):
        # Bug fix: hashlib raises TypeError on str input; encode it.
        string = string.encode('utf-8')
    return hash_func(string).hexdigest()[:10]
e155846f4652e250e7e06bb3a1e74e400e3e3d7c
282,910
import struct


def getpeerid(sock):
    """Get peer credentials on a UNIX domain socket (BSD xucred).

    Returns a nested tuple: (uid, (gids))
    """
    LOCAL_PEERCRED = 0x001
    # struct xucred {
    #     u_int  cr_version;              /* structure layout version */
    #     uid_t  cr_uid;                  /* effective user id */
    #     short  cr_ngroups;              /* number of groups */
    #     gid_t  cr_groups[16];           /* groups (NGROUPS = 16) */
    #     void  *_cr_unused1;             /* compat with old ucred */
    # };
    xucred_fmt = '2ih16iP'
    raw = sock.getsockopt(0, LOCAL_PEERCRED, struct.calcsize(xucred_fmt))
    fields = struct.unpack(xucred_fmt, raw)
    if fields[0] != 0:
        # Unexpected structure layout version.
        raise OSError
    ngroups = fields[2]
    return (fields[1], fields[3:3 + ngroups])
71d8aee0e2495e6fb07311d596a3bbb38675f611
422,997
import re


def replace_unescaped_utf_8_chars(input_text):
    """Decode escape pairs like "&#x00c3;&#x00a4;" into the UTF-8
    character the two bytes encode (here: "\u00e4").

    Only necessary to support Python 2.7+.
    """
    pair_pattern = re.compile(r"&#x00(\w{2});&#x00(\w{2});")

    def _decode(match):
        raw = bytearray(int(hexbyte, 16) for hexbyte in match.groups())
        return raw.decode("utf-8")

    return pair_pattern.sub(_decode, input_text)
c5cc78e62e01dcda4343261e840ab38aff375660
646,427
def removeprefix(string, prefix):
    """Backport of str.removeprefix() for Python versions below 3.9."""
    return string[len(prefix):] if string.startswith(prefix) else string
9bb3065a332b78dd868ca2ea7002ca22248b204c
437,048
def meetup_time_format(a_datetime):
    """Format a date/time the way the Meetup API expects (fixed .000 millis)."""
    meetup_fmt = '%Y-%m-%dT%H:%M:%S.000'
    return a_datetime.strftime(meetup_fmt)
1310423730c61f8bfb9eda3c6d8aac13ddb645be
206,249
def topHat(r, A0=0.2):
    """Top-hat beam profile as used in Ref. [1].

    Args:
        r (numpy array, ndim=1): Equispaced 1D grid for radial coordinate.
        A0 (float): Top hat width (default: 0.2).

    Returns:
        f (numpy array, ndim=1): Boolean profile, True inside the hat
        (r < A0), False outside.
    """
    inside_hat = r < A0
    return inside_hat
5bb8a71daf3150d45b6120ce9be692e03e4f4e23
663,685
def _medgen_url(cui): """ URL linkout the NCBI MedGen website. :param cui: concept unique identifier :return: str url """ return 'http://www.ncbi.nlm.nih.gov/medgen/' + str(cui)
eaf99ee34eb2b7592bc184c9edbecd02c47ca864
296,095
def java_is_subclass(obj, class_name):
    """Walk a deserialized javaobj JavaObject's class chain and report
    whether it is a subclass of *class_name*.
    """
    clazz = obj.get_class()
    while clazz and clazz.name != class_name:
        clazz = clazz.superclass
    return bool(clazz)
27e26c1211c4ddc2f299e0af6d9c15f03dba8680
139,252
import math


def elevation_radians(lat, dec, ha):
    """Return the elevation angle in radians for latitude, declination
    and hour angle (all in radians)."""
    sin_elevation = (math.sin(dec) * math.sin(lat)
                     + math.cos(dec) * math.cos(lat) * math.cos(ha))
    return math.asin(sin_elevation)
5bb9b7e1121cfd1d770072bced4d4dfd36880a7b
176,502
def postorder_DFT(tree, nodelist):
    """Post-order traversal of a binary RST tree.

    :type tree: SpanNode instance
    :param tree: a binary RST tree
    :type nodelist: list
    :param nodelist: accumulator, filled with nodes in post order
    """
    for child in (tree.lnode, tree.rnode):
        if child is not None:
            postorder_DFT(child, nodelist)
    nodelist.append(tree)
    return nodelist
b3b65ce98ceb227ee743b3b40e0ee5f21ed1e56c
66,050
def NORMALIZE_OBSERVATION(obs_b, action_b, rew_b, next_obs_b, done_b):
    """Normalize observations for Atari environments (scale pixels by 255)."""
    scaled_obs = obs_b / 255
    scaled_next_obs = next_obs_b / 255
    return scaled_obs, action_b, rew_b, scaled_next_obs, done_b
f412ce3473e6f7f82b74236ed7a80f8879692125
178,634
import base64


def is_base64(s):
    """Check whether a string or bytes value is valid base64.

    Round-trips the value through decode/encode and compares; any decode
    failure yields False.

    Parameters
    ----------
    s
        String or bytes to check.

    Returns
    -------
    bool
    """
    try:
        if isinstance(s, str):
            # Bug fix: b64encode returns bytes, so comparing it against a
            # str input was always False; normalize to bytes first.
            s = s.encode('ascii')
        return base64.b64encode(base64.b64decode(s)) == s
    except Exception:
        return False
677aa804f7a76b74f1624796b7eff0ae173a0bf8
654,738
import re def _contains_single(string: str) -> bool: """Whether a string contains a single quote.""" match = re.search(r"[']", string) return match is not None
329cd8bf42412bbcc35c09b139244666866b668e
406,263
import math


def amps_per_meter_to_oersted(amps_per_meter):
    r"""Converts \(\frac{A}{m}\) to Oersted (1 A/m = 4*pi*1e-3 Oe)."""
    return amps_per_meter * 1e-3 * 4 * math.pi
3e8e30dfdd557cbed197066e86c0b2fce8209e46
343,232
import io
import json


def load_scan(json_input):
    """Load scan results from *json_input*, either in ScanCode standard
    JSON format or the data.js html-app format, and return the list
    stored under 'files'.
    """
    with io.open(json_input, encoding='utf-8') as handle:
        parsed = json.load(handle)
    return parsed['files']
c6209e79d8b3d026cca05059b5e90b5b15b70743
297,700
from typing import Sequence


def get_digits(n: int, base: int = 10) -> Sequence[int]:
    """Decompose a positive integer into digit values in a given base.

    Args:
        n: A positive integer value.
        base: The base in which ``n`` will be represented; at least 2.

    Returns:
        Digit values of ``n`` in the given base, from most to least
        significant.
    """
    digits = []
    while n:
        n, digit = divmod(n, base)
        digits.append(digit)
    digits.reverse()
    return digits
5346c8c8741d1d6739c7586a68cfb06ffe723746
274,324
def generate_configs(**configs):
    """Generate combinations of user-provided configs.

    Given configs like M=(1, 2), N=(4, 5) and sample_func=cross_product,
    produces (({'M': 1}, {'N': 4}), ({'M': 1}, {'N': 5}),
              ({'M': 2}, {'N': 4}), ({'M': 2}, {'N': 5})).

    Raises:
        AssertionError: when 'sample_func' is missing from the configs.
    """
    # Typo fix in the assertion message ("generat" -> "generate").
    assert 'sample_func' in configs, "Missing sample_func to generate configs"
    per_key = [
        [{key: value} for value in values]
        for key, values in configs.items()
        if key != 'sample_func'
    ]
    return configs['sample_func'](*per_key)
018259def2e58ea52cf8e443898add8b394a5057
567,954
import string


def remove_punctuation(txt):
    """Remove wordcloud punctuation characters from each string.

    :params:
    --------
    txt list(): of strings that need to have punctuation removed

    :return:
    --------
    to_return list(): of strings with punctuation removed
    """
    # Perf fix: build the punctuation set once instead of re-creating
    # set(string.punctuation) for every character of every entry.
    punctuation = set(string.punctuation)
    to_return = []
    for entry in txt:
        to_return.append(
            ''.join(str(term) for term in entry if term not in punctuation))
    return to_return
88464a95abe9a10eca849a4dd86e55e8947ae76d
403,612
def hash_key(aMap, key):
    """Hash *key* into a bucket index for the aMap's buckets."""
    bucket_count = len(aMap)
    return hash(key) % bucket_count
179869154845e7da1ab4e50218ddc7f1f04cf7cb
586,803
from typing import List
from typing import Tuple


def transform_identifiers(identifiers: List[Tuple[str, int]]) -> List[str]:
    """Render (identifier, count) tuples in the writable "identifier:count" form.

    Identifiers that are empty after stripping are skipped.

    :param identifiers: list of tuples, identifier and count.
    :return: a list of "identifier:count" strings.
    """
    rendered = []
    for name, count in identifiers:
        stripped = name.rstrip()
        if stripped != "":  # skip empty tokens
            rendered.append("{identifier}:{count}".format(
                identifier=stripped, count=str(count).rstrip()))
    return rendered
c280397d5174b50312d9009dd5fa3de94536ac60
490,941
def format_seconds(ms: int) -> str:
    """Formats milliseconds into min:sec.ms format."""
    total_seconds, millis = divmod(ms, 1000)
    minutes, seconds = divmod(total_seconds, 60)
    return f"{minutes:01d}:{seconds:02d}.{millis:03d}"
8ee8db40dae91aa9d6c4132825dc637209675a1a
466,283
import re


def clean(name):
    """Reduce *name* to its lowercase key words.

    Args:
        name (str): name we are cleaning.

    Returns:
        str or None: space-joined lowercase words longer than one letter,
        or None when the name is NaN.
    """
    if str(name) == "nan":
        return None
    words = re.findall(r"[a-z]+", name.lower())
    kept = [word for word in words if len(word) > 1]
    return " ".join(kept)
e67a0e675217f61445a6b7ccd34985f5ebdc11ce
528,004
def truncate(string, length):
    """Truncate *string* to *length* characters, eliding the middle with '...'."""
    sep = '...'
    if length < len(sep) + 2:
        # Too short for a sensible ellipsis; hard-cut instead.
        return string[:length]
    if len(string) <= length:
        return string
    half, extra = divmod(length - len(sep), 2)
    return string[:half + extra] + sep + string[-half:]
7951180823f5c90ada5df7c9cfdf6785f4603d46
383,996
import torch


def ssm(vec):
    """Skew-symmetric (cross-product) matrix for a (batch of) 3-vector(s).

    Args:
        vec: tensor of shape (3,) or (B, 3).

    Returns:
        Tensor of shape (3, 3) or (B, 3, 3) such that S @ y == cross(vec, y).
    """
    if len(vec.shape) == 1:
        # Bug fix: 'unsuqeeze' typo made the 1-D branch raise AttributeError.
        return ssm(vec.unsqueeze(0)).squeeze()
    out = torch.zeros(vec.shape[0], 3, 3, device=vec.device)
    out[:, 0, 1] = -vec[:, 2]
    out[:, 0, 2] = vec[:, 1]
    out[:, 1, 0] = vec[:, 2]
    out[:, 1, 2] = -vec[:, 0]
    out[:, 2, 0] = -vec[:, 1]
    out[:, 2, 1] = vec[:, 0]
    return out
5a000c219e4f4d79aff9bb0400a01177738d223d
260,357
import torch


def get_TFPN_dict(preds, labels, true_label=1, as_float=False):
    """Count TP, TN, FP and FN for a given class label.

    Given predictions and labels, returns a dictionary with the four
    confusion counts as scalar tensors (long, or float if *as_float*).
    """
    is_pos = preds == true_label
    is_correct = preds == labels
    counts = {
        "TP": is_pos & is_correct,
        "TN": ~is_pos & is_correct,
        "FP": is_pos & ~is_correct,
        "FN": ~is_pos & ~is_correct,
    }
    counts = {key: mask.long().sum() for key, mask in counts.items()}
    if as_float:
        counts = {key: value.float() for key, value in counts.items()}
    return counts
5a3589e8b12877b736209049441bb3b6eb6f84d5
318,519
from typing import List
import re


def decodeBinInfoData(bin_data: List[str]) -> List[str]:
    """Decode BIN_DATA lines: every 8-bit binary group becomes one ASCII char."""
    decoded = []
    for line in bin_data:
        octets = re.findall(r'.{8}', line)
        decoded.append(''.join(chr(int(octet, 2)) for octet in octets))
    return decoded
0c3e82201c8802c3f1495a39e4e6ef9d459cfec9
204,284
from typing import Sequence
from typing import Callable
import torch


def zero(units: Sequence[int]) -> Callable[[torch.Tensor], torch.Tensor]:
    """Build a function that zeros the given channel units of 4D features.

    Args:
        units (Sequence[int]): The units to zero.

    Returns:
        Callable[[torch.Tensor], torch.Tensor]: Function taking layer
        features and returning them with the given units zeroed.
    """
    def fn(features: torch.Tensor) -> torch.Tensor:
        if features.dim() != 4:
            raise ValueError(f'expected 4D features, got {features.dim()}')
        # Multiply by a mask rather than writing in place, so autograd
        # history is not broken.
        mask = features.new_ones(*features.shape[:2], 1, 1)
        mask[:, units] = 0
        return features * mask

    return fn
6adb88bf2a27cb2bcc89f663a2950922abdde37e
153,485
def yo(name):
    """Say yo to yo friend."""
    return "yo, " + name + "!"
f6d2bed159aaf26c600171c878ce32262df7abf3
307,359
from typing import Any


def is_number(thing: Any) -> bool:
    """Test whether an object converts to a number, except strings never do."""
    if isinstance(thing, str):
        return False
    try:
        float(thing)
    except (ValueError, TypeError):
        return False
    return True
f0cc1048635fd71d5917dd2a13615697aef6afdb
479,482
def is_subset(dict1, dict2):
    """Is dict1 a subset of dict2 (every key/value pair present)?"""
    return all(
        key in dict2 and dict2[key] == value
        for key, value in dict1.items()
    )
560660a1eefb4ac108868cd0936b615e5ee4fc1c
274,817
def rec_to_dict(arr):
    """Turn record array *arr* into a dict keyed by its field names."""
    field_names = arr.dtype.names
    return {name: value for name, value in zip(field_names, arr)}
21f9fd0c47b25c27c912ab441a5e1743d06f4566
364,859
def alt_case(string, lower_first=True):
    """Return the string with alternating upper and lowercase characters.

    lower_first: if True, first character is lowercase; if False, first
    character is uppercase.
    """
    pieces = []
    for index, ch in enumerate(string.lower()):
        if bool(index % 2) == lower_first:
            pieces.append(ch.upper())
        else:
            pieces.append(ch)
    return ''.join(pieces)
6ada67f2ee6878db6f844e58191614d51ebda932
367,431
import functools
import warnings


def deprecated(message=None):
    """A decorator for deprecated functions.

    Args:
        message: warning text; defaults to "<name> is deprecated".
    """
    def _decorator(func, message=message):
        if message is None:
            message = '%s is deprecated' % func.__name__

        # Bug fix: without functools.wraps the decorated function lost its
        # __name__ and __doc__, breaking introspection and docs.
        @functools.wraps(func)
        def newfunc(*args, **kwds):
            warnings.warn(message, DeprecationWarning, stacklevel=2)
            return func(*args, **kwds)
        return newfunc
    return _decorator
0b9f271f63334e5ad82ae61ebeb2b2e44eddf278
328,333
def find_node_name(model, name):
    """Find a node by its name in an ONNX graph.

    :param model: onnx graph (must expose .graph)
    :param name: node name
    :return: node pointer, or None when no node matches
    """
    if not hasattr(model, "graph"):
        raise TypeError(  # pragma: no cover
            "Parameter model is not an ONNX model but "
            "{}".format(type(model)))
    matches = (node for node in model.graph.node if node.name == name)
    return next(matches, None)
9dc3a308f5134236b12bf79bc76b0d09fc41458d
16,817
from typing import Sequence


def non_decreasing(seq: Sequence):
    """Return True if *seq* is in non-decreasing order, False otherwise."""
    return all(earlier <= later for earlier, later in zip(seq, seq[1:]))
554957a92a970b5c2c688c1a9babbfd5c285b3a2
218,024
def version_is_dash(request):
    """Return a boolean: whether the version they asked for is '-'."""
    requested = request.line.uri.path.get('version')
    return requested == '-'
d44fc7621dee4dfc10966456803a4fa3b3b234f4
504,815
def _perp(t: tuple) -> tuple: """ Obtain the point whose corresponding vector is perpendicular to t. Parameters ---------- t : tuple Point (x, y) Returns ------- tuple: (-y, x) """ return -t[1], t[0]
ed49a4f326f8ef065e72d964bce3b3ce444c25f5
629,725
def trim(s):
    """Trim string to fit on terminal (assuming 80-column display)."""
    if len(s) > 80:
        return s[:77] + "..."
    return s
b3c2236b902bf20e0f2e4e7c97cfb57487ec1031
691,271
def clamp(value, min_value, max_value):
    """
    Clamp a value into the range [min_value, max_value].

    Args:
        value(float): Value to clamp.
        min_value(float): Lower bound.
        max_value(float): Upper bound.

    Returns:
        value(float): Value clamped between the maximum and minimum values.
    """
    # Apply the upper bound first, then the lower bound (so min_value wins
    # when the bounds are inverted), matching max(min(value, hi), lo).
    if value > max_value:
        value = max_value
    if value < min_value:
        value = min_value
    return value
e284dc0f1440ae6371ff961e1d992f153b3aa52d
406,370
def is_pathogenic(pvs, ps_terms, pm_terms, pp_terms):
    """Check if the criterias for Pathogenic is fullfilled

    The following are descriptions of Pathogenic clasification from ACMG paper:

    Pathogenic
      (i) 1 Very strong (PVS1) AND
        (a) ≥1 Strong (PS1–PS4) OR
        (b) ≥2 Moderate (PM1–PM6) OR
        (c) 1 Moderate (PM1–PM6) and 1 supporting (PP1–PP5) OR
        (d) ≥2 Supporting (PP1–PP5)
      (ii) ≥2 Strong (PS1–PS4) OR
      (iii) 1 Strong (PS1–PS4) AND
        (a)≥3 Moderate (PM1–PM6) OR
        (b)2 Moderate (PM1–PM6) AND ≥2 Supporting (PP1–PP5) OR
        (c)1 Moderate (PM1–PM6) AND ≥4 supporting (PP1–PP5)

    Args:
        pvs(bool): Pathogenic Very Strong
        ps_terms(list(str)): Pathogenic Strong terms
        pm_terms(list(str)): Pathogenic Moderate terms
        pp_terms(list(str)): Pathogenic Supporting terms

    Returns:
        bool: if classification indicates Pathogenic level
    """
    if pvs:
        # Pathogenic (i)(a):
        if ps_terms:
            return True
        if pm_terms:
            # Pathogenic (i)(c): 1 Moderate (truthy pm_terms) + >=1 supporting
            if pp_terms:
                return True
            # Pathogenic (i)(b):
            if len(pm_terms) >= 2:
                return True
        # Pathogenic (i)(d): checked independently of pm_terms
        if len(pp_terms) >= 2:
            return True
    if ps_terms:
        # Pathogenic (ii):
        if len(ps_terms) >= 2:
            return True
        # Pathogenic (iii)(a):
        if pm_terms:
            if len(pm_terms) >= 3:
                return True
            # Pathogenic (iii)(b): exactly 2 moderate + >=2 supporting
            elif len(pm_terms) >= 2:
                if len(pp_terms) >= 2:
                    return True
            # Pathogenic (iii)(c): reached only when len(pm_terms) == 1
            elif len(pp_terms) >= 4:
                return True
    return False
d26d6c528906e4efb6cc4df9aaca1e0e3cc4dfae
130,400
import logging


def check_entries_and_report_duplicates(class_entries):
    """
    Scans given dict with class entries and logs information about duplicate
    entries (a class name mapped to more than one jar).

    :param class_entries: mapping of class name -> list of jars containing it
    :return: 0 if there are no duplicates, 1 otherwise
    """
    rc = 0
    # .items() avoids a second dict lookup per key.
    for class_entry, class_entry_jars in class_entries.items():
        if len(class_entry_jars) > 1:
            # Lazy %-style logging args: the message is only formatted when
            # the record is actually emitted (was eager "%" interpolation).
            logging.error("%s is present in (%s)",
                          class_entry, ", ".join(class_entry_jars))
            rc = 1
    return rc
addd9ce46df0987da0aa74930329d5825373447d
379,661
def triple_prod(vec1, vec2, vec3):
    """
    Triple cross product (vec1 x vec2) x vec3.

    PARAMETERS
    ----------
    vec{1, 2, 3}: Vec
        Vectors for triple cross product. Order is important
    """
    return vec1.cross(vec2).cross(vec3)
77a249154c37ea54a9b48c0ba77af1680a5b62d9
138,183
def _make_phase_block(phase, number=1, name="", scale_down=1.0):
    """Create a pcr phase block skelleton with placeholder strings for
    different refinement options. (for internal use)

    :param phase: a crystal structucture
    :type phase: cctbx.xray_structure
    :param number: the number of the phase
    :type number: integer
    :param name: a title to be used for the phase in the pcr file
    :type name: string
    :param scale_down: factor to divide intensities by (to avoid overflows)
    :type scale_down: float
    :returns: the pcr phase block skelleton as a string
    :rtype: string
    """
    # NOTE(review): the exact line breaks / column alignment inside the
    # template strings below could not be verified from the available view of
    # this file -- confirm against an original FullProf .pcr before relying
    # on the whitespace.
    phase.make_scatterer_labels_shelx_compatible_in_place()
    if name =="":
        name = "Phase_{0}".format(number)
    scatt = phase.scatterers()
    symm = phase.crystal_symmetry()
    # Phase header: phase name, atom count, space group and the atom-table
    # column header. "##_..._##" markers are placeholders later substituted
    # with refinement codes.
    tmp = """\
!-------------------------------------------------------------------------------
! Data for PHASE number: {number} ==> Current R_Bragg for Pattern# {number}:
!-------------------------------------------------------------------------------
{name:s}
!
!Nat Dis Ang Pr1 Pr2 Pr3 Jbt Irf Isy Str Furth ATZ Nvk Npr More
{nat:>4d} 0 0 0.0 0.0 0.0 0 0 0 0 0 {atz} 0 0 0
!
{space_group} <--Space group symbol
!Atom Typ X Y Z Biso Occ In Fin N_t Spc /Codes
""".format(number=number, name=name, nat=len(scatt), atz=0.0,
           space_group=symm.space_group_info())
    # One atom line (plus refinement-code line) per scatterer.
    for atom in scatt:
        tmp += """\
{lbl:s} {stype:s} {x:f} {y:f} {z:f} {biso:7.5f} {occ:7.5f} 0 0 0 0 ##_ap*_## ##_ap*_## ##_ap*_## 0.00 0.00\n""".format(
            lbl=atom.label, stype=atom.element_symbol(),
            x=atom.site[0], y=atom.site[1], z=atom.site[2],
            biso=atom.b_iso(), occ=atom.occupancy)
    a, b, c, alpha, beta, gamma = symm.unit_cell().parameters()
    # Profile parameters: overall scale (reduced by scale_down to avoid
    # overflows), peak-shape terms, and the unit-cell line.
    tmp += """\
!-------> Profile Parameters for Pattern # 1
! Scale Shape1 Bov Str1 Str2 Str3 Strain-Model
{scale:f} 0.10033 0.03837 0.00000 0.00000 0.00000 0
 ##_scf_## ##_shp1.{n}_## ##_shp2.{n}_## 0.000 0.000 0.000
! U V W X Y GauSiz LorSiz Size-Model
 1.706310 -1.179299 0.405540 0.000000 0.000000 0.000000 0.000000 0
 ##_prfu.{n}_## ##_prfv.{n}_## ##_prfw.{n}_## 0.000 0.000 0.000 0.000
! a b c alpha beta gamma #Cell Info
{a:f} {b:f} {c:f} {alpha:f} {beta:f} {gamma:f}\n""".format(
        scale=0.01/scale_down, a=a, b=b, c=c,
        alpha=alpha, beta=beta, gamma=gamma, n=number)
    # make contraints from symmetry
    # Refinement-code line for the cell parameters: which of a/b/c and the
    # angles are free depends on the crystal system (shared codes tie equal
    # parameters together, "0.00" fixes symmetry-constrained ones).
    def __constraints_from_symm(xtal_system):
        xtal_system = xtal_system.lower()
        if xtal_system == "triclinic":
            return " ##_lp1.{0}_## ##_lp2.{0}_## ##_lp3.{0}_## ##_lp4.{0}_## ##_lp5.{0}_## ##_lp6.{0}_##".format(number)
        elif xtal_system == "monoclinic":
            return " ##_lp1.{0}_## ##_lp2.{0}_## ##_lp3.{0}_## ##_lp4.{0}_## 0.00 ##_lp5.{0}_##".format(number)
        elif xtal_system == "orthorhombic":
            return " ##_lp1.{0}_## ##_lp2.{0}_## ##_lp3.{0}_## 0.00 0.00 0.00".format(number)
        elif xtal_system in ["trigonal", "hexagonal", "tetragonal"]:
            return " ##_lp1.{0}_## ##_lp1.{0}_## ##_lp2.{0}_## 0.00 0.00 0.00".format(number)
        elif xtal_system == "cubic":
            return " ##_lp1.{0}_## ##_lp1.{0}_## ##_lp1.{0}_## 0.00 0.00 0.00".format(number)
        else:
            return " 0.00 0.00 0.00 0.00 0.00 0.00"
    tmp += __constraints_from_symm(symm.space_group().crystal_system()) + "\n"
    # Preferred-orientation / asymmetry parameters (all fixed at zero).
    tmp += """\
! Pref1 Pref2 Asy1 Asy2 Asy3 Asy4
 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000
 0.00 0.00 0.00 0.00 0.00 0.00
"""
    return tmp
10282f321aa435e970c39b071c9eaa0ad8c94fb9
313,473
def is_visible(attr):
    """
    Check whether an HTML element is visible, i.e. its inline ``style``
    attribute does not hide it with ``display:none``.

    Args:
        attr (:obj:`lxml.html.HtmlElement`): HTML element

    Returns:
        :obj:`bool` - True if the element is visible, False if its style
        contains ``display:none`` (with or without a space after the colon).
    """
    if 'style' in attr.attrib:
        style = attr.get('style')
        # Use the `in` operator instead of calling the __contains__ dunder
        # directly; same two patterns are checked as before.
        if 'display:none' in style or 'display: none' in style:
            return False
    return True
0e1eb59f7c53b0329c1ad4e61e4fe100d63ee112
97,377
import math


def get_angle_between_yaw_and_point(a, p, yaw):
    """
    Gets the angle (in degrees, range [0, 360)) between the point p and the
    vector starting from a's centroid with the given yaw angle.
    """
    yaw_rad = math.radians(yaw)
    yaw_vec = (math.cos(yaw_rad), math.sin(yaw_rad))
    rel_vec = (p[0] - a.centroid.x, p[1] - a.centroid.y)
    delta = math.atan2(yaw_vec[0], yaw_vec[1]) - math.atan2(rel_vec[0], rel_vec[1])
    return math.degrees(delta) % 360
f6ff0d1affec482a2324209e452d7e24439466b4
97,457
import hashlib


def calculate_md5(text):
    """Calculate the hex MD5 digest for the given text (UTF-8 encoded)."""
    digest = hashlib.md5()
    digest.update(text.encode('utf-8'))
    return digest.hexdigest()
bbf8b7b0ef61a14bb8a69c0de48efb2b44b61449
483,431
def delete_words(dataframe, *word_list, include_report=False):
    """
    Delete rows containing at least one of the specified words.

    :param dataframe: Dataframe to filter (must have a "Keyword" column).
    :param word_list: Literal words to remove (treated as plain text, not
        regular expressions).
    :param include_report: If True, the object returned will be a Dictionary
        including the filtered dataframe and a report of the words found.
        If False, will simply return the filtered DataFrame.
    :return: A filtered Dataframe (if include_report is False) or a Dict
        including it.
    """
    import re  # local import; used only to escape the literal words

    # BUGFIX: the words were joined into a regex without escaping, so any
    # word containing regex metacharacters (e.g. "c++") raised re.error or
    # matched the wrong rows.
    pattern = '|'.join(re.escape(word) for word in word_list)
    ret = dataframe[~dataframe["Keyword"].str.contains(pattern)]
    if not include_report:
        return ret
    word_report = {}
    for word in word_list:
        # regex=False: count literal occurrences of the word.
        word_report[word] = sum(dataframe["Keyword"].str.contains(word, regex=False))
    return {"dataframe": ret, "word_report": word_report}
ebbd44e75fbc7891e49ce4ce205109a3dcc314ae
180,012
import collections def _sort_almost_sorted(almost_sorted_deque, key): """ Sort a deque like that where only the first element is potentially unsorted and should probably be last and the rest of the deque is sorted in descending order. :param collections.deque almost_sorted_deque: The deque of size n, where the first n-1 elements are definitely sorted (in descending order) and where the last element is also probably in the correct place, but needs to be checked :param callable key: The key (function) to use for sorting. :return: The sorted (given that the conditions are met) deque. :rtype: collections.deque """ if key(almost_sorted_deque[0]) < key(almost_sorted_deque[1]): almost_sorted_deque.append(almost_sorted_deque.popleft()) if key(almost_sorted_deque[-1]) <= key(almost_sorted_deque[-2]): return almost_sorted_deque almost_sorted_deque = collections.deque(sorted(almost_sorted_deque, key=key, reverse=True)) return almost_sorted_deque
93d282f6cc0fd4df16480b0ebbc33d132bd3cc0e
666,140
def getFloatFromStr(number: str) -> float:
    """
    Return float representation of a given number string.
    HEX number strings must start with ``0x`` (optionally preceded by ``-``).

    Args:
        number: int/float/hex string representation of a given number.

    Raises:
        ValueError: if the string is not a valid number.
    """
    numberStr = number.strip()

    # Only honour a single *leading* '-' as the sign.  The previous version
    # removed every '-' anywhere in the string, which silently accepted
    # malformed input such as "5-" or "1-2".
    isNegative = numberStr.startswith("-")
    if isNegative:
        numberStr = numberStr[1:]

    if numberStr.lower().startswith("0x"):
        num = float(int(numberStr, 16))
    else:
        num = float(numberStr)

    return -num if isNegative else num
de5ee4e8193bd8127bb8d42946e184f45d1f3b23
661,459
def not_in_dict_or_none(dict, key):
    """
    Check if a key is missing from a map or mapped to None.

    :param dict: map to look for key
    :param key: key to find
    :return: True if key is not in dict or its value is None
    """
    return key not in dict or dict[key] is None
2bc3f2194b82e978ab8edb2ffaac7a88a58e9c9e
24,841
def make_options_bank_drop(values):
    """
    Helper function to generate the data format the dropdown
    dash component wants
    """
    return [{"label": value, "value": value} for value in values]
3c89c73b6e3cdc6fe07a71080577320f0af6729f
39,481
def apply_linear_transform(value):
    """
    Transforms value from non-linear scale (with gamma transform) to linear.

    :param value: Value to be transformed (between 0-1)
    :return: Transformed value (between 0-1)
    """
    threshold = 0.04045
    if value <= threshold:
        return value / 12.92
    return ((value + 0.055) / 1.055) ** 2.4
45c63324bdbd5532da1a9e3a119c319448e6de50
516,663
def _CppName(desc): """Return the fully qualified C++ name of the entity in |desc|.""" return '::'+desc.fqname.replace('.', '::')
bd5985321918850bfb1f095c1587028194e9739b
46,875
def generate_all_descriptions(env_params):
    """
    Generates all possible descriptions from a set of environment parameters.

    Parameters
    ----------
    env_params: dict
        Dict of environment parameters from get_env_params function.

    Returns
    -------
    training_descriptions: tuple of str
        Tuple of descriptions that belong to the training set (descriptions that do not
        contain occurrences reserved to the testing set).
    test_descriptions: tuple of str
        Tuple of descriptions that belong to the testing set (that contain occurrences
        reserved to the testing set).
    all_descriptions: tuple of str
        Tuple of all possible descriptions (training_descriptions + test_descriptions).
    """
    p = env_params.copy()

    # Admissible attributes, split into name attributes (type and categories)
    # and the other attribute families.
    name_attributes = env_params['name_attributes']
    colors_attributes = env_params['colors_attributes']
    positions_attributes = env_params['positions_attributes']
    drawer_door_attributes = env_params['drawer_door_attributes']
    any_all_attributes = env_params['any_all_attributes']
    rgbb_attributes = env_params['rgbb_attributes']

    all_descriptions = ()

    if 'Throw' in p['admissible_actions']:
        throw_descriptions = []
        for i in range(env_params['max_nb_objects'] + 1):
            throw_descriptions.append('Throw {} objects on the floor'.format(i))
        all_descriptions += tuple(throw_descriptions)

    if 'Open' in p['admissible_actions']:
        open_descriptions = []
        for d in drawer_door_attributes:
            open_descriptions.append('Open the {}'.format(d))
        all_descriptions += tuple(open_descriptions)

    if 'Close' in p['admissible_actions']:
        close_descriptions = []
        for d in drawer_door_attributes:
            close_descriptions.append('Close the {}'.format(d))
        all_descriptions += tuple(close_descriptions)

    if 'Grasp' in p['admissible_actions']:
        grasp_descriptions = []
        for c in colors_attributes:
            grasp_descriptions.append('Grasp any {} object'.format(c))
        for ca in colors_attributes + ('any',):
            for n in name_attributes:
                grasp_descriptions.append('Grasp {} {}'.format(ca, n))
        all_descriptions += tuple(grasp_descriptions)

    if 'Move' in p['admissible_actions']:
        move_descriptions = []
        for c in colors_attributes:
            move_descriptions.append('Move any {} object'.format(c))
        for ca in colors_attributes + ('any',):
            for n in name_attributes:
                move_descriptions.append('Move {} {}'.format(ca, n))
        all_descriptions += tuple(move_descriptions)

    if 'Put' in p['admissible_actions']:
        put_descriptions = []
        for a in any_all_attributes:
            for c in colors_attributes:
                for pos in positions_attributes:
                    put_descriptions.append('Put {} {} object {}'.format(a, c, pos))
        for ca in colors_attributes + any_all_attributes:
            for n in name_attributes:
                for pos in positions_attributes:
                    put_descriptions.append('Put {} {} {}'.format(ca, n, pos))
        all_descriptions += tuple(put_descriptions)

    if 'Hide' in p['admissible_actions']:
        hide_descriptions = []
        for a in any_all_attributes:
            for c in colors_attributes:
                hide_descriptions.append('Hide {} {} object'.format(a, c))
        for ca in colors_attributes + any_all_attributes:
            for n in name_attributes:
                hide_descriptions.append('Hide {} {}'.format(ca, n))
        all_descriptions += tuple(hide_descriptions)

    if 'Turn on' in p['admissible_actions']:
        turn_on_descriptions = []
        for r in rgbb_attributes:
            turn_on_descriptions.append('Turn on the {} light'.format(r))
        all_descriptions += tuple(turn_on_descriptions)

    if 'Turn off' in p['admissible_actions']:
        turn_off_descriptions = []
        for r in rgbb_attributes:
            turn_off_descriptions.append('Turn off the {} light'.format(r))
        all_descriptions += tuple(turn_off_descriptions)

    if 'Make' in p['admissible_actions']:
        make_descriptions = []
        for c in colors_attributes:
            make_descriptions.append('Make the panel {}'.format(c))
        all_descriptions += tuple(make_descriptions)

    if 'Paint' in p['admissible_actions']:
        color_descriptions = []
        for a in any_all_attributes:
            for c1 in colors_attributes:
                # BUGFIX: was `set(list(c1))`, which builds a set of c1's
                # *characters*, so multi-letter colors were never excluded and
                # descriptions like 'Paint any red object red' slipped in.
                # Use {c1}, consistent with set([c1]) in the next loop.
                for c2 in sorted(tuple(set(colors_attributes) - {c1})):
                    color_descriptions.append('Paint {} {} object {}'.format(a, c1, c2))
        for c1 in colors_attributes:
            for n in name_attributes:
                for c2 in sorted(tuple(set(colors_attributes) - set([c1]))):
                    color_descriptions.append('Paint {} {} {}'.format(c1, n, c2))
        for a in any_all_attributes:
            for n in name_attributes:
                for c2 in colors_attributes:
                    color_descriptions.append('Paint {} {} {}'.format(a, n, c2))
        all_descriptions += tuple(color_descriptions)

    train_descriptions = []
    test_descriptions = []
    for descr in all_descriptions:
        to_remove = False
        for w in p['words_test_set_def']:
            # words_test_set_def is the set of occurrences that is reserved to
            # the testing set.
            if w in descr:
                to_remove = True
                break
        if not to_remove:
            train_descriptions.append(descr)
        else:
            test_descriptions.append(descr)

    train_descriptions = tuple(sorted(train_descriptions))
    test_descriptions = tuple(sorted(test_descriptions))
    return train_descriptions, test_descriptions, all_descriptions
00b50eb085550515c4f7e57c96d7b78fdc2c8f6f
257,862
def corefNode(parseinfo, mention):
    """Return the parse-tree subtree whose token range matches *mention*.

    Returns None if no subtree matches exactly (should not happen).
    """
    sentence = parseinfo.sentence(mention.sentence)
    tree = sentence.parseTree()
    span = (mention.start, mention.end)
    for subtree in tree.subtrees():
        if subtree.token_range() == span:
            return subtree
    # nothing found that matches exactly --- should not happen
    return None
17dab0c6e1dd0682f43194043b29b347174faec0
164,880
def get_sessid(sessdir):
    """
    get_sessid(sessdir)

    Returns the session ID embedded in a session directory path.

    Required args:
        - sessdir (str): session directory, containing 'ophys_session_'

    Returns:
        - sessid (str): 9-character session ID string
          (docstring fixed: the value returned is a string slice, not an int)

    Raises:
        - ValueError: if 'ophys_session_' does not occur in sessdir
          (previously find() == -1 silently produced a garbage slice)
    """
    sesspart = "ophys_session_"
    idx = sessdir.find(sesspart)
    if idx == -1:
        raise ValueError("'{}' not found in {}".format(sesspart, sessdir))
    start = idx + len(sesspart)
    return sessdir[start:start + 9]
457c332acebeb8787a0e870cf74821e2702730d5
493,120
def CleanLine(line):
    """Converts a line from coolrunning results to a tuple of values."""
    fields = line.split()
    if len(fields) < 6:
        return None
    place, divtot, div, gun, net, pace = fields[:6]
    if '/' not in divtot:
        return None
    if any(':' not in time for time in (gun, net, pace)):
        return None
    return place, divtot, div, gun, net, pace
c59550c846e474ab5eadcc0e082feda4c04cc819
643,654
def insertion_sort(items):
    """Sort given items in place by taking each unsorted item and inserting
    it in sorted order in front of the already-sorted prefix; returns the
    same list.

    Running time: O(n^2) -- both loops grow with the input size.
    Memory usage: O(1) -- everything is done in place.
    """
    for i in range(1, len(items)):
        # items[0:i] is already sorted; find the first sorted element that is
        # >= items[i] and insert items[i] in front of it.
        for j in range(0, i):
            if items[j] >= items[i]:
                items.insert(j, items.pop(i))
                # BUGFIX: was `continue`, which kept comparing the (now
                # shifted) prefix against a different element at index i and
                # could re-insert equal keys redundantly. Once the element is
                # placed, this pass is done.
                break
    return items
07a15b8373d9168b92f71d78748c01fddefd6a8c
566,233
def resolve_object_property(obj, path: str):
    """Resolves the value of a (possibly nested) property on an object.

    For example, a path can be specified: 'other.beer.name'.

    Raises:
        AttributeError: In case the property could not be resolved.

    Returns:
        The value of the specified property.
    """
    target = obj
    for attribute_name in path.split('.'):
        target = getattr(target, attribute_name)
    return target
ed1afb179daa719a028ddadea5bc320e1a238a40
160,012
def estimated_bigram_probability(bigram, unigram_dict, bigram_dict):
    """
    Estimate the probability of bigram (= (syll_1, syll_2)) as
    count(syll_1, syll_2) / count(syll_1); unseen bigrams count as 0.
    """
    bigram_count = bigram_dict.get(bigram, 0)
    return bigram_count / unigram_dict[bigram[0]]
adf5d3830118da86b4e9a7981bfda4210f47b7f9
668,849
def convert_locp(locp, converter):
    """
    Convert the given list of card plays using the converter, dropping
    any plays the converter maps to None.

    :param locp: CardPlays to convert
    :type locp: [CardPlay, ...]
    :param converter: method to convert a CardPlay
    :type converter: CardPlay -> PyJSON or None
    :return: the list of PyJSON
    :rtype: [PyJSON, ...]
    """
    return [converted for converted in map(converter, locp)
            if converted is not None]
23330bec68ef8b5a658f387f421144d88c2d5a39
585,733
from typing import Dict def _new_ax_png_name(dct: Dict[str, str]) -> str: """ Returns the first string of the form "figureNNN.png" not present in dct, for NNN=000, 001, 002, ... (Docstring fixed: the code emits "figureNNN.png", not "axNNN.png" as the function name suggests -- NOTE(review): confirm which was intended.) """ idx = 0 while True: # 3-digit zero-padded counter, e.g. "figure007.png" base = f"figure{idx:03d}.png" if base not in dct: return base idx += 1
d880e277a64dd01b565d367b64c5bfd9302c1642
548,914
def _default_list(length: int) -> list: """ Returns a list filled with None values. """ return [None for _ in range(length)]
fbdd2b5769960c87f64b9ed9769bd4f9e80ae608
330,757