content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
from typing import Union
from pathlib import Path
import hashlib


def md5_for_file(file_path: Union[Path, str], block_size: int = 8192) -> str:
    """Get md5 hex digest for a file.

    :param file_path: Path to input file
    :param block_size: Input block size in bytes. Must be a positive
        multiple of 128 bytes.
    :returns: md5 hex digest
    :raises ValueError: If the path is not an existing file, or if
        ``block_size`` is not a positive multiple of 128.
    """
    _path = Path(file_path)
    if not _path.is_file():
        raise ValueError(f"Provided file path is invalid: {file_path}")
    # Bug fix: the old check (block_size >= 0) accepted 0, which made the
    # read loop stop immediately and silently returned the digest of an
    # empty byte string instead of the file's digest.
    if block_size <= 0 or block_size % 128:
        raise ValueError(f"Provided block_size is invalid: {block_size}")
    md5 = hashlib.md5()
    with open(_path, 'rb') as f:
        while True:
            data = f.read(block_size)
            if not data:
                break
            md5.update(data)
    return md5.hexdigest()
7eda527aa9cba3daee4ed248ec79b353c25a4813
123,226
def to_upper_underscore(name: str) -> str:
    """Transform a package name into an uppercase, underscore-separated prefix.

    Dashes become underscores, the result is uppercased, and a trailing
    underscore is appended (suitable as an env-var prefix).

    Example:
        >>> to_upper_underscore('a-pkg')
        'A_PKG_'
    """
    # Doc fix: the old docstring used a stale function name (pkg_2_uu) and
    # claimed 'A_PKG', but the code appends a trailing underscore.
    return name.replace("-", "_").upper() + "_"
9d703b535c3724b0ae68913ed6d0c7b46b6c5b95
123,227
def looksLikeDraft(o):
    """Does this object look like a Draft shape? (flat, no solid, etc)

    A draft-like object has a non-null Shape, contains no solids, and has
    essentially zero volume (i.e. it is a flat 2D object).
    """
    missing = object()
    shape = getattr(o, 'Shape', missing)
    # No shape at all -> ignore it.
    if shape is missing or shape.isNull():
        return False
    # Objects containing solids are handled later by getCutShapes.
    if len(shape.Solids) > 0:
        return False
    # A shape with (almost) no volume looks like a flat 2D object.
    return shape.Volume < 0.0000001
eb7b0d61fb06bcab680aa7c8495713825fff7fcc
123,233
def determine_refframe(phdr):
    """Determine the reference frame and equinox in standard FITS WCS terms.

    Parameters
    ----------
    phdr : `astropy.io.fits.Header`
        Primary header of an observation.

    Returns
    -------
    out : str, float
        Reference frame ('ICRS', 'FK5', 'FK4') and equinox.
    """
    equinox = phdr.get('EQUINOX')
    radesys = phdr.get('RADESYS') or phdr.get('RADECSYS')
    # MUSE files should have RADECSYS='FK5' and EQUINOX=2000.0.
    if radesys == 'FK5' and equinox == 2000.0:
        return 'FK5', equinox
    if radesys:
        return radesys, None
    if equinox is None:
        return None, None
    # No frame keyword present: infer the frame from the equinox epoch.
    return ('FK4' if equinox < 1984. else 'FK5'), equinox
0d15f35c044cb993e7568988aebbec15308e2656
123,234
def get_cve_id(cve):
    """Extract the (lower-cased) CVE id from a parsed CVE record.

    Parameters
    ----------
    cve : dict
        The dictionary generated from the CVE json.

    Returns
    -------
    str
        The cve id, or a blank string when none is present.
    """
    has_id = "CVE_data_meta" in cve and "ID" in cve["CVE_data_meta"]
    if not has_id:
        return ""
    return cve["CVE_data_meta"]["ID"].lower()
badf451b78ca0d713ee3f63564704d0b6d9c7bda
123,241
def flatten_shot(shot, game_id):
    """Flatten one (shot_id, shot_data) record into a single flat dict."""
    shot_id, data = shot
    clock = data['t']
    start = data['coord']['1']
    end = data['coord']['2']
    # Event time expressed in fractional minutes.
    minutes = clock['m'] + (clock['s'] / 60)
    return {
        'game_id': game_id,
        'shot_id': shot_id,
        'shot_type': data['type'],
        'time_of_event(min)': minutes,
        'team_id': data.get('team', None),
        'player_id': float(data['plyrId']),
        'shot_coord_x1': start['x'],
        'shot_coord_y1': start['y'],
        'shot_coord_z1': start['z'],
        'shot_coord_x2': end['x'],
        'shot_coord_y2': end['y'],
        'shot_coord_z2': end['z'],
    }
76e79162662ca920df95453a4eedd2723c7d996b
123,246
def callback(fname, fcontents):
    """Callback applied to file contents fetched from HDFS.

    Prints the file name and passes the (name, contents) pair through
    unchanged.
    """
    print("Fname:", fname)
    return (fname, fcontents)
0b7a418035dc5075338a8ff0d660bf2aa27d3186
123,247
import hashlib


def checksum(data):
    """Return the SHA-1 hex digest of *data*.

    :data: bytes object or str (str is UTF-8 encoded first)
    """
    payload = data.encode() if isinstance(data, str) else data
    return hashlib.sha1(payload).hexdigest()
4f9a168d42b2a7a9c10faf1b859e9949b7e9ac38
123,253
def get_fake_pcidevice_required_args(slot='00:00.0', class_id='beef', vendor_id='dead', device_id='ffff'):
    """Build a kwargs dict for lspci.PCIDevice with test-friendly defaults."""
    return dict(slot=slot, class_id=class_id, vendor_id=vendor_id, device_id=device_id)
5ad6475c209cfaee946edc820529dc3cb293678b
123,254
def trapezoidchar(aa, bb, cc, hh):
    """Calculate area and centroid (x, y) of a trapezoid.

    Returns the tuple (area, xc, yc).
    """
    span = aa + bb
    area = hh * span / 2.
    x_bar = (2. * aa * cc + aa ** 2. + cc * bb + aa * bb + bb ** 2.) / 3. / span
    y_bar = hh * (2. * aa + bb) / 3. / span
    return area, x_bar, y_bar
64e2d5682030fe0e5cbcf4addd1225ad7c8b5add
123,257
def walk(F, D, num_steps):
    """Simulate one walk of drunk D in field F lasting num_steps steps.

    Returns the distance between the start and end locations.
    """
    start = F.getDrunkLoc(D)
    for _step in range(num_steps):
        F.moveDrunk(D)
    return start.distFrom(F.getDrunkLoc(D))
795eb195084d4d70687fd293ca50b890ebda1908
123,258
def CircleLineIntersectDiscriminant(circle_center, circle_radius, P1, P2, tangent_tol = 1e-9):
    """Compute the circle/line intersection discriminant and its ingredients.

    Parameters
    ----------
    circle_center : 1D numpy array
        X- and y-coordinate of the circle center.
    circle_radius : float
        Radius of the circle.
    P1, P2 : 1D numpy array, floats
        Endpoints of the line segment.
    tangent_tol : float, optional
        Tolerance used by the caller for tangency tests (unused here).

    Returns
    -------
    cx, cy : float
        Circle center coordinates.
    dx, dy : float
        Segment extents along x and y (in circle-centered coordinates).
    dr : float
        Segment length.
    big_d : float
        Cross-product term entering the discriminant.
    discriminant : float
        r^2 * dr^2 - big_d^2; positive means two intersections.

    Source
    ------
    https://stackoverflow.com/a/59582674
    https://mathworld.wolfram.com/Circle-LineIntersection.html
    """
    cx, cy = circle_center
    # Shift both endpoints so the circle is centered at the origin.
    x1, y1 = P1[0] - cx, P1[1] - cy
    x2, y2 = P2[0] - cx, P2[1] - cy
    dx = x2 - x1
    dy = y2 - y1
    dr = (dx ** 2 + dy ** 2) ** .5
    big_d = x1 * y2 - x2 * y1
    discriminant = circle_radius ** 2 * dr ** 2 - big_d ** 2
    return cx, cy, dx, dy, dr, big_d, discriminant
7f1f514660cd9e542ecd47e7b394956014af9522
123,259
import yaml


def _instance_user_data(hostname, otp, instance_vars):
    """Return instance user data (common instance vars + hostname and otp)."""
    key_value_pairs = instance_vars.copy()
    key_value_pairs['hostname'] = hostname
    key_value_pairs['otp'] = otp
    rendered = yaml.dump(
        key_value_pairs,
        default_flow_style=False,
        default_style='\''
    )
    return "#cloud-config\n" + rendered
888740b7d43f639e35ace1986cf28eaad774ec27
123,261
def _weights(model):
    """Returns tensors of model weights, in the order of the variables."""
    values = []
    for variable in model.weights:
        values.append(variable.read_value())
    return values
7dd8fc5de2cc416ff0cc63da8d376c3c46ac4868
123,263
def apply_pp(literal, pp_func):
    """Apply a named post-processing function to the provided text.

    Arguments:
        `literal` - The string to transform.
        `pp_func` - The name of the post-processing function to apply.

    Returns:
        The transformed string.

    Raises:
        Exception: if `pp_func` is not a known function name.
    """
    def _trim_e(text):
        # English-specific: drop a trailing 'e'.
        return text[:-1] if text[-1] == 'e' else text

    def _strip_the(text):
        # English-specific: drop a leading 'the '/'The '.
        if text.startswith('the ') or text.startswith('The '):
            return text[4:]
        return text

    def _pluralise(text):
        if text[-1] == 'y':
            return text[:-1] + 'ies'
        if text[-1] == 's':
            return text + 'es'
        return text + 's'

    def _past_tensify(text):
        return text + 'd' if text[-1] == 'e' else text + 'ed'

    dispatch = {
        'upper': str.upper,
        'lower': str.lower,
        'upcase-first': lambda text: text[0].upper() + text[1:],
        'capitalize': str.capitalize,
        'trim-e': _trim_e,
        'strip-the': _strip_the,
        'pluralise': _pluralise,
        'past-tensify': _past_tensify,
    }
    if pp_func not in dispatch:
        raise Exception("ppfunc unknown: " + pp_func)
    return dispatch[pp_func](literal)
3c3c6365f3d6e84a816dc19a1ccdd4b0dce1e2d5
123,265
def FWHMeff2FWHMgeom(FWHMeff):
    """Convert FWHMeff to FWHMgeom.

    Linear conversion calculated by Bo Xin and Zeljko Ivezic (to appear in
    an update of the LSE-40 and overview papers).

    Parameters
    ----------
    FWHMeff: float
        The single-gaussian equivalent FWHM value, appropriate for
        calcNeff, in arcseconds.

    Returns
    -------
    float
        FWHMgeom, the geometric FWHM value as measured from a typical PSF
        profile, in arcseconds.
    """
    return 0.822 * FWHMeff + 0.052
916daff02a589418c6855786b23c0f3970b89769
123,270
def _get_readlines(file_generated):
    """Return the readlines() of the generated report file.

    :param file_generated: file name inside /code/calico-upgrade-report/.
    :returns: list of lines (universal newlines, the text-mode default).
    """
    # Bug fix: mode 'rU' was deprecated since Python 3.4 and removed in
    # Python 3.11 (raises ValueError). Universal newline handling is the
    # default in text mode, so plain 'r' is equivalent. The context manager
    # also guarantees the handle is closed if readlines() raises.
    with open("/code/calico-upgrade-report/" + file_generated, 'r') as file_output:
        return file_output.readlines()
4a0d56e6d08da06b7ea936eece15a9d3c70582e3
123,272
def im2col_get_pixel(im, height, width, row, col, channel, pad):
    """Fetch one pixel from a flattened image, honoring zero padding.

    Args:
        im: input image as a flat buffer (channel-major layout).
        height: image height.
        width: image width.
        row: row index (in padded coordinates).
        col: col index (in padded coordinates).
        channel: channel index.
        pad: padding length; out-of-range reads return 0.
    """
    row -= pad
    col -= pad
    # Reads that fall inside the padding region yield implicit zeros.
    if 0 <= row < height and 0 <= col < width:
        return im[int(col + width * (row + height * channel))]
    return 0
d03a0e4806e4ae102ca12525920db99fbe2bf1b1
123,273
def parse_line(line):
    """Parse one line of a text embedding file.

    Args:
        line: (str) One line of the text embedding file.

    Returns:
        A token string and its embedding vector as a list of floats.
    """
    fields = line.split()
    return fields[0], [float(field) for field in fields[1:]]
d09f75cde6f7a50ce3b62d990dda9fd48f4bac4b
123,274
def to_str(message):
    """Return the string representation of an arbitrary message.

    Strings pass through unchanged; bytes are decoded as UTF-8.
    """
    return message if isinstance(message, str) else message.decode('utf-8')
166eb23cb883543c5fa7d7859a2f0e9845f3f7c5
123,278
import _hashlib


def __hash_new(name, string='', usedforsecurity=True):
    """new(name, string='') - Return a new hashing object using the named algorithm; optionally initialized with a string. Override 'usedforsecurity' to False when using for non-security purposes in a FIPS environment """
    # NOTE(review): passing `usedforsecurity` positionally only works on
    # FIPS-patched Python builds where _hashlib.new accepts it positionally;
    # on vanilla CPython it is keyword-only — confirm the target interpreter.
    try:
        return _hashlib.new(name, string, usedforsecurity)
    except ValueError:
        # Re-raised unchanged. In upstream CPython's hashlib this handler
        # falls back to a builtin constructor; that fallback is absent here,
        # so the except block is currently a pass-through.
        raise
9ddd4daa37a49869e9d748c2f51c63de49d335b5
123,279
def videogames_sampling(df_can_suggest, num_suggestions, random_state=None):
    """Sample video-game titles from the games that survived filtering.

    Args:
        df_can_suggest (pandas.DataFrame): the subset of videogames that can
            be suggested to the user.
        num_suggestions (int): the number of suggestions displayed to the
            user.
        random_state (int): Leave set to None except for unit test.

    Returns:
        list of str: the suggested titles for the GUI.

    (Written by Scott Mobarry)
    Driver: Scott Mobarry
    Navigator: Salah Waji
    """
    print(f'df_can_suggest = \n{df_can_suggest}')
    sampled = df_can_suggest.sample(n=num_suggestions, random_state=random_state)
    # Extract a plain python list of the suggested titles.
    return sampled['Name'].tolist()
af4242a793664572c6d1ae1dc38524e44d8d3440
123,286
from typing import List


def filter_list(unfiltered_list: List) -> List:
    """Filters empty lists and None's out of a list.

    :param unfiltered_list: A list to filter.
    :return: A list containing no empty lists or None's
    """
    filtered_list = []
    for element in unfiltered_list:
        if element == [] or element is None:
            continue
        filtered_list.append(element)
    return filtered_list
d8d91984dc97960e704c77e487a93690657b88fe
123,292
from typing import Union
import json


def get_object(path: str) -> Union[list, dict]:
    """Load and return the JSON value stored in the file at *path*.

    :param path: path to a .json file (read as UTF-8).
    :return: the parsed JSON object (list or dict).
    """
    # Bug fix: the file handle was opened but never closed, leaking it
    # (and delaying the close indefinitely on non-CPython runtimes).
    with open(path, 'r', encoding='utf-8') as file:
        return json.load(file)
ae6d84b38ae551e688860078a773598829662b25
123,294
def is_null_or_empty(obj):
    """Checks if a given object is either None, the empty string, or an
    empty list/dict."""
    if obj is None or obj == "":
        return True
    if isinstance(obj, (list, dict)):
        return len(obj) == 0
    return False
97f9e27e08ab7233f9dfd6fce3bd31bdaf0bea93
123,297
def bounding_box_to_annotations(bbx):
    """Converts :any:`bob.ip.facedetect.BoundingBox` to dictionary annotations.

    Parameters
    ----------
    bbx : :any:`bob.ip.facedetect.BoundingBox`
        The given bounding box.

    Returns
    -------
    dict
        A dictionary with topleft and bottomright keys.
    """
    return {
        'topleft': bbx.topleft,
        'bottomright': bbx.bottomright,
    }
41b984e7824035ebda551712b5777c2aa4269de6
123,298
import re


def sanitize_filename(filename):
    """Sanitize a filename so it contains only alphanumerics, dot, underscore
    or dash.

    Every run of other characters is replaced with a single underscore, so
    consecutive disallowed characters collapse into one
    (e.g. `a_%$#@_b` -> `a_b`).
    """
    disallowed_run = r'[^a-zA-Z0-9.-]+'
    return re.sub(disallowed_run, '_', filename)
77145397accba0f005b285816c2839616bce8dd8
123,299
import torch


def nll_lorentzian_var(preds, target, gamma):
    """Variance of the isotropic Lorentzian negative log-likelihood.

    :param preds: prediction values from NN of size
        [batch, particles, timesteps, (x,y,v_x,v_y)]
    :param target: target data of size
        [batch, particles, timesteps, (x,y,v_x,v_y)]
    :param gamma: tensor of the FWHM of the distribution, same size as preds
    :return: variance of the per-sample loss, normalised by the particle
        dimension (dim 1)
    """
    residual_sq = (preds - target) ** 2
    neg_log_p = torch.log(1 + residual_sq / (gamma ** 2)) + torch.log(gamma)
    per_sample = neg_log_p.sum(dim=1) / target.size(1)
    return per_sample.var()
5b3efe721c6e3a2258b3088b4b3656ea00a01eb5
123,301
def _read_until(sock, char):
    """Read single bytes from the socket until *char* is received; return
    everything read (including the terminator) as bytes."""
    received = []
    while True:
        byte = sock.recv(1)
        received.append(byte)
        if byte == char:
            return b"".join(received)
958a488078f46a0ed24788603cfd4fd762b8300b
123,302
import random


def checkLevelForGif(stressLevel: float):
    """Check the stress level for special numbers.

    Returns a random 'nice' gif URL when the level is 69, otherwise 0.
    Extend the special-case table below with more (level, gifs) pairs
    for additional numerical checks.
    """
    nice_gifs = [
        "https://tenor.com/view/noice-nice-click-gif-8843762",
        "https://tenor.com/view/brooklyn99-noice-jake-peralta-andy-samberg-nice-gif-14234819",
    ]
    if stressLevel != 69.0:
        return 0
    return random.choice(nice_gifs)
eaf3042c896e0a737d31505dc95269c5c8310391
123,303
def _get_new_user_identities_for_remove(exist_user_identity_dict, user_identity_list_to_remove):
    """Compute which user-assigned identity ids remain after a removal.

    :param exist_user_identity_dict: dict mapping user-assigned managed
        identity resource id -> identity object.
    :param user_identity_list_to_remove: None, an empty list, or a list of
        resource-id strings to remove.
    :return: list of resource-id strings to keep.
    """
    if not exist_user_identity_dict:
        return []
    # None means "remove nothing": keep every existing identity.
    if user_identity_list_to_remove is None:
        return list(exist_user_identity_dict.keys())
    # An empty list means "remove all user-assigned managed identities".
    if len(user_identity_list_to_remove) == 0:
        return []
    # Non-empty list: keep ids whose lower-cased form is not scheduled
    # for removal (the removal list is assumed lower-cased by the caller).
    return [resource_id for resource_id in exist_user_identity_dict
            if resource_id.lower() not in user_identity_list_to_remove]
c46e8f5e54636bb3f790f3ed720bf645333d344b
123,306
def year_sequence(start_year, end_year):
    """Return the inclusive sequence from start_year to end_year.

    year_sequence(2001, 2005) = [2001, 2002, 2003, 2004, 2005]
    year_sequence(2005, 2001) = [2005, 2004, 2003, 2002, 2001]
    """
    step = 1 if start_year <= end_year else -1
    return list(range(start_year, end_year + step, step))
b6de422c63ab94caf756e31b11c8d2bc9b51cbcf
123,308
from typing import Callable
from typing import Iterable


def all(pred: Callable, xs: Iterable):
    """Check whether all the elements of the iterable `xs` fulfill
    predicate `pred`.

    Short-circuits on the first failing element.

    :param pred: predicate function
    :param xs: iterable object.
    :returns: boolean
    """
    return not any(not pred(x) for x in xs)
98c5689d0d0209690010cc0d37d4fd5e02554890
123,309
import errno


def is_eintr(exc):
    """Returns True if an exception is an EINTR, False otherwise."""
    # Modern exceptions (OSError and friends) carry .errno directly.
    if hasattr(exc, 'errno'):
        return exc.errno == errno.EINTR
    # Legacy-style exceptions: inspect args when a .message attribute exists.
    args = getattr(exc, 'args', None)
    if args and hasattr(exc, 'message'):
        return args[0] == errno.EINTR
    return False
b6a7c280f87757492f3a4c1ee166577691ef8622
123,313
def filter(input_list: list, allowed_elements: list):
    """Return the elements of input_list that appear in allowed_elements,
    preserving order and duplicates."""
    return [element for element in input_list if element in allowed_elements]
04c5d811705ac5d122c1fc3ada90f35ea999572a
123,319
def get_showable_lines(self) -> int:
    """Return the number of lines which are shown.

    Less than the maximum if fewer lines are in the array: the visible
    span is capped at maxLines.
    """
    visible = self.showable_line_numbers_in_editor + self.showStartLine
    return min(visible, self.maxLines)
07058adc289b1c736c396a869e3a00eb34f9b8c7
123,321
from typing import Dict
import json


def get_geo_locations() -> Dict:
    """Return the dictionary mapping each address to its geo-location.

    :return: the geo-locations
    :rtype: Dict
    """
    geo_path = "static/json/geo_locations.json"
    with open(geo_path, "r") as handle:
        return json.load(handle)
4d57a426a3d078ea3ec88ae0ea428b8100502bde
123,323
import imaplib


def imap_connect(username, password, server, port=993):
    """Connect to the server using IMAP SSL.

    Args:
        username <str>
        password <str>
        server <str>
        port <int>: default 993 for SSL

    Returns:
        <imaplib.IMAP4_SSL>: Reference to the connection
    """
    # Bug fix: the port argument was accepted but never used, so callers
    # passing a non-default port were silently connected to 993.
    imap = imaplib.IMAP4_SSL(server, port)
    imap.login(username, password)
    return imap
b06c2cb6851df34afd936daac6ffdbacb71dc500
123,324
def get_shingles(text, k):
    """Return the list of all k-shingles of a text.

    @param text: string to convert to shingles
    @param k: length of each shingle
    @return: list of the len(text) - k + 1 shingles, in order
    """
    # Bug fix: the original guard `i + k < length` was off by one and
    # dropped the final shingle (e.g. 'abc', k=2 yielded only ['ab']).
    # The last valid start index is len(text) - k.
    return [text[i:i + k] for i in range(len(text) - k + 1)]
cf0e261ba67a0f51d0af1f3da4de1487d1b5449b
123,325
import torch


def logp(
    denom: torch.Tensor,
    acts: torch.Tensor,
    maxT: int,
    maxU: int,
    alphabet_size: int,
    mb: int,
    t: int,
    u: int,
    v: int
):
    """Sum the log probability from the activation tensor and its denominator.

    Args:
        denom: Flattened tensor of shape [B, T, U]; denominator of the
            logprobs activation tensor across the entire vocabulary.
        acts: Flattened tensor of shape [B, T, U, V+1]; the logprobs
            activation tensor.
        maxT: Maximum possible acoustic sequence length (T).
        maxU: Maximum possible target sequence length (U).
        alphabet_size: Vocabulary dimension V+1 (inclusive of RNNT blank).
        mb: Batch indexer.
        t: Acoustic sequence timestep indexer.
        u: Target sequence timestep indexer.
        v: Vocabulary token indexer.

    Returns:
        The sum logprobs[mb, t, u, v] + denom[mb, t, u].
    """
    # Row-major offset of (mb, t, u) in the flattened [B, T, U] tensor.
    flat_index = (mb * maxT + t) * maxU + u
    return denom[flat_index] + acts[flat_index * alphabet_size + v]
786c1163c3b89e5497f1be3aad225b24641162c5
123,329
import math


def get_degree_sequence(node_degrees, num_buckets, k_neighbors, max_degree):
    """Turn lists of neighbors into a degree sequence.

    :param node_degrees: the degree of each node in a graph
    :param num_buckets: degree (node feature) binning; None for no binning
    :param k_neighbors: node's neighbors at a given layer
    :param max_degree: max degree in all input graphs
    :return: list of ints counting nodes of each degree (or each
        log-scale bucket when num_buckets is given)
    """
    if num_buckets is not None:
        counts = [0] * int(math.log(max_degree, num_buckets) + 1)
    else:
        counts = [0] * (max_degree + 1)
    # Count each k-hop neighbor's degree; only unweighted graphs are
    # supported here, so every node contributes a weight of 1.
    for node in k_neighbors:
        degree = node_degrees[node]
        if num_buckets is None:
            counts[degree] += 1
        else:
            try:
                counts[int(math.log(degree, num_buckets))] += 1
            except:
                print("Node %d has degree %d and will not contribute to feature distribution" % (node, degree))
    return counts
e0f95da6f5b47f0b6dbb1ae03d234cbc31528970
123,330
import requests


def get_github_from_pypi(package, version):
    """Query PyPI for a package's home page and parse out its GitHub repo.

    :param package: PyPI package
    :param version: Package version
    :return: dict with 'owner' and 'package' keys when the home page is a
        GitHub URL; otherwise the home-page string itself.
    :raises Exception: when PyPI does not answer with HTTP 200.
    """
    res = requests.get("https://pypi.python.org/pypi/{}/{}/json".format(package, version))
    if res.status_code != 200:
        print(res.text)
        # Fixed the 'pypii' typo in the error message.
        raise Exception("Got error requesting from pypi")
    # Parse out the package home page.
    res_json = res.json()
    home_page = res_json['info']['home_page']
    if "github.com" not in home_page:
        # If not a github repo just return what was given.
        return home_page
    # Strip the scheme+host prefix. Bug fix: the http:// form previously
    # lacked the trailing slash, leaving a leading '/' in the remainder and
    # producing an empty 'owner' after the split.
    home_page = home_page.replace("https://github.com/", "")
    home_page = home_page.replace("http://github.com/", "")
    split_b = home_page.split("/")
    return {
        "owner": split_b[0],
        "package": split_b[1]
    }
2c241a7c00d30a0bee5296f0d004b200bf82feb0
123,331
def relu_derivative(x):
    """In-place ReLU derivative for a numpy array.

    *x* becomes 1 where x > 0 and 0 elsewhere; the mutated array itself is
    returned.

    Thanks to source:
    https://stackoverflow.com/questions/46411180/implement-relu-derivative-in-python-numpy
    """
    positive = x > 0
    x[~positive] = 0
    x[positive] = 1
    return x
c53f7aa507516a82312267227a5d436394930a7b
123,333
def is_container(node):
    """Determine if an ASDF tree node is an instance of a "container" type
    (i.e., its value may contain child nodes).

    Parameters
    ----------
    node : object
        an ASDF tree node

    Returns
    -------
    bool
        True if node is a container (dict, list or tuple), False otherwise
    """
    return isinstance(node, (dict, list, tuple))
bcb45a9104fbc275ada23ba949b9044f5aaf7773
123,334
def compare_dictionaries(reference, comparee, ignore=None):
    """Compare two dictionaries and print their differences usefully.

    Keys listed in *ignore* are excluded from the comparison. Returns True
    when the (non-ignored) items match exactly, otherwise prints the
    differing items and returns False.
    """
    if not isinstance(reference, dict):
        raise Exception("reference was %s, not dictionary" % type(reference))
    if not isinstance(comparee, dict):
        raise Exception("comparee was %s, not dictionary" % type(comparee))
    ignored = [] if ignore is None else ignore
    ref_pairs = {(k, v) for k, v in reference.items() if k not in ignored}
    cmp_pairs = {(k, v) for k, v in comparee.items() if k not in ignored}
    only_in_ref = ref_pairs - cmp_pairs
    only_in_cmp = cmp_pairs - ref_pairs
    if not only_in_ref and not only_in_cmp:
        return True
    # Sort for stable output when the items are orderable.
    try:
        only_in_ref = sorted(only_in_ref)
        only_in_cmp = sorted(only_in_cmp)
    except Exception:
        pass
    print("These dictionaries are not identical:")
    if only_in_ref:
        print("in reference, not in comparee:")
        for key, value in only_in_ref:
            print("\t", key, value)
    if only_in_cmp:
        print("in comparee, not in reference:")
        for key, value in only_in_cmp:
            print("\t", key, value)
    return False
10d3590bda442fbd3e6b48ec8e52ece6b8996764
123,337
import wave


def getWAVduration(fname):
    """Determine the duration of a .WAV file.

    Params:
        fname -- String: The WAV filename

    Returns:
        A float representing the duration in milliseconds.
    """
    # Bug fix: the wave handle was never closed (leaked on every call, and
    # on any exception). Wave_read supports the context-manager protocol.
    with wave.open(fname, 'r') as f:
        frames = f.getnframes()
        rate = f.getframerate()
    return frames / float(rate) * 1000
083b81ded681193eabc8c4328028a86c7506352a
123,339
import math
import struct


def _next_up(x):
    """Returns the next-largest float towards positive infinity.

    Args:
        A floating point number.

    Returns:
        The next-largest float towards positive infinity. If NaN or positive
        infinity is passed, the argument is returned.
    """
    if math.isnan(x) or (math.isinf(x) and x > 0):
        return x
    # 0.0 and -0.0 both map to the smallest +ve float.
    if x == 0.0:
        x = 0.0
    # Reinterpret the float's IEEE-754 bits as a signed 64-bit integer;
    # within each sign, adjacent floats have adjacent bit patterns.
    n = struct.unpack('<q', struct.pack('<d', x))[0]
    if n >= 0:
        # Non-negative pattern: +1 steps toward +infinity.
        n += 1
    else:
        # Negative pattern: -1 shrinks the magnitude, i.e. steps toward
        # zero and therefore toward +infinity.
        n -= 1
    return struct.unpack('<d', struct.pack('<q', n))[0]
ebe92139002cee153af362136a97580225dae00d
123,342
def html_color_for(rgb):
    """Convert a 3-element rgb structure to an HTML rgba() color definition
    with a fixed 0.3 opacity."""
    red, green, blue = rgb[0], rgb[1], rgb[2]
    alpha = 0.3
    return f"rgba({red},{green},{blue},{alpha})"
fae20229cb1f9d766715768104f68775610e49c2
123,345
def bankbase(bankindex, numbanks, prgbanksize, fix8000):
    """Calculate the base address of a PRG bank.

    bankindex -- the index of a bank (0=first)
    numbanks -- the total number of banks in the ROM
    prgbanksize -- the size in 1024 byte units of a bank, usually 8, 16, or 32
    fix8000 -- if false, treat the first window ($8000) as switchable;
        if true, treat the last window as switchable
    """
    if prgbanksize > 32:
        return 0  # for future use with Super NES HiROM
    numwindows = 32 // prgbanksize
    if fix8000:
        window = min(bankindex, numwindows - 1)
    else:
        window = max(0, bankindex + numwindows - numbanks)
    return 0x8000 + 0x400 * prgbanksize * window
20a1816e1b00e78b43b94c283a7407a65b2ad76c
123,346
import re


def remove_numbers(wordlist):
    """Remove words containing digits from the wordlist.

    Arg:
        wordlist: a list of words
    Return:
        new_wordlist: a deduplicated list of words without any digit
            (order is not preserved, matching the original set-based
            deduplication)
    """
    # Fix: the pattern must be a raw string; '\d' in a plain string is an
    # invalid escape sequence (DeprecationWarning today, an error in
    # future Python versions).
    kept = [word for word in wordlist if not re.search(r'\d+', word)]
    return list(set(kept))
26964efe57c1c44d158e77c0d0b6aa893285f0f3
123,347
import re


def get_chrome_version() -> str:
    """Get the current version of Chrome.

    Note that this only works on Linux systems. On macOS, for example,
    the `google-chrome` command may not work.

    Returns:
        str. The version of Chrome we found.
    """
    output = str(common.run_cmd(['google-chrome', '--version']))  # type: ignore[no-untyped-call]
    # Keep only the digits and dots from the command output.
    return ''.join(re.findall(r'([0-9]|\.)', output))
c6ec2d4487917f72f10ca113849348a2233f48a6
123,349
def one_to_many(df, unitcol, manycol):
    """Assert that a many-to-one relationship is preserved between two columns.

    For example, a retail store will have distinct departments, each with
    several employees. If each employee may only work in a single department,
    then the relationship of the department to the employees is one to many.

    Parameters
    ==========
    df : DataFrame
    unitcol : str
        The column that encapsulates the groups in ``manycol``.
    manycol : str
        The column that must remain unique in the distinct pairs between
        ``manycol`` and ``unitcol``

    Returns
    =======
    df : DataFrame
    """
    pairs = df[[manycol, unitcol]].drop_duplicates()
    for value in pairs[manycol].unique():
        # More than one distinct pair for this value means it maps to
        # several units, violating the one-to-many relationship.
        if pairs[pairs[manycol] == value].shape[0] > 1:
            msg = "{} in {} has multiple values for {}".format(value, manycol, unitcol)
            raise AssertionError(msg)
    return df
2823b05255592788cd7a969b1f90662dc180b668
123,352
import collections


def compute_bins(path):
    """Compute the bin counts for numbers of cognates in each simulation.

    Args:
        path: Path to the output of generate_random_cognate_lists.py

    Returns:
        A histogram (defaultdict) of the numbers of cognates.
    """
    histogram = collections.defaultdict(int)
    with open(path) as stream:
        for line in stream:
            # Only "RUN <run_id> <n_cognates>" lines contribute.
            if not line.startswith("RUN"):
                continue
            _, _, n_cognates = line.split()
            histogram[int(n_cognates)] += 1
    return histogram
fbdd4b244b9aa07943c0dd53027bd20146914ac0
123,364
def getServer(networkDict):
    """Get Server information in a specific network."""
    wgnet = networkDict['WGNet']
    return wgnet['Server']
9c68c7109d2155025e1e4147e649e0eb38eebc56
123,371
def is_slug(string):
    """Test whether a URL slug is valid (digits, lowercase letters and
    dashes only; the empty string counts as valid)."""
    allowed = '0123456789-abcdefghijklmnopqrstuvwxyz'
    return all(ch in allowed for ch in string)
e81a54afd9c4b4cbc789e610f7fa70e7d8e73668
123,374
def get_analysis_files_names(resu_dir, simu_id):
    """Give analysis files names.

    Parameters
    ----------
    resu_dir: filename
        Path of the directory in which to save the results.
    simu_id: string
        Name of the simulation, to be used to name files.

    Returns
    -------
    analysis_files : dictionary
        analysis_files[<measure abbreviation>_<algo>] = filename
    """
    # Refactor: the original spelled out 24 near-identical assignments;
    # build the same mapping data-driven instead.
    # measure abbreviation (dict-key prefix) -> file extension
    measures = {
        'acc': 'acc',            # accuracy values
        'mcc': 'mcc',            # mcc values
        'ppv': 'ppv',            # PPV values
        'tpr': 'tpr',            # sensitivity = TPR values
        'ci': 'consistency',     # consistency values
        'rmse': 'rmse',          # RMSE values
        'timing': 'timing',      # timing values
        'maxRSS': 'maxRSS',      # maxRSS values
    }
    # dict-key algo suffix -> file-name algo tag
    algos = {'st': 'sfan', 'msfan_np': 'msfan_np', 'msfan': 'msfan'}
    analysis_files = {}
    for measure, extension in measures.items():
        for algo_key, algo_tag in algos.items():
            analysis_files['%s_%s' % (measure, algo_key)] = \
                '%s/%s.%s.%s' % (resu_dir, simu_id, algo_tag, extension)
    return analysis_files
e7931ae4f58494b21d81b19898c3f63ba6456477
123,376
def guess_n_eigs( n_electron, n_eigs = None ):
    """Guess the number of eigenvalues (energies) to compute so that the
    smearing iteration converges.

    Passing n_eigs overrides the guess.
    """
    if n_eigs is not None:
        return n_eigs
    if n_electron > 2:
        # Heuristic: 20% headroom over half-filling plus five extra states.
        return int(1.2 * ((0.5 * n_electron) + 5))
    return n_electron
74b044d807d243243744d73b8e230be24c528492
123,379
def _num_pred(op, x, y):
    """Return a numerical predicate that is called on a State.

    Each of x / y is either an int literal used directly, or a key looked
    up in state.f_dict when the predicate runs.
    """
    def predicate(state):
        resolved = [operand if type(operand) is int else state.f_dict[operand]
                    for operand in (x, y)]
        return op(*resolved)
    return predicate
98fdfff485b6116a7a2a394d9de577bc42fd0c23
123,381
def cleanup_report(report):
    """Given a dictionary of a report, return a new dictionary for output
    as JSON.

    :param report: report dict containing '_id', 'user' and 'post' keys.
    :return: a new dict with '_id' replaced by a stringified 'id', and
        'user'/'post' stringified; the input dict is left untouched.
    """
    # Bug fix: `report_data = report` aliased the argument, so the function
    # mutated the caller's dict (deleting '_id') despite the docstring
    # promising a new dictionary. Copy first.
    report_data = dict(report)
    report_data["id"] = str(report["_id"])
    report_data["user"] = str(report["user"])
    report_data["post"] = str(report["post"])
    del report_data["_id"]
    return report_data
5ddc0782c0b2e51047fe6a19870b6d7562b5b45c
123,382
def extract_key(key_shape, item):
    """Construct a key for building an index by projecting *item* onto the
    fields listed in *key_shape*."""
    key = {}
    for field in key_shape:
        key[field] = item[field]
    return key
9a05dda75ceadaf1e615ffb9f470de8da1ef5b43
123,385
def lead(series, i=1):
    """Return a series shifted forward by a value; `NaN` values fill the end.

    Same as a call to `series.shift(-i)`.

    Args:
        series: column to shift forward.
        i (int): number of positions to shift forward.
    """
    return series.shift(-i)
6832c6dcb61e8a99b5e2b2f236ab2cc1700fac2b
123,394
def validate_network_parameters(num_neurons_input,
                                num_neurons_output,
                                num_neurons_hidden_layers,
                                output_activation,
                                hidden_activations,
                                num_solutions=None):
    """
    Validate the parameters passed to initial_population_networks() and build
    the list of hidden-layer activation function name(s).

    If 'hidden_activations' is a single string, it is replicated once per
    hidden layer (len(num_neurons_hidden_layers) times).  An invalid
    parameter raises ValueError/TypeError.

    num_neurons_input: Number of neurons in the input layer.
    num_neurons_output: Number of neurons in the output layer.
    num_neurons_hidden_layers: List/tuple of neuron counts per hidden layer.
    output_activation: Name of the output layer's activation function.
    hidden_activations: Name(s) of the hidden layer(s)' activation function(s).
    num_solutions: Number of networks in the population; None means a single
        network is being created rather than a population.

    Returns a list holding the activation function name(s) for the hidden
    layer(s).
    """
    # Validating the number of solutions within the population.
    if num_solutions is not None:
        if num_solutions < 2:
            raise ValueError("num_solutions: The number of solutions within the population must be at least 2. The current value is {num_solutions}.".format(num_solutions=num_solutions))

    # Bug fix: the original used ``num_neurons_input is int`` which compares
    # the value against the ``int`` type object and is always False, so the
    # <= 0 validations below never ran.  isinstance() is the correct test.
    if isinstance(num_neurons_input, int) and num_neurons_input <= 0:
        raise ValueError("num_neurons_input: The number of neurons in the input layer must be > 0.")
    if isinstance(num_neurons_output, int) and num_neurons_output <= 0:
        raise ValueError("num_neurons_output: The number of neurons in the output layer must be > 0.")

    # The hidden-layer sizes must come as a list or tuple.
    if not (type(num_neurons_hidden_layers) in [list, tuple]):
        raise TypeError("num_neurons_hidden_layers: A list or a tuple is expected but {hidden_layers_neurons_type} found.".format(hidden_layers_neurons_type=type(num_neurons_hidden_layers)))

    # Frequently used error messages.
    unexpected_output_activation_value = "Output activation function: The activation function of the output layer is passed as a string not {activation_type}."
    unexpected_activation_value = "Activation function: The supported values for the activation function are {supported_activations} but an unexpected value is found:\n{activations}"
    unexpected_activation_type = "Activation Function: A list, tuple, or a string is expected but {activations_type} found."
    length_mismatch = "Hidden activation functions: When passing the activation function(s) as a list or a tuple, its length must match the length of the 'num_neurons_hidden_layers' parameter but a mismatch is found:\n{mismatched_lengths}"

    # Names of the supported activation functions.
    supported_activations = ["sigmoid", "relu", "softmax", "None"]

    # Validating the output layer activation function.
    if not (type(output_activation) is str):
        raise ValueError(unexpected_output_activation_value.format(activation_type=type(output_activation)))
    if not (output_activation in supported_activations):
        raise ValueError(unexpected_activation_value.format(activations=output_activation, supported_activations=supported_activations))

    # Number of hidden layers.
    num_hidden_layers = len(num_neurons_hidden_layers)

    if num_hidden_layers > 1:
        # More than 1 hidden layer.
        if type(hidden_activations) in [list, tuple]:
            num_activations = len(hidden_activations)
            if num_activations != num_hidden_layers:
                raise ValueError(length_mismatch.format(mismatched_lengths="{num_activations} != {num_layers}".format(num_layers=num_hidden_layers, num_activations=num_activations)))
        elif type(hidden_activations) is str:
            if hidden_activations in supported_activations:
                # Replicate the single name once per hidden layer.
                hidden_activations = [hidden_activations] * num_hidden_layers
            else:
                raise ValueError(unexpected_activation_value.format(supported_activations=supported_activations, activations=hidden_activations))
        else:
            raise TypeError(unexpected_activation_type.format(activations_type=type(hidden_activations)))
    elif num_hidden_layers == 1:
        # Exactly 1 hidden layer.
        if type(hidden_activations) in [list, tuple]:
            if len(hidden_activations) != 1:
                raise ValueError(length_mismatch.format(mismatched_lengths="{num_activations} != {num_layers}".format(num_layers=num_hidden_layers, num_activations=len(hidden_activations))))
        elif type(hidden_activations) is str:
            if not (hidden_activations in supported_activations):
                raise ValueError(unexpected_activation_value.format(supported_activations=supported_activations, activations=hidden_activations))
            else:
                hidden_activations = [hidden_activations]
        else:
            raise TypeError(unexpected_activation_type.format(activations_type=type(hidden_activations)))
    else:
        # No hidden layers (num_hidden_layers == 0).
        print("WARNING: There are no hidden layers however a value is assigned to the parameter 'hidden_activations'. It will be reset to [].")
        hidden_activations = []

    # When given as a list, verify every listed activation name is supported.
    for act in hidden_activations:
        if not (act in supported_activations):
            raise ValueError(unexpected_activation_value.format(supported_activations=supported_activations, activations=act))

    return hidden_activations
8f5c019e9010083740f19946555ab52e9329bd08
123,396
import random


def random_id(size=12):
    """Make a random character string for semi-unique IDs.

    Args:
        size: length of string to return. 12 characters should mean a 50%
            chance of string collisions only after 20 million random strings.

    Returns:
        Random hex-character string of exactly ``size`` characters that does
        not parse as a number (so it is always evaluated as a string).
    """
    def is_number(candidate):
        try:
            float(candidate)
            return True
        except ValueError:
            return False

    # Regenerate until the hex string has the requested length and is not
    # float-parseable (e.g. "12e34561" would parse as scientific notation).
    identifier = None
    while identifier is None or is_number(identifier) or len(identifier) != size:
        identifier = f"{random.getrandbits(size * 4):x}".strip()
    return identifier
541ea03bfdfa5be8495ff0dbce75c14fea316572
123,398
def get_element_and_channel_type(type):
    """Decompose a packed type value into its two 16-bit halves.

    Args:
        type: BytesType — upper 16 bits hold the element type, lower 16 bits
            the channel type.

    Returns:
        dict with 'element_type' and 'channel_type' keys.
    """
    element = (type >> 16) & 0xFFFF
    channel = type & 0xFFFF
    return {'element_type': element, 'channel_type': channel}
0a868d21a8850fcd7231e3775f0e58314ce00a5e
123,399
def get_subclasses(c):
    """Return all direct and transitive subclasses of class ``c``."""
    found = list(c.__subclasses__())
    # Iterate a snapshot of the direct subclasses while appending each
    # one's own (recursive) subclasses to the result.
    for direct in list(found):
        found.extend(get_subclasses(direct))
    return found
ab8f2fde3b4dda4621c9a2ea89a30d364815d1bb
123,400
import hashlib


def hash_test_code(main_path):
    """Return the SHA-256 hex digest of the text file at ``main_path``.

    The file is read line by line in text mode, so platform newline
    translation applies to the hashed bytes.
    """
    digest = hashlib.sha256()
    with open(main_path) as source:
        for line in source:
            digest.update(line.encode())
    return digest.hexdigest()
134df6b5e7bb87a48e19bcfc1f8178f34d8f6599
123,404
def check_enabled(ext):
    """Check whether an extension should be enabled.

    :param ext: The extension instance to check.
    :return: Result of the extension object's ``enabled()`` hook.
    """
    is_enabled = ext.obj.enabled()
    return is_enabled
b81fbbf29ebde450463185d6e33ad0c7cf1cb5dd
123,411
def determine_language(extension):
    """
    Map a file extension to a programming language name.

    Returns the lowercase language name when it can be determined, or None
    when the test is inconclusive (e.g. 'ipynb' notebooks can hold many
    languages, so they intentionally map to None).
    """
    key = extension[1:] if extension.startswith('.') else extension
    lang_by_ext = {'py': 'python', 'r': 'r', 'R': 'r', 'Rmd': 'r', 'rmd': 'r'}
    return lang_by_ext.get(key)
2510a204e3a7953692741aeb1b2a373c81e4a3ff
123,412
def create_record(client, user):
    """
    Creates a record in indexd and returns that record's GUID.
    """
    payload = {
        "form": "object",
        "size": 123,
        "urls": ["s3://endpointurl/bucket/key"],
        "hashes": {"md5": "8b9942cf415384b27cadf1f4d2d682e5"},
        "metadata": {"project_id": "bpa-UChicago"},
    }
    response = client.post("/index/", json=payload, headers=user)
    assert response.status_code == 200
    # A successful POST returns the "did" (Document IDentifier) — the GUID.
    return response.get_json()["did"]
f09f9012f45c1615362869e6b876f0d934924a85
123,415
def safe_readlines(handle, hint=-1):
    """Attempt to read lines from ``handle``; return [] on OSError."""
    try:
        return handle.readlines(hint)
    except OSError:
        return []
501ddfc12e61bc44e673c586b0892b64f6e1e414
123,418
import typing
import asyncio
import functools


async def RunAsync(sync_function: typing.Callable, *args: typing.Any, **kwargs: typing.Any) -> typing.Coroutine:
    """Run a synchronous function on a worker thread without blocking the
    event loop, and await its result.

    Args:
        sync_function (typing.Callable): The synchronous callable to execute.
        *args (Any): Positional arguments forwarded to ``sync_function``.
        **kwargs (Any): Keyword arguments forwarded to ``sync_function``.

    Returns:
        typing.Coroutine: Awaitable yielding the function's return value.
    """
    # ref: https://github.com/tiangolo/fastapi/issues/1066
    # ref: https://github.com/tiangolo/starlette/blob/master/starlette/concurrency.py
    # Bind all arguments so the executor can invoke the callable with no
    # parameters, then hand it to the running loop's default thread pool.
    bound = functools.partial(sync_function, *args, **kwargs)
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, bound)
4b61ae9b6c6f3cbfc63535d6ea5befbbaa50384b
123,420
def docstring(node, nodetype=None, desc='no description', error_url=None):
    """Build the error-report dict for a missing docstring on ``node``."""
    # getattr with a None default mirrors the hasattr checks: AST nodes
    # without position info (e.g. Module) report None.
    line_number = getattr(node, 'lineno', None)
    column = getattr(node, 'col_offset', None)
    return {
        'errcode': 'NO_DOCSTRING',
        'desc': desc,
        'lineno': line_number,
        'coloffset': column,
        'nodetype': nodetype,
        'error_url': error_url,
    }
cd8ae856eb58e8e668d15b9cb5c839f3a403ee52
123,423
import re


def string_to_iterable(string):
    """Convert a spec string into a sorted list of unique ints.

    Ranges are inclusive: 'a-b' expands to a..b, and 'a-b-s' uses step s.

    >>> string_to_iterable('0,3,5-10,15-30-3,40')
    [0, 3, 5, 6, 7, 8, 9, 10, 15, 18, 21, 24, 27, 30, 40]
    """
    # Bug fix: the original pattern "[^\\d^,^-]" treated '^' inside the
    # character class as a literal, silently allowing '^' characters; the
    # raw-string class below rejects anything but digits, commas, dashes.
    if re.search(r"[^\d,-]", string) is not None:
        raise ValueError("Iterable string must contain only digits, commas, and dashes")

    values = []
    for item in (tuple(s.split("-")) for s in string.split(",")):
        if len(item) == 1:
            values.append(int(item[0]))
        elif len(item) == 2:
            values.extend(range(int(item[0]), int(item[1]) + 1))
        elif len(item) == 3:
            values.extend(range(int(item[0]), int(item[1]) + 1, int(item[2])))
        else:
            raise ValueError("Iterable string items must be of length <= 3")
    return sorted(set(values))
43eae42210409554f1b29ae588286e86908e6fd6
123,430
async def register_device(provisioning_device):
    """Register a provisioning device and return the registration result.

    Args:
        provisioning_device (ProvisioningDeviceClient): Client used to run
            the device registration with the provisioning service using
            X509 authentication.

    Returns:
        RegistrationResult: the outcome of the registration.
    """
    result = await provisioning_device.register()
    return result
eef9538050b2856a7f57b2e48848d8074b297c47
123,436
def get_ele_and_grain_with_node_id(
        mesh, node_id, grain_id_1, grain_id_2, set_type="poly"):
    """
    Find the elements whose vertices include ``node_id`` within the element
    sets of the two given grains.

    :param mesh: The mesh.
    :type mesh: :class:`Mesh`
    :param node_id: The node whose containing elements are sought.
    :type node_id: int
    :param grain_id_1: Identifier for the first grain.
    :type grain_id_1: int
    :param grain_id_2: Identifier for the second grain.
    :type grain_id_2: int
    :param set_type: Type of elements, "poly" for 3d and "face" for 2d.
    :type set_type: string
    :return: Tuple of (grain ids the matching elements belong to,
        matching element ids), in discovery order.
    :rtype: tuple (list, list)
    """
    matching_grains = []
    matching_elements = []
    for grain_id in (grain_id_1, grain_id_2):
        # Element sets are keyed by set type + grain id, e.g. "poly3".
        element_set = mesh.element_sets[set_type + str(grain_id)]
        for element_id in element_set.ids:
            if node_id in mesh.elements[element_id].vertices:
                matching_elements.append(element_id)
                matching_grains.append(grain_id)
    return matching_grains, matching_elements
b2099694106391b42328a2b4db05768cc5153ed0
123,438
def is_mlcmt(line, mlcmto, mlcmtc):
    """Locate multi-line comment delimiters within ``line``.

    Returns a two-element list: index of the opening delimiter ``mlcmto``
    and index of the closing delimiter ``mlcmtc`` (-1 for each when absent).
    """
    open_pos = line.find(mlcmto)
    close_pos = line.find(mlcmtc)
    return [open_pos, close_pos]
ac62361f39d0ea5b1006a4d628a1719f68206ed4
123,439
from typing import Any
import json


def read_json(file_path: str) -> Any:
    """Return parsed data from a JSON file.

    On a decode error the exception is printed and None is returned
    (best-effort read rather than a crash).
    """
    with open(file_path, "r") as handle:
        try:
            return json.load(handle)
        except json.JSONDecodeError as err:
            print(err)
e01f603bf6577ae8d64da5347c421f3a1d5eb937
123,441
def generate_service_obs(n):
    """
    Recursively generate every configuration of ``n`` services as strings,
    using "yes"/"no" per service (same as generate_service_configs except
    True/False are replaced with "yes"/"no").
    """
    # Base cases: nothing for n <= 0, the two atoms for n == 1.
    if n <= 0:
        return []
    if n == 1:
        return ["yes", "no"]
    configs = []
    for suffix in generate_service_obs(n - 1):
        configs.extend(("yes" + suffix, "no" + suffix))
    return configs
edce9bfc70f1da5631b5d054f8a59c2d5f5998b1
123,442
def all_eq(*items) -> bool:
    """Test whether all passed items are equal.

    Bug fix: the original indexed ``items[0]`` unconditionally and raised
    IndexError when called with no arguments; an empty call is now
    vacuously True.
    """
    if not items:
        return True
    return items.count(items[0]) == len(items)
4498e9ccc453cc5838ff5075b3d6713396e843e0
123,444
def sort_cards(cards):
    """Sort a shuffled list of cards by rank: A, 2..9, T, J, Q, K.

    sort_cards(['3', '9', 'A', '5', 'T', '8', '2', '4', 'Q', '7', 'J', '6', 'K'])
    ['A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K']
    """
    # Partition into digit ranks (lexicographic == numeric for '2'..'9')
    # and letter ranks, then place aces first and face cards last.
    numeric = sorted(c for c in cards if not c.isalpha())
    letters = [c for c in cards if c.isalpha()]
    ordered = ['A'] * letters.count('A') + numeric
    for rank in ('T', 'J', 'Q', 'K'):
        ordered += [rank] * letters.count(rank)
    return ordered
cd804d4173a9cabdecf72a154c28bba1d6307812
123,445
def touching(pos1, pos2):
    """Tell whether two grid positions are orthogonally adjacent."""
    same_row_adjacent = pos1[0] == pos2[0] and abs(pos1[1] - pos2[1]) == 1
    same_col_adjacent = pos1[1] == pos2[1] and abs(pos1[0] - pos2[0]) == 1
    return same_row_adjacent or same_col_adjacent
a1012d014b10f4571a5c436d0953b72d5a81e452
123,447
def _get_architecture_or_default(environment): """Returns the current target architecture or the default architecture if none has been explicitly set. @param environment Environment the target architecture will be looked up from @returns The name of the target architecture from the environment or a default""" architecture = environment['TARGET_ARCH'] if architecture is None: return 'x64' else: return architecture
c5636eeeafe69ce33a6c9ef2081063481627d4d2
123,449
def get_config_from_package(package):
    """
    Break a dotted package string into module path and class name.

    :param package: A dotted package string, e.g. 'pkg.mod.Class'.
    :return: A config dict with 'class' (last segment) and 'module'
        (everything before it, '' when there is no dot).
    """
    module_path, _, class_name = package.rpartition('.')
    return {'class': class_name, 'module': module_path}
b6504372aac83d008e97463636d7bcd7533e061b
123,450
def solveLinearSingular(aMat, bVec, isParameterized=False, defaultValue=1):
    """
    Find a solution to a linear system whose matrix may be singular.

    Free parameters are substituted with ``defaultValue`` (and the result
    evaluated numerically) unless ``isParameterized`` is True, in which
    case the parameterized solution is returned as-is.

    Parameters
    ----------
    aMat: sympy.Matrix N X N
    bVec: sympy.Matrix N X 1
    isParameterized: bool
        Return the parameterized result
    defaultValue:
        Value substituted for each free parameter.

    Returns
    -------
    sympy.Matrix N X 1
    """
    solutionVec, parameterMat = aMat.gauss_jordan_solve(bVec)
    if not isParameterized:
        # Pin every free parameter to the default, then force numeric
        # evaluation of the pinned solution.
        for parameter in parameterMat:
            solutionVec = solutionVec.subs(parameter, defaultValue)
        solutionVec = solutionVec.evalf()
    return solutionVec
2178b4486910851933ec04013df5e0613a8f1d9d
123,451
import requests


def random_an_user(gender: str = 'male', nation: str = 'us'):
    """
    Generate a fake user via the https://randomuser.me/ API.

    :param str gender: Gender
    :param str nation: Nation
    :return: dict describing the first generated user
    """
    url = 'https://randomuser.me/api/?gender={gender}&nat={nation}'.format(
        gender=gender, nation=nation)
    payload = requests.get(url).json()
    return payload['results'][0]
a824381706c2482292509c90f24c418f771b1c70
123,459
import secrets


def random_with_float_step(start: int, stop: int, step: float) -> float:
    """Generate a random number between a range with a float step.

    Parameters
    ----------
    start : int
        The inclusive lower bound
    stop : int
        The upper bound of the range
    step : float
        The step of the range

    Returns
    -------
    float
        ``start + k * step`` for a cryptographically random k in
        [0, (stop - start) / step).
    """
    num_steps = int((stop - start) / step)
    return start + secrets.randbelow(num_steps) * step
e26b7c4167e11d78eea20ed52a012273a3b3628e
123,463
def path(tmp_path):
    """Return ``tmp_path`` with a '/hi_hello' suffix appended, as a string."""
    return "{}/hi_hello".format(tmp_path)
8ff34ca912981ff2cca9b6f10c2d05ab379118a6
123,467
def get_bucket_iam_binding(iam_policy, role):
    """Get the binding matching ``role`` from an IAM policy, or None."""
    for binding in iam_policy['bindings']:
        if binding['role'] == role:
            return binding
    return None
674c4d9219f957fac2d7664e1ea900c412cc3f26
123,469
def getValidValues(values, threshold=1e10):
    """
    Get the valid values in a list.  Valid means the absolute value is
    lower than some threshold.  (For numpy arrays, consider
    ``values[np.abs(values) < threshold]`` instead.)

    Parameters
    ----------
    values : list of floats
    threshold : float, optional.
        The absolute value threshold of values. By default, 1e10.

    Returns
    -------
    resX : list of int
        The indices of valid elements.
    resY : list of floats
        The values of valid elements.
    """
    kept = [(index, value) for index, value in enumerate(values)
            if abs(value) < threshold]
    indices = [index for index, _ in kept]
    valid_values = [value for _, value in kept]
    return indices, valid_values
722694c0d09a6520c1d79c3afcb317b646ae004f
123,471
import random


def binary_tournament_selection(model, population, size):
    """
    Pick the most dominant individual from a random tournament.

    :param model: Model providing ``better(a, b)`` (returns 1 when ``a``
        dominates ``b``)
    :param population: Population to sample from
    :param size: Size of tournament
    :return: Most dominant individual in the sampled tournament
    """
    contenders = random.sample(population, size)
    champion = contenders[0]
    for challenger in contenders[1:]:
        if model.better(challenger, champion) == 1:
            champion = challenger
    return champion
260ee1df9a054b18c5b7d5b89dd8f759abbfad74
123,474
def gold_horse_ent_type_process_fn(d):
    """Extract and normalize the entity tag from a golden-horse data line.

    Source: https://github.com/hltcoe/golden-horse

    Tags look like ``B-PER.NAM``: a BIO prefix (B/I/O), an entity class
    (GPE/LOC/PER/ORG), and a specificity (NAM entity / NOM more general,
    e.g. 女生, 男的).  Only NAM tags are kept; everything else collapses
    to ``O``, and the surviving ``.NAM`` suffix is dropped, so
    ``B-PER.NAM`` becomes ``B-PER``.

    Arguments:
        d {str} -- raw tab-separated line, tag in the second column

    Returns:
        str -- processed entity tag
    """
    tag = d.split('\t')[1].replace('\n', '')
    if 'NAM' not in tag:
        return 'O'
    return tag.replace('.NAM', '')
6b7a46cbeeae86b8195c6ce745a0cb4740eac8d7
123,483
def pad_string(str, block_size):
    """PKCS#7-style pad: extend ``str`` to a multiple of ``block_size``.

    Each pad character encodes the pad length; a full block of padding is
    appended when the input is already block-aligned.
    """
    pad_len = block_size - len(str) % block_size
    return str + chr(pad_len) * pad_len
7ec37fb546e90d8c9da7a736c3e99632dc0d3475
123,485
from typing import Dict
from typing import Tuple
from collections import OrderedDict


def parse_labelmap_dataclass(labelmap: "Dict[int, Label]") -> Tuple:  # type: ignore
    """
    A labelmap provides a map from integer ids to text and color labels.
    After loading a label map from json, this parses the labelmap into
    commonly utilized mappings and fixes the formatting issues caused by
    json (string keys, list colors).

    Fixes vs. the original: ``collections.OrderedDict`` replaces the
    deprecated callable use of ``typing.OrderedDict``, and the ``Label``
    annotation is quoted so the module imports even when the externally
    defined ``Label`` symbol is not in scope.

    :param labelmap: Dictionary of label id and its corresponding Label
        class information (objects exposing ``.name`` and ``.color``).
    :return: (id2name {id <int>: name <str>},
              id2color {id <int>: color (R <int>, G <int>, B <int>, A <int>)}).
        Label id to name and label id to color mappings tuple.
    """
    id2name = OrderedDict()   # {integer ids: str name}
    id2color = OrderedDict()  # {integer ids: RGB or RGBA tuple}

    # Some evaluation tools (like the confusion matrix) need sorted names,
    # and json may deliver the ids as strings — coerce to int and sort.
    for _id in sorted(int(key) for key in labelmap.keys()):
        id2name[_id] = labelmap[_id].name
        id2color[_id] = tuple(labelmap[_id].color)
    return id2name, id2color
9001737c8cb90e5957eaa9e61d943d711036054b
123,489
import math


def one_to_one_matches(matches: dict):
    """
    Filter a ranked dict of column matches down to 1-to-1 matches.

    First the median of the distinct similarity scores is computed and
    matches below it are dropped; then, walking the matches from highest
    to lowest similarity, each column is matched at most once.

    Bug fix: the original indexed into ``list(set(...))``, whose iteration
    order is arbitrary, so the "median" was a random element of the score
    set.  The distinct scores are now sorted before taking the median.

    Parameters
    ----------
    matches : dict
        The ranked list of matches — keys are (column_a, column_b) pairs,
        values are similarity scores, ordered from most to least similar.

    Returns
    -------
    dict
        The ranked list of matches after the 1 to 1 filter.
    """
    distinct_scores = set(matches.values())
    if len(distinct_scores) < 2:
        return matches

    matched = {}
    for pair in matches.keys():
        matched[pair[0]] = False
        matched[pair[1]] = False

    median = sorted(distinct_scores)[math.ceil(len(distinct_scores) / 2)]

    matches1to1 = {}
    for pair, similarity in matches.items():
        if (not matched[pair[0]]) and (not matched[pair[1]]):
            if similarity >= median:
                matches1to1[pair] = similarity
                matched[pair[0]] = True
                matched[pair[1]] = True
            else:
                # Input is ranked: everything after this is also below median.
                break
    return matches1to1
a630d717c4c14e84289fb11588b79a7a79e6854e
123,495
import ast


def get_inherits(tree):
    """
    Get what superclasses this class inherits.

    This handles exact names like 'MyClass' but skips things like 'cls'
    and 'mod.MyClass' — resolving those would be difficult.

    :param tree ast: a ClassDef node
    :rtype: list[str]
    """
    # isinstance() is the idiomatic AST-node check (the original used
    # ``type(base) == ast.Name``, which would miss Name subclasses).
    return [base.id for base in tree.bases if isinstance(base, ast.Name)]
ded54846d548c1a5bccd4387a958bff4d8a5fa01
123,497
import itertools


def strip_head(sequence, values):
    """Strip leading elements of ``sequence`` that are members of ``values``."""
    members = frozenset(values)
    remaining = itertools.dropwhile(members.__contains__, sequence)
    return list(remaining)
f5ae857ca9895206119a0af87786be4767d71a11
123,498
def get_complete_testing_sets(playlists, test_indices_dict):
    """
    Build test buckets of playlists according to the provided indices and
    attach 'seed'/'groundtruth' track lists to each playlist, splitting its
    'tracks' at the bucket number.

    Parameters:
    --------------
    playlists: list, original playlists included in test set
    test_indices_dict: dict, dictionary including the indices for every split

    Returns:
    --------------
    dict, {bucket_no: [playlist1, playlist2, ..., playlistn], ...}
    (the playlist dicts are the originals, mutated in place)
    """
    buckets = {
        bucket: [pl for idx, pl in enumerate(playlists) if idx in indices]
        for bucket, indices in test_indices_dict.items()
    }
    # The bucket key doubles as the seed/groundtruth split point.
    for split_point, bucket_playlists in buckets.items():
        for playlist in bucket_playlists:
            tracks = playlist['tracks']
            playlist['seed'] = list(tracks[:split_point])
            playlist['groundtruth'] = list(tracks[split_point:])
    return buckets
4846117155041762cfbc2aff86497debca55c9f1
123,501
def clean_param(par):
    """Return a dictionary describing a parameter tuple.

    Keys: 'type', 'name' (backtick-quoted, constructor args stripped),
    'ctor' (the constructor-call suffix, '' when absent), 'desc' (with the
    surrounding parens stripped), and an optional 'sample'.
    """
    raw_name = par[1]
    paren_at = raw_name.find('(')
    if paren_at == -1:
        name, ctor = raw_name, ''
    else:
        name, ctor = raw_name[:paren_at], raw_name[paren_at:]
    result = {
        'type': par[0],
        'desc': par[2][1:-1],
        'name': f'`{name}`',
        'ctor': ctor,
    }
    if len(par) > 3:
        result['sample'] = par[3]
    return result
d0f2305138c6e21bee68af6ac7ee8de3e29aef13
123,503
def parse_sentence_json(data):
    """Parse the json response from the papers collection in Solr.

    Returns the results as a list of [sentence, fileName, sentencenum]
    triples, one per document.
    """
    # Each doc carries sentence (one-element list), fileName and
    # sentencenum as generated by Solr.
    documents = data['response']['docs']
    return [
        [doc.get('sentence')[0], doc.get('fileName'), doc.get('sentencenum')]
        for doc in documents
    ]
118686b83092d1932ff1751b563a7a12a04f8633
123,510
def batch_center_crop_frac(batch_image, frac):
    """Center-crop a batch of images to a fraction of their spatial size.

    Args:
        batch_image: [b, h, w, c] tensor
        frac: crop fraction, e.g. 0.5, 0.75

    Returns:
        The [b, int(frac*h), int(frac*w), c] center crop.
    """
    _, height, width, _ = batch_image.get_shape().as_list()
    top = int((height - frac * height) / 2)
    left = int((width - frac * width) / 2)
    bottom = top + int(frac * height)
    right = left + int(frac * width)
    return batch_image[:, top:bottom, left:right, :]
409d3d457ec6551c1b9a8b0f459b02ce84ab27a5
123,513