Dataset columns:
    content: string (length 39 to 14.9k)
    sha1: string (length 40)
    id: int64 (0 to 710k)
def one_at(pos, size=8):
    """
    Create a size-bit int which only has one '1' bit at a specific position.

    example:
        one_at(0) -> 0b10000000
        one_at(3) -> 0b00010000
        one_at(5, 10) -> 0b0000010000

    :param int pos: Position of '1' bit.
    :param int size: Length of value in bits.
    :rtype: int
    """
    assert 0 <= pos < size
    return 1 << (size - 1 - pos)
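A quick doctest-style usage sketch (illustrative, not part of the stored record); formatting the result as binary makes the single set bit visible:

>>> format(one_at(3), '08b')
'00010000'
>>> format(one_at(5, 10), '010b')
'0000010000'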
3075f1da6dd39aa24c64a51cb8422ab553deb680
63,666
def make_site_pin_map(site_pins):
    """ Create map of site pin names to tile wire names. """
    site_pin_map = {}
    for site_pin in site_pins:
        site_pin_map[site_pin.name] = site_pin.wire
    return site_pin_map
c507802ec4e95a13ae26e1e3e48200eef72948cf
63,669
import importlib


def load_method_fn_from_method_path(module, method_path):
    """
    Args:
        module (python module): module from which to take the method.
        method_path (string): dotted path to the method.

    Returns:
        None if method_path is falsy, otherwise the function object found at
        module.method_path (the function is looked up, not called).
    """
    if not method_path:
        return None
    mpathsplit = method_path.split(".")
    method_name = mpathsplit[-1]
    path = module.__name__
    middle_path = '.'.join(mpathsplit[:-1])
    if middle_path:
        path += '.' + middle_path
    last_module = importlib.import_module(path)
    method_fn = getattr(last_module, method_name)
    return method_fn
d0c88b4ebb24f5878d2eebeb77a587cf0b3a2bd2
63,671
def revcomp(seq: str) -> str:
    """ Reverse-complement a nucleotide sequence. """
    rc_nuc = {
        'A': 'T',
        'C': 'G',
        'T': 'A',
        'G': 'C',
    }
    seq_rev = seq[::-1]
    seq_rev_comp = ''.join(rc_nuc[n] for n in seq_rev)
    return seq_rev_comp
b2745b3b3cd29b339f9305414bdfdaf297fb53a9
63,674
def flatten_diff(diff):
    """
    For spawning, a 'detailed' diff is not necessary, rather we just want
    instructions on how to handle each root key.

    Args:
        diff (dict): Diff produced by `prototype_diff` and possibly modified
            by the user. Note that a pre-flattened diff will come out
            unchanged by this function.

    Returns:
        flattened_diff (dict): A flat structure detailing how to operate on
            each root component of the prototype.

    Notes:
        The flattened diff has the following possible instructions:
            KEEP, UPDATE, REPLACE, REMOVE
        Many of the detailed diff's values can hold nested structures with
        their own individual instructions. A detailed diff can have the
        following instructions:
            REMOVE, ADD, UPDATE, KEEP
        Here's how they are translated:
            - All REMOVE -> REMOVE
            - All ADD|UPDATE -> UPDATE
            - All KEEP -> KEEP
            - Mix KEEP, UPDATE, ADD -> UPDATE
            - Mix REMOVE, KEEP, UPDATE, ADD -> REPLACE

    """
    valid_instructions = ("KEEP", "REMOVE", "ADD", "UPDATE")

    def _get_all_nested_diff_instructions(diffpart):
        "Started for each root key, returns all instructions nested under it"
        out = []
        typ = type(diffpart)
        if typ == tuple and len(diffpart) == 3 and diffpart[2] in valid_instructions:
            out = [diffpart[2]]
        elif typ == dict:
            # all others are dicts
            for val in diffpart.values():
                out.extend(_get_all_nested_diff_instructions(val))
        else:
            raise RuntimeError(
                "Diff contains non-dicts that are not on the "
                "form (old, new, inst): {}".format(diffpart)
            )
        return out

    flat_diff = {}
    # flatten diff based on rules
    for rootkey, diffpart in diff.items():
        insts = _get_all_nested_diff_instructions(diffpart)
        if all(inst == "KEEP" for inst in insts):
            rootinst = "KEEP"
        elif all(inst in ("ADD", "UPDATE") for inst in insts):
            rootinst = "UPDATE"
        elif all(inst == "REMOVE" for inst in insts):
            rootinst = "REMOVE"
        elif "REMOVE" in insts:
            rootinst = "REPLACE"
        else:
            rootinst = "UPDATE"
        flat_diff[rootkey] = rootinst
    return flat_diff
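A hypothetical usage sketch; the diff layout below is an assumption built from the (old, new, instruction) tuple format the function expects, not a real `prototype_diff` output:

>>> diff = {
...     "key": ("Goblin", "Goblin", "KEEP"),
...     "desc": ("old desc", "new desc", "UPDATE"),
...     "attrs": {"hp": (10, None, "REMOVE"), "mp": (None, 5, "ADD")},
... }
>>> flatten_diff(diff)
{'key': 'KEEP', 'desc': 'UPDATE', 'attrs': 'REPLACE'}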
4b41ab2dd6fd377a3a323f108b853eec63b6e5be
63,675
import yaml


def load_config_file(config_file):
    """
    Load a configuration file.

    Args:
        config_file(str): path to the configuration file

    Returns:
        config_dict(dict): configuration dictionary
    """
    with open(config_file, 'r', encoding='utf-8') as rf:
        config_dict = yaml.load(rf, Loader=yaml.FullLoader)
    return config_dict
4937a16ed01b69c5fc2f37265a50f0cd33a9f2ad
63,676
def update_postcode(postcode_value):
    """Update postcodes using a mapping dictionary.

    Takes a postcode value, updates it using the postcode_mapping dictionary
    and returns the updated value.
    """
    postcode_mapping = {'78621': '78681',
                        '787664': '78664',
                        '78728-1275': '78728'}
    if postcode_value not in postcode_mapping:
        return postcode_value
    print(postcode_value, "cleaned to -->", postcode_mapping[postcode_value])
    return postcode_mapping[postcode_value]
6a296e32d0af0a43e80d19b29060d31ca6fb6fa3
63,680
import re


def convert_gcs_json_url_to_gsutil_form(url: str) -> str:
    """
    Convert a GCS JSON API url to its corresponding gsutil uri.

    Parameters
    ----------
    url: str
        The url in GCS JSON API form.

    Returns
    -------
    gsutil_url: str
        The url in gsutil form. Returns empty string if the input url
        doesn't match the form.
    """
    found_bucket, found_filename = None, None

    bucket = re.search("storage.googleapis.com/download/storage/v1/b/(.+?)/o", url)
    if bucket:
        found_bucket = str(bucket.group(1))

    filename = re.search(r"/o/(.+?)\?alt=media", url)
    if filename:
        found_filename = str(filename.group(1))

    if found_bucket and found_filename:
        return f"gs://{found_bucket}/{found_filename}"

    return ""
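A usage sketch with a made-up bucket and object name (real GCS JSON API URLs percent-encode the object path; this example assumes a plain one):

>>> convert_gcs_json_url_to_gsutil_form(
...     "https://storage.googleapis.com/download/storage/v1/b/my-bucket/o/data/file.txt?alt=media")
'gs://my-bucket/data/file.txt'
>>> convert_gcs_json_url_to_gsutil_form("https://example.com/not-gcs")
''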
cf2d16a23aa250fb6d6e87e48e151f9f73bd4d86
63,684
def can_merge_packages(package_info_list):
    """Returns True if all versions of a package can be replaced with a
    package name.

    Args:
        package_info_list: list of PackageInfo. Method assumes that all items
            in package_info_list have the same package name, but different
            package versions.

    Returns:
        True if it is safe to print only a package name (without versions),
        False otherwise.
    """
    first = package_info_list[0]
    for package in package_info_list:
        if package.licensed_files != first.licensed_files:
            return False
    return True
43748072642d5c1a6d78da33e9e861262a1682fc
63,686
def _gen_index_name(keys):
    """Generate an index name from the set of fields it is over."""
    return u"_".join([u"%s_%s" % item for item in keys])
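A usage sketch, assuming `keys` is a list of (field, direction) pairs as in pymongo-style index specs:

>>> _gen_index_name([(u"user_id", 1), (u"created", -1)])
'user_id_1_created_-1'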
b07c9bcbe829644b8cdcf3aa28ded9b1f57622eb
63,690
def _isoformat(date):
    """Format a date or return None if no date exists."""
    return date.isoformat() if date else None
566e26fed7818874322f820e684c68940c726376
63,693
def get_cv(word, vowels, sep=None):
    """
    Calculate the consonant ("C") and vowel ("V") structure of the given
    word. Returns a string of the characters "C" and "V" corresponding to
    the characters in the word.

    *vowels* -- A list of the characters representing vowels.

    *sep* -- String used to separate phonemes (if the words are phonological
    forms). To separate into individual characters, set to `None` (default).
    """
    wsplit = list(word) if not sep else word.split(sep)
    pattern = ["C" if char not in vowels else "V" for char in wsplit]
    return "".join(pattern)
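A quick usage sketch (illustrative only), once on raw characters and once on separator-delimited phonemes:

>>> get_cv('strength', ['a', 'e', 'i', 'o', 'u'])
'CCCVCCCC'
>>> get_cv('k-a-t', ['a'], sep='-')
'CVC'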
fbfe1c11e2b21f51fcd95bff454edd295e502314
63,694
import socket


def check_devserver_port_used(port):
    """Return True if the port is already in use (i.e. not free for the
    Django devserver)."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # immediately reuse a local socket in TIME_WAIT state
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        sock.bind(('127.0.0.1', port))
        used = False
    except socket.error:
        used = True
    finally:
        sock.close()
    return used
c32654af07e22122c62b521cf8af23880a131e4d
63,702
import collections


def taxonomy_counts(checkins, beerstyle2cat, bid2beer):
    """
    Aggregate beer checkins `checkins` list to given taxonomy `beerstyle2cat`.

    `beerstyle2cat` is a dictionary mapping an Untappd beer style string to a
    taxonomic category.
    """
    counts = collections.Counter()
    for c in checkins:
        beer = bid2beer[c['beer']]
        bstyle = beer['beer_style']
        cls = beerstyle2cat[bstyle]
        counts[cls] += 1
    return counts
84fbcc5fa31a376d735b7392ecb553c55240e609
63,707
import json
from typing import Any, Dict

import torch


def inspect_parameters(module: torch.nn.Module, quiet: bool = False) -> Dict[str, Any]:
    """
    Inspects the model/module parameters and their tunability. The output is
    structured in a nested dict so that parameters in same sub-modules are
    grouped together. This can be helpful to setup module path based regex,
    for example in initializer. It prints it by default (optional) and
    returns the inspection dict. Eg. output::

        {
            "_text_field_embedder": {
                "token_embedder_tokens": {
                    "_projection": {
                        "bias": "tunable",
                        "weight": "tunable"
                    },
                    "weight": "frozen"
                }
            }
        }

    """
    results: Dict[str, Any] = {}
    for name, param in sorted(module.named_parameters()):
        keys = name.split(".")
        write_to = results
        for key in keys[:-1]:
            if key not in write_to:
                write_to[key] = {}
            write_to = write_to[key]
        write_to[keys[-1]] = "tunable" if param.requires_grad else "frozen"
    if not quiet:
        print(json.dumps(results, indent=4))
    return results
54bf3ac16744ec61aede08fd66ec8f49ace22778
63,708
import re


def get_uninterpolated_placeholders(string):
    """Check if a string has any remaining uninterpolated values.

    Args:
        string (string): String object.

    Returns:
        list: List of uninterpolated values.
    """
    # Regex to find matches
    matches = re.findall(r'%\(([a-zA-Z0-9_-]+)\)s', string)
    # Convert to a set to remove duplicates, then return a sorted list
    return sorted(set(matches))
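A quick usage sketch (illustrative only); duplicates are collapsed and the names come back sorted:

>>> get_uninterpolated_placeholders('Hi %(name)s, %(count)s new (%(count)s unread)')
['count', 'name']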
ace5503d8e5cc23ab185314123e1fbb2579c43b6
63,709
from typing import Callable, Iterable


def quantify(it: Iterable, pred: Callable = bool) -> int:
    """
    Count how many times the predicate is true

    >>> quantify([[], (), None, 0, 1, -1, 'fsdfds', ''])
    3
    """
    return sum(map(pred, it))
e2ce08256736dc893e5d3c8e4423b86a3039aac1
63,712
def merge_dicts(*dictionaries):
    """
    Merge multiple dictionaries. The last argument can be a boolean
    determining if the dicts can erase each other's content.

    Examples
    --------
    >>> dict1 = {'label': 'string'}
    >>> dict2 = {'c': 'r', 'label': 'something'}
    >>> merge_dicts(dict1, dict2, False)
    {'label': 'string', 'c': 'r'}
    >>> merge_dicts(dict1, dict2, True)
    {'label': 'something', 'c': 'r'}
    >>> merge_dicts(dict1, dict2)
    {'label': 'string', 'c': 'r'}
    """
    # Argument for force rewriting the dictionaries
    if isinstance(dictionaries[-1], bool):
        force = dictionaries[-1]
        dictionaries = dictionaries[:-1]
    else:
        force = False

    merged_dictionary = {}
    for dictionary in dictionaries:
        if force:
            merged_dictionary.update(dictionary)
        else:
            for key in dictionary.keys():
                if key not in merged_dictionary:
                    merged_dictionary[key] = dictionary[key]
    return merged_dictionary
82690f2cfc26b897a55db08f609d5acb208955e3
63,722
def string_splitter(input_str, max_length=40):
    r"""
    Returns the input string with '\n' inserted at word boundaries so that
    each line stays under max_length chars.

    :param input_str:
    :param max_length: int max number of chars between '\n'
    :return: string
    """
    chunks = []
    chunk = ''
    pieces = input_str.split(' ')
    for piece in pieces:
        if len(chunk) + len(piece) < max_length:
            chunk = ' '.join([chunk, piece])
        else:
            chunks.append(chunk)
            chunk = piece
    chunks.append(chunk)
    return '\n'.join(chunks).strip()
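A quick usage sketch (illustrative only):

>>> string_splitter('the quick brown fox jumps over the lazy dog', max_length=15)
'the quick\nbrown fox jumps\nover the lazy\ndog'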
a1b77c8652997fb1508cc9607431799e04d47059
63,728
def read_parameters_dict_lines_from_file_header(
    outfile, comments="#", strip_spaces=True
):
    """Load a list of pretty-printed parameters dictionary lines from a
    commented file header.

    Returns a list of lines from a commented file header that match the
    pretty-printed parameters dictionary format as generated by
    `BaseSearchClass.get_output_file_header()`. The opening/closing bracket
    lines (`{`,`}`) are not included. Newline characters at the end of each
    line are stripped.

    Parameters
    ----------
    outfile: str
        Name of a PyFstat-produced output file.
    comments: str
        Comment character used to start header lines.
    strip_spaces: bool
        Whether to strip leading/trailing spaces.

    Returns
    -------
    dict_lines: list
        A list of unparsed pprinted dictionary entries.
    """
    dict_lines = []
    with open(outfile, "r") as f_opened:
        in_dict = False
        for line in f_opened:
            if not line.startswith(comments):
                raise IOError(
                    "Encountered end of {:s}-commented header before finding"
                    " closing '}}' of parameters dictionary"
                    " in file '{:s}'.".format(comments, outfile)
                )
            elif line.startswith(comments + " {"):
                in_dict = True
            elif line.startswith(comments + " }"):
                break
            elif in_dict:
                line = line.lstrip(comments).rstrip("\n")
                if strip_spaces:
                    line = line.strip(" ")
                dict_lines.append(line)
    if len(dict_lines) == 0:
        raise IOError(
            "Could not parse non-empty parameters dictionary"
            " from file '{:s}'.".format(outfile)
        )
    return dict_lines
6f06fade43b69da083b95b4ddc0d10f168779118
63,732
def extract_smali_method(method_name, smali_file):
    """
    Extracts a smali method from a smali file

    :param str method_name: the method to be extracted
    :param str smali_file: the file to extract the method from

    :return: the extracted method or empty string if not found
    """
    with open(smali_file, "r") as fd:
        smali_code = fd.read()

    smali_method = ""
    for line in smali_code.split("\n"):
        # if method has been found and end is in line
        if smali_method and ".end method" in line:
            return smali_method
        # if method has been found then start saving
        elif ".method" in line and method_name in line and not smali_method:
            smali_method = line + "\n"  # keep a newline after the signature
        elif smali_method:
            smali_method += "{}\n".format(line)
    return smali_method
e36f1d0dc1ac561ddcfb0fa237ead89f07780be4
63,733
def contains(sequence, value):
    """Test whether value is in the sequence."""
    return value in sequence
5dc8a462aeb6e23ff780e778c7a159528fa015cb
63,737
import torch


def vectors_to_torch(vectors):
    """Convert numpy arrays in a vectors dictionary to torch double format.

    Args:
        vectors (:obj:`dict`): vectors dictionary as specified by
            s3dxrd.measurements.Id11.peaks_to_vectors()

    Returns:
        (:obj:`dict`): dictionary with same fields as ``vectors`` but with
            torch tensors replacing numpy arrays.
    """
    for key in ["Y", "entry", "exit", "nhat", "kappa", "L", "nsegs", "sig_m"]:
        vectors[key] = torch.from_numpy(vectors[key]).double()
    return vectors
1e28ef30103cef27768ab24c1f16970211ab073c
63,738
import hashlib


def _hash_sha1(buf):
    """
    Produce a 20-byte hash of *buf* using SHA1.
    """
    return hashlib.sha1(buf).digest()
bc7e8f23e5b6dac25f6cdf7c95a9aaa9ee8b1d6b
63,741
import uuid


def generate_uuid_from_guid(guid: str, number: int):
    """Generate a UUID from the MD5 hash of a GUID and a sequence number.

    Args:
        guid (str): Hex digest of UUID.
        number (int): Sequence number.

    Returns:
        str: Hex digest of the generated UUID.
    """
    return str(uuid.uuid3(uuid.UUID(guid), str(number)))
cf3d1830bd3f6993d8b607a9c3f9412fd5257ba0
63,752
from typing import Tuple


def get_pos(target: Tuple[float, ...], area: Tuple[int, ...]) -> Tuple[int, ...]:
    """Get absolute position, given relative position and target area

    Parameters
    ----------
    target : Tuple[float, float]
        Relative position
    area : Tuple[int, int]
        Absolute area

    Returns
    -------
    Tuple[int, int]
        Absolute position
    """
    return tuple(int(a * b) for a, b in zip(target, area))
44c469a807ca9e256f87a32bb2c03e4e0a1b9cf3
63,756
import torch


def split_last_dimension(x, n):
    """Reshape x so that the last dimension becomes two dimensions.
    The first of these two dimensions is n.

    Args:
        x: a Tensor with shape [..., m]
        n: an integer.

    Returns:
        a Tensor with shape [..., n, m/n]
    """
    # NOTE: despite the generic docstring, this implementation assumes a
    # 5-D input and splits dimension 4.
    chunk_size = int(x.shape[4] / n)
    ret = torch.unsqueeze(x, 5)
    ret = torch.cat(ret.split(split_size=chunk_size, dim=4), 5)
    return ret
5ef93c3cb4cd452c045b1686ee0bc890b8c76e82
63,766
import binascii


def bin2base64(bin_str):
    """ Convert bytes to base64 """
    return binascii.b2a_base64(bin_str)
0482412894d339b97517bf100dc159084f482238
63,774
def read_cluster_file(clusters):
    """
    Read a cluster output file. This file should have the form:

        cluster_integer SEQUENCE1
        cluster_integer SEQUENCE2
        ...

    Returns two dictionaries. One maps sequence to cluster, the other maps
    cluster number to sequence.
    """
    # Read file
    with open(clusters, 'r') as f:
        lines = f.readlines()

    # Parse file
    seq_to_cluster = {}
    cluster_to_seq = {}
    for l in lines:
        if l.strip() == "" or l[0] == "#":
            continue
        col = l.split()
        cluster = int(col[0])
        seq = col[1]
        try:
            cluster_to_seq[cluster].append(seq)
        except KeyError:
            cluster_to_seq[cluster] = [seq]
        seq_to_cluster[seq] = cluster

    return seq_to_cluster, cluster_to_seq
da76c094fbdf75537b699d2b5a10e2dc8fab1ffd
63,777
def is_in_line(to_keep, line):
    """
    Checks if any of the class names in to_keep are in the line.

    :param to_keep: A list that holds the class names which the user wants to keep.
    :param line: A single line in the file that is currently being examined.
    :return: Returns True if a class name from to_keep is in the line, False otherwise.
    """
    return any(item in line for item in to_keep)
3115d7932bfae59232592d35532aa2bc3d51f5ad
63,778
import ast


def is_list_addition(node):
    """Check if operation is adding something to a list"""
    list_operations = ["append", "extend", "insert"]
    return (
        isinstance(node.func.ctx, ast.Load)
        and hasattr(node.func, "value")
        and isinstance(node.func.value, ast.Name)
        and node.func.attr in list_operations
    )
bad2ce31661f0de763a9fdac9fb9793a77cdbef3
63,780
def _check(shape, problem):
    """ Check if all points of the shape are contained in problem. """
    return all(point in problem for point in shape)
9395197d46eb56c91396029be2f5c94962420bf3
63,781
from typing import Optional


def gens_are_consistent(
    complex_phase,
    solvent_phase,
    ngens: Optional[int] = 2,
    nsigma: Optional[float] = 3,
) -> bool:
    """
    Return True if GENs are consistent.

    The last `ngens` generations will be checked for consistency with the
    overall estimate, and those with estimates that deviate by more than
    `nsigma` standard errors will be dropped.

    Parameters
    ----------
    complex_phase : ProjectPair
        The complex phase ProjectPair object to use to check for consistency
    solvent_phase : ProjectPair
        The solvent phase ProjectPair object to use to check for consistency
    ngens : int, optional, default=2
        The last `ngens` generations will be checked for consistency with
        the overall estimate
    nsigma : int, optional, default=3
        Number of standard errors of overall estimate to use for consistency check
    """
    # Collect free energy estimates for each GEN
    # (use a separate name so the ngens parameter is not clobbered)
    ngens_avail = min(len(complex_phase.gens), len(solvent_phase.gens))
    gen_estimates = list()
    for gen in range(ngens_avail):
        complex_delta_f = complex_phase.gens[gen].free_energy.delta_f
        solvent_delta_f = solvent_phase.gens[gen].free_energy.delta_f
        if (complex_delta_f is None) or (solvent_delta_f is None):
            continue
        binding_delta_f = complex_delta_f - solvent_delta_f
        gen_estimates.append(binding_delta_f)

    if len(gen_estimates) < ngens:
        # We don't have enough GENs
        return False

    # Flag as inconsistent if any GEN estimate is more than nsigma standard
    # errors away from the overall estimate
    overall_delta_f = (
        complex_phase.free_energy.delta_f - solvent_phase.free_energy.delta_f
    )
    for gen_delta_f in gen_estimates[-ngens:]:
        delta_f = overall_delta_f - gen_delta_f
        if abs(delta_f.point) > nsigma * gen_delta_f.stderr:
            return False

    return True
ced51ee927b80a225c840d4556d50a252ccbe77c
63,784
def label_isolated(g):
    """label_isolated

    Creates a vertex property map with True if a node has no neighbours,
    else False.

    Parameters
    ----------
    g (graph_tool.Graph):
        A graph.

    Returns
    -------
    isolated_vp (graph_tool.VertexPropertyMap):
        Property map labelling isolated vertices.
    """
    isolated_vp = g.new_vertex_property('bool')
    for v in g.vertices():
        isolated_vp[v] = (v.out_degree() == 0)
    return isolated_vp
f14d60753cd50a4085a48620b19a8bce0c9397c4
63,786
def _is_valid_make_var(varname):
    """Check if the make variable name seems valid."""
    if len(varname) == 0:
        return False
    # According to gnu make, any chars not whitespace, ':', '#', '=' are valid.
    invalid_chars = ":#= \t\n\r"
    return not any(c in varname for c in invalid_chars)
5c233ff464dabc428bed9f5aa93a3f3f4aa60a0e
63,788
def uniquify_labels(labels):
    """Determines the set of unique characters in labels (as returned by
    load_labels) and the number of occurrences of each unique character.

    Parameters
    ----------
    labels : list of str

    Returns
    -------
    unique_labels : set of char, e.g. {'0','a','b','c'}
    counts : dict where keys are unique_labels and values are counts,
        e.g. {'0': 100, 'a': 200, 'b': 250, 'c': 150}
    """
    all_labels = ''.join(labels)
    unique_labels = set(all_labels)
    counts = dict(zip(unique_labels, [0] * len(unique_labels)))
    for label in all_labels:
        counts[label] += 1
    return unique_labels, counts
b6eb2db46397c32b9a41f1c4affd2a1ca6a150a5
63,791
import hashlib


def get_etag(text):
    """ Compute the etag for the rendered text """
    return hashlib.md5(text.encode('utf-8')).hexdigest()
fdf0d3c93b23fd25934653d15f262864ea82f595
63,794
def get_resource_limits(cursor, user, host):
    """Get user resource limits.

    Args:
        cursor (cursor): DB driver cursor object.
        user (str): User name.
        host (str): User host name.

    Returns:
        Dictionary containing current resource limits.
    """
    query = ('SELECT max_questions AS MAX_QUERIES_PER_HOUR, '
             'max_updates AS MAX_UPDATES_PER_HOUR, '
             'max_connections AS MAX_CONNECTIONS_PER_HOUR, '
             'max_user_connections AS MAX_USER_CONNECTIONS '
             'FROM mysql.user WHERE User = %s AND Host = %s')
    cursor.execute(query, (user, host))
    res = cursor.fetchone()

    if not res:
        return None

    current_limits = {
        'MAX_QUERIES_PER_HOUR': res[0],
        'MAX_UPDATES_PER_HOUR': res[1],
        'MAX_CONNECTIONS_PER_HOUR': res[2],
        'MAX_USER_CONNECTIONS': res[3],
    }
    return current_limits
c0583c885b05194673fd474013b61a0637fa00e7
63,797
import inspect


def instantiate_class_and_inject_attributes(cls, **kwargs):
    """
    Instantiates a class with the given kwargs, picking those arguments that
    are in the signature of cls to use for the __init__, and adding
    attributes to the constructed object with the remaining.

    :param cls: class to instantiate
    :param kwargs: keyword args (some for the class __init__, and others to inject)
    :return: A (possibly enhanced) class instance

    >>> class C:
    ...     def __init__(self, a, b=3):
    ...         self.a = a
    ...         self.b = b
    ...
    >>> c = instantiate_class_and_inject_attributes(C, a=10, foo='bar')
    >>> c.__dict__
    {'a': 10, 'b': 3, 'foo': 'bar'}
    >>> c = instantiate_class_and_inject_attributes(C, a=10, foo='bar', bar='foo', b=1000)
    >>> c.__dict__
    {'a': 10, 'b': 1000, 'foo': 'bar', 'bar': 'foo'}
    >>> try:
    ...     c = instantiate_class_and_inject_attributes(C, foo='bar', bar='foo', b=1000)
    ... except TypeError:  # expected
    ...     pass
    """
    # cls_signature_args = inspect.signature(cls).parameters
    cls_signature_args = inspect.getfullargspec(cls.__init__).args[1:]

    cls_kwargs = dict()
    other_kwargs = dict()
    for k, v in kwargs.items():
        if k in cls_signature_args:
            cls_kwargs[k] = v
        else:
            other_kwargs[k] = v
    o = cls(**cls_kwargs)
    o.__dict__.update(other_kwargs)
    return o
2b36c4f78611cad66c74ae4a2485853c6e6b17ad
63,800
def get_same_predictions(y_pred, y_pred_adv):
    """
    Get the indexes of the predictions where the image and the adversarial
    image were classified as the same class.

    Parameters
    ----------
    y_pred: array
        Array with the predictions of the model on the dataset.
    y_pred_adv: array
        Array with the predictions of the model on the adversarial dataset.

    Returns
    -------
    Array.
    """
    indexes = [i for i, (y, y_adv) in enumerate(zip(y_pred, y_pred_adv))
               if y == y_adv]
    return indexes
fe6d56e49edc79e1b53c74fb27a568ca21842f18
63,803
import collections
import re


def _build_question_dictionary(vqa, min_thre=0):
    """
    :param vqa: VQA instance
    :param min_thre: only words that occur more than this number of times
        will be put in vocab
    :return: word-index dictionary
    """
    counter = collections.defaultdict(int)
    for i, q in vqa.qqa.items():
        words = re.findall(r"[\w']+", q['question'])
        for word in words:
            counter[word] += 1

    question_dict = {}
    indx = 0
    for word, num in counter.items():
        if num > min_thre:
            question_dict[word] = indx
            indx += 1
    return question_dict
adc227095fbd92f661680ec66884d5ac2e4821f5
63,805
def map_ores_code_to_int(code):
    """
    Takes a 1-2 letter code from ORES and turns it into an int.

    ORES score map:
        Stub  - 0
        Start - 1
        C     - 2
        B     - 3
        GA    - 4
        FA    - 5
    """
    return {
        'Stub': 0,
        'Start': 1,
        'C': 2,
        'B': 3,
        'GA': 4,
        'FA': 5,
    }[code]
5730b720ebab91db0d9a5708b458404c233d6b94
63,810
def format_tuple(values, resolution):
    """Returns the string representation of a geobox tuple."""
    fmt = "%%0.%df" % resolution
    return "|".join(fmt % v for v in values)
792020d9043ad0abf5062e574cfbdbbf35d8f803
63,812
def f_htmltag(name: str, path: str) -> str:
    """
    Receives the URL and the name of the object it points to in order to
    create the html link tag.

    :param name: str contains the name/title of the (html) plot.
    :param path: str contains the path to the (html) plot.
    :return: str. HTML tag with the link and title. This tag is part of a
        dropdown menu.
    """
    html_tag = '\t<option value="' + path + '">' + name + '</option>\n'
    return html_tag
cf6be68343137d1789d2e4e1e8b95119a32cd85b
63,815
def attr_to_dict(obj, attr, dct):
    """
    Add attribute to dict if it exists.

    :param dct:
    :param obj: object
    :param attr: object attribute name
    :return: dict
    """
    if hasattr(obj, attr):
        dct[attr] = getattr(obj, attr)
    return dct
a6b3281cdbd887577354290b56ae95128d19198f
63,817
def create_securitygroup_dialog(context, request, securitygroup_form=None,
                                security_group_names=None):
    """ Modal dialog for creating a security group."""
    return dict(
        securitygroup_form=securitygroup_form,
        security_group_names=security_group_names,
    )
affc450f86f20ad81a8a5750c3f817cc0245d220
63,818
def remove_species_and_rename_other(df, drop_species, rename_category):
    """
    Remove the species in drop_species, and rename the "other" category to
    "rarely_requested_species".
    """
    reduced_df = df.loc[~df.species_group.isin(drop_species), :].copy()
    reduced_df.species_group.cat.remove_categories(drop_species, inplace=True)
    reduced_df.species_group.cat.rename_categories(rename_category, inplace=True)
    return reduced_df
b5d453ebb4ed8ba5111e8ec909192927433224a8
63,821
def create_graph(V, color, label):
    """
    Set up the graph information: x values, y values, color and label name.

    Parameters
    ----------
    V : dict
        V contains X and Y values.
    color : str
        color name.
    label : str
        label name

    Returns
    -------
    dict :
        Returns a dict = {'x': X, 'y': Y, 'color': color, 'label': label}
    """
    x, y = list(V.keys())
    return {
        'x': V[x],
        'y': V[y],
        'color': color,
        'label': label
    }
5e5153606becdcbc18b77359c541925c3bb77915
63,823
import string


def _normalize(word):
    """Convert word to a form we can look up in the CMU dictionary."""
    return word.strip().strip(string.punctuation).lower()
ae6bdaa9b05c68e2429464e8394bb8e40b8fe07a
63,824
def anotate(name):
    """Annotate an object with a name."""
    def decorator(obj):
        setattr(obj, "_tea_ds_plugin", name)
        return obj
    return decorator
2e2634508fd8a9554bca9510f8f4e09017451be3
63,826
def countEdges(halfEdge):
    """Given a half-edge of a polygon, count the number of edges in this polygon."""
    numberOfEdges = 1
    startingEdge = halfEdge
    edge = startingEdge.next
    while edge is not startingEdge:
        edge = edge.next
        numberOfEdges += 1
    return numberOfEdges
9d7d67a4ffd9ecfd60104afa0307d2831adbcd08
63,834
def db_delete_from_user_fields(project_id: str, field: str) -> str:
    """Remove the specified project from the field in the UsersCollection."""
    return f'db.UsersCollection.updateMany({{}}, {{ $unset: {{ "{field}.{project_id}" : ""}} }})'
04f94e12e0ffb0765456b453ffb6e942f0cc39ef
63,836
def find_missing_number(nums: list[int]) -> int:
    """
    Complexity:
        N = len(nums)
        Time:  O(N)  (iterate the entire array)
        Space: O(1)  (in-place computations)

    Args:
        nums: array containing n distinct numbers taken from the range [0,n]
            (n+1 possible numbers)

    Returns: the missing number in the range [0,n]

    Examples:
        >>> find_missing_number([])
        0
        >>> find_missing_number([0])
        1
        >>> find_missing_number([1])
        0
        >>> find_missing_number([4, 0, 3, 1])
        2
        >>> find_missing_number([8, 3, 5, 2, 4, 6, 0, 1])
        7
    """
    ## ALGORITHM ##
    def swap_elements(i, j):
        nums[i], nums[j] = nums[j], nums[i]

    ## INITIALIZE VARS ##
    curr_idx, n = 0, len(nums)

    ## CYCLIC SORT ## O(n)
    while curr_idx < n:
        target_idx = nums[curr_idx]
        if curr_idx != target_idx and target_idx < n:
            swap_elements(curr_idx, target_idx)
        else:
            curr_idx += 1

    ## FIND item ## O(n)
    for curr_idx in range(n):
        target_idx = nums[curr_idx]
        if curr_idx != target_idx:
            return curr_idx
    else:
        return n
aae8fffbef86eaf2dcdf0bf166e57858a4a9aa75
63,840
import torch


def smooth_dice_beta_loss(pred: torch.Tensor, target: torch.Tensor,
                          beta: float = 1., smooth: float = 1.,
                          eps: float = 1e-6) -> torch.Tensor:
    """
    Smoothed dice beta loss. Computes
        1 - (((1 + beta**2) * tp + smooth) /
             ((1 + beta**2) * tp + beta**2 * fn + fp + smooth + eps))

    :param pred: (torch.Tensor) predictions, logits
    :param target: (torch.Tensor) target, logits or binary
    :param beta: (float) weight to emphasize recall
    :param smooth: (float) smoothing value
    :param eps: (eps) epsilon for numerical stability
    :returns dice_loss: (torch.Tensor) the dice loss
    """
    pred = torch.sigmoid(pred)
    target = (target > 0).float()

    tp = (pred.reshape(-1) * target.reshape(-1)).sum()
    fp = pred.reshape(-1).sum() - tp
    tn = ((1 - pred).reshape(-1) * (1 - target).reshape(-1)).sum()
    fn = (1 - pred).reshape(-1).sum() - tn

    return 1 - (((1 + beta ** 2) * tp + smooth) /
                ((1 + beta ** 2) * tp + beta ** 2 * fn + fp + smooth + eps))
7b9086d8f7d0c94d405f04f55df787fcf406e1b1
63,853
def sort_dicts(dicts, sort_by):
    """
    :param dicts: list of dictionaries
    :param sort_by: key by which the list should be sorted
    :return: sorted list of dicts
    """
    return sorted(dicts, key=lambda k: k[sort_by])
1215b4f90de13ee45181a7573b3f5bd21fc36aab
63,855
def map_label(row, label_map):
    """
    Create a tuple from two pre-existing columns, then use this tuple as the
    key to look up the table label in the dictionary.

    Parameters
    ----------
    row: row
        this function will apply to every row in the dataframe
    label_map: dictionary
        where the label will be identified

    Returns
    --------
    table_label: str
        the label to be applied to that variable in the output table
    """
    key = (row["variable_name"], row["value_label"])
    table_label = label_map[key]
    return table_label
091f57783c995d1b69822b81bd264437ff5e1bcb
63,857
import base64


def serializePemBytesForJson(pem: bytes) -> str:
    """Serializes the given PEM-encoded bytes for inclusion in a JSON request body."""
    return base64.b64encode(pem).decode('utf-8')
48f332bafadc18ccabd8f61715bee3d735296507
63,858
def mode(values):
    """Returns the mode of the values. If multiple values tie, one value is
    returned.

    Args:
        values: A list of values.

    Returns:
        The mode.
    """
    counts = {k: values.count(k) for k in set(values)}
    return sorted(counts, key=counts.__getitem__)[-1]
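A quick usage sketch (illustrative only):

>>> mode([1, 2, 2, 3, 3, 3])
3
>>> mode(['a', 'b', 'a'])
'a'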
19b82c2d05cec4755608883dc76b328d07e8a72c
63,862
import re


def clean_text(txt, regex_lst, RE_EMOJI, stopwords_hindi):
    """Takes the regular expressions for the various things we want to
    remove, such as punctuation, and returns the cleaned sentence.

    Args:
        txt (str): Sentence which we have to clean.
        regex_lst (List[re]): List of all the regular expressions according
            to which we have to clean the data.
        RE_EMOJI (re): The regular expression for emoji removal.
        stopwords_hindi (List[str]): List of stopwords in the Hindi language.

    Returns:
        str_cleaned (str): The cleaned sentence.
    """
    str_cleaned = txt
    # Iterate over the list of regular expressions.
    for regex in regex_lst:
        str_cleaned = re.sub(regex, '', str_cleaned)
    str_cleaned = RE_EMOJI.sub(r'', str_cleaned)
    sent_splitted = str_cleaned.split()
    # Do not add the word to the list if it is in the stopwords.
    str_cleaned = " ".join([x.lower() for x in sent_splitted
                            if x not in stopwords_hindi])
    return str_cleaned
39a5662fec925fcf2ff3f2703c838cfdc36de412
63,865
def format_data(calendar, planning_parser):
    """
    Parse every event in the calendar string given in parameter with the
    parser also given in parameter and return a list with the events as
    formatted data.

    The calendar string must respect the iCalendar format in the first place.

    The data returned correspond to this example:

        [
            {
                "title": "Any title",
                "start_date": datetime.datetime(2017, 9, 25, 8, 15, 0, 0),
                "end_date": datetime.datetime(2017, 9, 25, 10, 15, 0, 0),
                "classrooms": ["Room 1", "Room b"],
                "groups": ["TD1", "TD2"],
                "teachers": ["Mr Smith"],
                "undetermined_description_items": ["Ms WÎεrd ϵncöding", "garbage"],
                "event_id": "ADE4567890123456d89012d456789012d456d89",
                "last_update": datetime.datetime(2017, 8, 25, 23, 40, 2)
            },
            {
                ...
            },
            ...
        ]

    :param calendar: the iCalendar string to parse
    :param planning_parser: the parser to use (an instance of EventParser)
    :return: a list of events
    """
    ret = []
    vevents = calendar.walk("VEVENT")
    for vevent in vevents:
        planning_parser.parse(vevent)
        appointment = {
            "title": planning_parser.get_title(),
            "start_date": planning_parser.get_start_date(),
            "end_date": planning_parser.get_end_date(),
            "classrooms": planning_parser.get_classrooms(),
            "groups": planning_parser.get_groups(),
            "teachers": planning_parser.get_teachers(),
            "undetermined_description_items": planning_parser.get_undetermined_description_items(),
            "event_id": planning_parser.get_event_id(),
            "last_update": planning_parser.get_update_time()
        }
        ret.append(appointment)
    return ret
3c90269bcf7c4bef3c3ad84807719f014a4da87d
63,870
def get_frequency(key=88, pitch=440.0):
    """
    Get frequency from piano key number.

    :param key: key number of the piano
    :param pitch: concert pitch of the A4 note in Hz
    :return: frequency in Hz
    """
    return pow(2, float(key - 49) / 12.0) * pitch
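A quick check (illustrative only; key 49 is A4, key 40 is middle C):

>>> get_frequency(49)
440.0
>>> round(get_frequency(40), 2)
261.63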
d73aa776d6a6760335b19d966b4198488eff4c21
63,871
def get_image_features(image):
    """Return image data as a list."""
    return list(image.getdata())
b17d2bacee1821f0dee82ca1eb30ce85aa648057
63,885
import re


def indention(str, front=re.compile(r"^\s+").match):
    """Find the number of leading spaces. If none, return 0."""
    result = front(str)
    if result is not None:
        start, end = result.span()
        return end - start
    else:
        return 0  # no leading spaces
0586ce697116d9bc6a3c62acd2d054c9cb704888
63,888
def clip(n, start, stop=None):
    """Return n clipped to range(start, stop)."""
    if stop is None:
        stop = start
        start = 0
    if n < start:
        return start
    if n >= stop:
        return stop - 1
    return n
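A quick usage sketch (illustrative only); note the upper bound is exclusive, as in range():

>>> clip(10, 0, 5)
4
>>> clip(-3, 0, 5)
0
>>> clip(2, 5)  # single bound means range(0, 5)
2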
bbb7091235d2bd24e704be31643f3ad3265533dd
63,891
def istype(type, *obj):
    """
    Returns whether or not all the inputs are of the specified type

    Parameters
    ----------
    type : type
        the type to check against
    *obj : object...
        a sequence of objects

    Returns
    -------
    bool
        True if the inputs are of the specified type, False otherwise
    """
    return all(isinstance(o, type) for o in obj)
b626375b22a23aea6d21ba0df3acc1f84d2a4fc2
63,892
def _set_timelabel(obs, use_tref=True):
    """For a given observable, returns the timelabel to be used in plots.

    Parameters
    ----------
    obs : Observable instance
    use_tref : bool
        whether to use tref (set False for stationary plotting)

    Returns
    -------
    timelabel : str
    """
    if obs.mode != 'dynamics' and obs.timing == 'g':
        timelabel = 'Generations'
        if obs.tref is not None and use_tref:
            timelabel += ' (since tref {})'.format(obs.tref)
    else:
        timelabel = 'Time (minutes)'
    return timelabel
726e87d5122d3e3108c73d1b69c0ad6a0931503b
63,900
import torch


def all_diffs(a, b):
    """ Returns a tensor of all combinations of a - b.

    Args:
        a (2D tensor): A batch of vectors shaped (B1, F).
        b (2D tensor): A batch of vectors shaped (B2, F).

    Returns:
        The tensor of all pairwise differences between all vectors in `a`
        and in `b`, of shape (B1, B2, F).
    """
    return torch.unsqueeze(a, dim=1) - torch.unsqueeze(b, dim=0)
a7aac8eb876fad54af2dda38f97e0d7fdbd5f756
63,901
def wild2regex(string):
    """Convert a Unix wildcard glob into a regular expression"""
    return (string.replace('.', r'\.')
                  .replace('*', '.*')
                  .replace('?', '.')
                  .replace('!', '^'))
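A quick usage sketch (illustrative only, assuming `re` is imported; note the resulting pattern is unanchored):

>>> wild2regex('*.txt')
'.*\\.txt'
>>> bool(re.match(wild2regex('*.txt'), 'notes.txt'))
True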
c30360076fde573b3865143b761579e2c7466877
63,904
def _local_median(img, x, y, k):
    """ Computes median for the k-neighborhood of img[x, y] """
    flat = img[x-k : x+k+1, y-k : y+k+1].flatten()
    flat.sort()
    return flat[len(flat) // 2]
ddcb0ad6fd878142dc7d1ab5ee86503e7d2dc397
63,905
def crop_center(img, cropx, cropy, cropz):
    """
    Take a center crop of the images.
    If we are using a 2D model, then we'll just stack the z dimension.
    """
    x, y, z, c = img.shape

    # Make sure starting index is >= 0
    startx = max(x // 2 - (cropx // 2), 0)
    starty = max(y // 2 - (cropy // 2), 0)
    startz = max(z // 2 - (cropz // 2), 0)

    # Make sure ending index is <= size
    endx = min(startx + cropx, x)
    endy = min(starty + cropy, y)
    endz = min(startz + cropz, z)

    return img[startx:endx, starty:endy, startz:endz, :]
047144047667bafd02e5ef5691b3382391680e44
63,910
def slice_to_flow(arr, i, n):
    """ Slice to the ith flow value given a total of n possible flow values. """
    assert arr.shape[0] % n == 0
    incr = round(arr.shape[0] / n)
    i_lower = i * incr
    i_upper = (i + 1) * incr
    return arr[i_lower:i_upper, :]
7772fc0684327cf771169c766ce733ef8e757359
63,911
def utility(game, state, player):
    """Return the value to player; 1 for win, -1 for loss, 0 otherwise."""
    return state.utility if player == 'W' else -state.utility
7e718af20e86967b4f7fff072e579c3acf9b2b5b
63,916
def parenthesise(_value):
    """Adds parentheses around a value."""
    return '(' + _value + ')'
96b43a5b260ef6e26bc2c95c705350a9a8442430
63,917
import requests


def get_username(uuid):
    """Get the username of a player from the UUID"""
    r = requests.get("https://api.mojang.com/user/profiles/{}/names".format(uuid))
    if r.ok and r.status_code not in [204, 400]:
        return r.json()[-1]["name"]
    elif r.status_code in [204, 400]:
        raise ValueError("User not found")
    else:
        raise Exception("Can't reach Mojang API")
d852b4072e3619d415c25f3cb4eb5f0c7a91e0bd
63,918
def _module_to_paths(module):
    """Get all API __init__.py file paths for the given module.

    Args:
        module: Module to get file paths for.

    Returns:
        List of paths for the given module. For e.g. module foo.bar requires
        'foo/__init__.py' and 'foo/bar/__init__.py'.
    """
    submodules = []
    module_segments = module.split('.')
    for i in range(len(module_segments)):
        submodules.append('.'.join(module_segments[:i+1]))
    paths = []
    for submodule in submodules:
        if not submodule:
            paths.append('__init__.py')
            continue
        paths.append('%s/__init__.py' % (submodule.replace('.', '/')))
    return paths
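A quick usage sketch (illustrative only):

>>> _module_to_paths('foo.bar')
['foo/__init__.py', 'foo/bar/__init__.py']
>>> _module_to_paths('')
['__init__.py']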
3982259b0d22e9cea77297b5317cc57fbb8136b9
63,920
def is_long_book(book):
    """Does a book have 600+ pages?"""
    return book.number_of_pages >= 600
1cf0daca0aa027ba9a33ceb6b9858b8540576e24
63,922
def expand_type_name(type_):
    """
    Returns concatenated module and name of a type for identification.

    :param type_: Type.
    :type type_: type
    :return: Type name, as ``<type's module name>.<type name>``.
    :rtype: unicode | str
    """
    return '{0.__module__}.{0.__name__}'.format(type_)
0bed7e1dd71d623ad901cdf8c8ae39fa64246dad
63,923
def comma_separate(elements) -> str:
    """Map a list to strings and make comma separated."""
    return ", ".join(map(str, elements))
4bcc788228515bfb05bc8b931880ee28814582cb
63,925
import pickle


def import_model(clf_path='../models/disaster_response_clf.pkl'):
    """
    Function: load model from pickle file

    Args:
        clf_path (str): path of pickle file

    Return:
        model (GridSearch obj): loaded model
    """
    with open(clf_path, 'rb') as f:
        model = pickle.load(f)
    return model
201013d0c34479a65aab196f1840b18bdc6665e8
63,932
def to_esmf(ts):
    """
    Convert a UTC datetime into an ESMF string.

    :param ts: the datetime object
    :return: the date time in ESMF format
    """
    return '%04d-%02d-%02d_%02d:%02d:%02d' % (
        ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.second)
10ebddcdc7ab887b5fe25132d1b8591474dc9b11
63,933
from pathlib import Path


def does_exist(os_path: str) -> bool:
    """ Check whether the file exists. """
    return Path(os_path).exists()
11c2e6e32e3b0856b2e1faf547cddad0849cd55d
63,936
def removeDotGit(url):
    """ Remove trailing `.git` from the git remote url """
    if url.endswith('.git'):
        return url[:-4]
    return url
421900a7d78eece269545c674a8ce8013d47972f
63,940
def log_message(source, *parts):
    """Build log message."""
    message = source.__class__.__name__
    for part in parts:
        message += f": {part!s}"
    return message
18f004f6d76a05dacced5a3b1e437bb766f8d78b
63,941
def interval_union(intervals):
    """
    Returns the total size of the intervals; expects each interval as
    (chr, left, right) with inclusive coordinates.
    """
    intervals.sort()
    total_len = 0
    cur_chr, cur_left, cur_right = intervals[0]  # left-most interval
    for interval in intervals:
        # open a new interval if left > cur_right or chr != cur_chr
        if interval[1] > cur_right or interval[0] != cur_chr:
            total_len += cur_right - cur_left + 1
            cur_chr, cur_left, cur_right = interval
        else:
            # update cur_right
            cur_right = max(interval[2], cur_right)
    # the last one
    total_len += cur_right - cur_left + 1
    return total_len
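A quick usage sketch (illustrative only): chr1 merges to [1,15] (15 bases) plus [20,25] (6 bases), and chr2 contributes [1,5] (5 bases):

>>> interval_union([('chr1', 1, 10), ('chr1', 5, 15), ('chr1', 20, 25), ('chr2', 1, 5)])
26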
c05495b91c2b61455ccec37398d8377ee27dc109
63,947
import typing


def selective_join(parts: typing.List[str], n: int) -> typing.List[str]:
    """
    Given a list of N+1 strings and an integer n in the [0, 2**N - 1] range,
    concatenate the i-th and (i+1)-th strings with a space in between if
    bit i is not set in n.

    Examples:
        selective_join(['a', 'b', 'c'], 0b00) == ['a b c']
        selective_join(['a', 'b', 'c'], 0b01) == ['a', 'b c']
        selective_join(['a', 'b', 'c'], 0b10) == ['a b', 'c']
        selective_join(['a', 'b', 'c'], 0b11) == ['a', 'b', 'c']

    This function is used as part of finding all the ways to split a string
    by spaces.

    :param parts: Strings to join
    :param n: Integer (bit map) to set the positions to join
    :return: resulting list of strings
    """
    result = []
    concatenated = [parts[0]]
    for part in parts[1:]:
        if n & 1:
            result.append(' '.join(concatenated))
            concatenated = [part]
        else:
            concatenated.append(part)
        n >>= 1
    if concatenated:
        result.append(' '.join(concatenated))
    return result
fb9ea884284d0a71cba37ebbc9487cd17f1007d3
63,949
def get_totals(cpuinfo):
    """Compute totals:
        - real: physical CPUs
        - cores: cores per physical CPU
        - total: logical CPUs, real*cores*siblings

    Note: Siblings are only counted if Hyperthreading is enabled
    """
    # We assume the same CPU architecture for multi-CPU systems
    real = len({cpuinfo[k]['physical_id'] for k in cpuinfo.keys()})
    cores = int(cpuinfo['0']['cpu_cores'])
    total = real * cores

    # Hyperthreading support (added for completeness)
    siblings = int(cpuinfo['0']['siblings'])
    if cores != siblings:
        cpuinfo['siblings'] = siblings
        total *= siblings

    return real, cores, total
d54da926af84f0c588f329d3f17afdb5bac314ea
63,950
def dos_line(f, ndos):
    """
    Parses density of states data from DOSCAR

    Parameters
    ----------
    f : file object
        File object containing data in DOSCAR file
    ndos: int
        Number of lines in f to be parsed

    Returns
    -------
    data : list
        List of density of states data
    line_length : int
        Number of columns in each line
    """
    data = []
    line_length = []
    for n in range(ndos):
        line = next(f)
        if not line_length:
            line_length = len(line.split())
        if len(data) != len(line.split()):
            data = [[] for i in range(len(line.split()))]
        for i, element in enumerate(line.split()):
            data[i].append(float(element))
    return data, line_length
b9aa09f937b0a6199f5469c1b9332f04805977bf
63,952
def attr_sum_of_nodes(con, attr_name, node_ids=None):
    """Get the SUM() of node_ids's values for attr_name.

    con: a SQLite connection
    attr_name: The name of the attr values to fetch.
    node_ids: a list of ids to compute the sum from.

    Returns a number that is the requested sum.
    """
    if node_ids is None:
        sql = "select sum(value) from attrs where name = '%s' group by name" % attr_name
    else:
        ids = (str(id) for id in node_ids)
        sql = ("select sum(value) from attrs where name = '%s'"
               " and parent in (%s) group by name" % (attr_name, ','.join(ids)))
    cur = con.execute(sql)
    row = cur.fetchone()
    if row:
        return row[0]
    else:
        return 0
e8d86652823e96bb9b8a6ad2b5c89011b166b061
63,959
def hamming(n):
    """Returns the nth Hamming number"""
    hamming = {1}
    while len(hamming) <= n * 3.5:
        new_hamming = {1}
        for i in hamming:
            new_hamming.add(i * 2)
            new_hamming.add(i * 3)
            new_hamming.add(i * 5)
        # merge new numbers into the hamming set
        hamming = hamming.union(new_hamming)

    hamming = sorted(hamming)
    return hamming[n - 1]
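A quick usage sketch (illustrative only), reproducing the start of the Hamming sequence:

>>> [hamming(i) for i in range(1, 11)]
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12]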
ef64b67810bff9ba6fbac0ca17a5b60f801279cb
63,960
def get_amount_in_ml(dose, concentration):
    """Calculates the amount of liquid drug to be given in mls, given a
    dose (mg/kg) and concentration (mg/ml).

    :params:
        dose (mg/kg) - Float
        concentration - Float

    :returns:
        Amount in mls - Float

    Examples:
        >>> get_amount_in_ml(2.5, 50)
        0.05
        >>> get_amount_in_ml(80, 50)
        1.6
    """
    return dose / concentration
6dc9b0fe22c2e70984cd99145ecaad1ec6d3a906
63,965
def races_per_year(self):
    """Return the total number of career starts for the horse divided by the
    horse's age as at the race date"""
    if self.age is not None and self.age > 0:
        return self.career.starts / self.age
7a6371ac95b1fb78ba19b18595a5df101c2b5988
63,967
def compare_np_arrays(x, y):
    """Checks if 2 numpy arrays are identical"""
    return (x == y).all()
a694a5070063126d6930d9ced3f05047452593e9
63,971
def sql_select_one(tbl, col_key):
    """ Select one column from a table """
    return tbl[col_key]
e9bb16d3cc1d6e1c7bf5f9b6e4467391c5e8d674
63,973
def _get_model_or_module(crypten_model):
    """
    Returns `Module` if the model contains only one module. Otherwise
    returns the model.
    """
    num_modules = len(list(crypten_model.modules()))
    if num_modules == 1:
        for crypten_module in crypten_model.modules():
            return crypten_module
    return crypten_model
bd4bc0613f872f79302b704d6cd4648af7ebf02d
63,976
def C_fingas(heavy, precent_dist_180):
    """
    Returns the constant C used in the Fingas model.
    Source: (Fingas, 2015)

    Parameters
    ----------
    heavy : bool
        True if the fuel needs to follow a ln, else it will be a sqrt.
    precent_dist_180 :
        Percentage distilled by weight at 180 °C
    """
    if heavy:
        return 0.165 * precent_dist_180
    else:
        return 0.0254 * precent_dist_180
ba04d07b3e6a228fc28619c0d8ec7b2cb6af5b45
63,988
def build_speechlet_response(output, should_end_session):
    """Return a speech output after an Alexa function is executed.

    Builds a simple speech response after the user invokes a particular
    Alexa function. The response used here is a simple, plain text response
    but can be enhanced by customizing the card that shows to users that
    have graphical displays (like the Echo Show or the Alexa app). See
    https://developer.amazon.com/docs/custom-skills/include-a-card-in-your-skills-response.html
    for more details about customizing the response.

    You can also specify whether the session should be ended after this
    response is heard, or whether Alexa should prompt for another input from
    the user using the should_end_session parameter.

    Args:
        output: A string that Alexa will read to the user as the output.
        should_end_session: True if the session should end after hearing
            this response. False if the session should stay open for
            another input from the user.

    Returns:
        Dictionary containing the output speech and whether or not the
        session should end.
    """
    return {
        'outputSpeech': {
            'type': 'PlainText',
            'text': output
        },
        'shouldEndSession': should_end_session
    }
76ad5983f8199b0786bba42fc997551d3f03a252
63,993
def user_select(client, file_=None, maxi=None):
    """Prompt user to select one or more dimensions, and return their
    selections.

    Args:
        client (obj): creopyson Client.
        `file_` (str, optional): Model name. Default is current active model.
        maxi (int, optional): The maximum number of dimensions that the user
            can select. Default is `1`.

    Raises:
        Warning: error message from creoson.

    Returns:
        (list:dict): List of selected dimension information
            name (str): Dimension name
            value (str|float): Dimension value; if encoded is True it is a
                str, if encoded is False it is a float.
            encoded (boolean): Whether the returned value is Base64-encoded.
            file (str): File name.
            relation_id (int): Relation ID number.
    """
    data = {"max": 1}
    if file_ is not None:
        data["file"] = file_
    else:
        active_file = client.file_get_active()
        if active_file:
            data["file"] = active_file["file"]
    if maxi is not None:
        data["max"] = maxi
    return client._creoson_post("dimension", "user_select", data, "dimlist")
b122fbe89119c76f2b47df278cb546a3959f5175
63,994
def calculate_CHI(seq_target, seq_source, df_target_freq, df_source_freq):
    """Calculate Codon Harmonization Index (CHI)"""
    df_source_freq = df_source_freq.set_index('codon')
    df_target_freq = df_target_freq.set_index('codon')

    diffsum = 0
    for i in range(0, len(seq_source), 3):
        codon_target = str(seq_target[i:i+3])
        codon_source = str(seq_source[i:i+3])
        diffsum += abs(df_target_freq.loc[codon_target, 'freq']
                       - df_source_freq.loc[codon_source, 'freq'])

    return diffsum / (len(seq_source) / 3)
04bc384be4137f4a10432e5d3fcd87953b2157db
63,997
import re


def is_url_valid(string):
    """
    Checks whether the input string follows URL format or not.

    Args:
        string: Input string

    Returns:
        True: if string follows URL format
        False: if string doesn't follow URL format

    >>> is_url_valid("C:/users/sample/Desktop/image.jpg")
    False
    >>> is_url_valid("/examples/data/image.jpg")
    False
    >>> is_url_valid("http://google.com")
    True
    >>> is_url_valid("https://images.financialexpress.com/2020/01/660-3.jpg")
    True
    >>> is_url_valid("https://images.financialexpress.com 2020/01/660-3.jpg")
    False
    """
    match_result = False
    pattern = re.compile(
        r'^(?:http|ftp)s?://'
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
        r'localhost|'
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
        r'(?::\d+)?'
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    if re.match(pattern, string):
        match_result = True
    return match_result
a6ff543fa68f1cdaef5f7a4c4be8f6e5979e6a5d
63,998
def word_to_index(vocab):
    """
    Encode words by indexing.

    Parameters
    ----------
    vocab: list.

    Returns
    -------
    word_to_idx dictionary.
    """
    word_to_idx = {}
    for word in vocab:
        word_to_idx[word] = len(word_to_idx)
    return word_to_idx
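A quick usage sketch (illustrative only; vocab is assumed to contain unique words, since duplicates would leave gaps in the numbering):

>>> word_to_index(['the', 'cat', 'sat'])
{'the': 0, 'cat': 1, 'sat': 2}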
24d0c71a27a1c337416d3a9bc29da1d3dac26996
64,007