content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def hamming(list_sequences):
    """Compute the Hamming distance of two DNA strings.

    :param list_sequences: iterable of exactly two equal-length sequences
    :return: number of positions at which the two sequences differ

    The original materialized ``list(zip(...))`` and kept a manual counter;
    ``zip`` is already iterable, so a generator fed to ``sum`` suffices.
    """
    string_a, string_b = list_sequences
    return sum(1 for nt_a, nt_b in zip(string_a, string_b) if nt_a != nt_b)
df80a89927a604d8d88879165bf41b3bf9a1096e
35,091
def _get_intersection_over_union(hyp_rect, ref_rect): """Given two rectangles (hyp and ref) in shapely format, it returns the IOU value. IOU value is the ratio between the area of the intersection of the two polygons divided by the area of their union. Returns ------- iou_val: (float) """ try: rect_intersection = hyp_rect & ref_rect intersection_area = rect_intersection.area union_area = hyp_rect.area + ref_rect.area - intersection_area iou_val = float(intersection_area) / union_area return iou_val except: return 0
17f446adec1faa67b47e72b23ea15b797ec0878b
35,092
def lucasLehmer(p):
    """Lucas-Lehmer test: is the Mersenne number 2**p - 1 prime?

    Prints each step of the S sequence for inspection.
    """
    if p == 2:
        return True
    mersenne = (1 << p) - 1  # 2 ** p - 1
    print("M(%d) = %d" % (p, mersenne))
    s = 4
    print ("S0 = %d" % (s))
    # The test iterates the recurrence s -> s^2 - 2 (mod M) exactly p-2 times.
    for i in range(1, p - 1):
        nxt = (s ** 2 - 2) % mersenne
        print("S%d = (%d ** 2 - 2) (mod %d) = %d (mod %d)" % \
            (i, s, mersenne, nxt, mersenne))
        s = nxt
    # 2**p - 1 is prime iff the sequence ends at zero.
    return s == 0
05de89c2dcac56d52e28862d7b56b739f4bc8a2c
35,094
def reformat_acs_vars(col):
    """Convert a variable name to the Census Detailed Tables API format.

    See <https://api.census.gov/data/2019/acs/acs5/variables.html> for
    variable descriptions.

    Parameters
    ----------
    col : str
        column name to adjust (e.g. ``B01001e1``)

    Returns
    -------
    str
        reformatted column name (e.g. ``B01001_001E``)
    """
    pieces = col.split("e")
    # Zero-pad the numeric suffix to three digits and append the 'E' marker.
    return "{}_{}E".format(pieces[0], pieces[1].rjust(3, "0"))
535a42bef9da5ade2afe87abab59578cf60e0ba6
35,095
from typing import Union from datetime import datetime def _new_name(seq_type: str) -> Union[str, None]: """ Creates a new file name based on the sequence type. """ tstamp = datetime.now().strftime("%Y_%m_%d-%I-%M-%S_%p") if seq_type.lower() == "dna": return f"dna_sequence_{tstamp}.txt" if seq_type.lower() == "rna": return f"rna_sequence_{tstamp}.txt" print("Invalid sequence type. Choose RNA or DNA.") return "Failed!"
2ed107900bc936472894cb40ee44cc887e9f8201
35,097
def t_test_prepare_cha(data):
    """Prepares data for ttest() to produce output Table A.4 (difference in
    municipality characteristics).

    :param (df) data: dataframe (municipality_characteristics_data.dta)
    :return: tuple containing two dfs as elements
        (first: female mayor, second: male mayor)

    The original duplicated the rename mapping for both tables and dropped
    the closing parenthesis in the female table's last label
    ('Log(female share, manufacturing'); a single shared map fixes both.
    """
    rename_map = {
        'log_bevoelkerung': 'Log(population)',
        'log_flaeche': 'Log(land area)',
        'log_debt_pc': 'Log(debt p.c.)',
        'log_tottaxrev_pc': 'Log(tax revenues p.c.)',
        'log_gemeinde_beschaef_pc': 'Log(local gov. employment p.c.)',
        'log_female_sh_gem_besch': 'Log(female share, local gov. employment)',
        'log_tot_beschaeft_pc': 'Log(total employment p.c.)',
        'log_female_share_totbesch': 'Log(female share, total employment)',
        'log_prod_share_tot': 'Log(manufacturing / total employment)',
        'log_female_share_prod': 'Log(female share, manufacturing)',
    }
    # The first 12 columns hold identifiers / treatment info, not outcomes.
    first_cols = data.columns[:12]
    table_f = (data[data["geschl_first_placed"] == "f"]
               .drop(columns=first_cols)
               .rename(columns=rename_map))
    table_m = (data[data["geschl_first_placed"] == "m"]
               .drop(columns=first_cols)
               .rename(columns=rename_map))
    return table_f, table_m
a6f1890bb5294d153516833d3788391918c6719a
35,098
def parse_and_validate_latitude(request):
    """Extract and check the validity of latitude.

    Args:
        request: HTTP request.
    Returns:
        latitude (float) if valid.
    Raises:
        ValueError: if latitude is not float, or outside range [-90, 90].
    """
    raw = request.rel_url.query.get("lat")
    lat = float(raw)
    if lat > 90 or lat < -90:
        raise ValueError
    return lat
3bde916112e0c24cd989275dba389826e56a4e5f
35,103
def get_key(my_dict, val):
    """Find the first key mapped to *val* in a dictionary.

    :param my_dict: the dictionary to search
    :param val: the value of the target key
    :return: the first matching key, or the string "key doesn't exist"
    """
    matches = (k for k, v in my_dict.items() if v == val)
    return next(matches, "key doesn't exist")
3ad9ed2a0ee65aaf232fc941a6ee94739b980880
35,105
import argparse


def create_parser():
    """Build the command-line parser (k-NN parameters plus run metadata)."""
    parser = argparse.ArgumentParser()
    specs = [
        (('-knn', '--knn_across'), dict(required=True, type=int)),
        (('-ka', '--knn_within'), dict(required=True, type=int)),
        (('-dt', '--result_date'), dict(type=str, help="eg: 200803")),
        (('-isub', '--i_sub'), dict(type=str, help="[0-9]")),
    ]
    for flags, kwargs in specs:
        parser.add_argument(*flags, **kwargs)
    return parser
fbab729635f9bf0212dd6481c2de00cfef502c13
35,106
def latlonfilter(df, llcrnr, urcrnr):
    """Keep only rows whose latitude/longitude fall strictly inside a box.

    Parameters
    ----------
    df : pandas.DataFrame
        must contain 'latitude' and 'longitude' columns
    llcrnr : tuple
        lower left corner (latitude, longitude)
    urcrnr : tuple
        upper right corner (latitude, longitude)

    Returns
    -------
    pandas.DataFrame
        rows with llcrnr < (lat, lon) < urcrnr (exclusive bounds)
    """
    lat_lo, lon_lo = llcrnr
    lat_hi, lon_hi = urcrnr
    inside = ((df["latitude"] > lat_lo) & (df["latitude"] < lat_hi)
              & (df["longitude"] > lon_lo) & (df["longitude"] < lon_hi))
    return df[inside]
afa89d002fb806721577dad1d7ad7ccf669c488e
35,107
def rgb():
    """Names of the eye's three basic cone sensitivities."""
    return ["red", "green", "blue"]
0de059f79c9c4e99d13e846482f3988a16c47812
35,108
from typing import Optional
from pathlib import Path


def find_user_library() -> Optional[Path]:
    """Locate the user's default music library folder.

    Returns the expanded ``~/Music`` path when it exists, otherwise None.
    """
    candidate = Path('~/Music').expanduser()
    return candidate if candidate.exists() else None
d94e4cfbb48a06a4e870633c7be2fcea4597437e
35,109
def translate_key(key):
    """Map a lowercase config key to its canonical ssh-config spelling.

    Unknown keys pass through unchanged. Returns a string.
    """
    mapping = {
        'user': 'User',
        'identityfile': 'IdentityFile',
        'proxycommand': 'ProxyCommand',
        'ip': 'Hostname',
        'hostname': 'Hostname',
        'port': 'Port',
    }
    # dict.get with the key itself as fallback replaces the if/else branch.
    return str(mapping.get(key, key))
9c7b9b32d1b341946a9c66120c7b32c9c304c754
35,110
import hashlib


def file_md5(file_content):
    """Hex MD5 checksum of *file_content* (bytes)."""
    digest = hashlib.md5(file_content)
    return digest.hexdigest()
8c68b2ab87dccf1605593942b3abc1e495a0c8f7
35,112
def get_target_ids(node_field_values):
    """Collect the 'target_id' of every entity in a field."""
    return [target['target_id'] for target in node_field_values]
68e811b86d246cc0070934ce2a6dc940603e39f8
35,113
def match_rules(txt, rules):
    """Return (rule, position) for the rule whose begin tag occurs earliest
    in *txt*; (None, 10e100) when no rule matches."""
    first_begin_loc = 10e100  # sentinel larger than any real index
    matching_rule = None
    for rule in rules:
        begin_tag = rule[0]
        loc = txt.find(begin_tag)
        if -1 < loc < first_begin_loc:
            first_begin_loc = loc
            matching_rule = rule
    return (matching_rule, first_begin_loc)
35a89b274d06e21bc3d03dd5a7c2ece532030303
35,114
def get_embedding(word, model):
    """Return the embedding vector for *word* from a word-embedding model.

    :param word: str
    :param model: word embedding model (gensim-style ``.wv`` interface)
    :return: vector
    :raises KeyError: if *word* is not in the model vocabulary
    """
    if word not in model.wv.vocab:
        raise KeyError
    return model.wv[word]
d77fd6b03bea62859bbce8388bc0961c9fe6d449
35,115
def parse_potential_focal_methods(parser, focal_file):
    """Parse a source file and extract potential focal methods (non test
    cases), attaching a trimmed copy of the enclosing class info to each."""
    potential_focal_methods = []
    for parsed_class in parser.parse_file(focal_file):
        for parsed_method in parsed_class['methods']:
            method = dict(parsed_method)
            if method['testcase']:
                continue  # test cases are never focal methods
            focal_class = dict(parsed_class)
            focal_class.pop('argument_list')  # keep the class record light
            focal_class['file'] = focal_file
            method['class'] = focal_class
            potential_focal_methods.append(method)
    return potential_focal_methods
6de08bdf4a0dcc293a2e5650d15e7b0c44e63022
35,116
def estimate_evacuate_timeout(session, host):
    """Rough upper-bound estimate (seconds) for evacuating *host*, based on
    the memory currently in use by live VMs."""
    metrics_ref = session.xenapi.host.get_metrics(host)
    record = session.xenapi.host_metrics.get_record(metrics_ref)
    memory_used = int(record['memory_total']) - int(record['memory_free'])
    # Conservative estimate assuming a 1000 Mbps link; Dom0's memory is
    # counted but never transferred, which adds deliberate slack.
    return memory_used * 8. / (1000. * 1024 * 1024)
508b1c4df6549b30366c623640fce258163adc8b
35,117
def parse_group_name(group_name):
    """Return an ((int, int), prefix) tuple from a group name.

    Expects the group to be in the form ``{prefix}/{x}_{y}``.

    :raises ValueError: if *group_name* is not in the expected format.
    """
    prefix, sep, tail = group_name.rpartition('/')
    if not sep:
        raise ValueError('Bad group name: ' + group_name)
    parts = tail.split('_')
    if len(parts) != 2:
        raise ValueError('Bad group name: ' + group_name)
    return (int(parts[0]), int(parts[1])), prefix
b3151a19cd3b6cdd5028ded0967a637e592764a5
35,118
import json


def getConfig(configFileName='config.json'):
    """Load application configuration stored externally as JSON.

    Parameters
    ----------
    configFileName: str
        name of the file to be read

    Returns
    -------
    dict
        the decoded configuration data
    """
    with open(configFileName, 'r') as handle:
        return json.load(handle)
c430b50f4c5cc342e328b8555fa48c8cd2a7fe17
35,120
def tf_sum(q, d, c):
    """ID 1 for OHSUMED: sum of document term frequencies over the terms
    shared between query *q* and document *d* (*c* is unused here)."""
    shared_terms = set(q) & set(d)
    # Start at 0.0 so the result is always a float, matching the original.
    return sum((d[w] for w in shared_terms), 0.0)
088e5ecda2d79df2afa5a306e50d980658673834
35,122
import json


def decode_json(filename: str) -> list:
    """Load a file of newline-delimited JSON objects into a list.

    :param filename: file name of the json-lines file
    :return: list of decoded objects; undecodable lines are printed and
        skipped (best-effort, as in the original)

    The file stores one JSON document per line, e.g.::

        {name: "jsonfile1", attribute1: "aa", tweets: [tweet1, tweet2]}
        {name: "jsonfile2", attribute1: "aa", tweets: [tweet1, tweet2]}

    Fixes vs original: the file handle was never closed (now a ``with``
    block), and ``except BaseException`` also swallowed KeyboardInterrupt —
    decoding failures raise ``ValueError`` (``json.JSONDecodeError``), so
    that is what we catch.
    """
    ds = []
    with open(filename, 'r', encoding='utf-8') as f:
        for line in f:
            try:
                ds.append(json.loads(line))
            except ValueError as e:
                print(e)
    return ds
78fa34f206aa4c87879deeefd66790ab7491ed38
35,124
def get_ca_id_from_ref(ca_ref):
    """Parse a CA reference and return the CA ID.

    :param ca_ref: HTTP reference of the CA
    :return: the ID segment after the last '/'
    """
    return ca_ref.rsplit('/', 1)[1]
ece01d4566d6317e70c320eb86a52c2a7766b415
35,125
def cache_master_key(config):
    """Build a string from every cache-invalidating config element."""
    fields = (
        "str{use_string}",
        "vocab{vocab_size}",
        "pg{use_page}",
        "geom{use_geom}",
        "amt{use_amount}",
        "pad{pad_windows}",
        "len{window_len}",
    )
    return "_".join(fields).format(**config)
adf39851298b0c15afdb2e3645ef91df6df11376
35,126
def clean_multiple_coordsets(protein):
    """Delete all but the first coordinate set of *protein*."""
    n_sets = len(protein.getCoordsets())
    if n_sets > 1:
        # Repeatedly drop the last set until only the first remains.
        for _ in range(n_sets - 1):
            protein.delCoordset(-1)
    return protein
70e0c9355394b78331b802c40369b98c74ed75f1
35,127
import argparse


def parse_args(args):
    """Parse the command line for this tool (all flags are required)."""
    parser = argparse.ArgumentParser()
    for flag, help_text in (('--input', 'Path to input binary'),
                            ('--output', 'Path to output binary'),
                            ('--xcode-version', 'Version of Xcode')):
        parser.add_argument(flag, required=True, help=help_text)
    return parser.parse_args(args)
347f4fae9c289e8d3ce6f5a5a07383d180f59666
35,130
def block_quoter(foo):
    """
    Indent input with '> '. Used for quoting text in posts.

    Each line of the (stripped) input becomes its own '> '-prefixed line.
    The original concatenated the quoted lines WITHOUT the newline
    separators, collapsing multi-line input into one mangled line; this
    joins them back with '\n'. (The original's else-branch was also dead
    code: str.split always returns at least one element.)

    :param foo: text to quote
    :return: quoted text
    """
    foo = foo.strip()
    return '\n'.join('> ' + line for line in foo.split('\n'))
272f7269cf53749e70b77dac101d22acaca1831e
35,131
def hash_series(series):
    """Fast content hash of a pandas Series (covers index and values).

    Benchmarks from the original author:
      * hash(series.to_string())                          ~112 ms
      * hash(tuple(series.items()))                       ~2.24 ms
      * hash((tuple(values), tuple(index.values)))        ~1.77 ms
      * this raw-buffer approach                          ~82.3 µs
    """
    index_hash = hash(series.index.values.data.tobytes())
    values_hash = hash(series.values.data.tobytes())
    return hash((index_hash, values_hash))
252fe3ea429cecef88e376a19a189e3b3d45d3cf
35,133
def format_string(x: int, frame: str) -> str:
    """Format a count with its time-frame word, pluralizing when needed.

    :param x: the count
    :param frame: time-frame word (e.g. 'day')
    :return: '' for zero, ' 1 day ' for one, ' N days ' otherwise
    """
    if x == 0:
        return ''
    if x == 1:
        return f' {x} {frame} '
    return f' {x} {frame}s '
adccee40ba89d012c7ae75d07a6912cac0c8d7d0
35,134
def import_pipeline(pipeline):
    """Dynamically import ``pipelines.<pipeline>`` and return the attribute
    of the same name from that module."""
    module = __import__('pipelines.' + pipeline, fromlist=[pipeline])
    return getattr(module, pipeline)
d17347eab5af741adef237cfb0c01085b517664e
35,137
import time


def datetime_to_seconds_since_epoch(dt):
    """Seconds since the epoch for datetime *dt*.

    NOTE: ``time.mktime`` interprets the time tuple in *local* time, so a
    naive datetime is assumed to be in the machine's local timezone.
    """
    time_tuple = dt.timetuple()
    return time.mktime(time_tuple)
7f9b48591c0199aa1c1882fe84ecea3a7bd9526f
35,138
def luma(col):
    """ITU-R 601-2 luma value for a given RGB color.

    Same formula used by ``PIL.Image.Image.convert`` for RGB -> L.
    """
    red, green, blue = col
    return red * 299/1000 + green * 587/1000 + blue * 114/1000
e57a11476205a0b9dc287c1239e8064762161544
35,140
def build_ranking(teams):
    """Assign competition-style ranks to teams sorted by wins (descending).

    Tied teams share a rank; the next distinct win count jumps past the
    whole tie group (so ranks go e.g. 0, 0, 2).

    :param teams: list where each element starts with (name, wins, ...),
        already sorted by wins in descending order.
    :return: list of [rank, name, wins] in ascending rank order.
    """
    ranked = []
    prev_wins = float("inf")
    rank = -1
    tie_streak = 0  # teams seen since the last distinct win count
    for name, wins, *_ in teams:
        tie_streak += 1
        if wins < prev_wins:
            rank += tie_streak
            prev_wins = wins
            tie_streak = 0
        ranked.append([rank, name, wins])
    return ranked
7046e385bda56fd995fd261c253035b1ebb049a9
35,141
def net_income(ebt, tax):
    """
    Computes net income.

    Parameters
    ----------
    ebt : int or float
        Earnings before tax
    tax : int or float
        Tax expense

    Returns
    -------
    int or float
        Net income (earnings before tax minus tax expense)
    """
    return ebt - tax
7d72f10d98d3646837ad3f5eccb6c19d2900ea38
35,142
import os
import json
import subprocess


def GetTaskCount(experiment_id, isolate_hash, experiment_start):
    """Determines number of swarming tasks with experiment name.

    Shells out to swarming.py (path taken from the SWARMING_PY environment
    variable, defaulting to 'swarming.py' on PATH) and queries the
    chromium-swarm.appspot.com task-count endpoint.

    Args:
        experiment_id: experiment identifier; combined with the first four
            characters of isolate_hash to form the queried tag value.
        isolate_hash: isolate hash whose 4-char prefix disambiguates the tag.
        experiment_start: integer start timestamp for the count window.

    Returns:
        int: the task count reported by the swarming service.
    """
    # '%%3A' survives %-formatting as a literal '%3A' (URL-encoded ':').
    query_command = [
        'python',
        os.environ.get('SWARMING_PY', 'swarming.py'),
        'query', '-S', 'chromium-swarm.appspot.com',
        'tasks/count?tags=experiment_id%%3A%s&start=%d' %
        (experiment_id + isolate_hash[:4], experiment_start)
    ]
    # The endpoint returns JSON shaped like {"count": N}.
    return int(json.loads(subprocess.check_output(query_command))['count'])
e6371b166a44d0de28741f062f869d36f4247d0c
35,146
import platform


def is_gpu():
    """Returns True if the code is running on a GPU platform.

    NOTE(review): as written this compares the imported ``platform``
    *module* object to the string 'gpu', so it always evaluates to False.
    The author most likely intended to compare a string variable (e.g. a
    configured backend name) — confirm the intent before relying on this.
    """
    return platform == 'gpu'
cb8f41e2532d87dffcbfd32b03f02ddff297f0fe
35,147
def sanity_check_iob(naive_tokens, tag_texts):
    """Check that a list of IOB tags is valid for the given tokens.

    Validity rules enforced (via assert):
      * token and tag counts match;
      * the first tag is 'B-*' or 'O' (never 'I-*');
      * an 'I-*' tag only follows a 'B-*'/'I-*' tag with the same body;
      * after 'O', only 'B-*' or 'O' may follow.

    Args:
        naive_tokens: tokens split by .split()
        tag_texts: list of tags in IOB format

    Raises:
        AssertionError: when the tag sequence violates the IOB scheme.
        RuntimeError: when an unrecognized tag prefix is encountered.
    """
    def prefix(tag):
        # 'B'/'I' part of a 'B-XXX'/'I-XXX' tag; 'O' stays 'O'.
        if tag == "O":
            return tag
        return tag.split("-")[0]

    def body(tag):
        # Entity type ('XXX' of 'B-XXX'); None for 'O'.
        if tag == "O":
            return None
        return tag.split("-")[1]

    # same number check
    assert len(naive_tokens) == len(tag_texts), \
        f"""Number of tokens and tags doest not match. original tokens: {naive_tokens} tags: {tag_texts}"""

    # IOB format check
    prev_tag = None
    for tag_text in tag_texts:
        curr_tag = tag_text
        if prev_tag is None:  # first tag
            assert prefix(curr_tag) in ["B", "O"], \
                f"""Wrong tag: first tag starts with I. tag: {curr_tag}""" ""  # (trailing "" is a no-op concat kept from the original)
        else:  # following tags
            if prefix(prev_tag) in ["B", "I"]:
                # After B/I: continue the same entity with I, or start fresh.
                assert (
                    (prefix(curr_tag) == "I" and body(curr_tag) == body(prev_tag))
                    or (prefix(curr_tag) == "B")
                    or (prefix(curr_tag) == "O")
                ), f"""Wrong tag: following tag mismatch. previous tag: {prev_tag} current tag: {curr_tag}"""
            elif prefix(prev_tag) in ["O"]:
                # After O an entity may only begin with B (or stay O).
                assert prefix(curr_tag) in ["B", "O"], \
                    f"""Wrong tag: following tag mismatch. previous tag: {prev_tag} current tag: {curr_tag}"""
            else:
                raise RuntimeError(f"Encountered unknown tag: {prev_tag}.")
        prev_tag = curr_tag
4fb16ed2bd7a623a7dad331d8b7c8a5033a382ea
35,148
def bytes_to_encode_dict(dict_bytes):
    """Rebuild the encode dict from its on-file byte representation.

    *dict_bytes* is the bytes string found between 'DICT_START' and
    'DICT_END' in the file: comma-separated ``key: 'value'`` pairs.
    """
    decoded = dict_bytes.decode("utf-8")
    result = dict()
    for pair in decoded.split(","):
        key, value = pair.strip().split(": ")
        # Keys are ints; values lose their surrounding quote characters.
        result[int(key)] = value.replace("'", "")
    return result
e3ff6eb6ad47f198d40914487b6f7342ae0241c8
35,149
def rank_permutation(r, n):
    """Given r and n, return the permutation of {0,..,n-1} whose rank in
    lexicographical order equals r.

    :param r n: integers with 0 <= r < n!
    :returns: permutation p as a list of n integers
    :beware: computation with big numbers
    :complexity: `O(n^2)`
    """
    fact = 1
    for k in range(2, n):
        fact *= k  # fact == (n-1)! after the loop
    available = list(range(n))  # digits not yet placed
    perm = []
    for i in range(n):
        # Decompose r = q * fact + remainder; q selects the next digit.
        q, r = divmod(r, fact)
        perm.append(available.pop(q))
        if i != n - 1:
            fact //= (n - 1 - i)  # weight of the next position
    return perm
6a8ec6e3a2165796a17b69f2f8cb26bc4e0d7489
35,150
def isclassattr(a, cls):
    """Return True if *a* is defined as a class attribute on *cls* or any
    of its bases (instance attributes don't count)."""
    return any(a in c.__dict__ for c in cls.__mro__)
114c84f575d1b59a78cfa4d32b8c04da7006b7ae
35,151
def firstdigit(n):
    """Return the first non-zero decimal digit of *n*, or 0 for zero.

    Works for negatives and fractions. Scanning ``str(abs(n))`` for the
    first character in '1'..'9' also handles floats whose repr uses
    scientific notation (e.g. ``1e-07``), which crashed the original's
    ``str(n)[2:]`` slicing, and removes its implicit-None fall-through.

    :param n: int or float
    :return: int in 0..9
    """
    for ch in str(abs(n)):
        if ch in '123456789':
            return int(ch)
    return 0
3185bb1fc0f28521c01e6391eda42cfdebf6c33b
35,152
def list_s3_keys_in_bucket(s3client, bucket, prefix=''):
    """List the object keys under *prefix* in an S3 bucket.

    :s3client: boto3 client representing a connection with S3
    :bucket: name of the S3 bucket
    :prefix: base filepath to search under, default ''
    """
    listing = s3client.list_objects(Bucket=bucket, Prefix=prefix)
    return [entry['Key'] for entry in listing['Contents']]
dd39b7f7074315458a200b34f8bf4acf385442e6
35,153
def get_beta_diversity_metrics():
    """List scikit-bio's beta diversity metrics.

    The metrics listed here can be passed to
    ``skbio.diversity.beta_diversity``.

    Returns
    -------
    list of str
        Alphabetically sorted list of beta diversity metrics implemented
        in scikit-bio.

    See Also
    --------
    beta_diversity
    get_alpha_diversity_metrics
    scipy.spatial.distance.pdist

    Notes
    -----
    SciPy implements many additional beta diversity metrics not included
    here; see ``scipy.spatial.distance.pdist`` for details.
    """
    metrics = ['unweighted_unifrac', 'weighted_unifrac']
    return sorted(metrics)
b81ff498be70efb61ed92e92a8fd0a93b4d689bd
35,155
def split_name(a_name):
    """Split a full name into (first, last).

    A single word is treated as the last name; with more than two words,
    everything but the final word becomes the first name.

    examples = {
        'ok simple': ('ok', 'simple'),
        'solo': ('', 'solo'),
        'three part name': ('three part', 'name'),
        'name with-hyphen': ('name', 'with-hyphen'),
        '': ('', '')
    }

    :param a_name: str
    :return: ('first', 'last')
    """
    parts = a_name.split()
    if not parts:
        return '', ''
    last = parts[-1]
    first = ' '.join(parts[:-1])
    return first, last
c4b735c723152bde677c9f85d5a6b0fe90deac1f
35,156
from typing import List


def is_solved(puzzle_digits: str, unit_list: List[List[str]], values):
    """Verify that *values* solves the puzzle.

    A puzzle is solved when every unit's squares take exactly the set of
    allowed digits (i.e. each unit is a permutation of them).

    Args:
        puzzle_digits: the possible choices for a puzzle square
        unit_list: a list of units
        values: a candidate solution, or False

    Returns:
        True if solved, False otherwise.
    """
    if values is False:
        return False
    expected = set(puzzle_digits)
    return all({values[square] for square in unit} == expected
               for unit in unit_list)
a19e160ebae803785f04f39023a8c4d5d7fd9935
35,157
from pathlib import Path
import sys
import os


def is_conda():
    """Detect whether we are running inside a conda environment.

    Truthy when the interpreter prefix contains a ``conda-meta`` directory
    or a conda environment variable is set (returns that variable's string
    value in the latter case).
    """
    if Path(sys.prefix, 'conda-meta').exists():
        return True
    return (os.environ.get("CONDA_PREFIX", False)
            or os.environ.get("CONDA_DEFAULT_ENV", False))
10e42c06f64b4d6b508c7374ee249dc8c93c7838
35,159
import hashlib


def compute_hash(signature):
    """Hex SHA-512 digest of the UTF-8 encoding of *signature*."""
    digest = hashlib.sha512(signature.encode())
    return digest.hexdigest()
bd94e36e9064c2b8a7e05c59429f36acab7a99a8
35,160
def fitsum(list):
    """Total fitness of the given values, as a float; used to build the
    wheel of fortune.

    NOTE: the parameter name shadows the builtin ``list``; it is kept for
    backward compatibility with existing callers. The manual index loop of
    the original is replaced by the builtin ``sum`` with a float start
    value (preserving the float result).
    """
    return sum(list, 0.0)
ced8f524f16f7174417eb3f9ce9effcab84a65ae
35,162
from datetime import datetime


def get_timestamp_id(year: bool = True, month: bool = True, date: datetime = None) -> str:
    """
    Returns timestamp id (tp_id) in format '2021-01', '2021' or '01'.

    The original used ``date: datetime = datetime.utcnow()`` as a default,
    which is evaluated ONCE at import time and freezes the timestamp for
    the whole process; ``None`` now means "current UTC time, computed per
    call". Passing an explicit ``date`` behaves as before.
    """
    if date is None:
        date = datetime.utcnow()  # per-call default, not import-time
    if not year:
        return date.strftime('%m')
    elif not month:
        return date.strftime('%Y')
    return date.strftime('%Y-%m')
ccf9fbaae3b93b239b422e15f633cf1347fc47b9
35,164
import argparse


def parse_arguments():
    """Parse optional JIRA credentials from the command line.

    :return: parsed argument namespace
    """
    parser = argparse.ArgumentParser()
    for positional in ("JIRA_USER", "USER_PSWD"):
        parser.add_argument(positional, nargs='?', default='')
    return parser.parse_args()
0953ec3a4ed6a437838d91d8dec1b0f9021d2a07
35,166
import re


def post_process(s):
    """Make output directly usable by the layout module.

    Applies an ordered pipeline of regex and literal rewrites to the XML-ish
    dialog markup; the order matters (e.g. three-part tag names must be
    collapsed before two-part ones).
    """
    # Collapse hyphenated tag names: <a-b-c> -> <abc>, then <a-b> -> <ab>.
    s = re.sub('(</?)([a-z]+)-([a-z]+)-([a-z]+)', r'\1\2\3\4', s)
    s = re.sub('(</?)([a-z]+)-([a-z]+)', r'\1\2\3', s)
    # Rename the text= attribute to label= on button/fixed* widgets.
    s = re.sub('(<(checkbox|(cancel|help|ignore|ok|push|more|no|radio|reset|retry|yes)button|(fixed(info|text)))[^>]*) text=', r'\1 label=', s)
    # Drop geometry attributes entirely.
    s = re.sub(' (height|width|x|y)="[0-9]*"', '', s)
    # Mark translatable attributes with a leading underscore.
    s = re.sub(' (label|text|title)="', r' _\1="', s)
    # Escape bare ampersands (skipping already-escaped '&amp;'-like runs).
    s = re.sub('&([^m][^p]*[^;]*)', r'&amp;\1', s)
    # hide="..." becomes the inverse show="false".
    s = re.sub(' hide="(TRUE|true|1)"', ' show="false"', s)
    # Literal attribute/element renames expected by the layout module.
    s = s.replace('<modaldialog', '<modaldialog sizeable="true"')
    s = s.replace(' rid=', ' id=')
    s = s.replace(' border="true"', ' has_border="true"')
    s = s.replace(' def-button="true"', ' defbutton="true"')
    s = s.replace(' drop-down="', ' dropdown="')
    s = s.replace(' tab-stop="', ' tabstop="')
    return s
2a8d910d7dad63560411e2359fcc140fd5ffa531
35,168
def _select_best_indel(indels): """Select the highest quality indel, based on the quality, prefering low earlier positions above later positions in case of ties.""" def _indel_by_quality_and_position(indel): # The negative position is used to select the first # of equally quality indels return (float(indel.qual), -indel.pos) return max(indels, key = _indel_by_quality_and_position)
fcee293103c86d7683d54ea030f5b6d0c4b31a21
35,170
def temporal_affine_backward(dout, cache):
    """
    Backward pass for the temporal affine layer.

    Input:
    - dout: Upstream gradients of shape (N, T, M)
    - cache: Values from the forward pass

    Returns a tuple of:
    - dx: Gradient of input, of shape (N, T, D)
    - dw: Gradient of weights, of shape (D, M)
    - db: Gradient of biases, of shape (M,)
    """
    x, w, b, out = cache
    N, T, D = x.shape
    M = b.shape[0]
    # Flatten the batch and time axes so this reduces to a plain affine op.
    dout_flat = dout.reshape(N * T, M)
    x_flat = x.reshape(N * T, D)
    dx = dout_flat.dot(w.T).reshape(N, T, D)
    dw = x_flat.T.dot(dout_flat)
    db = dout_flat.sum(axis=0)
    return dx, dw, db
e43b1ada3db1cac4189f1c4dbc9aa4e7a9c40b64
35,173
from typing import Tuple
from typing import List
from typing import Dict


def selection(triple: Tuple[str, str, str], variables: List[str]) -> Dict[str, str]:
    """Apply a selection on an RDF triple, producing a solution mapping.

    Args:
      * triple: RDF triple on which the selection is applied.
      * variables: input variables of the selection (None = not selected).

    Returns:
      A solution mapping from each non-None variable to the corresponding
      triple term.

    Example:
      >>> selection((":Ann", "foaf:knows", ":Bob"), ["?s", None, "?knows"])
      {"?s": ":Ann", "?knows": ":Bob"}
    """
    return {var: term for var, term in zip(variables, triple) if var is not None}
fee52583e62d589863214e74e99fc427a0b6577d
35,175
import re


def parse_firewall_rule(rule_str):
    """Transform a ';'-separated firewall rule string into a list of dicts.

    parameter: (string) rule_str — firewall rules for the project
    Return: list of {'IPProtocol': ..., 'ports': [...]} dicts
    :raises ValueError: when a field does not match the expected format
    """
    pattern = re.compile(r'ipprotocol=([\w\d_:.-]+),ports=([ /\w\d@_,.\*-]+)', flags=re.I)
    rules = []
    for field in rule_str.split(';'):
        match = pattern.match(field)
        if not match:
            raise ValueError('Could not parse field: %s' % (field,))
        rules.append({'IPProtocol': match.group(1),
                      'ports': match.group(2).split(',')})
    return rules
4b3f364f9d102b664ca6f7dab4956ad96be810ae
35,178
import os


def is_running_in_appveyor():
    """True when the APPVEYOR environment variable is set to 'true'
    (case-insensitive)."""
    value = os.environ.get("APPVEYOR", "")
    return value.lower() == "true"
750ca3cdbf2c34682376a2c969a1734a487d4c06
35,179
def trace_eci(basis, ix_to_eci):
    """Walk a basis.json/eci.json object and overwrite eci values in place.

    :basis: json
    :ix_to_eci: dict {basis_function_index: eci_value}
    :returns: json (the mutated *basis*)
    """
    for cf in basis["cluster_functions"]:
        lfix = cf["linear_function_index"]
        if lfix in ix_to_eci:
            cf["eci"] = ix_to_eci[lfix]
        elif "eci" in cf:
            # Indices absent from the map lose any stale eci entry.
            del cf["eci"]
    return basis
8655e5d288482d9b8e507af724aaacce35497d63
35,180
def solution(l):
    """Count 'lucky triples' (i < j < k with l[j] % l[i] == 0 and
    l[k] % l[j] == 0).

    Rather than squaring the adjacency matrix, track for each position how
    many earlier elements divide it; when a divisibility edge (j, k) is
    found, every pair already ending at j extends to a triple ending at k,
    so that count is accumulated directly.
    """
    n = len(l)
    if n <= 2:
        return 0
    pairs_ending_at = n * [0]
    lucky_triples = 0
    for k in range(n):
        for j in range(k):
            if l[k] % l[j] == 0:
                pairs_ending_at[k] += 1
                lucky_triples += pairs_ending_at[j]
    return lucky_triples
d2184d62e1b8f8f03cfd8c0d036fd64d4ad31fcb
35,181
def AAPILoad():
    """Hook executed while the Aimsun template is loading; returns 0."""
    return 0
e32c7c44848faa92cfd9fca9ddc7ea780c9c4f0b
35,182
def recvall(sock, size: int):
    """Receive exactly *size* bytes from *sock*.

    Returns None if the connection closes before *size* bytes arrive.
    Chunks are collected in a list and joined once at the end instead of
    the original's repeated ``bytes += chunk``, which copies the whole
    buffer on every iteration (quadratic for many chunks).
    """
    chunks = []
    remaining = size
    while remaining:
        chunk = sock.recv(remaining)
        if not chunk:
            return None  # peer closed early
        chunks.append(chunk)
        remaining -= len(chunk)
    return b"".join(chunks)
6a0f6814cdaf6847d467f4c5620c3897b1ff2ac8
35,183
import torch


def geomean(iterable):
    """Geometric mean over the last dimension of a tensor.

    :param iterable: a torch.Tensor
    :return: the geometric mean of the absolute values (a tiny epsilon
        guards log10 against zeros); sign handling from an earlier
        revision remains disabled.
    """
    stabilized = torch.add(torch.abs(iterable), 1e-30)
    log_sum = torch.sum(torch.log10(stabilized), -1)
    mean_log = torch.mul(log_sum, 1 / iterable.size()[-1])
    return torch.pow(10, mean_log)
89078f861824c84b95a9be7b5dcacff116154125
35,184
def type_tuple(*args):
    """Return the arguments as a sorted tuple."""
    ordered = sorted(args)
    return tuple(ordered)
344d11fbb0d751b46f1ca47465804a7a70b55c4f
35,186
import imp


def is_pkg(module):
    """
    Return :data:`True` if a Module represents a package.

    NOTE(review): relies on the deprecated ``imp`` module, which was
    removed in Python 3.12 — this needs porting before running on modern
    interpreters. ``module.kind`` is presumably set by this project's own
    module scanner; confirm against the caller.
    """
    return module.kind == imp.PKG_DIRECTORY
8fbed6c8c16c9125baef681664cacc9edaa05cc1
35,187
def is_IPv4(ip_string):
    """Return True if the string is an IPv4 address: four decimal fields,
    each in 0..255, separated by dots.

    Fixes vs original: non-numeric fields (e.g. 'a.b.c.d') now return
    False instead of raising ValueError from int(), and negative fields
    like '-1' are rejected (isdigit() refuses the minus sign) instead of
    slipping past the single ``> 255`` check.
    """
    fields = ip_string.split(".")
    if len(fields) != 4:
        return False
    for field in fields:
        if not field.isdigit():
            return False
        if int(field) > 255:
            return False
    return True
1a0b20b4b366e8f4e19e225d32b7a887aed3fe17
35,189
import os


def get_imlist(path, NUM_PERSONS, NUM_IMAGES_PER_PERSON):
    """Return [filenames, labels] for the requested persons and images.

    Image file N for person P is expected at ``path/<N+1 + P*10>.pgm``;
    files that do not exist are silently skipped.
    """
    list_filenames = []
    list_labels = []
    for person in range(NUM_PERSONS):
        for face in range(NUM_IMAGES_PER_PERSON):
            number = (face + 1) + person * 10
            filename = path + os.sep + str(number) + '.pgm'
            if os.path.exists(filename):
                list_filenames.append(filename)
                list_labels.append(person)
    return [list_filenames, list_labels]
7cb9417c962497e3f48abee8fdff325587e1523d
35,190
def extractValue(indexString, content):
    """Extract the integer that follows *indexString* in *content*.

    Finds the marker, skips past it plus one character (a newline), then
    reads digit characters (ignoring ',') until a '<' is reached.

    indexString - the string to search for.
    content - the content to search in.

    Returns: the integer value.
    Raises: ValueError if the marker is not found.
    """
    index = content.find(indexString)
    if index == -1:
        raise ValueError('String not found!', indexString)
    index += len(indexString) + 1
    digits = ''
    while content[index] != '<':
        if content[index] != ',':  # thousands separators are dropped
            digits += content[index]
        index = index + 1
    return int(digits)
30daae4a53a4d898e5d8a9a1272cf7f0c3e170d2
35,191
import time


def wait_for_ego_vehicle(world):
    """Loops until a hero vehicle is spawned, then returns it.

    Blocks indefinitely (polling once per second) if no vehicle actor with
    role_name 'hero' ever appears.

    Note: The loop ticks the simulation.
    """
    # Connect to the ego-vehicle spawned by the scenario runner.
    while True:
        time.sleep(1)
        possible_actors = world.get_actors().filter('vehicle.*')
        for actor in possible_actors:
            # The scenario runner tags the ego vehicle with role_name 'hero'.
            if actor.attributes['role_name'] == 'hero':
                return actor
        # Advance the simulation so new actors can spawn before re-polling.
        world.tick()
22b584ef02a4db6f6c856ef7e69cf1a17e7bb150
35,192
def parse_context_table_records_list(records_list: list, fmt: str, is_delete: bool = False):
    """
    Parse records given as arguments in context-table management commands.

    Args:
        records_list: the list of ':'-separated record strings
        fmt: the format of each record, e.g. ``id:key:value``
        is_delete: whether this is a delete request

    Returns:
        (list) The records, in request payload format.

    Raises:
        ValueError: when a record does not match *fmt*.
    """
    keys = fmt.split(':')
    records = []
    for record_item in records_list:
        values = record_item.split(':')
        if len(keys) != len(values):
            raise ValueError('records argument is malformed.')
        record = dict(zip(keys, values))
        if is_delete:
            record['key'] = ''  # delete requests blank the key
        value = record.get('value')
        if value:
            record['value'] = value.split(';')  # multi-values use ';'
        elif value == '':
            record['value'] = []
        records.append(record)
    return records
f03b077cfa421688a7e4e58058c0c4ef7d9e619b
35,193
def thr_half(tensor):
    """Per-feature min and half of the min-to-max range over the batch
    dimension (dim 0), both with keepdim."""
    lo = tensor.min(0, keepdim=True).values
    hi = tensor.max(0, keepdim=True).values
    return lo, (hi - lo) / 2.0
7d403d048f9c4f914474e4639be7ec1a1a012020
35,194
def _get_all_shortcuts(directories): """ Args: directories (deque of str objects): All directories up to but excluding the function folder Returns: list: All possible shortcuts to the mcfunction path str: The mcfunction path as specified by minecraft Examples: >>> i = InFileConfig() >>> directories = deque(["ego", "floo_network", "init"]) >>> i._get_all_shortcuts(directories) (['ego:floo_network/init', 'floo_network/init', 'init'], 'ego:floo_network/init') """ # gets the mcfunction path mcfunction_path = directories.popleft() + ":" mcfunction_path += "/".join(directories) # shortcuts also has the mcfunction path to map to itself to pass the FunctionBuilder containment test shortcuts = [] shortcuts.append(mcfunction_path) # gets all shortcuts to the full name while directories: shortcut = "/".join(directories) shortcuts.append(shortcut) directories.popleft() return shortcuts, mcfunction_path
ad8912b1f5ea58ae14ec8fd139b009d745f38bf5
35,195
def leiaint(num):
    """
    Keep prompting until the user types a valid integer.

    NOTE(review): the ``num`` argument is never used as an input — it is
    immediately overwritten by the value read from stdin. It is kept only
    for interface compatibility with existing callers.

    :param num: placeholder, overwritten by the value read from stdin
    :return: the integer typed by the user
    """
    while True:
        try:
            # int() raises ValueError for non-integer input, repeating the prompt.
            return int(input('digite um nr: '))
        except ValueError:
            print('ERRO! DIGITE UM NR INTEIRO')
ed300dc792e8af3c295f6dc3e56286931fba280e
35,196
def quote_path(path: str) -> str:
    """
    Quote a file path if it contains whitespace.
    """
    needs_quoting = any(ws in path for ws in (" ", "\t"))
    return f'"{path}"' if needs_quoting else path
fae1dc338fe672871c08ef4b5aa2160dacbba650
35,197
import re


def remove_comments(tex_source):
    """Delete latex comments from TeX source.

    Parameters
    ----------
    tex_source : str
        TeX source content.

    Returns
    -------
    tex_source : str
        TeX source without comments.
    """
    # A '%' starts a comment unless escaped as '\%'; strip to end of line.
    # Expression via http://stackoverflow.com/a/13365453
    pattern = re.compile(r'(?<!\\)%.*$', flags=re.M)
    return pattern.sub('', tex_source)
efad4eba12e93af92ca55b6c926d0acf14b653c4
35,198
from datetime import datetime
import pytz


def from_unixtime(unixtime_, timezone_="UTC"):
    """
    Convert a unixtime int, *unixtime_*, into python datetime object

    Parameters
    ----------
    `unixtime_` : int
        Unixtime, i.e. seconds since epoch (Jan 01 1970 00:00:00 UTC)
    `timezone_` : string
        The timezone of the output date from Olson timezone database.
        Defaults to utc.

    Returns
    -------
    datetime.datetime
        Python datetime object (timezone aware)

    Notes
    -----
    pytz http://pythonhosted.org/pytz/
    Unit test: UKPVLiveTestCase.test_to_unixtime
    """
    target_tz = pytz.timezone(timezone_)
    return datetime.fromtimestamp(unixtime_, tz=target_tz)
e23c795dd83c45eae376d70af7ea8632b844af43
35,199
from typing import Tuple


def split_title(s: str) -> Tuple[str, str]:
    """Split a title into hashes and text.

    The first space-free token (e.g. ``"##"``) is separated from the
    remaining text.

    Bug fixed: the original indexed the second element unconditionally and
    raised IndexError for titles without a space; now the text part is the
    empty string in that case.
    """
    parts = s.split(' ', 1)
    text = parts[1] if len(parts) > 1 else ''
    return parts[0], text
035f6da40097ea1dc1debc0f590fd77471606b42
35,200
def is_mariadb(self):
    """
    Method to be monkey-patched onto connection, to allow easy checks for
    if the current database connection is MariaDB
    """
    # MariaDB advertises itself in the server-info/version string.
    with self.temporary_connection():
        return 'MariaDB' in self.connection.get_server_info()
fb8a1c0fcdca6daa3b6b6f343bbac265c03e59f5
35,204
def add_dimention(tensor):
    """Prepare a single tensor for networks built with ``Sequential``.

    Args:
        tensor: a single tensor without the batch dimension.

    Returns:
        tensor: the input with a leading batch dimension added, cast to
        float64 so it can be processed by Sequential in pytorch.
    """
    # unsqueeze(0) inserts the batch axis; .double() == .to(torch.float64).
    return tensor.unsqueeze(0).double()
03191db56a073f0a06b1358fe05719e57c8d9bec
35,205
def get_number_rows(ai_settings, ship_height, alien_height):
    """calculate how many row can the screen hold"""
    # Reserve room for the ship plus a three-alien-high margin at the top.
    free_height = ai_settings.screen_height - 3 * alien_height - ship_height
    # Each row occupies two alien heights (the alien plus a gap below it).
    return int(free_height / (2 * alien_height))
4cd769a162bc47447293d0ac34ff86298e9beb65
35,206
def valid_pt(pt, shape):
    """ Determine if a point (indices) is valid for a given shaped """
    # Every index must be non-negative and strictly below its dimension size.
    return all(0 <= i < j for i, j in zip(pt, shape))
8d2ff4de6666bf60f128493b4a41859ba0a79a32
35,207
def length(x):
    """Get length of elements"""
    # Relies on the argument exposing a numpy-style ``size`` attribute.
    return x.size
70310b7b09173ece02db4c4aff4c14d53fc22f78
35,210
def error_func(guess, x, data, data_model):
    """
    @param guess : parameter list:: an estimate of the model parameters of the final solution
    @param x : numpy array of float:: x-values
    @param data : numpy array of y-values
    @param data_model : function to be fitted
    @return: numpy array of float:: differences between the data and the model
    """
    predicted = data_model(x, *guess)
    return data - predicted
7ee32c6ec8738a46936205c7235e789ea7d2bd5f
35,211
def parse_file(filename):
    """
    Parses file for error messages in logs

    Args:
        filename: path of the log file to scan

    Returns:
        error_count: count of error messages in file
        error_msgs: list of error messages
    """
    marker = 'error -'
    error_msgs = []
    with open(filename, 'r') as handle:
        for line in handle:
            # Case-insensitive search for the error marker.
            pos = line.lower().find(marker)
            if pos == -1:
                continue
            # Message begins one character past the marker (skip the space).
            error_msgs.append(line[pos + len(marker) + 1:].strip())
    return len(error_msgs), error_msgs
855ad9411646961b3afdec14e7b3547af81fae84
35,214
import subprocess


def run_cmd(args_list):
    """
    Take as arguments those arguments that we need to run as a command
    in the shell. Return the decoded and stripped results of the command
    runned in the shell.

    Args:
        args_list (list): list of the arguments of the command to run

    Returns:
        [str]: output of the command in the shell, or None when the command
        produced no stdout (the error output is printed instead)
    """
    print("Running system command: {0}".format(" ".join(args_list)))
    # BUG FIX: the original passed an argument *list* together with
    # shell=True; on POSIX that executes only the first element as the
    # command and feeds the rest to the shell as positional parameters.
    # With a list, shell must stay False so every argument reaches the program.
    proc = subprocess.Popen(
        args_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    s_output, s_err = proc.communicate()
    if s_output:
        return s_output.decode("utf8").strip()
    else:
        print(f"SOMETHING GO WRONG \U0001F447\n{s_err}")
decb9bb9e8048ae1469349fed8082f5e353d0a4d
35,215
import pickle


def pickle_dump(obj, path):
    """ Dump a pickle """
    # Binary mode is required by the pickle protocol.
    with open(path, 'wb') as sink:
        return pickle.dump(obj, sink)
9fb609bcbee03f2294bd4b76bdbc1b0f0ad93875
35,217
def _strip_right(str, suffix): """Returns str without the suffix if it ends with suffix.""" if str.endswith(suffix): return str[0: len(str) - len(suffix)] else: return str
fdf03a237c353cc1579aec89fcc18b69ca2af8da
35,219
import click


def false_to_none(ctx, param, value):
    """Convert Python False to a None

    Click option callback: any truthy value becomes True, any falsy value
    (False, 0, '', None, ...) becomes None. ``ctx`` and ``param`` are part
    of the click callback signature and are unused.

    NOTE: the original wrapped the truthiness test in try/except ValueError,
    but ``if value`` can never raise ValueError — that handler (and its
    click.BadParameter re-raise) was dead code and has been removed.
    """
    return True if value else None
881d240d1ab961a80eb6a861c321401dc9d5fa59
35,222
def repeat_tensor_for_each_element_in_batch(torch_tensor, n):
    """ Repeats a certain torch tensor n times for each element in a batch.

    :param torch_tensor: given torch tensor
    :param n: number of repeats
    :return: new tensor, where every row of torch_tensor is repeated n times
    """
    # repeat_interleave along dim 0 duplicates each batch element n times
    # consecutively — equivalent to unsqueeze(1).repeat(1, n, ...).view(-1, ...).
    return torch_tensor.repeat_interleave(n, dim=0)
1fef4542e953a49c483c81c6094d5677092e5b66
35,223
def dig_deeper(entry, field, res, depth=10):
    """A helper function for :func:`get_wiktionary_field_strings`.

    It recursively locates the target field.

    Args:
        entry (dict or list): the entity to investigate
        field (str): the field to look up
        res (list): the list of found entities to update
        depth (integer): maximum recursion depth (otherwise this does blow up
            memory for some entries like "cat")

    Returns:
        (list): the updated list of found entities
    """
    if depth > 0:
        if isinstance(entry, dict):
            for key, val in entry.items():
                if field == key:
                    # Append truthy matches only; falsy values are skipped
                    # and the scan of this dict continues.
                    if entry[key]:
                        # if isinstance(entry[key], str):
                        res.append(entry[key])
                        return res
                elif isinstance(val, list):
                    for i in val:
                        # NOTE(review): the budget shrinks once per list
                        # element, not only per nesting level — presumably to
                        # cap total work; confirm this is intended.
                        depth -= 1
                        # NOTE(review): recurses on `val` (the whole list)
                        # rather than on `i`, so the same list is re-scanned
                        # once per element — looks like a bug; confirm before
                        # changing, since results depend on it.
                        res = dig_deeper(val, field, res, depth)
        elif isinstance(entry, list):
            for i in entry:
                depth -= 1  # same per-sibling budget decrement as above
                res = dig_deeper(i, field, res, depth)
        return res
    else:
        # Depth budget exhausted: return what has been collected so far.
        return res
6332ed989b2815a7a4e5247e7b9f28f3a88b11be
35,225
def ranges_to_indices(range_string):
    """Converts a string of ranges to a list of indices"""
    indices = []
    for chunk in range_string.split('/'):
        if ':' not in chunk:
            indices.append(int(chunk))
            continue
        first, last = (int(part) for part in chunk.split(':'))
        # The end of a span is inclusive, so extend range() by one.
        indices.extend(range(first, last + 1))
    return indices
7f3fe841c9f0c2e309013184ffcb6a35c1dab42b
35,226
import math


def polar(x, y):
    """
    Cartesian coordinates to polar coordinates; adapted to fit compass view.
    """
    radius = math.sqrt(x ** 2 + y ** 2)
    # Negate the mathematical angle and rotate by 90 degrees to fit the
    # compass orientation.
    angle = math.radians(90) - math.atan2(y, x)
    return angle, radius
54461bb1fcdb6a359966d8436e883a5f1206a8d8
35,227
def is_error_start(line):
    """Returns true if line marks a new error."""
    # Error headers look like "==<pid>== ... ERROR ...".
    return "ERROR" in line and line.startswith("==")
a76fb0b424098c1f60c1b63a1fc36ef08a9fb8f2
35,228
def subexpr_before_unbalanced(expr, ltok, rtok):
    """Obtains the expression prior to last unbalanced left token."""
    # Cut at the right-most left token; `post` is everything after it.
    subexpr, _, post = expr.rpartition(ltok)
    nrtoks_in_post = post.count(rtok)
    # While the tail still contains right tokens, the left token we cut at
    # was balanced — back up one more left token per unmatched right token.
    while nrtoks_in_post != 0:
        for i in range(nrtoks_in_post):
            subexpr, _, post = subexpr.rpartition(ltok)
        nrtoks_in_post = post.count(rtok)
    # Strip everything up to the nearest enclosing right token, then up to
    # a remaining left token, leaving only the bare sub-expression.
    _, _, subexpr = subexpr.rpartition(rtok)
    _, _, subexpr = subexpr.rpartition(ltok)
    return subexpr
f8ed685a585c23ec9fa82610fdc5f7df7ca5b9e2
35,229
def somevalues(ddtt):
    """returns some values"""
    # Pull the name, construction name and raw object list off the record.
    name = ddtt.Name
    construction = ddtt.Construction_Name
    raw_obj = ddtt.obj
    return name, construction, raw_obj
12019cbbfd276b07b1c5e6f69270b2f15f26812b
35,230
def ExtractCommand(line):
    """
    Input: a line that might contain a command and some comment
    Output: the command followed by a newline (just '\\n' when the line holds
            no command)
    Purpose: extract the command from all the giberish

    Bug fixed: the original used ``line[:line.find("//")]``, which for lines
    without a comment evaluates to ``line[:-1]`` and silently dropped the
    last character whenever the line had no trailing newline. It also kept
    an unused ``result`` local.
    """
    # Keep only the text before an optional '//' comment, trim whitespace.
    command = line.split("//", 1)[0].strip()
    return command + '\n'
c6b0a29182c1f3cc1e4455266279d1ae7730d063
35,231
import torch


def soft_reward(pred, targ):
    """
    BlackBox adversarial soft reward. Highest reward when `pred` for `targ`
    class is low. Use this reward to reinforce action gradients.

    Computed as: 1 - (targ pred).

    Args:
        pred: model log prediction vector, to be normalized below
        targ: true class integer, we want to decrease probability of this class

    NOTE(review): the original also computed ``pred_prob = torch.exp(pred)``
    but never used it — the reward is taken from the *log* predictions
    directly. The dead computation has been removed; if probabilities were
    intended, the gather below should index ``torch.exp(pred)`` instead.
    """
    # pred = F.softmax(pred, dim=1)
    gather = pred[:, targ]  # gather target predictions
    r = torch.ones_like(gather) - gather
    return r.mean()
d6a420b49d22d87c2d5eb4662a1ac3d1c3175660
35,235
import os
import click


def list_local_plugins(plugin_type, plugins_path, plugin_details):
    """List local plugins with details."""
    installed_plugins = []
    # next(os.walk(...))[1] yields only the immediate sub-directories.
    for plugin_name in next(os.walk(plugins_path))[1]:
        details = plugin_details(plugin_name)
        installed_plugins.append(plugin_name)
        click.secho(details)
    if not installed_plugins:
        click.secho("[*] You do not have any {0}s installed, "
                    "try installing one with `honeycomb {0} install`".format(plugin_type))
    return installed_plugins
19810446dd4a11666c4a5c1a677f6cae21ea5da6
35,236
import psutil


def get_mountpoints(system):
    """
    Enumerates the physical device mountpoints.

    On Linux, physical filesystem types are read from /proc/filesystems
    (lines not flagged "nodev") and mount entries from /etc/mtab; on other
    systems psutil's partition list is used directly.

    NOTE: the original also extracted the device column and rewrote
    'none' devices to '' — that local was never used afterwards, so the
    dead code has been removed.
    """
    if system != 'Linux':
        # FreeBSD / Windows: psutil already reports only real partitions.
        return [part.mountpoint for part in psutil.disk_partitions()]

    # Filesystem types backed by a physical device ("nodev" lines are virtual).
    phydevs = []
    with open("/proc/filesystems", "r") as f:
        for line in f:
            if not line.startswith("nodev"):
                phydevs.append(line.strip())

    mountpoints = []
    with open('/etc/mtab', "r") as f:
        for line in f:
            if line.startswith('none'):
                continue
            fields = line.split()
            # Column layout: device, mountpoint, fstype, ...
            if fields[2] not in phydevs:
                continue
            mountpoints.append(fields[1])
    return mountpoints
8bee768eb16ab96aa145afd601161df5c22ab586
35,237
def check_py_file(files):
    """
    Return a list with only the python scripts (remove all other files).
    """
    # A file qualifies when its last three characters are '.py'.
    return [name for name in files if name[-3:] == '.py']
942f4f2560eaeab540be59a88d5e8211efc787a0
35,241
def py_import_order(python_source_path):
    """Validate that python imports are alphabetized.

    Args:
        python_source_path: path of the Python source file to check.

    Returns:
        None when every import block is alphabetized; otherwise an error
        message containing the correctly sorted blocks.
    """

    def _validate_block(import_block):
        """Ensure that a single block is ordered properly."""
        if not import_block:
            return []
        sorted_import_block = sorted(import_block, key=lambda i: i.lower())
        if sorted_import_block == import_block:
            return []
        return ['\n'.join(sorted_import_block)]

    def _validate_imports(data):
        """Test that a file's contents are ordered properly."""
        imports = []
        from_imports = []
        corrected_import_blocks = []
        for line in data.splitlines():
            # A non-import line terminates the current block of its kind.
            if line.startswith('import '):
                imports.append(line)
            else:
                corrected_import_blocks += _validate_block(imports)
                imports = []

            if line.startswith('from '):
                from_imports.append(line)
            else:
                corrected_import_blocks += _validate_block(from_imports)
                from_imports = []

        # Though rare, if a file ends with an import we must still validate them.
        corrected_import_blocks += _validate_block(imports)
        corrected_import_blocks += _validate_block(from_imports)

        if not corrected_import_blocks:
            return None

        suggestions = '\n\n--------\n\n'.join(corrected_import_blocks)
        # BUG FIX: the message previously hard-coded "(unknown)" while still
        # passing `filename=` to .format(); use the placeholder so the
        # offending file path is actually reported.
        return ('File {filename} has non-alphabetized import blocks. '
                'Suggested order:\n\n{suggestions}').format(
                    filename=python_source_path,
                    suggestions=suggestions)

    # The unreachable trailing `return None` after this statement was removed.
    with open(python_source_path) as handle:
        return _validate_imports(handle.read())
278e0c839f53064a925275c20ba9527e12eb854e
35,243