content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import unittest def load_tests_from_testcase(test_case_class, args): """ load tests from testcase class Args: test_case_class: TestCase class object args: parse args Returns: unittest.TestSuite() """ if 'all' in args.case_list: # Load all test cases # test_suite = unittest.TestLoader().loadTestsFromTestCase(RawTestCase) test_suite = unittest.TestSuite() tc_names = unittest.TestLoader().getTestCaseNames(test_case_class) if not tc_names and hasattr(test_case_class, 'runTest'): tc_names = ['runTest'] for tc_name in tc_names: if tc_name.split("test_")[1] in args.exclude_case_list: continue test_suite.addTest(test_case_class(tc_name, args)) else: case_name_list = [] args_list = [] for case in args.case_list: if case in args.exclude_case_list: continue case_name = "test_" + case case_name_list.append(case_name) args_list.append(args) # Load the spec test cases # test_suite = unittest.TestSuite(map(RawTestCase, case_name_list)) test_suite = unittest.TestSuite(map(lambda x, y: test_case_class(x, y), case_name_list, args_list)) return test_suite
1c5384ec9d3296e1c7c18e2848a8f32089ace675
11,912
def PyTmHMSXtoS(h, m, s, x):
    """Convert an hours/minutes/seconds/milliseconds split into seconds.

    Parameters
    ----------
    h : int
        Hours.
    m : int
        Minutes.
    s : int
        Seconds.
    x : float
        Fractional seconds (milliseconds part).

    Returns
    -------
    float
        Total elapsed seconds.
    """
    total = float(x)
    total += s
    total += 60.0 * m
    total += 3600.0 * h
    return total
550362fe48d4e6c8c94b0c885611b607c8e39e63
11,913
def baby_names_collapsed_from_list(a_baby_names_list):
    """Collapse [name, rank] pairs into "name rank" strings.

    a_baby_names_list is a list of lists, each element [name, rank];
    each element is collapsed into a single space-separated string.
    """
    print('baby_names_collapsed_from_list')
    collapsed = ['{} {}'.format(entry[0], entry[1]) for entry in a_baby_names_list]
    return collapsed
f874c1fb205e0a86e46db8a3e2c0002712db82cb
11,916
def HeadingStr(heading):
    """Return a compass-point abbreviation for a heading in degrees.

    Args:
        heading: heading in degrees (0 = North), or None when unknown.

    Returns:
        One of "N", "NE", "E", "SE", "S", "SW", "W", "NW", or "?" when
        heading is None.
    """
    # IDIOM FIX: compare against None with `is`, not `!=`.
    if heading is None:
        return "?"
    # Each 45-degree sector is centred on its compass point; shifting by
    # 22.5 maps [337.5, 360)+[0, 22.5) onto sector 0 ("N").  The modulo
    # also normalises headings outside [0, 360).
    sectors = ("N", "NE", "E", "SE", "S", "SW", "W", "NW")
    return sectors[int(((heading + 22.5) % 360.0) // 45.0)]
1f1276e3f8f9c963703ced42d9391d1ee0598e3e
11,919
def polyval(p, x):
    """Evaluate a polynomial at x using Horner's scheme.

    Replacement for numpy.polyval that performs no input validation, so
    masked arrays passed as `x` propagate their masks correctly.

    Parameters
    ----------
    p : 1D array of coefficients, highest order first
    x : 1D array of points at which to evaluate the polynomial

    Returns
    -------
    The polynomial described by `p`, evaluated at `x`.

    Example
    -------
    >>> polyval([2, 0], 3)
    6
    """
    result = 0
    for coeff in p:
        result = x * result + coeff
    return result
7887e504aef74f35bc014462e9bb0eb2c4548971
11,920
def get_user_by_email(email, db_model):
    """Look up a user record by email address.

    Returns the first matching user object from `db_model`'s query, or
    None when no user with that email exists.
    """
    matching = db_model.query.filter(db_model.email == email)
    return matching.first()
656ccd8b7fc66141a1e2023ebcfda84dd1caea16
11,921
from collections import defaultdict


def E():
    """Solve the "company network" task (translated from Russian).

    The company network consists of servers joined into clusters; a
    server may only download files from servers within its own cluster.
    For each query, report which of the listed source servers the target
    server can download the file from.

    Status: accepted.

    Reads ``input.txt`` and writes ``output.txt`` in the current
    directory.
    """

    def cluster(graph, root):
        """Return the set of nodes in the cluster containing `root`."""
        visited = set()
        queue = [root, ]
        while queue:
            root = queue.pop()
            queue.extend(n for n in graph[root] if n not in visited)
            visited.add(root)
        return visited

    with open('input.txt', 'r') as f_in, open('output.txt', 'w') as f_out:
        n = int(f_in.readline())
        net = defaultdict(set)  # adjacency: for each node, the set of neighbours
        for _ in range(n):
            a, b = f_in.readline().split()
            net[a].add(b), net[b].add(a)
        # Partition the network into clusters (connected components);
        # every member maps to the shared component set.
        clusters = dict()
        for n, subnet in net.items():
            if n not in clusters:
                c = cluster(net, n)
                for subling in c:
                    clusters[subling] = c
        q = int(f_in.readline())
        for _ in range(q):
            target, _ = f_in.readline().split()
            sources = f_in.readline().split()
            c = clusters[target]
            # Servers must be printed in the order they are listed in
            # the corresponding query of the input data.
            res = [n for n in sources if n in c]
            if res:
                f_out.write(f'{len(res)} {" ".join(res)}\n')
            else:
                f_out.write('0\n')
c5ce5e64f2be3c25e63d3f413970b13c1a094725
11,922
import secrets


def random_key(length: int = 16) -> str:
    """Generate a cryptographically random key.

    Args:
        length: Number of characters in the generated key.

    Returns:
        A string of `length` characters drawn from A-Z, a-z, 0-9 and
        the URL-safe punctuation ``-._~``.
    """
    secret_allowed_chars = (
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~"
    )
    picked = [secrets.choice(secret_allowed_chars) for _ in range(length)]
    return "".join(picked)
a50daa81f3a01430a211299ae7123196a4b34264
11,923
import re


def fetch(name: str, **kwargs):
    """Dispatch to a module-level ``_fetch_*`` helper by name.

    The helper name is derived from `name` by lowercasing, dropping a
    trailing "_dict"/"-dict" suffix and mapping dashes to underscores,
    e.g. "Foo-Dict" resolves to ``_fetch_foo``.

    Args:
        name: Logical name of the fetcher.
        **kwargs: Forwarded to the resolved fetch function.

    Returns:
        Whatever the resolved ``_fetch_*`` function returns.

    Raises:
        KeyError: if no matching ``_fetch_*`` function exists.
    """
    func = f"""_fetch_{re.sub("[_-]+dict", "", name.lower()).replace("-", "_")}"""
    # SECURITY FIX: resolve the helper via the module globals instead of
    # eval(); eval on a formatted string could execute arbitrary code if
    # `name` were attacker-controlled.
    return globals()[func](**kwargs)
62fbe0e478bbb81955e7aa5205b9ddf5197d7fe5
11,924
import re


def commit_message_contains_query(message, query_terms):
    """Check if the commit message contains the query terms.

    A message matches when at least one query term occurs at a word
    boundary and the word "refactor" does not occur anywhere.

    @param message: The commit message
    @param query_terms: The terms that we look for in the message
    @return: True when a term matches and "refactor" is absent.
    """
    refactor_pattern = r'\b(refactor)'
    if re.search(refactor_pattern, message, re.IGNORECASE):
        return False
    query_pattern = r'\b(' + '|'.join(query_terms) + ')'
    return re.search(query_pattern, message, re.IGNORECASE) is not None
b701dac00971658be13a6b7207f8685c3388609e
11,925
def get_event_role(bot, guild):
    """Return the guild's configured event role, or None if absent."""
    record = bot.db.get_event_role_id(guild.id)
    if not record:
        return None
    wanted_id = record.get('event_role_id')
    for role in guild.roles:
        if role.id == wanted_id:
            return role
85656ee6b65896762197008108c7b09830a5a4a8
11,926
def laser(asteroid_angles):
    """Simulate the rotating laser destroying asteroids.

    `asteroid_angles` holds asteroid records (relative position,
    manhattan distance, and the angle from (0,1) at index 3), sorted by
    increasing angle and by increasing distance within equal angles.
    Each full sweep of the laser (0 to 2*pi) destroys the first
    remaining asteroid on every distinct angle.

    Returns the records in destruction order.
    """
    destruction_order = []
    remaining = list(asteroid_angles)
    # Keep sweeping until every asteroid is gone.
    while remaining:
        angles_hit = set()
        survivors = []
        for record in remaining:
            angle = record[3]
            if angle in angles_hit:
                # Shielded by a closer asteroid during this sweep.
                survivors.append(record)
            else:
                angles_hit.add(angle)
                destruction_order.append(record)
        remaining = survivors
    return destruction_order
80947e23f1c01d60d73a2fc2f41fa5304b9b1ab4
11,927
import os
import re


def parse_captions(path):
    """Parse caption text lines from all ``.vtt`` files in `path`.

    Scans every WebVTT file in the directory; each line immediately
    following a "HH:MM:SS.mmm --> HH:MM:SS.mmm" timing cue is kept,
    with its last character (normally the trailing newline) stripped.

    Returns the collected caption lines joined with newlines.
    """
    lines = []
    for fname in os.listdir(path):
        if re.findall(r'\.vtt$', fname):
            with open(os.path.join(path, fname), 'r') as f:
                get_next = False  # True right after a timing cue line
                for line in f.readlines():
                    if re.match(r'\d\d:\d\d:\d\d\.\d\d\d --> \d\d:\d\d:\d\d\.\d\d\d', line):
                        get_next = True
                    elif get_next:
                        # NOTE(review): line[:-1] assumes a trailing
                        # newline; a final line without one loses its
                        # last character -- confirm inputs always end
                        # with a newline.
                        lines.append(line[:-1])
                        get_next = False
    return "\n".join(lines)
c8c52c650578aa03d16982963f4abcf1436dcfe4
11,928
from typing import Optional
from typing import Dict
from typing import Any


def commit_draft(
    access_key: str,
    url: str,
    owner: str,
    dataset: str,
    *,
    draft_number: int,
    title: str,
    description: Optional[str] = None,
) -> Dict[str, str]:
    """Execute the OpenAPI `POST /v2/datasets/{owner}/{dataset}/commits`.

    Arguments:
        access_key: User's access key.
        url: The URL of the graviti website.
        owner: The owner of the dataset.
        dataset: Name of the dataset, unique for a user.
        draft_number: The draft number.
        title: The draft title.
        description: The draft description.

    Returns:
        The response of OpenAPI.

    Examples:
        >>> commit_draft(
        ...     "ACCESSKEY-********",
        ...     "https://api.graviti.com",
        ...     "czhual",
        ...     "MNIST",
        ...     draft_number=2,
        ...     title="commit-2",
        ... )
        {
           "commit_id": "85c57a7f03804ccc906632248dc8c359",
           "parent_commit_id": "784ba0d3bf0a41f6a7bfd771d8c00fcb",
           "title": "upload data",
           "description": "",
           "committer": "czhual",
           "committed_at": "2021-03-03T18:58:10Z"
        }

    """
    endpoint = f"{url}/v2/datasets/{owner}/{dataset}/commits"
    payload: Dict[str, Any] = {"draft_number": draft_number, "title": title}
    if description:
        payload["description"] = description
    response = open_api_do("POST", access_key, endpoint, json=payload)
    return response.json()  # type: ignore[no-any-return]
395070863b678892ca1004de20ac1b163d6e2da8
11,929
def DotProduct_DML(feat, M, query=None, is_sparse=False, is_trans=False):
    """Negated dot-product distance through a DML matrix.

    Computes ``-(query @ M @ feat.T)``; when `query` is None, `feat` is
    compared against itself.  `is_sparse` and `is_trans` are accepted
    for interface compatibility but unused here.
    """
    lhs = feat if query is None else query
    return -(lhs.dot(M).dot(feat.T))
7a1d1173e351d6f621755fe5dfc64bcbdcc95b97
11,930
import os
import sys
import pickle


def read_vest_pickle(gname, score_dir):
    """Read in VEST scores for given gene.

    Parameters
    ----------
    gname : str
        name of gene
    score_dir : str
        directory containing vest scores

    Returns
    -------
    gene_vest : dict or None
        dict containing vest scores for gene. Returns None if not found.
    """
    vest_path = os.path.join(score_dir, gname+".vest.pickle")
    if os.path.exists(vest_path):
        # NOTE: pickle.load executes arbitrary code from the file; only
        # use with trusted score directories.
        if sys.version_info < (3,):
            # Python 2: text-mode open, default pickle behaviour.
            with open(vest_path) as handle:
                gene_vest = pickle.load(handle)
        else:
            # Python 3: binary mode; latin-1 decodes py2-era pickles.
            with open(vest_path, 'rb') as handle:
                gene_vest = pickle.load(handle, encoding='latin-1')
        return gene_vest
    else:
        return None
517f07242240d4767a0f573c2a02446cb363aa70
11,931
import random


def create_fake_risk(data):
    """Populate `data` with a fabricated 'Risk2' column.

    Since the raw data had almost only 'unknown_risk' values, this
    fabricates a synthetic risk distribution instead (made-up data, for
    nicer visualisations).
    """
    n = len(data)
    # Shares of len(data)/7 assigned to each label, in insertion order.
    shares = (
        ('High_risk', 0.5),
        ('Low_risk', 2),
        ('No_risk', 3.5),
        ('unknown_risk', 1),
    )
    risks = []
    for label, share in shares:
        risks.extend([label] * int(n / 7 * share))
    # Two extra entries to land exactly on len(data).
    risks.append('No_risk')
    risks.append('Low_risk')
    # Shuffle so the labels are not grouped by type.
    random.shuffle(risks)
    data['Risk2'] = risks
    return data
c42edc03a57913d96701cc8b36c388d42c1c9bfc
11,932
import functools
import os


def section_from_file(fname: str, relpath: bool = True, text_var: str = "text"):
    """Make a decorator for providing a default argument from a text file

    Args:
        fname (str): Name of the file to be read
        relpath (bool): If :data:`True` (default), the file name is
            interpreted as a relative path (resolved against this
            module's directory), otherwise as an absolute path
        text_var (str): Name of the variable to pass to the decorated
            function

    Returns:
        The decorator function"""
    # NOTE(review): wraps(section_from_file) copies *this factory's*
    # metadata onto the decorator, not the decorated function's --
    # unusual; confirm this is intentional before changing.
    @functools.wraps(section_from_file)
    def section_from_file_(func, fname=fname):
        """Decorator function"""
        if relpath:
            # Resolve relative to the directory containing this module.
            fname = os.path.realpath(os.path.join(os.path.dirname(__file__), fname))
        # The file is read once, at decoration time, and its contents
        # bound as the `text_var` keyword default of the wrapped func.
        with open(fname, mode="r", encoding="utf-8") as f:
            # Decorated function
            return functools.partial(func, **{text_var: f.read()})

    return section_from_file_
261b6b7d846d2435c1dbe8e77f771b97fea5abe6
11,933
import random


def capnp_id() -> str:
    """
    Generates a valid id for a capnp schema.

    Returns:
        str -- hex string of a 64-bit id with the top bit set
    """
    # capnp ids are 64-bit values whose most significant bit is forced
    # to 1 (see the id check in capnp/parser.c++).
    # BUG FIX: random.randint's upper bound is inclusive, so the
    # original randint(0, 2 ** 64) could return 2**64 and yield a
    # 65-bit (invalid) id.
    return hex(random.randint(0, 2 ** 64 - 1) | 1 << 63)
55ae4e112c3ba223168627d7d06fab327a0d4f82
11,934
def linspace(start, stop, np):
    """Emulate Matlab linspace: `np` evenly spaced points.

    Args:
        start: first value of the sequence.
        stop: last value of the sequence (included).
        np: number of points to generate (a count, despite the name
            colliding with the usual numpy alias).

    Returns:
        list of `np` floats from start to stop inclusive.
    """
    # BUG FIX: the original divided by (np - 1) unconditionally, raising
    # ZeroDivisionError for a single point.  Matlab's linspace(a, b, 1)
    # returns [b].
    if np == 1:
        return [stop]
    return [start + (stop - start) * i / (np - 1) for i in range(np)]
b1be58298ff9983f6e2f6c5156cb4497ef8668d9
11,935
import pandas def _coerce_numeric(table): """Coerce a table into numeric values.""" for i in range(table.shape[1]): table.iloc[:, i] = pandas.to_numeric(table.iloc[:, i], errors='coerce') return table
cba1a224655e3536e361212569b7d6397a58786e
11,936
def generate_wiki_redirect_text(redirect_name: str) -> str:
    """Build the wikitext body for a redirect page to `redirect_name`."""
    return '#REDIRECT [[' + redirect_name + ']]'
f6e55fa20004d836ea601a1d3966d070273df237
11,938
def dnnlib_shadow_submit_convert_path(txt):
    """Identity pass-through.

    The original intent is unknown, so this is deliberately a no-op
    that returns its argument unchanged.
    """
    return txt
9a883453edeb6171386dca221557eead8f9058b2
11,939
import re


def read_html(htmlfile):
    """Read an HTML file to a string and strip some trouble makers.

    Removes the author box, assorted boilerplate strings, and
    <em>/</em> and <i>/</i> tags.

    Args:
        htmlfile: path of the HTML file to read.

    Returns:
        The cleaned HTML as a single string.
    """
    with open(htmlfile, "r") as infile:
        html = infile.read()
    # Clean out some unnecessary stuff.
    # BUG FIX: re.sub's fourth positional argument is `count`, not
    # `flags`; the original passed re.S positionally, so DOTALL was
    # never applied and multi-line author divs were left in place.
    html = re.sub("<div class=\"wp-about-author.*?</div>", "", html, flags=re.S)
    html = re.sub("<h4>Sobre la autora</h4>", "", html)
    html = re.sub("More Posts</a>", "", html)
    html = re.sub("Follow Me:", "", html)
    html = re.sub("<em>", "", html)
    html = re.sub("</em>", "", html)
    html = re.sub("<i>", "", html)
    html = re.sub("</i>", "", html)
    return html
bb0f724792cd817464a8720199ce7c7035e6b0f1
11,941
import time


def get_datetime():
    """Return the current local datetime in a database compatible format.

    Format: ``YYYY-MM-DD HH:MM:SS``.
    """
    db_format = '%Y-%m-%d %H:%M:%S'
    return time.strftime(db_format)
804166ac7f56899733043871ea5ec9fcce474ee7
11,944
import inspect


def expand_lambda_function(lambda_func):
    """
    Returns the source text of a lambda function after expansion.

    Relies on inspect.getsource, so it only works for lambdas defined
    in a real source file.  Assumes the lambda is the second
    comma-separated item on its defining line (e.g. the second argument
    of a call) -- TODO confirm this matches all call sites.
    """
    # Take the text after the first comma on the defining line.
    lambda_function = inspect.getsource(lambda_func).split(",")[1].strip()
    # Drop a trailing ')' left over from the enclosing call expression.
    if lambda_function.endswith(")"):
        return lambda_function[:-1]
    return lambda_function
7076ce6bdee4077f6cbf665d686cd28c2420d67e
11,945
def get_dict_from_params_str(params_str):
    """Get the dictionary of kv pairs in a string separated by semi-colon.

    Values are parsed as Python literals (numbers, strings, tuples...).

    Args:
        params_str: "k1=v1;k2=v2" style string, or empty/None.

    Returns:
        dict mapping each key to its parsed value, or None when
        `params_str` is falsy.
    """
    import ast

    if not params_str:
        return None
    params_dict = {}
    for kv in params_str.split(";"):
        k, v = kv.strip().split("=")
        # SECURITY FIX: the original used eval(v), which executes
        # arbitrary code; ast.literal_eval only accepts literals.
        params_dict[k] = ast.literal_eval(v)
    return params_dict
611f80b55c090ff5aa6afa78ee4b1b5db074cab0
11,946
def WENOReconstruct(u_stencil, eps, p):
    """WENO reconstruction of u_{i+1/2} from five cell averages.

    Reconstructs u_{i+1/2} given cell averages \\bar{u}_i at each
    neighboring location.  See `High Order Weighted Essentially
    Nonoscillatory Schemes for Convection Dominated Problems'.

    Input
    -----
    u_stencil : five cell averages \\bar{u}_{i-2} .. \\bar{u}_{i+2}
    eps : regularization parameter (unused in this linear variant)
    p : power parameter (unused in this linear variant)

    Returns
    -------
    uiphf : u_{i+1/2} after performing the full reconstruction.
    """
    um2, um1, u0, up1, up2 = u_stencil
    # Fifth-order linear weights: 2/60, -13/60, 47/60, 27/60, -3/60.
    return um2 / 30 - 13 * um1 / 60 + 47 * (u0 / 60) + 9 * (up1 / 20) - up2 / 20
aa49be7b069f09c90c9b350d86575376eb5c9fbb
11,949
def read_ssm_params(params, ssm):
    """Resolve ``ssm://{key}`` names to parameter-store values.

    Args:
        params: iterable of keys of the form ``ssm://name``.
        ssm: boto3 SSM client (or compatible object).

    Returns:
        dict mapping each original ``ssm://`` key to its decrypted
        value.

    Raises:
        ValueError: when any requested key could not be looked up.
    """
    prefix = 'ssm://'
    names = [key[len(prefix):] for key in params]
    result = ssm.get_parameters(Names=names, WithDecryption=True)
    invalid = result['InvalidParameters']
    if invalid:
        raise ValueError('Failed to lookup some keys: ' + ','.join(invalid))
    return {prefix + entry['Name']: entry['Value']
            for entry in result['Parameters']}
dc4aa3272b8b371621074230bc2ce2b3cad1dd72
11,951
import re


def is_stable_version(version):
    """
    Return True when `version` denotes a stable release.

    A stable version has no letters in the final component, but only
    numbers.
    Stable version example: 1.2, 1.3.4, 1.0.5
    Not stable version: 1.2alpha, 1.3.4beta, 0.1.0rc1, 3.0.0dev
    """
    parts = version if isinstance(version, tuple) else version.split('.')
    final_component = parts[-1]
    return re.search('[a-zA-Z]', final_component) is None
995dff30ff81908e0d5a00d88816d874e9bfd626
11,953
from datetime import datetime


def chart_grouping_as_date(value):
    """Transform a 'YYYY' or 'YYYYMM' grouping value into a date object.

    Returns a ``datetime.date``, or None when neither format matches.
    """
    text = str(value)
    for fmt in ('%Y', '%Y%m'):
        try:
            parsed = datetime.strptime(text, fmt)
        except ValueError:
            continue
        return parsed.date()
92f80be0baea30944ebd90e1377dfa953f5fb7fa
11,954
def impartial(f, *args, **kwargs):
    """
    Acts just like Python's functools.partial, except that any arguments
    to the returned function are ignored (making it 'impartial', get
    it?)
    """
    def ignore_everything(*_ignored_args, **_ignored_kwargs):
        return f(*args, **kwargs)
    return ignore_everything
ef512146057ae09546b6e7fac64c2496ac8f7d53
11,955
import argparse


def get_args() -> argparse.Namespace:
    """
    Parse command line arguments for the show renamer.

    Returns:
        argparse.Namespace: parsed arguments
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--path",
        required=True,
        help="full path to top level show folder",
    )
    parser.add_argument(
        "--scheme",
        required=True,
        help="name scheme to apply to beginning of all files after renaming",
    )
    parser.add_argument(
        "--title_regex",
        help="regular expression to extract original episode name",
    )
    parser.add_argument(
        "--ignore",
        nargs="+",
        default=[],
        help="files and folders that should be ignored",
    )
    parser.add_argument(
        "--non_media",
        nargs="+",
        default=["nfo", "txt"],
        help="file extensions that should be ignored",
    )
    parser.add_argument(
        "-e",
        "--execute",
        action="store_true",
        help="actually make the changes",
    )
    return parser.parse_args()
18deeb1debefbe9bde5e793728b111382fdb72bd
11,956
def get_existing_symptoms(journal):
    """Given a journal w/ proper format, aggregates all the symptoms.

    args:
        journal (dict): journal['journal'] is a list of log entries,
            each carrying a 'symptoms' list of {'name': ...} dicts.
    returns:
        [str]: symptom names, deduplicated across log entries in
        first-seen order.
    """
    collected = []
    for entry in journal['journal']:
        # Names not yet collected before this entry; duplicates within a
        # single entry are kept, matching the original semantics.
        fresh = [s['name'] for s in entry['symptoms'] if s['name'] not in collected]
        collected.extend(fresh)
    return collected
e8f90ff3344318b53ae91e79d29f572110103959
11,958
def cal_fps(raw_trajs_frequent_pattern: dict, sd_trajs_frequent_pattern: dict) -> float:
    """
    Calculate FPS over the top-50 frequent patterns.

    Args:
        raw_trajs_frequent_pattern: frequent patterns of the original
            trajectory
        sd_trajs_frequent_pattern: frequent patterns of the generated
            trajectories

    Returns:
        FPS (summed relative error of the first 50 raw patterns,
        divided by 50)
    """
    total_error = 0.0
    top_patterns = list(raw_trajs_frequent_pattern.keys())[:50]
    for pattern in top_patterns:
        if pattern in sd_trajs_frequent_pattern:
            raw_count = raw_trajs_frequent_pattern[pattern]
            sd_count = sd_trajs_frequent_pattern[pattern]
            total_error += abs(raw_count - sd_count) / raw_count
    return total_error / 50
0b877ba77d928d0550f3fb695c0805f9447b4f61
11,959
def compara_assinatura(ass_main, matriz_ass_input):
    """Compare text signatures and return their degree of similarity.

    Args:
        ass_main: reference signature (list of 6 trait values).
        matriz_ass_input: either a single signature (flat list) or a
            matrix of signatures (list of lists).

    Returns:
        A single similarity score for a flat input, or a list of
        per-row scores for a matrix input.
    """
    if type(matriz_ass_input[0]) is list:
        lista_Sab = []
        for linha in matriz_ass_input:
            # BUG FIX: the accumulator is reset for every row; the
            # original carried the running sum across rows, inflating
            # every score after the first.
            soma_mod = 0
            for col in range(len(linha)):
                soma_mod += abs(ass_main[col] - linha[col])
            lista_Sab.append(soma_mod / 6)
        return lista_Sab
    soma_mod = 0
    for i in range(len(matriz_ass_input)):
        soma_mod += abs(ass_main[i] - matriz_ass_input[i])
    return soma_mod / 6
b9dae58c37f06cbcbab577e54e3bbbcf793d7977
11,961
import os
import pickle


def load_options(run_folder, options_file_name = 'options-and-config.pickle'):
    """ Loads the training, model, and noise configurations from the given folder

    The pickle file is expected to contain three objects, stored in
    this order: training options, noise config, hidden config.

    Returns:
        (train_options, hidden_config, noise_config) -- note the return
        order differs from the order stored in the file.
    """
    # NOTE: unpickling executes arbitrary code; only load trusted runs.
    with open(os.path.join(run_folder, options_file_name), 'rb') as f:
        train_options = pickle.load(f)
        noise_config = pickle.load(f)
        hidden_config = pickle.load(f)
    return train_options, hidden_config, noise_config
23af1f8082fbfa509a4c7675bc6dd8d666cbbc87
11,962
import re


def prune_lines(infile):
    """
    Discard all lines which don't have the data we're after.

    Keeps lines starting with "===" or "XXCOPY", or containing " = ".
    Adapted from
    http://stackoverflow.com/questions/17131353/problems-targeting-carriage-return-and-newlines-with-regex-in-python

    Args:
        infile: path of the text file to filter.

    Returns:
        list of kept lines, stripped of surrounding whitespace.
    """
    # BUG FIX: the original called text.close() inside the `with` block;
    # the context manager already closes the file, and the redundant
    # close invited bugs if statements were ever reordered.
    with open(infile, 'r') as text:
        lines = text.readlines()
    result = []
    for line in lines:
        line = line.strip()  # trim leading and trailing whitespace
        # skip lines that don't contain "===", "XXCOPY" or " = "
        if re.match('===*', line):
            result.append(line)
        elif re.match('^XXCOPY', line):
            result.append(line)
        elif re.match('^.+? = ', line):
            result.append(line)
    return result
0d3bccf996b2afeab3e56cd29e5559235cb34d12
11,963
def _GetDefiningLayerAndPrim(stage, schemaName): """ Searches the stage LayerStack for a prim whose name is equal to schemaName. """ # SchemaBase is not actually defined in the core schema file, but this # behavior causes the code generator to produce correct C++ inheritance. if schemaName == 'SchemaBase': return (stage.GetLayerStack()[-1], None) else: for layer in stage.GetLayerStack(): for sdfPrim in layer.rootPrims: if sdfPrim.name == schemaName: return (layer, sdfPrim) raise Exception("Could not find the defining layer for schema: %s" % schemaName)
c898b1f1fdd3430b044b556a269b0cdf84a552c1
11,964
def twoSum(nums, k):
    """
    Return True when some pair of elements in nums sums to k.

    :type nums: List[int]
    :type k: int
    :rtype: bool
    """
    # Single pass keeping the set of complements still needed:
    # Time Complexity - O(n), Space Complexity - O(n).
    needed = set()
    for value in nums:
        if value in needed:
            return True
        needed.add(k - value)
    return False
93e6d3f4d2d80f28a125b85b3eab5f4601d119d6
11,965
def unique_by(func, objects):
    """
    Deduplicate `objects` by the value of ``func(obj)``.

    :param func: Applied to each object to derive its dedup key
    :param objects: iterable
    :return: list keeping only the first occurrence of each key, in
        original order
    """
    seen = set()

    # FIX: the original inner helper was named `hash`, shadowing the
    # builtin; the docstring also wrongly claimed this sorts.
    def first_occurrence(obj):
        # set.add returns None, so `not seen.add(...)` is always True
        # and only serves to record the key as a side effect.
        key = func(obj)
        return key not in seen and not seen.add(key)

    return [obj for obj in objects if first_occurrence(obj)]
e3f72626543b0b0234530f936e02a100f00e6cba
11,967
def property_info(info, delimiter=' | '):
    """Returns property info from tupple.

    The tuple carries three flags: info[0] maps to READ, info[1] to
    WRITE and info[2] to DELETE.

    #!jinja
    {# return someone like this: WRITE | READ | DELETE #}
    {{ property_info(info) }}
    """
    flagged = (
        (info[0], 'READ'),
        (info[1], 'WRITE'),
        (info[2], 'DELETE'),
    )
    return delimiter.join(label for flag, label in flagged if flag)
0b39c7ba69a05365f0051eab9941fd2ddce02642
11,969
import re


def clean(s):
    """Strip leading & trailing space, remove newlines, compress space.

    Also expand '{NL}' to a literal newline.

    :param str s:
    :rtype: str
    """
    compacted = re.sub(' +', ' ', s.strip())
    without_breaks = compacted.replace('\n', '').replace('\r', '')
    return without_breaks.replace('{NL}', '\n')
f2c07984de1766b7a1d0258b29ad8030e87896e8
11,971
import time
import math


def timeSince(since):
    """Format the elapsed wall-clock time since `since`.

    Returns a string of the form 'H h M m S s', used to report
    training/evaluation time.
    """
    elapsed = time.time() - since
    hours, remainder = divmod(elapsed, 3600)
    minutes, seconds = divmod(remainder, 60)
    return '%d h %d m %d s' % (hours, minutes, seconds)
4fefbf6198bc89ca53eae341c116b0f6e693e8d1
11,972
import argparse
import sys


def get_options(cmd_args=None):
    """Build and run the SAGA Live Monitoring argument parser.

    Args:
        cmd_args: optional explicit argv list; defaults to sys.argv.

    Returns:
        argparse.Namespace with `tripinfo` and `out` attributes.
    """
    parser = argparse.ArgumentParser(
        prog='{}'.format(sys.argv[0]),
        usage='%(prog)s [options]',
        description='SAGA Live Monitoring',
    )
    parser.add_argument(
        '--tripinfo',
        type=str,
        required=True,
        help='SUMO TripInfo file (XML).',
    )
    parser.add_argument(
        '--out',
        type=str,
        required=True,
        help='Output file (CSV).',
    )
    return parser.parse_args(cmd_args)
eab3eff5847defd3e2c68a2924bf2f0410f9a282
11,974
def get_test_specific_options(test, BLIND=False, ignore_others=True, ignore_labels=None): """ Function to use for determining test specific options, currently this is tuned to 4CR dataset; Need to be replaced for specific dataset. """ # Need to change when the total test options changed # Color key Experiment 1: (Groups 5-8 are unusually flexible; group 1,6-8 are different strains than 2-5) exp1 = { #"0.0": r'Controls', "1.0": r"Controls WT/SAL male P60-90 Bl/6J/CR", "2.0": r"FI male P60 Taconic", "3.0": r"FR male P60 Taconic", "4.0": r"ALG male P60 Taconic", "5.0": r"ALS male P60 Taconic", "6.0": r"5d COC test at P90 Bl/6CR", "7.0": r"BDNF met/met Ron tested at P60", "8.0": r"P26 males WT Bl/6CR"} # Color Key Experiment 2 data (focusing on angel's mice and bdnf/trkb manipulations) P40-60 ages exp2 = {"1.0": r"Controls VEH/SAL/WT", "2.0": r"acute NMPP1pump", "3.0": r"chronic NMPP1pump", "4.0": r"BDNF Val/Val Ron", "5.0": r"P1-23 NMPP1H20", "6.0": r"P1-40 NMPP1H20", "7.0": r"BDNF Met/Met Ron"} if not ignore_others: exp1["-1.0"] = r"OTHERS" exp2["-1.0"] = r"OTHERS" exp1_params = {'UMAP': {'n_neighbors': 10, 'min_dist': 0.8, 'n_components': 3, 'metric': 'euclidean'}} exp2_params = {} TEST_LABEL_ALIAS = { 'exp1_label_FI_AL_M': None if BLIND else exp1, 'exp2_Angel': None if BLIND else exp2, 'age': None, 'RL_age': None, 'RL_treat_sex': None, 'RL_treat': None, 'RL_sex': None } IGNORE_LABELS = { 'exp1_label_FI_AL_M': ['-1.0', '0.0', '1.0', '3.0', '4.0', '6.0', '7.0'], 'exp2_Angel': ['-1.0', '1.0'], 'age': None, 'RL_age': None, 'RL_treat_sex': ['ALS_F', 'FI_F', 'FR_F'], 'RL_treat': None, 'RL_sex': None } DIM_PARAMS = { 'exp1_label_FI_AL_M': exp1_params, 'exp2_Angel': exp2_params, 'age': {}, 'RL_age': {}, 'RL_treat_sex': {}, 'RL_treat': {}, 'RL_sex': {} } return TEST_LABEL_ALIAS[test], IGNORE_LABELS[test] if ignore_labels is None else ignore_labels,\ DIM_PARAMS[test]
0b56619a7eefadc58921a1d7145afeba9e7b9d74
11,975
def people():
    """
    People in organization.

    :return: mapping of users (currently always empty; the real lookup
        is stubbed out)
    """
    # users = get_users()
    return {}
ac2c0b1403d9d00fe7fa6c8b631fefcf71c8266b
11,977
def parse_by_category(category, data):
    """
    Filter database dump rows by category.

    :param category: accepts string
    :param data: accepts multi-dimensional iterable data type
    :return: filtered multi-dimensional LIST containing the rows
        (TUPLES) whose second field equals `category`
    """
    return [row for row in data if row[1] == category]
255d69580ed666aac38eaa70442ee4e215a7e802
11,978
def user_to_dict(user):
    """Serialize a user object to a plain dict for a JSON response.

    :Returns: dict with id/name/email/roles for authenticated users, or
        an empty dict for anonymous ones.
    """
    if not user.is_authenticated:
        return {}
    roles = [
        {'id': role.id, 'description': role.description, 'name': role.name}
        for role in user.roles
    ]
    return {
        'id': user.id,
        'name': user.email,  # name mirrors the email address
        'email': user.email,
        'roles': roles,
    }
aed99bf8ff05dfca0b4bac4b2a674c4e71871c54
11,979
import numpy


def flatten_training(training_pairs):
    """Convert active label training pairs to two parallel lists.

    Args:
        training_pairs: dict with 'match' and 'distinct' keys, each a
            list of [record_a, record_b] pairs.

    Returns:
        (examples, y): `examples` is the ordered list of all record
        pairs; `y` is a numpy array of 1 (match) / 0 (distinct) values
        aligned with `examples`.
    """
    label_codes = {'match': 1, 'distinct': 0}
    examples = []
    y = []
    for label, pairs in training_pairs.items():
        code = label_codes.get(label)
        if code is None:
            # Unknown labels are silently skipped, as in the original.
            continue
        for pair in pairs:
            y.append(code)
            examples.append(pair)
    return examples, numpy.array(y)
0813134356eed5478cf61b73cc054cda8fc460c5
11,981
def replace_extension(filename, new_extension, add_to_end=False):
    """Replaces the extension in the filename with the new_extension.

    When the filename has no dot, already ends in `new_extension`, or
    `add_to_end` is True, the new extension is appended instead.
    """
    dot = filename.rfind(".")
    keep_whole = (
        dot < 0
        or filename[dot + 1:] == new_extension
        or add_to_end
    )
    stem = filename if keep_whole else filename[:dot]
    return "{}.{}".format(stem, new_extension)
75b04c37dffb3dd92f3fe12295b18efb52dfa2ef
11,982
from bs4 import BeautifulSoup


def wrap_with_tag(html: str, document_wrapper_class: str) -> str:
    """
    Wraps a string of HTML with a div using a given wrapper class

    Args:
        html (str): The HTML to be wrapped
        document_wrapper_class(str): The class with which to wrap the HTML

    Returns:
        str: Newly wrapped HTML
    """
    soup = BeautifulSoup(html, 'html.parser')
    wrapper = soup.new_tag('div')
    wrapper['class'] = document_wrapper_class
    # BUG FIX: appending a node to the new div detaches it from `soup`;
    # iterating `soup` directly while appending therefore skipped every
    # other top-level element.  Snapshot the children first.
    for element in list(soup.contents):
        wrapper.append(element)
    return wrapper.prettify()
075cf4ef818eb38f2b0a8a16c76fcc5cc11cdec9
11,983
def slave_entry(slave, programs, filesystems):
    """
    Template tag {% slave_entry slave programms %} is used to display a
    single slave.

    Arguments
    ---------
        slave: Slave object
        programs: Array of programs
        filesystems: Array of filesystems

    Returns
    -------
        A context dict mapping 'slave', 'programs' and 'filesystems' to
        the given arguments.
    """
    context = dict()
    context['slave'] = slave
    context['programs'] = programs
    context['filesystems'] = filesystems
    return context
7449eec9d906bfe74245cbdec2a76fd7a2fc8157
11,984
from pathlib import Path


def get_and_mark_bundle_cache_version(bundle_base: str, *,
                                      previously_bundled: bool) -> int:
    """
    Check and return the bundle cache version.

    The marker filename is `.bundle_cache_version`.

    :param str bundle_base: The bundle directory
    :param bool previously_bundled: true if the user has previously used
        this workspace to build a bundle
    :returns: the cache layout version to use
    """
    marker_path = Path(bundle_base) / '.bundle_cache_version'
    if not previously_bundled:
        # Fresh workspace: always use the current layout.
        version = 2
    elif marker_path.is_file():
        # Previously bundled and the marker recorded a layout version.
        version = int(marker_path.read_text().rstrip())
    else:
        # Bundled before the marker existed: legacy layout.
        version = 1
    marker_path.write_text(str(version) + '\n')
    return version
74ab1584b60ffb77dbb3709d01ea4df00a448ea4
11,986
def isinrectbnd(x: int, y: int,
                xmin: int, ymin: int,
                xmax: int, ymax: int) -> bool:
    """Checks if the x and y values lie strictly within the rectangular
    area defined by xmin, ymin and xmax, ymax.

    Args:
        x, y: (x,y) coordinates to test
        xmin, ymin: min (x, y) bounds (exclusive)
        xmax, ymax: max (x, y) bounds (exclusive)

    Returns:
        boolean value
        True -> (x, y) is in bounds
        False -> (x, y) is out of bounds
    """
    return xmin < x < xmax and ymin < y < ymax
3f67de8669a258a554a8754786579517a07bc321
11,987
def remove_prefix(string: str, prefix: str):
    """
    Removes a prefix from a string if present.

    Args:
        string (`str`): The string to remove the prefix from.
        prefix (`str`): The prefix to remove.

    Returns:
        A copy of the string without the prefix.
    """
    if string.startswith(prefix):
        return string[len(prefix):]
    return string[:]
598e1b9b863d342e757e54cf94035da63e3ace1f
11,990
def get_params(opt_over, net, net_input, downsampler=None):
    """
    Returns parameters that we want to optimize over.

    :param opt_over: comma separated list, e.g. "net,input" or "net"
    :param net: network
    :param net_input: torch.Tensor that stores input `z`
    :param downsampler: optional module; required for the 'down' option
    :return: list of parameters/tensors to optimize
    """
    params = []
    for opt in opt_over.split(','):
        if opt == 'net':
            params += [x for x in net.parameters()]
        elif opt == 'down':
            assert downsampler is not None
            # BUG FIX: the original assigned `params = [...]` here,
            # discarding parameters collected for earlier options
            # (e.g. "net,down" lost all the net parameters).
            params += [x for x in downsampler.parameters()]
        elif opt == 'input':
            net_input.requires_grad = True
            params += [net_input]
        else:
            assert False, 'what is it?'
    return params
b8a3c26b5307c0ba584e3841a2d98f337c618bf8
11,991
from typing import Optional
from typing import Any


def nested_get(dct: dict, *keys: str) -> Optional[Any]:
    """Multi-level get helper function.

    Walks `keys` through nested dicts, with missing keys resolving to
    {} along the way.  Returns the final value, or None whenever the
    result is falsy (missing path, empty dict, 0, '', ...).
    """
    current = dct
    for key in keys:
        current = current.get(key, {})
    return current or None
cd881389157d67365793240e1e2e0b39f4bc1726
11,995
def only_moto(request):
    """Return True if only moto ports are to be used for mock services."""
    cli_options = request.config.option
    return cli_options.only_moto
1ab211925a411d4999a22e77d819da88b4477ed6
11,996
def find(sub_str, target_str):
    """[summary] Substring search.

    Arguments:
        sub_str {str} -- substring
        target_str {str} -- target string

    Returns:
        bool -- if substring is found in target string
    """
    # The `in` operator uses a Boyer-Moore style algorithm:
    # worst case O(mn), best case O(n/m) -- certainly faster than
    # re.search() for plain substring tests.
    return sub_str in target_str
d26e4ad79eaf913d81126d7647036b857de5ed6d
11,997
def check_faces_in_caption(photo):
    """Checks if all faces are mentioned in the caption.

    A face counts as mentioned when the caption contains either the
    full name or just the first name.
    """
    caption = photo.comment
    if photo.getfaces() and not caption:
        return False
    for face in photo.getfaces():
        if caption.find(face) != -1:
            continue  # full name present
        name_parts = face.split(" ")
        first_name_found = (
            len(name_parts) > 1 and caption.find(name_parts[0]) != -1
        )
        if not first_name_found:
            return False
    return True
eed03439df84a1ddd4cb7bcbb269af7c60adfcb5
11,998
def LfromS(seq):
    """
    Compute Schroder function value L from a given sequence.

    This plods through the algorithm given in the paper.  Humans can easily
    recognize the relation between elements of S and the length of runs of
    0s or 1s in the binary representation of L.

    Args:
        seq (list of int): Sequence - see paper for details

    Returns:
        value of L, real in range 0.0 .. 1.0

    Note that overflows are no problem - seq may contain "large" values
    like ten.  There is no check on the length of seq, or if it's empty.
    """
    lam = 1.0
    value = lam / 2
    for run in seq[::-1]:
        value = value / (2 ** run)
        value = lam - value
    # Final complement undoes the last in-loop flip.
    value = lam - value
    return value
6adf8c0b6f8e12d7e6a79efcb38c628ef0b29031
11,999
import random


def contest(other):
    """contests human ability"""
    # Roll a d20 against one fifth of the opponent's advantage.
    threshold = round(other.advantage / 5)
    return random.randint(1, 20) > threshold
a87cc2ffc7d01f493d99b3808735f7954847caaa
12,001
def get_trader_violation(model, trader_id, trade_type):
    """Get trader violation.

    Sums the ten per-band constraint-violation variables for the given
    (trader, trade type) offer; returns 0.0 when the trader has no such
    offer in the model.
    """
    if (trader_id, trade_type) not in model.V_TRADER_TOTAL_OFFER.keys():
        return 0.0
    total = 0
    for band in range(1, 11):
        total += model.V_CV_TRADER_OFFER[trader_id, trade_type, band].value
    return total
1ef4822345bd16e17d0b351148275a288fc281a5
12,003
def shorter(item):
    """Make a string shorter.

    item -- a unicode string.

    Strings longer than two characters lose their last two characters and
    gain a trailing ellipsis; shorter strings are returned unchanged.
    """
    if len(item) <= 2:
        return item
    return item[:-2] + u'\u2026'  # ellipsis
d3d9f678c5055e43f2120c1779bbd41066c8684d
12,004
def route_con(lst):
    """Build the flattened node list of the best route.

    :param lst: list of (from, to) tuples describing the best path
    :return: list of nodes along the path (start node, then each destination)
    """
    if not lst:
        return []
    route = [lst[0][0]]
    for _, destination in lst:
        route.append(destination)
    return route
dae6a3613c5908615b1504b9960a6981d84b3224
12,005
import yaml
import subprocess


def validate_output(output, fmt, mcf):
    """Compare values in merp2tbl output with merp -d row for row; non-NA must agree.

    Parameters
    ----------
    output : str
        serialized output as returned by format_output
    fmt : str
        'tsv' or 'yaml'
    mcf : str
        path to merp command file

    Returns
    -------
    rval, msg : 2-ple of int, str
        rval 0 = success, positive = warning, negative = fail
        msg = brief explanation

    Raises
    ------
    ValueError
        if `fmt` is neither 'tsv' nor 'yaml'
    """
    merp2tbl_vals = []
    if fmt == "yaml":
        for out in yaml.load(output, Loader=yaml.SafeLoader):
            if "value" in out.keys():
                merp2tbl_vals.append(out["value"])
            else:
                msg = "yaml value key not found, cannot validate data"
                return (1, msg)
    elif fmt == "tsv":
        out_lines = output.split("\n")
        header = out_lines[0].split("\t")
        value_idx = None
        try:
            value_idx = header.index("value")
        except ValueError:
            msg = "tsv value column not found, cannot validate data"
            return (2, msg)
        if value_idx is not None:
            merp2tbl_vals = [
                out_line.split("\t")[value_idx] for out_line in out_lines[1:]
            ]
            merp2tbl_vals = [v if v == "NA" else float(v) for v in merp2tbl_vals]
    else:
        raise ValueError("unknown format: ", fmt)

    if merp2tbl_vals == []:
        msg = "no merp2tbl values not found, cannot validate data"
        return (1, msg)

    # run merp -d and slurp values
    proc_res = subprocess.run(
        ["merp", "-d", mcf], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    merp_vals = [
        float(v)
        for v in proc_res.stdout.decode("utf-8").split("\n")
        if len(v.strip()) > 0
    ]

    # length mismatch
    if len(merp_vals) != len(merp2tbl_vals):
        msg = "merp2tbl " + mcf + "output value length mismatch"
        # BUGFIX: previously returned a bare -1 here, breaking the documented
        # (rval, msg) tuple contract every other branch honors.
        return (-1, msg)

    # check value for value, skip NAs
    # NOTE(review): if merp2tbl reports NA rows that `merp -d` omits, the
    # positional comparison below could misalign -- confirm upstream behavior.
    for i, v in enumerate(merp2tbl_vals):
        if v != "NA" and not merp_vals[i] == merp2tbl_vals[i]:
            msg = "merp2tbl {0} output line {1}: {2} != merp -d {3}".format(
                mcf, i, merp2tbl_vals[i], merp_vals[i]
            )
            return (-2, msg)
    return (0, "")
2d9e7c89f8280751abfc328042c0ee3bd1572f4a
12,007
def getViolationFromAria2(ariaViolation, constraint, violationList):
    """Descrn: Make a constraint's violation object in the input violation
               list given an Aria2 violation analysis object.
       Inputs: Aria2 violation analysis object,
               NmrConstraint.DistanceConstraint, NmrConstraint.ViolationList
       Output: NmrConstraint.Violation (or None if not violated)
    """
    if not ariaViolation.isViolated():
        return None
    avgDistance = ariaViolation.getAverageDistance()
    # Clamp the violated fraction into [0, 1].
    fracViolated = max(0.0, min(ariaViolation.getDegreeOfViolation(), 1.0))
    return violationList.newViolation(
        violation=ariaViolation.getUpperBoundViolation().getValue(),
        calcValue=avgDistance.getValue(),
        calcValueError=avgDistance.getError(),
        constraint=constraint,
        fractionViolated=fracViolated,
    )
f89718bb8a2b767e51e9efa657768327548822f1
12,009
from typing import Dict
import pathlib


def dir_parser(path_to_dir: str) -> Dict[str, Dict[str, str]]:
    """
    Parses the given directory and returns, keyed by file stem, each file's
    suffix and POSIX path.  Only names matching "*.*" are considered.
    """
    directory = pathlib.Path(path_to_dir).resolve()
    return {
        entry.stem: {"suffix": entry.suffix, "path": entry.as_posix()}
        for entry in directory.glob("*.*")
    }
3b6c0ac172ad863470d492aa99713fab3ecf5d99
12,011
import inspect


def is_redefined_dataclass_with_slots(old: type, new: type) -> bool:
    """Return True if `new` is the `old` dataclass redefined with `__slots__.`"""
    old_ns, new_ns = old.__dict__, new.__dict__
    # Both must be dataclasses.
    if "__dataclass_fields__" not in old_ns or "__dataclass_fields__" not in new_ns:
        return False
    # The redefinition must introduce __slots__ (checked directly on the
    # class, ignoring inherited __slots__); the original must lack it.
    if "__slots__" in old_ns or "__slots__" not in new_ns:
        return False
    # Same module, name and bases is good enough to call it "the same" class.
    return (
        inspect.getmodule(old) is inspect.getmodule(new)
        and old.__name__ == new.__name__
        and old.__bases__ == new.__bases__
    )
c5c760bca752c0458139419f3c8e519ed8adad66
12,012
def filter_data(data):
    """
    Filters data for years after 2010 and, when present, rows originating
    from Syria.

    Inputs:
        data (pd.DataFrame): Input data with column "Year" and optionally "Origin"

    Returns:
        data (pd.DataFrame): Filtered data
    """
    recent = data.Year > 2010
    if 'Origin' in data.columns:
        return data[recent & data.Origin.str.contains('Syria')]
    return data[recent]
2daf6fbd815fcea9dd3c2712363cd99cdca62a34
12,013
def strrep(strg, x):
    """Return the string repeated x times; str, int -> str"""
    return x * strg
6e3d59666778af9db781a11e6f48b61010e7de4d
12,014
def task_build():
    """Create full distribution."""
    return dict(
        actions=['python -m build'],
        task_dep=['gitclean', 'mo', 'docs'],
    )
7112ab22031299207dfb9c14d2482ef52e975f89
12,016
def get_positions(structure):
    """Wrapper to get the positions from different structure classes.

    Tries the ASE interface first, then diffpy; raises ValueError when
    neither is available.
    """
    try:
        # ASE structure
        return structure.get_scaled_positions()
    except AttributeError:
        pass
    try:
        # diffpy structure
        return structure.xyz
    except AttributeError:
        raise ValueError("Unable to get positions from structure")
6428139131de02b577925be9499668642a11a69c
12,017
from typing import List


def parse_comp_internal(
    comp_rules: str, top_delim: str, bottom_delim: str, rule_start: str
) -> List[str]:
    """
    Pull the numbered rule names out of one sub-section of the rules text.

    :param comp_rules: Rules to parse
    :param top_delim: Section to parse
    :param bottom_delim: Section to cut away
    :param rule_start: Rule number prefix to pull entries from
    :return: Sorted list of the extracted keyword names (lower-cased)
    """
    # Keyword actions live after the second top_delim, before bottom_delim.
    section = comp_rules.split(top_delim)[2].split(bottom_delim)[0]
    keywords: List[str] = []
    # "XXX.1" is just a description; real entries start at "XXX.2".
    # Sub-rules like "XXX.2a" are skipped because we only ever look for the
    # next whole-number marker.
    next_index = 2
    for segment in section.split("\r\n"):  # Windows line endings... yuck
        if f"{rule_start}.{next_index}" in segment:
            # Split "Rule Number | Keyword" and keep the keyword.
            keywords.append(segment.split(" ", 1)[1].lower())
            next_index += 1
    return sorted(keywords)
5563e1eb7bbe9d738f550a220a733c6c2d11b509
12,019
def finish_waiting(event, messageCenter, roomGrid):
    """Events that happen at the end of some input cycle.

    Completes a pending "move" wait: resolves the destination from the event,
    clears the wait, and attempts the move, reporting failure to the user.
    """
    if roomGrid.waitFunction[0] != "move":
        return None
    move_args = roomGrid.waitFunction[1]
    source = move_args["location"]
    # A location of [-1, -1] is the sentinel for the locked space.
    if event.location == [-1, -1]:
        destination = roomGrid.lockedSpace
    else:
        destination = event.location
    obj_type = move_args["objType"]
    # Clear the pending wait before attempting the move.
    roomGrid.setWaitFunction(None, None)
    if not roomGrid.moveObject(obj_type, source, destination):
        messageCenter.setText("I couldn't do that.")
        return False
c3b74736b875e531af29d5f4c470543c0ca975ec
12,020
import logging import io import sys import types def _RunMethod(dev, args, extra): """Runs a method registered via MakeSubparser.""" logging.info('%s(%s)', args.method.__name__, ', '.join(args.positional)) result = args.method(dev, *args.positional, **extra) if result is not None: if isinstance(result, io.StringIO): sys.stdout.write(result.getvalue()) elif isinstance(result, (list, types.GeneratorType)): r = '' for r in result: r = str(r) sys.stdout.write(r) if not r.endswith('\n'): sys.stdout.write('\n') else: result = str(result) sys.stdout.write(result) if not result.endswith('\n'): sys.stdout.write('\n') return 0
34c267aa1379a81bea5c73b0a2fa9f3a9aa9b3d7
12,021
def fully_connected(input, params):
    """Creates a fully connected layer with bias (without activation).

    Args:
        input (:obj:`tf.Tensor`): The input values.
        params (:obj:`tuple` of (:obj:`tf.Variable`, :obj:`tf.Variable`)):
            A tuple of (`weights`, `bias`), e.g. from
            :meth:`fully_connected_params`.

    Returns:
        :obj:`tf.Tensor`: ``input @ weights + bias``.
    """
    weights, bias = params
    projected = input @ weights
    return projected + bias
77815acfe17674bc20035900b75d8e4ddc982855
12,022
def GetIslandSlug(island_url: str) -> str:
    """Convert a JianShu island URL into its island slug.

    Args:
        island_url (str): island URL

    Returns:
        str: island slug
    """
    prefix = "https://www.jianshu.com/g/"
    return island_url.replace(prefix, "")
71e1b609e5bd3c703a4d5d0789fddff117969e69
12,023
def cec_module_spr_e20_327():
    """
    Define SunPower SPR-E20-327 module parameters for testing.

    The scope of the fixture is set to ``'function'`` to allow tests to
    modify parameters if required without affecting other tests.
    """
    return dict(
        Name='SunPower SPR-E20-327',
        BIPV='N',
        Date='1/14/2013',
        T_NOCT=46,
        A_c=1.631,
        N_s=96,
        I_sc_ref=6.46,
        V_oc_ref=65.1,
        I_mp_ref=5.98,
        V_mp_ref=54.7,
        alpha_sc=0.004522,
        beta_oc=-0.23176,
        a_ref=2.6868,
        I_L_ref=6.468,
        I_o_ref=1.88e-10,
        R_s=0.37,
        R_sh_ref=298.13,
        Adjust=-0.1862,
        gamma_r=-0.386,
        Version='NRELv1',
        PTC=301.4,
        Technology='Mono-c-Si',
    )
55593d3a561d6ebafba424f50406128e58406073
12,024
def get_base_classification(x: str) -> str:
    """
    Obtains the base classification for a given node label: the text before
    the first underscore (the whole label when no underscore is present).

    Args:
        x: The label from which to obtain the base classification.

    Returns:
        The base classification.
    """
    base, _, _ = x.partition('_')
    return base
8122d435af8ac6aef43faab349ee98dca75469d4
12,025
def check_individuals(ped_individuals, vcf_individuals):
    """
    Check that every individual from the PED file is present in the VCF file.

    Arguments:
        ped_individuals (iterator): An iterator with strings
        vcf_individuals (iterator): An iterator with strings

    Returns:
        bool: True when all PED individuals exist in the VCF

    Raises:
        IOError: if any PED individual is missing from the VCF
    """
    if any(person not in vcf_individuals for person in ped_individuals):
        raise IOError("Individuals in PED file must exist in VCF file")
    return True
e65b24390c8cebff7870e46790cf1c0e9b2d37c6
12,026
import dill


def load(filename):
    """
    Load an instance of a bayesloop study class that was saved using the
    bayesloop.save() function.

    Args:
        filename(str): Path + filename to stored bayesloop study

    Returns:
        Study instance
    """
    with open(filename, 'rb') as handle:
        study = dill.load(handle)
    print('+ Successfully loaded study.')
    return study
19ce91d2a4bb552362bd8f8ab67194e1241356d1
12,027
def des_magnitude_zero_point(bands=''):
    """
    Return the single-epoch magnitude zero point for each requested DES band.

    Parameters
    ----------
    bands : str
        Comma-separated band names drawn from 'g', 'r', 'i', 'z', 'Y'
        (e.g. "g,r").  An empty string yields an empty list.

    Returns
    -------
    list of float
        One zero point per requested band, in order.
    """
    zero_points = {'g': 26.58, 'r': 26.78, 'i': 26.75, 'z': 26.48, 'Y': 25.40}
    # BUGFIX: the default bands='' used to raise KeyError because
    # ''.split(',') == [''] -- return an empty list instead.
    if not bands:
        return []
    return [zero_points[b] for b in bands.split(',')]
568b6f17abb0957c22cb6c300af06d72b41e6c73
12,028
import configparser
import sys


def loadAuth(path):
    """
    Load github api token from file.

    :param path: File's path
    :type path: string
    :return: token
    :rtype: string
    """
    try:
        config = configparser.ConfigParser()
        config.read(path)
        return config['github']['token']
    except (KeyError, configparser.Error):
        # BUGFIX: previously a bare `except:` swallowed everything, including
        # KeyboardInterrupt/SystemExit.  Only a missing file/section/key
        # (KeyError) or a malformed INI (configparser.Error) means the auth
        # configuration is unusable.
        print("Auth configuration not usable!", file=sys.stderr)
        sys.exit(1)
1e8d78124fe5e664bfb5a7532cc71e5f5d90d9cc
12,031
from typing import List


def load_grid_from_string(grid_str: str) -> List[List[int]]:
    """Returns a 3-column grid loaded from a whitespace-separated string."""
    cells = [int(token) for token in grid_str.split()]
    return [cells[start:start + 3] for start in range(0, len(cells), 3)]
c3b9a91c9298b54226f5ef532d1948a41dc67eac
12,032
def number(bus_stops):
    """Return how many people remain on the bus after the last stop.

    Each element of *bus_stops* is a pair (people getting on, people getting
    off) at one stop; the result is total boardings minus total departures.
    The test cases guarantee the result is never negative.
    """
    return sum(on - off for on, off in bus_stops)
382c0112c223a6681508ec3d7a345ec92391657d
12,033
import torch


def omp(X: torch.Tensor, y: torch.Tensor, *, k=None, device=None) -> torch.Tensor:
    """
    Orthogonal Matching Pursuit: solve y = Xa with a k-sparse coefficient vector.

    :param X: (n_features, n_components) dictionary matrix
    :param y: (n_features,) target vector
    :param k: n_nonzero_coefs; defaults to n_features // 10
    :param device: pytorch device
    :return: (n_components,) sparse coefficient vector
    """
    assert X.dim() == 2
    assert y.dim() == 1
    assert X.size(dim=0) == y.size(dim=0)
    n_features, n_components = X.size()
    if k is None:
        k = n_features // 10

    a = torch.zeros((n_components,), device=device)
    # BUGFIX: with k <= 0 (e.g. n_features < 10 and no explicit k) the
    # original built a float-dtype empty index tensor after the loop and
    # crashed in index_select; the all-zero vector is the 0-sparse solution.
    if k <= 0:
        return a

    r = y
    support = []
    for _ in range(k):
        # Pick the atom most correlated with the current residual.
        correlation = X.T @ r
        support.append(torch.argmax(torch.abs(correlation)))
        index = torch.tensor(support, device=device)
        A_S = torch.index_select(X, 1, index)
        # Project y onto the span of the selected atoms; keep the residual.
        P = A_S @ torch.linalg.pinv(A_S.T @ A_S) @ A_S.T
        r = (torch.eye(n_features, device=device) - P) @ y

    # Least-squares coefficients on the final support.  `index` / `A_S` are
    # the values from the last iteration; recomputing them was redundant.
    x_S = torch.linalg.pinv(A_S.T @ A_S) @ A_S.T @ y
    a[index] = x_S
    return a.reshape(n_components)
5f11464371401e630e0f7a37eedb53d29016bd27
12,035
import random


def generate_random_color():
    """
    Generates the hexadecimal code of a random pastel color.
    """
    # Averaging each random channel with 255 keeps every channel >= 128,
    # which is what makes the color pastel.
    red = (random.randrange(1, 256) + 255) / 2
    green = (random.randrange(1, 256) + 255) / 2
    blue = (random.randrange(1, 256) + 255) / 2

    def clamp(channel):
        return int(max(0, min(channel, 255)))

    return "#{0:02x}{1:02x}{2:02x}".format(clamp(red), clamp(green), clamp(blue))
d6efcb30f1296e031ee764076b2659b0d4a7cd93
12,036
def fixbackslash(value):
    """Double each backslash in encoded polylines for Google Maps overlay."""
    return '\\\\'.join(value.split('\\'))
20a1e6132c379049e949f50e413c66cf5e67e7dc
12,037
def _filter_tuples(diced_str, to_remove): """ Returns *diced_str* with all of the tuples containing any elements of the *to_remove* iterable filtered out. This is used to drop search terms from the diced_str once they've been matched. For example: # start with the output of the _dice doctest >>> p = [('a', 'b', 'c'), ('a', 'b'), ('b', 'c'), ('a',), ('b',), ('c',)] >>> _filter_tuples(p, ("a")) [('b', 'c'), ('b',), ('c',)] >>> _filter_tuples(p, ("b", "c")) [('a',)] """ # true if the tupl does not contain # any of the elements in *to_remove* def _func(tupl): for x in to_remove: if x in tupl: return False return True return filter(_func, diced_str)
625ca421e3b1ec3dd9f5187fe994ee095eff8d30
12,038
def print_xm_info(xm_dict, name_re):
    """Print a dictionary of xmethods.

    For each locus with registered matchers, prints the matcher names and,
    filtered by *name_re* (or all when None), their method names.  Disabled
    entries are annotated with " [disabled]".
    """
    def status(obj):
        return "" if obj.enabled else " [disabled]"

    if not xm_dict:
        return
    for locus_str, matchers in xm_dict.items():
        if not matchers:
            continue
        print("Xmethods in %s:" % locus_str)
        for matcher in matchers:
            print("  %s%s" % (matcher.name, status(matcher)))
            if not matcher.methods:
                continue
            for method in matcher.methods:
                if name_re is None or name_re.match(method.name):
                    print("    %s%s" % (method.name, status(method)))
e2564a5fcb7dc435c3ba1fa71fe82532d2b5083e
12,040
def format_multitask_preds(preds):
    """
    Merge per-task prediction dicts into a single structure.

    Input format: list of dicts (one per task, named with task_name), each
    having a 'predictions' list of per-sentence lists of per-token dicts.
    Each task's score lives in the field {task_name}_score.

    Output format: list of sentences, each a list of token dicts that now
    carry the scores of every task (the first task's tokens are mutated in
    place and collected).
    """
    score_keys = [f"{task['task']}_score" for task in preds[1:]]
    merged = []
    for sentence_idx, sentence in enumerate(preds[0]["predictions"]):
        merged_sentence = []
        for token_idx, token in enumerate(sentence):
            # Copy each secondary task's score onto the base token dict.
            for task, key in zip(preds[1:], score_keys):
                token[key] = task["predictions"][sentence_idx][token_idx][key]
            merged_sentence.append(token)
        merged.append(merged_sentence)
    return merged
470b23f6a5cc6b8e48ce0becfafd62104e016de8
12,041
def msp(items):
    """Yield the permutations of `items`

    items is either a list of integers representing the actual items or a
    list of hashable items. The output are the unique permutations of the
    items.

    Parameters
    ----------
    items : sequence

    Yields
    ------
    list
        permutation of items

    Notes
    -----
    Reference: "An O(1) Time Algorithm for Generating Multiset Permutations",
    Tadao Takaoka.
    https://pdfs.semanticscholar.org/83b2/6f222e8648a7a0599309a40af21837a0264b.pdf
    Taken from @smichr
    """
    # Sort descending; the linked list below starts from this ordering.
    E = list(reversed(sorted([i for i in items])))

    def visit(head):
        # Walk the linked list from `head`, collecting the stored values
        # into a plain list (one full permutation).
        (rv, j) = ([], head)
        for i in range(N):
            (dat, j) = E[j]
            rv.append(dat)
        return rv

    N = len(E)
    # put E into linked-list format: each cell becomes [value, next_index]
    (val, nxt) = (0, 1)
    for i in range(N):
        E[i] = [E[i], i + 1]
    E[-1][nxt] = None
    head = 0
    afteri = N - 1
    i = afteri - 1
    yield visit(head)
    # Takaoka's algorithm: repeatedly splice one cell to the front of the
    # list until the terminating configuration is reached.
    while E[afteri][nxt] is not None or E[afteri][val] < E[head][val]:
        j = E[afteri][nxt]  # added to algorithm for clarity
        # Choose which cell to detach (after `afteri` or after `i`).
        if j is not None and E[i][val] >= E[j][val]:
            beforek = afteri
        else:
            beforek = i
        # Detach cell k and splice it in as the new head.
        k = E[beforek][nxt]
        E[beforek][nxt] = E[k][nxt]
        E[k][nxt] = head
        if E[k][val] < E[head][val]:
            i = k
        afteri = E[i][nxt]
        head = k
        yield visit(head)
a91f2b80997b73cf7dbcbec88a923f855e117c26
12,042
def _NormalizeGitPath(path): """Given a |path| in a GIT repository (relative to its root), normalizes it so it will match only that exact path in a sparse checkout. """ path = path.strip() if not path.startswith('/'): path = '/' + path if not path.endswith('/'): path += '/' return path
f2781ac304d4c15e835fa174d4aa816ea920045f
12,043
def unpack(t, n):
    """Iterates over a promised sequence; the sequence should support random
    access via :meth:`object.__getitem__` and its length `n` must be known
    beforehand.

    :param t: a sequence.
    :param n: the length of the sequence.
    :return: an unpackable generator for the first `n` elements."""
    return (t[index] for index in range(n))
4601cc51b2ae1015a31e8dc525756d8f590c2d8b
12,044
import re


def parse_value(expr):
    """Parse text into a Python expression.

    Args:
        expr: String to be converted to a Python expression (e.g. a list).

    Returns:
        Parsed Python expression.  If *expr* does not evaluate as-is,
        whitespace runs are replaced with commas and it is evaluated again,
        so "1 2 3" parses as the tuple (1, 2, 3).

    Note:
        Uses eval(); never call this on untrusted input.
    """
    try:
        return eval(expr)
    except Exception:
        # Whitespace-separated values: "1 2" -> "1,2" -> (1, 2).
        # (The original also had a dead `else: return expr` clause, which was
        # unreachable because the try branch always returns on success.)
        return eval(re.sub(r"\s+", ",", expr))
f033e35b77bb3839dc6ccc05e1f96898a1a93c0c
12,045
from typing import List


def pkg_asset_download_urls(url) -> List[str]:
    """
    Build the list of candidate download URLs for a package release asset.

    Example release-asset URLs:
      tarball: https://github.com/.../releases/download/fastqc-wf.0.2.0/fastqc-wf.0.2.0.tar.gz
      json:    https://github.com/.../releases/download/fastqc-wf.0.2.0/pkg-release.json

    Release tags changed from "fastqc-wf.0.2.0" to "fastqc-wf.v0.2.0" (the
    added 'v' is a common GitHub practice); for 'v'-tagged URLs the legacy
    un-'v'ed URL is appended as a fallback.  Later this could expand to
    mirror sites.
    """
    urls = [url]
    parts = url.split('/')
    release_tag, filename = parts[-2:]
    if release_tag.split('.')[1].startswith('v'):
        legacy_tag = release_tag.replace('.v', '.', 1)
        legacy_name = filename
        if legacy_name.endswith('.tar.gz'):
            legacy_name = legacy_name.replace('.v', '.', 1)
        urls.append('/'.join(parts[:-2] + [legacy_tag, legacy_name]))
    return urls
2e66629efa07c00473f8bfb86f2db816211d24ed
12,046
def _grid_out_property(field_name, docstring): """Create a GridOut property.""" def getter(self): self._ensure_file() # Protect against PHP-237 if field_name == 'length': return self._file.get(field_name, 0) return self._file.get(field_name, None) docstring += "\n\nThis attribute is read-only." return property(getter, doc=docstring)
71bb15738528142d4e410811ba021884ed5bb5ef
12,047
def create_stack_list(input_list):
    """
    Build a "stack list" of page visits.

    Consecutive duplicate visits are dropped.  Revisiting a page already on
    the stack pops the top of the stack and records a backward jump.

    :param input_list: list (of page visits), duplicates are possible
    :return: (stack_list, backward_jumps) where backward_jumps holds
        (popped_page, revisited_page) tuples
    """
    stack = []
    backward_jumps = []
    previous = object()  # sentinel: never equal to a real visit
    for visit in input_list:
        if visit == previous:
            continue
        previous = visit
        if visit in stack:
            backward_jumps.append((stack.pop(), visit))
        else:
            stack.append(visit)
    if 'end' in input_list:
        print('sprünge nach "end":')
        print(input_list[input_list.index('end'):])
    return stack, backward_jumps
d82d4b3825d2d034eb4aad4c39f8c0876fb82c86
12,049