content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def _sum_edge_attr(G, node, attr, method='edges', filter_key=None, split_on='-', include_filter_flags=None, exclude_filter_flags=None): """accumulate attributes for one node_id in network G Parameters ---------- G : networkx.Graph or networkx.MultiGraph a graph network to sum edge attributes at a given node. NOTE: For Directed graphs (DiGraph and MultiDiGraph) the 'edges' method is equivalent to the 'out_edges' method. node : string or int the networkx node at which to query edge attributes attr : string an edge attribute key that maps to an int or float. Strings will not throw an error, but string concatenation is not the purpose of this function. method : string, optional (default='edges') a method name to select edges for the summation. Valid options include 'edges' (default), 'in_edges' and 'out_edges'. NOTE: For Directed graphs (DiGraph and MultiDiGraph) the 'edges' method is equivalent to the 'out_edges' method. filter_key : string, optional (default=None) edge attribute key that will be searched by the filter flags kwargs split_on : string, optional (default='-') filter_key string will be split by this character to form a list of flags. include_filter_flags : list, optional (default=None) exclude_filter_flags : list, optional (default=None) Returns ------- float the sum of the values associated with the `attr` """ edges = getattr(G, method)(node, data=True) if not edges: return 0 includes = edges if include_filter_flags is not None: includes = [ edge for edge in edges if any([ i in str(edge[2][filter_key]).split(split_on) for i in include_filter_flags])] excludes = [] if exclude_filter_flags is not None: excludes = [ edge for edge in edges if any([ i in str(edge[2][filter_key]).split(split_on) for i in exclude_filter_flags])] edges = [i for i in includes if i not in excludes] return sum([data.get(attr, 0) for _from, _to, data in edges])
270f0afb943b6c6828a6cc8a22452ee5eabbcfb8
28,320
def get_team_from_city(city):
    """Return the NHL team abbreviation for a city name.

    Parameters
    ----------
    city : str
        Upper-case, space-free city name, e.g. "BOSTON" or "NYRANGERS".

    Returns
    -------
    str
        Three-letter abbreviation (e.g. "BOS"), or "nope" for an unknown
        city (preserves the original fallback value).
    """
    # The original built the reverse mapping and flipped it at every call;
    # store the lookup in the direction it is actually used.
    city_to_team = {
        "ANAHEIM": "ANA",
        "ARIZONA": "ARI",
        "BOSTON": "BOS",
        "BUFFALO": "BUF",
        "CAROLINA": "CAR",
        "COLUMBUS": "CBJ",
        "CALGARY": "CGY",
        "CHICAGO": "CHI",
        "COLORADO": "COL",
        "DALLAS": "DAL",
        "DETROIT": "DET",
        "EDMONTON": "EDM",
        "FLORIDA": "FLA",
        "LOSANGELES": "LAK",
        "MINNESOTA": "MIN",
        "MONTREAL": "MTL",
        "NEWJERSEY": "NJD",
        "NASHVILLE": "NSH",
        "NYISLANDERS": "NYI",
        "NYRANGERS": "NYR",
        "OTTAWA": "OTT",
        "PHILADELPHIA": "PHI",
        "PITTSBURGH": "PIT",
        "SANJOSE": "SJS",
        "SEATTLE": "SEA",
        "STLOUIS": "STL",
        "TAMPABAY": "TBL",
        "TORONTO": "TOR",
        "VANCOUVER": "VAN",
        "VEGAS": "VGK",
        "WINNIPEG": "WPG",
        "WASHINGTON": "WSH",
    }
    return city_to_team.get(city, "nope")
6f4bdb88128119e8951ce18dddcb4d6137b9797f
28,321
def sample_address():
    """Build and return a minimal legal US address fixture."""
    address = dict(
        first_name="Test",
        last_name="User",
        country="US",
    )
    return address
c164536592489b11abf6eaf3f2b9124759d483ae
28,322
def get_column_indexes(column_name_items):
    """Return the indexes of the columns of interest in a CSV header.

    :param column_name_items: List of column names.
    :type column_name_items: list
    :return: Indexes of 'TRUTH.TOTAL', 'QUERY.TP', 'QUERY.FP', 'TRUTH.FN',
        'METRIC.Precision' and 'METRIC.Recall', in that order.
    :rtype: list
    """
    wanted_columns = (
        'TRUTH.TOTAL',
        'QUERY.TP',
        'QUERY.FP',
        'TRUTH.FN',
        'METRIC.Precision',
        'METRIC.Recall',
    )
    return [column_name_items.index(column) for column in wanted_columns]
51c38d048db6530bc502dfb64e95384ee096428a
28,324
def is_sorted(t):
    """Predicate: True when list ``t`` is in ascending order.

    t: list

    An empty or single-element list is trivially sorted.  We compare
    neighbouring elements with ``<`` (the same operator sorting uses)
    rather than building a sorted copy.
    """
    return not any(right < left for left, right in zip(t, t[1:]))
3c346a349cd0d870c5cef549a574674ea566ae6c
28,326
import typing

def get_param_query(sql: str, params: dict) -> typing.Tuple[str, tuple]:
    """
    Re-do a SQL query so that it uses asyncpg's positional query format.

    :param sql: The SQL statement, with ``{name}`` style placeholders.
    :param params: The dict of parameters to substitute.
    :return: A two-item tuple of (new_query, arguments), where the query
        uses ``$1, $2, ...`` markers and arguments are in matching order.
    """
    if not params:
        # Empty/None params: nothing to substitute.  (The original also
        # checked ``len(params) < 1``, which is redundant with falsiness.)
        return sql, ()
    # Map each parameter name to its 1-based positional marker; dict
    # iteration order (insertion order) fixes the argument order.
    fmt_dict = {name: "${}".format(position)
                for position, name in enumerate(params, start=1)}
    sql_statement = sql.format(**fmt_dict)
    return sql_statement, tuple(params.values())
44d2316f346ec53354d7ebeb69387c093ab3089b
28,327
def csv_list(value):
    """
    Convert a comma separated string into a list.

    Parameters
    ----------
    value : str
        The string to split on ','.

    Returns
    -------
    list
        The non-empty, whitespace-stripped items; [] for falsy input.
    """
    if not value:
        return []
    stripped_items = (piece.strip() for piece in value.split(','))
    return [piece for piece in stripped_items if piece]
d65e004eb6696e7418e4f5f65a6271562c462cab
28,328
def get_ifort_versions(self, eval_and_save=True):
    """Collect Intel Fortran (ifort) compiler configurations.

    NOTE(review): ``eval_and_save`` is not used by this body — confirm
    whether callers rely on it before removing it.

    :param eval_and_save: unused here; kept for interface compatibility
    :return: platforms to compiler configurations
    :rtype: dict
    """
    dct = {}
    # Population of the mapping is delegated to the instance helper.
    self.gather_ifort_versions(dct)
    return dct
8241ae20ba7a4808f7ffe645385f3bb9b326f453
28,329
def find_section_id(sections, id):
    """Return the index of the first section whose 'id' equals ``id``.

    Sections without an 'id' key are skipped; None is returned when no
    section matches.
    """
    for position, candidate in enumerate(sections):
        try:
            matches = candidate['id'] == id
        except KeyError:
            # This section carries no id at all — ignore it.
            continue
        if matches:
            return position
    return None
5ee29faea5a0966873966fc85ecfe1f89b08ecbb
28,330
import argparse

def parse_arguments():
    """Build the command-line parser for exporting Rfam files from SVN.

    Exactly one file type (--seed / --cm / --tree) and one input source
    (--acc / -f) may be chosen; --dest-dir selects the output directory.

    Returns
    -------
    argparse.ArgumentParser
        The configured parser (callers invoke ``parse_args`` on it).
    """
    parser = argparse.ArgumentParser()

    # The three export types are mutually exclusive.
    mutually_exclusive_type = parser.add_mutually_exclusive_group()
    mutually_exclusive_type.add_argument("--seed", help="Export SEED files from SVN",
                                         action="store_true", default=False)
    mutually_exclusive_type.add_argument("--cm", help="Export CM files from SVN",
                                         action="store_true", default=False)
    # BUG FIX: the --tree help text was a copy-paste of --seed's.
    mutually_exclusive_type.add_argument("--tree", help="Export tree files from SVN",
                                         action="store_true", default=False)

    # One accession on the command line, or a file listing many.
    mutually_exclusive_input = parser.add_mutually_exclusive_group()
    mutually_exclusive_input.add_argument("--acc", help="Rfam family accession",
                                          action="store", type=str)
    mutually_exclusive_input.add_argument("-f", help="List of Rfam family accessions (.txt)",
                                          action="store", type=str)

    parser.add_argument("--dest-dir", help="Destination directory to generate files to")
    return parser
f9d0813c4629ea2720fa17d5ec1afe3a8c04167b
28,332
import typing

def voiced_variants(base_phone) -> typing.Set[str]:
    """
    Generate variants of a voiced IPA phone.

    Parameters
    ----------
    base_phone: str
        Voiced IPA phone

    Returns
    -------
    set[str]
        The base phone plus its suffixed/prefixed diacritic variants.
    """
    suffix_marks = ["", "ʱ", "ʲ", "ʷ", "ⁿ", "ˠ", "̚"]
    prefix_marks = ["ⁿ"]
    variants = {base_phone + mark for mark in suffix_marks}
    variants |= {mark + base_phone for mark in prefix_marks}
    return variants
99111f1fcbfabb27a22efb75121d9f71cf76b64b
28,333
def clean_string(s: str, extra_chars: str = ""):
    """Replace special characters with underscores and trim whitespace.

    Parameters
    ----------
    s : str
        String to clean.
    extra_chars : str, optional
        Additional characters (beyond "-.()%") replaced by underscores.

    Returns
    -------
    str
        The cleaned string.
    """
    # One translation table beats N chained .replace() calls.
    table = str.maketrans({character: "_" for character in "-.()%" + extra_chars})
    return s.translate(table).strip()
7f93f1fea075bb09ba3b150a6ff768d0a266093c
28,335
from typing import Counter

def count_characters_two( string ):
    """Return a Counter mapping each character of ``string`` to its count."""
    return Counter(string)
d2c3b5eef156507f2b7b8b9a3b3b5a1a54a0a766
28,336
import torch

def unique_2d(*X):
    """Get the unique combinations of inputs X.

    Parameters
    ----------
    X : array-like of type=int and shape=(n_samples, n_features)
        Input events for which to get unique combinations

    Returns
    -------
    *X_unique : np.array of shape=(n_samples_unique, n_features)
        Unique input event combinations

    inverse : np.array of shape=(n_samples,)
        Inverse used to reconstruct original values
    """
    # Widths let us split the combined unique rows back per input.
    widths = [x.shape[1] for x in X]
    unique, inverse = torch.unique(
        torch.cat(X, dim=1), dim=0, return_inverse=True
    )
    # torch.split slices the columns back into the original groupings.
    chunks = list(torch.split(unique, widths, dim=1))
    chunks.append(inverse)
    return tuple(chunks)
8a4580c9dbbc8118f1f43d723874ccd26c4eb1ec
28,337
def findTilt(root):
    """
    Return the tilt of a binary tree: the sum over every node of
    |sum of left-subtree values - sum of right-subtree values|.

    :type root: TreeNode (with .val, .left, .right) or None
    :rtype: int

    BUG FIX: the original recursed on abs-differences of child *tilts*
    and never read ``node.val`` at all (and crashed on root=None); the
    tilt must be computed from subtree value sums.
    """
    def _subtree(node):
        # Returns (value_sum, tilt_sum) for the subtree rooted at node.
        if node is None:
            return 0, 0
        left_sum, left_tilt = _subtree(node.left)
        right_sum, right_tilt = _subtree(node.right)
        tilt = left_tilt + right_tilt + abs(left_sum - right_sum)
        return node.val + left_sum + right_sum, tilt

    return _subtree(root)[1]
5b179d44ff2944b3a212fde7c733ddf824905f2e
28,338
import random

def random_color():
    """Return a random RGB color as an (r, g, b) tuple.

    Each channel is an independent uniform integer in 0..255.
    """
    # Three draws, in R, G, B order.
    return tuple(random.randint(0, 255) for _ in range(3))
35a91edab729014580476f840b4c79b26b720d03
28,339
import torch

def run_single(model: torch.nn.Module, *args) -> torch.Tensor:
    """Run one unbatched sample through a PyTorch model.

    Each argument gains a leading batch dimension of 1 before the call,
    and the result has that dimension removed again.
    """
    batched_args = tuple(tensor.unsqueeze(0) for tensor in args)
    batched_output = model(*batched_args)
    return batched_output.squeeze(0)
f0c74c90a403086cf1f0057a3ee4f8d785668e26
28,340
def coverage(cov):
    """Parse a raw sky-coverage string (e.g. "FEW020 BKN100") into a dict.

    Parameters
    ----------
    cov : str
        Space-separated cloud layers — "CLR", or a 3-letter code plus a
        height in hundreds of feet (e.g. "BKN100") — or the string "None".

    Returns
    -------
    dict
        Keys 'l1_cond'/'l1_hgt' .. 'l4_cond'/'l4_hgt' for up to four
        layers, plus 'parsed' (the input), 'unit', and a human-readable
        'string' (or 'N/A' when cov == "None").
    """
    d = {'parsed': 'None', 'l1_cond': 'None', 'l1_hgt': 'None',
         'l2_cond': 'None', 'l2_hgt': 'None', 'l3_cond': 'None',
         'l3_hgt': 'None', 'l4_cond': 'None', 'l4_hgt': 'None',
         'unit': 'None', 'string': 'N/A'}
    if cov == "None":
        return d
    d['parsed'] = cov  # add in the parsed string
    d['unit'] = 'feet'  # set the unit.
    coverage_split = cov.split(" ")
    cloud_d = {'CLR': 'Clear', 'FEW': 'Few', 'SCT': 'Scattered',
               'BKN': 'Broken', 'OVC': 'Overcast'}
    string = ''
    for index, element in enumerate(coverage_split):
        if 'CLR' in element:
            # BUG FIX: layer keys are 1-based (l1..l4); the original wrote
            # f'l{index}_cond' here, creating a stray 'l0_cond' key and
            # leaving 'l1_cond' at its default.
            d[f'l{index + 1}_cond'] = 'Clear'
            d[f'l{index + 1}_hgt'] = '0000'
            string = 'Clear'
        else:
            if index > 0:
                # adds in the comma appropriately.
                string += ", "
            # extract the conditions, make english-like, same with height.
            conditions = cloud_d[element[:3]]
            height = str(int(element[3:]) * 100)
            # add into dictionary at the appropriate (1-based) layer level.
            d[f'l{str(index + 1)}_cond'] = conditions
            if height != '0':
                d[f'l{str(index + 1)}_hgt'] = height
                # form the string and append.
                string += conditions + " at " + height + " feet"
            else:
                d[f'l{str(index + 1)}_hgt'] = "0000"
                # a parsed height of 0 means the layer sits at the surface.
                string += conditions + " at surface"
    string += '.'  # append a period to the string.
    d['string'] = string  # add in the string.
    return d
240ddb71624b60327f4bb2973d0519a77e9261e1
28,341
def linspace(start, stop, num):
    """Return evenly spaced values from start to stop (numpy-free).

    NOTE(review): this differs from numpy.linspace — it returns
    ``num + 1`` values with step ``(stop - start) / num`` (i.e. ``num``
    is treated as the number of intervals, not points), and for
    ``num == 1`` it returns the scalar ``stop`` rather than a list.
    Confirm callers expect these semantics before changing them.

    Parameters: start, stop (numeric endpoints); num (int).
    Returns: list of num + 1 values, or the scalar stop when num == 1.
    """
    if num == 1:
        return stop
    # Step size treats `num` as the number of intervals between points.
    h = (stop - start) / float(num)
    values = [start + h * i for i in range(num+1)]
    return values
13b24de6255426196d234d9a61ce33d7e5db5879
28,342
def check_matching(adj_matrix):
    """Eliminate rows of ``adj_matrix`` modulo 727, checking for det = 0.

    For each row, find its first non-zero column; if a row is entirely
    zero, the determinant is 0 and False is returned.  Otherwise every
    other row with a non-zero entry in that column is replaced by a
    modular linear combination that zeroes the entry.  Returns True when
    no all-zero row is encountered.  Mutates ``adj_matrix`` in place.

    NOTE(review): rows are combined with whole-row arithmetic
    (``row * scalar - row * scalar) % div``), so rows are presumably
    numpy arrays — plain Python lists would not support this; confirm
    against callers.
    """
    div = 727  # prime modulus keeping entries small during elimination
    for cur_ind in range(len(adj_matrix)):
        # Locate the pivot: first non-zero column in the current row.
        col_ind = -1
        for j in range(len(adj_matrix)):
            if adj_matrix[cur_ind][j] != 0:
                col_ind = j
                break
        if col_ind == -1:
            # All-zero row => singular matrix.
            return False
        # Zero out the pivot column in every other row (mod div).
        for other_ind in range(len(adj_matrix)):
            if other_ind == cur_ind:
                continue
            if adj_matrix[other_ind][col_ind] != 0:
                adj_matrix[other_ind] = (adj_matrix[other_ind] * adj_matrix[cur_ind][col_ind] - adj_matrix[cur_ind] * adj_matrix[other_ind][col_ind]) % div
    return True
c7f3c6aa391e682586651a1b4fddac7b67649ce1
28,343
import hashlib

def hash_file(file_path: str) -> str:
    """
    Return the SHA-1 hex digest of the file at ``file_path``.

    BUG FIX: the original opened the file in text mode and re-encoded as
    UTF-8, which fails on binary/non-UTF-8 files and can change the hash
    via newline translation.  Reading in binary mode makes the digest
    match ``sha1sum``; chunked reads keep memory bounded for large files.

    :param file_path: path of the file to hash
    :return: sha1 hex digest string
    """
    hash_object = hashlib.sha1()
    with open(file_path, "rb") as content_file:
        for chunk in iter(lambda: content_file.read(65536), b""):
            hash_object.update(chunk)
    return hash_object.hexdigest()
fb71d4610a3b081b5b69e49c721fa3a0da61e859
28,345
def is_minimum_in_period(cube):
    """Check if cube contains minimum values during time period.

    True when any of the cube's cell methods is a 'minimum' over the
    'time' coordinate.

    BUG FIX: the original fell off the end and implicitly returned None
    in the negative case; this returns an explicit bool (still falsy,
    so truth-value callers are unaffected).
    """
    return any(
        cell_met.method == 'minimum' and 'time' in cell_met.coord_names
        for cell_met in cube.cell_methods
    )
a06b08c32a1529877f9dc62824823048232fe906
28,348
def create_sorted_stats_dfs(joined_df):
    """Split phrase statistics into four trend-sorted dataframes.

    ``joined_df`` is indexed by phrase and carries (among others) the
    percentage-difference columns 'total_occurrences_diff' and
    'doc_differences' (later year minus start year).  Four frames are
    produced: (i) positive occurrence trend, descending; (ii) negative
    occurrence trend, ascending (largest decreases first); (iii) positive
    doc trend, descending; (iv) negative doc trend, ascending.  Phrases
    with a 0 difference appear in none of them.
    """
    def _split_by_sign(frame, column):
        # Positive trend: biggest increases first; negative trend:
        # ascending so the biggest decreases come first.
        positive = frame[frame[column] > 0].sort_values(by=column, ascending=False)
        negative = frame[frame[column] < 0].sort_values(by=column)
        return positive, negative

    pos_occurrences, neg_occurrences = _split_by_sign(joined_df, 'total_occurrences_diff')
    pos_docs, neg_docs = _split_by_sign(joined_df, 'doc_differences')
    return pos_occurrences, neg_occurrences, pos_docs, neg_docs
ca62bc9d62c393dcf0be0fd28787989d2ffa01e7
28,350
def describe(stringParam):
    """Decorator factory: attach a description to a test method.

    The description is stored on the function's ``description`` attribute
    (used for reporting readability); the function itself is returned
    unchanged.
    """
    def decorator(method):
        # Refuse to decorate anything that cannot be invoked.
        assert callable(method), 'Method is not callable. {0}'.format(method)
        method.description = stringParam
        return method
    return decorator
2644fef9778275653a6a23f416659a6b926dc1ed
28,352
import csv

def relationship(flights_file, planes_file):
    """Return the header columns two CSV files share, or None when none do.

    Only the first (header) row of each file is read; shared columns are
    returned in the order they appear in ``flights_file``.
    """
    def _header(path):
        # The header is the first row emitted by the csv reader.
        with open(path) as handle:
            return next(csv.reader(handle))

    flight_columns = _header(flights_file)
    plane_columns = _header(planes_file)
    shared = [column for column in flight_columns if column in plane_columns]
    return shared or None
905c5a3ffccc6c751b4205e49f288bee6dc311ac
28,354
def get_efficiency(rewards, episode_length, hyperams):
    """Calculate the "agario mass efficiency" of an episode.

    The total reward (mass gained) is normalized by episode length and by
    the pellet density of the arena, so performance is comparable across
    episodes of different lengths, arena sizes and pellet counts.
    """
    total_return = rewards.sum()
    # Pellets per unit of arena area.
    pellet_density = hyperams.num_pellets / hyperams.arena_size ** 2
    return total_return / (episode_length * pellet_density)
579f6d9efdc900c5e4ea39bb6bbf6ff326fa6117
28,355
def calc_tn_forClass(class_no, classes):
    """Calculate the true negatives for one class of a confusion matrix.

    Sums every cell that lies outside both the row and the column of
    ``class_no``.
    """
    return sum(
        classes[row][col]
        for row in range(len(classes))
        if row != class_no
        for col in range(len(classes[row]))
        if col != class_no
    )
f3ad2b88ba7e42a14335c1908b008e95f4e8c6b4
28,357
import torch

def tensor(x):
    """Construct a PyTorch tensor of data type ``torch.float64``.

    Args:
        x (object): Object to build the tensor from.

    Returns:
        torch.Tensor: double-precision tensor holding ``x``.
    """
    double_precision = torch.float64
    return torch.tensor(x, dtype=double_precision)
2734d09e8c3a563dda48f7954029f3f857b3aff3
28,359
def cache_arr(arr):
    """
    Identity pass-through used as a memoization anchor: holding a
    reference to the most recent entries keeps them alive for the cache.

    HACK: workaround until RAPIDS supports Arrow 2.0, at which point
    pa.Table becomes weakly referenceable and this indirection can go.
    """
    return arr
f46242729bf75cbb4709867d503d1e3984ca4e4d
28,360
import asyncio
def loop():
    """Ensure usable event loop for everyone.

    If you comment this fixture out, default pytest-aiohttp one is used
    and things start failing (when redis pool is in place).

    NOTE(review): asyncio.get_event_loop() is deprecated when there is no
    running loop (Python 3.10+) and may warn or raise rather than create
    one — confirm behaviour on the project's target Python version.
    """
    try:
        return asyncio.get_event_loop()
    except RuntimeError:
        # No current loop in this thread — make a fresh one.
        return asyncio.new_event_loop()
f72466ecc975981fbf803f7971112ac3a3aab229
28,361
def parse_right_anchor_intersects(right_file, loop_coords, intersects):
    """
    Parse file of right-anchor intersects.

    Each line is whitespace-split; columns 2/3 carry the Rep1 end/ID and
    columns 11/12 the Rep2 end/ID (presumably a BED-style intersect file
    — confirm against the producer).  Both ``loop_coords`` and
    ``intersects`` are mutated in place and also returned.

    :param right_file: path to the intersect file to read
    :param loop_coords: dict of loop ID -> coords dict with an 'end' key
    :param intersects: nested dict loop_id_1 -> loop_id_2 -> flag pair
    :return: (loop_coords, intersects) after updating
    """
    with open(right_file, 'r') as f:
        for line in f:
            entry = line.strip().split()
            # Fix end position for Rep1 Loop ID
            loop_id_1 = entry[3]
            if loop_id_1 in loop_coords:
                loop_coords[loop_id_1]['end'] = entry[2]
            # Fix end position for Rep2 Loop ID
            loop_id_2 = entry[12]
            if loop_id_2 in loop_coords:
                loop_coords[loop_id_2]['end'] = entry[11]
            # Record if right anchor intersects as well
            if loop_id_1 in intersects:
                if loop_id_2 in intersects[loop_id_1]:
                    # [left, right] anchor flags; both anchors intersect now.
                    intersects[loop_id_1][loop_id_2] = [1, 1]
    return loop_coords, intersects
6582ce9a8d63a0aefb10014160b2b87d35fed2b0
28,362
def _first_or_none(array): """ Pick first item from `array`, or return `None`, if there is none. """ if not array: return None return array[0]
e8430cf316e12b530471f50f26d4f34376d31ce2
28,363
def _approx_lm_beams(beams, precis=5): """Return beams with scores rounded.""" simple_beams = [] for text, _, frames, s1, s2 in beams: simple_beams.append((text, frames, round(s1, precis), round(s2, precis))) return simple_beams
59db43e0141d2ccb60e91c182fbcfc68aadf40ef
28,364
import os

def eazy_filenames(input_dir, name):
    """
    Generate names for EAZY files

    Args:
        input_dir (str):
            Path to eazy inputs/ folder (can be relative).  Created —
            including any missing parent directories — if absent.
        name (str):
            Name of the source being analyzed

    Returns:
        tuple: catalog_filename, parameter_filename, translate_file
    """
    # makedirs with exist_ok avoids the race between an isdir check and
    # mkdir, and (unlike the original os.mkdir) creates missing parents.
    os.makedirs(input_dir, exist_ok=True)
    catfile = os.path.join(input_dir, '{}.cat'.format(name))
    param_file = os.path.join(input_dir, 'zphot.param.{}'.format(name))
    translate_file = os.path.join(input_dir, 'zphot.translate.{}'.format(name))
    #
    return catfile, param_file, translate_file
071be46eacccdd8f3e29da764e24257a517f5bdf
28,366
def dict_subtract(d1, d2):
    """Subtract one dictionary from another, key-wise.

    Args:
        d1 (dict): First dictionary.
        d2 (dict): Second dictionary.

    Returns:
        dict: `d1 - d2` per key.

    Raises:
        ValueError: when the two dictionaries' key sets differ.
    """
    # dict key views compare set-like, so no explicit set() is needed.
    if d1.keys() != d2.keys():
        raise ValueError("Dictionaries have different keys.")
    return {key: d1[key] - d2[key] for key in d1}
a8098f66ce1ca85803c90d9cfc058ae96b3f8123
28,369
import re

def clean_name(name: str) -> str:
    """
    Normalize a name: collapse whitespace runs to single spaces,
    lower-case, and transliterate German characters
    (ß -> ss, ä -> ae, ü -> ue, ö -> oe).
    """
    collapsed = re.sub(r"\s+", " ", name)
    # ß has no plain lower-case mapping, so handle it before lowering;
    # the umlauts are replaced afterwards in one translate pass.
    lowered = collapsed.replace("ß", "ss").lower()
    umlaut_table = str.maketrans({"ä": "ae", "ü": "ue", "ö": "oe"})
    return lowered.translate(umlaut_table)
8cb8ba45fcec1dcc0e04ccfcd4263ae3e82e9fb5
28,370
def get_and_unpack(x):
    """
    Decompose a list of remote results, each a tuple of lists, into one
    concatenated list per tuple position.

    For example:
        @ray.remote
        def f():
            return ['a', 'a'], ['b', 'b']

        get_and_unpack(ray.get([f.remote() for _ in range(2)]))
        >>> [['a', 'a', 'a', 'a'], ['b', 'b', 'b', 'b']]
    """
    return [
        [element for chunk in position_chunks for element in chunk]
        for position_chunks in zip(*x)
    ]
4109297938aa08a7761be81587937106a6ecad5d
28,372
def mock_cube_to_empty_field(cube):
    """Return associated field for mock cube.

    Test helper: simply reads the mock's pre-set ``field`` attribute.
    """
    return cube.field
a7fa930d1a0982e8ee2752c3da22f5e722d93d96
28,375
import os

def ext_name_to_path(name):
    """Convert a dotted extension name to a path (no file extension).

    Example: foo.bar -> foo/bar (using the platform path separator)
    """
    return os.sep.join(name.split('.'))
73ff171288969884a56add436c7b7d9396ba5ca2
28,376
import re

def check_id_card(id_card):
    """
    Check that a Chinese resident ID number is well formed.

    Accepts the 18-character format (17 digits plus a [0-9xX] check
    character) or the legacy 15-digit format.  Only the format — region
    prefix, plausible date fields, sequence digits — is validated, not
    the checksum.

    :param id_card: the ID string to validate
    :return: True when the format matches, else False
    """
    patterns = (
        r'[1-9]\d{5}[12]\d{3}(0[1-9]|1[0-2])(0[1-9]|[12]\d|3[01])\d{3}[0-9xX]$',
        r'[1-9]\d{7}(0[1-9]|1[0-2])(0[1-9]|[12]\d|3[01])\d{3}$',
    )
    # re.match anchors at the start; each pattern carries its own '$'.
    return any(re.match(pattern, id_card) for pattern in patterns)
db3e44679d84e1b33b4f5da461e8d7295ec4d344
28,378
import os

def folder_tree(tmp_path):
    """Create a folder tree containing files, a symlink and a hardlink.

    Layout under ``tmp_path`` (a pathlib.Path): d1/ and d2/d3/, with a
    1 KiB file in d1 and d2, plus d3/symlink and d3/hardlink both
    pointing at d1/testfile.  Returns ``tmp_path``.
    """
    # Empty directory skeleton: d1 alongside d2/d3.
    dir_one = tmp_path / "d1"
    dir_one.mkdir()
    dir_three = tmp_path / "d2" / "d3"
    dir_three.mkdir(parents=True)
    dir_two = dir_three.parent

    # 1 KiB payload files.
    payload = b"X" * 1024
    (dir_one / "testfile").write_bytes(payload)
    (dir_two / "testfile").write_bytes(payload)

    # d3 references d1's file once softly and once hard.
    (dir_three / "symlink").symlink_to(dir_one / "testfile")
    os.link(dst=str(dir_three / "hardlink"), src=str(dir_one / "testfile"))
    return tmp_path
b6d965969afe3ffce0e427f2f2030e0442f4b227
28,379
import json

def json_from_file(file_path):
    """
    Read a file and convert it into a json object.

    BUG FIX: the original opened the file without ever closing it,
    leaking the handle; a context manager guarantees closure even when
    parsing raises.

    :param file_path: path of file
    :return: the parsed JSON object
    """
    with open(file_path, "r") as handle:
        return json.load(handle)
3342c083e64254a271b31b1c7a6747ac448f4078
28,381
import torch

def batch_audio(audios, max_frames=2048):
    """Merge audio captions into one zero-padded batch.

    Each clip is truncated to at most ``max_frames`` frames and padded
    with zeros up to the longest retained length.

    Returns a (batch, features, time) tensor (note the permute) and a
    tensor of the per-clip retained lengths.
    """
    retained = [min(len(clip), max_frames) for clip in audios]
    feature_dim = audios[0].size(1)
    padded = torch.zeros(len(audios), max(retained), feature_dim)
    for index, clip in enumerate(audios):
        frames = retained[index]
        padded[index, :frames] = clip[:frames]
    return padded.permute(0, 2, 1), torch.tensor(retained)
79ea2a9e8459dda945f00e4f9a4edb22a445da93
28,382
def aliquot_sum(input_num: int) -> int:
    """
    Return the aliquot sum of ``input_num``: the sum of all natural
    numbers strictly less than n that divide n evenly.  For example,
    the aliquot sum of 15 is 1 + 3 + 5 = 9.

    This is a simple O(n) implementation.

    :param input_num: a positive integer whose aliquot sum is wanted
    :return: the aliquot sum of input_num, when input_num is positive;
        otherwise a ValueError is raised (the error messages are in
        Indonesian, preserved as runtime behavior)

    Wikipedia: https://en.wikipedia.org/wiki/Aliquot_sum

    >>> aliquot_sum(19)
    1
    """
    if not isinstance(input_num, int):
        raise ValueError("input harus integer")
    if input_num <= 0:
        raise ValueError("input harus positif")
    # No proper divisor can exceed n // 2, so stop the scan there.
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1)
        if input_num % divisor == 0
    )
28b86964483637763721705e6d57b01846cc9e1b
28,383
def get(node, name):
    """
    Given a KML Document Object Model (DOM) node, return a list of its
    sub-nodes that have the given tag name.

    Thin wrapper over the DOM's ``getElementsByTagName``, which per the
    DOM specification searches all descendants, not only direct children.
    (Note: the function name shadows no particular builtin but is very
    generic; it is kept as-is since it is the public interface.)
    """
    return node.getElementsByTagName(name)
84c48b9e1ec1e71ee615357a10586950f995c991
28,384
import tempfile
import os
import json
import shutil
import subprocess

def client_worker(request):
    """
    Defines a client worker fixture.

    This creates a docker container with SSH enabled and the code mounted
    to test the evaluator's client workers.

    Side effects: creates a temp directory with a worker.json descriptor,
    starts a privileged docker container named from the "base" image with
    the current working directory mounted at /code, and registers a pytest
    finalizer that stops the container and removes the temp directory.

    :param request: pytest fixture request object (used for addfinalizer)
    :return: dict with keys 'worker' (path to worker.json), 'id'/'cid'
        (full/short container id) and 'ip'
    """
    container = {}
    # Scratch area holding the worker descriptor consumed by the evaluator.
    worker_path = tempfile.mkdtemp()
    os.makedirs(os.path.join(worker_path, "test_container"))
    info_path = os.path.join(worker_path, "worker.json")
    # Connection details matching the container started below (SSH on 2222).
    worker_dict = {
        "ip" : "127.0.0.1",
        "port" : 2222,
        "username" : "root",
        "password" : "Docker!",
        "geneva_path" : "/code",
        "python" : "python3"
    }
    with open(info_path, "w") as fd:
        json.dump(worker_dict, fd)
    container["worker"] = info_path

    def fin(cid):
        # Teardown: remove scratch dir, then stop the container if started.
        shutil.rmtree(worker_path)
        if cid:
            print("\nCleaning up container")
            subprocess.check_call(["docker", "stop", cid])
            cid = None

    # Run the base docker container to give us a worker client
    cid = subprocess.check_output(["docker", "run", "--privileged", "--dns=8.8.8.8", "-id", "-p", "2222:22", "-v", "%s:/code" % os.path.abspath(os.getcwd()), "base"]).decode("utf-8").strip()
    request.addfinalizer(lambda: fin(cid))
    print("\nCreated container %s" % cid[:8])
    container["id"] = cid
    container["cid"] = cid[:8]
    #output = subprocess.check_output(["docker", "exec", "-i", cid, "ifconfig", "eth0"])
    #ip = re.findall( r'[0-9]+(?:\.[0-9]+){3}', output.decode("utf-8"))[0]
    #print("Parsed container ip: %s" % ip)
    container["ip"] = "0.0.0.0"
    # Bring up sshd inside the container so the worker can connect.
    subprocess.check_call(["docker", "exec", "-i", cid, "service", "ssh", "start"])
    return container
1551054faefd0082cf3e5480f6432b560db2b94e
28,386
import random

def eval_datasets_flist_reader_balance2(flist):
    """
    Read a query/reference file list and return a 1:1 balanced pair list.

    :param flist: path to a CSV-like file, one "query,reference" pair per
        line; a header line starting with 'query_id' is skipped
    :return: shuffled list of (q, r, label) tuples containing every
        labeled pair (non-empty reference) plus an equal-sized random
        sample of unlabeled pairs (empty reference)

    NOTE(review): uses the module-level random state unseeded, so output
    order varies between runs; random.sample raises ValueError when there
    are fewer unlabeled than labeled pairs (per the stdlib contract) —
    confirm both are acceptable to callers.
    """
    imlist = []
    label = 0
    with open(flist, 'r') as rf:
        for line in rf.readlines():
            q, r = line.strip().split(',')
            if q == 'query_id':
                # Header row — do not emit and do not advance the label.
                continue
            imlist.append((q, r, label))
            label += 1
    # balance the labeled pairs and unlabeled pairs
    balance_list = []
    unlabeled_list = []
    for pair in imlist:
        q, r, _ = pair
        if r == '':
            # no ap pair
            unlabeled_list.append(pair)
        else:
            balance_list.append(pair)
    # 1:1
    balance_list.extend(random.sample(unlabeled_list, len(balance_list)))
    random.shuffle(balance_list)
    return balance_list
73d36d4c4c538e17f9ba592baebe62c22d6ee446
28,387
from typing import List
from typing import Tuple

def bin_search_range(key: int, table: List[Tuple[int, str]], low: int, high: int) -> str:
    """Binary search for ``key`` in table[low..high] (inclusive).

    ``table`` must be sorted by its first tuple element.  Returns the
    associated value, or "No such value" when the key is absent.
    (Iterative formulation of the classic recursive search.)
    """
    while low <= high:
        mid = (low + high) // 2
        entry_key, entry_value = table[mid]
        if entry_key == key:
            return entry_value
        if key < entry_key:
            high = mid - 1  # key can only lie before the midpoint
        else:
            low = mid + 1   # key can only lie after the midpoint
    # Range emptied without a hit: key is not in the table.
    return "No such value"
2fec23371eecd2fd0299f67b23d28a1f06a36c04
28,388
import sys
import termios
import tty

def _unix_getch():
    """Read a single key press from stdin by switching it into raw mode.

    This method is appropriated from the readchar library
    (https://github.com/magmax/python-readchar).

    Returns:
        bytes: The key pressed as a binary string (ASCII-encoded so the
        return type matches the Windows implementation).
    """
    fd = sys.stdin.fileno()
    # Remember the cooked-mode settings so they can always be restored.
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(fd)
        # The tty returns a str; encode to bytes to match the Windows version.
        ch = sys.stdin.read(1).encode("ascii")
    finally:
        # Restore terminal state even if the read raised.
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
6613984ed4c370e4bbfb3f9b13773569890d7ad8
28,391
def hpo_genes_from_dynamic_gene_list(case_obj, is_clinical, clinical_symbols):
    """
    Collect gene identifiers from a case's dynamic (custom HPO) gene list.

    Args:
        case_obj(dict): models.Case
        is_clinical(bool): if True, keep only genes among clinical_symbols
        clinical_symbols(set): set of clinical symbols

    Returns:
        set: gene symbols (falling back to the stringified hgnc_id when a
        symbol is missing), optionally intersected with clinical_symbols
    """
    unique_genes = {
        gene.get("hgnc_symbol") or str(gene["hgnc_id"])
        for gene in case_obj["dynamic_gene_list"]
    }
    if is_clinical:
        unique_genes &= set(clinical_symbols)
    return unique_genes
12b25c60bdbc10f39b13825e692e8e4a5e3f4a47
28,392
def int_to_bitstring(x: int) -> str:
    """
    Convert an integer to AT LEAST a 32-bit binary string.

    Values needing fewer than 32 bits are zero-padded on the left up to
    32 characters; larger values get the minimal full representation.

    Parameters
    ----------
    x: int
        Integer to convert to a bitstring.

    Returns
    -------
    str
        Bitstring of at least 32 characters.

    Notes
    -----
    >>> int_to_bitstring(2)
    '00000000000000000000000000000010'
    """
    # format() with the 032b spec pads to 32 but never truncates.
    return format(x, '032b')
34c022eedf5d73d68cac56f296bd53cc62b2be24
28,394
async def elec_measurement_zha_dev(elec_measurement_zigpy_dev, zha_device_joined):
    """Electric Measurement ZHA device fixture.

    Joins the provided zigpy electrical-measurement device via the
    ``zha_device_joined`` fixture, marks the resulting ZHA device as
    available, and returns it.
    """
    zha_dev = await zha_device_joined(elec_measurement_zigpy_dev)
    # Mark available so tests can interact with it immediately.
    zha_dev.available = True
    return zha_dev
67346d2eb2f70d8a876f1dbaa5840c45aa48586c
28,395
def get_best_hits(matches, evalue_cutoff):
    """
    Get the best hit(s): smallest e-value, then highest score.

    When several hits tie on both the best e-value and the best score,
    all of them are reported (in their original input order).  An empty
    list is returned when the best e-value exceeds ``evalue_cutoff``.

    Each match is indexable with the e-value at position 2 and the score
    at position 3.
    """
    best_hits = []
    best_evalue = min(match[2] for match in matches)
    if best_evalue <= evalue_cutoff:
        tied_on_evalue = [match for match in matches if match[2] == best_evalue]
        best_score = max(match[3] for match in tied_on_evalue)
        best_hits = [match for match in tied_on_evalue if match[3] == best_score]
    return best_hits
9bf21eecc896d29fb888ec772fc7dbab8a6913cc
28,396
import re

def find_coords_string(file_path):
    """
    Scan a file path for a substring that looks like a coordinate pair
    ("lat_lon" or "lat,lon", each up to 3 integer and 3 decimal digits,
    optionally negative) and return it, or None when absent.
    """
    match = re.search(
        r"([-]?[\d]{1,3}\.[\d]{1,3}[_,][-]?[\d]{1,3}\.[\d]{1,3})", file_path
    )
    return match.group(1) if match else None
2d6fd569122222d3600aea5500bd515dea8b8596
28,397
import requests

def load_inspect_api(url):
    """Load the inspect API at ``url`` and return its JSON response.

    :param url: full URL of the inspect API endpoint
    :return: the decoded JSON body
    :raises RuntimeError: when the HTTP status is anything but 200
    """
    r = requests.get(url)
    if r.status_code != 200:
        raise RuntimeError(
            'Failed to read inspect API. Error %d.' % (r.status_code))
    return r.json()
6a3a722a6a5e9752fd3f9ef5806cca3209392bbe
28,398
def can_edit_address(user, address):
    """Determine whether the user can edit the given address.

    An address is editable by staff holding the "account.manage_users"
    permission, and by the customer who "owns" the address.
    """
    # Both checks are evaluated (matching the original's behavior) and
    # then combined.
    is_staff = user.has_perm("account.manage_users")
    is_owner = address in user.addresses.all()
    return is_staff or is_owner
724be0e3023a4776604f869b506a35f216c3461e
28,399
def parse_input(inp, options):
    """Parse user-provided input as an index into ``options``.

    Parameters
    ----------
    inp : str
        User input.
    options : list
        List of option strings.

    Returns
    -------
    int or None
        The parsed index when it is valid for ``options``, else None.
    """
    try:
        parsed_choice = int(inp)
    except (TypeError, ValueError):
        # BUG FIX: the original used a bare `except:` that also swallowed
        # unrelated errors (even KeyboardInterrupt); only conversion
        # failures mean "not a number".
        return None
    if 0 <= parsed_choice < len(options):
        return parsed_choice
    # Out-of-range index: explicit None instead of falling off the end.
    return None
fcb63ce473d08363373af3488b87b49a19c782e0
28,401
import functools

def cmp_to_key():
    """Demonstrate converting a C-style compare function to a key function.

    Returns the longest name from a fixed list, selected via
    functools.cmp_to_key over a three-way length comparison.
    """
    def compare_sizes(list_a, list_b):
        """Old-style three-way comparison on lengths (-1 / 0 / 1)."""
        size_a, size_b = len(list_a), len(list_b)
        # Standard sign trick: (a > b) - (a < b) yields 1, -1 or 0.
        return (size_a > size_b) - (size_a < size_b)

    heroes = ['rahan', 'lucky luke', 'conan', 'samba']
    return max(heroes, key=functools.cmp_to_key(compare_sizes))
61da2bda4085e793dcb769218bc3eb36541aefec
28,402
def filename_segment(value):
    """Check that the string is valid for use as part of a filename.

    Allowed characters are alphanumerics plus "-_. " (dash, underscore,
    dot, space).  Returns ``value`` unchanged, or raises ValueError on
    the first disallowed character.
    """
    if any(not ch.isalnum() and ch not in '-_. ' for ch in value):
        raise ValueError(f"invalid value: '{value}'")
    return value
44f9e0b26501accf7facb0c766938865648a5121
28,404
import itertools


def implied_loop_expr(expr, start, end, delta):
    """Apply *expr* to each value of an implied loop and return the results.

    The loop runs from *start* to *end* inclusive, stepping by *delta*
    (which may be negative).
    """
    stop = end + 1 if delta > 0 else end - 1
    values = [expr(v) for v in range(start, stop, delta)]
    # NOTE(review): itertools.chain(values) chains a single iterable, so
    # despite the original "flattened" comment this does NOT flatten
    # nested results -- confirm intent before changing to chain(*values).
    return list(itertools.chain(values))
92828aa49bbd7f5ffbd5ab7666f0de60bb4b4aef
28,406
def check_if(s: str, d: dict, voting: str) -> str:
    """Return the canonical spelling of name *s* if it is known.

    Looks up *voting* in *d*; the inner dict's values plus a fixed set
    of names form the candidates. Matching ignores case and spaces.
    Returns *s* unchanged when no candidate matches.
    """
    if voting not in d:
        return s
    inner = d[voting]
    # Candidate names are (ex.) ['Fridrich', 'Lukas', 'Melvin', 'Niclas']
    candidates = [inner[key] for key in inner] + ['Lukas', 'Melvin', 'Niclas']
    target = s.lower().replace(' ', '')
    for candidate in candidates:
        if target == candidate.lower().replace(' ', ''):
            return candidate
    return s
dae0cce9c13ea03fd74e8cab3c71438ca26f3d31
28,407
import os


def compose_affines(reference_image, affine_list, output_file):
    """Collapse several affine transforms into one via antsApplyTransforms.

    Shells out to the ANTs CLI, then verifies the output file was written.
    """
    transforms = " ".join("--transform %s" % trf for trf in affine_list)
    cmd = "antsApplyTransforms -d 3 -r %s -o Linear[%s, 1] " % (
        reference_image, output_file)
    cmd += transforms
    os.system(cmd)
    assert os.path.exists(output_file)
    return output_file
8dea8c5bfb0e27dd3c626e957397063048446723
28,408
def _early_stop(args, eval_history): """ Determines early stopping conditions. If the evaluation loss has not improved after `args.early_stop` epoch(s), then training is ended prematurely. Args: args: `argparse` object. eval_history: List of booleans that indicate whether an epoch resulted in a model checkpoint, or in other words, if the evaluation loss was lower than previous losses. Returns: Boolean indicating whether training should stop. """ return ( len(eval_history) > args.early_stop and not any(eval_history[-args.early_stop:]) )
5e8d698703750a2f4be2564b4905c5e7d42815ed
28,409
def _line_to_array(agile_line): """convert the weird AGILEPack weights output to an array of floats""" entries = agile_line.split(',')[1:] return [float(x) for x in entries]
0693a75ecce5dc105aedf9fa44d16e1e6959befa
28,412
def _schema_to_keys(s): """Return the entry keys for schema of dict type.""" def _get_d(s): d = s while hasattr(d, 'schema'): d = s.schema return d d = _get_d(s) if not isinstance(d, dict): return None return (_get_d(ss) for ss in d.keys())
3be129394e372e21b8cb104bc18a8ddfd99c74aa
28,413
def video_ready(channel):
    """
    Returns true if the video playing on `channel` has a frame ready for
    presentation.
    """
    # Stub implementation: no channel ever reports a ready frame.
    return False
8355426458fd1bc8d4201db4a6bab5ce038d7a28
28,414
import requests


def need_retry(exception):
    """Report whether *exception* is a retryable requests error.

    Connection errors and read timeouts are considered transient; a
    message is printed when a retry is warranted.
    """
    retryable = isinstance(
        exception, (requests.ConnectionError, requests.ReadTimeout))
    if retryable:
        print('Exception', type(exception), 'occurred, retrying...')
    return retryable
7a0908cf2d31d24dba70558d8a2292a288f5ff42
28,416
def convert_frequency_to_period(X):
    """Convert each frequency in *X* to its period, formatted to 2 decimals.

    Used to label the optional period axis on plots.
    """
    labels = []
    for freq in X:
        labels.append("%.2f" % (1 / freq))
    return labels
40bccc87a60e5fd45662d257fefc3505a5725154
28,417
def calculate_scale(map_scale, map_width):
    """
    Calculates the image scale bar width in pixels together with the
    scale label using map scale (meters per pixel) and map width (pixels).
    Returns (scale_width, scale_label) where scale_width is the bar length
    in pixels and scale_label reads e.g. "400 m" or "2.0 km".
    """
    # Physical width of the whole image in meters.
    image_width_meter = round(map_scale * float(map_width))
    # Aim for a scale bar covering roughly 20% of the image width.
    scale_num_guess = str(int(round(image_width_meter * 0.2)))
    # Round the leading digit of the guess to the nearest even value and
    # restore the original order of magnitude from the remaining digits.
    scale_num = int(2 * round(float(int(scale_num_guess[0])) / 2)) * 10 ** (
        len(scale_num_guess[1:]))
    # A leading digit of 1 rounds down to 0; fall back to 1 * 10^magnitude.
    scale_num = scale_num if scale_num else 1 * 10 ** (len(scale_num_guess[1:]))
    # Convert the chosen distance back to pixels for drawing the bar.
    scale_width = round(scale_num / map_scale)
    # Label in meters below 1 km, otherwise in kilometers.
    scale_label = f"{scale_num} m" if scale_num < 1000 else "{} km".format(
        scale_num / 1000)
    return scale_width, scale_label
feeaa92a2bfb2fbc4c7fe13176348997f0ba8453
28,418
def buzz(number):
    """Return "Buzz" for multiples of 5, otherwise False."""
    if number % 5 == 0:
        return "Buzz"
    return False
91444762c537014e8ffe22831cc38960b3beaaa4
28,421
from typing import List
import os
import re


def get_frame_ids(im_fd: str, pc_fd: str) -> List[str]:
    """Return a list of frame ids present in both im and pc folders.

    Args:
        im_fd: path to the image folder.
        pc_fd: path to the point-cloud folder.

    Returns:
        Unordered list of numeric-id strings (first digit run of each
        filename) found in both folders.
    """
    # Raw strings: "\d" in a plain string literal is an invalid escape
    # sequence (DeprecationWarning, error in future Python versions).
    im_ids = [re.findall(r"\d+", fn)[0] for fn in os.listdir(im_fd)]
    pc_ids = [re.findall(r"\d+", fn)[0] for fn in os.listdir(pc_fd)]
    return list(set(im_ids).intersection(set(pc_ids)))
640483d8efa8f6b4f3cc1d87ba84e749a1d3e5cd
28,422
def get_B_theta(R, thetadot):
    """Get B_theta from Q, Qdot.

    Raises:
        ValueError: when R is not strictly positive.
    """
    if R <= 0:
        raise ValueError("R cannot be less than or equal to zero.")
    b_theta = R ** 2 * thetadot
    return b_theta
b80633370c045ab7823ebd70e2e0c1b4c7e80023
28,424
def Data_Normalization(data):
    """
    Normalise a grayscale image data set from [0, 255] to [-0.5, 0.5].

    input:
        data: grayscale image data set
    return:
        normalised images
    """
    scaled = data / 255
    return scaled - 0.5
826bfe7aabb6bab403894ce837bcc8a0ceee6acf
28,425
import re


def tournament_results(filename="rezultat.txt"):
    """
    Read tournament results (fitness) from file.

    :param filename: path for tournament results
    :return: list with fitness (int) for each unit of population, or
             None when the file cannot be read
    """
    try:
        with open(filename, "r") as f:
            lines = f.readlines()
    except IOError:
        print("IO error while reading results of matches from rezultat.txt")
        return None
    results = []
    for line in lines:
        if "fitness" in line:
            # The fitness value is the (possibly negative) trailing integer.
            numbers = re.findall(r'-?\d+$', line)
            results.append(int(numbers[0]))
    return results
ade5bbb6991b68758eefa3ad0ee00a16b5130509
28,427
import os


def is_video(path):
    """Is a file a video with a known video extension ['.avi','.mp4','.mov','.wmv','.mpg']?"""
    _filename, ext = os.path.splitext(path)
    # Bug fix: 'mpg' was listed without its leading dot, so '.mpg' files
    # were never recognized (splitext always includes the dot).
    return ext.lower() in ('.avi', '.mp4', '.mov', '.wmv', '.mpg')
316204c24fcd5a36ad307db310fb0ff5a2ffd7ca
28,428
def is_dialog_complete(handler_input):
    """
    Checks whether the dialog is complete according to the dialog model.

    :param handler_input: (HandlerInput)
    :return: (boolean) True only when the request carries a dialogState
             equal to "COMPLETE"
    """
    request = handler_input.get_request()
    if not hasattr(request, "dialogState"):
        return False
    return request.dialogState == "COMPLETE"
c9137b497bf472e88f05f8395166db5b18fe5f51
28,429
def ismutabletuple(element):
    """Check if element is of type mutabletuple (duck-typed via its marker attribute)."""
    return hasattr(element, 'MutableTupleUniqueIdentifier')
64d732cbfc1060f5969501c0de978294e582cd4f
28,430
def valid(day, meal):
    """A meal is valid if it feeds enough people.

    Any meal works on non-Tuesdays; on Tuesday the meal must feed a crowd.
    """
    if "Tuesday" not in day:
        return True
    # NOTE(review): feeds_a_crowd looks like a pandas Series (.item()) --
    # confirm against caller.
    return meal.feeds_a_crowd.item() == "Yes"
c34f4d5364ae29178986a9507738632c9b2f3827
28,431
def json_wraper(log):
    """Serialize a log model object into a plain JSON-ready dictionary.

    Parms:
        log: model object with id, updated_date, occured and programme
    """
    payload = {'id': "%s" % log.id}
    payload['updated_date'] = log.updated_date
    payload['occured'] = log.occured
    payload['programme'] = log.programme
    return payload
0bd315f9870f8f604b10b50e89d7346e070dff8a
28,432
def get_mongo_primary_host_and_port(mongo_hosts_and_ports):
    """
    :param mongo_hosts_and_ports: comma-separated host:port entries as
        stored in the private settings xml
    :return: (host, port) of the primary -- the first entry containing
        '001' -- or None when no such entry exists
    """
    for entry in mongo_hosts_and_ports.split(','):
        if '001' not in entry:
            continue
        parts = entry.split(':')
        return parts[0], parts[1]
5651540a9cdd7074c05da814db3a640d11141b2a
28,433
import numpy


def pack_grid_cells(cells, resolution):
    """
    Pack the cell centers (grid points) into a convenient numpy array,
    with the grid resolution prepended as the first row for later use.
    """
    centers = [cell._center for cell in cells]
    return numpy.vstack((resolution, centers))
ad2c4a8586a818b8371e24c70e5491c8e2084f34
28,434
def log_call(call_next, *args, **kwargs):
    """Print the call signature of *call_next*, then invoke and return it."""
    pieces = [str(a) for a in args]
    pieces.extend(f"{k}={v}" for k, v in kwargs.items())
    print(f"Calling {call_next}({','.join(pieces)})")
    return call_next(*args, **kwargs)
4c6becf8731944b2c2e80c51a16fe62034d81102
28,435
def tags_to_text(tags):
    """Render *tags* as newline-joined text, sorted by string value."""
    return '\n'.join(sorted(str(tag) for tag in tags))
a4bf2111517e3b8f616e84490bd37e9f197c698a
28,436
def _sort_by_amount(transactions: list) -> list: """ Sort transactions by amount of dollars. """ sorted_transactions = sorted(transactions, key=lambda transaction: transaction["amount"], reverse=True) return sorted_transactions
fdc364e55a8501bd0a7b1ce9f24d6afbb1c4c4a0
28,438
def _union_1d(first, second, size): """Computes if slice first and second overlap for the size of the dimension""" first_min, first_max, _ = first.indices(size) second_min, second_max, _ = second.indices(size) if first_max >= second_min and second_max >= first_min: return slice(min(first_min, second_min), max(first_max, second_max)) else: return None
fba68ae5b36b646a0621e4f1204675737d1a1704
28,440
def protocol_stage_settings(settings):
    """Load settings for each milling stage, overwriting default values.

    Parameters
    ----------
    settings : Dictionary of user input argument settings.

    Returns
    -------
    protocol_stages
        List containing a dictionary of settings for each protocol stage.
    """
    defaults = settings["lamella"]
    stages = []
    for overrides in defaults["protocol_stages"]:
        merged = defaults.copy()
        merged.update(overrides)
        stages.append(merged)
    return stages
fd2b65ffcf71ea674e32eb80a69faec27ff9b66d
28,441
def modifystring(s, sub, offset):
    """Return *s* with *sub* overwritten in place starting at *offset*."""
    tail = offset + len(sub)
    return ''.join((s[:offset], sub, s[tail:]))
2cdedba0b52a0d4d53a249e76d6fcb2f8ae0d6b4
28,442
def evaluate(env, policy, num_episodes=10):
    """Evaluates the policy.

    Args:
        env: Environment to evaluate the policy on.
        policy: Policy to evaluate.
        num_episodes: A number of episodes to average the policy on.

    Returns:
        Tuple of (average return per episode, average steps per episode).
    """
    total_timesteps = 0
    total_returns = 0.0
    for _ in range(num_episodes):
        timestep = env.reset()
        while not timestep.is_last():
            action = policy.act(timestep.observation)
            timestep = env.step(action)
            # reward is assumed to be a length-1 batch -- TODO confirm.
            total_returns += timestep.reward[0]
            total_timesteps += 1
    # The original also accumulated a per-episode return that was never
    # used; that dead local has been removed.
    return total_returns / num_episodes, total_timesteps / num_episodes
8ebac5d9862662b45395c8cbdacace2cb4058b47
28,444
import shlex def _join(split_command: list[str]) -> str: """Concatenate the tokens of the list split_command and return a string.""" return " ".join(shlex.quote(arg) for arg in split_command)
127cd1a7366c843c328b1e29632811e964de0542
28,445
import os


def getBaseNameNoExt(givenPath):
    """Returns the basename of the file without the extension."""
    base = os.path.basename(givenPath)
    root, _ext = os.path.splitext(base)
    return root
43cd0834cde03ddc2784be272b8faeb367abaf48
28,446
from pathlib import Path


def check_file_exists(sub_dir: Path, file: Path):
    """Check that the supplied `file` arg exists inside `sub_dir`.

    Args:
        sub_dir (Path): the fully qualified subdirectory
        file (Path): the file to check

    Returns:
        Path: if valid, the (possibly re-anchored) file path

    Raises:
        Exception: if the file doesn't exist or is in the wrong place
    """
    # Re-anchor bare filenames (or paths resolving elsewhere) into sub_dir.
    if file.resolve().parent != sub_dir:
        file = sub_dir / file
    if not file.exists():
        raise Exception(f"{file} isn't in {sub_dir}")
    # The original re-tested `file.exists()` here with an unreachable
    # else branch raising a second, differently-worded exception; both
    # the redundant check and the dead branch have been removed.
    return file
e421dc28adb49291fc4740c5163604e772d6aa2e
28,447
import os


def GetNumCores():
    """Returns the number of cores on the machine.

    For hyperthreaded machines, this will be double the number of actual
    processors. Falls back to 1 when NUMBER_OF_PROCESSORS is unset
    (i.e. on non-Windows systems).
    """
    count = os.environ.get('NUMBER_OF_PROCESSORS', 1)
    return int(count)
932ef8e5c9da17d5963577941538064c04e4778c
28,448
def uploaded_file_extension(filename: str) -> str:
    """
    Returns the extension from a file name

    :param filename: Name of the file with its extension
    :type filename: str
    :return: The extension of the file, lowercased, with its leading dot
    :rtype: str
    """
    ext = filename.rsplit('.', 1)[1].lower()
    return f".{ext}"
03d62a28997d535427fded44d0df1ffc8a646f6f
28,449
def work(x0, y0, x, h, function):
    """Integrate dy/dx = function(x, y) from x0 to x with classic RK4.

    Args:
        x0: initial x value.
        y0: initial y value at x0.
        x: target x value.
        h: step size (step height).
        function: callable f(x, y) giving the derivative dy/dx.

    Returns:
        Approximation of y at x after int((x - x0) / h) RK4 steps.
    """
    # Number of iterations determined by the step size h.
    steps = int((x - x0) / h)
    y = y0
    for _ in range(1, steps + 1):
        k1 = h * function(x0, y)
        k2 = h * function(x0 + 0.5 * h, y + 0.5 * k1)
        k3 = h * function(x0 + 0.5 * h, y + 0.5 * k2)
        k4 = h * function(x0 + h, y + k3)
        # Weighted RK4 average advances y; then move x one step forward.
        y = y + (1.0 / 6.0) * (k1 + 2 * k2 + 2 * k3 + k4)
        x0 = x0 + h
    return y
cb9afebf0a7b2067d26f615ab866775c5f611024
28,450
def load_fragment(soup, fragment, cache):
    """
    Load a BNC fragment with id `fragment` from `soup`, memoizing the
    lookup result (including misses) in `cache`.
    """
    try:
        return cache[fragment]
    except KeyError:
        pass
    fragment_xml = soup.find('text', {'xml:id': fragment})
    cache[fragment] = fragment_xml
    return fragment_xml
56333e7736d501f0cef66f1b956cefd1d8c3071b
28,453
def may_change_leading_case(text):
    """
    Checks whether the string `text` can change the letter case of its
    leading letter: True when the first non-whitespace character is
    alphabetic, False otherwise (including empty/all-space strings).
    """
    for ch in text:
        if ch.isalpha():
            return True
        if not ch.isspace():
            return False
    return False
ba3b30c4e5a65104671f25970a65b93e6d4d7dcf
28,454
def shouldSetupAPI():
    """
    Return whether the api needs to be compiled/pythonified
    """
    # NOTE(review): the try body only returns False and can never raise
    # ImportError, so this function always returns False and the except
    # branch is dead. It looks like an `import` of the compiled API
    # module was removed from the try body -- confirm against history.
    try:
        return False
    except ImportError:
        return True
dea7c970d55e569d82bdf87ca757763d53a5139d
28,455
import time


def get_time_dif(start_time):
    """Return the elapsed wall-clock time in seconds since *start_time*.

    Args:
        start_time: a timestamp previously obtained from ``time.time()``.

    Returns:
        float: seconds elapsed since *start_time*.
    """
    # Bug fix: the original had an unreachable second `return` after this
    # one that referenced an undefined name (`timedelta`); it is removed.
    end_time = time.time()
    return end_time - start_time
9b118bf2847f37627520804dd8ca4b55854d1830
28,456
def sinum_frmt(x):
    """
    Generate SIunitx style formatted number output for plotting tables
    to TeX (for use with the Latex package SIunitx).

    Parameters
    ----------
    x : int, float
        Value to format. Non-numeric values pass through unchanged.

    Returns
    -------
    str
        Formatted value ('-' for NaN).
    """
    if not isinstance(x, (int, float)):
        return x
    if x < 1e3:
        return r'\num{' + format(x, '.3G') + '}'
    if x >= 1e3:
        return r'\num{' + format(x, '.0f') + '}'
    # NaN compares False against everything, so only NaN reaches here.
    return '-'
3ed5311d242b99c946e1ea1d514187a5823b2be0
28,457
def vprint(verbose: bool):
    """
    Utility function for optional printing.

    Parameters
    ----------
    verbose: bool
        If True, returns the builtin ``print``; otherwise a no-op callable.

    Returns
    -------
    callable
    """
    if verbose:
        return print

    def _noop(*args, **kwargs):
        return None

    return _noop
5295914f2fc9b8aaa146b74c8b5ff3ec7b501cab
28,458