content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def _scale(dimen: str, scale: float = 1) -> int | str: """Get a scaled dimen. Args: dimen (str): dimension in px scale (float): scale Returns: Optional[int, str]: scaled dimen (min=1) """ if scale == 1: return dimen return max(int(int(dimen) * scale), 1)
450a969f658f5768b1e3988859c4f20ff6b2dfd3
49,005
from typing import Any from typing import Union def _batch_set_tuple(batch: Any, key: Union[int, str], value: Any) -> Any: """"Sets key value pairs in tuples and NamedTuples.""" if hasattr(batch, '_fields'): # NamedTuple if isinstance(key, str): batch = batch._replace(**{key: value}) else: batch_list = list(batch) batch_list[key] = value batch = batch._make(batch_list) else: # Normal tuple. batch = list(batch) batch[key] = value batch = tuple(batch) return batch
d725614cd7fc1795d76ea22c204b8f446ff8cab2
49,006
from typing import List


def scale_prev(prev_vals: List[float], fraction: float) -> float:
    """Return the most recent value in ``prev_vals`` multiplied by ``fraction``."""
    latest = prev_vals[-1]
    return fraction * latest
9685214aaece3a6935886cbf91472e877f7e3619
49,008
import numpy def _default_processing(data, dist_func, type=numpy.float_): """Creates a dict and calculates the distance matrix for a 2D tsp problem. Parameters ---------- data : list of list The lists in the list always contain the name of a point, the x coordinate and the y coordinate in that order seperated by spaces. dist_func : function A function to calculate the distance between the points. This function's arguments must be the x and y coordinates of the first point, followed by the x and y coordinates of the second point. type : numpy.dtype, optional The data type used by the numpy array. The default is numpy.float_, which is the default datatype when creating a numpy.ndarray. Returns ------- dist_matrix : numpy.ndarray The distance matrix for the problem. dictionary : {int : int} A dictionary that can convert a position the distance matrix to the name given in data. """ # make dist_matrix size = len(data) # dist_matrix [from] [to] dist_matrix = numpy.full((size, size), numpy.inf, dtype=type) for i in range(size): # ditance between a point and itself is always zero. dist_matrix[i][i] = 0 # get coordinates point i_dist_x = data[i][1] i_dist_y = data[i][2] for j in range(i + 1, size): # get coordinates all points j_dist_x = data[j][1] j_dist_y = data[j][2] # calculate distance dist_matrix[i][j] = dist_func( i_dist_x, i_dist_y, j_dist_x, j_dist_y) dist_matrix[j][i] = dist_func( i_dist_x, i_dist_y, j_dist_x, j_dist_y) return dist_matrix
44f47bc42c6f1eb0ac12dd50771cd6a7cc7e294c
49,009
def convert(hour):
    """Convert hours into seconds (hours * 3600).

    Args:
        hour (int): number of hours.

    Returns:
        int: number of seconds in ``hour`` hours.
    """
    seconds_per_hour = 3600
    return hour * seconds_per_hour
1c7d06da2582c0c580ed48ddc18f142727001fa8
49,011
import json


def load_spam_rounds(path):
    """Load spam round transaction XDRs, one JSON document per line."""
    with open(path) as rounds_file:
        return [json.loads(raw_line) for raw_line in rounds_file]
3ff640a1f529fadb76d08cd55063cb471568c683
49,012
def _r_count_num_m(n, factors_d, i): """Count of numbers coprime to d less than end; sum( gcd(m, d) == 1 for m in range(n, n+i) ) Uses inclusion exclusion on prime factorization of d """ if n == 0: return 0 if i < 0: return n return _r_count_num_m(n, factors_d, i-1) - _r_count_num_m(n // factors_d[i], factors_d, i-1)
2d5eacf6d1df2d4277bd9abd760bc132a09452d8
49,013
def NormalizeLineEndings(s):
    r"""Convert string containing various line endings like \n, \r or \r\n,
    to uniform \n.

    (Raw docstring so the escape sequences display literally instead of
    rendering as real control characters, as the original did.)
    """
    return '\n'.join(s.splitlines())
8dc5c3cf579427fe402ee84cc15b73ccc2bbe88d
49,014
def tags_from(element):
    """Receive element from dataset and return its tag labels only."""
    return [token[1] for token in element["tags"]]
bd4338440052eb0c80995f996c21d7fde0831c55
49,015
def hi_there(name="joseangel"):
    """Return a greeting string for ``name``."""
    greeting_prefix = "hi "
    return greeting_prefix + name
be432539bd19ded67704355a068886fa80c7c32d
49,019
import collections


def extract_selection_info(datalog_rule):
    """
    Non-Recursive datalog rule.

    Extract and store the information for attributes selected (computed)
    from the query.

    Args:
        datalog_rule: data structure storing the information of the datalog
            rule; expects ``['head']['arg_list']`` and ``['body']['atoms']``.

    Return:
        attributes-map: {<key, value>}: key - attribute index in the head
            value - [atom_index, attribute_index] in the body if the
            attribute is selected from the atoms in the rule body
            - constant if the attribute is some specific value
        attributes-type-map (atm): atm[i] represents the type of attribute
            indexed at i (e.g., variable, constant, aggregation)
        aggregation-map: {<key, value>}: key - attribute index in the head
            involving aggregation, value - corresponding aggregation operator
    """
    head = datalog_rule['head']
    head_arg_list = head['arg_list']
    head_arg_num = len(head_arg_list)
    body = datalog_rule['body']
    body_atom_list = body['atoms']
    body_atom_num = len(body_atom_list)
    # map attributes of the head to the position of the corresponding attribute in the body
    attributes_map = collections.OrderedDict({})
    # map attributes of the head to the specific type (e.g., variable, aggregation)
    attributes_type_map = list()
    # map the aggregation attributes of the head to the specific aggregation operator
    aggregation_map = dict()

    def search_attribute_mapping_in_body_atoms(arg_name):
        # Linear scan over body atoms; returns [atom_index, arg_index] of the
        # first occurrence of arg_name.  NOTE(review): implicitly returns
        # None when no body atom carries the name — callers store that None
        # in attributes_map unchecked; confirm that is intended.
        for atom_index in range(body_atom_num):
            cur_body_atom = body_atom_list[atom_index]
            cur_body_atom_arg_list = cur_body_atom['arg_list']
            cur_body_atom_arg_num = len(cur_body_atom_arg_list)
            for arg_index in range(cur_body_atom_arg_num):
                if arg_name == cur_body_atom_arg_list[arg_index].name:
                    return [atom_index, arg_index]

    for head_arg_index in range(head_arg_num):
        cur_head_arg = head_arg_list[head_arg_index]
        if cur_head_arg.type == 'variable':
            # Plain variable: map straight to its body position.
            cur_head_arg_name = cur_head_arg.name
            attributes_type_map.append('var')
            attributes_map[head_arg_index] = search_attribute_mapping_in_body_atoms(cur_head_arg_name)
        elif cur_head_arg.type == 'aggregation':
            # Aggregation: record the operator, then resolve the aggregated
            # argument, which is either a single attribute or a math expression.
            cur_head_arg_name = cur_head_arg.name['agg_arg']
            attributes_type_map.append('agg')
            aggregation_map[head_arg_index] = cur_head_arg.name['agg_op']
            if cur_head_arg_name['type'] == 'attribute':
                attribute_mapping = search_attribute_mapping_in_body_atoms(cur_head_arg_name['content'])
                attributes_map[head_arg_index] = \
                    {'type': 'attribute', 'map': attribute_mapping}
            elif cur_head_arg_name['type'] == 'math_expr':
                # Aggregation over a binary arithmetic expression: map both sides.
                math_expr = cur_head_arg_name['content']
                lhs_attri_name = math_expr['lhs']
                rhs_attri_name = math_expr['rhs']
                math_op = math_expr['op']
                lhs_attribute_mapping = search_attribute_mapping_in_body_atoms(lhs_attri_name)
                rhs_attribute_mapping = search_attribute_mapping_in_body_atoms(rhs_attri_name)
                attributes_map[head_arg_index] = \
                    {'type': 'math_expr',
                     'lhs_map': lhs_attribute_mapping,
                     'rhs_map': rhs_attribute_mapping,
                     'math_op': math_op}
        elif cur_head_arg.type == 'math_expr':
            # Bare (non-aggregated) binary arithmetic expression in the head.
            attributes_type_map.append('math_expr')
            math_expr = cur_head_arg.name
            lhs_attri_name = math_expr['lhs']
            rhs_attri_name = math_expr['rhs']
            math_op = math_expr['op']
            lhs_attribute_mapping = search_attribute_mapping_in_body_atoms(lhs_attri_name)
            rhs_attribute_mapping = search_attribute_mapping_in_body_atoms(rhs_attri_name)
            attributes_map[head_arg_index] = \
                {'type': 'math_expr',
                 'lhs_map': lhs_attribute_mapping,
                 'rhs_map': rhs_attribute_mapping,
                 'math_op': math_op}
        elif cur_head_arg.type == 'constant':
            # Literal value in the head: store the value itself, no body lookup.
            attributes_type_map.append('constant')
            attributes_map[head_arg_index] = cur_head_arg.name
    return [attributes_map, attributes_type_map, aggregation_map]
f6eadf82045bc74234dd5c0a007b78eee38b8a05
49,021
def get_version(diff_file, ix=True):
    """Determine the product version from the diff file name.

    param ix denotes if the diff file was generated by APIx or CLIx
    """
    name_parts = diff_file.split("/")[-1].split("-")
    if "-comp.yaml" in diff_file:
        return name_parts[0]
    return f"{name_parts[0]}-to{name_parts[2]}"
a1d899abdea8ee59c76b6103bd27cb0b40e100cd
49,022
from typing import Optional
from typing import Union
from typing import cast
from typing import Dict


def validate_axis(axis: Optional[Union[int, str]] = 0, none_axis: int = 0) -> int:
    """Check the given axis is valid."""
    # Translate the named/None axes to their numeric equivalents first.
    aliases = {None: none_axis, "index": 0, "columns": 1}
    axis = cast(Dict[Optional[Union[int, str]], int], aliases).get(axis, axis)
    if axis not in (none_axis, 0, 1):
        raise ValueError("No axis named {0}".format(axis))
    return cast(int, axis)
654cd35d1ddd4ff27c0a37635a211ddfca710c6e
49,024
def aes_unpad(s, block_size=32, padding='{'):
    """
    Removes padding to get the value from @s for AES decryption
    @s: #str being AES encrypted or decrypted
    @block_size: the AES block size (kept for API symmetry with aes_pad;
        not used when stripping)
    @padding: character to pad with
    -> unpadded #str
    ..
        from vital.security import aes_pad
        aes_unpad("swing{{{{{{{{{{{{{{{{{{{{{{{{{{{")
        # -> 'swing'
    ..
    """
    trimmed = s.rstrip(padding)
    return trimmed
2d2e956cd24a900ecd6b1c431c40ca5d9550beaf
49,025
def cos_series(x): """Returns cos(x) for x in reange -pi/2 .. pi/2""" # https://en.wikipedia.org/wiki/Trigonometric_functions#Power_series_expansion C=[1.,0.5,0.08333333333333333,0.03333333333333333,0.017857142857142856,0.011111111111111111,0.007575757575757576,0.005494505494505495,0.004166666666666667,0.0032679738562091504,0.002631578947368421,0.0021645021645021645,0.0018115942028985507,0.0015384615384615385,0.0013227513227513227,0.0011494252873563218,0.0010080645161290322,0.00089126559714795,0.0007936507936507937,0.0007112375533428165,0.000641025641025641] N,x2,r = len(C), x**2, 1. for j in range(N-1,0,-1): r = 1. - C[j] * ( x2 * r ) return r
28ebfa85a7162a666ce8fcf5c852e3d2d2577ba5
49,026
def _for_new_texts_get_ngrams_vector(text, vectorizer): """Not called here, just used to store a method that contains a vectorizer to be used later. """ vector = vectorizer.transform([text]).toarray()[0] return(vector)
c23a6c7d6d631a4329719aba1cea410cc130b9dd
49,028
import torch def so3_log_abs_det_jacobian(x): """ Return element wise log abs det jacobian of exponential map :param x: Algebra tensor of shape (..., 3) :return: Tensor of shape (..., 3) Removable pole: (2-2 cos x)/x^2 -> 1-x^2/12 as x->0 """ x_norm = x.double().norm(dim=-1) mask = x_norm > 1e-10 x_norm = torch.where(mask, x_norm, torch.ones_like(x_norm)) ratio = torch.where( mask, (2 - 2 * torch.cos(x_norm)) / x_norm ** 2, 1 - x_norm ** 2 / 12 ) return torch.log(ratio).to(x.dtype)
82d22cef96578a90d677d4d631dfb2ed7c783d05
49,029
def inherits_from(obj, a_class):
    """Return True iff ``obj`` is an instance of a strict subclass of ``a_class``.

    Args:
        obj: object to evaluate
        a_class: class value for testing
    """
    if type(obj) == a_class:
        # Exactly that class: not *inherited* from it.
        return False
    return isinstance(obj, a_class)
8cca684124a4c948bbfde7c147bd3be439f79931
49,030
def a1(row_index, column_index):
    """Get an A1 notation for a cell specified by its zero-based row index
    and column index.

    Examples: (0, 0) -> 'A1', (0, 25) -> 'Z1', (0, 26) -> 'AA1',
    (0, 27) -> 'AB1'.
    """
    ord_first = ord('A')
    letters_count = 26
    # Standard spreadsheet column letters use bijective base-26
    # (A..Z, AA..AZ, BA..BZ, ...).  The original repeated one letter
    # (1 + level) times, yielding e.g. 'BB' instead of 'AB' for column 27.
    letters = ''
    index = column_index
    while True:
        index, letter_index = divmod(index, letters_count)
        letters = chr(ord_first + letter_index) + letters
        if index == 0:
            break
        index -= 1  # bijective numeration: no zero digit
    number = row_index + 1
    return '{letter}{number}'.format(letter=letters, number=number)
d319600fcdddfbe7c0a715b56e8d00b6b9198d84
49,031
def get_E2K_lookup_dict(the_dict, main_key, sub_key):
    """Returns a lookup dictionary specified by main_key and sub_key,
    returning an empty dictionary if any is missing.
    This is for use in the post-processing functions."""
    subdict = the_dict[main_key].get(sub_key, {}) \
        if the_dict.get(main_key) else {}
    if subdict:
        # Invert: entry ID -> entry name, skipping entries without an ID.
        return {v.get('ID'): k for k, v in subdict.items() if v.get('ID')}
    # Honor the documented contract: the original fell through here and
    # returned None instead of the promised empty dict.
    return {}
56dbb81592a00e873dc5bda1234377a48bde9e94
49,032
def make_feat_paths(feat_path):
    """Wrap a feature path in a list.

    Args:
        feat_path (str): feature path, or None.

    Returns:
        list: single-element list of paths, or None when no path was given.
    """
    if feat_path is None:
        return None
    return [feat_path]
6a7960c86a7f97afbe7b248970fd7f7f8efeda1e
49,034
def add_fea_prfx(df, prfx: str, id0: int):
    """Prefix the names of feature columns starting at column index ``id0``."""
    renaming = {col: prfx + str(col) for col in df.columns[id0:]}
    return df.rename(columns=renaming)
a7c35073fb610a8e6d8f9f37f998ff0b943f1183
49,035
def annuity_factor(n, i):
    """
    Calculate annuity factor

    Args:
        n: depreciation period (40 = 40 years)
        i: interest rate (0.06 = 6%)

    Returns:
        annuity factor derived by formula (1+i)**n * i / ((1+i)**n - 1)
    """
    compounded = (1 + i) ** n
    return compounded * i / (compounded - 1)
9efb7062c609c260482a3f870350148518dba5d9
49,036
import os


def castep_bin_SiO2():
    """Binary output from a singlepoint with `calculate_stress: true`."""
    here = os.path.split(__file__)[0]
    return os.path.join(here, 'test_data/SiO2.castep_bin')
1cc221b479b1da528cc6cea335c23c66c7fda1b5
49,037
import struct


def get_floatDAWG(freqs, word, small):
    """Extract a word's float value from a BytesDAWG.

    Returns ``small`` when ``word`` is not present.
    """
    if word not in freqs:
        return small
    (value,) = struct.unpack("f", freqs[word][0])
    return value
7e1c66abfd3a157212665316d8939261e54461a3
49,038
def trim_to_preferred_protocols(hosts_and_urls):
    """
    remove all but http and ftp URLs,
    and if both http and ftp are offered,
    leave only http. Return [(hostid, url), ...]
    """
    preference_order = ('https', 'http', 'ftp')
    trimmed = []
    for hostid, hcurls in hosts_and_urls:
        # Keep the last URL seen for each recognised protocol.
        by_protocol = {}
        for hcurl in hcurls:
            for proto in preference_order:
                if hcurl.startswith(proto + ':'):
                    by_protocol[proto] = hcurl
        chosen = next(
            (by_protocol[p] for p in preference_order if p in by_protocol),
            None)
        if chosen is not None:
            trimmed.append((hostid, chosen))
    return trimmed
c4e108e4a650431a39d3a689153f516c1b711143
49,039
def is_invalid_schema(schema, test_value):
    """
    Checks schema against tests with dictionary nesting

    >>> is_invalid_schema({"valid_key": "some_value"}, {"valid_key": "some_value"})
    False
    >>> is_invalid_schema({"invalid_key": "some_value"}, {"valid_key": "some_value"})
    True
    >>> is_invalid_schema(
    ...     {"nested": {"valid_key": "some_value", "another_key": "some_value"}},
    ...     {"nested": {"valid_key": "some_value"}}
    ... )
    False
    """
    if not isinstance(test_value, dict):
        # Leaf: plain value comparison.
        return schema != test_value
    for key, expected in test_value.items():
        if key not in schema:
            return True
        if is_invalid_schema(schema[key], expected):
            return True
    return False
894109fc9910fc708d9a8800e1169d6e00876e0d
49,040
import sys


def read_stdin():
    """Read data from stdin (bytes on Python 3, str on Python 2)."""
    if sys.version_info >= (3, 0):
        return sys.stdin.buffer.read()
    return sys.stdin.read()
61018f2fc55ad1b31814fb0baad4522ba4df426f
49,041
import click


def lprint(ctx, out):
    """
    echo output of stdout and stderr
    if it's not a watch invoked command
    """
    for stream in (out.stdout, out.stderr):
        if stream:
            click.echo(stream)
    return out
0b44b9312cd3a347aecdfe382ed766b1459ff39d
49,042
def create_state(current_version, lr, total_ite, optimizer):
    """Create a checkpoint dict to be saved."""
    return {
        'version': current_version,
        'lr': lr,
        'total_ite': total_ite,
        'optimizer': optimizer.state_dict(),
    }
e318b02c6e2a5a15e9e3d65741962dbd95cfdd84
49,044
from typing import Union
import os


def find_caltable(field_name: str, rcu_mode: Union[str, int], caltable_dir='caltables'):
    """
    Find the file of a caltable.

    Args:
        field_name: Name of the antenna field, e.g. 'DE602LBA' or 'DE602'
        rcu_mode: Receiver mode for which the calibration table is requested.
        caltable_dir: Root directory under which station information is stored
            in subdirectories DE602C/etc/, RS106/etc/, ...

    Returns:
        str: full path to caltable if it exists, None if nothing found
    """
    station = field_name[0:5].upper()
    field = field_name[5:].upper()
    station_number = station[2:5]

    # Map the receiver mode onto the matching caltable filename suffix.
    mode = str(rcu_mode)
    if mode in ('outer', '1', '2'):
        suffix = "-LBA_OUTER-10_90.dat"
    elif mode in ('inner', '3', '4'):
        suffix = "-LBA_INNER-10_90.dat"
    elif mode == '5':
        suffix = "-HBA-110_190.dat"
    elif mode == '6':
        suffix = "-HBA-170_230.dat"
    elif mode == '7':
        suffix = "-HBA-210_250.dat"
    else:
        raise RuntimeError("Unexpected mode: " + str(rcu_mode) +
                           " for field_name " + str(field_name))
    filename = f"CalTable-{station_number}" + suffix

    # All caltables in one directory
    flat_path = os.path.join(caltable_dir, filename)
    if os.path.exists(flat_path):
        return flat_path
    # Caltables in a directory per station
    station_path = os.path.join(caltable_dir, station, filename)
    if os.path.exists(station_path):
        return station_path
    return None
2548c7b0c097f62bc40719fbcb6939ff58817d4b
49,045
def formed_bond_keys(tra):
    """Keys for bonds that are formed in the transformation.

    ``tra`` is a 2-tuple of (formed-bond keys, broken-bond keys).
    """
    formed, _broken = tra
    return formed
7190e308c86f3c453696379e5b914c9665e8b482
49,046
import re


def handle_lambda(call, line, file):
    """
    Resolve calls that point to <lambda>.

    Finds the lambda definition in the file/line pointed to by the `call`
    and extracts the name of the variable that gets assigned the lambda.

    :param call: call dictionary
    :param line: line dictionary
    :param file: file dictionary of the file pointed to by the call
    :return: updated call dictionary ({} when the call points at itself)
    """
    if call['entry_point'] == '<lambda>':
        if line['line_number'] == call['line_number']:
            # The call points at its own definition line: drop it.
            return {}
        num = call['line_number']
        # Find the variable name assigned the lambda on that line.
        source_code = file['lines'][num - 1]['code']
        match = re.search(r'(?<=\s)(\w*)(?=\s*=\s*lambda)', source_code)
        if match is not None:
            call['entry_point'] = match.group(0)
    return call
ff9c99f123596d7554bebdeb8c607fd8864ef1a4
49,047
import math


def filter_greater_135deg(arc, columns=13):
    """function for calculating endpoints of a line, starting at (0/0).
    When connecting endpoints with start points applying a given angle,
    a line can be drawn that serves as filter for angles between 91° and 134°.
    It decides on the fly how big the filters must be: starting with default
    value of grid size and changing (increasing) it when needed

    args:
        arc: the angle in degrees for which a filter should be created
        columns: initial grid size; grown below if it is too small for the angle

    returns:
        tuple (x1, y1, x2, y2, columns) — start and end points of the line in
        numpy array notation, plus the (possibly enlarged) grid size
    """
    angle = 180 - arc  # angle between x-axis and line. "drawing angle to the left"
    angle = math.radians(angle)
    rows = int(columns)
    x1 = 0
    y1 = 0
    # right angled triangle: hypotenuse = adjacent (=columns) / cos(angle).
    # adjacent is known => # columns
    hypotenuse = columns / math.cos(angle)
    # x-coordinate of the endpoint of the line, actually this is always the
    # of cols, for angles > 135° always 0: one arm of angle (other one is
    # bottom line (x-axis for CC sys.)) intersects the columns always at 0.
    x2 = 0.0
    y2 = round(y1 + hypotenuse * math.sin(angle), 3)
    y2 = round((columns - y2), 3)
    y2 = round((y2 - 1), 3)
    # bring starting points array form, drawing the line from the lower left
    # side (max. # rows, max. # cols).
    # change from CC notation to array notation: subtract 1 as counting in
    # array starts at 0
    x1 = float(rows - 1)
    y1 = x1
    # only if default column parameter isn't used and if the one used instead
    # would create too small an array for given angle
    if y2 < 0:
        while y2 < 0:
            # increase grid size step by step
            columns = columns + 1
            # set back all previously calculated values and recompute
            rows = columns
            x1 = 0.0
            y1 = 0.0
            hypotenuse = columns / math.cos(angle)
            y2 = round(y1 + hypotenuse * math.sin(angle), 3)
            y2 = round((columns - y2), 3)
            # NOTE(review): this branch rounds y2 to an int (float(round(...)))
            # whereas the branch above keeps 3 decimals — confirm intentional.
            y2 = float(round(y2 - 1))
            x1 = float(columns - 1)
            y1 = x1
    return (x1, y1, x2, y2, columns)
a75a1a82c4517a2b8a308abcb3fe39a7cf485dcd
49,048
import collections


def get_month_most_posts(dates):
    """Receives a list of datetimes and returns the month
    (format YYYY-MM) that occurs most"""
    month_counts = collections.Counter(
        str(d.year) + '-' + str(d.month).zfill(2) for d in dates
    )
    return month_counts.most_common(1)[0][0]
6c71301657e45529706688cdb51e4bbe43284d41
49,049
import os


def _read_control_file(file_dir, cfg):
    """Read '*.ctl' file.

    Parses a GrADS-style control file named ``<binary_prefix>.ctl`` inside
    ``file_dir`` and returns a dict with the dtype (from OPTIONS endianness)
    and the TDEF/XDEF/YDEF grid sizes, starts and deltas.
    """
    ctl_path = os.path.join(file_dir, cfg['binary_prefix'] + '.ctl')
    with open(ctl_path, mode='r') as ctl_file:
        contents = ctl_file.read()
    # Whitespace-tokenise the whole file; fields are located positionally
    # relative to their section keywords below.
    contents = contents.split()
    ctl = {}
    ctl['binary_prefix'] = cfg['binary_prefix']
    # Endianness is the token right after 'OPTIONS'.
    endian = contents[contents.index('OPTIONS') + 1].lower()
    if endian == 'big_endian':
        ctl['dtype'] = '>f4'
    elif endian == 'little_endian':
        ctl['dtype'] = '<f4'
    else:
        raise ValueError(f"Unknown endian {endian}")
    # Each *DEF section is read as 4 tokens: size, mapping, start, delta.
    t_def = contents[contents.index('TDEF') + 1:contents.index('TDEF') + 5]
    x_def = contents[contents.index('XDEF') + 1:contents.index('XDEF') + 5]
    y_def = contents[contents.index('YDEF') + 1:contents.index('YDEF') + 5]
    ctl['t_size'] = int(t_def[0])
    ctl['x_size'] = int(x_def[0])
    ctl['y_size'] = int(y_def[0])
    # Time start/delta stay as strings (e.g. '00z01jan2000', '6hr');
    # spatial start/delta are numeric coordinates.
    ctl['t_start'] = t_def[2]
    ctl['x_start'] = float(x_def[2])
    ctl['y_start'] = float(y_def[2])
    ctl['t_delta'] = t_def[3]
    ctl['x_delta'] = float(x_def[3])
    ctl['y_delta'] = float(y_def[3])
    return ctl
c4975e99db043ca19949e3b433c6d917dfc79bc4
49,052
import math


def get_data_file(lon, lat):
    """
    Returns the correct HGT data file to get elevation from.

    Credit: Aatish Neupane
    Link: https://librenepal.com/article/reading-srtm-data-with-python/

    :param lat: The latitude of the desired point.
    :param lon: The longitude of the desired point.
    :return: The name of the data file.
    :raise: ValueError: If lat/lon are not within supported bounds.
    """
    lat_floor = math.floor(lat)
    lon_floor = math.floor(lon)
    if not (48 <= lat_floor < 60 and 110 < lon_floor <= 121):
        raise ValueError("Point does not fall within supported bounds")
    return "N{}W{}.hgt".format(lat_floor, lon_floor)
c454df12af58fc1174fec591d5d32b3b97f85118
49,053
import random


def generateRandomPort():
    """Generate random number between 30000 and 32767 for External Condor Port"""
    random.seed()
    chosen_port = random.randrange(30000, 32767)
    print("Now using port: {}".format(chosen_port))
    return chosen_port
55bf41c5e0cc5afb8029c8a33b408eba0efc3d72
49,054
def initialDataLists(D):
    """
    Initialize grid, scalars, and vectors lists used in convertToVtk function.
    """
    grid = []
    # One independent empty list per scalar / vector field.
    scalars = [[] for _ in range(len(D["scalar"].keys()))]
    vectors = [[] for _ in range(len(D["vector"].keys()))]
    return [grid, scalars, vectors]
96dab994f29c79d874fd4be55cc96757a3b7c8f4
49,055
from typing import Optional
from typing import Union
from typing import Type
from enum import Enum


def enum_or_raise(value: Optional[Union[str, int]], enum: Type[Enum]) -> Enum:
    """Return Enum or raise exception."""
    if value is not None:
        return enum(value)
    raise Exception("Received None value for enum %s" % enum)
d1a5346ea1facce984bdbcc5b854559c9d335387
49,057
def get_list_of_new_entries(device_entries: list, file_entries: list) -> list:
    """
    Compares log entries from the local logfile with the log entries from the device

    Args:
        device_entries (list): List of LogEntry Objects, fetched from the device
        file_entries (list): List of LogEntry Objects, fetched from the local logfile

    Raises:
        N/A

    Returns:
        new_entries (list): List of LogEntry objects from the device which are
        not yet present in the local logfile
    """
    # if there are no log entries in the local log file or it does not exist
    # yet, simply return the log entries from the device
    if not file_entries:
        return device_entries
    # if there are no log entries on the device, return empty list
    if not device_entries:
        return []
    new_entries = []
    latest_known = file_entries[-1].timestamp
    # Pop the newest device entries until we reach one already present in
    # the local log file.  The `device_entries` guard fixes an IndexError in
    # the original when *every* device entry was newer than the file's
    # newest (the list was emptied and then indexed with [-1]).
    while device_entries and device_entries[-1].timestamp > latest_known:
        new_entries.append(device_entries.pop())
    # sort by timestamp so the log entries are in the correct order (new entries last)
    new_entries.sort(key=lambda entry: entry.timestamp)
    return new_entries
baa16c9999f42e6d71a1d6bc62305f42239b61b0
49,058
def get_region_start(region: str) -> int:
    """Return the start position of a ``contig:start-end`` region string.

    A bare contig name (no ``:``) starts at position 1.
    """
    if ":" not in region:
        return 1
    _contig, start_end = region.split(":")
    start, _end = start_end.split("-")
    return int(start)
4dc1392815560d6e3886addaef7837ecf21069ba
49,059
def sign():
    """Show sign for positive numbers: + ↔ yes, - ↔ no, ' ' ↔ show space."""
    template = "x may be {0:*^+5}, {0:*^-5}, {0:*^ 5}!"
    return template.format(1)
f7df67e735dc1c66cdbda3a615d064a8ac4b704d
49,060
def _parse_attrib(text): """ Parses a string of the form 'key=val'and returns a key, value pair. @type text: C{str} @param text: textual representation of key, value pair @rtype: C{tuple} @return: key, value pair """ key, val = [s.strip() for s in text.split('=')] if '.' in val: try: val = float(val) except ValueError: pass else: try: val = int(val) except ValueError: pass return key, val
bd6c43799c5aa7259d5c610fd2c6374f987b6057
49,062
import os
import subprocess
import sys
import time
import socket
import requests


def spawnFireflyServer(port=5000, frames_per_second=30, decimation_factor=1, max_time=10):
    """Wrapper to :func:`Firefly.server.startFireflyServer` that instead starts
    a background process.

    :param port: port number to serve the :code:`.html` files on, defaults to 5000
    :type port: int, optional
    :param frames_per_second: enforced FPS for stream quality, used only if
        localhost:<port>/stream is accessed, defaults to 30
    :type frames_per_second: int, optional
    :param decimation_factor: factor to decimate data that is being passed through
        localhost:<port>/data_input, defaults to 1
    :type decimation_factor: int, optional
    :param max_time: maximum amount of time to wait for a Firefly server to be
        available.
    :type max_time: float, optional
    :return: subprocess.Popen
    :rtype: subprocess handler
    :raises RuntimeError: if max_time elapses without a successful Firefly
        server being initialized.
    """
    ## wrap passed arguments into a list of strings
    args = ["%d" % port, "%d" % frames_per_second, "%d" % decimation_factor]
    # run_server.py lives alongside this module and is launched with the
    # same interpreter that runs us.
    run_server = os.path.join(os.path.dirname(__file__), 'run_server.py')
    process = subprocess.Popen([sys.executable, run_server] + args)
    init_time = time.time()
    ## check if port is in use
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        print(
            "Waiting up to %d seconds for background Firefly server to start" % max_time,
            end="")
        while True:
            try:
                # Probe until the server answers; any response means it's up.
                requests.post(f'http://localhost:{port:d}', json="test")
                break
            # NOTE(review): bare except also swallows KeyboardInterrupt and
            # typos — narrowing to requests.exceptions.ConnectionError would
            # be safer; confirm nothing relies on the broad catch.
            except:
                ## need to re-check the connection each iteration
                #if s.connect_ex(('localhost', port)) != 0:
                #break
                if time.time() - init_time >= max_time:
                    raise RuntimeError(
                        "Hit max wait-time of %d seconds." % max_time +
                        " A Firefly server could not be opened in the background.")
                else:
                    print(".", end="")
                    time.sleep(1)
    print("done! Your server is available at - http://localhost:%d" % port)
    return process
b66ca0cb46ff5fce8d17b70a36f2473de672d6ff
49,064
def parse_datetitle(raw_metadata):
    """
    uses tags to try to find title and date

    :param raw_metadata: mapping with a 'title' tag (object exposing
        ``.text``) and optionally a 'date' tag list with a 'datetime' entry
    :return: dict with 'date' (or "" when absent) and 'title'
    """
    try:
        date_temp = raw_metadata["date"][0]["datetime"]
    except (KeyError, IndexError, TypeError):
        # Missing, empty, or oddly-shaped date metadata: fall back to "".
        # (Was a bare ``except:`` that also swallowed unrelated errors.)
        date_temp = ""
    return {
        "date": date_temp,
        "title": raw_metadata["title"].text,
    }
d63c66f6ed9d5ebdafc7075c5f8c76b497134f25
49,065
def instjoin(*instnames):
    """Return hierarchical instance names from instance name components

    >>> instjoin('I1','VS')
    'I1.VS'
    >>> instjoin('','VS')
    'VS'
    """
    nonempty = (part for part in instnames if len(part) > 0)
    return '.'.join(nonempty)
3bd842de4bc6026d41d36267d2cca1b6d9d4b575
49,066
def calculate_average_precision(precision_list, recall_list):
    """
    compute average_precision

    Args:
        precision_list: list of precision
        recall_list: list of recall

    Returns:
        average precision (sum of precision * recall increments)
    """
    count = len(precision_list)
    if len(recall_list) != count:
        raise ValueError("the number in precision_list and recall_list is inconsistent")
    return sum(
        precision_list[idx] * (recall_list[idx] - recall_list[idx - 1])
        for idx in range(1, count)
    )
cd95b94347eac22d7572e99920df480663b87770
49,067
import os


def ExpandSconsPath(path):
    """Expands a directory path into the path to the scons file that our
    build uses.

    Ex: magiflute/plugin/common => magicflute/plugin/common/common.scons

    Args:
        path: The directory path to expand.

    Returns:
        The expanded path.
    """
    leaf = os.path.basename(path)
    return '%s/%s.scons' % (path, leaf)
78823b95cdee301974545bef20afc911846b9683
49,068
import os


def clean_extension(filename):
    """Return clean file extension
    - lowercase
    - without .gz
    - without . prefix

    examples:
        file.fa.gz -> fa
        file.pdb -> pdb
        nosuffix -> ''
    """
    lowered = filename.lower()
    if lowered.endswith('.gz'):
        # Drop the compression suffix before inspecting the real extension.
        lowered = lowered[:-len('.gz')]
    extension = os.path.splitext(lowered)[1]
    return extension[1:]
43e2ec2d89fc0226d1306923b00a32a8049f5073
49,069
import os
import tempfile


def get_plugin_tempdir(mytempdir):
    """If mytempdir is not set, return a safe directory for saving state."""
    if mytempdir:
        return mytempdir
    if os.name != 'nt':
        candidates = ['/var/log/nagios', '/var/lib/nagios3']
    else:
        candidates = [tempfile.gettempdir()]
    for candidate in candidates:
        if os.path.isdir(candidate):
            return candidate
    return None
72a6f0f7e7b360516c750104da9088276d4aa0f6
49,070
def same_first_last(L: list) -> bool:
    """Precondition: len(L) >= 2

    Return True if and only if first item of the list is the same as the
    last.

    >>> same_first_last([3, 4, 2, 8, 3])
    True
    >>> same_first_last(['apple', 'banana', 'pear'])
    False
    >>> same_first_last([4.0, 4.5])
    False
    """
    return L[0] == L[-1]
734cfdbfebaa957848414f7ba16f3235d751e038
49,071
import numpy


def dotprod(u, v):
    """
    Wrapper around general dense or sparse dot products.

    Not necessary except as a common interface for supporting ndarray,
    scipy spmatrix, and PySparse arrays.  Returns the dot product of the
    (1 x m) sparse array ``u`` with the (m x 1) dense numpy array ``v``.
    """
    try:
        # spmatrix.matvec writes its result into a 1x1 output array.
        result = numpy.array([0.0])
        u.matvec(v, result)
        return result[0]  # extract the scalar
    except AttributeError:
        # Assume u is a dense array.
        return numpy.dot(u, v)
ca9d6d4bb7bc577ee510410d0e2a0890c7bae197
49,073
def _records_match(old_ttl, old_record_data, new_ttl, new_record_data): """Checks to see if original and new TTL and values match.""" matches = True if old_ttl != new_ttl: matches = False if old_record_data != new_record_data: matches = False return matches
982f8d5b9f6c72b95a5d6b76d4909eb89906e3df
49,074
def htmlDataset(dataset={}, title=""):
    """Utility function to generate HTML Table from a Dataset.

    Row 0 is rendered as a header row (<TH>), all other rows as data
    cells (<TD>).
    """
    pieces = ["<TABLE cellpadding=5> <caption align=top>" + title +
              " </caption><TR></TR><TR></TR>"]
    for row in dataset['DATA']:
        pieces.append("<TR>")
        is_header = (row == 0)
        for col in dataset['DATA'][row]:
            if is_header:
                pieces.append("<TH align=left bgcolor=#BBDB88>")
            else:
                pieces.append("<TD align=left bgcolor=#FFFAB2>")
            pieces.append(col)
            pieces.append("</TH>" if is_header else "</TD>")
        pieces.append("</TR>")
    pieces.append("</TABLE>")
    return "".join(pieces)
871c44487fadacdb8c255389a57065b0887df1c6
49,075
import logging


def get_logger():
    """
    The function returns the common logger for all OTE training tests.
    """
    # Use the parent package of this module as the shared logger name.
    package_name = ".".join(__name__.split(".")[:-1])
    return logging.getLogger(package_name)
26fa81856262a0a0964c0323f14e4cb8a432993e
49,076
def get_bprop_npu_alloc_float_status(self):
    """Grad definition for `NPUAllocFloatStatus` operation."""

    def bprop(out, dout):
        # The op takes no inputs, so there is no gradient to propagate.
        return ()

    return bprop
4fa3c9f6e6bf099ca43779071be93e383897c72e
49,077
def clamp(n, lower, upper):
    """
    :param n: Number to clamp
    :param lower: Lower bound
    :param upper: Upper bound
    :return: Clamped number
    """
    capped = min(n, upper)
    return max(lower, capped)
df283f640cfee371b44d8ab33c90ac959f3601e2
49,078
def _passthrough(folder_out: str) -> str: """ Pass the state from the main thread to the mergeing function """ return folder_out
7ac97ca389bfb75470238309986a3bafbe7260a2
49,079
def GetLabels(args, client, instance_properties=False):
    """Build the labels message for an instance.

    Args:
        args: Parsed CLI args; ``args.labels`` is a dict of label
            key/values (possibly None or empty).
        client: Compute client exposing the generated ``messages`` module.
        instance_properties: When True, build the InstanceProperties
            labels message instead of the Instance one.

    Returns:
        A populated LabelsValue message, or None when no labels were given.
    """
    # The six.iteritems() shim was only needed on Python 2; plain
    # dict.items() is equivalent on Python 3, so the six dependency
    # is dropped.
    if instance_properties:
        labels_value = client.messages.InstanceProperties.LabelsValue
    else:
        labels_value = client.messages.Instance.LabelsValue
    if not args.labels:
        return None
    return labels_value(additionalProperties=[
        labels_value.AdditionalProperty(key=key, value=value)
        for key, value in sorted(args.labels.items())
    ])
f32e135a211c151d97062e5535e7b0149f081ea9
49,080
import re


def clean_text(string):
    """Remove @mentions, URLs, digits and special characters from text.

    The input is lowercased first; surviving words are re-joined with
    single spaces.
    """
    # Raw string for the pattern; alternatives strip mentions, any
    # non-letter character, and scheme://... links.
    pattern = r"(@[A-Za-z]+)|([^A-Za-z \t])|(\w+:\/\/\S+)"
    stripped = re.sub(pattern, "", string.lower())
    return " ".join(stripped.split())
6dd7dc754ca77f2a7667cf470735a47e6afa069a
49,081
def pivot_index(df):
    """Extract the row indexers of a pivot table, excluding the totals row.

    Assumes the final row of ``df`` is the grand-total row.
    """
    last = len(df.index) - 1
    return df.index[:last]
7fce4bf9b236aab07882c0fc26e6af650437aedc
49,082
def get_video_parts(video_path):
    """Split a full video path into its parts.

    Expects paths shaped like ``<root>/<train_or_test>/<classname>/<filename>``.

    Returns:
        tuple: (train_or_test, classname, filename_no_ext, filename)
    """
    normalized = video_path.replace('\\', '/')  # adapt Windows separators
    parts = normalized.split('/')
    train_or_test, classname, filename = parts[1], parts[2], parts[3]
    filename_no_ext = filename.split('.')[0]
    return train_or_test, classname, filename_no_ext, filename
9f514909f398b0abf5b929c5ca384971168a268e
49,084
def find_cal_by_type(standards, std_type):
    """Return the first CalEvent using the given standard type.

    :param standards: list of CalEvent objects
    :param std_type: string, one of ['high', 'mid', 'low']
    :return: first CalEvent whose ``standard_used`` matches, or None
    """
    for event in standards:
        if event.standard_used == std_type:
            return event
    return None
f87e011534e6aa16c1c508bcafd334d7886f5568
49,085
import re


def match_mm3_angle(mm3_label):
    """Match an MM3* angle label.

    Angle labels start with a whitespace character or lowercase letter
    followed by '2'.

    Args:
        mm3_label: The MM3* parameter label string.

    Returns:
        re.Match anchored at the start of the string, or None.
    """
    # Raw string: in the original plain literal, '\s' is an invalid
    # escape (DeprecationWarning, SyntaxError in future Pythons).
    return re.match(r'[\sa-z]2', mm3_label)
bb9d3a41e9b0aabb6efe36efc251ceb83c134911
49,086
def _getcnstr(obj, enum):
    """Get the constraint object for ``enum``.

    Returns the active constraint when one exists on the object's
    AccessConstraints; otherwise adds a new constraint and returns it.
    """
    constraints = obj.AccessConstraints
    if not constraints.IsConstraintActive(enum):
        return constraints.AddConstraint(enum)
    return constraints.GetActiveConstraint(enum)
fe8852cc4e1cd4e1e862135f245fdabb246ed8e7
49,088
def dict2json(dictionary):
    """Serialize a flat dict of strings to a JSON object string.

    Fixes the original escaping: ``'\"'`` in a plain literal is just
    ``'"'`` (a no-op replacement), so quotes were never escaped, and
    backslashes were doubled *after* the quote pass. Backslashes must be
    escaped first so the escapes we introduce are not re-escaped.

    Note: keys are emitted as-is; callers must not use quotes or
    backslashes in keys.
    """
    items = []
    for key in dictionary:
        value = dictionary.get(key)
        # Escape backslashes before quotes.
        escaped = value.replace('\\', '\\\\').replace('"', '\\"')
        items.append('"' + key + '": "' + escaped + '"')
    return "{" + ",".join(items) + "}"
3e0d00aec2617926aead36b43f5719fb86ecff36
49,089
from typing import Any


def _float_val(col: Any) -> float:
    """Coerce marker-column data to float for use in a typed np.array.

    The marker column is float when it holds 0.0 and only has type str
    for an actual marker value, which maps to 1.0.
    """
    return 1.0 if isinstance(col, str) else float(col)
8bd8636a21bb7e72eb6f585702462a34a63575a5
49,090
def getkeyval(key, commentdct):
    """Look up ``key`` in an IDD comment dict and coerce its value.

    ``commentdct`` looks like ``{'default': ['4'], 'maximum': ['6'],
    'required-field': [''], 'note': ['this is', ' a note']}``.

    - numeric keys (minimum, minimum>, maximum, maximum<, min-fields)
      return a float, or None when absent;
    - text keys (field, note, units, ip-units, default, type,
      object-list, memo) return the lines joined with newlines, or None
      (so 'default' for ['4'] returns the string '4');
    - flag keys (required-field, autosizable, unique-object,
      required-object) return True/False based on presence;
    - list keys (key, reference) return the raw list, or None.
    """
    upper_key = key.upper()

    numeric_keys = {'MINIMUM', 'MINIMUM>', 'MAXIMUM', 'MAXIMUM<',
                    'MIN-FIELDS'}
    text_keys = {'FIELD', 'NOTE', 'UNITS', 'IP-UNITS', 'DEFAULT', 'TYPE',
                 'OBJECT-LIST', 'MEMO'}
    flag_keys = {'REQUIRED-FIELD', 'AUTOSIZABLE', 'UNIQUE-OBJECT',
                 'REQUIRED-OBJECT'}
    list_keys = {'KEY', 'REFERENCE'}

    if upper_key in numeric_keys:
        try:
            return float(commentdct[key][0])
        except KeyError:
            return None
    if upper_key in text_keys:
        try:
            # Join the comment lines into one newline-separated string.
            return '\n'.join(commentdct[key])
        except KeyError:
            return None
    if upper_key in flag_keys:
        # dict.has_key() was removed in Python 3; use the 'in' operator.
        return key in commentdct
    if upper_key in list_keys:
        try:
            return commentdct[key]
        except KeyError:
            return None
66cd513619253d07d9750df9594050dc2884aad5
49,091
def __collatz_recursivo(numero: int, pasos: list) -> list:
    """Recursively walk the Collatz conjecture from ``numero``.

    Appends each visited value to ``pasos`` and returns the accumulated
    list of steps once 1 is reached.
    """
    pasos.append(numero)
    if numero == 1:
        return pasos
    siguiente = numero // 2 if numero % 2 == 0 else numero * 3 + 1
    return __collatz_recursivo(siguiente, pasos)
249a5de371528e5fe0f3aee247a0ce525461ca32
49,092
import base64


def validate_signature(value):
    """
    Validate a JSON input for signature.

    The value must look like ``<prefix>,<base64-data>``; the base64 part
    is verified by checking it round-trips through decode/encode
    unchanged (i.e. it is canonical base64).

    Parameters:
        value (any): Input value

    Returns:
        boolean: True if value has the format required for a signature
    """
    try:
        encoded = value.split(',')[1]
        return base64.b64encode(base64.b64decode(encoded)) == \
            bytes(encoded, encoding='ascii')
    except (AttributeError, IndexError, TypeError, ValueError):
        # Narrowed from a bare except: non-strings (AttributeError /
        # TypeError), missing comma (IndexError) and bad base64
        # (binascii.Error, a ValueError subclass) all fail validation.
        print("Expected base64, got something else")
        return False
05000297dc7593e7a2073fef93e606d3164cac5d
49,093
def get_multiple_ranges(service, spreadsheet_id, range_names):
    """
    Get multiple ranges from Google Sheets.

    :param service: Authenticated sheets service object
    :param spreadsheet_id: Id of the spreadsheet
    :param range_names: Ranges to get
    :return: List of value ranges from the batch query ([] when absent)
    """
    request = service.spreadsheets().values().batchGet(
        spreadsheetId=spreadsheet_id,
        ranges=range_names,
    )
    response = request.execute()
    return response.get('valueRanges', [])
36bf548fe3d444909854317490379e5c64a0da33
49,094
def get_finishers(df):
    """
    Filter the DataFrame down to complete replies.

    A reply counts as complete when its dispcode lies in [31, 34].

    :param df: DataFrame with a 'dispcode' column
    :return: filtered DataFrame
    """
    complete = df['dispcode'].between(31, 34)  # inclusive on both ends
    return df[complete]
8493943ed38f3a62b6a3d742ab2b4febccb34429
49,095
def payload_satisfies(pred):
    """Lift a predicate on payloads to a predicate on Intervals.

    The returned function expects as many Intervals as ``pred`` takes
    arguments and applies ``pred`` to their ``payload`` attributes
    instead of the Intervals themselves.

    Arg:
        pred: The predicate to wrap.

    Returns:
        An output function that applies ``pred`` to payloads.
    """
    def on_payloads(*intervals):
        payloads = [interval.payload for interval in intervals]
        return pred(*payloads)
    return on_payloads
8d758587d9a361740cfcb88c9ea29b7bea6f1ff7
49,096
def get_clusterequipment(api):
    """
    Tests and tools should not use this directly. This is for hooking
    the libraries together.

    Parameter:
        api (qalib.api.Api)

    Returns a qalib.corelibs.equipmentprovider.ClusterEquipment object
    """
    if hasattr(api, '_context'):
        return api._context.cluster
    if not hasattr(api, '_clusterequipment'):
        raise ValueError("Not an Api object")
    return api._clusterequipment
c08d9b756159ce49d164881bb09e7db2141a0fff
49,099
import os


def create_xvfb_cmd():
    """Create the Xvfb launch command with a displayfd write descriptor.

    Returns:
        tuple: (command list for launching Xvfb, (read_fd, write_fd)).
        The write descriptor is embedded in the command via
        ``-displayfd`` and marked inheritable for the child process.
    """
    # os.pipe2 with O_NONBLOCK avoids the race conditions plain
    # os.pipe() can hit when called in quick succession (e.g. in tests).
    read_fd, write_fd = os.pipe2(os.O_NONBLOCK)
    # Allow the child process to use the descriptor created here.
    os.set_inheritable(write_fd, True)
    xvfb_cmd = [
        "Xvfb",
        "-displayfd", str(write_fd),  # write descriptor
        "-screen", "0", "1600x1200x24",
        "-dpi", "100",
        "-extension", "RANDR",
    ]
    return xvfb_cmd, (read_fd, write_fd)
9052ec9652d845fa6c3e531a2825e0fb0e200b1d
49,100
def reshape_(dat, len_lv1, len_lv2):
    """Reshape a 2-D signal array for LSTM input.

    Trailing rows that do not fill a complete ``len_lv1 * len_lv2``
    chunk are discarded, then the data is reshaped to
    (n_chunks, len_lv2, len_lv1 * n_features).
    """
    shp = dat.shape
    chunk_rows = len_lv1 * len_lv2
    n_chunks, leftover = divmod(shp[0], chunk_rows)
    # Drop the uneven remainder (if any) before reshaping.
    trimmed = dat[:-leftover] if leftover else dat
    return trimmed.reshape(n_chunks, len_lv2, len_lv1 * shp[1])
f4076f42d76d51c55a4b66aafd59aa4f0a3f10eb
49,101
def fix_label_length(labels: list, max_length: int = 20) -> list:
    """Truncate labels longer than ``max_length``, adding an ellipsis.

    :param labels: a list of textual labels
    :param max_length: maximum allowed length (the '...' is included)
    :return: a list of updated labels
    """
    return [
        label if len(label) <= max_length else label[:max_length - 3] + "..."
        for label in labels
    ]
25f20ceff1cfc2419837dc3fb0bdb8cf13b88082
49,102
def mongodb_str_filter(base_field, base_field_type):
    """Build a MongoDB query fragment for string matching.

    The match mode is selected by ``base_field_type``:
    '1' exact, '2' starts-with, '3' contains, '4' ends-with.
    Returns '' for an empty field or an unknown type.

    >>> mongodb_str_filter(21, '1')
    '21'
    >>> mongodb_str_filter(21, '2')
    {'$regex': '^21'}
    >>> mongodb_str_filter(21, '3')
    {'$regex': '.*21.*'}
    >>> mongodb_str_filter(21, '4')
    {'$regex': '21$'}
    """
    text = str(base_field)
    if text == '':
        return ''
    if base_field_type == '1':   # Equals
        return text
    if base_field_type == '2':   # Begins with
        return {'$regex': '^' + text}
    if base_field_type == '3':   # Contains
        return {'$regex': '.*' + text + '.*'}
    if base_field_type == '4':   # Ends with
        return {'$regex': text + '$'}
    return ''
02c81184002946e57ac97b5fabe1412be6759821
49,106
def PI(improvements):
    """Format a string of percent improvements.

    ``improvements`` is a numeric array supporting elementwise division
    (e.g. a numpy array); each entry is rendered as its share of the
    total, in percent, inside brackets.

    NOTE(review): the zero-total branch always emits exactly three
    zero entries regardless of input length — confirm this is intended.
    """
    total = sum(improvements)
    if total == 0:
        zero = f"{0.0:6.2f}"
        return f"[{zero},{zero},{zero}]"
    shares = improvements / total
    formatted = (f"{share * 100.0:6.2f}" for share in shares)
    return "[" + ",".join(formatted) + "]"
3a986310c710d169aff956a11353b83ad3a489b5
49,108
def get_eso_file_version(raw_version):
    """Return the eso file version as an integer (i.e.: 860, 890).

    Takes the 5 characters following the first space of the stripped
    version string and removes the dots, e.g. 'Version 8.9.0' -> 890.
    """
    version = raw_version.strip()
    first_space = version.index(" ")
    digits = version[first_space + 1:first_space + 6]
    return int(digits.replace(".", ""))
d456ce65585a8aad0c08d9e53b6aeb77132a0d49
49,109
def have_readme(answer):
    """Report whether the project has a README.

    :param answer: mapping with a 'check_readme' entry
    :return: the stored 'check_readme' value
    """
    has_readme = answer['check_readme']
    return has_readme
47e963d36a18211e40580dae26e1c44b82f12375
49,111
from datetime import datetime


def is_expired(dt):
    """
    Check whether a point in time lies in the past.

    :param dt: datetime, or a '%Y-%m-%d %H:%M:%S' date string
    :return: True when ``dt`` is before now, else False
    """
    deadline = (datetime.strptime(dt, '%Y-%m-%d %H:%M:%S')
                if isinstance(dt, str) else dt)
    return deadline < datetime.now()
3adfb90674e0173fc91ae1a6d007e02729c3eaee
49,112
def io(graph):
    """Find input and output nodes of a TF graph (heuristic).

    Translated from the original German note: "does not work" reliably —
    treat the result as a best-effort guess.

    Heuristic: ops with no inputs (and not of type 'Const') are inputs;
    ops whose output does not feed any other op are outputs. Per the
    original note, op types that are normally not outputs include:
    VarIsInitializedOp, ReadVariableOp, Identity, Const,
    AssignVariableOp (?).

    Returns:
        tuple: (inputs, outputs) lists of graph operations.

    NOTE(review): this function shadows the stdlib ``io`` module name.
    """
    ops = graph.get_operations()
    # Start with every op as a potential output and prune ops that
    # feed another op.
    outputs_set = set(ops)
    inputs = []
    for op in ops:
        if len(op.inputs) == 0 and op.type != 'Const':
            inputs.append(op)
        else:
            for input_tensor in op.inputs:
                if input_tensor.op in outputs_set:
                    outputs_set.remove(input_tensor.op)
    outputs = list(outputs_set)
    return (inputs, outputs)
5b735c85669dfcf6966cadfd2641cbbdce8cef3d
49,113
import re


def is_valid_position(password: str) -> bool:
    """
    Check if the given password line is valid under the position policy:
    exactly one of the two 1-based positions must hold the letter.

    Example: '1-3 b: cdefg' is invalid — neither position 1 nor
    position 3 contains 'b'.

    :type password: str
    :rtype: bool
    """
    first, second, letter, pwd = re.split(': |-| ', password)
    at_first = pwd[int(first) - 1] == letter
    at_second = pwd[int(second) - 1] == letter
    # Exactly-one-of (xor) expressed as inequality of booleans.
    return at_first != at_second
1222d27a99ff26e5bcf0be8e4fb98d28d792830f
49,114
import subprocess


def is_git_repo(git_dir=''):
    """
    Check if a directory is a valid git repo or not.

    :param git_dir: the git directory (defaults to the current directory)

    Returns True if git_dir is a valid git repo, otherwise False.
    """
    kwargs = {'cwd': git_dir} if git_dir else {}
    completed = subprocess.run(['git', 'status'], **kwargs)
    return completed.returncode == 0
751ba992de6f09fd5dadb8df2988132035c053ad
49,115
def int_to_bytes(x, length):
    """Convert a big-endian integer to a bytearray of fixed length.

    High bytes beyond ``length`` are silently dropped (the value is
    effectively reduced modulo 256**length), matching the digit loop.
    """
    out = bytearray(length)
    value = int(x)
    # Fill from the least-significant byte backwards.
    for pos in range(length - 1, -1, -1):
        out[pos] = value % 256
        value //= 256
    return out
f8cfceca1c51483115c7d2d877b9d4a6c7c5fef9
49,117
def strip_linebreaks(s):
    """Strip excess line breaks: drop every empty line from ``s``."""
    nonempty = [line for line in s.split(u'\n') if line]
    return u"\n".join(nonempty)
537b835608e8697da4d9c05f1983fe39ef09b980
49,119
def find_output_node(nodetree):
    """Get the first OUTPUT-type node that has incoming links.

    Scans ``nodetree.nodes`` in order; a node qualifies when its type
    starts with 'OUTPUT' and any of its input sockets is linked.
    Returns None when no such node exists.
    """
    for node in nodetree.nodes:
        if not node.type.startswith('OUTPUT'):
            continue
        if any(sock.is_linked for sock in node.inputs):
            return node
    return None
f3dafd1873baacdc1568a3ab6d7d8d70602127ec
49,120
import socket


def is_unused_port(port, protocol):
    """
    Check whether the port is unused.

    Args:
        port (int): port
        protocol (str): application protocol (tcp or udp)

    Return:
        bool: whether the port is unused
    """
    protocols = {
        'udp': socket.SOCK_DGRAM,
        'tcp': socket.SOCK_STREAM,
    }
    # Context manager guarantees the socket is closed even when bind()
    # raises — the original leaked the file descriptor on that path.
    with socket.socket(family=socket.AF_INET,
                       type=protocols[protocol]) as sock:
        try:
            sock.bind(('127.0.0.1', port))
        except socket.error:
            return False
    return True
6a40d4a1b9882d1f8cf2d9bfa56b59e2f81c7737
49,121
import glob


def filebrowser(ext=''):
    """Recursively collect file paths (relative to the current working
    directory) whose name ends with the given extension."""
    pattern = "**/*.{}".format(ext)
    return list(glob.iglob(pattern, recursive=True))
63cf4bc4d26054340ffc481cdcf34783f4de982d
49,122
def _make_rv_params_variable(rv, **params):
    """
    Wrap the random variable ``rv``, allowing it to accept
    time-dependent parameters.

    If any parameter is callable, return a function of ``t`` that
    evaluates the callable parameters at ``t`` before constructing the
    random variable; otherwise construct ``rv`` immediately.
    """
    has_time_dependence = any(callable(value) for value in params.values())
    if not has_time_dependence:
        return rv(**params)

    def rv_at(t):
        evaluated = {
            name: (value(t) if callable(value) else value)
            for name, value in params.items()
        }
        return rv(**evaluated)

    return rv_at
a497962b67f88a9bc01855e695d17a0aceaa8958
49,124
def _call_initialize(klass):
    """Class decorator: invoke ``klass._initialize()`` as soon as the
    class is created, then return the class unchanged."""
    klass._initialize()
    return klass
367642e8c53bf1bf4d7e8625dbe60938a5b0d7ff
49,126
import torch


def tsp_get_min_dist(dist_matrix):
    """
    Mean minimal distance between two distinct nodes per TSP instance.

    Takes a distance matrix of shape (n, n) or (bs, n, n) (anything
    convertible to a torch.Tensor). For each instance the (zero)
    diagonal is masked out by adding the global max distance, the
    per-node minimum distance is taken, and the result is the mean over
    nodes.

    Returns a scalar tensor for a single instance, else a (bs,) tensor.

    Raises:
        ValueError: when the input cannot be converted to a tensor.
    """
    if not isinstance(dist_matrix, torch.Tensor):
        try:
            dist_matrix = torch.tensor(dist_matrix)
        except Exception:
            raise ValueError(
                f"Type {type(dist_matrix)} could not be broadcasted to torch.Tensor")

    single_instance = dist_matrix.dim() == 2
    if single_instance:
        dist_matrix = dist_matrix.unsqueeze(0)

    _, n, _ = dist_matrix.shape
    # Push the diagonal above every real distance so it never wins the min.
    diag_mask = torch.zeros_like(dist_matrix)
    diag_mask[:] = torch.eye(n)
    masked = dist_matrix + torch.max(dist_matrix) * diag_mask

    per_node_min = torch.min(masked, dim=-1).values
    mean_min = torch.mean(per_node_min, dim=-1)
    return mean_min[0] if single_instance else mean_min
1eea80f43b062567b58d9938016e6dce6c46e731
49,127
import os


def read_filenames_with_ending(path_to_dir, ending):
    """Return the filenames in ``path_to_dir`` that end with ``ending``."""
    return [name for name in os.listdir(path_to_dir)
            if name.endswith(ending)]
29dd6114971d5e80f0124f44d27b679660b869cc
49,128
import math


def isclose(a: float, b: float, rel_tol: float = 0.25, abs_tol: float = 1) -> bool:
    """Wrap math.isclose with very generous defaults.

    Useful for floating-point operations where the spec makes no
    accuracy requirements.

    Raises:
        ValueError: when either input is not finite.
    """
    if math.isfinite(a) and math.isfinite(b):
        return math.isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol)
    raise ValueError(f"{a=} and {b=}, but input must be finite")
5cbfbd4087a895ea634d3a5432930b18edf1e40a
49,129
import os
import csv


def symbol_map(gene_symbols):
    """
    Map outdated gene names to their newest approved symbols.

    The code map was generated on February 18th, 2019 at this URL:
    https://www.genenames.org/cgi-bin/download/custom?col=gd_app_sym&col=gd_prev_sym&status=Approved&status=Entry%20Withdrawn&hgnc_dbtag=on&order_by=gd_app_sym_sort&format=text&submit=submit

    Args:
        gene_symbols: collection of symbols considered current; rows
            whose approved symbol is already present here are skipped.

    Returns:
        dict mapping each previous symbol to its approved symbol.
    """
    filename = os.path.join(os.path.dirname(__file__), 'datastore',
                            'genenames_code_map_Feb2019.txt')
    with open(filename) as csv_file:
        reader = csv.reader(csv_file, delimiter='\t')
        # approved symbol -> comma-separated previous symbols
        approved_to_previous = {row[0]: row[1] for row in reader}

    mapping = {}
    for approved, previous in approved_to_previous.items():
        if approved in gene_symbols:
            continue
        for prev_symbol in previous.split(", "):
            mapping[prev_symbol] = approved
    return mapping
67f19263f7a026dd29c0ec6a6ea4c6afad2b5057
49,130
def clean_data(df_):
    """
    Clean and update a trip DataFrame in place (and return it).

    - maps CALL_TYPE / DAY_TYPE letters A/B/C to 0/1/2
    - derives WEEK / DAY / HOUR as TIMESTAMP modulo a week, a day and
      an hour respectively (seconds within each period)
    - fills missing ORIGIN_CALL / ORIGIN_STAND with -1 and casts the
      id/flag columns to int
    """
    letter_codes = {'A': 0, 'B': 1, 'C': 2}
    df_['CALL_TYPE'] = df_['CALL_TYPE'].map(letter_codes)
    df_['DAY_TYPE'] = df_['DAY_TYPE'].map(letter_codes)

    # Vectorized modulo replaces the per-element apply calls.
    df_['WEEK'] = df_['TIMESTAMP'] % (7 * 24 * 3600)
    df_['DAY'] = df_['TIMESTAMP'] % (24 * 3600)
    df_['HOUR'] = df_['TIMESTAMP'] % 3600

    df_.loc[df_['ORIGIN_CALL'].isnull(), 'ORIGIN_CALL'] = -1
    df_.loc[df_['ORIGIN_STAND'].isnull(), 'ORIGIN_STAND'] = -1
    for col in ('ORIGIN_CALL', 'ORIGIN_STAND', 'MISSING_DATA',
                'TRAJECTORY_IDX'):
        df_[col] = df_[col].astype(int)
    return df_
2cf6342d98bfef372b9cc6b372ee59ba7601285f
49,131
import os


def get_models(path):
    """
    Collect paths to 3d model files under ``path``.

    Returns full paths for files whose name ends with a known model
    extension ('npy'), skipping .DS_Store artifacts.

    Bug fixed: the original wrapped the endswith checks in a bare
    generator expression, which is always truthy, so the extension
    filter never actually applied and every file was returned.
    """
    extensions = ('npy',)
    models = []
    for name in os.listdir(path):
        if name.startswith('.DS_Store'):
            continue
        # str.endswith accepts a tuple of suffixes.
        if name.endswith(extensions):
            models.append(os.path.join(path, name))
    return models
4333044805934fb26b80ae666f21aa043bc01c07
49,132
import re


def get_orbit_number(the_file):
    """
    GET_ORBIT_NUMBER

    Returns the orbit number from the filename: the last run of six
    consecutive digits found in it.
    """
    six_digit_runs = re.findall(r"[0-9]{6}", the_file)
    return six_digit_runs[-1]  # last match is the orbit number
8d7d8ce3d093ab5790a4a1f5f44928b18a169f3c
49,133