Columns: content (string, 35 to 416k chars) · sha1 (string, 40 chars) · id (int64, 0 to 710k)
def map_pair_name_to_exchange_name(pair_name):
    """
    We're preparing to add the notion that exchanges can have multiple trading
    pairs into our system. Each exchange is going to have a single ExchangeData
    db object but have one wrapper for each pair. Order.exchange_name is going
    to refer to the pair, but most accounting functions take place on the
    ExchangeData object. Thus, we need a mapping of
    ExchangeWrapper -> ExchangeData. This function will serve that purpose for
    now.

    To add a master-slave relationship to a pair, add a line like this:

    if pair_name == 'GEMINI_ETH_USD':  # [slave pair]
        return 'GEMINI_BTC_USD'  # [master pair]
    """
    return pair_name
314d82d234eb5d096f7bad4c8e75e7f7bde32b75
700,607
def lower_first(string):
    """Return a new string with the first letter lowercased."""
    if len(string) > 0:
        return string[0].lower() + string[1:]
    else:
        return string
fc6fba78d15633f1ab21105fbd46883797444fb1
700,609
def brocher_vp(f):
    """
    V_p derived from V_s via Brocher (2005) eqn 9.
    """
    f *= 0.001
    f = 0.9409 + f * (2.0947 - f * (0.8206 - f * (0.2683 - f * 0.0251)))
    f *= 1000.0
    return f
20c8d4961f1660384ecccd081db84a5351ec4d6e
700,610
def final(data):
    """
    Last evolution time point, can be used to obtain parameters at the very
    end of the run.

    :param data: numpy ndarray containing the history of the system.
    :return: selection of the last evolution point.
    """
    return ([data.shape[0] - 1],)
46a489d0674bece12476af74db4cf4d17f623c4b
700,611
def make_default_config(project):
    """
    Return a default configuration for exhale.

    **Parameters**
        ``project`` (str)
            The name of the project that will be searched for in
            ``testing/projects/{project}``.

    **Return**
        ``dict``
            The global default testing configuration to supply to
            ``confoverrides`` with ``@pytest.mark.sphinx``, these are values
            that would ordinarily be written in a ``conf.py``.
    """
    return {
        "breathe_projects": {project: "./_doxygen/xml"},
        "breathe_default_project": project,
        "exhale_args": {
            # required arguments
            "containmentFolder": "./api",
            "rootFileName": "{0}_root.rst".format(project),
            "rootFileTitle": "``{0}`` Test Project".format(project),
            "doxygenStripFromPath": "..",
            # additional arguments
            "exhaleExecutesDoxygen": True,
            "exhaleDoxygenStdin": "INPUT = ../include"
        }
    }
7b913634f0df656a870d4886cf29391727eb4b21
700,612
def cylinder_divergence(xi, yi, zi, r, v):
    """
    Calculate the divergence of the velocity field returned by the
    cylinder_flow() function given the path of a streamtube providing its
    path components xi, yi, zi. The theoretical formula used to calculate the
    returned variable 'div' has been obtained by hand and is susceptible to
    errors.

    Input:
        xi, yi, zi :: 1D arrays. Components of the path of the streamtube.
        r          :: Float. Radius of the cylinder.
        v          :: Float. Modulus of the velocity of the flow.

    Output:
        div :: 1D array. The calculated divergence.
    """
    div = -4*v*r**2*xi*zi**2 / (xi**2 + zi**2)**3
    return div
23ef35bee21270f2255e1b378c5e6cc5839b4d40
700,614
def to_conll_iob(annotated_sentence):
    """
    `annotated_sentence` = list of triplets [(w1, t1, iob1), ...]

    Transform a pseudo-IOB notation: O, PERSON, PERSON, O, O, LOCATION, O
    to proper IOB notation: O, B-PERSON, I-PERSON, O, O, B-LOCATION, O
    """
    proper_iob_tokens = []
    for idx, annotated_token in enumerate(annotated_sentence):
        tag, word, ner = annotated_token
        if ner != 'O':
            if idx == 0:
                ner = "B-" + ner
            elif annotated_sentence[idx - 1][2] == ner:
                ner = "I-" + ner
            else:
                ner = "B-" + ner
        proper_iob_tokens.append((tag, word, ner))
    return proper_iob_tokens
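A quick usage sketch for to_conll_iob; the sentence and tags are illustrative, and the triples follow the (token, POS, tag) layout the function unpacks:

sentence = [('John', 'NNP', 'PERSON'), ('lives', 'VBZ', 'O'),
            ('in', 'IN', 'O'), ('New', 'NNP', 'LOCATION'),
            ('York', 'NNP', 'LOCATION')]
print(to_conll_iob(sentence))
# [('John', 'NNP', 'B-PERSON'), ('lives', 'VBZ', 'O'), ('in', 'IN', 'O'),
#  ('New', 'NNP', 'B-LOCATION'), ('York', 'NNP', 'I-LOCATION')]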
92fd0904782d241c9729df8a167840e38dfde605
700,615
import unicodedata
import re


def normalize(text):
    """
    Normalizes text before keyword matching.

    Converts to lowercase, performs KD unicode normalization and replaces
    multiple whitespace characters with single spaces.
    """
    return unicodedata.normalize('NFKD', re.sub(r'\s+', ' ', text.lower()))
c03a3148d39161cfd5751a306ca842362c46fb28
700,617
import random


def random_int(n=5):
    """
    Create a random integer between 1 and 10**n and return it as a string.

    :return: str
    """
    return str(random.randint(1, 10 ** n))
e5fa29ac6177a5c4f6cdd2ee73a5d72736667505
700,619
def check_prime(number):
    """
    Checks if the given number is prime

    :param: number Number to evaluate for primality
    :rtype: bool True if the number is a prime, false otherwise
    """
    if number < 2:
        # 0, 1 and negative numbers are not prime
        return False
    for divisor in range(2, int(number**0.5) + 1):
        if number % divisor == 0:
            return False
    return True
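A few hand-checkable calls against the primality check above:

assert check_prime(2) is True
assert check_prime(97) is True
assert check_prime(15) is False  # 3 * 5
assert check_prime(1) is False   # handled by the number < 2 guard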
37c7cc341c07d02d4c95bf9b95223368504d79e9
700,621
import argparse


def parse_args():
    """set and check parameters."""
    parser = argparse.ArgumentParser(description="BRDNet process")
    parser.add_argument("--pipeline", type=str, default=None, help="SDK infer pipeline")
    parser.add_argument("--clean_image_path", type=str, default=None,
                        help="root path of image without noise")
    parser.add_argument('--image_width', default=500, type=int, help='resized image width')
    parser.add_argument('--image_height', default=500, type=int, help='resized image height')
    parser.add_argument('--channel', default=3, type=int,
                        help='image channel, 3 for color, 1 for gray')
    parser.add_argument('--sigma', type=int, default=15, help='level of noise')
    args_opt = parser.parse_args()
    return args_opt
a545a0cfc9e2ecf4c09e0daa785a962a22159577
700,622
def genify(options_list, gen):
    """
    Choose position from interval of discrete parameters based on gen value

    :param options_list: List of discrete values
    :param gen: DNA gen value
    :return:
    """
    # Probability steps for projecting to given gen
    prob_step = 1 / len(options_list)
    for i in range(len(options_list)):
        # The i-th bin covers (i * prob_step, (i + 1) * prob_step]; the
        # original `gen <= i * prob_step` test skipped the first bin entirely
        if gen <= (i + 1) * prob_step:
            return options_list[i]
    return options_list[-1]
e0b277da2843cf3d7191bbf0445cd80358e7bdcc
700,623
def to_int(value):
    """Coerce `value` to an int, tolerating thousands separators and
    slash-style decimal marks; return 0 if the conversion fails."""
    try:
        return int(float(value))
    except (ValueError, TypeError):
        try:
            return int(float(value.replace(',', '').replace('/', '.').replace('\\', '.')))
        except Exception:
            return 0
460d4317ee02c9c27a5574f95e9b4cdcc6d55e6b
700,624
def getCharsSegments(rows, bounds):
    """
    Gets the char segments of text row images according to char bounds.
    rows and bounds are lists of the same matching sizes.

    Parameters
    ----------
    rows (list)   : The list of segmented text row images
    bounds (list) : Bounds matching char image sizes in the text rows

    Returns
    -------
    list : List of segmented char images in the text row images
    """
    charsSegments = []
    boundsIndex = 0
    for row in rows:
        rowCharBounds = bounds[boundsIndex]
        rowCharsSegments = []
        for charBound in rowCharBounds:
            rowCharsSegments += [
                row[(charBound[0]):(charBound[1]), (charBound[2]):(charBound[3])]
            ]
        boundsIndex += 1
        charsSegments += [rowCharsSegments]
    return charsSegments
d9a14ec47e5bc384cb2df1d5e69fc41613dd9d29
700,625
import json


def load_cat_to_name(json_path):
    """Load Label Name from JSON file."""
    with open(json_path, 'r') as f:
        cat_to_name = json.load(f)
    return cat_to_name
43c30e9dbe29e29d507a873e6cc37f22bc7984c4
700,626
import typing


def _consts(fn: typing.Callable) -> tuple:
    """
    Returns a tuple of the function's constants excluding the docstring.
    """
    return tuple(x for x in fn.__code__.co_consts if x != fn.__doc__)
bd60a35cf5243fd158d6e65df6e0a16f6fd9051b
700,628
import inspect
import sys
import unittest


def suite():
    """
    This method must be included at the end of all sub-test modules.
    To use in other modules, copy this entire method to the new module.

    :return: (unittest.TestSuite) Test suite for this sub-test
    """
    tests = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    loader = unittest.TestLoader()
    full_suite = []
    for test in tests:
        test_suite = loader.loadTestsFromTestCase(test[1])
        full_suite.append(test_suite)
    return unittest.TestSuite(full_suite)
725ab5a006f0ebb9ffc1334bb3d7ec2c7b8afb38
700,629
def x_y_to_name(x, y) -> str:
    """
    Make a name from x, y coords

    Args:
        x: x coordinate
        y: y coordinate

    Returns:
        name made from x and y
    """
    return f"{x},{y}"
40ae99a789fdf029407ab0290ae58e85e911833c
700,630
def _lvl_error(level):
    """Get the lng/lat error for the hilbert curve with the given level

    On every level, the error of the hilbert curve is halved, e.g.
    - level 0 has lng error of +-180 (only one coding point is available: (0, 0))
    - on level 1, there are 4 coding points: (-90, -45), (90, -45), (-90, 45),
      (90, 45), hence the lng error is +-90

    Parameters:
        level: int
            Level of the used hilbert curve

    Returns:
        Tuple[float, float]: (lng-error, lat-error) for the given level
    """
    error = 1 / (1 << level)
    return 180 * error, 90 * error
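The halving rule is easy to verify by hand for the first few levels:

print(_lvl_error(0))  # (180.0, 90.0) - a single coding point
print(_lvl_error(1))  # (90.0, 45.0)
print(_lvl_error(2))  # (45.0, 22.5)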
52f8653252de1120d34c9a0377e85c07111874d6
700,632
def problem1(limit, mod1, mod2):
    """Problem 1"""
    result = 0
    for cur in range(0, limit):
        if cur % mod1 == 0 or cur % mod2 == 0:
            result += cur
    return result
e0be92fc6e2be4730606f02256e59018420655e6
700,633
def atmDensPoly6th(ht, dens_co):
    """ Compute the atmosphere density using a 6th order polynomial. This is
        used in the ablation simulation for faster execution.

    Arguments:
        ht: [float] Height above sea level (m).
        dens_co: [list] Coeffs of the 6th order polynomial.

    Return:
        atm_dens: [float] Atmosphere neutral mass density in kg/m^3.
    """

    # Compute the density
    rho_a = 1000*(10**(dens_co[0]
                       + dens_co[1]*(ht/1000)
                       + dens_co[2]*(ht/1000)**2
                       + dens_co[3]*(ht/1000)**3
                       + dens_co[4]*(ht/1000)**4
                       + dens_co[5]*(ht/1000)**5))

    return rho_a
653b134d513c3fd9b55e72ee37a4c7116aadf8cf
700,634
import uuid


def get_file_name() -> str:
    """
    Creates a unique file name for the datastore by appending a random UUID
    hex string to the file name prefix.

    :return: str
    """
    uniq_append_string = uuid.uuid4().hex
    return "LOCAL_STORAGE_{}".format(uniq_append_string)
a865199dc688dfcbb7077d9cf0a30aa9f5199849
700,635
import hashlib


def __str_to_hash(string_to_hash: str, errors: str = 'ignore') -> str:
    """ Encodes the given string and generates a hash from it. """
    string_hash = string_to_hash.encode(encoding="utf-8", errors=errors)
    return hashlib.md5(string_hash).hexdigest()
e7c0c419f9fa39bf5884198ec1dbfe4816fb7383
700,636
def find_overlap_percentage(nominator, denominator):
    """This function is used to find the overlap percentage between 2 lists"""
    common_skills_set = set(nominator).intersection(denominator)
    common_skills_list = list(common_skills_set)
    if common_skills_list:
        overlap_percentage = len(common_skills_list) / len(denominator) * 100
    else:
        overlap_percentage = 0
    return overlap_percentage
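A small worked example with hypothetical skill lists; two of the four denominator items overlap:

skills_a = ['python', 'sql', 'docker']
skills_b = ['python', 'sql', 'aws', 'gcp']
print(find_overlap_percentage(skills_a, skills_b))  # 2 / 4 * 100 = 50.0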
605f55e4698255a97a800d8a183078b25e6c2a34
700,638
def report_withot_driver_part():
    """Returns part from correct final report"""
    return [" 10. Pierre Gasly | SCUDERIA TORO ROSSO HONDA | 0:01:12.941",
            " 11. Carlos Sainz | RENAULT | 0:01:12.950"]
0117401194c9c36f86b6b8f1572f06f673a8b085
700,640
from typing import Union
from typing import Tuple


def get_rationed_resizing(
    resized: Union[int, float], length: int, other_length: int
) -> Tuple[int, int]:
    """
    Get resized lengths for `length` and `other_length` according to the
    ratio between `resized` and `length`.

    Parameters
    ----------
    resized : int or float
        Already resized length. If float, it is the ratio.
    length : int
        Non-resized length related to `resized`.
    other_length : int
        Other non-resized length to resize according to the ratio.

    Returns
    -------
    resized : int
        First resized length according to the ratio.
    other_resized : int
        Other resized length according to the ratio.
    """
    ratio = resized if type(resized) == float else resized / length
    resized = resized if type(resized) == int else round(ratio * length)
    other_resized = round(ratio * other_length)
    return resized, other_resized
dde1ce579c192178090fe07c145fd3e153d92599
700,641
import collections


def reorder(fields, order, key):
    """
    Reorders `fields` list sorting its elements in order they appear in
    `order` list. Elements that are not defined in `order` list keep the
    original order.

    :param fields: elements to be reordered
    :param order: iterable that defines a new order
    :param key: a function of one argument that is used to extract a
                comparison key from each element in `fields`
    :return: reordered elements list
    """
    ordered = collections.OrderedDict()
    for field in fields:
        ordered[key(field)] = field
    for ord_key in reversed(order or ()):
        # Skip keys missing from `fields`; move_to_end would raise KeyError
        if ord_key in ordered:
            ordered.move_to_end(ord_key, last=False)
    return list(ordered.values())
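A usage sketch with strings as their own keys (hypothetical field names):

fields = ['name', 'id', 'email', 'age']
print(reorder(fields, order=['id', 'name'], key=lambda f: f))
# ['id', 'name', 'email', 'age']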
48eb32841f5abec38c74a8ae88615a7c39d3087c
700,643
def extCheck(extention: str) -> str:
    """
    Ensures a file extension includes the leading '.'

    This is just used to error trap the lazy programmer who wrote it.

    :param extention: file extension
    :type extention: str
    :return: Properly formatted file extension
    :rtype: str
    """
    if extention[0] != '.':
        extention = '.' + extention
    return extention
7055c23cd8d2fa0e74dd916aa6d06d9547d49b7f
700,644
import logging


def load_data(in_file, max_example=None, relabeling=True):
    """
    load CNN / Daily Mail data from {train | dev | test}.txt
    relabeling: relabel the entities by their first occurrence if it is True.
    """
    documents = []
    questions = []
    answers = []
    num_examples = 0
    with open(in_file, 'r') as f:
        while True:
            line = f.readline()
            if not line:
                break
            question = line.strip().lower()
            answer = f.readline().strip()
            document = f.readline().strip().lower()
            if relabeling:
                q_words = question.split(' ')
                d_words = document.split(' ')
                assert answer in d_words
                entity_dict = {}
                entity_id = 0
                for word in d_words + q_words:
                    if (word.startswith('@entity')) and (word not in entity_dict):
                        entity_dict[word] = '@entity' + str(entity_id)
                        entity_id += 1
                q_words = [entity_dict[w] if w in entity_dict else w for w in q_words]
                d_words = [entity_dict[w] if w in entity_dict else w for w in d_words]
                answer = entity_dict[answer]
                question = ' '.join(q_words)
                document = ' '.join(d_words)
            questions.append(question)
            answers.append(answer)
            documents.append(document)
            num_examples += 1
            f.readline()  # skip the blank separator line
            if (max_example is not None) and (num_examples >= max_example):
                break
    logging.info('#Examples: %d' % len(documents))
    return (documents, questions, answers)
da81dcc56469aaccee3da36d1451ac8eaeb4a2b7
700,645
import copy


def delete_items(dictionary, key_list):
    """
    This function performs a deep copy of a dictionary, checks if the
    specified keys are included in the copy, and deletes key-value pairs
    accordingly.

    Parameters:
        dictionary (dict): a dictionary
        key_list (list): a list of the keys to be deleted

    Returns:
        dict_copy (dict): dictionary with key-value pairs removed
    """
    dict_copy = copy.deepcopy(dictionary)
    for k in key_list:
        if k in dict_copy:
            del dict_copy[k]
    return dict_copy
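A short usage sketch (hypothetical config dict) showing that the original mapping stays untouched:

config = {'host': 'localhost', 'port': 8080, 'debug': True}
trimmed = delete_items(config, ['debug', 'missing_key'])
print(trimmed)  # {'host': 'localhost', 'port': 8080}
print(config)   # unchanged, thanks to the deep copy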
45fb652661387ca5d40aea443c1d4a82f74db5bd
700,646
def some_method(a1, a2):
    """
    some_method reports which of the two numbers is larger

    :param a1: num1
    :param a2: num2
    :return: 1 if a1 is larger, 2 if a2 is larger, 0 if they are equal
    """
    if a1 > a2:
        return 1
    elif a1 < a2:
        return 2
    else:
        return 0
2505053aae5b0e0d1e906881d8de50b7ad97a28e
700,647
def backends_mapping(custom_backend, private_base_url, lifecycle_hooks):
    """
    Create 2 separate backends:
        - path to Backend 1: "/echo-api"
        - path to Backend 2: "/httpbin"
    """
    return {
        "/echo-api": custom_backend("backend_one", endpoint=private_base_url("echo_api"),
                                    hooks=lifecycle_hooks),
        "/httpbin": custom_backend("backend_two", endpoint=private_base_url("httpbin_go"),
                                   hooks=lifecycle_hooks),
    }
b98fb61afc00efc902e5bd511fefcea6727a7125
700,648
from sympy import Poly


def _solve_inequality(ie, s):
    """
    A hacky replacement for solve, since the latter only works for univariate
    inequalities.
    """
    if ie.rel_op not in ('>', '>=', '<', '<='):
        raise NotImplementedError
    expr = ie.lhs - ie.rhs
    p = Poly(expr, s)
    if p.degree() != 1:
        raise NotImplementedError('%s' % ie)
    a, b = p.all_coeffs()
    if a.is_positive:
        return ie.func(s, -b/a)
    elif a.is_negative:
        return ie.func(-b/a, s)
    else:
        raise NotImplementedError
f90151b25d6225e382c0f5dac686f2ff5529cf0e
700,649
def support(shape1, shape2, direction):
    """Find support for the Minkowski difference in the given direction.

    PARAMETERS
    ----------
    shape1, shape2: Shape
        The inputs for Minkowski difference. `shape1` is subtracted from
        `shape2`.
    direction: Point
        The direction for finding the support.

    RETURN
    ------
    : Point
        Support for Minkowski difference in the given direction.
    """
    return shape1.support(direction) - shape2.support(-direction)
4b9292116c9447549f36099d4a6928c4c6e74e28
700,650
import os
import json


def load_metadata(directory, filename='meta.json'):
    """Load the metadata of a training directory.

    Parameters
    ----------
    directory : string
        Path to folder where model is saved. For example './experiments/mnist'.
    """
    path_to_metadata = os.path.join(directory, filename)
    with open(path_to_metadata) as metadata_file:
        metadata = json.load(metadata_file)
    return metadata
d9bb76ea707fa87d4715569f08a7e43ae5373d6c
700,651
from pathlib import Path


def is_empty(path: Path) -> bool:
    """Checks if a directory is empty

    Args:
        path: The path to the directory to check

    Returns:
        bool: True if the dir is empty, False if it contains any files
    """
    return not any(path.iterdir())
b99045eee29922c7ef2e91cd1b8b71ab54181e1e
700,652
def find_best_group_or_software(obj_tour_list):
    """
    Find group with step 2 and 3 with 2 or more subtechniques
    """
    # Ideal: find Step 2 and Step 3 with most subtechniques
    obj_w_best_step_2_3 = {}
    for obj_tour in obj_tour_list:
        # First group
        if not obj_w_best_step_2_3:
            obj_w_best_step_2_3 = obj_tour
        # Verify if group tour has step 2 and step 3
        elif obj_tour.get('step2') and obj_tour.get('step3'):
            # Only update if step 2 and 3 were found
            if obj_w_best_step_2_3.get('step2') and obj_w_best_step_2_3.get('step3'):
                # When new group has same or more subtechniques
                if (obj_w_best_step_2_3['step2'][1] <= obj_tour['step2'][1]) and \
                        (obj_w_best_step_2_3['step3'][1] <= obj_tour['step3'][1]):
                    obj_w_best_step_2_3 = obj_tour
            else:
                obj_w_best_step_2_3 = obj_tour
        # Case when step 2 is available but not step 3
        # Only update if current group does not have a step 3
        elif obj_tour.get('step2') and not obj_w_best_step_2_3.get('step3'):
            if not obj_w_best_step_2_3.get('step2'):
                obj_w_best_step_2_3 = obj_tour
            elif obj_w_best_step_2_3['step2'][1] < obj_tour['step2'][1]:
                obj_w_best_step_2_3 = obj_tour
        # Case when step 3 is available but not step 2
        # Only update if current group does not have a step 2
        elif obj_tour.get('step3') and not obj_w_best_step_2_3.get('step2'):
            if not obj_w_best_step_2_3.get('step3'):
                obj_w_best_step_2_3 = obj_tour
            elif obj_w_best_step_2_3['step3'][1] < obj_tour['step3'][1]:
                obj_w_best_step_2_3 = obj_tour
    return obj_w_best_step_2_3
e23ad122b4f57143c5d271ebfcf42f23637fb2e2
700,653
import os


def dataset_urls():
    """Returns a list of legacy datasets available for testing"""
    legacy_data_directory = os.path.join(os.path.dirname(__file__), 'data', 'legacy')
    versions = os.listdir(legacy_data_directory)
    urls = ['file://' + os.path.join(legacy_data_directory, v) for v in versions]
    return urls
51def92fc81111e01e237c46b0d927c50d0d0146
700,654
def partition(groups, train_part=0.8, val_part=0.1, test_part=0.1):
    """Splits groups into training, validation, and test partitions.

    Args:
        groups (list): list of units (e.g. dicts).
        train_part (float): proportion in [0, 1] of units for training.
        val_part (float): self-explanatory.
        test_part (float): self-explanatory.

    Returns:
        lists of data-chunks for each.
    """
    assert train_part + val_part + test_part == 1.
    total_size = len(groups)
    train_part_end = int(total_size * train_part)
    val_part_end = train_part_end + int(total_size * val_part)
    train_groups = groups[:train_part_end]
    val_groups = groups[train_part_end:val_part_end]
    if test_part == 0.:
        val_groups += groups[val_part_end:]
        test_groups = []
    else:
        test_groups = groups[val_part_end:]
    return train_groups, val_groups, test_groups
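With the default 0.8/0.1/0.1 split, ten items partition as eight/one/one:

groups = list(range(10))
train, val, test = partition(groups)
print(train, val, test)  # [0, 1, 2, 3, 4, 5, 6, 7] [8] [9]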
c2cf56e54809a7c8c3a75c8b8bdbb3764aa9b988
700,655
import csv


def get_first_last_onset(csv_file):
    """Gets the first and last onset times."""
    with open(csv_file) as f:
        full_track = list(csv.reader(f))
    first_onset = float(full_track[0][0])
    if first_onset < 0:
        # we only store the first onset if it is negative
        # (i.e., starts in an upbeat)
        first_onset = abs(first_onset)
    else:
        first_onset = 0
    last_onset = float(full_track[-1][0])
    return first_onset, last_onset
d567530abf15fa5e256f7f826a813ad5982e3b0e
700,656
def __check_if_alive(processes):
    """
    Quickly check if at least one of the list of processes is alive.
    Returns True if at least one process is still running.
    """
    c = set([x.exitcode for x in processes])
    return None in c
22458fa4b2ca07fe8c1c21a60eac87d6546f8abd
700,657
import glob


def test_data_list() -> list:
    """Return the list of paths to the test input data files

    Parameters:
        None

    Returns:
        list: The list of paths to the test input data files
    """
    return glob.glob("test_data/*.rws.gz")
da3362910d0727fb21f433b34a77ad437ea0ccb1
700,658
def route_vm_logic(logicFn):
    """
    Decorates a function to indicate the viewmodel logic that should be
    executed after security checks and business logic passes.

    :param logicFn: The viewmodel logic function to assign.
    :return: The decorated function.
    """
    def decorator(fn):
        fn.route_vm_logic = logicFn
        return fn
    return decorator
6dcbd274bd35b34f9faffb455d121940821cfa04
700,659
def draw_turn(row, column, input_list, user):
    """
    Draw the game board after the user types a choice.

    Arguments:
        row -- the row index.
        column -- the column index.
        input_list -- a two dimensional list for the game board.
        user -- the user who typed the choice

    Returns:
        input_list -- a two dimensional list for the game board after the change.
    """
    mark_dict = {'player1': 'X', 'player2': 'O'}
    input_list[row-1][column-1] = mark_dict[user]
    return input_list
6f44e770a2fa04b5992ffd21cad5a799c3423de5
700,660
import socket


def is_open_port(port):
    """
    Check if a port is open (listening) or not on localhost.
    It returns true if the port is actually listening, false otherwise.

    :param port: The port to check.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        result = sock.connect_ex(('127.0.0.1', port))
    finally:
        sock.close()  # avoid leaking the socket descriptor
    return result == 0
4eb8f52744cc7f330dd101b613d5db4ab8d0d0fc
700,661
import re


def match_id(url):
    """Match the playlist ID in a URL."""
    try:
        res = re.findall(r'id=(\d{4,12})', url)
        return res[0]
    except Exception:
        pass
    return None
8a84cfe0ca0ebea154c451064ce838d4816e62d6
700,662
def _jupyter_bundlerextension_paths():
    """
    Entrypoint for Jupyter Notebook Bundler
    Shows up in the 'Download' menu on Jupyter Classic Notebook
    """
    return [{
        'name': 'chrome_pdf_export',
        'label': 'PDF via Chrome (.pdf)',
        'module_name': 'nbpdfexport.bundler',
        'group': 'download'
    }]
1d948a5eed6f4240e760c52bec776c3fbf291f14
700,663
def median(data_list):
    """
    Finds the median in a list of numbers.

    :type data_list list
    """
    data_list = list(map(float, data_list))
    length = len(data_list)
    data_list.sort()
    # Test whether the length is odd
    if length & 1:
        # If it is, get the index simply by dividing it in half
        index = int(length / 2)
        return data_list[index]
    # If the length is even, average the two values at the center
    low_index = int(length / 2) - 1
    high_index = int(length / 2)
    average = (data_list[low_index] + data_list[high_index]) / 2
    return average
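Two hand-checkable calls, one for the odd-length case and one for the even:

print(median([3, 1, 2]))     # 2.0
print(median([4, 1, 3, 2]))  # (2.0 + 3.0) / 2 = 2.5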
c41a1336d17e5e991101bd91c9c56cb59624dc82
700,664
import functools


def on_package_attributes(**attr_dict):
    """Decorator: executes instance function only if object has attr values.

    Executes the decorated method only if at the moment of calling the
    instance has attributes that are equal to certain values.

    Args:
        attr_dict (dict): dictionary mapping attribute names to their
            required values
    """
    def _execute_under_condition(func):
        @functools.wraps(func)
        def _wrapper(instance, *args, **kwargs):
            # If all the attributes have the value we require, then execute
            has_all_attributes = all(
                [hasattr(instance, key) for key in attr_dict]
            )
            if has_all_attributes:
                has_the_right_values = all(
                    [getattr(instance, key) == value for key, value in attr_dict.items()]  # NOQA: ignore=E501
                )
                if has_the_right_values:
                    func(instance, *args, **kwargs)
        return _wrapper
    return _execute_under_condition
9c3f4be4bbe02d0d70b97ae5d9437480262b5e79
700,665
import re
import csv


def _getIdsFromQuery(queryString, syn, downloadLocation):
    """Helper function that extracts the ids out of returned query."""
    if re.search('from syn\\d', queryString.lower()):
        tbl = syn.tableQuery(queryString, downloadLocation=downloadLocation)
        # Materialize the filter; a bare filter object is always truthy,
        # so the original assert could never fail
        check_for_id_col = [x for x in tbl.headers if x.get('id')]
        assert check_for_id_col, "Query does not include the id column."
        with open(tbl.filepath) as fp:
            ids = [x['id'] for x in csv.DictReader(fp)]
        return ids
    else:
        raise ValueError(
            'Input query cannot be parsed. Please see our documentation for '
            'writing Synapse query: '
            'https://docs.synapse.org/rest/org/sagebionetworks/repo/web/controller/TableExamples.html')
3f38f02c01c87932317d1241645dca6fac3b484e
700,666
def multi_to_single(y, combinations):
    """Convert multilabel indices to singlelabel strings."""
    single = []
    for y_i in y:
        y_i_str = ", ".join([str(i) for i in y_i])
        single.append(combinations.index(y_i_str))
    return single
7e8839173b047cb0bb89d143ce2b30b928630f2d
700,667
def _parse_taxa_report(local_path, **kwargs):
    """Return a dict of taxa_name to read_counts."""
    out, abundance_sum = {}, 0
    with open(local_path) as taxa_file:
        for line_num, line in enumerate(taxa_file):
            line = line.strip()
            tkns = line.split('\t')
            if not line or len(tkns) < 2:
                continue
            if len(tkns) == 2:
                taxon = tkns[0]
                taxon = taxon.split('|')[-1]
                abundance = float(tkns[1])
            elif len(tkns) == 6:
                taxon = tkns[5].strip()
                taxon_rank = tkns[3].strip().lower()
                if len(taxon_rank) > 1:
                    continue
                taxon = f'{taxon_rank}__{taxon}'
                abundance = float(tkns[1])
            else:
                if line_num == 0:
                    continue
                taxon = tkns[1]
                abundance = float(tkns[3])
            if (not kwargs.get('species_only', False)) or ('s__' in taxon):
                out[taxon] = abundance
                abundance_sum += abundance
    if kwargs.get('normalize', False):
        out = {k: v / abundance_sum for k, v in out.items()}
    if kwargs.get('minimum_abundance', 0):
        out = {k: v for k, v in out.items() if v >= kwargs['minimum_abundance']}
    return out
b3b18010820dbc409b66c8c1d5636430c052382d
700,668
import json


def body_part_to_headers_and_data(part):
    """
    convert part (of multi-part body) to headers dict and content.
    de-serializes json if content-type is application/json.

    :param part: BodyPart decoded by MultipartDecoder
    :return: tuple pair of headers dict and content
    """
    if b'application/json' in part.headers[b'Content-Type'].lower():
        return part.headers, json.loads(part.text)
    return part.headers, part.text
edd95f6dba9f4157c6a51b2dd6a6c4fb8a34e9db
700,670
def get_utm_zone(lon):
    """
    Calculate UTM zone.

    Arguments:
        lon: float
            Longitude, in degrees. West: negative, East: positive.

    Returns:
        zone: int
            UTM zone number.
    """
    zone = int(1 + (lon + 180.0) / 6.0)
    return zone
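Two spot checks of the zone formula, with longitudes picked for familiar cities:

print(get_utm_zone(-122.4))  # 10 (San Francisco)
print(get_utm_zone(13.4))    # 33 (Berlin)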
cf8c0d596f146417ebf0d3a18cd0b70e825388aa
700,671
def get_num_params(vocab_size, num_layers, num_neurons):
    """Returns the number of trainable parameters of an LSTM.

    Args:
        vocab_size (int): The vocabulary size
        num_layers (int): The number of layers in the LSTM
        num_neurons (int): The number of neurons / units per layer

    Returns:
        int: The number of trainable parameters
    """
    num_first_layer = 4 * (num_neurons * (vocab_size + num_neurons) + num_neurons)
    num_other_layer = 4 * (num_neurons * 2 * num_neurons + num_neurons)
    num_softmax = vocab_size * num_neurons + vocab_size
    return num_first_layer + (num_layers - 1) * num_other_layer + num_softmax
c9620e74206878cc3390895dacbf10c84da42829
700,672
import pickle


def import_data(object_name):
    """Extract data from data dir."""
    with open(f"data/{object_name}.pkl", "rb") as my_file:
        return pickle.load(my_file)
d497048d53e3ba95bceccc172b7d47781c3bbc2c
700,673
def convert_to_nullable(input_val, cast_function):
    """For non-null input_val, apply cast_function and return result if
    successful; for null input_val, return None.

    Args:
        input_val (Any): The value to attempt to convert to either a None or
            the type specified by cast_function. The recognized null values
            are '.', None, '', and 'NULL'
        cast_function (Callable[[Any], Any]): A function to cast the input_val
            to some specified type; should raise an error if this cast fails.

    Returns:
        None if input is the null value. An appropriately cast value if input
        is not null and the cast is successful.

    Raises:
        Error: whatever error is provided by cast_function if the cast fails.
    """
    if input_val in ['.', None, '', 'NULL']:
        result = None
    else:
        result = cast_function(input_val)
    return result
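A usage sketch with float as the cast function:

print(convert_to_nullable('3.7', float))   # 3.7
print(convert_to_nullable('NULL', float))  # None
# convert_to_nullable('abc', float) would raise ValueError from float()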
ba12f32d2bcced066257788188a2a9d91fcfec37
700,674
def piece_size(file_size):
    """
    Based on the size of the file, we decide the size of the pieces.

    :param file_size: represents size of the file in MB.
    """
    # print('Size {0} MB'.format(file_size))
    if file_size >= 1000:
        # more than 1 gb
        return 2 ** 19
    elif file_size >= 500 and file_size < 1000:
        return 2 ** 18
    elif file_size >= 250 and file_size < 500:
        return 2 ** 17
    elif file_size >= 125 and file_size < 250:
        return 2 ** 16
    elif file_size >= 63 and file_size < 125:
        return 2 ** 15
    else:
        return 2 ** 14
8b48e98a22035f594c2582401cf4145acbf4d680
700,676
import subprocess


def execute(command):
    """
    Execute a command inside the running container

    Arguments
    ---------
    command: string of command to execute
    """
    command = f'"{command}"'
    process = subprocess.run(
        f"docker exec --workdir $maple_target $maple_container bash -c {command}",
        shell=True,
        check=True,
    )
    return process.returncode
9e4a871447e0c109d14f6e30f6ec2a26d9034416
700,677
import math


def odd_improvement(lst):
    """Calculates the improvement of odds compared to their base values.

    The higher above 0, the more the odds improved from base-value. The lower
    under 0, the more the odds deteriorated.

    Used https://en.wikipedia.org/wiki/Logit as a source for this formula.
    """
    base_probability = lst[0]
    current_probability = lst[1]
    improvement = (math.log(current_probability / (1 - current_probability))
                   - math.log(base_probability / (1 - base_probability)))
    return improvement
4f8607d452fc96b57c9573ed0b07bd5a38791876
700,678
def nearest(items, pivot):
    """Find nearest value in array, including datetimes

    Args
    ----
    items: iterable
        List of values from which to find nearest value to `pivot`
    pivot: int or float
        Value to find nearest of in `items`

    Returns
    -------
    nearest: int or float
        Value in items nearest to `pivot`
    """
    return min(items, key=lambda x: abs(x - pivot))
0f8766e5680b3b271876a80055b99312bde8366f
700,679
def CODE(string):
    """
    Returns the numeric Unicode map value of the first character in the
    string provided. Same as `ord(string[0])`.

    >>> CODE("A")
    65
    >>> CODE("!")
    33
    >>> CODE("!A")
    33
    """
    return ord(string[0])
0f680fe1e45156c00d0a5839e24f1619a456773f
700,680
def expected_weighted(da, weights, dim, skipna, operation):
    """
    Generate expected result using ``*`` and ``sum``. This is checked against
    the result of da.weighted which uses ``dot``
    """
    weighted_sum = (da * weights).sum(dim=dim, skipna=skipna)

    if operation == "sum":
        return weighted_sum

    masked_weights = weights.where(da.notnull())
    sum_of_weights = masked_weights.sum(dim=dim, skipna=True)
    valid_weights = sum_of_weights != 0
    sum_of_weights = sum_of_weights.where(valid_weights)

    if operation == "sum_of_weights":
        return sum_of_weights

    weighted_mean = weighted_sum / sum_of_weights

    if operation == "mean":
        return weighted_mean
5d3518de9bd52407cdcf140bd1c43dd5d78f9c37
700,681
import numpy


def load_values(files):
    """
    Loads the sasa values from the files in the files dictionary.
    Returns the maximum and minimum values.
    Values are loaded into the "files" structure.
    """
    min_val = float("inf")
    max_val = 0.0
    for filename in files:
        files[filename]["values"] = numpy.loadtxt(filename)
        max_val = max(numpy.max(files[filename]["values"]), max_val)
        min_val = min(numpy.min(files[filename]["values"]), min_val)
    return max_val, min_val
a03ff0e928c192f57b911445b216f4baf0d88d7d
700,683
def format_call(__fn, *args, **kw_args):
    """
    Formats a function call, with arguments, as a string.

    >>> format_call(open, "data.csv", mode="r")
    "open('data.csv', mode='r')"

    @param __fn
        The function to call, or its name.
    @rtype
        `str`
    """
    try:
        name = __fn.__name__
    except AttributeError:
        name = str(__fn)
    args = [repr(a) for a in args]
    args.extend(n + "=" + repr(v) for n, v in kw_args.items())
    return "{}({})".format(name, ", ".join(args))
0dce4bf0166f59f810063596f872b9f641f84234
700,684
def generate_fieldnames(value, prefix=''):
    """Recursively build flattened fieldnames for a nested value."""
    fieldnames = []
    if isinstance(value, dict):
        prefix = prefix + '.' if prefix != '' else ''
        for key in sorted(value.keys()):
            subnames = generate_fieldnames(value[key], prefix='{}{}'.format(prefix, key))
            fieldnames.extend(subnames)
    elif isinstance(value, list):
        # NOTICE: list support is NOT reliable
        subnames = ['{}[id]'.format(prefix)]
        if len(value) > 0:
            subnames += generate_fieldnames(value[0], prefix='{}[]'.format(prefix))
        else:
            subnames += ['{}[]'.format(prefix)]
        fieldnames.extend(subnames)
    else:
        fieldnames.append(prefix)
    return fieldnames
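A worked example on a small nested record (hypothetical data), exercising both the dict and list branches:

record = {'user': {'name': 'ada', 'tags': ['x']}, 'id': 7}
print(generate_fieldnames(record))
# ['id', 'user.name', 'user.tags[id]', 'user.tags[]']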
e5ba2a4bc8786aa37542535e6c2bf2dd11b76648
700,685
import os


def get_data_dir(file=''):
    """Return the full path to the directory used to store the API data"""
    data_dir = os.getenv('TRAPI_DATA_DIR')
    if not data_dir:
        # Output data folder in current dir if not provided via environment variable
        data_dir = os.getcwd() + '/output/'
    else:
        if not data_dir.endswith('/'):
            data_dir += '/'
    return data_dir + file
febef61b3f22942d087e9d2472b6a99da7bf2a4c
700,686
def _get_value_pos(line, delim):
    """
    Finds the first non-whitespace character after the delimiter

    Parameters:
        line: (string) Input string
        delim: (string) The data delimiter
    """
    fields = line.split(delim, 1)
    if not len(fields) == 2:
        raise Exception(f"Expected a '{delim}' delimited field. Actual: {line}")
    return len(line) - len(fields[1].lstrip())
8337c92045f2d3ccb91479502b30c7d191e53f34
700,687
def norm(data):
    """Normalize a data series with min-max scaling"""
    return (data - data.min(axis=0)) / (data.max(axis=0) - data.min(axis=0))
9e2a23d8d734a4e77ec99c0dcb0ae4d85f971ede
700,688
def format_size(size):
    """Format a byte count as a human-readable size

    >>> format_size(10240)
    '10.00K'
    >>> format_size(1429365116108)
    '1.30T'
    >>> format_size(1429365116108000)
    '1.27P'
    """
    if size < 1024:
        return '%sB' % size
    elif size < 1024 ** 2:
        return '%.2fK' % (float(size) / 1024)
    elif size < 1024 ** 3:
        return '%.2fM' % (float(size) / 1024 ** 2)
    elif size < 1024 ** 4:
        return '%.2fG' % (float(size) / 1024 ** 3)
    elif size < 1024 ** 5:
        return '%.2fT' % (float(size) / 1024 ** 4)
    else:
        return "%.2fP" % (float(size) / 1024 ** 5)
66aa2301350def395e32bae87dabccc18a126786
700,689
def get_download_url_for_platform(url_templates, platform_info_dict):
    """
    Compare the dict returned by get_platform_info() with the values specified
    in the url_template element. Return the first url_template whose defined
    attributes all match the corresponding dict entries, or None if there is
    no match. If an attribute is not defined in the url_template element, it
    is assumed to be irrelevant at this stage.

    For example,
    <url_template os="darwin">http://hgdownload.cse.ucsc.edu/admin/exe/macOSX.${architecture}/faToTwoBit</url_template>
    where the OS must be 'darwin', but the architecture is filled in later
    using string.Template.
    """
    for url_template in url_templates:
        os_name = url_template.get("os", None)
        architecture = url_template.get("architecture", None)
        if os_name:
            os_ok = os_name.lower() == platform_info_dict["os"]
        else:
            os_ok = True
        if architecture:
            architecture_ok = architecture.lower() == platform_info_dict["architecture"]
        else:
            architecture_ok = True
        if os_ok and architecture_ok:
            return url_template
    return None
a5eb81ff5aec82b24dc6128e908519dbee4e13e3
700,690
import getpass


def get_username() -> str:
    """
    Returns username lowercase

    >>> username = get_username()
    >>> assert len(username) > 1
    """
    _username = getpass.getuser().lower()
    return _username
aa7c5d2974502bd411cd1a77218ca74171d3dc71
700,691
def extract_id(source):
    """
    Attempts to extract an ID from the argument, first by looking for an
    attribute and then by using dictionary access. If both fail, the argument
    is returned.
    """
    try:
        return source.id
    except AttributeError:
        pass
    try:
        return source["id"]
    except (TypeError, KeyError):
        pass
    return source
7ec169cfd6edf70c9d414ec61edc3bf514a80e02
700,692
import shutil


def pytest_report_header(config):
    """Add header information for pytest execution."""
    return [
        "LAMMPS Executable: {}".format(
            shutil.which(config.getoption("lammps_exec") or "lammps")
        ),
        "LAMMPS Work Directory: {}".format(
            config.getoption("lammps_workdir") or "<TEMP>"
        ),
    ]
dc07ae457cc49a1fc1ac43643a193bb3b6a84399
700,693
def transform_score(data, score_card):
    """
    Map binned features back to scores.

    Args:
        data: feature table
        score_card: the scorecard

    Returns:
        The data with the converted Score column.
    """
    base_score = score_card[score_card['Bins'] == '-']['Score'].values[0]
    data['Score'] = base_score
    for i in range(len(data)):
        score_i = base_score
        for k in set(score_card[score_card['Bins'] != '-']['Variables']):
            bin_score = score_card[(score_card['Woe'] == data.iloc[i][k]) &
                                   (score_card['Variables'] == k)]['Score']
            score_i += bin_score.values[0]
        data['Score'].iloc[i] = score_i
    return data
d3afafeca40f0bcf97e522cb32b4f139d1c0321e
700,694
def get_lwt_topic(mqtt: dict) -> dict:
    """Return last will topic."""
    if not mqtt["last_will_topic"]:
        mqtt["last_will_topic"] = f"{mqtt['client_id']}/lwt"
    return mqtt
fdc6941dff5d702adeebf81f02d10c7f6a21c180
700,696
import os
import sys


def verifyDirectory(path, create, quiet):
    """
    ERROR CHECK: verify directory exists

    :param path: str, path to save the file
    :param create: bool, whether to create missing dir
    :param quiet: bool, whether to quietly return T/F
    :return: exists, bool, indicates existence of directory
    """
    exists = True
    if not os.path.isdir(path):
        exists = False
        if create:
            os.makedirs(path)
            if not quiet:
                print("Creating path: {}".format(path))
        elif not quiet:
            print("ERROR: Specified path doesn't exist:" +
                  " {}".format(path))
            sys.exit()
    # end if
    return exists
c7a479dc44beaac96a6c963fc2ec8735c95d1687
700,698
import os
import platform
import re


def spacja(sciezka):
    """Escape spaces and special characters in a pathname."""
    sciezka = os.path.normpath(sciezka)
    if platform.system() == "Windows":
        czy_spacja = re.search(" ", sciezka)
        if czy_spacja is not None:
            sciezka = '"' + sciezka + '"'
    else:
        path = os.path.splitext(sciezka)
        sciezka = re.escape(path[0]) + path[1]
    return sciezka
3ca0c3d743662d6811e5281b4b7d5c3988902a1b
700,699
def not_in(a, b):
    """Evaluates a not in b"""
    return a not in b
bf5f2fd22a48f4ba517c75de2d05c11ab585756b
700,700
def encode_imsi(imsi):
    """
    Convert a IMSI string to a uint + length.

    IMSI strings can contain two prefix zeros for test MCC and maximum
    fifteen digits.

    Bit 1 of the compacted uint is always 1, so that we can match on it set.
    Bits 2-3 of the compacted uint contain how many leading 0's are in the
    IMSI. For example, if the IMSI is 001010000000013, the first bit is 0b1,
    the second two bits would be 0b10 and the remaining bits would be
    1010000000013 << 3

    Args:
        imsi - string representation of imsi
    Returns:
        int representation of imsi with padding amount at end
    """
    if imsi.startswith('IMSI'):
        imsi = imsi[4:]  # strip IMSI off of string prefix
    prefix_len = len(imsi) - len(imsi.lstrip('0'))
    compacted = (int(imsi) << 2) | (prefix_len & 0x3)
    return compacted << 1 | 0x1
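The bit layout can be checked against the docstring's own example:

packed = encode_imsi('IMSI001010000000013')
assert packed & 0x1 == 1                # marker bit is always set
assert (packed >> 1) & 0x3 == 2         # two leading zeros
assert (packed >> 3) == 1010000000013   # the remaining digits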
31d025168ba2421be2b235a049983ee331437ffd
700,701
def parse_content(content):
    """Parse the content of a file into a dictionary mapping words to
    frequencies"""
    words = {}
    for line in content.split('\n'):
        word, frequency = line.split()
        words[word] = int(frequency)
    return words
83757725e29d00732835002db4a2dec4b4d71b8c
700,702
def reply(threaded=False):
    """Plugin reply decorator."""
    def wrapper(func):
        func._is_reply = True
        func._is_threaded = threaded
        return func
    return wrapper
016917e073471150696ba02dbb25d07dca95865c
700,703
import typing
import itertools


def generate_fixed_permutations(
    base_name: str,
    locations: typing.List[str],
    perm_length: int,
) -> typing.List[typing.List[str]]:
    """
    Generate path permutations of a specified length which always start and
    end at base_name.

    :param base_name: Name of base to begin from.
    :param locations: List of locations that can be visited.
    :param perm_length: Length of the trip in stops.
    :return: List of all possible paths of specified length.
    """
    location_perms = itertools.permutations(locations, perm_length)
    return [[base_name] + list(perm) + [base_name] for perm in location_perms]
61c03cb166ca4dc7691d3c09eaedf26b5ff3c288
700,705
import re


def cleanEngText(eng_raw_string, customize_stop_words=[]):
    """
    Args:
        eng_raw_string: str -
        customize_stop_words: list - all stopwords to remove

    Returns:
        refined_doc: str - curated string of eng text
    """
    # Remove dates: 1 or 2 digit number, a slash, 1 or 2 digit number,
    # a slash, then a 4 digit number
    pattern_dates = r'(\d{1,2})/(\d{1,2})/(\d{4})'
    pattern_fig = r'Figure (\d{1,2})'
    pattern_image = r'^Image .$'
    replace = ''
    eng_raw_string = re.sub(pattern_dates, replace, eng_raw_string)
    eng_raw_string = re.sub(pattern_fig, replace, eng_raw_string)
    eng_raw_string = re.sub(pattern_image, replace, eng_raw_string)

    # remove punctuation and special characters
    eng_raw_string = re.sub("[^A-Za-z0-9]+", ' ', eng_raw_string)

    # Remove custom stop words
    tokens = [token for token in eng_raw_string.split() if token not in customize_stop_words]
    refined_doc = ''
    for word in tokens:
        refined_doc += ' {}'.format(word)
    return refined_doc
7a3c24991538f5fdce9aa9625725899ae52ed195
700,706
import typing
import pathlib
import hashlib
from pathlib import Path


def md5(p: typing.Union[pathlib.Path, str], bufsize: int = 32768) -> str:
    """
    Compute md5 sum of the content of a file.
    """
    hash_md5 = hashlib.md5()
    with Path(p).open('rb') as fp:
        for chunk in iter(lambda: fp.read(bufsize), b''):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()
2399177dca4d64de231287f7bd25346df8437dbd
700,707
import os
import time


def get_file_mod_time(pth):
    """
    Safely get last modification time for a file. Prevents situation when
    file is deleted between file existence check and last file modification
    check.

    :param str pth: file path to check
    :return float: number of seconds since Jan 1, 1970 00:00:00
    """
    try:
        return os.path.getmtime(pth)
    except Exception as e:
        print(
            "Could not determine timestamp for '{}'. Returning current time. "
            "Caught exception: {}".format(pth, getattr(e, "message", repr(e)))
        )
        return time.time()
a3046860b0cf7a193618bc20071e8532a52f9e89
700,708
def load_module(name):
    """load a module

    Args:
        name: python dotted namespace path of the module to import

    Returns:
        imported module

    Raises:
        FailedImport if importing fails
    """
    m = __import__(name)
    # __import__('foo.bar') returns foo, so...
    for bit in name.split('.')[1:]:
        m = getattr(m, bit)
    return m
6cfcb58fccbf7d0c6de22a561312aff8931b8317
700,710
import torch


def project_simplex(v, z=1.0):
    """Project a vector v onto the simplex.

    That is, return argmin_w ||w - v||^2 where w >= 0 elementwise and
    sum(w) = z.

    Parameters:
        v: Tensor of shape (batch_size, n)
        z: real number

    Return:
        Projection of v on the simplex, along the last dimension:
        (batch_size, n)
    """
    v_sorted, _ = v.sort(dim=-1, descending=True)
    range_ = torch.arange(1.0, 1 + v.shape[-1])
    cumsum_divided = (v_sorted.cumsum(dim=-1) - z) / range_
    # rho = (v_sorted - cumsum_divided > 0).nonzero()[-1]
    cond = (v_sorted - cumsum_divided > 0).type(v.dtype)
    rho = (cond * range_).argmax(dim=-1)
    tau = cumsum_divided[range(v.shape[0]), rho]
    return torch.clamp(v - tau.unsqueeze(-1), min=0)
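Two sanity checks, worked through by hand against the algorithm above: a point already on the simplex projects to itself, and any projection lands on the simplex (requires torch):

v = torch.tensor([[0.2, 0.3, 0.5]])
print(project_simplex(v))  # tensor([[0.2000, 0.3000, 0.5000]])
print(project_simplex(torch.tensor([[1.0, 2.0, 3.0]])))  # tensor([[0., 0., 1.]])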
80c3ec1b4bb94681b5515840da27a32a09512680
700,711
def uniquify(iterable):
    """ Make unique list while preserving order. """
    unique = []
    for entry in iterable:
        if entry not in unique:
            unique.append(entry)
    return unique
a579c5e4cf8b38213fbc9cdcac2c122586bab97a
700,712
from pathlib import Path
import yaml


def read_config():
    """Read the config file for cenv from the users-home path if it exists.

    If there is no user-config-file the default one is used.

    Returns:
        the content of the read config file.
    """
    user_config_path = Path.home() / '.config/cenv/cenv.yml'
    default_config_path = Path(__file__).parent / 'cenv.yml'

    # Collect settings from config file .cenv.yml
    main_config = yaml.safe_load(default_config_path.open().read())

    # if a user-config-file exists, read the content and update the main-config
    if user_config_path.exists():
        user_config = yaml.safe_load(user_config_path.open().read())
        main_config.update(user_config)

    return main_config
83322b77fcdb1030179309f7139cf9e6df9a8232
700,713
from typing import Dict
from typing import Any


def create_default_metadata() -> Dict[str, Any]:
    """Creates a dictionary with the default metadata."""
    return {
        'title': 'Default title',
        'base_url': 'https://example.org',
        'description': 'Default description',
        'language': 'en-US',
        'copyright': 'Licensed under the '
                     '<a href="http://creativecommons.org/licenses/by/4.0/"> '
                     'Creative Commons Attribution 4.0 International License.'
    }
2ee6aeffaafd209cb93e33bd42291ac3c12b10d8
700,714
import os


def paths_differ(path1, path2):
    """Check whether two paths differ."""
    if os.path.isdir(path1):
        if not os.path.isdir(path2):
            return True
        for nm in os.listdir(path1):
            if paths_differ(os.path.join(path1, nm), os.path.join(path2, nm)):
                return True
        for nm in os.listdir(path2):
            if not os.path.exists(os.path.join(path1, nm)):
                return True
    elif os.path.isfile(path1):
        if not os.path.isfile(path2):
            return True
        if os.stat(path1).st_size != os.stat(path2).st_size:
            return True
        with open(path1, "rb") as f1:
            with open(path2, "rb") as f2:
                data1 = f1.read(1024 * 16)
                data2 = f2.read(1024 * 16)
                while data1:
                    if data1 != data2:
                        return True
                    data1 = f1.read(1024 * 16)
                    data2 = f2.read(1024 * 16)
                if data1 != data2:
                    return True
    elif os.path.exists(path2):
        return True
    return False
89268b17eed7bb70a8f123c99a818fce85be9053
700,715
import re


def extract_authorization_token(request):
    """
    Get the access token using Authorization Request Header Field method.
    Or try getting via GET.
    See: http://tools.ietf.org/html/rfc6750#section-2.1

    Return a string.
    """
    auth_header = request.META.get('HTTP_AUTHORIZATION', '')
    if re.compile(r'^[Bb]earer\s{1}.+$').match(auth_header):
        access_token = auth_header.split()[1]
    else:
        access_token = request.GET.get('access_token', '')
    return access_token
9776df3ecd59ba3db15664259a6e65114ec61a07
700,716
def filter_problematic(features, vcf_file="data/problematic_sites_sarsCov2.vcf", callback=None):
    """
    Apply problematic sites annotation from de Maio et al.,
    https://virological.org/t/issues-with-sars-cov-2-sequencing-data/473
    which are published and maintained as a VCF-formatted file.

    :param features: list, return object from import_json()
    :param vcf_file: str, path to VCF file
    :return:
    """
    vcf = open(vcf_file)
    mask = {}
    for line in vcf.readlines():
        if line.startswith('#'):
            continue
        _, pos, _, ref, alt, _, filt, info = line.strip().split()
        if filt == 'mask':
            mask.update({int(pos) - 1: {  # convert to 0-index
                'ref': ref, 'alt': alt, 'info': info}
            })

    # apply filters to feature vectors
    count = 0
    for row in features:
        filtered = []
        for typ, pos, alt in row['diffs']:
            # mask keys are ints, so look up with int(pos) consistently
            if typ == '~' and int(pos) in mask and alt in mask[int(pos)]['alt']:
                continue
            if typ != '-' and 'N' in alt:
                # drop substitutions and insertions with uncalled bases
                continue
            filtered.append(tuple([typ, pos, alt]))
        count += len(row['diffs']) - len(filtered)
        row['diffs'] = filtered

    if callback:
        callback('filtered {} problematic features'.format(count))
    return features
e1edd82c6223fc88b1db5e929c6147464fc7051d
700,717
import os


def _make_requirements(foldername, virtual_env_obj):
    """
    Function that builds the 'requirements.txt' file used to create a
    virtual environment by PIPENV, based on the :virtual_env_obj: object.
    """
    requirements_filename = os.path.join(foldername, 'requirements.txt')
    with open(requirements_filename, 'w') as requirement_file:
        for pkg in virtual_env_obj.packages:
            requirement = "{}{}{}\n".format(
                pkg.name,
                pkg.version_specifier,
                pkg.version,
            )
            requirement_file.write(requirement)
    return requirements_filename
daa8bb3af7c0a953cf7ce64e13e21259128c2127
700,718
def power_mod(val, power, m_value):
    """ Calculate power mod the efficient way """
    if power <= 100:
        return (val ** power) % m_value
    if power % 2 == 0:
        return (power_mod(val, power // 2, m_value) ** 2) % m_value
    return (power_mod(val, power // 2, m_value) *
            power_mod(val, power // 2 + 1, m_value)) % m_value
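The recursion can be spot-checked against Python's built-in three-argument pow:

assert power_mod(7, 128, 13) == pow(7, 128, 13)
assert power_mod(2, 10**6, 997) == pow(2, 10**6, 997)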
59bb407db88e344b33571d74eef75ae0f7baa54e
700,719
from typing import List


def _inv_shift_rows(s: List[List[bytes]]) -> List[List[bytes]]:
    """
    Performs the inverted shift rows transformation as described in the
    standard

    :param s: the state matrix
    :return: the new state matrix with shifted rows
    """
    s[0][1], s[1][1], s[2][1], s[3][1] = s[3][1], s[0][1], s[1][1], s[2][1]
    s[0][2], s[1][2], s[2][2], s[3][2] = s[2][2], s[3][2], s[0][2], s[1][2]
    s[0][3], s[1][3], s[2][3], s[3][3] = s[1][3], s[2][3], s[3][3], s[0][3]
    return s
bdb593e912275bfdf387334916123830e081ed50
700,720
def get_id_character_mapping(data, columns):
    """Creating a mapping between characters and ids given a dataframe.

    Args:
        data: dataframe that contains characters that need to be converted
            to ids
        columns: the columns of the dataframe that contain characters that
            need to be converted to ids

    Returns:
        id_to_character: dictionary of ids and characters
        character_to_id: dictionary of characters and ids
    """
    characters = set()
    for column in columns:
        for index, val in data[column].iteritems():
            characters.update(set(val))
    characters = list(sorted(characters))
    id_to_character = {i: characters[i] for i in range(len(characters))}
    character_to_id = {characters[i]: i for i in range(len(characters))}
    return id_to_character, character_to_id
904a9dde05c26d2d669d16e466317379acbdeec5
700,723
def __args_to_weka_options(args):
    """
    Function that creates a list with options (args) in a format appropriate
    for weka.

    :param args: dictionary with command line input
    :return: list of command line arguments
    """
    result = []
    for k, v in args.items():
        if v:
            result.append("-" + k)
            result.append(v)
        elif v == "":
            result.append("-" + k)
    return result
1d480ffaf840ae67d805d7845684eef24d3da583
700,724
import os


def batch_input_file_validator(value):
    """
    All input files should be valid image files.
    Supported extensions: ['.jpg','.jpeg','.png','.bmp','.webp']
    """
    for i in value:
        # Check each file in the batch, not the list object itself
        if not os.path.isfile(i):
            raise TypeError("Select a valid input file.")
    return value
2e326ee3022bfe1def13a775c9585669a27eb3cd
700,725