content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def getRailCoordinates(message, key):
    """Return (x, y) coordinates visiting the rails in zig-zag order.

    One coordinate is produced per character of ``message``; ``key`` is the
    number of rails, so ``y`` bounces between 0 and ``key - 1``.
    """
    coords = []
    row, step = 0, 1  # step flips sign when we bounce off a rail
    for col in range(len(message)):
        coords.append((col, row))
        row += step
        if row == key:      # bounced off the bottom rail, head back up
            step = -1
            row = key - 2
        elif row == -1:     # bounced off the top rail, head back down
            step = 1
            row = 1
    return coords
19d858aa41c8513e2da52c65d19352b2e028bb14
48,746
import subprocess

def resource_get(name):
    """Fetch the resource path for *name* via the ``resource-get`` hook tool.

    *name* must match a resource declared in ``metadata.yaml``.

    Returns the decoded command output (a path) or ``False`` when *name* is
    empty or the resource is not available.
    """
    if not name:
        return False
    try:
        output = subprocess.check_output(['resource-get', name])
    except subprocess.CalledProcessError:
        return False
    return output.decode('UTF-8')
34e1a69044e4b131b99ff314f00db9a866391b91
48,747
import subprocess

def call(*popenargs, **kwargs):
    """Drop-in replacement for ``subprocess.call`` that records output.

    The stock implementation never calls ``communicate()``, so piped output
    would be lost; this variant drains stdout/stderr before returning the
    child's exit status.
    """
    proc = subprocess.Popen(stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            *popenargs, **kwargs)
    proc.communicate()  # drain the pipes so everything gets recorded
    return proc.poll()
9d0e3659767d948fde5a47eb99b4d2abf9fef212
48,748
def simplify_arg_role_name(role_name: str) -> str:
    """Convert an argument-role name to its simple form.

    E.g. ``"Time-Within"`` -> ``"Time"``.  Only the text before the first
    hyphen is kept; names without a hyphen are returned unchanged.

    :param role_name: full role name, possibly hyphenated
    :return: the leading component of the role name
    """
    # Fix: the original left a debug ``print(cols)`` for hyphenated
    # non-"Time" roles, polluting stdout; it has been removed.
    return role_name.split("-")[0]
b9cba2234da2656055945da7c39896b1d4d29416
48,749
import textwrap

def wrap(to_wrap, width=70):
    """Wrap text to *width* columns while preserving existing newlines.

    ``textwrap.wrap`` alone collapses newlines; here each original line is
    wrapped independently and blank lines are kept.
    """
    wrapped_lines = []
    for original_line in to_wrap.split('\n'):
        if not original_line:
            wrapped_lines.append('')  # keep blank lines
        wrapped_lines.extend(textwrap.wrap(original_line, width=width))
    return '\n'.join(wrapped_lines)
e5a8e7f4ab1935883a2e2174e8f642e27ecb3447
48,750
def get_activity_charts_data(activity_actions, sync_user):
    """Get data for Translation activity and Review activity charts.

    Buckets translations from a stream of action dicts into six sets:
    human vs. machinery translations, new suggestions, and self-approved,
    peer-approved and rejected translations.
    """
    human_translations = set()
    machinery_translations = set()
    new_suggestions = set()
    peer_approved = set()
    self_approved = set()
    rejected = set()

    for action in activity_actions:
        kind = action["action_type"]
        actor = action["performed_by"]
        translation = action["translation"]
        machinery_sources = action["translation__machinery_sources"]
        author = action["translation__user"]
        approver = action["translation__approved_user"]
        date = action["translation__date"]
        approved_date = action["translation__approved_date"]

        # Review actions performed by the sync process are ignored, because
        # they aren't explicit user review actions.
        performed_by_sync = actor == sync_user

        if kind == "translation:created":
            if len(machinery_sources) == 0:
                human_translations.add(translation)
            else:
                machinery_translations.add(translation)
            if not approved_date or approved_date > date:
                new_suggestions.add(translation)
            # Self-approval can also happen on translation submission.
            if actor == approver and not performed_by_sync:
                self_approved.add(translation)
        elif kind == "translation:approved" and not performed_by_sync:
            if actor == author:
                self_approved.add(translation)
            else:
                peer_approved.add(translation)
        elif kind == "translation:rejected" and not performed_by_sync:
            rejected.add(translation)

    return (
        human_translations,
        machinery_translations,
        new_suggestions,
        peer_approved,
        self_approved,
        rejected,
    )
39472e8396799654ea7fe5bf1c304161b5f3ea92
48,751
def find_nth_loc(string=None, search_query=None, n=0):
    """Locate the index of the nth occurrence of *search_query* in *string*.

    If fewer than *n* occurrences exist, the location of the last one is
    returned instead (or ``len(string)`` when the string does not end with
    the query).

    :param string: text to search
    :param search_query: substring to look for
    :param n: which occurrence to find (1-based)
    :return: ``(idx, id_count)`` -- index of the nth (or last) occurrence and
        the number of occurrences seen up to that point
    """
    # Degenerate inputs: nothing to search, or a non-positive n.
    if not string or not search_query or n <= 0:
        return -1, 0

    loc, prev_loc = -1, -1
    hits = 0
    for _ in range(n):
        loc = string.find(search_query, loc + 1)
        if loc == -1:  # ran out of occurrences before reaching n
            if prev_loc > 0:
                loc = prev_loc + 1  # reset to just past the last hit
                # When the final discovery is not the search query, point
                # past the end of the string instead.
                if string[-len(search_query):] != search_query:
                    loc = len(string)
            break
        # Keep track of identifications.
        if loc != prev_loc:
            hits += 1
        prev_loc = loc
    return loc, hits
1293bda0ee8a681e8b8647e10fcbadda2a788df5
48,755
from typing import Sequence

def count_number_of_depth_increases(depths: Sequence[int], window_size: int = 1) -> int:
    """Count how often the sliding-window depth sum increases.

    Compares each window of ``window_size`` consecutive sonar measurements
    with the window starting one position earlier and counts the positive
    changes.
    """
    increases = 0
    for start in range(1, len(depths) - window_size + 1):
        current = sum(depths[start:start + window_size])
        previous = sum(depths[start - 1:start - 1 + window_size])
        if current > previous:
            increases += 1
    return increases
8435c3ea61c204ee3db9565f929e0b510420e127
48,756
def main(*, equation: str) -> dict:
    """Evaluate a simple arithmetic expression.

    ``^`` is treated as exponentiation and ``\\`` as division before the
    expression is evaluated.

    :param equation: expression on which the calculation is carried out,
        e.g. ``"5+7*9"``
    :return: dictionary ``{"output": <result or error message>}``
    """
    try:
        # Bug fix: the second replace previously started from the raw
        # ``equation`` again, discarding the "^" -> "**" substitution.
        new_equation = equation.replace("^", "**").replace("\\", "/")
        # SECURITY: eval() on user-supplied input can execute arbitrary
        # code; replace with a real expression parser for untrusted input.
        return {"output": f"{str(eval(new_equation))}"}
    except Exception:
        return {"output": "Please write the formula properly"}
59166d19f1bd5aeac32f24f09060e729b75024ad
48,757
import traceback
import sys

def create_schema(connection, schema_name):
    """Create *schema_name* in the database behind *connection*.

    Returns the ``(oid, nspname)`` row of the newly created schema, or
    ``None`` when any step fails (the traceback is printed to stderr).
    NOTE(review): the connection is closed before returning on success.
    """
    try:
        saved_isolation = connection.isolation_level
        connection.set_isolation_level(0)  # autocommit-style level for DDL
        pg_cursor = connection.cursor()
        # SECURITY: schema_name is interpolated directly into SQL; callers
        # must never pass untrusted input here.
        pg_cursor.execute("CREATE SCHEMA %s" % schema_name)
        connection.set_isolation_level(saved_isolation)
        connection.commit()
        # Get schema details of the newly created schema.
        pg_cursor.execute("SELECT sch.oid, sch.nspname FROM pg_namespace sch"
                          " WHERE sch.nspname='%s'" % schema_name)
        schema = pg_cursor.fetchone()
        connection.close()
        return schema
    except Exception:
        traceback.print_exc(file=sys.stderr)
8d65a79bb880c0da45d9986f4cebdabeaa8b71a2
48,758
def get_data_label_id(label_datas):
    """Build the data-id -> label-id mapping for labelled lines.

    Each line is ``"<data_id>\\t<label,label,...>"``.  Labels are assigned
    dense integer ids in order of first appearance.

    :param label_datas: iterable of tab-separated label lines
    :return: ``(data_label_id, label2id)`` -- per-data label-id lists and
        the label -> id dictionary
    """
    label2id = {}
    data_label_id = {}
    for line in label_datas:
        fields = line.replace('\n', '').split('\t')
        data_id = fields[0]
        raw_labels = fields[1].split(',')
        for raw in raw_labels:
            if raw not in label2id:
                label2id[raw] = len(label2id)
        data_label_id[data_id] = [label2id[raw] for raw in raw_labels]
    return data_label_id, label2id
23950255504d90b522d4ed6a19782ffcbadea1f8
48,759
def validate_tag(tag):
    """Check whether *tag* is a valid hashtag.

    A valid tag, after trimming and lower-casing, starts with ``#`` and has
    at least one character after it.
    """
    normalized = tag.strip().lower()
    return normalized.startswith("#") and len(normalized) > 1
16e112376368567dee7d03faed8f37f14931328b
48,760
import os

def check_database(third_party_info, third_party_evidence):
    """Classify each SDK in the evidence dict using *third_party_info*.

    :param third_party_info: mapping of SDK name -> classification
    :param third_party_evidence: mapping of SDK path -> evidence list
    :return: the evidence dict with classifications appended (or replacing
        a previous ``"n/a"`` entry)
    """
    for sdk_path in third_party_evidence:
        sdk_name = os.path.basename(os.path.normpath(sdk_path))
        if sdk_name in third_party_info:
            classifications = [third_party_info[sdk_name]]
        else:
            classifications = ["n/a"]
        if "n/a" not in third_party_evidence[sdk_path]:
            third_party_evidence[sdk_path] = third_party_evidence[sdk_path] + classifications
        else:
            third_party_evidence[sdk_path] = classifications
    return third_party_evidence
dada9c000addda6e2c7d027610255fe21c145bfd
48,761
import logging

def make_url(url_no_file="https://files.rcsb.org/download/", organism_entry="2ki5"):
    """Build the download URL for a PDB entry.

    Takes the download URL without the file part and the organism entry and
    converts them into a download link for the PDB file.

    :param url_no_file: base download URL of the PDB archive
    :param organism_entry: PDB entry id, e.g. ``"2ki5"``
    :return: the full URL for download
    """
    logging.info("Starting to make URL with %s and %s", url_no_file, organism_entry)
    url = url_no_file + organism_entry + ".pdb"
    # Typo fix: "successfuly" -> "successfully" in the log message.
    logging.info("URL %s created successfully", url)
    return url
6a1352b66d2657dc57d9d1a907f36f7cb4390bd7
48,766
def make_linear_part(max_score, min_score):
    """Create the linear part of the UI score mapping.

    The returned function maps scores linearly so that ``min_score`` maps
    to 1 and ``max_score`` maps to 2.

    (Doc fix: the original docstring documented ``bottom``/``middle``/
    ``top`` parameters that do not exist in the signature.)

    :param max_score: the maximum score seen on train
    :param min_score: the minimum score seen on train
    :return: the linear part of the ui score mapping (a callable)
    :raises ZeroDivisionError: if ``max_score == min_score``
    """
    slope = 1 / (max_score - min_score)

    def linear_part(x):
        return x * slope + 1 - slope * min_score

    return linear_part
8151c78032fe65342459223cadcf3a03f1cc6aa6
48,767
def expose_header(header, response):
    """Add *header* to Access-Control-Expose-Headers on *response*.

    This allows client code to access that header's value.  Existing
    entries are preserved and the new name is appended comma-separated.
    """
    current = response.get('Access-Control-Expose-Headers', '')
    if current:
        current = f'{current}, {header}'
    else:
        current = header
    response['Access-Control-Expose-Headers'] = current
    return response
049383730da4fbc5a6286ed072655c28ea647c1e
48,768
def unique(lst):
    """Return a copy of *lst* with only unique entries.

    The result is stable: the first occurrence of each item is kept.
    """
    seen = set()
    deduped = []
    for item in lst:
        if item in seen:
            continue
        seen.add(item)
        deduped.append(item)
    return deduped
6e50803439b65a3fa71814956e947103ca2b634d
48,769
import math

def pickLabelFormat(increment):
    """Pick an appropriate ``%``-style label format for the given increment.

    Increments >= 1 need no decimal places; fractional increments get
    enough places to distinguish consecutive steps.

    Examples (doctests updated from Python 2 ``print x`` syntax):
    >>> print(pickLabelFormat(1))
    %.0f
    >>> print(pickLabelFormat(20))
    %.0f
    >>> print(pickLabelFormat(.2))
    %.1f
    >>> print(pickLabelFormat(.01))
    %.2f
    """
    magnitude = math.log10(increment)
    if magnitude < 0:
        magnitude = abs(magnitude)
        decimal_places = int(magnitude)
        # Round up for non-power-of-ten increments (e.g. 0.2 -> 1 place).
        if magnitude != decimal_places:
            decimal_places += 1
    else:
        decimal_places = 0
    return "%%.%df" % decimal_places
a91372f7b1b770531b23147a6704a20bf8a5e689
48,770
def func(x, y):
    """Compute ``x * y - 5``.

    (Docstring translated from Spanish.)
    """
    return x * y - 5
52679233c492d45933289be05854bffccf741b5b
48,771
def ensure_valid_model_type(specified_type, model_type_list):
    """Validate that *specified_type* is one of *model_type_list*.

    Parameters
    ----------
    specified_type : str.
        Denotes the user-specified model type that is to be checked.
    model_type_list : list of strings.
        Contains all of the model types that are acceptable kwarg values.

    Returns
    -------
    None.

    Raises
    ------
    ValueError
        If the specified type is not in the accepted list.
    """
    if specified_type in model_type_list:
        return None
    raise ValueError(
        "\n".join([
            "The specified model_type was not valid.",
            "Valid model-types are {}".format(model_type_list),
            "The passed model-type was: {}".format(specified_type),
        ])
    )
6708e44d7ea381c89ea31343cb8f435f40fc279f
48,772
import heapq

def top10(m):
    """Return the 10 keys of dict *m* with the largest values, best first."""
    return heapq.nlargest(10, m.keys(), key=m.get)
3e5e5eddf6c11a4a46fcb0e3e9bc342cfa219826
48,773
from typing import Union

def find_error_character(chunk: str) -> Union[str, None]:
    """Find the first illegal (mismatched) closing character in *chunk*.

    :param chunk: string of bracket characters ``()[]{}<>``
    :return: the offending closing character, or ``None`` if every closing
        bracket matches the most recent opener
    """
    bracket_mapping = {")": "(", "]": "[", "}": "{", ">": "<"}
    open_brackets = ["(", "[", "{", "<"]
    stack = []
    for char in chunk:
        if char in open_brackets:
            stack.append(char)
        # Robustness fix: a closer arriving with no opener pending is
        # illegal (the original raised IndexError on ``stack.pop()``).
        elif not stack or stack.pop() != bracket_mapping[char]:
            return char
    return None
fd5aa5360f68b2e61f7d658caedbf1ec67b9de99
48,774
import platform

def generate_user_agent(package_name: str, package_version: str) -> str:
    """Generate a user-agent string in the form
    ``<package info> <python info> (<os info>)``.

    Parameters
    ----------
    package_name : str
        Name of the package to include in the user-agent string.
    package_version : str
        Version of the package to include in the user-agent string.

    Returns
    -------
    str
        User-agent string.
    """
    parts = (
        f"{package_name}/{package_version}",
        f"{platform.python_implementation()}/{platform.python_version()}",
        f"({platform.platform()})",
    )
    return " ".join(parts)
adb7d164de435c606a01abc35c963b0f8d5505e8
48,776
def clean_headers(headers):
    """Take a headers dict and keep only the custom ``X...`` headers,
    removing irrelevant ones.

    (Doc fix: corrected the typo "irrelivant" in the original docstring.)

    :param headers: mapping of header name -> value
    :return: new dict containing only headers whose name starts with ``X``
        (case-insensitive)
    """
    return {k: v for k, v in headers.items() if k.upper().startswith('X')}
6965b8c9a9850513181d0f24a6de4f48b79529f8
48,777
def summary(txt):
    """Return the first line of *txt*."""
    first_line, _, _ = txt.partition('\n')
    return first_line
eb86e9a5225f2b99b500b79927ef3af1ccc7fabb
48,778
def dict_no_none(*args, **kwargs) -> dict:
    """Build a dict from the given key-value pairs, dropping every entry
    whose value is ``None``.
    """
    result = {}
    for key, value in dict(*args, **kwargs).items():
        if value is not None:
            result[key] = value
    return result
de0dbf04f0cea588f190f9ae52cf2696e0c5c6ea
48,779
from typing import Any

def shortname(obj: Any) -> str:
    """Get the ``module.name`` of *obj*.

    Objects without a ``__name__`` (e.g. instances) are resolved via their
    class instead.
    """
    if not hasattr(obj, '__name__') and hasattr(obj, '__class__'):
        obj = obj.__class__
    return f'{obj.__module__}.{obj.__name__}'
78a3d85ad11e8262d515fb2bead6c9d9fdbf085b
48,780
import numpy

def kurtosisTest(kurt, n):
    """Transform sample kurtosis values to an approximately normal statistic.

    Entries where ``kurt == 0`` are assigned the sentinel value ``-1000``.
    NOTE(review): looks like an Anscombe-Glynn style transformation --
    confirm against the original reference before modifying the formula.

    :param kurt: numpy array of kurtosis values
    :param n: sample size
    :return: array of transformed values, same shape as *kurt*
    """
    n = float(n)
    expected = 3.*(n-1.)/(n+1.)
    variance = 24.*n*(n-2.)*(n-3.)/(n+1.)**2/(n+3.)/(n+5.)
    mask = kurt != 0
    x = (kurt[mask] - expected)/numpy.sqrt(variance)
    b = 6.*(n**2-5.*n+2.)/(n+7.)/(n+9.)*numpy.sqrt(6.*(n+3.)*(n+5.)/n/(n-2.)/(n-3.))
    A = 6.+8./b*(2./b+numpy.sqrt(1.+4./b**2))
    transformed = numpy.zeros(kurt.shape)
    transformed[mask] = ((1.-2./9./A)-((1.-2./A)/(1.+x*numpy.sqrt(2./(A-4.))))**(1./3.))/numpy.sqrt(2./9./A)
    transformed[~mask] = -1000  # sentinel for zero-kurtosis entries
    return transformed
0b59f186dd679ca186da710853b95af20ec245d3
48,781
def unique(sequence):
    """Return the unique items of *sequence* as a list, preserving the
    order of the original.
    """
    seen = set()
    ordered = []
    for item in sequence:
        if item not in seen:
            seen.add(item)
            ordered.append(item)
    return ordered
433bc766e21f1e0a6e983383054f51ba84cd5636
48,783
def flatten(items):
    """Remove one level of nesting from *items*.

    Parameters
    ----------
    items : iterable
        list of items to flatten one level

    Returns
    -------
    flattened_items : list
        Iterable items are spliced in; non-iterable items are kept as-is.
        Always returns a list.  Note that strings are iterable, so they
        are spliced character by character.

    Examples
    --------
    >>> flatten([['a', 'b'], [1, 2], ['x']])
    ['a', 'b', 1, 2, 'x']
    """
    flattened = []
    for element in items:
        try:
            flattened.extend(element)
        except TypeError:
            # Not iterable -- keep the element itself.
            flattened.append(element)
    return flattened
5c280d08911e4fdbd32de3e20e75410e871b1d57
48,784
def get_help_text(instance, field_name):
    """Return the ``help_text`` of a model field, for use in templates.

    inspired by
    https://stackoverflow.com/questions/14496978/fields-verbose-name-in-templates

    call in template like e.g. get_help_text <classname> "<fieldname>"

    :param instance: model instance exposing ``_meta.get_field``
    :param field_name: name of the field to look up
    :return: the field's help text, or a default message if missing
    """
    try:
        label = instance._meta.get_field(field_name).help_text
    except Exception:
        # Field lookup failed (unknown field etc.); fall through to the
        # default message.  (Removed the unused ``as e`` binding.)
        label = None
    if label:
        return "{}".format(label)
    return "No helptext for '{}' provided".format(field_name)
b2432ee430744c28630efe27489c68607ca0a495
48,785
def first_column_empty_5():
    """Test fixture whose first column is empty.

    Demonstrates that the empty-first-column edge case can be handled.
    """
    empty_column = []
    column_two = [
        {"id": 5, "name": "Asparagus", "colour": "green", "count": 9, "herb": False},
        {"id": 2, "name": "Basil", "colour": "green", "count": 5, "herb": True},
        {"id": 6, "name": "Beans", "colour": "yellow", "count": 5, "herb": False},
        {"id": 7, "name": "Beets", "colour": "red", "count": 5, "herb": False},
        {"id": 24, "name": "Bell Peppers", "colour": "red", "count": 12, "herb": False},
    ]
    column_three = [
        {"id": 8, "name": "Broccoli", "colour": "green", "count": 8, "herb": False},
        {"id": 15, "name": "Cucumbers", "colour": "green", "count": 9, "herb": False},
        {"id": 45, "name": "Dill", "colour": "green", "count": 4, "herb": True},
        {"id": 17, "name": "Eggplant", "colour": "purple", "count": 8, "herb": False},
        {"id": 1, "name": "Tomatoes", "colour": "red", "count": 8, "herb": False},
    ]
    return [empty_column, column_two, column_three]
6fcd16c259b838ba80257194684c71b7736536fb
48,786
import inspect
import os

def rel_path(relative_filename):
    """Return the full path of *relative_filename* relative to the caller's
    module directory.

    :param relative_filename: target filename relative to the caller's
        containing folder.
    :return: the full path of the target relative file.
    """
    # Stack frame 1 is our caller; its source file anchors the path.
    caller_frame = inspect.stack()[1]
    try:
        caller_filepath = inspect.getabsfile(caller_frame[0])
    finally:
        # Drop the frame reference promptly to avoid garbage-collection
        # issues from reference cycles.
        del caller_frame
    base_dir = os.path.dirname(os.path.realpath(caller_filepath))
    return os.path.join(base_dir, relative_filename)
8fd02357000e6b29bced6d617bf2f11cd3b389ba
48,787
def _format_collider_string(colliders): """ Write the string for the bath gas collider and their efficiencies for the Lindemann and Troe functional expressions: :param colliders: the {collider: efficiency} dct :type colliders: dct {str: float} :return: collider_str: Chemkin string with colliders and efficiencies :rtype: str """ collider_str = ' ' # name_buffer collider_str += ''.join( ('{0:s}/{1:4.3f}/ '.format(collider, efficiency) for collider, efficiency in colliders.items())) collider_str += '\n' return collider_str
4e4aa8ae46dfcf05f00222b4cf5c95384977d651
48,788
def get_bbox_limits(bbox):
    """Convert bounding boxes from ``[x_start, y_start, width, height]``
    format to ``[x_start, y_start, x_end, y_end]`` format.

    NOTE: the conversion is done in place -- the input array is modified
    and also returned.  (Doc fix: the original docstring had the two
    formats garbled together.)

    :param bbox: array of shape (N, 4) in x/y/w/h rows
    :return: the same array, now in x1/y1/x2/y2 rows
    """
    bbox[:, 2] += bbox[:, 0]
    bbox[:, 3] += bbox[:, 1]
    return bbox
42d066d8e028d73b60543b8f6916100c8709047d
48,790
import time
import logging

def l__(msg, out=True):
    """Log *msg* as the start of a step, stamping the moment it began.

    :param msg: message to log
    :param out: when True, return ``time.time()`` so the caller can later
        measure elapsed time; otherwise return None
    """
    logging.info(' ' + msg + ' -->')
    if out:
        return time.time()
ea4f68158e44a9922b8b4d32feac26bad50deee7
48,792
def is_users_personal_account(context, account):
    """Return `true` if the current path is for the user's personal account.

    Used to determine which items to show in the settings submenu.
    """
    request = context["request"]
    path = request.path
    return account.is_personal and (
        path.startswith("/me/")
        or request.user.is_authenticated
        and path.startswith(f"/{request.user.username}/")
    )
9a5938761fb3a8c5876152bb326f008b8e607f95
48,793
from pathlib import Path
import toml

def set_fields_from_pyproject(
    fields: dict,
    pyproject_path: Path = Path(__file__).parents[2].joinpath("pyproject.toml"),
    name: str = "inboard",
    version: str = "0.1.0",
) -> dict:
    """Create a dictionary of keys and values corresponding to pydantic
    model fields.

    When instantiating the pydantic model, the dictionary can be unpacked
    and used to set fields in the model.  Falls back to the given *name*
    and *version* when pyproject.toml cannot be read.
    """
    try:
        poetry_config = dict(toml.load(pyproject_path))["tool"]["poetry"]
        return {key: poetry_config.get(key)
                for key in fields if poetry_config.get(key)}
    except Exception:
        return {"name": name, "version": version}
52d2764c3286a348f6cf9a5e371c030e9df6e4fc
48,794
def split_data(data):
    """Split interleaved coordinate channels out of *data*.

    Entries along the first axis repeat in groups of 4 as x1, y1, x2, y2
    over indices 0..35.

    :param data: 36 * H * W
    :return: the (x1, y1, x2, y2) strided slices of *data*
    """
    x1 = data[0:33:4]
    y1 = data[1:34:4]
    x2 = data[2:35:4]
    y2 = data[3:36:4]
    return x1, y1, x2, y2
5728093118e0c16a90015c1af26c1124f65ba9a4
48,795
import random import string def _rand_id(ncar: int = 8) -> str: """Random hash""" return ''.join([random.choice( string.ascii_letters + string.digits) for n in range(ncar)])
7978509db06f22713062489fd1c174fcdd82fb17
48,796
def byte_unit(file_size):
    """Convert a byte count into a human-readable string.

    (Docstring translated from Japanese.)

    Parameters
    ----------
    file_size : int
        Size of the file in bytes.

    Returns
    -------
    str
        The size formatted with an appropriate unit (B/KB/MB/GB).  Sizes
        of 1 TB or more fall back to the raw byte count in B.
    """
    kb, mb, gb, tb = 1024, 1024**2, 1024**3, 1024**4
    if file_size < kb:
        return str(file_size) + ' B'
    if file_size < mb:
        return str(round(file_size / kb, 2)) + ' KB'
    if file_size < gb:
        return str(round(file_size / mb, 2)) + ' MB'
    if file_size < tb:
        return str(round(file_size / gb, 2)) + ' GB'
    return str(file_size) + ' B'
d9df45cfd81a14d6a92403a632cac1827c4f6595
48,797
def kl_div(mean, log_var):
    """KL divergence between N(mean, exp(log_var)) and the standard normal.

    Args:
        mean: mean vector
        log_var: log variance vector

    Returns:
        kl divergence as a scalar tensor, averaged over the batch (dim 0)
        and summed over the remaining dimensions
    """
    per_dim = 0.5 * (mean.pow(2) + log_var.exp() - log_var - 1).mean(dim=0)
    return per_dim.sum()
b91790d922c259c09103023112bbeb30a38ed67d
48,799
def layer_size(X, Y):
    """Get the input/output layer sizes and the fixed hidden-layer size.

    :param X: input dataset of shape (m, n_features), e.g. (m, 784)
    :param Y: input labels of shape (m, n_outputs), e.g. (m, 1)
    :return: (n_x, n_h, n_y) -- sizes of the input, hidden and output layers
    """
    n_x = X.T.shape[0]
    n_h = 10  # hidden layer size is fixed
    n_y = Y.T.shape[0]
    return n_x, n_h, n_y
81add6bf528cfe872e62f622161bb25fb7dcb1d3
48,800
from typing import Set

def done(job: str, completed: Set[str]) -> str:
    """Convert set membership into `Yes` or `No`.

    :param job: The job to check if it was acted on
    :param completed: The jobs acted on
    :returns: Yes or No
    """
    if job in completed:
        return 'Yes'
    return 'No'
6d056f2471dafb3cab274ffb916e073c2fb62cf6
48,801
from typing import get_origin
from typing import get_args

def has_origin(typ, origin, num_args=None):
    """
    Determines if a concrete class (a generic class with arguments) matches
    an origin and has a specified number of arguments.

    This does a direct match rather than a subclass check.

    (Doc fix: the old note about the ``__origin__``/``__args__`` dunders and
    Python 3.5/3.6 compatibility was stale -- this implementation uses
    ``typing.get_origin``/``get_args``, available since Python 3.8.)

    :param typ: the type to inspect
    :param origin: an origin class, or tuple of origin classes, to match
    :param num_args: when given, the exact number of type arguments required
    :return: True when the origin matches (and the arg count, if requested)
    """
    t_origin = get_origin(typ)
    if not isinstance(origin, tuple):
        origin = (origin,)
    return t_origin in origin and (num_args is None or len(get_args(typ)) == num_args)
9ba281ac18f1e152d9456103a61070e8e6c70e09
48,804
def sol(request):
    """Pytest fixture helper establishing a small range of sols to test.

    Returns the current parametrized value.
    """
    return request.param
0f001d2e4b3af6a50471e7dffb861b716dfcc13f
48,806
def extend(cls, static=False):
    """Extends the cls class.

    Parameters
    ----------
    cls : class
        Class to extend.
    static : boolean, default = False
        Whether to attach the function as a static method.

    Returns
    -------
    decorator
        Decorator that attaches the decorated function to *cls* and
        returns the function unchanged.
    """
    def decorator(func):
        attr = staticmethod(func) if static else func
        setattr(cls, func.__name__, attr)
        return func
    return decorator
ab3456d11b2ce7edde1a468e743fc77bcc1e3f58
48,807
def create_collector(collector_class, result_dir, test_name):
    """Instantiate *collector_class* for the given test run.

    :param collector_class: The collector class to be used.
    :param result_dir: Directory with test results.
    :param test_name: Test to be run.
    :return: A new collector instance.
    """
    return collector_class(result_dir, test_name)
5780297706e8ee00b3e7cc7cfd5da7997fd5dd66
48,808
def class_values(names_file):
    """Get integer values of classes from a ``.names`` file.

    Args:
        names_file: File containing a list of classes.  Each line of the
            file should contain a separate class name.

    Returns:
        class_dict (dict): Dict that maps class names to integer values
            (their zero-based line numbers).

    Raises:
        ValueError: If the names file is empty.
    """
    class_dict = {}
    with open(names_file) as f:
        # Bug fix: the original ``for index, name in f`` tried to unpack
        # each line (a string) into two values, which raises ValueError
        # for any real class name; enumerate() supplies the line index.
        for index, name in enumerate(f):
            class_dict[name.strip()] = index
    if not class_dict:
        raise ValueError("Names file is empty.")
    return class_dict
5b856853edac6b14cb3246a776d2584655b593b2
48,809
from typing import Dict

def multimodel_num_layers() -> Dict[str, int]:
    """Number of layers in each of sub-models in `multimodel` fixture."""
    layer_counts = {'encoder': 2, 'forecaster': 6, 'combined': 10}
    return layer_counts
115e274decf2e18f5fae31d07fca46183853f513
48,810
def sublist(list1: list, list2: list) -> bool:
    """Check whether *list1* occurs as a contiguous run inside *list2*,
    preserving order.

    :param list1: list1 of elements to find
    :param list2: list2 target list
    :return: True if *list1* appears contiguously within *list2*

    Fixes over the original: an empty *list1* no longer raises IndexError
    (an empty list is trivially contained), the out-of-bounds probe uses an
    explicit bounds check instead of ``except Exception`` with an unused
    binding, and slicing replaces the manual double loop.
    """
    needle_len = len(list1)
    if needle_len == 0:
        return True
    for start in range(len(list2) - needle_len + 1):
        if list2[start:start + needle_len] == list1:
            return True
    return False
6981a35efe0bb18b9d2ca2593a9f38ea049acb28
48,811
def getstring(data):
    """Convert raw bytes read from a file into a text string.

    NUL bytes are skipped; every other byte is mapped to its character via
    ``chr`` (Latin-1 semantics).

    Bug fix: iterating ``bytes`` yields ``int`` in Python 3, so the original
    ``bytechar == b'\\x00'`` test never matched and ``str(bytechar)``
    produced decimal digits (e.g. ``"104105"``) instead of characters.

    :param data: bytes-like object to convert
    :return: decoded string with NUL bytes removed
    """
    return ''.join(chr(byte) for byte in data if byte != 0)
4abd5468da4878efd122bc707edb2b6a726ce1ab
48,812
def read_answers(filename, sep):
    """Read a word-sense-annotated file and map each word to sense ids.

    Tokens look like ``word<sep>...<sep>sense``; tokens without *sep* get
    the placeholder sense ``'a'``.  Senses are numbered in order of first
    appearance across the whole file.

    :param filename: path of the annotated file
    :param sep: separator between word and sense annotation
    :return: dict mapping each word to the list of its sense ids
    """
    words = []
    senses = []
    senses_id = {}
    with open(filename, 'r') as f:
        for line in f.readlines():
            for token in line.split():
                parts = token.split(sep)
                words.append(parts[0])
                sense = parts[-1] if len(parts) > 1 else 'a'
                senses.append(sense)
                if sense not in senses_id:
                    senses_id[sense] = len(senses_id)
    answers = {}
    for word, sense in zip(words, senses):
        answers.setdefault(word, []).append(senses_id[sense])
    return answers
3c69be23f3030cc777eebff094da2e4bf1e16004
48,814
from typing import Dict
from typing import List

def get_available_languages(localisation: Dict) -> List[str]:
    """Get the languages available for human readable procedure descriptions.

    Args:
        localisation: Localisation collection, e.g.
            ``PlatformClass().localisation``

    Returns:
        List of language codes, e.g. ``['en', 'zh']``; ``'en'`` is always
        included first.
    """
    languages = ['en']
    for human_readables in localisation.values():
        for code in human_readables:
            if code not in languages:
                languages.append(code)
    return languages
25506eb7a7819f89b54df05e67e65c44f58285bb
48,815
def sql_number_list(target):
    """Render numbers for placement after the SQL ``IN`` operator in a
    query statement, as in ``"(1, 2, 3)"``.

    A bare number is treated as a one-element list.

    :raises ValueError: if *target* is empty/falsy
    """
    if not target:
        raise ValueError
    numbers = target if isinstance(target, list) else [target]
    formatted = ", ".join("%d" % (n,) for n in numbers)
    return "(%s)" % (formatted,)
386a8d10ed9fb7f625080eb3636f9d3909cacc6a
48,816
import torch

def fold(input: torch.Tensor, window_size: int, height: int, width: int) -> torch.Tensor:
    """Fold a tensor of windows back into a 4D feature map.

    :param input: (torch.Tensor) windows of shape
        [batch size * windows, channels, window size, window size]
    :param window_size: (int) window size to be reversed
    :param height: (int) height of the feature map
    :param width: (int) width of the feature map
    :return: (torch.Tensor) folded tensor of shape
        [batch size, channels, height, width]
    """
    channels = input.shape[1]
    # Recover the original batch size from the number of windows per image.
    batch_size = int(input.shape[0] // (height * width // window_size // window_size))
    # Re-assemble the window grid, then move channels ahead of the spatial
    # dims and merge the grid with the intra-window coordinates.
    grid = input.view(batch_size, height // window_size, width // window_size,
                      channels, window_size, window_size)
    return grid.permute(0, 3, 1, 4, 2, 5).reshape(batch_size, channels, height, width)
92a68279b357f8b977d4b144fd249540de833fb5
48,817
from typing import Optional
from pathlib import Path

def to_uri(
    path: str, local_basedir: Optional[Path] = None, relative_prefix: str = ""
) -> str:
    """
    Given a path or URI, normalize it to an absolute path.

    If the path is relative and without protocol, it is prefixed with
    `relative_prefix` before attempting to resolve it (by default equal to
    prepending `cwd://`)

    If path is already http(s):// or file://... path, do nothing to it.
    If the path is absolute (starts with a slash), just prepend file://
    If the path is cwd://, resolve based on CWD (even if starting with a slash)
    If the path is local://, resolve based on `local_basedir` (if not given,
    CWD is used)

    Result is either http(s):// or a file:// path that can be read with
    urlopen.
    """
    local_basedir = local_basedir or Path("")

    # Bare relative paths get the configured prefix before resolution.
    if str(path)[0] != "/" and str(path).find("://") < 0:
        path = relative_prefix + path

    pieces = str(path).split("://")
    if len(pieces) == 1:
        scheme, rest = "", pieces[0]
    else:
        scheme, rest = pieces

    if scheme.startswith(("http", "file")):
        return path  # nothing to do
    if scheme == "local":
        # relative, but not to CWD, but a custom path
        rest = str((local_basedir / rest.lstrip("/")).absolute())
    elif scheme == "cwd":
        # like normal resolution of relative, but absolute paths are still
        # interpreted relative, so cwd:// and cwd:/// give the same result
        rest = str(Path(rest.lstrip("/")).absolute())
    elif scheme == "":
        # relative paths are made absolute
        if not Path(rest).is_absolute():
            rest = str(Path(rest).absolute())
    else:
        raise ValueError(f"Unknown protocol: {scheme}")
    return f"file://{rest}"
6ebe76a1574fe844d808ac4889afe163d9d3131a
48,818
from docutils.core import publish_string

def html(text, **kwargs):
    """Render *text* (reStructuredText) to an HTML string.

    Extra keyword arguments are forwarded to ``docutils``'
    ``publish_string``.
    """
    # assuming run from the correct directory
    return publish_string(source=text, writer_name='html', **kwargs)
9aaecf69bf1f7bce109a74242df2f2901d237261
48,819
def need_depend(glyph):
    """Tell whether an RGlyph object needs the ``depend`` attribute.

    A glyph needs it when its name does not end in ``'V'`` and its outline
    has overlapping contours.
    """
    is_variant = glyph.name.endswith('V')
    return not is_variant and glyph.hasOverlap()
6f253d96e90aef44baaec43a437d62f65e152782
48,820
def unscale_bb(bounding_box, shape):
    """Compute absolute bounding boxes from relative ones.

    :param bounding_box: [x, y, w, h] relative bounding box (per-row)
    :param shape: per-sample scale factors; column 0 scales x/width,
        column 1 scales y/height
    :return: list ``[abs_x, abs_y, abs_width, abs_height]``
    """
    scale_x = shape[:, 0]
    scale_y = shape[:, 1]
    return [
        bounding_box[:, 0] * scale_x,
        bounding_box[:, 1] * scale_y,
        bounding_box[:, 2] * scale_x,
        bounding_box[:, 3] * scale_y,
    ]
71c8be2c864988c5abd4368db05ec24b411474a8
48,821
import argparse

def parse_args():
    """Build and parse the command-line arguments for mention extraction."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--in_file", type=str, required=True,
                        help="File to extract mentions from")
    parser.add_argument("--out_file", type=str, required=True,
                        help="File to write extracted mentions to")
    parser.add_argument("--entity_db_dir", type=str, required=True,
                        help="Path to entity db")
    parser.add_argument("--min_alias_len", type=int, default=1)
    parser.add_argument("--max_alias_len", type=int, default=6)
    parser.add_argument("--num_workers", type=int, default=8)
    parser.add_argument("--num_chunks", type=int, default=8)
    parser.add_argument("--verbose", action="store_true")
    return parser.parse_args()
7ee93b3ea0badb0a4cef1c53628875cfac52cdb3
48,822
import ast

def safe_determine_type(string):
    """
    Determine the python type of the given literal, for use in docstrings.

    Args:
        string (str): The string to evaluate

    Returns:
        ``str``: The type name, or "TYPE" if the type could not be
        determined

    Fixes over the original: ``SyntaxError`` raised by
    ``ast.literal_eval`` is now caught alongside ``ValueError``, and the
    fall-through path that previously returned ``None`` silently now
    returns "TYPE".
    """
    try:
        return ast.literal_eval(string).__class__.__name__
    except (ValueError, SyntaxError):
        pass
    try:
        # Detect set constructor calls and set-like brace literals by
        # rewriting braces to brackets and checking for a list.
        if string.startswith('set('):
            return 'set'
        rewritten = string.replace('{', '[').replace('}', ']')
        if isinstance(ast.literal_eval(rewritten), list):
            return 'set'
    except (ValueError, SyntaxError):
        pass
    return 'TYPE'
75d24fceeb5db3db715300ec4c74c2bd60765bfa
48,823
def greatest_common_divisor(a, b):
    """Return the greatest common divisor (GCD) of a and b.

    (Docstring translated from Portuguese.)

    Args:
        a (int): first value.
        b (int): second value.

    Returns:
        int: greatest common divisor of a and b.

    Fixes over the original: ``gcd(0, n)`` now correctly returns ``n``
    (the original returned 1; ``gcd(0, 0)`` returns 0), and the ``b < a``
    branch recurses on ``(a - b, b)`` instead of ``(a - b, a)``, which
    produced the right answer only by algebraic accident and wasted
    recursion depth.
    """
    if a == 0:
        return b
    if b == 0:
        return a
    if a < b:
        return greatest_common_divisor(a, b - a)
    if b < a:
        return greatest_common_divisor(a - b, b)
    return a
72cd71b9afb763156476d70c887e41a47ca95089
48,825
def polyester_gl_input():
    """Generic material data for polyester (Table C3.8.1)."""
    properties = {}
    properties["name"] = "polyester"
    properties["density"] = 1200
    properties["modulus_x"] = 3000000
    properties["modulus_xy"] = 1140000
    properties["poisson"] = 0.316
    return properties
ce5f49a707961104bfab9b181af937b3743cecfc
48,826
from numpy import right_shift, bitwise_and, empty, uint8, uint16
import numpy

def mono12p_to_image_ver2(rawdata, row, col):
    """Convert FLIR Mono12p packed flat data (uint8) to a 16-bit image
    (uint16).

    Every 3 input bytes hold two 12-bit pixels.  NOTE(review): the
    ``view(uint16)`` reinterpretation assumes a little-endian host --
    confirm before running on big-endian hardware.
    """
    triplets = rawdata.reshape(-1, 3)
    n_pixels = 2 * len(triplets)
    # Split each triplet into low/high byte planes for the two pixels.
    lo = empty(n_pixels, uint8)
    hi = empty(n_pixels, uint8)
    lo[0::2] = triplets[:, 0]
    lo[1::2] = bitwise_and(triplets[:, 1], 15)    # low nibble of middle byte
    hi[0::2] = bitwise_and(triplets[:, 1], 240)   # high nibble of middle byte
    hi[1::2] = triplets[:, 2]
    image_flat = empty(n_pixels, uint16)
    image_flat[0::2] = lo.view(uint16)
    image_flat[1::2] = right_shift(hi.view(uint16), 4)
    return image_flat.reshape((row, col))
79453fe3ecfbdb9e43205ecd96f4562b8a138066
48,827
def normalize(data):
    """Normalize *data* into the interval <0, 1> so its entries sum to 1."""
    total = sum(data)
    return [value / total for value in data]
d82dae78a0767d6a4519aca518d7ce27049fab41
48,828
def _jupyter_nbextension_paths():  # pragma: no cover
    """Allows commands like
    jupyter nbextension install --py jupytext
    jupyter nbextension enable --py jupytext
    jupyter labextension install jupyterlab-jupytext"""
    nbextension = {
        "section": "notebook",
        # the path is relative to the `jupytext` directory
        "src": "nbextension",
        # directory in the `nbextension/` namespace
        "dest": "jupytext",
        # _also_ in the `nbextension/` namespace
        "require": "jupytext/index",
    }
    return [nbextension]
0714407d7c9468363845bf62c478e3d5944d803c
48,829
import torch
import math


def sym_diag(mat):
    """Diagonal of a compactly stored symmetric matrix.

    Parameters
    ----------
    mat : (..., M * (M+1) // 2) tensor
        A symmetric matrix stored sparsely along the last dimension: the M
        diagonal elements first, followed by the flattened upper-triangle
        elements, e.g. [a00, a11, a22, a01, a02, a12].

    Returns
    -------
    diag : (..., M) tensor
        Main diagonal of the matrix.
    """
    mat = torch.as_tensor(mat)
    # The flat length is M*(M+1)/2; invert that quadratic to recover M.
    flat_len = mat.shape[-1]
    side = int((math.sqrt(1 + 8 * flat_len) - 1) // 2)
    return mat[..., :side]
23c29d1c6429089e88720e9ba7d3d6f8968f9ca0
48,831
def borrow(sharedvar, boro=True):
    """Return the ndarray underlying a shared variable.

    Simply forwards to ``sharedvar.get_value(borrow=boro)``; with the
    default ``boro=True`` the backing buffer may be handed out without a
    copy, pass ``False`` to request a copy.
    """
    return sharedvar.get_value(borrow=boro)
02b3b8e06406320282a5058fc7a803203454166d
48,832
def intersect(seq1, seq2):
    """Return a list of the elements of ``seq1`` that also occur in ``seq2``.

    Order and duplicates from ``seq1`` are preserved.
    """
    return [item for item in seq1 if item in seq2]
b7cb43616e8000427005c350b2b07a59768f2995
48,833
def code(value: int) -> str:
    """Build the ANSI escape sequence for the given SGR `value`."""
    return '\033[' + str(value) + 'm'
48b499b5055d616bc7f51043e96429be123b5dd2
48,834
import difflib


def GetBestStringMatchValue(string1, string2):
    """
    Return the value of the highest matching substrings between two strings.

    Parameters
    ----------
    string1 : string
        First string.
    string2 : string
        Second string.

    Returns
    ----------
    int
        Best match ratio found between string1 and string2 (0 when either
        string has no alphanumeric characters).
    """
    # Compare case-insensitively and only over alphanumeric characters.
    clean1 = ''.join(ch for ch in string1.lower() if ch.isalnum())
    clean2 = ''.join(ch for ch in string2.lower() if ch.isalnum())

    if not clean1 or not clean2:
        return 0

    if len(clean1) == len(clean2):
        return difflib.SequenceMatcher(None, clean1, clean2).ratio()

    # Different lengths: besides the whole-string ratio, try matching the
    # shorter string against every matching substring of the longer one and
    # keep the best ratio seen.
    short, long_ = sorted((clean1, clean2), key=len)
    matcher = difflib.SequenceMatcher(None, short, long_)
    best_ratio = matcher.ratio()
    for block in matcher.get_matching_blocks():
        candidate = long_[block[1]:block[1] + block[2]]
        sub_ratio = difflib.SequenceMatcher(None, short, candidate).ratio()
        if sub_ratio > best_ratio:
            best_ratio = sub_ratio
    return best_ratio
a8fe07317db7ce26478b1faa0df13c3558c8fea5
48,835
def unwrap_filter(response, category):
    """
    Strips one layer of aggregations (named by <category>) from an
    Elasticsearch query response, leaving it still in proper ES response
    format.

    :param response: An Elasticsearch aggregation response dictionary.
    :param category: Name of the topmost aggregation in the response.
    :returns: A shallow copy of the response with one level of aggregation
        removed; the caller's dict is left untouched.
    """
    return {**response, 'aggregations': response['aggregations'][category]}
82d6533edf8091e5a6b353f903f652bcb700dcde
48,836
def get_longest_consecutive_chars(string):
    """
    Given a string, return the longest run of characters without a repeat.

    NOTE: when a repeated character is seen, the run restarts from just that
    character (this differs from the classic sliding-window algorithm, and
    the behaviour is preserved deliberately).
    """
    best = ''
    run = ''
    for ch in string:
        # Extend the current run, or restart it at this character when the
        # character already appears in the run.
        run = run + ch if ch not in run else ch
        if len(run) > len(best):
            best = run
    return best
4607a1adada254c36e46a28c036cb852992544a5
48,837
import math


def root_mean_squared_error(LeadDay, df, model, observed):
    """
    Args:
        LeadDay: lead-day key for forecast models with lead times; pass
            None when the data has no lead-day level.
        df: data frame where observed and model data are found.
        model & observed: column names (strings).
    Return:
        Root mean squared error: sqrt(mean((model - observed) ** 2)).
    """
    # Select the frame, or the sub-frame for the requested lead day.
    frame = df if LeadDay is None else df[LeadDay]
    squared_errors = [
        (predicted - actual) ** 2
        for actual, predicted in zip(frame[observed], frame[model])
    ]
    return math.sqrt(sum(squared_errors) / len(squared_errors))
0bd5b5627fa2dda1ef444bd12c69f670daa18e44
48,838
def remap_constraint_indices(tokenized_sequence, detokenized_sequence, constraint_indices):
    """
    Map the constraint indices of a tokenized sequence to the indices of a detokenized sequence

    Any time there was '@@ ' in the tokenized sequence, we removed it
    - the detokenized sequence has fewer spaces than the tokenized sequence
    """
    # Index both directions for O(1) "is this position a boundary?" tests.
    constraint_idx_starts = {start: end for start, end in constraint_indices}
    constraint_idx_ends = {end: start for start, end in constraint_indices}
    remapped_indices = []
    tokenized_idx = 0    # cursor into tokenized_sequence
    current_offset = 0   # number of tokenized chars skipped (removed) so far
    true_start = None    # detokenized start of the constraint currently open
    # NOTE(review): true_idx is unused; enumerate only drives the iteration.
    for true_idx, output_char in enumerate(detokenized_sequence):
        if tokenized_idx in constraint_idx_starts:
            true_start = tokenized_idx - current_offset
        elif tokenized_idx in constraint_idx_ends:
            assert true_start is not None, 'if we found an end, we also need a start'
            true_end = tokenized_idx - current_offset
            remapped_indices.append([true_start, true_end])
            true_start = None

        # this logic assumes that post-processing did not _change_ any characters
        # I.e. no characters were substituted for other characters
        while output_char != tokenized_sequence[tokenized_idx]:
            # Skip over a removed tokenized char; every skip widens the offset
            # between tokenized and detokenized indices.
            tokenized_idx += 1
            current_offset += 1
            if tokenized_idx > len(tokenized_sequence):
                raise IndexError('We went beyond the end of the longer sequence: {}, when comparing with: {}'.format(
                    tokenized_sequence,
                    detokenized_sequence
                ))

            # Boundaries may also fall on skipped positions; record them here
            # with the updated offset.
            if tokenized_idx in constraint_idx_starts:
                true_start = tokenized_idx - current_offset
            elif tokenized_idx in constraint_idx_ends:
                assert true_start is not None, 'if we found an end, we also need a start'
                true_end = tokenized_idx - current_offset
                remapped_indices.append([true_start, true_end])
                true_start = None

        tokenized_idx += 1

    # A constraint still open when both sequences end is closed at the final
    # cursor position.
    if true_start is not None:
        true_end = tokenized_idx - current_offset
        remapped_indices.append([true_start, true_end])

    return remapped_indices
787bafe4d1fa527385ec40217c2045e567a39e18
48,840
def pad_with_zeros(hist_list):
    """
    For each year which doesn't exist here, put 0.

    Missing years between entries are filled with ``(year, 0)`` pairs.
    The list is modified in place and the same list object is returned.
    """
    filled = []
    # Sentinel: pretend the year before the first entry was the last seen.
    last_year = hist_list[0][0] - 1
    for item in hist_list:
        # Bridge any gap since the previous entry with zero-count years.
        while item[0] - last_year > 1:
            last_year += 1
            filled.append((last_year, 0))
        filled.append(item)
        last_year += 1
    # Slice-assign so callers holding a reference see the padded content.
    hist_list[:] = filled
    return hist_list
3d2c2f9fba9b55039dcb4e42e27a9da9e3fb7048
48,841
from typing import Tuple


def find_prefix(root, prefix: str) -> Tuple[bool, int]:
    """
    Walk the trie from ``root`` and report whether ``prefix`` exists.

    Returns ``(found, count)`` where ``count`` is the counter stored on the
    node terminating the prefix, or ``(False, 0)`` when the prefix (or the
    whole trie) is absent.
    """
    if not root.children:
        return False, 0
    node = root
    for char in prefix:
        # Look for a child carrying this character.
        for child in node.children:
            if child.char == char:
                node = child
                break
        else:
            # No child matched: the prefix is not in the trie.
            return False, 0
    return True, node.counter
d7fd44ceecd62f94f6473b2f403e492dae1048c8
48,842
def find_phone(p_id, phone_data):
    """
    Return the phone name matching the given user id, or "-" when the id
    is not present in ``phone_data['ids']``.
    """
    try:
        position = phone_data['ids'].index(p_id)
    except ValueError:
        return "-"
    return phone_data['names'][position]
278e3f9141eadce0e5505b11a5240bc12a3b7820
48,843
def get_ext_domain_inurl_scheme_prefix(ext_domain, force_https=None):
    """Legacy leftover kept for compatibility; always returns an empty string."""
    return ''
d97ccdc5c5f1a13e4693d26ac60c81bc9d1a0ad6
48,844
import keyword
import re

# Compiled once: zero-width position before each interior capital letter.
_CAMEL_BOUNDARY = re.compile(r"(?<!^)(?=[A-Z])")


def convert_to_python_var_name(name):
    """Convert an IMC server variable name to Python's snake_case format.

    Args:
        name (str): string to be converted to python recommended format.

    Returns:
        str: snake_case name; Python keywords (e.g. ``class``) receive a
        trailing underscore so the result is a usable identifier.
    """
    python_var = _CAMEL_BOUNDARY.sub('_', name).lower()
    # The previous version only special-cased "class"; any other reserved
    # word ("for", "def", ...) produced an invalid identifier.
    if keyword.iskeyword(python_var):
        return python_var + "_"
    return python_var
e2efa8e31d5446151cd89037da80a7d2447e7006
48,847
import os


def _local_filename(filename):
    """Get an absolute path to filename in the same directory as this module."""
    module_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(module_dir, filename)
9e730c6b944616237645d2893d8f58de4c941d1e
48,849
import os
import pickle


def read_results_and_agent_types(folder_path):
    """
    Load the pickled results from old runs found in ``folder_path``.

    Every ``*.pkl`` file is unpickled; the agent name is the file name with
    its ``-results_dict.pkl`` suffix stripped.

    Returns:
        dict: agent name -> unpickled results object.
    """
    results = {}
    for fname in os.listdir(folder_path):
        if not fname.endswith(".pkl"):
            continue
        agent_name = fname.replace("-results_dict.pkl", "")
        # Context manager closes the handle deterministically; the previous
        # version leaked the handle from a bare ``open`` inside pickle.load.
        # NOTE: pickle.load can execute arbitrary code; only point this at
        # trusted result folders.
        with open(os.path.join(folder_path, fname), 'rb') as handle:
            results[agent_name] = pickle.load(handle)
    return results
0b6bc584e99377e94a66f43ca931cb227ad1067e
48,851
def full_method(apple, pear, banana=9, *args, **kwargs):
    """Echo back every kind of argument as a single tuple.

    Returns:
        tuple: (apple, pear, banana, extra positional args, keyword args).
    """
    echoed = (apple, pear, banana, args, kwargs)
    return echoed
f331eb74d472b8d98b2db8119e402fe57a6a14d8
48,853
def change_series(data):
    """
    Filter a pandas series down to the points where the value changes.

    Returns the non-zero first differences; the initial diff entry (always
    NaN) is dropped from the result.
    """
    deltas = data.diff()
    changed = deltas[deltas != 0]
    return changed.iloc[1:]
2de1d4ff1e2057af2e01b855f857623aeb96b667
48,854
def week():
    """Return the fixed week number used for API calls."""
    api_week = 5
    return api_week
85f55103136ad19c8bc24faa4339689eb978e381
48,855
import os
import re


def listdirs(directory='.', dirname_regex='.+'):
    """List subdirectories of ``directory`` whose names match ``dirname_regex``.

    The regex is applied with ``re.search``, i.e. it may match anywhere in
    the directory name.
    """
    matcher = re.compile(dirname_regex)
    return [
        name for name in os.listdir(directory)
        if matcher.search(name) is not None
        and os.path.isdir(os.path.join(directory, name))
    ]
c9a2941b8983e50d2404b3a0738e1c1da76b203f
48,856
def module_file(module):
    """Return the correct original file name of a module.

    Compiled ``.pyc`` paths are mapped back to their ``.py`` source path.
    """
    path = module.__file__
    if path.endswith('.pyc'):
        return path[:-1]
    return path
4fbda2b1c839690f880a6f6c1bcba2ca4168a701
48,857
def clamp(value, smallest, largest):
    """Return `value` if in bounds else returns the exceeded bound."""
    capped = min(value, largest)
    return max(smallest, capped)
8ce129fd2bccacf7b45d9b4924f69a54bbb15e3b
48,859
import os


def get_xlsx_files(path):
    """Map file names to full paths for all XLSX files under ``path``.

    The walk is recursive; names containing ``$`` (e.g. Excel's ``~$`` lock
    files) are skipped, and later entries with the same file name overwrite
    earlier ones.
    """
    xlsx_files = {}
    for dirpath, _dirnames, filenames in os.walk(path):
        for filename in filenames:
            if '$' in filename or not filename.endswith('.xlsx'):
                continue
            xlsx_files[filename] = os.sep.join([dirpath, filename])
    return xlsx_files
d0694a9046de206b06904ae5c96dbad9d637f190
48,863
def extra_whitespaces():
    """Return a regex matching every space after the first in a run of spaces.

    Specially useful to remove extra whitespaces from text: substitute
    matches with an empty string.
    """
    pattern = r'(?<=[ ]{1})[ ]{1,}'
    return pattern
e95848c0dfc857fa9825140c546134b54c68ebe9
48,864
def convert_to_reverse_ip(ip):
    """Return the dotted-quad address with its octets in reverse order."""
    return '.'.join(reversed(ip.split('.')))
01250f71364af25b3e2fb61e685b584fbd7c6140
48,865
def oid_outside_tree(oid1, oid2):
    """
    Return True if oid2 is outside of the oid1 tree.

    Used by SNMPSession.walk. Values that cannot be compared as a string
    prefix (e.g. ``None``) are treated as outside the tree.
    """
    try:
        return not oid2.startswith(oid1)
    except (AttributeError, TypeError):
        # Narrowed from a bare ``except`` that silently swallowed every
        # exception (including KeyboardInterrupt); non-string oids are
        # simply never part of the tree.
        return True
a1957c9d6b04dcd97db321f8d8779c349d14e265
48,867
def calculate_iou(bb1, bb2):
    """
    Calculates the Intersection Over Union (IOU) score of the two
    bounding-boxes (the overlap between two bounding boxes).
    Each bounding-box's coordinates are in the form of:
    (x_min, y_min, x_max, y_max).

    Returns 0.0 when both boxes are degenerate (zero area), instead of
    raising ZeroDivisionError.
    """
    bb1_x_min, bb1_y_min, bb1_x_max, bb1_y_max = bb1
    bb2_x_min, bb2_y_min, bb2_x_max, bb2_y_max = bb2

    # get the intersection's coordinates:
    intersection_x_min = max(bb1_x_min, bb2_x_min)
    intersection_x_max = min(bb1_x_max, bb2_x_max)
    intersection_y_min = max(bb1_y_min, bb2_y_min)
    intersection_y_max = min(bb1_y_max, bb2_y_max)

    # calculate the intersection's width, height, and area (clamped at 0
    # so non-overlapping boxes contribute nothing):
    intersection_w = max(intersection_x_max - intersection_x_min, 0)
    intersection_h = max(intersection_y_max - intersection_y_min, 0)
    intersection = intersection_w * intersection_h

    # union = area(bb1) + area(bb2) - overlap counted twice:
    union = ((bb1_x_max - bb1_x_min) * (bb1_y_max - bb1_y_min) +
             (bb2_x_max - bb2_x_min) * (bb2_y_max - bb2_y_min) -
             intersection)

    # Guard against two zero-area boxes: the previous version divided by
    # zero here.
    if union == 0:
        return 0.0
    return intersection / union
5a0379085bbe428e812b2b0a39b39046cc0adefc
48,868
def fastDot(A, B, C):
    """
    [Added 23/9/2018] [Updated 1/10/2018 Error in calculating which is faster]
    Computes a fast matrix multiplication of 3 matrices.
    Either performs (A @ B) @ C or A @ (B @ C) depending which is
    more efficient.
    """
    # Treat missing trailing dimensions (1-D operands) as size 1.
    a_shape = A.shape
    n = a_shape[0]
    p = a_shape[1] if len(a_shape) > 1 else 1
    b_shape = B.shape
    k = b_shape[1] if len(b_shape) > 1 else 1
    c_shape = C.shape
    d = c_shape[1] if len(c_shape) > 1 else 1

    # Scalar-multiplication counts for the two association orders:
    # (A @ B) @ C costs p*k*n + k*d*n = k*n*(p + d)
    cost_left_first = k * n * (p + d)
    # A @ (B @ C) costs p*d*n + k*d*p = p*d*(n + k)
    cost_right_first = p * d * (n + k)

    if cost_left_first <= cost_right_first:
        return (A @ B) @ C
    return A @ (B @ C)
22fbb77397ccae78b50e41b071da1980b6e9c75b
48,869
import os


def get_full_path(filepath):
    """
    Resolve ``filepath`` against the current working directory when needed.
    Will fail if neither candidate's parent directory exists.

    NOTE(review): when the input's own parent directory exists, the path is
    returned unchanged — a relative input stays relative in that case,
    despite the original docstring promising an absolute path.

    Args:
        filepath: string path to desired file.

    Raises:
        OSError: if the path cannot be validated.
    """
    candidate_in_cwd = os.path.join(os.getcwd(), filepath)
    if os.path.exists(os.path.dirname(filepath)):
        return filepath
    if os.path.exists(os.path.dirname(candidate_in_cwd)):
        return candidate_in_cwd
    raise OSError("Not a valid path.")
d078e5c94cb15126f8b43ce6b00495617bd45584
48,870
def _IOC(typ, nr, size, direction):
    """Build a generic 32-bit IOCTL request number.

    Layout: 2-bit direction | 14-bit size | 8-bit type | 8-bit nr.
    """
    return (direction << 30) | (size << 16) | (typ << 8) | nr
270782255b9207eb21cb14ce72a93e41528191d3
48,871
def string_to_seq(seq_str):
    """Return a codepoint sequence (tuple) given its string representation
    ('_'-separated hexadecimal values)."""
    return tuple(int(part, 16) for part in seq_str.split("_"))
1021578ecb694ec9fc974f0b590ef0fc7c71f20a
48,872
import torch


def create_pytorch_model(
    number_of_features, number_of_categories_by_label, label_to_test, **kwargs
):
    """
    Create a toy PyTorch model that has the right shape for inputs and
    outputs: a two-layer MLP mapping ``number_of_features`` inputs to one
    output per category of ``number_of_categories_by_label[label_to_test]``.
    """

    class TorchToy(torch.nn.modules.module.Module):
        def __init__(self, in_features, out_features):
            super(TorchToy, self).__init__()
            self.fc1 = torch.nn.Linear(in_features, 128)
            self.relu1 = torch.nn.ReLU()
            self.fc2 = torch.nn.Linear(128, out_features)

        def forward(self, x):
            hidden = self.relu1(self.fc1(x))
            return self.fc2(hidden)

    out_features = number_of_categories_by_label[label_to_test]
    return TorchToy(number_of_features, out_features)
d27a82bd581accb4fccba3c09f88f26b00127eab
48,873
from typing import Counter
import collections


def numericalize_tok(tokens, max_vocab=50000, min_freq=0, unk_tok="_unk_",
                     pad_tok="_pad_", bos_tok="_bos_", eos_tok="_eos_"):
    """Takes in text tokens and returns int2tok and tok2int converters

    Arguments:
        tokens(list): List of tokens. Can be a list of strings, or a list
            of lists of strings.
        max_vocab(int): Number of tokens to return in the vocab (sorted by
            frequency)
        min_freq(int): Minimum number of instances a token must be present
            in order to be preserved.
        unk_tok(str): Token to use when unknown tokens are encountered in
            the source text.
        pad_tok(str): Token to use when padding sequences.
    """
    if isinstance(tokens, str):
        raise ValueError("Expected to receive a list of tokens. Received a string instead")
    if isinstance(tokens[0], list):
        # Flatten a list of token lists into a single stream.
        tokens = [tok for sent in tokens for tok in sent]
    counts = Counter(tokens)
    vocab = [tok for tok, freq in counts.most_common(max_vocab) if freq > min_freq]
    # The four special tokens occupy the first slots; the unknown token sits
    # at index 3 so the defaultdict below can fall back to it.
    unk_id = 3
    int2tok = [bos_tok, pad_tok, eos_tok, unk_tok] + vocab
    tok2int = collections.defaultdict(
        lambda: unk_id, {tok: idx for idx, tok in enumerate(int2tok)})
    return int2tok, tok2int
48fafe61938203bde66edcab90ded616ed23aa82
48,874