content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import re


def address_group(address, group_name=None):
    """Return the part of an hdf address up to ``group_name``.

    :param address: str hdf address (``/`` or ``\\`` separated)
    :param group_name: str name of group; when None, the last component
        of the address is dropped instead.
    :return: reduced str
    """
    if group_name is None:
        # Drop the final component, tolerating backslash separators.
        parts = address.replace('\\', '/').split('/')
        return '/'.join(parts[:-1])
    # Keep everything up to (and including) the component containing
    # group_name, matched case-insensitively.
    pattern = r'(.+?%s.*?)(?:\/|$)' % group_name
    return re.findall(pattern, address, re.IGNORECASE)[0]
eaebf034e5716f1653ffe112d73109f5a8850e65
47,745
from datetime import datetime


def parse_timestamp(timestamp: str):
    """Parse an ISO8601 timestamp returned by discord.

    This is not a reliable method at all, and if you need an accurate and
    safe way to read properties that use this function, it is advised that
    you check out the dateutil or arrow libraries instead.

    :param timestamp: An ISO8601 timestamp
    :type timestamp: str
    :return: A parsed datetime object with the corresponding values
    :rtype: datetime.datetime
    """
    # Drop the trailing 6-character UTC-offset suffix (e.g. "+00:00").
    trimmed = timestamp[:-6]
    return datetime.strptime(trimmed, "%Y-%m-%dT%H:%M:%S.%f")
49c7300df8d18821cc98ff2770a93e7d119c54af
47,754
from typing import Iterable def _not_none_count(sequence: Iterable) -> int: """ A helper function for counting the number of non-`None` entries in a sequence. :param sequence: the sequence to look through. :return: the number of values in the sequence that are not `None`. """ return sum(1 for _ in filter(lambda item: item is not None, sequence))
82fe58ef458245655feba4c0d82c1160dbfba525
47,757
def get_task_parameter(task_parameters, name):
    """Return the task parameter whose 'name' entry equals *name*.

    Args:
        task_parameters (list): task parameter dicts.
        name (str): parameter name.

    Returns:
        The matching parameter dict, or None when absent.
    """
    return next(
        (param for param in task_parameters if param.get('name') == name),
        None,
    )
12fe7e38dd74c92b8042efd8f6e77404b7ef9c66
47,759
import math


def simple_project(latitiude: float) -> float:
    """Project a point to its corrected latitude for rhumbline calculations.

    :param latitiude: A float in radians.
    :return: The projected value in radians.
    """
    half_angle = math.pi / 4 + latitiude / 2
    return math.tan(half_angle)
9e1c530b11b6c1203a486f078a756f27b19a5603
47,760
def sort_group_connected(group):
    """Sort key: negated number of connected contacts (descending sort)."""
    nb_connected = group.get_nb_connected_contacts()
    return -nb_connected
2c3dd44db42ba72b7a7843417ce2365e99827f01
47,762
import torch


def reps_dot(sent1_reps: torch.Tensor, sent2_reps: torch.Tensor) -> torch.Tensor:
    """Batched dot product between two sets of sentence representations.

    :param sent1_reps: (N, sent1_len, reps_dim)
    :param sent2_reps: (N, sent2_len, reps_dim)
    :return: (N, sent1_len, sent2_len)
    """
    # Swap the last two axes of sent2 so the contraction runs over reps_dim.
    sent2_t = sent2_reps.transpose(-1, -2)
    return torch.bmm(sent1_reps, sent2_t)
ce8790433820f573b7c8c5ccd1f388abd917f513
47,765
def headers_to_table(markdown_headers, notebook_name):
    """Produces a markdown table of contents from markdown headers.

    This function uses a two pass solution to turn markdown headers into a
    table of contents. The first pass strips one # from every header and the
    second pass turns all remaining # into spaces. Then the function produces
    markdown links spaced in table formats.
    """
    if markdown_headers is None:
        return None
    # First pass just strips one leading '#': a top-level header becomes
    # nesting level 0, an h2 becomes level 1, etc.
    stripped_headers = []
    for header in markdown_headers:
        # While we're here let's clean up the data.
        header = header.replace("\n", "")
        # Third argument is max replace count, so only the first '#' goes.
        stripped_headers.append(header.replace("#", "", 1))
    # Second pass produces the actual table of contents.
    table_of_contents = []
    for header in stripped_headers:
        # Using the fact that markdown headers have to start on the
        # left side: the remaining '#' count is the nesting depth.
        pound_stripped_header = header.lstrip("#")
        nesting_count = len(header) - len(pound_stripped_header)
        # Remove spaces from the URL fragment. All spaces are converted
        # to '-' so '--' is valid.
        href_header = pound_stripped_header.replace(" ", "-")
        # Finally construct the toc line: 4 spaces of indent per nesting
        # level, then a markdown link.
        # NOTE(review): links are hard-coded to a localhost:8888 Jupyter
        # server -- presumably intentional for local notebooks; confirm.
        toc_line = (u"{spaces}* [{header_name}]"
                    "(http://localhost:8888/notebooks/{notebook_name}#{href_header})").format(
            spaces=" " * (nesting_count * 4),
            header_name=pound_stripped_header,
            notebook_name=notebook_name,
            href_header=href_header,)
        table_of_contents.append(toc_line)
    return table_of_contents
3e1e05ad310056baf7167a6d4eb0f1eb4b4ea5d0
47,768
import time


def expires_header(duration):
    """Build an Expires-style header value *duration* seconds from now.

    :param duration: number of seconds in the future.
    :return: formatted GMT time string, e.g. "Mon, 01-Jan-2024 00:00:00 GMT".
    """
    # %T is a glibc extension that fails on some platforms (e.g. Windows);
    # %H:%M:%S is the portable equivalent producing identical output.
    return time.strftime("%a, %d-%b-%Y %H:%M:%S GMT",
                         time.gmtime(time.time() + duration))
d81a89ed617f2d0544ff2b9ad9c7e5f9e27394f1
47,769
def replace_by(value, VR, action, default_name="John Doe", default_date="18000101",
               default_datetime="180001010000.000000", default_time="0000.000000",
               default_text="anon", default_code="ANON", default_age="000M",
               default_decimal="0.0", default_integer="0", default_uid="000.000.0"):
    """ Replace a 'value' depending on the de-identification 'action' and the
    value representation 'VR' (PS 3.6 convention).

    Action codes:
    * D - replace with a non zero length value that may be a dummy value and
      consistent with the VR
    * Z - replace with a zero length value, or a non-zero length dummy value
      consistent with the VR
    * X - remove
    * K - keep (unchanged for non-sequence attributes, cleaned for sequences)
    * C - clean: replace with values of similar meaning known not to contain
      identifying information and consistent with the VR
    * U - replace with a non-zero length UID that is internally consistent
      within a set of Instances
    * Z/D, X/Z, X/D, X/Z/D, X/Z/U* - conditional combinations of the above
      depending on IOD conformance type.

    :param value: current attribute value (only used in error messages)
    :param VR: DICOM value representation code
    :param action: de-identification action code
    :return: the replacement value, or None when the attribute is removed
    :raises Exception: for unsupported VRs or actions
    """
    if action in ["X", "X/Z", "X/D", "X/Z/D", "X/Z/U*"]:
        return None
    if action not in ["U", "D", "Z", "Z/D"]:
        raise Exception("Action '{0}' is not yet supported.".format(action))
    if VR == "DA":
        return default_date
    if VR == "AS":
        return default_age
    if VR == "DS":
        return default_decimal
    if VR == "DT":
        return default_datetime
    if VR == "TM":
        return default_time
    if VR in ("FL", "FD"):
        # float() replaces the original eval() call, which would execute
        # arbitrary code if a caller passed a malicious default string.
        return float(default_decimal)
    if VR == "IS":
        return default_integer
    if VR in ("UL", "US", "SS", "SL"):
        # int() likewise replaces eval().
        return int(default_integer)
    if VR == "PN":
        return default_name
    if VR == "UI":
        return default_uid
    if VR == "CS":
        return default_code
    if VR in ("LO", "LT", "ST", "SH"):
        return default_text
    if VR in ("OB", "OW"):
        return default_integer
    raise Exception("VR '{0}' is not yet supported. Current value is "
                    "'{1}'.".format(VR, value))
95139886417123c5678eaad1855edbff06087585
47,770
def _field_update(op, path, value): """Return a dictionary for a field operation :param op: *add*, *replace* or *test* :param path: Path of field :param value: Field value :return: dict """ return { "op": op, "path": "/fields/{}".format(path) if "/fields/" not in path else path, "value": value }
8f1b4841828f04119ca21e86b5d0eb43273ce01e
47,773
def vocab_from_file(path):
    """ Read all words of vocabulary from a file.

    Parameters
    ----------
    path : string
        Path of vocabulary file; one word (optionally followed by a
        frequency) per line.

    Returns
    -------
    index2word : dict_like, shape (vocabsize)
        Dictionary that contains indices (starting at 1) as keys, and words
        as values.
    word2index : dict_like, shape (vocabsize)
        Dictionary that contains words as keys, and indices as values.
    """
    WORD_INDEX = 0
    index2word = dict()
    word2index = dict()
    # 'with' guarantees the handle is closed even if parsing fails
    # (the original left the file open on any exception).
    with open(path, 'r') as fin:
        for line in fin:
            word_freq = line.split()
            if not word_freq:
                # Skip blank lines instead of crashing with IndexError.
                continue
            word = word_freq[WORD_INDEX]
            word2index[word] = len(word2index) + 1
            index2word[len(index2word) + 1] = word
    return (index2word, word2index)
9434935f4aca26a9867e9471d9060c6b1617cf1e
47,783
from typing import List
from typing import Optional


def create_patient_resource(patient_identifier: List[dict],
                            gender: str,
                            communication: Optional[List[dict]] = None) -> dict:
    """ Create a patient resource following the FHIR format
    (http://www.hl7.org/implement/standards/fhir/patient.html). """
    gender_codes = {"male", "female", "other", "unknown"}
    assert gender in gender_codes, \
        f"Gender must be one of these gender codes: {gender_codes}"
    resource = {
        "resourceType": "Patient",
        "identifier": patient_identifier,
        "gender": gender,
    }
    # 'communication' is optional and only attached when non-empty.
    if communication:
        resource["communication"] = communication
    return resource
0d8046f3600856ebecf3413ea3247f893c852885
47,791
def minimum(key):
    """ Curried version of the built-in min.

    >>> Stream([[13, 52], [28, 35], [42, 6]]) >> minimum(lambda v: v[0] + v[1])
    [42, 6]
    """
    def apply(stream):
        return min(stream, key=key)
    return apply
80877ce3d9eb51b6221a2f8c39a043a212a01406
47,794
import math


def solar_apparent_longitude(solar_true_longitude, julian_century):
    """Return the Solar Apparent Longitude from the Solar True Longitude,
    solar_true_longitude, and Julian Century, julian_century."""
    # Longitude of the ascending node of the Moon's orbit, in radians.
    omega = math.radians(125.04 - 1934.136 * julian_century)
    return solar_true_longitude - 0.00569 - 0.00478 * math.sin(omega)
37af338fdf61061475e0b10db106effae878eb48
47,796
def test_odd(value): """Return true if the variable is odd.""" return value % 2 == 1
3ee4d6b5b66c108a09408af70e6f6a3d37ccc729
47,798
def index_acl(acl):
    """Return a ACL as a dictionary indexed by the 'entity' values of the ACL.

    We represent ACLs as lists of dictionaries, which makes them easy to
    convert to JSON. When changing them we need a single element per
    `entity` value, so it is convenient to convert the list into a
    dictionary (indexed by `entity`) of dictionaries.

    :param acl: list of dict
    :return: the ACL indexed by the entity of each entry.
    :rtype: dict
    """
    # Later duplicates overwrite earlier ones, matching a loop's behaviour.
    return {entry["entity"]: entry for entry in acl}
4a49cab07fbb06045e0ced58d413b7255ad841c7
47,799
def payload_string(nibble1, nibble2, nibble3, nibble4):
    """Return the hex string representation of a four-nibble payload."""
    return f'0x{nibble1:X}{nibble2:X}{nibble3:X}{nibble4:X}'
aa80620ebcaec8bebb087fb953d065f9165cc2c0
47,804
def create_operation(cls, a, b):
    """Create an operation of type *cls* between a and b, flattening the
    operands when either argument is already an operation of that class."""
    left = a.operands if isinstance(a, cls) else [a]
    right = b.operands if isinstance(b, cls) else [b]
    return cls(*left, *right)
5f142dd692d96835e15e3c5934c8f53e70582b9d
47,808
def convert_rgb_tuple(tuple_256):
    """Convert an 8-bit (R, G, B) tuple to [0, 1] floats.

    E.g. (255, 247, 0) -> (1., 0.9686..., 0.) representing a specific
    yellow colour, namely #FFF700 in Hex(adecimal).
    """
    scaled = (float(channel) / 255 for channel in tuple_256)
    return tuple(scaled)
1748c6207b58d68ace9e947359cd686964ceb207
47,812
import torch


def categorical_mean_average_precision(preds, targets, pred_name='output',
                                       target_name='map'):
    """ Compute the mean average precision from predictions given as logits,
    for a categorical segmentation task. (Averaging over the batch items
    happens outside of this function.) """
    logits = preds[pred_name]
    gt = targets[target_name]
    # Rank classes from most to least probable.
    ranking = torch.argsort(logits, -1, descending=True)
    # Locate the correct class within that ranking (bool mask).
    hits = (ranking == gt[..., None])
    # Rank of the correct class (bool -> byte before argmax).
    rank = hits.byte().argmax(-1)
    # Inverse rank is the average precision for a single relevant item.
    return 1. / (1 + rank)
0882394ab1d4a4e0b53565df84aa1aefecfbd88f
47,835
def make_orbit_codes(text_file):
    """Read a text file with each line in form "ASK)DGE" and return the list
    of codes.

    :param text_file: path to the orbit-code file.
    :return: list of stripped code strings.
    """
    # Context manager closes the file (the original leaked the handle).
    with open(text_file) as fin:
        return [line.strip() for line in fin]
fbbe9df54f7d51789cbe4ea59abcb05b45fcd0eb
47,839
def resolve_name(name):
    """
    Resolve a dotted name to some object (usually class, module, or function).

    Supported naming formats include:
        1. path.to.module:method
        2. path.to.module.ClassName

    >>> resolve_name('coilmq.store.memory.MemoryQueue')
    <class 'coilmq.store.memory.MemoryQueue'>
    >>> t = resolve_name('coilmq.store.dbm.make_dbm')
    >>> import inspect
    >>> inspect.isfunction(t)
    True
    >>> t.__name__
    'make_dbm'

    @param name: The dotted name (e.g. path.to.MyClass)
    @type name: C{str}

    @return: The resolved object (class, callable, etc.) or None if not found.
    """
    if ':' in name:
        # Normalize foo.bar.baz:main to foo.bar.baz.main
        # (since our logic below will handle that)
        name = '%s.%s' % tuple(name.split(':'))
    name = name.split('.')
    # Import the top-level package first; deeper attributes are resolved
    # one component at a time below.
    used = name.pop(0)
    found = __import__(used)
    for n in name:
        used = used + '.' + n
        try:
            found = getattr(found, n)
        except AttributeError:
            # The attribute is not yet available on the parent --
            # importing the submodule makes it accessible via getattr.
            __import__(used)
            found = getattr(found, n)
    return found
bc072c9763815f87882c68fe1464b88aa3b36d6a
47,845
def _orelse(exp1, exp2): """Generates an SQLite expression that evaluates to exp1 if exp1 is non-null and non-empty or exp2 otherwise. """ return ('(CASE {0} WHEN NULL THEN {1} ' 'WHEN "" THEN {1} ' 'ELSE {0} END)').format(exp1, exp2)
098d29a63eeec23cf52f6280700e45bdeb8536f3
47,848
import functools
import time


def process_time(logger):
    """ Decorator for measuring the elapsed wall time of a process; the
    result is logged via *logger*. """
    def decorator_wrapper(func):
        @functools.wraps(func)
        def wrapper_process_time(*args, **kwargs):
            logger.info("Process {} STARTED.".format(func.__name__))
            started = time.perf_counter()
            result = func(*args, **kwargs)
            finished = time.perf_counter()
            logger.info("Process {0} FINISHED. Ellapsed time: {1:.4f}".format(
                func.__name__, finished - started))
            return result
        return wrapper_process_time
    return decorator_wrapper
b48badb735c040427750560ebe58b96f772b1339
47,850
def specific_heat_ratio(Cp: float, Cv: float) -> float:
    """Compute the specific heat ratio (gamma).

    Args:
        Cp (float): Specific heat at constant pressure [J/(kg*K)].
        Cv (float): Specific heat at constant volume [J/(kg*K)].

    Returns:
        float: Specific heat ratio [dimensionless].
    """
    ratio = Cp / Cv
    return ratio
89fd11f36a9ba7e8416ef6433ecab094754a9bf8
47,851
import requests


def mw_search(server, query, num):
    """ Search the specified MediaWiki server for the given query and return
    up to *num* result titles, or None when the response has no query
    section. """
    # Let requests build the query string so the search term is URL-encoded
    # (the original concatenated it raw, breaking queries containing
    # characters such as '&', '#' or spaces).
    search_url = 'http://%s/w/api.php' % server
    params = {
        'format': 'json',
        'action': 'query',
        'list': 'search',
        'srlimit': num,
        'srprop': 'timestamp',
        'srwhat': 'text',
        'srsearch': query,
    }
    response = requests.get(search_url, params=params).json()
    if 'query' in response:
        return [r['title'] for r in response['query']['search']]
    return None
7559f3e2576f05f41c732981cccb65c1920a31f1
47,853
def normalise_number(a):
    """ Split the power part of a number and its float value normalised
    into [0.1, 1).

    Example
    -------
    >>> normalise_number(1.433364345e9)
    (0.1433364345, 10)
    >>> normalise_number(14e-6)
    (0.13999999999999999, -4)
    >>> normalise_number(-14e-6)
    (-0.13999999999999999, -4)
    """
    if a == 0.:
        return 0., 0
    power = 0
    v = a
    if abs(v) < 0.1:
        # Scale small magnitudes up into [0.1, 1).
        while abs(v) < 0.1:
            v *= 10.
            power -= 1
    else:
        # ">= 1" (not "> 1") so values such as 1.0 or 10.0 are also brought
        # into [0.1, 1); the original left them at exactly 1.0, violating
        # the normalisation contract shown in the examples.
        while abs(v) >= 1:
            v /= 10.
            power += 1
    return v, power
b34373f56ebb110f6a86572cbfc968a8b3094d7d
47,854
def remove_xml(rid):
    """ Strip a trailing '.xml' from a resource or spec id, if present. """
    # '.xml' occupying the last four characters is exactly endswith('.xml').
    return rid[:-4] if rid.endswith('.xml') else rid
e57c7ccfdfb130092ef0f2fd8412d4771fd716aa
47,855
from typing import Type


def isgeneric(class_type: Type) -> bool:
    """Check if a class type is a generic type (List[str] or
    Union[str, int])."""
    # Generic aliases expose the originating class via __origin__.
    has_origin = hasattr(class_type, "__origin__")
    return has_origin
49f9efbd474852a7a6fd91ba7a1586ede494203d
47,857
def _read_vector(instream, structure, name): """Reads a vector from the instream and returns it as a tuple.""" try: v = instream.read(structure.size) vector = structure.unpack(v) return vector except Exception as e: raise RuntimeError("Failed to read vector " + "{} // {}".format(name, str(e)))
46ce645feef6cf0d8b3425a216bd25c374fab9af
47,858
def get_project_title_html(project):
    """Return HTML version of the full project title including parents."""
    html = ''
    if project.get_parents():
        # Parent chain is everything in full_title except the last part.
        parents = project.full_title.split(' / ')[:-1]
        html += ' / '.join(parents) + ' / '
    return html + project.title
23be200d2b3d4047a85b3040bec6063f1c043c01
47,863
import hashlib


def hash_path(path: str) -> str:
    """Create a UTF-8 SHA1 hash of an input string.

    Arguments:
        path: The string value to hash.

    Return:
        UTF-8 SHA1 hash of `path`.
    """
    # Named 'digest' to avoid shadowing the builtin hash().
    digest = hashlib.sha1(path.encode('utf-8')).hexdigest()
    return digest
66d00b5705be013f56c8c9501f527b31766f7b82
47,865
def check_input(user_input):
    """ Check if the input meets the requirements: 4 alphabetic characters
    separated by single blanks.

    :param user_input: str, can be anything
    :return: bool, whether input passed the check
    """
    if len(user_input) != 7:
        return False
    letters_ok = all(user_input[i].isalpha() for i in (0, 2, 4, 6))
    blanks_ok = all(user_input[i] == ' ' for i in (1, 3, 5))
    return letters_ok and blanks_ok
bffa4a2e1a7c188cee3b1eab579a74e856277be5
47,867
def is_less_than_thresh(t_ref, t_comp):
    """Check that t_comp is effectively less than t_ref with the congestion
    level given by max_threshold."""
    max_threshold = 100  # expressed as a percentage
    limit = t_ref + max_threshold * t_ref / 100
    return t_comp < limit
ed9985e1f1c82f8324f69cca45a67b24d0a15559
47,869
def createYangHexStr(textString):
    """ Convert a plain hex string into yang:hex-string (colon-separated
    byte pairs). """
    pairs = [textString[i:i + 2] for i in range(0, len(textString), 2)]
    return ':'.join(pairs)
9e081e50efca5331f1bbad924c61549c6835e8e3
47,872
def cached(oldMethod):
    """Decorator making a no-argument method cache its result on the
    instance."""
    attr = f'_cached_{oldMethod.__name__}'

    def newMethod(self):
        # Compute once, then serve the stored value from the instance.
        if not hasattr(self, attr):
            setattr(self, attr, oldMethod(self))
        return getattr(self, attr)
    return newMethod
72b2fe49abc717de279e4f13869c9e1de9b91f01
47,873
def _label_to_path(label): """ Given a Label, turn it into a path by keeping the directory part of the name, and attaching that to the package path. """ base = label.package name = label.name last_slash = name.rfind("/") if last_slash >= 0: base = "{}/{}".format(base, name[0:last_slash]) return base
3e822402d5c91aba7a4d7b9d21ce302f19fc9939
47,874
def line_side(start_vector, end_vector, position_vector):
    """ Find out which side of the line (start_vector -> end_vector) the
    position_vector is on.

    Args:
        start_vector: vector object with .x and .y attributes
        end_vector: vector object with .x and .y attributes
        position_vector: vector object with .x and .y attributes

    Returns:
        bool: True on one side of the line, False on the other
        (sign of the 2D cross product).
    """
    dx = end_vector.x - start_vector.x
    dy = end_vector.y - start_vector.y
    px = position_vector.x - start_vector.x
    py = position_vector.y - start_vector.y
    return dx * py - dy * px > 0
26ebb60f6f8779c8be7ef2068bfb5e4c255657c0
47,877
import configparser


def load_config(path, defaults=None):
    """Load an INI config file, seeding the DEFAULT section with *defaults*."""
    cfg = configparser.ConfigParser()
    cfg['DEFAULT'] = defaults or {}
    cfg.read(path)
    return cfg
9c853f9344848652fbe627b1b4fcaa9d91672c61
47,878
def major_axis_equiv_diam_ratio(prop, **kwargs):
    """Return the ratio of the major axis length to the equivalent diameter.

    Args:
        prop (skimage.measure.regionprops): The property information for a
            cell returned by regionprops
        **kwargs: Arbitrary keyword arguments (unused)

    Returns:
        float: major axis length / equivalent diameter
    """
    axis_length = prop.major_axis_length
    diameter = prop.equivalent_diameter
    return axis_length / diameter
fd2e1668577cac77788415f8ecfefd9d9b8df926
47,880
def indent(text, n=4):
    """ Indent each line of text with n spaces.

    :param text: text
    :param n: amount of spaces to indent

    >>> indent("")
    ''
    >>> indent("the quick brown fox\\njumped over an lazy dog\\nend")
    '    the quick brown fox\\n    jumped over an lazy dog\\n    end'
    """
    if not text:
        return ""
    prefix = " " * n
    return prefix + text.replace("\n", "\n" + prefix)
4720d29dfa8095342358317396d4e7d4208f2bcc
47,886
def calcCropSensorWidth(sensorWidth, nativeAspectRatio, mediaAspectRatio):
    """ Calculate effective/utilised width of a camera sensor when
    image/video is recorded at a non-native aspect ratio. """
    native = nativeAspectRatio[1] / nativeAspectRatio[0]
    media = mediaAspectRatio[1] / mediaAspectRatio[0]
    cropRatio = native / media
    return sensorWidth * cropRatio
a58dcf9bf2520f27ac74afe995b8ea045fbc9778
47,889
import math


def add_aggregate_info(site_and_date_info, percentage, sorted_names):
    """
    Function is used to add an 'aggregate metric' that summarizes all of
    the data quality issues for a particular site on a particular date.

    NOTE: This function DOES NOT take the weighted value of all of these
    metrics. This is merely to attach the aggregate statistic.

    NOTE: This is for the DICTIONARY with the date as the first set of keys.

    :param site_and_date_info (dict): dictionary with key:value of
        date:additional dictionaries that contain metrics for each HPO's
        data quality by type
    :param percentage (boolean): used to determine whether or not the number
        is a simple record count (e.g. duplicates) versus the percentage of
        records (e.g. the success rate for each of the tables)
    :param sorted_names (lst): list of the names that should have an
        aggregate statistic analyzed (e.g. avoiding 'avarage' statistics)
    :return: site_and_date_info (dict): same as input parameter but now each
        site and date has an added aggregate statistic.
    """
    for date in site_and_date_info.keys():
        date_report = site_and_date_info[date]
        date_metric, num_iterated = 0, 0
        for site in sorted_names:
            table_metrics = date_report[site]
            # NOTE(review): the accumulators are reset here on every site,
            # so after the loop they only reflect the LAST site's tables --
            # presumably unintended given the docstring says the metric
            # summarizes ALL sites; confirm before relying on this value.
            date_metric, num_iterated = 0, 0
            for table in table_metrics.keys():
                stat = table_metrics[table]
                # Skip NaN table statistics so they don't poison the sum.
                if not math.isnan(stat):
                    date_metric += stat
                    num_iterated += 1
        # NOTE: 'AGGREGATE INFO' SHOULD NOT BE USED FOR
        # THE PERCENTAGE METRIC. THIS IS BECAUSE THE
        # FIRST 'AGGREGATE INFO' DOES NOT WEIGHT SITES
        # BY THEIR RELATIVE CONTRIBUTIONS (BY # OF ROWS).
        if percentage and num_iterated > 0:
            date_metric = date_metric / num_iterated
        elif percentage and num_iterated == 0:
            date_metric = float('NaN')
        date_report['aggregate_info'] = date_metric
    return site_and_date_info
71eb0b8d33bcbf9ad04f53621d7959995112bc47
47,906
from typing import Tuple


def format_hint(title: str, description: str, original_hint: str) -> str:
    """Generate the complete hint message.

    Arguments:
        title: The title of the draft to edit or commit.
        description: The description of the draft to edit or commit.
        original_hint: The original hint message.

    Returns:
        The complete hint message.
    """
    lines = [title]
    if description:
        # Blank line separates the title from the description.
        lines.extend(["", description])
    lines.append(original_hint)
    return "\n".join(lines)
e3439b14e9344de891c73d0f84ebdd6f8759f042
47,907
import copy


def get_median(my_list):
    """ Gets the median in a list of numeric values.

    Args:
        my_list (list of ordinal): The list to find from.

    Returns:
        (float): The median.

    Notes:
        Throws a ZeroDivisionError on an empty list (kept for backward
        compatibility with the original implementation).
    """
    n = len(my_list)
    if n == 0:
        raise ZeroDivisionError
    # sorted() already returns a new list, so no deepcopy is needed, and
    # indexing replaces the original's O(n^2) repeated end-trimming.
    ordered = sorted(my_list)
    mid = n // 2
    if n % 2 == 0:
        return (ordered[mid - 1] + ordered[mid]) / 2
    return ordered[mid]
86529e37928d31553f2ace906b9b9870b365d547
47,908
def iterable_response_to_dict(iterator):
    """ Convert a Globus paginated/iterable response object to a dict. """
    collected = []
    for item in iterator:
        # Prefer the item's .data payload when present (EAFP).
        try:
            collected.append(item.data)
        except AttributeError:
            collected.append(item)
    return {"DATA": collected}
9764718a922a310b2892e1896657dd2af24e7a8f
47,912
def load_words(path):
    """ Load a list of words from a text file.

    Args:
        path - path to a file containing line separated words
    Returns
        List of strings

    Raises:
        EOFError: when the file contains no text.
    """
    with open(path, 'r') as fh:
        words = [line.strip() for line in fh.readlines()]
    if not words:
        raise EOFError("No text found in file")
    return words
d7c94937b6257f2af28701ebdf4766251d50a7ee
47,916
def run_metrics( metricObject, args ):
    """ Runs metric for individual sequences.

    Params:
    -----
    metricObject: metricObject that has computer_compute_metrics_per_sequence function
    args: dictionary with args for evaluation function
    """
    # Evaluate in place on the metric object, then hand the (mutated)
    # object back to the caller for convenience.
    metricObject.compute_metrics_per_sequence(**args)
    return metricObject
cc39cc3ea22bf798fcef61189cddd6838325cda7
47,918
def condTo25(cond, temp):
    """ Convert a measured specific conductance to its value at 25 C.

    Uses the relation from Sorensen and Glass (1987), as found in
    Equation 4 of Hayashi (2004).

    Parameters
    ----------
    cond : float or array
        measured value of specific conductance
    temp : float or array
        temperature of water at which specific conductance was measured.

    Returns
    -------
    cond25 : float or array
        specific conductance at 25 C.
    """
    a = 0.0187  # temperature-compensation coefficient from Hayashi, 2004
    return cond / (1 + a * (temp - 25.0))
58f95851280e83e8bd3fb332e0ad3b02b9b53c48
47,920
def get_repository_metadata_by_id( trans, id ):
    """Get repository metadata from the database by its encoded id"""
    decoded_id = trans.security.decode_id( id )
    query = trans.sa_session.query( trans.model.RepositoryMetadata )
    return query.get( decoded_id )
7db2685ff113ae3d08b6acc6f9d16039e889fdf4
47,921
def _normalize_email(email: str) -> str: """Normalize email. Arguments: email {str} -- Email to normalize. Returns: str -- Normalized email. """ return email.upper()
69f7c5238fe2dd8a0be08139373af91a1609d04f
47,923
def minmax(dmax_cup):
    """ Find min and max Y/Z positions over all shots.

    Parameters
    ----------
    dmax_cup: list of tuples
        each tuple holds shot parameters; Y is at index 3, Z at index 4

    returns: tuple of floats
        shot Y min, Y max, Z min, Z max
    """
    # Defaults reproduce the original sentinel values on an empty list.
    ymin = min((shot[3] for shot in dmax_cup), default=10000.0)
    ymax = max((shot[3] for shot in dmax_cup), default=-10000.0)
    zmin = min((shot[4] for shot in dmax_cup), default=10000.0)
    zmax = max((shot[4] for shot in dmax_cup), default=-10000.0)
    return (ymin, ymax, zmin, zmax)
fb337d87db03edcf2c95ac8690f0fce5561a5210
47,926
def create_json_from_stories(stories_and_location):
    """ Convert the preprocessed stories into a list of dictionaries;
    this helps with making the 3d visualizations. """
    keys = ("title", "locations", "link", "geo_coordinates", "category",
            "img_url", "summary")
    # Each record is a 7-tuple matching the key order above.
    return [dict(zip(keys, record)) for record in stories_and_location]
3603d09d0f55c5abb275ae84498e9abee8d39fe1
47,927
from typing import Set
import inspect


def get_init_properties(cls, to_class=object) -> Set[str]:
    """Given a class, determine the properties that class needs.

    Assumes that each sub-class will call super with **kwargs.
    (Which is not a good general assumption, but should work well enough
    for Handlers.)

    cls is the class to check, to_class is the final parent class to check.

    Returns a set of all parameters found.
    """
    result = set()
    init = getattr(cls, '__init__', None)
    if init is not None:
        for param in inspect.signature(init).parameters.values():
            if param.kind == param.VAR_KEYWORD:
                # Ignore any **kwargs
                continue
            if param.name == 'self':
                continue
            result.add(param.name)
    # Recurse up the MRO while the next class is still below to_class.
    # NOTE(review): mro()[1] only follows the first base class -- other
    # bases in a multiple-inheritance hierarchy are not visited; confirm
    # this is acceptable for the Handler classes it is used with.
    if issubclass(cls.mro()[1], to_class):
        result |= get_init_properties(cls.mro()[1], to_class)
    return result
5fa747fd98d93885292cd71676435e30b7a9ec91
47,931
def read_file(path):
    """Return the text contents of the file at *path*."""
    with open(path, 'rt') as handle:
        contents = handle.read()
    return contents
6379c396e2c9a3786533dd02c004e5fd046b6b51
47,933
def hms2stringTime(h, m, s, precision=5):
    """ Convert a sexagesimal time to a formatted string.

    Parameters
    ----------
    hour, min, sec : int, float
    precision : int

    Returns
    -------
    String formatted +/-HH:MM:SS.SSSSS
    """
    sign = '-' if (h < 0 or m < 0 or s < 0) else '+'
    # Seconds field width = precision digits + 'SS.' (two digits + point).
    template = '%s%02d:%02d:%0{w}.{p}f'.format(w=precision + 3, p=precision)
    return template % (sign, abs(h), abs(m), abs(s))
0b3959ec2ac31e248ccb1c28b1e2cde03483ae26
47,934
def display_name_to_class(value):
    """ Convert an aggregation display name to a string usable as a CSS
    class name. """
    css_class = value.replace(" ", "_")
    return css_class.lower()
cf09338ccb686c15132cc8b4f97f3c897dacb575
47,938
def _get_attachment_keys(zot, coll_id) -> list: """Retrieves attachment keys of attachments in provided collection. Args: zot (zotero.Zotero): Zotero instance coll_id (str): Collection ID. Returns: list: List of attachment keys. """ attach = zot.everything( zot.collection_items(coll_id, itemType='attachment')) if len(attach) == 0: raise KeyError( 'No attachments exist in this collection') else: attach_keys = list(set([x['key'] for x in attach])) return attach_keys
e64c2ba21c5b98a3a1bfd7d7b82d9512a9da0a77
47,943
def uncolorize(msg):
    """ Strip ANSI SGR color codes from a string. """
    import re
    # The original removed only the literal '\033[1;' prefix and then
    # blindly sliced characters off the result, corrupting most colored
    # strings; matching the whole escape sequence fixes that.
    return re.sub(r'\x1b\[[0-9;]*m', '', msg)
7413f25fb372759f12f7f767e94f21de84a92e0f
47,949
def set_transform_metric(method, metric):
    """Set the similarity metric on a registration method.

    Args:
        method (Sitk.ImageRegistrationMethod): registration computation
            method (sitk)
        metric (str): cc, ants, mi or msq

    Returns:
        (Sitk.ImageRegistrationMethod): registration computation method (sitk)
    """
    setters = {
        "cc": lambda: method.SetMetricAsCorrelation(),
        "ants": lambda: method.SetMetricAsANTSNeighborhoodCorrelation(2),
        "mi": lambda: method.SetMetricAsJointHistogramMutualInformation(),
        "msq": lambda: method.SetMetricAsMeanSquares(),
    }
    setter = setters.get(metric)
    # Unknown metric names leave the method unchanged, as before.
    if setter is not None:
        setter()
    return method
275a369ed22656fc6a7ef3bc7beabd1673cd9aab
47,950
def f2c(fahrenheit):
    """ Convert Fahrenheit to Celsius.

    :param fahrenheit: [float] Degrees Fahrenheit
    :return: [float] Degrees Celsius
    """
    # Same evaluation order as the original to keep float results identical.
    celsius = 5 / 9 * (fahrenheit - 32)
    return celsius
0e03daebcc6286b4d1b92f3259b6b908e49c8bfd
47,957
import ast


def process_alias(ast_alias: ast.alias) -> str:
    """ Process an ast alias: return its 'as' name, or its plain name when
    no alias is present.

    :param ast_alias: An ast alias object
    :return: A string with the resolved name
    """
    return ast_alias.name if ast_alias.asname is None else ast_alias.asname
db33f839151c4825c1591e80e6058ffbf1b90441
47,960
def decode_http_header(raw):
    """ Decode a raw HTTP header into a unicode string.

    RFC 2616 specifies that headers should be latin1-encoded
    (a.k.a. iso-8859-1). A falsy input (None or empty) yields an empty
    unicode string.

    :param raw: Raw HTTP header string.
    :type raw: string (non-unicode)
    :returns: Decoded HTTP header.
    :rtype: unicode string
    """
    if not raw:
        return u''
    return raw.decode('iso-8859-1', 'replace')
fdb322e1fc6b8d9ff5cfdc02001015e2ebeaf606
47,961
def getDetailsName(viewName):
    """ Return the Details sheet name formatted with the given view. """
    return f'Details ({viewName})'
574c198a24a5ad1d4b6005af06a57999298a1e7f
47,969
def get_releases(events):
    """ Get all target releases from a list of events.

    :return: List of (major, minor) release tuples, sorted in
        ascending order
    """
    unique_releases = {event.to_release for event in events}
    return sorted(unique_releases)
adbf665c3b7a0068810a44c3c916bfcad6a3ef14
47,973
def get_N_RL(sym_list):
    """ Compute the maximum "number of reachable locations" ($N_{r}$)
    over all possible locations.

    From the paper: formally $N_{r}$ is calculated from an empirical
    symbolic time series $\\mathcal{T} = \\{s_{1}, \\ldots, s_{m}\\}$, with
    the set of all possible spatial locations being $\\Omega$, as
    $N_{r} = \\max_{x \\in \\Omega} | \\{ s_{i+1} : s_i = x \\} |$.

    :param sym_list: A list of location symbols
    :type sym_list: list
    """
    successors = {}
    # Pair each symbol with its immediate successor.
    for current, following in zip(sym_list, sym_list[1:]):
        successors.setdefault(current, set()).add(following)
    return max((len(next_set) for next_set in successors.values()), default=0)
67b89b1d74dd2e24c60e5958ae6e8a7f09e1eb19
47,977
def ft2m(ft):
    """Convert feet to meters."""
    meters_per_foot = 0.3048
    return ft * meters_per_foot
972cc2f18b3910ff6d43fe093658f1b2b361464c
47,980
def padStr(s, field=None):
    """ Pad the beginning of a string with spaces, if necessary. """
    if field is None:
        return s
    # str.rjust pads with spaces only when the string is shorter than field.
    return s.rjust(field)
e5d339a2aee7fe2a0a80d507f356840ea71254da
47,983
def make_dlc_columns(
    dlc_df, remove_header_rows=True, reset_index=True, drop=True
):
    """
    Replaces the default column names (e.g 'DLC_resnet50_...'), with more
    useful names, combining rows 0 and 1.

    :param dlc_df: Dataframe loaded from DLC output
    :param remove_header_rows: Remove the two rows used to make the
        column names from the data
    :param reset_index: Reset the dataframe index (after removing the
        header rows)
    :param drop: When resetting the dataframe index, do not try to
        insert index into dataframe columns.
    """
    # Fuse the two header rows into "<top>_<bottom>" column labels.
    top_row = dlc_df.iloc[0]
    bottom_row = dlc_df.iloc[1]
    dlc_df.columns = top_row + "_" + bottom_row
    if remove_header_rows:
        dlc_df = dlc_df.iloc[2:]
    # Note: index reset is applied independently of header-row removal.
    if reset_index:
        dlc_df = dlc_df.reset_index(drop=drop)
    return dlc_df
8dde48349bd9eef8012ea34694d50bb6f205afed
47,984
def get_str_from_list(message_list: list, cc: str = "and", punct: bool = True) -> str: """Returns list as a formatted string for speech. message list: [list] of the components to be joined. cc: [str] coordinating conjunction to place at end of list. punct: bool - indicates if should include punctuation at end of list. """ speech_list = [] if not message_list: return '' elif len(message_list) == 1: message = str(message_list[0]) if punct: message += "." return message for i in range(len(message_list)): if i == len(message_list) - 1: speech_list.append(' and ') speech_list.append( str(message_list[i])) if i != len(message_list) - 1: speech_list.append(', ') if punct: speech_list.append('.') return ''.join(speech_list)
224821fe0e0ca6b30001f467816df97bad37237a
47,990
def dequeueMessage(queue):
    """
    Dequeue a single message from the queue and return its body.

    Returns None if no message found.
    """
    message = queue.read()
    if message is None:
        return None
    # Remove the message from the queue before handing back its body.
    queue.delete_message(message)
    return message.get_body()
7f8e3391d065736b49520b8d52e741629b35bfeb
47,992
from typing import Dict def validate_batch_data(data: Dict) -> bool: """Validate request data for batch load""" return 'field_id' in data \ and isinstance(data.get('rows', None), list)
dd2e9489f3abc36d95a4739e685698f7714af731
47,994
import functools def handle_top_exception(logger): """A decorator that will catch exceptions and log the exception's message as a CRITICAL log.""" def decorator(fnc): @functools.wraps(fnc) def wrapped(*args, **kwargs): try: return fnc(*args, **kwargs) except Exception as exc: logger.critical(exc) raise return wrapped return decorator
cb26776167bd6b452bf7891196a12144bd22f03d
47,998
def load_properties(lines, sep="=", comment_marker="#"):
    """Creates a dictionary for properties provided as a list of lines.

    Split on the first found sep is conducted.

    :param lines: (list[str]) lines of the file.
    :param sep: (str) separator between property key and its value.
    :param comment_marker: (str) marker signaling that this line is a comment.
     Must be the first character in the row excluding whitespaces.
    :return: (dict[str,str]) dictionary representing a properties file.
    """
    res = {}
    for line in lines:
        stripped = line.strip()
        # Skip blank lines (also guards the comment check against an
        # IndexError), comment lines, and lines without a separator.
        if not stripped or stripped[0] == comment_marker or sep not in line:
            continue
        # BUG FIX: the old code used r[i+1:], which only skipped one
        # character of the separator and so broke for multi-char sep.
        # str.partition splits on the first occurrence of the full sep.
        key, _, value = line.partition(sep)
        res[key.strip()] = value.strip()
    return res
6ac731b955135f029734fb094d9fa91a3a08b291
47,999
def read_bb(filename):
    """Read ground truth bounding boxes from file.

    Each line is "<frame>.<ext>;v1;v2;v3;v4;v5...".  Returns a dict
    mapping the frame number (as a string, leading zeros stripped) to a
    list of 5-int bounding boxes.
    """
    gt_bboxes = {}
    with open(filename) as fh:
        for raw_line in fh:
            stripped = raw_line.strip()
            # int() drops leading zeros; keep the frame key as a string.
            frame = str(int(stripped.split(".")[0]))
            bb = [int(v) for v in stripped.split(";")[1:6]]
            gt_bboxes.setdefault(frame, []).append(bb)
    return gt_bboxes
89d9a8726c4f3a5b254cd4318051da5018991be3
48,000
def dirac_delta_function(x, steps=((1, 1),)):
    """
    Returns a Dirac delta function such that
    f(x) = y_0 if x = x_0, y_1 if x = x_1, ... else 0

    Parameters
    ============
    x: the input value.
    steps: an iterable of (x_n, y_n) deltas.  The default is an
        immutable tuple (the original used a mutable list default,
        a classic Python pitfall).
    """
    # Scan *all* deltas before falling back to 0.  The original's
    # loop/else layout could return 0 after checking only the first
    # step, breaking multi-step inputs.
    for x_n, y_n in steps:
        if x == x_n:
            return y_n
    return 0
5bb5ac50dc34f01f8ee64169aba18411af20ddbc
48,001
from bs4 import BeautifulSoup def strip_html(value): """ Strips HTML (tags and entities) from string `value`. """ # The space is added to remove BS warnings because value "http://django.pynotify.com" # will be considered as URL not as string in BS. The space will be removed with get_text method. return BeautifulSoup(' ' + value, 'lxml').get_text()
4e917f2b315d59b7f97922edd892328dd1c12ea3
48,007
def alternate_transformation(text):
    """Alternates the capitalization of a string's characters.

    Characters at even positions are upper-cased, odd positions
    lower-cased.
    """
    transformed = []
    for position, char in enumerate(text):
        transformed.append(char.lower() if position % 2 else char.upper())
    return "".join(transformed)
00ef40d431e9b515d67eb15b6a68e055021a363e
48,011
import collections def char_distribution(string: str) -> collections.Counter: """Given an ascii string, return the distribution of its characters as a percentage of the total string length. :param string: the string to be analyzed :returns: the characters distribution of the string """ assert string string = string.lower() c = collections.Counter(string) return c
70a245240d974bfded49b0d37e75a962365b4787
48,015
def get_complement(nucleotide):
    """ Returns the complementary nucleotide

    nucleotide: a nucleotide (A, C, G, or T) represented as a string
    returns: the complementary nucleotide, or -1 for invalid input
    >>> get_complement('A')
    'T'
    >>> get_complement('C')
    'G'
    >>> get_complement('G')
    'C'
    >>> get_complement('T')
    'A'
    >>> get_complement('L')
    -1
    """
    complements = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    # -1 signals an invalid (non-ACGT) nucleotide, as in the doctest.
    return complements.get(nucleotide, -1)
c52348987db20456ed0a3a73fc09bd08d6370595
48,017
def get_virtual_cst(prec, value, language):
    """ Generate coding of constant <value> in language assuming format <prec> """
    # Encode the raw integer value in the base format, then wrap it in
    # the support format's constant representation.
    integer_coding = prec.get_base_format().get_integer_coding(value, language)
    support_format = prec.get_support_format()
    return support_format.get_cst(integer_coding)
05a6cb7230a0f067e330e7e04ea59fe36e60bbb4
48,019
import json def json_pretty_print(content): """ Pretty print a JSON object ``content`` JSON object to pretty print """ temp = json.loads(content) return json.dumps( temp, sort_keys=True, indent=4, separators=( ',', ': '))
e02721042ae2eda31bf4187ba49787e9a5e0a989
48,020
def InitMutator(current, value):
    """Initialize the value if it is None, otherwise keep the current one."""
    return value if current is None else current
96a8c8778b307196f319c260fdd7b7cd2788b770
48,021
def autoincrement_tag(last_tag):
    """Bump the final numeric component of a dotted tag.

    autoincrement_tag('1') => 2
    autoincrement_tag('1.2') => 1.3
    autoincrement_tag('1.2.3') => 1.2.4
    """
    prefix, _, last_component = last_tag.rpartition('.')
    bumped = int(last_component) + 1
    if prefix:
        return f'{prefix}.{bumped}'
    return str(bumped)
29524f933a59252c8824830e68e64a2f12b2c49d
48,022
def get_all_jobs_in_workflow(processed_cfg, wrkflw):
    """ All jobs available under the specified workflow """
    workflow_cfg = processed_cfg['workflows'][wrkflw]
    return workflow_cfg['jobs']
54f4a2f9275791f5fbacdc826d40e596f2bf7de6
48,028
def grades_input(n_student):
    """Prompt the user for one integer grade per student and return them."""
    return [int(input('Enter a number: ')) for _ in range(n_student)]
a4118afa760d5911f39b1ca0988f060c0dc630ef
48,032
import time def epochtime_to_string(epochtime=None, use_second=False): """ Given an epoch time (seconds since 1/1/1970), return a string useful as a plot title. Set the use_second flag to also include seconds in the string. """ try: if use_second: time_string = time.strftime('%m/%d/%Y %H:%M:%S UTC', time.gmtime(epochtime)) else: time_string = time.strftime('%m/%d/%Y %H:%M UTC', time.gmtime(epochtime)) except: time_string = '' return time_string
3327dc2b9f3f05b8d17a366c1d1d01b9c742cc06
48,033
def team_multisig(accounts) -> str:
    """The team multisig address.

    By convention, account index 4 is reserved for the team multisig
    wallet.
    """
    multisig_address = accounts[4]
    return multisig_address
87453312ee39b0c8fa40d52a52d9bcf0c7cfb1b4
48,034
def spatial_agg(df, deg=2.5): """ For a given df calculate aggregation (count, mean, median, std, min, max) for data variables (temperature, pressure, humidity, magnetic_tot) grouped by latitude and longitude category Parameters ---------- df: pandas DataFrame Pandas dataframe with data for the desired sampling range (hourly, daily) deg: int or float, default 2.5 Spatial degree interval for for latitude and longitude data Returns ------- data_agg: pandas DataFrame DataFrame with aggregated data for every atmospheric variable data_count: pandas Series Series with count of data points for every location """ # Group data points by lat, lng categories df = df.discretize_latlng(deg=deg) # create a groupby object grouped by lat, lng categories grouped = df.groupby(by=["lat_cat","lng_cat"]) # custom agg functions to calculate na count and percentage na_pct = lambda df: df.isna().mean() na_count = lambda df: df.isna().sum() # group by custom functions na_pct = grouped.agg([na_pct]).rename({"<lambda>":"na_pct"}, axis=1) na_cnt = grouped.agg([na_count]).rename({"<lambda>":"na_count"}, axis=1) # group by regular statistics agg = grouped.agg(["mean","median","std","min","max","count"]) # join all groups and reshape dataframe so it has statistics as index not columns agg = agg.join([na_cnt, na_pct]).T.unstack().T # rename indices and columns for readability agg.columns.names = ["atmos"] agg.index.names = ["lat", "lng", "stat"] return agg
099f3af45b96d1bf46719d0839fa0f5b6e82d0d3
48,038
from typing import BinaryIO def read_c_string(fd: BinaryIO) -> bytes: """Reads a null-terminated string from the provided file descriptor.""" string = bytearray() while True: byte = fd.read(1) if not byte or byte == b'\0': return bytes(string) string += byte
358b7133216e14900e3247fb63482441c8118abc
48,048
def dataframe_count(df, count_type="row"):
    """
    dataframe行数/列数/单元格数 — return the DataFrame's row count,
    column count, or cell count depending on ``count_type``.
    """
    counters = {
        "row": lambda d: len(d),
        "column": lambda d: len(d.columns),
        "cell": lambda d: d.size,
    }
    if count_type not in counters:
        # Error message kept byte-for-byte (including the "invaild" typo).
        raise ValueError(f"invaild count_type: {count_type}")
    return counters[count_type](df)
407d2661da4dc945d00f99884c38f1f16ff7ef0e
48,050
import json def load_config(folder): """ Loads the configuration file for the Canvas API and returns it. Params: folder (String) : The folder containing the config. Returns: json : The config file as a JSON object. """ try: with open(f'{folder}/config.json', 'r') as f: return json.load(f) except FileNotFoundError: raise FileNotFoundError("Make sure to run setup.py before running the scraper.")
b5629bbb2426333c13cf72dad7f088e1b571455c
48,055
def is_file_list(coll):
    """
    Checks whether a collection is a list of files.

    A collection counts as a file list when its first element looks like
    an absolute path ("/...") or a URL ("http...").

    :param coll (list): collection to check.
    :return: True if collection is a list of files, else returns False.
    :raises Exception: when ``coll`` is not a list.
    """
    # check if collection is a list of files
    if not isinstance(coll, list):
        raise Exception(f"Expected collection as a list, have received {type(coll)}")
    # BUG FIX: an empty list used to raise IndexError on coll[0];
    # treat it as "not a file list" instead.
    if not coll:
        return False
    # startswith accepts a tuple of prefixes — one call instead of `or`.
    return coll[0].startswith(("/", "http"))
1ac4a12bf1654d6b787480b9072c3edc4bdb2604
48,057
def read_input(in_file, header=True):
    """Read and return all GO terms that are found in an open file.

    Parameters
    ----------
    in_file : an open file object
    header : bool, optional
        If the file contains a header that should be stripped

    Returns
    -------
    A list with all GO-term id's that are present in the given file.
    """
    if header:
        # Default of None guards against StopIteration escaping when the
        # file is empty; the old bare next(in_file) crashed on empty input.
        next(in_file, None)
    return [line.rstrip() for line in in_file]
c42e93b99937281cfd3f1dd776fa0c507733ba22
48,058
def split_digits(number, digits_after_dp=1):
    """Return digits before and after a decimal point.

    The number is rounded to ``digits_after_dp`` decimal places first;
    the result is a two-element list of strings.
    """
    as_text = str(round(float(number), digits_after_dp))
    return as_text.split('.')
549d349c11d7f6974c3ec363579f273cc23bc630
48,061
def format_references(section):
    """Format the "References" section.

    Each item is an indexable pair of (label, text); the text is
    stripped of surrounding whitespace.
    """
    item_lines = [
        ' - **[{0}]** {1}'.format(item[0], item[1].strip())
        for item in section
    ]
    return '!!! attention "References"\n' + '\n'.join(item_lines)
586bc412ca142b2dbd583ab2e2e08bf49db83588
48,065
def ff(items, targets):
    """First-Fit bin packing.

    Packs each item into the first bin that still has room for it;
    items that fit nowhere are collected in `skip`.  Complexity O(n^2).
    Returns (bins, skip) where bins is a list of (target, contents).
    """
    bins = [(target, []) for target in targets]
    skip = []
    for item in items:
        placed = False
        for capacity, contents in bins:
            if sum(contents) + item <= capacity:
                contents.append(item)
                placed = True
                break
        if not placed:
            skip.append(item)
    return bins, skip
3649d9b7704f36871f320a236cff0115b75689f3
48,071
def _get_backdrop_error_message(response):
    """
    Backdrop should return an error as response with a JSON body like
    {'status': 'error', 'message': 'Some error message'}
    This attempts to extract the 'Some error message' string.
    If that fails, return the raw JSON string.
    """
    try:
        body = response.json()
        return body['message']
    except Exception:
        # Broad on purpose: any parse/shape failure falls back to the
        # raw response body.
        return response.content
bcb6cd58327807bd1ed8ae4671301f5678ad9370
48,073
from typing import Optional def get_time_limit(env, current_max_episode_length: Optional[int]) -> int: """ Get time limit from environment. If ``current_max_episode_length`` is already set, it is returned unchanged; otherwise the limit is read from the (vectorized) environment's spec. :param env: Environment from which we want to get the time limit. Must support ``get_attr`` (vec-env style). :param current_max_episode_length: Current value for max_episode_length. :return: max episode length :raises ValueError: when no value was passed and the environment's spec has no ``max_episode_steps``. """ # try to get the attribute from environment if current_max_episode_length is None: try: current_max_episode_length = env.get_attr("spec")[0].max_episode_steps # Raise the error because the attribute is present but is None # (deliberately funnels the None case into the same except branch # as a genuinely missing attribute). if current_max_episode_length is None: raise AttributeError # if not available check if a valid value was passed as an argument except AttributeError: raise ValueError( "The max episode length could not be inferred.\n" "You must specify a `max_episode_steps` when registering the environment,\n" "use a `gym.wrappers.TimeLimit` wrapper " "or pass `max_episode_length` to the model constructor" ) return current_max_episode_length
03e3c141079986eda534d152372e0454be01dad5
48,076