content
stringlengths
42
6.51k
def overflow(keyword):
    """Validation for the ``overflow`` property: accept only CSS keywords."""
    allowed = {'auto', 'visible', 'hidden', 'scroll'}
    return keyword in allowed
def hex2rgb(hex_color):
    """'#B4FBB8' => 'rgb(180, 251, 184)'"""
    digits = hex_color.strip('#')
    # Take the three two-character channel slices and parse them as hex.
    channels = tuple(int(digits[pos:pos + 2], 16) for pos in range(0, 6, 2))
    return 'rgb{}'.format(channels)
def maybe_truncate(s, length=30):
    """Truncate long strings to at most *length* characters (appending '...');
    leave strings that already fit alone.

    Bug fix: the original truncated any string longer than ``length - 3``,
    which turned 28-30 character strings into a *longer* 30-character result
    while discarding real content.  Now only strings exceeding *length* are
    shortened, and the result never exceeds *length* characters.
    """
    s = str(s)
    if len(s) <= length:
        return s
    return s[:length - 3] + "..."
def type_name(x):
    """Describe (very briefly) what type of object this is.

    THANKS: https://stackoverflow.com/a/5008854/673991
    """
    name = type(x).__name__
    # Python 2 old-style instances all report 'instance' from type();
    # in that case ask the object's class directly.
    if name == 'instance':
        name = x.__class__.__name__
    return name
def people_clashes(events_definition, clashes_definition): """ Parameters ---------- events_definition : list of dicts of the form {'title': Event title, 'duration': <integer in minutes>, 'tags': <list of strings>, 'person': <string>, 'event_type': <string>} clashes_definition : dict mapping a person to a list of people whose events they must not not be scheduled against. Returns ------- dict mapping the index of an event in the events list to a list of event indexes against which it must not be scheduled. integer the count of self-clashes added """ # Add everyone who is missing to the clashes definition so that they cannot # clash with themselves for person in [event['person'] for event in events_definition]: if person not in clashes_definition: clashes_definition[person] = [person] # Add the self-clashing constraint to any existing entries where it is # missing count = 0 for person, clashing_people in clashes_definition.items(): if person not in clashing_people: clashing_people.append(person) count += 1 clashes = { events_definition.index(event): [ events_definition.index(t) for c in clashing_people for t in events_definition if t['person'] == c and events_definition.index(event) != events_definition.index(t)] for person, clashing_people in clashes_definition.items() for event in events_definition if event['person'] == person } return clashes, count
def emit(expr, indent, body):
    """Expression to string, formatted in executable python syntax.

    This is the default implementation for expression types that have no
    specialized emitter; *indent* and *body* are unused here.
    """
    return str(expr)
def tokens_url(tenant_url):
    """Returns the tokens API endpoint for the given tenant base URL."""
    return '%s/token' % tenant_url
def architecture_flag(compiler, arch):
    """Returns flags specific to the target architecture and compiler."""
    if not compiler or not arch:
        return ""
    # Only compilers from the GCC/Clang family accept -m32/-m64.
    if str(compiler) not in ('gcc', 'apple-clang', 'clang', 'sun-cc'):
        return ""
    arch_name = str(arch)
    if arch_name in ('x86_64', 'sparcv9'):
        return '-m64'
    if arch_name in ('x86', 'sparc'):
        return '-m32'
    return ""
def cast_cube(value, cursor):
    """Parse a cube-style literal like ``"(1.0,2.0,3.0)"`` into floats.

    :param value: raw string from the database, or a falsy value (NULL),
        which yields ``None``.
    :param cursor: unused; present to satisfy the DB typecaster signature.
    :return: list of floats, or ``None`` for NULL input.

    Bug fix: the original returned ``map(...)``, which on Python 3 is a
    lazy, single-use iterator (not subscriptable, exhausted after one
    pass); a list is returned instead.
    """
    if not value:
        # NULL column value (implicit None in the original as well).
        return None
    return [float(part) for part in value[1:-1].split(',')]
def lm1(data):
    """
    Regresses the linear model y = a + b*x and returns (a, b, r2) for the
    best fit (where r2 is the R-squared value).

    The input, *data*, should be a series of (x,y) pairs.  y can be None in
    which case the pair is ignored.

    Returns (None, None, None) when fewer than 2 usable points remain, or
    when all x values are identical (vertical line, slope undefined).
    """
    sxx = sxy = syy = sx = sy = n = 0
    for (x, y) in data:
        if y is not None:
            sxx += x * x
            syy += y * y
            sx += x
            sy += y
            sxy += x * y
            n += 1
    if n < 2:
        return None, None, None
    # Make n a float so every subsequent division is floating point.
    n = float(n)
    xbar = sx / n
    ybar = sy / n
    ssxx = sxx - (sx * sx) / n
    ssyy = syy - (sy * sy) / n
    ssxy = sxy - (sx * sy) / n
    if ssxx == 0:
        # All x identical: slope is undefined.
        return None, None, None
    b = ssxy / ssxx
    a = ybar - b * xbar
    if ssyy == 0:
        # Bug fix: when all y values are identical the r2 formula divided by
        # zero.  The horizontal fit y = ybar is exact, so report r2 = 1.0.
        return a, b, 1.0
    r2 = (ssxy * ssxy) / (ssxx * ssyy)
    return a, b, r2
def uri_to_curie(uri):
    """Converts URI to curie (short identifier).

    Args:
        uri: a full URI of an ontology term, e. g.
            http://purl.obolibrary.org/obo/MONDO_0009796.

    Returns:
        curie: a short Compact URI, e.g. MONDO:0009796.

    See also: http://www.obofoundry.org/docs/Citation.html
    """
    # Keep only the final path segment, drop any '#' and turn the ontology
    # prefix separator '_' into the curie separator ':'.
    fragment = uri.rsplit('/', 1)[-1]
    return fragment.replace('#', '').replace('_', ':')
def pixellate_factor(px_x=4, px_y=4):
    """
    A pixellation filter that uses factor values instead of resolution sizes.

    Author: SolarLune
    Date Updated: 6/6/11

    px_x = size of pixels on the x-axis
    px_y = size of pixels on the y-axis
    """
    # NOTE(review): px_x / px_y are interpolated straight into GLSL `int`
    # declarations via str(), so integer arguments are assumed — confirm
    # against callers.
    return ("""
    uniform sampler2D bgl_RenderedTexture;

    uniform float bgl_RenderedTextureWidth;
    uniform float bgl_RenderedTextureHeight;

    void main(void)
    {
        vec2 uv = gl_TexCoord[0].xy;
        vec2 pixel = vec2(1.0 / bgl_RenderedTextureWidth, 1.0 / bgl_RenderedTextureHeight);

        int target_x = """ + str(px_x) + """;
        int target_y = """ + str(px_y) + """;

        float dx = pixel.x * target_x;
        float dy = pixel.y * target_y;

        vec2 coord = vec2(dx * floor(uv.x / dx), dy * floor(uv.y / dy));

        coord += pixel * 0.5;
        // Add half a pixel distance so that it doesn't pull from the pixel's edges,
        // allowing for a nice, crisp pixellation effect

        coord.x = min(max(0.001, coord.x), 1.0);
        coord.y = min(max(0.001, coord.y), 1.0);

        gl_FragColor = texture2D(bgl_RenderedTexture, coord);
    }
    """)
def _average_nadir_gain_dbi(pattern, angles): """Average gain on the nadir face of the satellite. For simplicity, this function assumes some hard-coded values of 65-degrees off of boresight. That translates to 0->65 and (360-65)->360 """ s = 0 n = 0 offset = 65 for i in range(len(pattern)): angle = angles[i] gain = pattern[i] if (0 <= angle <= offset) or ((360-offset) <= angle <= 360): s += gain n += 1 return s / n
def key_to_frequency(key):
    """Return the frequency in Hz of piano key *key* (keys counted from A0).

    Key 49 is A4 = 440 Hz; each key is one equal-tempered semitone.
    """
    semitones_from_a4 = key - 49
    return 440 * 2 ** (semitones_from_a4 / 12.0)
def time_formatter(milliseconds: int) -> str:
    """Convert a duration in milliseconds to a beautified string.

    Zero-valued units are omitted; returns '' for a zero duration.
    """
    seconds, milliseconds = divmod(int(milliseconds), 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    pieces = []
    for amount, unit in ((days, "day"), (hours, "hour"), (minutes, "minute"),
                         (seconds, "second"), (milliseconds, "millisecond")):
        if amount:
            pieces.append("{} {}(s), ".format(amount, unit))
    # Strip the trailing ", " from the last piece (if any).
    return "".join(pieces)[:-2]
def merge_lists(iterable_of_lists):
    """Concatenate an iterable of lists into one flat list."""
    return [item for sublist in iterable_of_lists for item in sublist]
def exit_mode(mode):
    """
    Returns 'min' if given 'max' and vice-versa.

    Arguments
    - mode: 'max' or 'min'
    """
    if mode == 'max':
        return 'min'
    return 'max'
def data_sort_type(text, type='number'):
    """Define data-sort-type for wikitext table auto sorting.

    Plain cells get a fresh "| data-sort-type=... |" prefix; cells that
    already contain a '|' past the first character are split and rebuilt
    from their second and last segments.
    """
    text = str(text)
    if '|' in text[1:]:
        parts = text.split('|')
        return f"|{parts[1]}data-sort-type={type} | {parts[-1]}"
    return f"| data-sort-type={type} | {text}\n"
def clean_team_name(team_names):
    """Take a list of team names, keep only letters/digits/whitespace,
    upper-case them, and return the new list.

    Args:
        team_names: a list of team name strings to normalize.
    """
    cleaned = []
    for name in team_names:
        kept = [ch for ch in name if ch.isalpha() or ch.isspace() or ch.isdigit()]
        cleaned.append(''.join(kept).upper())
    return cleaned
def sequential_search_ordered_input(list1, val):
    """
    Sequentially search the ascending-sorted *list1* for *val*.

    Parameters
    ----------
    list1: input list, assumed sorted in ascending order
    val: the value to be searched

    Returns
    -------
    True/False
    """
    for item in list1:
        if item == val:
            return True
        if item > val:
            # Sorted input: once we pass val it cannot appear later.
            return False
    return False
def solution1(A: list):
    """Pair the index of each 0 in *A* with the index of every later
    non-zero element; print the pairs and return their count
    (-1 if the count exceeds 1,000,000,000)."""
    n = len(A)
    pairs = []
    for i in range(n):
        if A[i] != 0:
            continue
        # Pair this zero's index with every later non-zero index.
        for k in range(i + 1, n):
            if A[k] != 0:
                pairs.append((i, k))
    print(pairs)
    total = len(pairs)
    if total > 1_000_000_000:
        return -1
    return total
def format_time(start, end):
    """
    Computes the interval time between a start and an end point.

    :param start: starting time
    :param end: ending time
    :return: (seconds, minutes) tuple — note the order.
    """
    elapsed_mins, elapsed_secs = divmod(int(end - start), 60)
    return elapsed_secs, elapsed_mins
def normalize_location_code(code: str) -> str:
    """Remove a parenthesised quantity designation from a location code."""
    try:
        start = code.index("(")
        end = code.index(")")
    except ValueError:
        # No parentheses present: the code is already normalized.
        return code
    return f"{code[:start]}{code[end + 1:]}"
def cria_peca(peca):
    """
    cria_peca: str -> peca

    Take a player identifier ('X', 'O') or the free-piece marker (' ') and
    return the internal dict representation {'peca': peca}.

    Raises ValueError for anything that is not exactly one of those
    single-character strings.
    """
    valido = type(peca) == str and len(peca) == 1 and peca in 'XO '
    if not valido:
        raise ValueError('cria_peca: argumento invalido')
    return {'peca': peca}
def dm2skin_normalizeWeightsConstraint(x):
    """Optimization constraint: zero exactly when the weights in *x* sum to 1."""
    total = sum(x)
    return total - 1.0
def is_inside_circle(x, y):
    """Check whether point (x, y) falls strictly inside the enclosed circle.

    :param x: x coordinate of the point
    :param y: y coordinate of the point
    :return: True/False
    """
    radius = 0.5  # radius of the circle
    return x ** 2 + y ** 2 < radius ** 2
def normalizeInternalObjectType(value, cls, name):
    """
    Normalizes an internal object type.

    * **value** must be an instance of **cls**.
    * Returned value is the same type as the input value.

    :raises TypeError: if *value* is not a *cls* instance.
    """
    if not isinstance(value, cls):
        # Bug fix: the message previously interpolated *name* twice, so the
        # expected class was never shown; use cls.__name__ instead.
        raise TypeError("%s must be a %s instance, not %s."
                        % (name, cls.__name__, type(value).__name__))
    return value
def get_num_in_channel_2D(dataset_name="mesa"):
    """
    Get the number of input channels; this is for the hybrid fusion strategy
    as it uses two Conv1D networks.

    @param dataset_name: dataset name
    @return: a pair of channel counts, one for each network
    """
    channels = {
        "mesa": (1, 8),
        "mesa_loocv": (1, 6),
        "apple": (1, 6),
        "mesa_hr_statistic": (1, 6),
    }
    if dataset_name not in channels:
        raise ValueError("Sorry, dataset is incorrect should be mesa, mesa_hr_statistic, apple")
    return channels[dataset_name]
def validate_validator_type(validator_type):
    """
    Property: Validators.Type

    Returns the value unchanged when valid, raises ValueError otherwise.
    """
    valid_types = ("JSON_SCHEMA", "LAMBDA")
    if validator_type not in valid_types:
        raise ValueError(
            "ConfigurationProfile Validator Type must be one of: %s"
            % ", ".join(valid_types)
        )
    return validator_type
def parse(arg):
    """Split a whitespace-separated argument string into a tuple of tokens."""
    # str.split already yields strings, so no per-token conversion is needed.
    return tuple(arg.split())
def _alpha(n): """Excel-style column numbering A..Z, AA..AZ..BA..ZZ.., AAA""" if n < 1: raise ValueError(f"Can't represent {n} in alphabetic numbering") p = [] while n > 0: n, r = divmod(n - 1, 26) p.append(r) base = ord('A') ords = [(base + v) for v in reversed(p)] return ''.join(chr(o) for o in ords)
def app_config(app_config):
    """Application configuration fixture.

    Forces SEARCH_COPY_TO_METADATA on and returns the same mapping.
    """
    app_config.update(SEARCH_COPY_TO_METADATA=True)
    return app_config
def fixipmapping(ipparams, posflux, etc=None, retbinflux=False, retbinstd=False):
    """
    This function returns the fixed best-fit intra-pixel mapping.

    Parameters
    ----------
    ipparams : tuple
        Unused.
    posflux : tuple
        (bestmip, binflux, binstd); bestmip is a 1D array, size = # of
        measurements, holding the best-fit ip mapping.
    etc : optional
        Unused.  (Bug fix: was a mutable default ``[]``, the classic
        shared-mutable-default pitfall; ``None`` now.)
    retbinflux : bool
        Also return the binned flux.
    retbinstd : bool
        Also return the binned std.

    Returns
    -------
    output : 1D array, size = # of measurements
        Intra-pixel-corrected flux multiplier, optionally wrapped in a list
        with binflux and/or binstd.

    Revisions
    ---------
    2010-08-03  Kevin Stevenson, UCF  kevin218@knights.ucf.edu
                Original version
    """
    bestmip, binflux, binstd = posflux
    # Return fit with or without binned flux / binned std.
    if retbinflux and retbinstd:
        return [bestmip, binflux, binstd]
    if retbinflux:
        return [bestmip, binflux]
    if retbinstd:
        return [bestmip, binstd]
    return bestmip
def is_pj_lop(value):
    """
    Is the given value a PJ_LOP (represented as a list)?

    :param value: The value being checked
    :type value: Any
    :return: True if the value is a list, False otherwise
    :rtype: Boolean
    """
    return isinstance(value, list)
def volumetric_thermal_expansion_coefficient(rho):
    """
    Takes oil density and returns oil volumetric thermal expansion coefficient.

    :param rho: the density of an oil, kg/m3
    :return: volumetric thermal expansion coefficient, 1/(Celsius degree),
        or None when rho falls outside the tabulated bands (including the
        fractional gaps between bands, e.g. 719 < rho < 720).
    """
    bands = (
        (700, 719, 0.001225),
        (720, 739, 0.001183),
        (740, 759, 0.001118),
        (760, 779, 0.001054),
        (780, 799, 0.000995),
        (800, 819, 0.000937),
        (820, 839, 0.000882),
        (840, 859, 0.000831),
        (860, 879, 0.000782),
        (880, 899, 0.000734),
        (900, 919, 0.000688),
        (920, 939, 0.000645),
    )
    for low, high, coefficient in bands:
        if low <= rho <= high:
            return coefficient
    return None
def get_keys_from_value(value, _dict, case_sensitive=False):
    """Return the list of keys in *_dict* whose value lists contain *value*.

    A key is appended once per matching entry, so a key whose list matches
    twice appears twice (mirrors the original accumulation behavior).
    """
    matches = []
    for key, values in _dict.items():
        for candidate in values:
            if case_sensitive:
                hit = value == candidate
            else:
                hit = value.lower() == candidate.lower()
            if hit:
                matches.append(key)
    return matches
def convert_data_to_storable_format(response: dict) -> dict:
    """
    Takes response data from the YouTube API and condenses it into a
    database-friendly format, dropping irrelevant fields.

    Args:
        response: Response data from the API.

    Returns:
        dict mapping each item's publishedAt timestamp to its title,
        description, channel, and thumbnail URLs.
    """
    video_data = {}
    for item in response["items"]:
        snippet = item["snippet"]
        thumbs = snippet["thumbnails"]
        video_data[snippet["publishedAt"]] = {
            "title": snippet["title"],
            "description": snippet["description"],
            "channel": snippet["channelTitle"],
            "thumbnail_urls": {
                "default": thumbs["default"]["url"],
                "medium": thumbs["medium"]["url"],
                "high": thumbs["high"]["url"],
            },
        }
    return video_data
def normalize_email(email):
    """
    Normalize the address by lowercasing the domain part of the email
    address.  Returns None when no '@' is present (error handling is left
    to the serializer).
    """
    email = email or ''
    try:
        local_part, domain = email.strip().rsplit('@', 1)
    except ValueError:
        # No '@' to split on: let the caller decide how to report it.
        return None
    return '@'.join([local_part, domain.lower()])
def is_upper_case_name(name: str) -> bool:
    """
    Checks whether *name* contains any upper-case letters.

    >>> is_upper_case_name('camelCase')
    True
    >>> is_upper_case_name('UPPER_CASE')
    True
    >>> is_upper_case_name('snake_case')
    False
    >>> is_upper_case_name('__variable_v2')
    False
    """
    for character in name:
        if character.isupper():
            return True
    return False
def mm2cin(arg):
    """
    Convert millimeters to 1/100 in.

    Arguments:
        arg: Number, or list/tuple of numbers.

    Returns:
        Converted number, or list of converted numbers for sequences.

    Improvement: uses isinstance instead of ``type(arg) in [list, tuple]``,
    which is idiomatic and also accepts list/tuple subclasses.
    """
    if isinstance(arg, (list, tuple)):
        return [float(value) * 100.0 / 25.4 for value in arg]
    return float(arg) * 100.0 / 25.4
def vnf_package_obj(attrs=None, onboarded_state=False):
    """Create a fake vnf package.

    :param Dictionary attrs: A dictionary with all attributes; these
        overwrite the defaults via ``dict.update``.
    :param bool onboarded_state: When True, build the larger ONBOARDED
        fixture (with vnfd fields, software images and artifacts) instead
        of the freshly-CREATED one.
    :return: A FakeVnfPackage dict
    """
    attrs = attrs or {}
    # Set default attributes (CREATED / DISABLED / NOT_IN_USE fixture).
    fake_vnf_package = {"id": "60a6ac16-b50d-4e92-964b-b3cf98c7cf5c",
                        "_links": {"self": {"href": "string"},
                                   "packageContent": {"href": "string"}
                                   },
                        "onboardingState": "CREATED",
                        "operationalState": "DISABLED",
                        "usageState": "NOT_IN_USE",
                        "userDefinedData": {'key': 'value'}}
    # Fully-onboarded fixture replaces the default entirely.
    if onboarded_state:
        fake_vnf_package = {"id": "60a6ac16-b50d-4e92-964b-b3cf98c7cf5c",
                            "vnfdId": "string",
                            "vnfProvider": "string",
                            "vnfProductName": "string",
                            "vnfSoftwareVersion": "string",
                            "vnfdVersion": "string",
                            "softwareImages": [
                                {
                                    "id": "string",
                                    "name": "string",
                                    "provider": "string",
                                    "version": "string",
                                    "checksum": {
                                        "algorithm": "string",
                                        "hash": "string"
                                    },
                                    "containerFormat": "AKI",
                                    "diskFormat": "AKI",
                                    "createdAt": "2015-06-03T18:49:19.000000",
                                    "minDisk": '0',
                                    "minRam": '0',
                                    "size": '0',
                                    "userMetadata": {},
                                    "imagePath": "string"
                                }
                            ],
                            "checksum": {
                                "algorithm": "string",
                                "hash": "string"
                            },
                            "onboardingState": "ONBOARDED",
                            "operationalState": "ENABLED",
                            "usageState": "IN_USE",
                            "userDefinedData": {'key': 'value'},
                            "_links": {
                                "self": {
                                    "href": "string"
                                },
                                "vnfd": {
                                    "href": "string"
                                },
                                "packageContent": {
                                    "href": "string"
                                }
                            },
                            "additionalArtifacts": [
                                {
                                    "artifactPath": "string",
                                    "metadata": {},
                                    "checksum": {
                                        "algorithm": "string",
                                        "hash": "string"
                                    }
                                }]
                            }
    # Overwrite default attributes with caller-supplied ones.
    fake_vnf_package.update(attrs)
    return fake_vnf_package
def LastLineLength(s):
    """Returns the length of the last line in s.

    Args:
      s: A multi-line string, including newlines.

    Returns:
      The length of the last line in s, in characters.
    """
    newline_pos = s.rfind('\n')
    if newline_pos == -1:
        # Single-line string: the whole thing is the last line.
        return len(s)
    return len(s) - newline_pos - 1
def is_call_id_active(call_id):
    """Check if reactor.callLater() from callID is active.

    Active means neither fired (`called`) nor cancelled; None is inactive.
    """
    if call_id is None:
        return False
    return call_id.called == 0 and call_id.cancelled == 0
def find_padding(dilation, kernel):
    """
    Dynamically computes the padding that keeps a conv layer's output size
    equal to its input size for stride = 1.
    """
    padding = ((kernel - 1) * (dilation - 1) + (kernel - 1)) / 2.0
    return int(padding)
def get_website_from_host(http_host):
    """Try to find the website name from the HTTP_HOST name (drop any port)."""
    host, _, _port = http_host.partition(':')
    return host
def _mangle_name(internal_name, class_name): """Transform *internal_name* (which is assumed to be an "__internal" name) into a "_ClassName__internal" name. :arg str internal_name: the assumed-to-be-"__internal" member name :arg str class_name: name of the class where *internal_name* is defined :return: the transformed "_ClassName__internal" name :rtype: str """ return "_%s%s" % (class_name.lstrip('_'), internal_name)
def invert_label(args, column):
    """Invert mapping: find the key in args["labels"] whose value is *column*.

    Returns *column* unchanged when no such value exists (or when *column*
    is unhashable and cannot be looked up).
    """
    reversed_labels = {value: key for (key, value) in args["labels"].items()}
    try:
        return reversed_labels[column]
    except (KeyError, TypeError):
        # Narrowed from a blanket `except Exception`: only a missing value
        # (KeyError) or an unhashable column (TypeError) can occur here.
        return column
def find_max_path(triangle):
    """Find the maximum-sum path from the top of *triangle* to the bottom."""
    # Work on a copy so the caller's triangle is untouched.
    sums = [list(row) for row in triangle]
    # Bottom-up dynamic programming: each cell becomes its own value plus
    # the better of the two cells directly below it.
    for row_index in range(len(sums) - 2, -1, -1):
        below = sums[row_index + 1]
        row = sums[row_index]
        for col, value in enumerate(row):
            row[col] = value + max(below[col], below[col + 1])
    return sums[0][0]
def count_failed_validations(sessions):
    """Count failed challenges from certbot logs.

    A session is abandoned as soon as a line mentioning '--staging' is
    seen; only 'FailedChallenge' lines before that point are counted.
    """
    failed = 0
    for session in sessions:
        for line in session:
            if "'--staging'" in line:
                break
            if 'FailedChallenge' in line:
                failed += 1
    return failed
def create_player_list(records, fields):
    """Convert record tuples into a list of dicts keyed by *fields*."""
    return [
        {field: record[idx] for idx, field in enumerate(fields)}
        for record in records
    ]
def counter(clstr_lst, remove_single=True):
    """Attach cluster sizes, optionally dropping single-sequence clusters.

    Args:
        clstr_lst (dict): cluster number -> list of UniProt IDs in that
            cluster.
        remove_single (bool, optional): drop clusters with a single
            sequence.  Defaults to True.

    Returns:
        dict: cluster number -> (members, size) tuple.
    """
    sized = {}
    for cluster_id, members in clstr_lst.items():
        if remove_single and len(members) <= 1:
            continue
        sized[cluster_id] = (members, len(members))
    return sized
def formatData(account):
    """Format the account data into printable format."""
    return "{name}, a {description} from {country}".format(**account)
def dsub_to_api(job):
    """Extracts log file paths from a job, if present.

    Args:
        job: A dict with dsub job metadata.

    Returns:
        dict: labels for the dsub controller, stderr, and stdout log files,
        or None when no '.log' logging path is configured.
    """
    logging_path = job['logging']
    if not (logging_path and logging_path.endswith('.log')):
        return None
    base = logging_path[:-len('.log')]
    return {
        'Controller Log': '{}.log'.format(base),
        'Error Log': '{}-stderr.log'.format(base),
        'Output Log': '{}-stdout.log'.format(base),
    }
def calculate_gc_lo(subseq):
    """Calculate the GC and lowercase (RepeatMasked) fractions of a string.

    Returns (gc_fraction, lowercase_fraction); (0.0, 0.0) when the string
    contains no A/T/G/C bases at all.
    """
    at_lo = subseq.count('a') + subseq.count('t')
    at_up = subseq.count('A') + subseq.count('T')
    gc_lo = subseq.count('g') + subseq.count('c')
    gc_up = subseq.count('G') + subseq.count('C')
    total = float(at_lo + at_up + gc_lo + gc_up)
    if not total:
        return 0.0, 0.0
    return (gc_lo + gc_up) / total, (at_lo + gc_lo) / total
def color_green(val):
    """
    Takes a scalar, returns a string with the CSS property `'color: green'`
    (the argument is accepted for styler-callback compatibility but unused).
    """
    return 'color: green'
def frequency_map(text, k):
    """
    Find the frequency of all k-mers in a string.

    Args:
        text (str): text.
        k (int): length of the substring (i.e. kmers).

    Returns:
        Dictionary mapping each k-mer in text to its count, keyed in order
        of first occurrence.

    Performance fix: the original rescanned the entire text once per
    position (O(n^2 * k)); a single incremental-count pass is O(n * k)
    and produces identical counts and key order.

    >>> frequency_map("CGATATATCCATAG", 3)
    {'CGA': 1, 'GAT': 1, 'ATA': 3, 'TAT': 2, 'ATC': 1, 'TCC': 1, 'CCA': 1, 'CAT': 1, 'TAG': 1}
    """
    freq = {}
    for i in range(len(text) - k + 1):
        pattern = text[i:i + k]
        freq[pattern] = freq.get(pattern, 0) + 1
    return freq
def is_close(float1, float2, relative_tolerance=1e-9, absolute_tolerance=0.0):
    """
    Float comparison, as introduced in Python 3.5's math.isclose.

    :param float1: Float - Value 1
    :param float2: Float - Value 2
    :param relative_tolerance: the relative tolerance
    :param absolute_tolerance: minimum absolute tolerance
    :return: boolean
    """
    difference = abs(float1 - float2)
    scaled_tolerance = relative_tolerance * max(abs(float1), abs(float2))
    return difference <= max(scaled_tolerance, absolute_tolerance)
def get_fits_name(expnum, prodtype):
    """Get the name of a fits image based on its exposure number and prodtype.

    Plain images omit the prodtype segment.
    """
    if prodtype == 'image':
        return '{0}.fits'.format(expnum)
    return "{0}.{1}.fits".format(expnum, prodtype)
def get_label(method):
    """Standardize a method name for use as a graph label.

    Input
    - method (string): method name to standardize
    Output
    - label (string): capitalized label ('random' stays lower-case)
    """
    label = method.capitalize()
    return "random" if label == "Random" else label
def string_to_array(string):
    """Return a list of whitespace-separated tokens from *string*.

    The empty string is special-cased to [""] rather than [].
    """
    return [""] if string == "" else string.split()
def has_next(iterable):
    """Pull the next element from an iterator; None when it is exhausted.

    (The original also `del`eted the local reference in a finally clause,
    which has no observable effect and is omitted here.)
    """
    try:
        return next(iterable)
    except StopIteration:
        return None
def scalarProduct(s, v):
    """
    Calculate s * v.

    :param s: Scalar.
    :param v: Vector (sequence of numbers).
    :return: new list with every component scaled by s.
    """
    return [s * component for component in v]
def _is_set(data: str) -> bool: """Returns False if data is a special mmCIF character indicating 'unset'.""" return data not in (".", "?")
def sales_velocity(units_sold_last_12m, number_of_days_in_stock, velocity_days=30):
    """Return the sales velocity of a product for a given number of days.

    Args:
        units_sold_last_12m (int): Units sold in the past 12 months.
        number_of_days_in_stock (int): Days in the past 12 months the
            product was in stock.
        velocity_days (int, optional): Days over which to measure sales
            velocity.  Default 30.

    Returns:
        Sales velocity of the product.
    """
    daily_rate = units_sold_last_12m / number_of_days_in_stock
    return daily_rate * velocity_days
def get_total_time_in_sec(last_user_stats):
    """Calculate the total music listening time, in seconds.

    Each entry contributes play_count * duration_millis / 1000; entries
    with missing or non-numeric fields are skipped.

    Fix: the bare ``except:`` (which also swallowed KeyboardInterrupt and
    SystemExit) is narrowed to the errors malformed entries can raise.
    """
    total_time_sec = 0
    for song in last_user_stats:
        try:
            plays = int(song['play_count'])
            duration_sec = int(song['duration_millis']) / 1000
        except (KeyError, TypeError, ValueError):
            # Malformed entry: skip it, as the original intended.
            continue
        total_time_sec += plays * duration_sec
    return total_time_sec
def merge_dicts(inputs):
    """
    Merge multiple input dicts into a single dict; later dicts win on
    duplicate keys.

    Parameters
    ----------
    inputs : list
        List of dictionaries.
    """
    merged = {}
    for mapping in inputs:
        merged.update(mapping)
    return merged
def lorentzian(x, ll, yshift=0.):
    """
    Peak-normalized Lorentzian function.

    :arguments
        x: float / np1darray
        ll: float — Lorentzian FWHM
        yshift: float — shift of y value (used for root finding)
    :returns
        y: float / np1darray
    """
    value = ll ** 2 / (4 * x ** 2 + ll ** 2)
    return value - yshift
def replace_escapes(text):
    """
    Replace escape sequences with the characters they represent.

    \\t, \\n and \\s become tab, newline and space; any other escaped
    character is emitted literally (so \\\\ yields a single backslash).
    A trailing lone backslash is dropped.

    :param text: string to replace escapes in
    :return: new string with escapes replaced
    """
    replacements = {"t": "\t", "n": "\n", "s": " "}
    pieces = []
    pending_escape = False
    for char in text:
        if pending_escape:
            pieces.append(replacements.get(char, char))
            pending_escape = False
        elif char == "\\":
            pending_escape = True
        else:
            pieces.append(char)
    return "".join(pieces)
def _score_sentences(tf_idf_matrix) -> dict: """ score a sentence by its word's TF Basic algorithm: adding the TF frequency of every non-stop word in a sentence divided by total no of words in a sentence. :rtype: dict """ sentenceValue = {} for sent, f_table in tf_idf_matrix.items(): total_score_per_sentence = 0 count_words_in_sentence = len(f_table) for word, score in f_table.items(): total_score_per_sentence += score sentenceValue[sent] = total_score_per_sentence / count_words_in_sentence return sentenceValue
def generate_registers_sifive_clic0_clicintie(intr, addr):
    """Generate xml string for riscv_clic0 intie register for specific interrupt id"""
    # NOTE(review): *intr* and *addr* are interpolated as raw strings with
    # no escaping — callers are expected to pass plain numeric strings.
    return """\
              <register>
                <name>clicintie_""" + intr + """</name>
                <description>CLICINTIE Register for interrupt id """ + intr + """</description>
                <addressOffset>""" + addr + """</addressOffset>
                <size>8</size>
              </register>
"""
def drange(v0: float, v1: float, d: int) -> range:
    """Returns a discrete range: [v0, v1] quantized to steps of d."""
    start = int(v0) // d
    stop = int(v1 + d) // d
    return range(start, stop)
def parseRange(string):
    """Parse a dash-separated string of ints into a (low, high) tuple.

    A single number N parses as (N, N).
    """
    pieces = string.split("-")
    if len(pieces) == 1:
        value = int(pieces[0])
        return (value, value)
    if len(pieces) == 2:
        return (int(pieces[0]), int(pieces[1]))
    raise ValueError("Cannot parse range " + string)
def preprocessLines(sourceLines):
    """
    Delete comments from the lines and change them to upper case, in place.

    sourceLines - array of assembly lines with comments
    return: the same (mutated) array of lines
    """
    for index in range(len(sourceLines)):
        # Upper-case first, then drop everything after the ';' comment marker.
        sourceLines[index] = sourceLines[index].upper().split(";")[0]
    return sourceLines
def compareHour(hour1, operator, hour2):
    """
    Compare two times given as "HH:MM" strings.

    :param hour1: left-hand time, "HH:MM"
    :param operator: ">=" or "<="
    :param hour2: right-hand time, "HH:MM"
    :return: bool for a recognized operator; None otherwise (as before).

    Bug fix: the original returned False for "<=" when the two times were
    exactly equal.  Tuple comparison makes ">=" and "<=" consistent.
    """
    h1, m1 = (int(part) for part in hour1.split(":"))
    h2, m2 = (int(part) for part in hour2.split(":"))
    if operator == ">=":
        return (h1, m1) >= (h2, m2)
    if operator == "<=":
        return (h1, m1) <= (h2, m2)
    return None
def _key_by_signature(operations, signature_func): """Creates a dictionary of operations keyed by signature Args: operations (iterable[Operations]): the input operations Returns: dict[string, [Operations]]: the operations keyed by signature """ return dict((signature_func(op), op) for op in operations)
def is_gradoop_id(value) -> bool:
    """Check if value is a valid Gradoop id.

    Gradoop ids are 12 byte (24 character) hexadecimal strings.

    Parameters
    ----------
    value
        Value to check

    Returns
    -------
    bool
        True if valid Gradoop id
    """
    if not (isinstance(value, str) and len(value) == 24):
        return False
    try:
        int(value, 16)
    except ValueError:
        return False
    return True
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
                 basic_auth=None):
    """
    Shortcuts for generating request headers.

    keep_alive
        If true, adds 'connection: keep-alive' header.

    accept_encoding
        Can be a boolean, list, or string.
        True translates to 'gzip,deflate'.
        List will get joined by comma.
        String will be used as provided.

    user_agent
        String representing the user-agent you want, such as
        "python-urllib3/0.6"

    basic_auth
        Colon-separated username:password string for
        'authorization: basic ...' auth header.
    """
    import base64

    headers = {}
    if accept_encoding:
        if isinstance(accept_encoding, str):
            pass
        elif isinstance(accept_encoding, list):
            accept_encoding = ','.join(accept_encoding)
        else:
            accept_encoding = 'gzip,deflate'
        headers['accept-encoding'] = accept_encoding
    if user_agent:
        headers['user-agent'] = user_agent
    if keep_alive:
        headers['connection'] = 'keep-alive'
    if basic_auth:
        # Bug fix: str.encode('base64') is a Python 2 codec and raises
        # LookupError on Python 3; use base64.b64encode instead.
        encoded = base64.b64encode(basic_auth.encode('utf-8')).decode('ascii')
        headers['authorization'] = 'Basic ' + encoded
    return headers
def read_input_samples(data_dict):
    """
    Collect only the 'input' grids from a loaded data file, treating train
    and test samples alike.

    :param data_dict: data dictionary with the full file input structure
        loaded ({'train': [...], 'test': [...]}).
    :return: dict mapping a running index to each sample's input grid,
        train samples first.
    """
    samples = data_dict["train"] + data_dict["test"]
    return {index: sample["input"] for index, sample in enumerate(samples)}
def localDigitsStrToInt(value, digitsToLocalDict, localToDigitsDict):
    """Convert a string of local digits to an integer.

    :raises ValueError: if *value* contains regular ASCII digits.
    """
    # Round-trip check: mapping regular digits to local ones must leave the
    # string untouched; any change means it contained regular digits.
    if value.translate(digitsToLocalDict) != value:
        raise ValueError('string contains regular digits')
    return int(value.translate(localToDigitsDict))
def _get_liblinear_solver_type(multi_class, penalty, loss, dual): """Find the liblinear magic number for the solver. This number depends on the values of the following attributes: - multi_class - penalty - loss - dual The same number is also internally used by LibLinear to determine which solver to use. """ # nested dicts containing level 1: available loss functions, # level2: available penalties for the given loss function, # level3: whether the dual solver is available for the specified # combination of loss function and penalty _solver_type_dict = { 'logistic_regression': { 'l1': {False: 6}, 'l2': {False: 0, True: 7}}, 'hinge': { 'l2': {True: 3}}, 'squared_hinge': { 'l1': {False: 5}, 'l2': {False: 2, True: 1}}, 'epsilon_insensitive': { 'l2': {True: 13}}, 'squared_epsilon_insensitive': { 'l2': {False: 11, True: 12}}, 'crammer_singer': 4 } if multi_class == 'crammer_singer': return _solver_type_dict[multi_class] elif multi_class != 'ovr': raise ValueError("`multi_class` must be one of `ovr`, " "`crammer_singer`, got %r" % multi_class) _solver_pen = _solver_type_dict.get(loss, None) if _solver_pen is None: error_string = ("loss='%s' is not supported" % loss) else: _solver_dual = _solver_pen.get(penalty, None) if _solver_dual is None: error_string = ("The combination of penalty='%s' " "and loss='%s' is not supported" % (penalty, loss)) else: solver_num = _solver_dual.get(dual, None) if solver_num is None: error_string = ("The combination of penalty='%s' and " "loss='%s' are not supported when dual=%s" % (penalty, loss, dual)) else: return solver_num raise ValueError('Unsupported set of arguments: %s, ' 'Parameters: penalty=%r, loss=%r, dual=%r' % (error_string, penalty, loss, dual))
def get_pattern(guess, solution):
    """Generate the Wordle-style hint pattern for ``guess`` against ``solution``.

    Hint characters:
        'g' (green)  -- correct letter in the correct position
        'y' (yellow) -- letter occurs elsewhere in ``solution``
        'b' (black)  -- letter absent, or all its occurrences already used

    Fixes duplicate-letter handling: each solution letter can justify at
    most one 'g'/'y' hint, so e.g. a guess with two of a letter that the
    solution contains once yields one 'y' and one 'b', not two 'y'.

    Assumes ``guess`` and ``solution`` have the same length.

    :param guess: the guessed word
    :param solution: the target word
    :return: hint string of 'g'/'y'/'b', one character per guess letter
    """
    hint = ["b"] * len(guess)
    # First pass: mark greens; count the solution letters that remain
    # unmatched and are therefore available for yellow hints.
    remaining = {}
    for index in range(len(guess)):
        if guess[index] == solution[index]:
            hint[index] = "g"
        else:
            letter = solution[index]
            remaining[letter] = remaining.get(letter, 0) + 1
    # Second pass: award yellows only while unmatched copies remain.
    for index, letter in enumerate(guess):
        if hint[index] == "b" and remaining.get(letter, 0) > 0:
            hint[index] = "y"
            remaining[letter] -= 1
    return "".join(hint)
def try_int(s, *args):
    """Convert ``s`` to an integer, falling back gracefully on failure.

    :param s: value to convert
    :param args: optional single fallback; returned when conversion fails
    :return: ``int(s)`` if possible, else the fallback, else ``s`` itself
    """
    try:
        return int(s)
    except (TypeError, ValueError):
        if args:
            return args[0]
        return s
def median(iterable):
    """Obtain the central value of a series

    Sorts the iterable and returns the middle value if there is an odd
    number of elements, or the arithmetic mean of the middle two elements
    if there is an even number of elements

    :param iterable: a series of orderable items
    :return: the median value
    :raises ValueError: if the iterable is empty
    """
    items = sorted(iterable)
    if len(items) == 0:
        raise ValueError("median() arg is an empty sequence")
    # Index of the middle element (lower-middle for even lengths).
    median_index = (len(items) - 1) // 2

    if len(items) % 2 != 0:
        return items[median_index]

    return (items[median_index] + items[median_index + 1]) / 2.0
def multiply_basic(a, b):
    """Element-wise product of two 1xN matrices.

    This is NOT a standard matrix multiplication: it multiplies the two
    rows entry by entry (first index with first index).

    Both matrices must be 2D lists of shape 1xN (one row, any length).

    :param a: (list) 2D matrix with only one row
    :param b: (list) 2D matrix with only one row
    :return: (list) 2D matrix containing the element-wise product of a*b
    :raises ValueError: if either matrix has more than one row, or the
        rows differ in length
    """
    # Check if both matrices contain only one row.
    # ValueError (a subclass of Exception) lets callers catch precisely
    # while remaining compatible with existing `except Exception` handlers.
    if len(a) != 1 or len(b) != 1:
        raise ValueError("Error xm14: Basic multiplication only works on 1xN matrices")

    # Check for mismatched row length.
    if len(a[0]) != len(b[0]):
        raise ValueError("Error xm15: Row lengths do not match")

    # Multiply elements together and return the matrix.
    return [[b[0][i] * a[0][i] for i in range(len(a[0]))]]
def parse_config_line(line):
    """Parse one line of a config file into a key/value pair.

    :param line: string with a single line from config file
    :return: ``(config_key, config_value)`` where config_key is:
        0 - empty line, comment, or unparseable content
        1 - section header (section name returned as the value)
        otherwise the parameter name; the value is a string, or a list
        of stripped strings when written as ``[a, b, c]``
    """
    # Drop any trailing comment, then surrounding whitespace.
    line = line.split("#")[0].strip()

    # Too short to hold a real parameter or section.
    if len(line) <= 2:
        return 0, line

    # Section header: "[name]".
    if line.startswith('[') and line.endswith(']'):
        section = line[1:-1].strip()
        return (1, section) if section else (0, section)

    # Regular "key = value" pair; only the first '=' separates them.
    key, _, value = line.partition('=')
    key = key.strip()
    value = value.strip()
    if not key or not value:
        return 0, line

    # A value written as "[a, b, c]" becomes a list of stripped items.
    if len(value) > 2 and value.startswith('[') and value.endswith(']'):
        value = [item.strip() for item in value[1:-1].split(',')]

    return key, value
def reduce_reflex_angle_deg(angle):
    """Normalise an angle in degrees into the interval (-180, 180]."""
    # Wrap into [0, 360), then shift the upper half down by a full turn.
    wrapped = angle % 360
    return wrapped - 360 if wrapped > 180 else wrapped
def bytes_xor(a: bytes, b: bytes) -> bytes:  # pylint: disable=invalid-name
    """XOR two bytes values.

    The result has the length of ``a`` (the values are treated as
    big-endian integers, so inputs are expected to be the same length).
    """
    left = int.from_bytes(a, "big")
    right = int.from_bytes(b, "big")
    return (left ^ right).to_bytes(len(a), "big")
def similarity(token1, token2):
    """Calculate the positional similarity of two tokens.

    Compares the tokens character by character over the length of
    ``token1`` and returns the fraction of matching positions.

    :param token1: First token (its length sets the comparison range).
    :param token2: Second token; must be at least as long as ``token1``.
    :return: fraction of matching positions in [0.0, 1.0]; 0.0 for an
        empty ``token1`` (previously raised ZeroDivisionError).
    :raises IndexError: if ``token2`` is shorter than ``token1``.
    """
    total = len(token1)
    # Guard the empty case instead of dividing by zero.
    if total == 0:
        return 0.0
    # Indexing token2 (rather than zip) keeps the original contract of
    # failing loudly when token2 is shorter than token1.
    same = sum(1 for i in range(total) if token1[i] == token2[i])
    return float(same) / float(total)
def chunk_to_str(chunk):
    """Convert the given chunk to a string.

    chunk - list of Tagged characters (each exposing a ``.c`` attribute).

    return - string built by concatenating each item's ``.c`` character.
    """
    characters = (tagged.c for tagged in chunk)
    return "".join(characters)
def get_content(html_code):
    """Split a downloaded web page into its MESSAGE and CONTENT lines.

    Lines ending with '#' are delimiters and are dropped; of the rest,
    lines starting with '#' belong to the MESSAGE section and everything
    else to the CONTENT section.

    @parameters

    html_code (str)
        html code of the downloaded web page

    @returns

    tuple (of lists)
        message_list: strings, one per MESSAGE-section line
        content_list: strings, one per CONTENT-section line
    """
    message_list = []
    content_list = []
    for line in html_code.split('\n'):
        if line.endswith('#'):
            # Delimiter / marker line -- belongs to neither section.
            continue
        if line.startswith('#'):
            message_list.append(line)
        else:
            content_list.append(line)
    return message_list, content_list
def get_protocol(url):
    """Return the protocol (scheme) of a given URL.

    :param url: the URL string, e.g. ``"https://example.com"``
    :return: the text before ``"://"``; the whole string if no separator
        is present; ``""`` if ``url`` is not a string (e.g. ``None``)
    """
    try:
        return url.split("://")[0]
    except AttributeError:
        # Non-string input (no .split method); previously a bare except
        # that silently swallowed every exception type.
        return ""
def _check_regular_chunks(chunkset): """Check if the chunks are regular "Regular" in this context means that along every axis, the chunks all have the same size, except the last one, which may be smaller Parameters ---------- chunkset: tuple of tuples of ints From the ``.chunks`` attribute of an ``Array`` Returns ------- True if chunkset passes, else False Examples -------- >>> import dask.array as da >>> arr = da.zeros(10, chunks=(5, )) >>> _check_regular_chunks(arr.chunks) True >>> arr = da.zeros(10, chunks=((3, 3, 3, 1), )) >>> _check_regular_chunks(arr.chunks) True >>> arr = da.zeros(10, chunks=((3, 1, 3, 3), )) >>> _check_regular_chunks(arr.chunks) False """ for chunks in chunkset: if len(chunks) == 1: continue if len(set(chunks[:-1])) > 1: return False if chunks[-1] > chunks[0]: return False return True
def apply_gain_x2(x, AdB):
    """Apply a gain of ``AdB`` decibels to the squared magnitude ``x``.

    Power quantities scale by 10**(dB/10), so 10 dB multiplies by 10.
    """
    linear_gain = 10 ** (AdB / 10)
    return x * linear_gain
def _get_new_shape(name, shape, num_heads): """Checks whether a variable requires reshape by pattern matching.""" if "self_attention_output/kernel" in name: return tuple([num_heads, shape[0] // num_heads, shape[1]]) if "self_attention_output/bias" in name: return shape patterns = [ "self_attention/query", "self_attention/value", "self_attention/key" ] for pattern in patterns: if pattern in name: if "kernel" in name: return tuple([shape[0], num_heads, shape[1] // num_heads]) if "bias" in name: return tuple([num_heads, shape[0] // num_heads]) return None
def _exact_phrase(phrase): """ Returns a query item matching messages that have an exact phrase. Args: phrase (str): The exact phrase to match. Returns: The query string. """ return f'"{phrase}"'
def coding_problem_48(io, po):
    """Reconstruct a binary tree from its in-order and pre-order traversals.

    Trees are represented as ``[value, left, right]`` lists, with ``None``
    standing in for a missing child; node values are assumed unique.

    :param io: in-order traversal (list of node values)
    :param po: pre-order traversal (list of node values)
    :return: the reconstructed tree
    """
    # The first pre-order value is the root; its position in the in-order
    # sequence separates the left subtree from the right one.
    root = po[0]
    split = io.index(root)
    io_left = io[:split]
    io_right = io[split + 1:]

    # Partition the remaining pre-order values by subtree membership.
    left_values = set(io_left)
    right_values = set(io_right)
    po_left = [v for v in po if v in left_values]
    po_right = [v for v in po if v in right_values]

    left_child = coding_problem_48(io_left, po_left) if io_left else None
    right_child = coding_problem_48(io_right, po_right) if io_right else None
    return [root, left_child, right_child]
def listangar(a_list):
    """Return the first and last elements of ``a_list`` as a new list.

    Unlike the previous implementation, the input list is NOT modified:
    ``a_list.pop()`` removed the last element as a side effect.

    :param a_list: non-empty list
    :return: ``[first, last]``; for a single-element list both entries
        are that element
    :raises IndexError: if ``a_list`` is empty
    """
    return [a_list[0], a_list[-1]]
def ztf_magnitude_zero_point(bands=''):
    """Look up the ZTF magnitude zero point for each requested band.

    :param bands: comma-separated band names among 'g', 'r', 'i'
        (e.g. ``'g,r'``); surrounding whitespace per band is ignored
    :return: list of zero points, one per requested band; an empty list
        for an empty ``bands`` string (the default previously raised
        KeyError, since ``''.split(',')`` yields ``['']``)
    :raises KeyError: for an unknown band name
    """
    zero_points = {'g': 26.325, 'r': 26.275, 'i': 25.660}
    if not bands:
        return []
    return [zero_points[band.strip()] for band in bands.split(',')]
def isnum(*xs):
    """
    Tests if all of xs are numeric

    :param xs: vals of whatever
    :returns: True or False (vacuously True for no arguments)
    """
    try:
        # Attempt every conversion; the list forces full evaluation.
        [float(value) for value in xs]
    except (ValueError, TypeError):
        return False
    return True
def make_table_line(input_list, header_flag):
    """Render a list of strings as one HTML table row.

    When ``header_flag`` is truthy, header cells (``<th>``) are emitted
    instead of data cells (``<td>``).

    :param input_list: cell contents (strings)
    :param header_flag: emit a header row when truthy
    :return: the ``<tr>...</tr>`` row followed by a newline
    """
    if header_flag:
        open_tag, close_tag = "<th>", "</th>"
    else:
        open_tag, close_tag = "<td>", "</td>"
    cells = "".join(open_tag + item + close_tag for item in input_list)
    return "<tr>" + cells + "</tr>" + "\n"