content
stringlengths
42
6.51k
def generate_new_rule(child_id, parent_tag, parent_id, priority, maxdepth, pkg_filter, intransitive, noconfig):
    """
    Build a complete inheritance-rule dict for this child tag.

    :param int child_id: Koji tag id
    :param str parent_tag: Koji tag name
    :param int parent_id: Koji tag id
    :param int priority: Priority of this parent for this child
    :param int maxdepth: Max depth of the inheritance
    :param str pkg_filter: Regular expression string of package names to include
    :param bool intransitive: Don't allow this inheritance link to be inherited
    :param bool noconfig: Prevent tag options ("extra") from being inherited
    """
    rule = {}
    rule['child_id'] = child_id
    rule['intransitive'] = intransitive
    rule['maxdepth'] = maxdepth
    rule['name'] = parent_tag
    rule['noconfig'] = noconfig
    rule['parent_id'] = parent_id
    rule['pkg_filter'] = pkg_filter
    rule['priority'] = priority
    return rule
def strip(value):
    """Return *value* with leading and trailing whitespace removed."""
    stripped = value.strip()
    return stripped
def indent(txt: str, num_spaces: int = 2) -> str:
    """Prefix every non-blank line of ``txt`` with ``num_spaces`` spaces.

    Lines containing only whitespace are emitted as empty strings so no
    trailing padding is introduced.
    """
    pad = " " * num_spaces
    out_lines = []
    for line in txt.split("\n"):
        # Whitespace-only lines become empty rather than padded.
        out_lines.append(pad + line if line.strip() else "")
    return "\n".join(out_lines)
def prepare_library(library):
    """Normalize raw song dicts into a flat library structure.

    Fixes over the original: removes a duplicated ``artist`` assignment and
    replaces four bare ``except:`` clauses (which also swallowed
    KeyboardInterrupt) with explicit defaults via ``dict.get``.

    :param library: iterable of raw song dicts (Google-Music-style keys)
    :return: list of normalized song dicts
    """
    new_library = []
    for song in library:
        # Optional list-valued fields default to empty sequences.
        artist_ids = song.get('artistId') or []
        artist_refs = song.get('artistArtRef') or []
        album_refs = song.get('albumArtRef') or []
        new_song = {
            'title': song['title'],
            'artist': song['artist'],
            'id': song['id'],
            'duration_millis': song['durationMillis'],
            'play_count': song.get('playCount', 0),
            'artist_id': artist_ids[0] if artist_ids else '',
            'artist_art_ref': artist_refs[0]['url'] if artist_refs else '',
            'album_art_ref': album_refs[0]['url'] if album_refs else '',
        }
        new_library.append(new_song)
    return new_library
def absolute_time(t_string):
    """Convert a 24-hour 'hh:mm:ss' string to float minutes since midnight."""
    hours, minutes, secs = (float(part) for part in t_string.split(':'))
    return hours * 60 + minutes + secs / 60
def error_has_parenthesis(value):
    """Return True when *value* contains an unmatched ')' or '(' (one side only)."""
    has_open = '(' in value
    has_close = ')' in value
    return bool(has_close and not has_open) or bool(has_open and not has_close)
def calibration(i):
    """Map a spectrum array index to wavelength via a linear calibration fit."""
    slope = 3.04956866e-01
    intercept = 3.82033291e+02
    return slope * i + intercept
def math_expression_type(text):
    """Custom parse type: evaluate *text* as a mathematical expression.

    :param str text: the text which was matched as math expression
    :returns: calculated float number from the math expression
    :rtype: float

    SECURITY NOTE(review): this uses ``eval`` on the matched text; it must
    never be fed untrusted input.
    """
    result = eval(text)
    return float(result)
def split_into_chunks(string, chunk_size):
    """Split *string* into consecutive pieces of *chunk_size* characters.

    NOTE: the original doc expects len(string) to be a multiple of
    chunk_size; a shorter final chunk is returned otherwise.
    """
    return [string[offset:offset + chunk_size]
            for offset in range(0, len(string), chunk_size)]
def check_nmea_checksum(nmea_sentence):
    """Validate the checksum of an NMEA sentence.

    Args:
        nmea_sentence (str): The NMEA sentence to check.

    Returns True when the XOR of all payload bytes (between '$' and '*')
    matches the two hex digits after '*'.
    """
    pieces = nmea_sentence.split('*')
    if len(pieces) != 2:
        # No checksum bytes were found... improperly formatted/incomplete NMEA data?
        return False
    expected = pieces[1].strip().upper()
    # Skip the leading '$' before XOR-folding the payload.
    computed = 0
    for ch in pieces[0][1:]:
        computed ^= ord(ch)
    return "%02X" % computed == expected
def replace_extension(filename, new_extension, add_to_end=False):
    """Replace the extension of *filename* with *new_extension*.

    With ``add_to_end=True`` the new extension is appended instead.
    NOTE(review): when the existing extension already equals
    ``new_extension`` the original appends it again (``a.md`` -> ``a.md.md``);
    that quirk is preserved here — confirm it is intentional.
    """
    dot = filename.rfind(".")
    keep_whole = dot < 0 or filename[dot + 1:] == new_extension or add_to_end
    base = filename if keep_whole else filename[:dot]
    return "%s.%s" % (base, new_extension)
def _FormatUidRange(lower, higher): """Convert a user-id range definition into a string. """ if lower == higher: return str(lower) return "%s-%s" % (lower, higher)
def mulfrange(start, end, n):
    """Multiplicative (geometric) analogue of range() for floats.

    Returns n values starting at *start*, each multiplied by the constant
    ratio (end/start)**(1/n). Integer arguments are promoted to float.
    """
    start = float(start)
    end = float(end)
    ratio = (end / start) ** (1.0 / n)
    values = []
    term = start
    for _ in range(n):
        values.append(term)
        term *= ratio
    return values
def split_path(path):
    """
    Get the parent path and basename.

    >>> split_path('/')
    ['', '']
    >>> split_path('')
    ['', '']
    >>> split_path('foo')
    ['', 'foo']
    >>> split_path('/foo')
    ['', 'foo']
    >>> split_path('/foo/bar')
    ['/foo', 'bar']
    >>> split_path('foo/bar')
    ['/foo', 'bar']
    """
    normalized = path if path.startswith('/') else '/' + path
    return normalized.rsplit('/', 1)
def calculate_scores(emojis_list, images_list, texts_list):
    """
    Calculate sentiment scores given lists of media.

    Priority per sentence: emoji score first (best indicator), then text,
    then image; None when nothing scored. All three lists must be the same
    length, otherwise an empty list is returned (with a warning printed).

    :param emojis_list: emojis in sentences
    :param images_list: image urls in sentences
    :param texts_list: texts in sentences
    :return: sentiment_scores
    """
    sentiment_scores = []
    if not (len(emojis_list) == len(images_list) == len(texts_list)):
        print("Lists are not the same size!")
        return sentiment_scores
    for emoji_score, image_score, text_score in zip(emojis_list, images_list, texts_list):
        if emoji_score:
            sentiment_scores.append(emoji_score)
        elif text_score:
            sentiment_scores.append(text_score)
        elif image_score:
            sentiment_scores.append(image_score)
        else:
            sentiment_scores.append(None)
    return sentiment_scores
def alreadyExist(movieDict):
    """
    Check whether the movie described by *movieDict* already exists in the
    database.

    This is a stub for the public repository: the real db lookup is
    commented out below, so it currently always reports "not present".

    Parameters
    ----------
    movieDict : dict containing the movie's info

    Returns
    -------
    bool : True when movieDict already exists in the db (always False here)
    """
    return False
    # Uncomment below to check if this movie is already in database
    # cursor.execute('SELECT movID, pbmovname, hashX FROM'
    #                'subtitleserver_movies WHERE imdbID = %(imdbID)s',
    #                movieDict)
    # row = cursor.fetchone()
    # if row is not None:
    #     movieDict['movID_old'] = row[0]
    #     return True
    # else:
    #     return False
def exchange(a, b, dancers):
    """
    Swap the dancers at positions *a* and *b* and return the new lineup.

    Parameters
    ----------
    a : int
        Index of the first character to be swapped
    b : int
        Index of the second character to be swapped
    dancers : sequence of characters

    Returns
    -------
    str : the lineup with the two positions exchanged
    """
    lineup = list(dancers)
    lineup[a], lineup[b] = lineup[b], lineup[a]
    return ''.join(lineup)
def find_the_ball(start, swaps):
    """Track the cup holding the ball through a sequence of (i, j) swaps."""
    position = start
    for left, right in swaps:
        if position == left:
            position = right
        elif position == right:
            position = left
    return position
def fbkup(fname):
    """
    Copy the file at *fname* to a sibling backup file.

    The backup keeps the original name with '-fbkup' inserted before the
    extension (e.g. ``notes.txt`` -> ``notes-fbkup.txt``) and preserves the
    file type. Returns the backup filename.

    Fixes over the original: both file handles are closed via ``with``
    (they were previously leaked), and a name without a dot no longer
    raises IndexError — it simply gets ``-fbkup.`` appended.
    """
    # Split on the first dot, mirroring the original split('.', 1).
    base, _dot, ext = fname.partition('.')
    backup_name = '%s-fbkup.%s' % (base, ext)
    with open(fname, 'r') as src, open(backup_name, 'w') as dst:
        dst.write(src.read())
    return backup_name
def find_column(text, index):
    """Return the 1-based column of *index* within its line in *text*.

    Standard lexer helper: find the last newline before *index* and offset
    from it. NOTE(review): for lines after the first, the column of the
    first character comes out as 2 (the newline index itself is counted);
    this mirrors the widely-copied PLY recipe and is preserved as-is.
    """
    newline_pos = text.rfind("\n", 0, index)
    if newline_pos < 0:
        newline_pos = 0
    return (index - newline_pos) + 1
def cria_posicao(col, lin):
    """
    cria_posicao: str x str -> posicao

    Build a board position from a column letter ('a'-'c') and a row digit
    ('1'-'3'), encoded as a one-element list holding 1..9.
    Raises ValueError for any other arguments.
    """
    column_index = {'a': 1, 'b': 2, 'c': 3}
    valid = (isinstance(col, str) and isinstance(lin, str)
             and col in column_index and lin in ('1', '2', '3'))
    if valid:
        return [column_index[col] + 3 * (int(lin) - 1)]
    raise ValueError('cria_posicao: argumentos invalidos')
def _canonical_to_natural(mu, sigma_squared): """convert from canonical to natural gaussian parameters""" n1 = mu / sigma_squared n2 = -0.5 * 1 / sigma_squared return n1, n2
def rgb2rgba(rgb):
    """Expand a row of RGB bytes to RGBA by appending a 255 alpha per pixel."""
    out = []
    for offset in range(0, len(rgb), 3):
        out.extend(rgb[offset:offset + 3])
        out.append(255)
    return out
def get_benchmark_name(project, fuzz_target, benchmark_name=None):
    """Return *benchmark_name* if given, else '<project>_<fuzz_target>'."""
    if benchmark_name:
        return benchmark_name
    return '%s_%s' % (project, fuzz_target)
def collect_package_stats(packages, cwes, filter):
    """
    Collect coverage statistics for packages matching *filter*.

    *filter* is a predicate over package names (e.g. matching packages to
    frameworks, or packages not yet processed). The returned tuple feeds a
    single CSV row: (sources, steps, sinks, per-CWE sink counts,
    processed package names).
    """
    sources = steps = sinks = 0
    framework_cwes = {}
    processed_packages = set()
    for name, info in packages.items():
        if not filter(name):
            continue
        processed_packages.add(name)
        sources += int(info["kind"].get("source:remote", 0))
        steps += int(info["part"].get("summary", 0))
        sinks += int(info["part"].get("sink", 0))
        for cwe, cwe_info in cwes.items():
            sink_key = "sink:" + cwe_info["sink"]
            if sink_key in info["kind"]:
                framework_cwes[cwe] = framework_cwes.get(cwe, 0) + int(info["kind"][sink_key])
    return sources, steps, sinks, framework_cwes, processed_packages
def rightrotate(x, c):
    """Right-rotate the 32-bit value ``x`` by ``c`` bits.

    (The original docstring said "bytes", but the shifts operate on bits.)
    ``x`` is masked to 32 bits before and after the rotation so arbitrary
    Python ints are handled. NOTE(review): ``c`` is assumed to be in 1..31;
    ``c == 0`` would shift left by 32 — harmless for Python ints but worth
    confirming against callers.
    """
    x &= 0xFFFFFFFF
    return ((x >> c) | (x << (32 - c))) & 0xFFFFFFFF
def min_subset_sum_difference(numbers):
    """
    Minimum achievable difference between the sums of two subsets that
    partition *numbers*.

    Parameters
    ----------
    numbers : list
        A list of positive numbers

    Returns
    -------
    int
        the minimum difference between subsets of the list

    >>> min_subset_sum_difference([1, 2, 3, 9])
    3
    """
    total = len(numbers)

    def explore(pos, left_sum, right_sum):
        # Exhausted the list: the split is fixed, report its imbalance.
        if pos == total:
            return abs(left_sum - right_sum)
        value = numbers[pos]
        with_left = explore(pos + 1, left_sum + value, right_sum)
        with_right = explore(pos + 1, left_sum, right_sum + value)
        return min(with_left, with_right)

    return explore(0, 0, 0)
def length_score(x1, y1, x2, y2, d1, d2):
    """Score the length of the cutting line (x1, y1)->(x2, y2) made at
    concavity depths d1 and d2.

    Returns
    -------
    Score : float
        Length score per equation (5) of Weinert et al., "Detection and
        Segmentation of Cell Nuclei in Virtual Microscopy Images",
        Nature Scientific Reports 2:503, doi:10.1038/srep00503, 2012.

    See Also
    --------
    SplitConcavities
    """
    # Euclidean length of the cut, normalized by total span across the hull.
    dx = x1 - x2
    dy = y1 - y2
    cut_length = (dx * dx + dy * dy) ** 0.5
    return cut_length / (cut_length + d1 + d2)
def genInvSBox(SBox):
    """
    genInvSBox - generate the inverse of a 256-entry SBox.

    Args:
        SBox: The SBox to invert (a permutation of 0..255).

    Returns:
        The inverse SBox, i.e. Inv[SBox[i]] == i for all i.
    """
    inverse = [0] * 0x100
    for index, mapped in enumerate(SBox[:0x100]):
        inverse[mapped] = index
    return inverse
def cleanchrom(chrom):
    """Strip a trailing newline and surrounding double quotes from a
    chromosome label (support function for wig-to-countslist conversion).

    Fix over the original: an empty string (or a line that is only a
    newline) no longer raises IndexError from indexing ``chrom[0]`` /
    ``chrom[-1]`` — it is returned as ''.
    """
    if chrom.endswith("\n"):
        # strip() (not rstrip) matches the original behavior exactly.
        chrom = chrom.strip()
    if chrom.startswith('"'):
        chrom = chrom[1:-1]
    return chrom
def checksum(raw):
    """Sum the byte values of *raw* modulo 256 and return it as two
    uppercase hex digits. Bytes input is decoded as UTF-8 first."""
    if isinstance(raw, bytes):
        raw = raw.decode('utf-8')
    total = sum(ord(ch) for ch in raw)
    digits = hex(total & 255)[2:].upper()
    return digits if len(digits) >= 2 else '0' + digits
def threeSum(nums):
    """
    Return all unique triplets from *nums* that sum to zero.

    :type nums: List[int]
    :rtype: List[List[int]]
    """
    ordered = sorted(nums)
    n = len(ordered)
    triplets = []
    for anchor in range(0, n):
        lo, hi = anchor + 1, n - 1
        target = -ordered[anchor]
        while lo < hi:
            # Shrink from the right while the pair overshoots the target.
            while lo < hi and ordered[hi] + ordered[lo] > target:
                hi -= 1
            # Advance from the left while the pair undershoots it.
            while lo < hi and ordered[hi] + ordered[lo] < target:
                lo += 1
            # Collect exact matches, skipping triplets already recorded.
            while lo < hi and ordered[lo] + ordered[hi] == target:
                candidate = [ordered[anchor], ordered[lo], ordered[hi]]
                if candidate not in triplets:
                    triplets.append(candidate)
                hi -= 1
    return triplets
def convert_temp_unit(temp_value, temp_unit):
    """Convert *temp_value* to *temp_unit*: 'C' converts F->C, 'F' converts
    C->F. Returns a rounded int, or None for an unrecognized unit."""
    if temp_unit == 'C':
        return round((temp_value - 32) / 1.8)
    if temp_unit == 'F':
        return round(temp_value * 1.8 + 32)
    return None
def cut_coords(name):
    """Select appropriate cut coordinates for the named display mode."""
    if name == 'mosaic':
        return 3
    two_axis = {'yx', 'yz', 'xz'}
    one_axis = {'lyrz', 'lyr', 'lzr'}
    four_axis = {'lr', 'l'}
    if name in two_axis:
        return (0, 0)
    if name in one_axis:
        return (0,)
    if name in four_axis:
        return (0, 0, 0, 0)
    return (0, 0, 0)
def find_class(_globals, _name, _raise_error=True):
    """Case-insensitively look up a class named *_name* in *_globals* and
    instantiate it.

    Returns (instance, actual_name). The literal name "list" is handled as
    a builtin and returns ([], "list"). When nothing matches and
    *_raise_error* is True, an Exception is raised.
    """
    try:
        instance = None
        matched_name = None
        for candidate_name, candidate in _globals.items():
            if candidate_name.lower() == _name.lower():
                instance = candidate()
                matched_name = candidate_name
                break
        # Python types
        if _name == "list":
            return [], "list"
    except Exception as e:
        raise Exception("meta - find_class: Error looking for " + str(_name) + " : " + str(e))
    if instance is None and _raise_error:
        raise Exception("meta - find_class: Cannot find matching class - " + _name)
    return instance, matched_name
def max_vehicle(service_lane, i, j):
    """
    Largest vehicle that fits through lane segments [i, j): the narrowest
    width in that slice.

    :type service_lane: list[int]
    :type i: int
    :type j: int
    :rtype: int
    """
    segment = service_lane[i:j]
    return min(segment)
def flatten(nested_list):
    """Flatten one level of nesting: a list of lists becomes a single list."""
    flat = []
    for inner in nested_list:
        flat.extend(inner)
    return flat
def check_for_mostly_numeric_string(token):
    """
    Check whether the token is mostly numerical.

    :param token: A single text token.
    :return: True when the token has strictly more numeric characters than
             alphabetic ones; other characters are ignored.
    """
    numeric_count = sum(1 for ch in token if ch.isnumeric())
    alpha_count = sum(1 for ch in token if ch.isalpha())
    return numeric_count > alpha_count
def get_duration(ffinfo):
    """
    Return the stream duration in seconds from ffprobe JSON output.

    mkv containers lack a top-level "duration" but may carry
    tags:DURATION formatted like "00:03:20.720000000"; fall back to
    parsing that clock string. Unparseable durations yield 0.
    """
    raw = ffinfo.get("duration")
    if raw is not None:
        try:
            return float(raw)
        except Exception:
            return 0
    zero_clock = "00:00:00.000000000"
    clock = ffinfo.get("tags", {"DURATION": zero_clock}).get("DURATION", zero_clock)
    fields = [float(piece) for piece in clock.split(":")]
    return sum(factor * value for value, factor in zip(fields, [3600, 60, 1]))
def can_infer(num_sensed_blocked: int, num_confirmed_blocked: int,
              num_sensed_unblocked: int, num_confirmed_unblocked: int):
    """
    Check whether anything new can be inferred from the current counts.

    :param num_sensed_blocked: number of sensed blocks
    :param num_confirmed_blocked: number of confirmed blocks
    :param num_sensed_unblocked: number of sensed unblocks
    :param num_confirmed_unblocked: number of confirmed unblocks
    :return: True if one side is fully confirmed while the other still has
             unconfirmed cells, otherwise False
    """
    # Precondition: confirmed counts can never exceed sensed counts.
    assert (num_sensed_blocked >= num_confirmed_blocked) and (num_sensed_unblocked >= num_confirmed_unblocked)
    blocked_resolved = num_sensed_blocked == num_confirmed_blocked
    unblocked_resolved = num_sensed_unblocked == num_confirmed_unblocked
    if blocked_resolved and not unblocked_resolved:
        return True
    if unblocked_resolved and not blocked_resolved:
        return True
    return False
def rotate_delta(delta, index):
    """Apply one of the 24 axis-aligned 3D rotations to the vector *delta*.

    There are 24 rotational orientations of a cube (6 faces * 4 spins);
    *index* in 0..23 selects one. Each branch permutes/negates the
    components (dx, dy, dz) accordingly. The author derived the table with
    https://danceswithcode.net/engineeringnotes/rotations_in_3d/demo3D/rotations_in_3d_tool.html

    :param delta: 3-tuple (dx, dy, dz)
    :param index: rotation index, 0..23 (any value > 22 falls through to
                  the final case)
    :return: rotated 3-tuple; index 0 returns *delta* unchanged
    """
    dx, dy, dz = delta
    # index 0 is the identity rotation.
    if index == 0:
        return delta
    elif index == 1:
        return dx, -dz, dy
    elif index == 2:
        return dx, -dy, -dz
    elif index == 3:
        return dx, dz, -dy
    elif index == 4:
        return -dy, dx, dz
    elif index == 5:
        return dz, dx, dy
    elif index == 6:
        return dy, dx, -dz
    elif index == 7:
        return -dz, dx, -dy
    elif index == 8:
        return -dx, -dy, dz
    elif index == 9:
        return -dx, dz, dy
    elif index == 10:
        return -dx, dy, -dz
    elif index == 11:
        return -dx, -dz, -dy
    elif index == 12:
        return dy, -dx, dz
    elif index == 13:
        return -dz, -dx, dy
    elif index == 14:
        return -dy, -dx, -dz
    elif index == 15:
        return dz, -dx, -dy
    elif index == 16:
        return dz, dy, -dx
    elif index == 17:
        return dy, -dz, -dx
    elif index == 18:
        return -dz, -dy, -dx
    elif index == 19:
        return -dy, dz, -dx
    elif index == 20:
        return -dz, dy, dx
    elif index == 21:
        return -dy, -dz, dx
    elif index == 22:
        return dz, -dy, dx
    else:
        # catch-all: index 23 (and anything larger).
        return dy, dz, dx
def order(alpha, p):
    """
    Determine the multiplicative order of an element mod a prime.

    :param alpha: the element we want to find the order of
    :param p: the prime modulus
    :returns: smallest k >= 1 with alpha**k == 1 (mod p)

    NOTE(review): loops forever if alpha is not invertible mod p
    (e.g. alpha % p == 0) — assumed excluded by callers.
    """
    exponent = 1
    power = alpha
    while power != 1:
        power = (power * alpha) % p
        exponent += 1
    return exponent
def valid_struct_ptr(file_offset):
    """
    Check that *file_offset* is neither 0 nor the pre-relocation null magic.

    Relocation patches for NULL pointers haven't happened yet, so compare
    against the null magic; a malicious file could skip relocations
    (swisscheese/extracheese), so check for a plain NULL as well.
    """
    return file_offset not in (0xfffabada, 0)
def read_mutations(mutations_file):
    """Load the mutations file into a list of lines (decoding errors ignored)."""
    with open(mutations_file, encoding="utf8", errors="ignore") as handle:
        mutations = handle.read().splitlines()
    print("[+] Mutations list imported: {} items".format(len(mutations)))
    return mutations
def get_value_from_request(request, param, default, dtype=int):
    """Fetch *param* from ``request.GET`` and coerce it with *dtype*.

    Returns *default* when the parameter is missing (in which case the
    default itself is coerced) or when conversion fails.

    Fix over the original: the bare ``except:`` also swallowed
    KeyboardInterrupt/SystemExit; only conversion errors are caught now.
    """
    try:
        return dtype(request.GET.get(param, default))
    except (TypeError, ValueError):
        return default
def graph_percentages(a_arguments: int, b_arguments: int, a_votes: int, b_votes: int):
    """Compute battle-graph percentages for sides A and B.

    Each side's percentage is the rounded arithmetic average of its share
    of total arguments and its share of total upvotes:
    (argument % + vote %) / 2. Empty totals contribute zero.
    """
    side_a = 0
    side_b = 0
    total_arguments = a_arguments + b_arguments
    total_upvotes = a_votes + b_votes
    if total_arguments > 0:
        per_argument = 100 / total_arguments
        side_a = a_arguments * per_argument
        side_b = b_arguments * per_argument
    if total_upvotes > 0:
        per_upvote = 100 / total_upvotes
        side_a = (side_a + a_votes * per_upvote) / 2
        side_b = (side_b + b_votes * per_upvote) / 2
    return {"percent_a": round(side_a), "percent_b": round(side_b)}
def is_lemma_string(lemma):
    """Return *lemma* when it is a str, otherwise an empty string.

    NOTE(review): despite the "is_" name this returns a string, not a
    bool — preserved for caller compatibility.
    """
    return lemma if isinstance(lemma, str) else ""
def get_wrong(answer, correct_answer):
    """Count mistakes between two answer sets: extra picks plus missed picks."""
    extra = answer - correct_answer
    missed = correct_answer - answer
    return len(extra) + len(missed)
def point_to_index(point, origin, spacing):
    """Transform image-space point coordinates to (fractional) voxel indices."""
    return tuple((p - o) / s for p, o, s in zip(point[:3], origin[:3], spacing[:3]))
def seconds(s):
    """Convert a time string to a number of seconds. e.g. '100ps' -> 1e-10.

    Plain numbers pass straight through; otherwise unit tokens are
    rewritten into arithmetic and evaluated. Unparseable input yields nan.
    """
    from numpy import nan
    try:
        return float(s)
    except Exception:
        pass
    # Order matters: 'min' must be consumed before the 'm' (milli) rule,
    # and 's' is removed before the single-letter prefixes.
    rewrites = (
        ("min", "*60"),
        ("h", "*60*60"),
        ("d", "*60*60*24"),
        ("s", ""),
        ("p", "*1e-12"),
        ("n", "*1e-9"),
        ("u", "*1e-6"),
        ("m", "*1e-3"),
    )
    expr = s
    for token, replacement in rewrites:
        expr = expr.replace(token, replacement)
    try:
        return float(eval(expr))
    except Exception:
        return nan
def ReadFile(filename):
    """Read a file in one step.

    Args:
        filename: Filename to read.

    Returns:
        Bytes containing the complete file (opened in binary mode).

    Fix over the original: the handle is closed via ``with`` even when
    ``read()`` raises; previously an exception leaked the open file.
    """
    with open(filename, 'rb') as fh:
        return fh.read()
def deconstruct_keyval_reporting(entry):
    """
    Parse a reporting string '{colname}:{oldval}->{newval}'.

    Args:
        entry, str. A string of the form '{colname}:{oldval}->{newval}'.
            colname should be an all upper case column name. oldval and
            newval can include any string characters except the specific
            combination "->".

    Returns:
        key, str. The string that precedes the initial colon (column name).
        val1, str. The string between the initial colon and '->' (old value).
        val2, str. The string after '->' (new value).

    Raises:
        ValueError when either ':' or '->' is missing.
    """
    # Rudimentary shape check before splitting.
    if ':' not in entry or '->' not in entry:
        raise ValueError("Entry must be of the form {key}:{oldval}->{newval}. Exiting")
    key, _, remainder = entry.partition(':')
    # remainder may itself contain colons; only the first was the delimiter.
    val1, val2 = remainder.split("->")
    return key, val1, val2
def recall_at_position_k_in_10(sort_data, k):
    """Evaluate recall@k: fraction of all positive labels found in the top k.

    *sort_data* is a ranked list of (score, label) pairs with label 1 for
    positives. NOTE(review): raises ZeroDivisionError when there are no
    positives — assumed excluded by callers.
    """
    labels = [pair[1] for pair in sort_data]
    hits_in_top_k = labels[:k].count(1)
    return 1.0 * hits_in_top_k / labels.count(1)
def process_operator_filter(field, value):
    """Resolve a mongo-style operator suffix (e.g. ``name__in``, ``pay__gte``).

    Args:
        field (str): Field name, possibly suffixed with ``__<operator>``.
        value: Value forwarded to the operator handler.

    Returns:
        dict: Result of ``<operator>_operator(field, value)`` when such a
        callable exists in this module's globals; otherwise ``{}``.

    Fix over the original: ``len(params) is not 2`` was an identity
    comparison against an int literal (a SyntaxWarning that only worked by
    CPython small-int caching); replaced with ``!=``.
    """
    params = field.split('__')
    if len(params) != 2:
        return {}
    field, operator = params
    operator_func = globals().get(operator + '_operator')
    if not callable(operator_func):
        return {}
    return operator_func(field, value)
def f_dot_product(vector1, vector2):
    """Return the dot product of two 3D vectors."""
    return sum(a * b for a, b in zip(vector1, vector2))
def generate_ftb_line(frequency, shots, **kwargs):
    """
    Generate one FTB batch line for a frequency, plus categorization tests.

    kwargs are appended as additional ``key:value`` options for the ftb
    batch (magnet, dipole, atten, skiptune, drfreq, drpower, cal, ...).

    :param frequency: float, frequency in MHz
    :param shots: int, number of shots to integrate
    :return: formatted line terminated with a newline
    """
    tokens = ["ftm:{:.4f} shots:{}".format(frequency, shots)]
    tokens.extend("{}:{}".format(key, value) for key, value in kwargs.items())
    return " ".join(tokens) + "\n"
def even_spread(M, N):
    """Return N partition sizes spreading M elements as evenly as possible.

    Each output size is M//N or M//N + 1, with the larger sizes first.

    Args:
        M: number of elements
        N: number of partitions

    Returns:
        target_sizes: [int] with len == N and sum == M. N == 0 requires
        M == 0 (asserted).
    """
    if N == 0:
        assert M == 0
        return []
    base, remainder = divmod(M, N)
    return [base + 1] * remainder + [base] * (N - remainder)
def VirtualTempFromMixR(tempk, mixr):
    """Virtual temperature approximation.

    INPUTS:
        tempk: Temperature (K)
        mixr: Mixing Ratio (kg/kg)

    OUTPUTS:
        tempv: Virtual temperature (K)

    Approximation Tv ~= T * (1 + 0.6 * w); see e.g. Wikipedia's virtual
    temperature article.
    """
    correction = 1.0 + 0.6 * mixr
    return tempk * correction
def select(q_sols, q_d, w=None):
    """Select the optimal solution among feasible joint-value solutions.

    Args:
        q_sols: A set of feasible joint value solutions (unit: radian)
        q_d: A list of desired joint value solution (unit: radian)
        w: A list of weights corresponding to robot joints; defaults to
           all ones.

    Returns:
        The solution from q_sols with minimal weighted squared distance
        to q_d.

    Fix over the original: ``w=[1]*6`` was a mutable default argument
    (shared across calls); replaced with the None-sentinel idiom.
    """
    if w is None:
        w = [1] * 6
    errors = [
        sum(w[i] * (q[i] - q_d[i]) ** 2 for i in range(6))
        for q in q_sols
    ]
    return q_sols[errors.index(min(errors))]
def get_int_metrics(line):
    """Parse the second space-separated token of *line* as an integer metric."""
    second_token = line.split(" ")[1]
    return int(second_token)
def isSPRelative(uri: str) -> bool:
    """Check whether a URI is SP-Relative: starts with exactly one '/'
    and has at least one character after it."""
    if uri is None or len(uri) < 2:
        return False
    return uri[0] == '/' and uri[1] != '/'
def compare_file(file1, file2):
    """Compare two files line by line; True when their contents match."""
    with open(file1, 'r') as left, open(file2, 'r') as right:
        while True:
            line_left = left.readline()
            line_right = right.readline()
            if line_left != line_right:
                return False
            # Both exhausted at the same point: files are identical.
            if not line_left and not line_right:
                return True
def truncate_seq_pair_test(tokens_a, max_length):
    """Truncate *tokens_a* to at most *max_length* tokens, keeping the tail.

    Returns the same list object when it already fits; otherwise a new
    list holding the last ``max_length`` tokens.

    Fixes over the original: removed a ``while True`` loop that always
    terminated after one truncation, a redundant ``.copy()`` of an
    already-new slice, and a docstring that falsely claimed in-place,
    one-token-at-a-time truncation.
    """
    if len(tokens_a) <= max_length:
        return tokens_a
    return tokens_a[-max_length:]
def split(string, sep=' '):
    """split(string, [separator]) -> [string]

    Split *string* on *sep* (default: a single space).
    """
    pieces = string.split(sep)
    return pieces
def uniq(elems: list) -> dict:
    """Deduplicate *elems* into a ``{identifier: object}`` dictionary.

    Objects must be comparable via ``__eq__`` and expose a writable
    ``identifier`` attribute. The first occurrence of each distinct object
    is stored under a fresh integer id; later duplicates have their
    ``identifier`` set to the id of the stored equivalent (side effect).

    Args:
        elems: an input list of objects

    Returns:
        A dictionary ``{identifier: object}``.
    """
    registry = {}
    next_id = 0
    for item in elems:
        match_id = None
        # Linear scan over previously registered objects.
        for known_id, known in registry.items():
            if item == known:
                match_id = known_id
                break
        if match_id is not None:
            # Duplicate: link it to the already-registered object.
            item.identifier = match_id
        else:
            item.identifier = next_id
            registry[next_id] = item
            next_id += 1
    return registry
def required_fit_points(order: int, tangents=True) -> int:
    """
    Count of fit points required to compute the spline control points.

    Args:
        order: spline order (degree + 1)
        tangents: start- and end-tangent are given or estimated

    When tangents are supplied (or estimated), two points for the start and
    end tangents are added automatically by the global bspline
    interpolation (see fit_points_to_cad_cv()), so two fewer fit points are
    needed. The result is never below 2 (required: order > count, see
    global_bspline_interpolation()).
    """
    effective = order - 2 if tangents else order
    return max(effective, 2)
def apply_transform(base, transform, num_pos):
    """
    Permute *base* according to *transform*.

    :param num_pos: length of the base and transform arrays
    :param base: 1-D array to be transformed
    :param transform: 1-D array of source indices to apply
    """
    return [base[source] for source in transform[:num_pos]]
def terminate(status, dt):
    """
    Finish execution: log the outcome and pass *status* back unchanged.

    Parameters
    ----------
    status : tuple
        Tuple with 2 cells containing status text [0] and status code [1].
    dt : float
        Seconds since function start.

    Returns
    -------
    status : tuple
        The same tuple that was passed in.
    """
    # console out
    print('-- Execution ended at {:.3f}s with status {}.'.format(dt, status))
    print('END' + '-' * 50)
    return status
def f_to_sint(f, x_width, clean1=False):
    """
    Convert a float in [-1, 1] to a signed integer of *x_width* bits.

    When *clean1* is True the scale makes 1.0 map to a single set bit
    (0100000 rather than 0111111), so multiplying by 1 followed by a
    down-shift leaves the result unchanged.

    Raises ValueError when f is outside [-1, 1].
    """
    if not -1 <= f <= 1:
        raise ValueError("The tap must be between -1 and 1.")
    scale = pow(2, x_width - 2) if clean1 else pow(2, x_width - 1) - 1
    return int(round(f * scale))
def is_ascii(string: str) -> bool:
    """Determine whether every character of *string* is ASCII."""
    try:
        string.encode('ascii')
    except UnicodeEncodeError:
        return False
    return True
def get_page_rows(parsed_dictionary):
    """Extract the screener result rows from the parsed page dictionary."""
    store = parsed_dictionary['context']['dispatcher']['stores']['ScreenerResultsStore']
    return store['results']['rows']
def convert_dimensions(dimensions):
    """Convert a comma-separated V3 dimensions string into V4 Dimension dicts."""
    out = []
    for name in dimensions.split(','):
        out.append({'name': name})
    return out
def idx_or_default(arr, idx, default):
    """
    Like ``dict.get`` but for lists: ``arr[idx]`` when the index is valid,
    otherwise *default*. Negative indices work as usual.
    """
    try:
        return arr[idx]
    except IndexError:
        return default
def parse_data_slice(data_slice):
    """Parse a dataslice spec into a list of slice objects.

    Accepts None (returned as-is), an existing list/tuple of slices
    (copied into a list), or a string such as "0:10, ::2".
    """
    if data_slice is None:
        return data_slice
    if isinstance(data_slice, (list, tuple)) and \
            all(isinstance(piece, slice) for piece in data_slice):
        return list(data_slice)
    assert isinstance(data_slice, str)
    # Drop whitespace, then handle each comma-separated dimension spec.
    compact = data_slice.replace(' ', '')
    parsed = []
    for spec in compact.split(','):
        fields = spec.split(':')
        if len(fields) == 2:
            begin, end, stride = fields[0], fields[1], None
        elif len(fields) == 3:
            begin, end, stride = fields
        else:
            raise RuntimeError
        begin = int(begin) if begin != '' else None
        end = int(end) if end != '' else None
        stride = int(stride) if stride is not None and stride != '' else None
        parsed.append(slice(begin, end, stride))
    return parsed
def dot(s, t):
    """Return the mod-2 dot product of two n-bit strings s and t."""
    return sum(a * b for a, b in zip(s, t)) % 2
def override_ascii_lower(string):
    """Identity transform used to suppress cssselect's lowercasing.

    The UTF-8 round-trip is kept (rather than returning the input
    directly) so lone surrogates still raise, exactly as before.
    """
    round_tripped = string.encode('utf8').decode('utf8')
    return round_tripped
def ackermann(m, n):
    """Compute the Ackermann function A(m, n).

    See http://en.wikipedia.org/wiki/Ackermann_function

    n, m: non-negative integers
    """
    if m == 0:
        return n + 1
    if n == 0:
        return ackermann(m - 1, 1)
    inner = ackermann(m, n - 1)
    return ackermann(m - 1, inner)
def program_complete_program_page(context, read_only=False):
    """
    Build the template payload for the program-complete page.

    Pulls the program dict from *context* and flattens the fields used by
    the completion graphic; ``read_only`` is rendered as the strings
    'true'/'false'.
    """
    program = context['program']
    payload = {
        'program.id': program['pk'],
        'program.start_date': program['start_date'],
        'program.end_date': program['end_date'],
        'program.reporting_period_start': program['reporting_period_start_iso'],
        'program.reporting_period_end': program['reporting_period_end_iso'],
        'program.percent_complete': program['percent_complete'],
    }
    payload['read_only'] = 'true' if read_only else 'false'
    return payload
def footer() -> str:
    """
    Return the footer of the LaTeX document.

    :return: LaTeX document footer.
    """
    closing = "\\end{tikzcd}\n\\end{document}"
    return closing
def extract_suggest_items(res) -> list:
    """Reformat Elasticsearch search hits into the suggest-item shape."""
    hits = res['hits']['hits']
    if not hits:
        return []
    return [
        {
            'page_title': hit['_source']['page_title'],
            'person_link': hit['_source']['person_link'],
            'photo_link': hit['_source']['photo_url'],
        }
        for hit in hits
    ]
def metadata_matches_requested_config(metadata_dict, requested_config_dict):
    """Return True iff every requested parameter is found in *metadata_dict*
    with a matching (string-compared) value."""
    return all(str(metadata_dict[key]) == value
               for key, value in requested_config_dict.items())
def sg_or_pl(number):
    """Return the plural suffix for *number*: '' when it is exactly 1,
    's' otherwise.

    Fix over the original: numbers below 1 (0, negatives) previously fell
    through and returned None, which made string concatenation crash; they
    now pluralize ("0 items"), matching English usage.
    """
    return '' if number == 1 else 's'
def bisection(f, a, b, TOL, N):
    """
    Find a root of f(x) = 0 on [a, b] by bisection, given that f(a) and
    f(b) have opposite signs.

    INPUT:  function f; endpoints a, b; tolerance TOL; maximum number of
            iterations N.
    OUTPUT: an approximate root p, or None (with a message) when f(a) and
            f(b) do not have opposite signs. A failure message is printed
            when N iterations pass without reaching TOL.

    Fix over the original: the failure message was unreachable — the loop
    counter ended at N+1 but was compared with ``== N`` (and a success on
    the final iteration would have printed the failure spuriously). A
    convergence flag now drives that message.
    """
    if f(a) * f(b) >= 0:
        print("f(a) and f(b) need to have opposite signs\n")
        return
    FA = f(a)
    p = a
    converged = False
    for _ in range(N):
        p = a + (b - a) / 2  # midpoint of the current bracket
        FP = f(p)
        if FP == 0.0 or (b - a) / 2 < TOL:
            converged = True
            break
        if FA * FP > 0:
            a = p  # root lies in the right half; FA updates with a
            FA = FP
        else:
            b = p  # root lies in the left half; FA is unchanged
    if not converged:
        print(f'Method failed after {N} iterations')
    return p
def is_ascii(string):
    """Check that every character of *string* has an ordinal below 128."""
    for ch in string:
        if ord(ch) >= 128:
            return False
    return True
def is_sorted(lst):
    """Check that *lst* is sorted in non-decreasing order (empty is sorted)."""
    if not lst:
        return True
    return all(a <= b for a, b in zip(lst, lst[1:]))
def parse_date(yyyymmdd):
    """Split a 'yyyymmdd' string into the tuple (yyyy, mm, dd).

    Slicing is anchored from the right, so longer year prefixes survive.
    """
    year, month, day = yyyymmdd[:-4], yyyymmdd[-4:-2], yyyymmdd[-2:]
    return (year, month, day)
def _expand_dims_nonnegative_axis(axis, rank): """Get the nonnegative axis according to the rules of tf.expand_dims.""" # Implementation note: equivalent to get_positive_axis(axis, rank + 1) if axis < 0: new_axis = (1 + rank) + axis if new_axis < 0: # Note: this is unreachable in the current code. raise ValueError("Axis out of range: " + str(axis)) return new_axis elif axis > rank: # Note: this is unreachable in the current code. raise ValueError("Axis larger than rank: " + str(axis) + " > " + str(rank)) return axis
def benchmark_headers(benchmark_data):
    """
    Convert benchmark data from calls to benchmark() into a dictionary of
    HTTP headers, to be fed into a request handler.

    :param benchmark_data: mapping from category name to a value or a
        list of values.
    :return: dict mapping each category with at least one value to a
        comma-joined string of its stringified values.
    """
    # Bug fix: the original tested ``type(x) != type(list)``. Since
    # ``type(list)`` is ``type`` itself, that check was true for every
    # ordinary value — including actual lists, which were then wrapped
    # again and rendered as "[1, 2]" instead of "1,2". It also mutated
    # the caller's dict in place; this version leaves the input alone.
    ret = {}
    for category, values in benchmark_data.items():
        if not isinstance(values, list):
            values = [values]
        if values:
            ret[category] = ",".join(str(x) for x in values)
    return ret
def isMessageBody(line: str) -> bool:
    """
    Return True if *line* contains message content: it is neither blank
    (whitespace-only) nor a comment line (first non-whitespace character
    is '#').

    Note: an empty string counts as message body, since ``''.isspace()``
    is False.

    Doc fix: the original docstring said lines containing '#' are treated
    as body text, which is the opposite of what the code does.
    """
    return not (line.isspace() or line.lstrip().startswith('#'))
def to_string(pos):
    """Convenience function to convert a position to string.

    Formats the 'x' and 'y' entries of *pos* as "x:y".
    """
    x = pos["x"]
    y = pos["y"]
    return f"{x}:{y}"
def unwrap(func):
    """
    Fully unwrap a wrapped object by following the ``__wrapped__`` chain.

    :param func: Function to unwrap
    :return: Unwrapped function
    """
    target = func
    while hasattr(target, "__wrapped__"):
        target = target.__wrapped__
    return target
def cint(s, default=0):
    """Convert to integer

    :param s: Number in string or other numeric format.
    :param default: Value returned when conversion fails.
    :returns: Converted number in python integer type. Returns default if
        input can not be converted to integer.

    Examples:
            >>> cint("100")
            100
            >>> cint("a")
            0
    """
    try:
        value = int(float(s))
    except Exception:
        return default
    return value
def ligand_unbound(pdb_code):
    """Augment pdb code with ligand partner and unbound binding notation."""
    return f"{pdb_code}_l_u"
def getFibonacciRecursive(n: int) -> int:
    """
    Calculate the fibonacci number at position n recursively.

    Uses an accumulator-passing helper instead of mutating enclosing-scope
    state, so each call is self-contained.
    """
    def advance(remaining: int, current: int, following: int) -> int:
        # current holds F(k); after one step it holds F(k + 1).
        if remaining <= 0:
            return current
        return advance(remaining - 1, following, current + following)

    return advance(n, 0, 1)
def lisp_parens_with_count(parens):
    """Classify a paren string: 1 = still open, -1 = broken, 0 = balanced.

    :param parens: string whose '(' and ')' characters are checked; all
        other characters are ignored.
    :return: -1 as soon as a ')' appears with no matching '(' before it,
        0 when every '(' is closed, 1 when some '(' remain open.
    """
    open_count = 0
    for char in parens:
        if char == '(':
            # Bug fix: the original had a bare ``open_count`` statement
            # here (a no-op expression), so '(' was never counted and
            # e.g. "(" was wrongly reported as balanced.
            open_count += 1
        elif char == ')':
            open_count -= 1
        if open_count < 0:
            # A closer appeared before any matching opener.
            return -1
    return 0 if open_count == 0 else 1
def replace_definitions(definition, obj_map):
    """Replace numerator/denominator tokens with readable descriptions.

    :param definition: string containing placeholder tokens.
    :param obj_map: mapping from placeholder token to a dict with a
        'desc' entry holding the human-readable replacement.
    :return: definition with every token replaced by its description.
    """
    # Fix: the original iterated ``.items()`` but ignored the yielded
    # value ``j`` and looked it up again via ``obj_map[i]``; use the
    # value directly.
    for token, obj in obj_map.items():
        definition = definition.replace(token, '{}'.format(obj['desc']))
    return definition
def bubble_sort(lst: list) -> list:
    """Sort a list in ascending order.

    The original list is mutated and returned. The sort is stable.

    Design idea: repeatedly swap adjacent out-of-order elements; each full
    pass floats the largest remaining element toward the end, so at most
    n passes are needed.

    Complexity: O(n^2) time, O(1) space. Stable and in-place. See
    quicksort, merge sort and heapsort for sorting algorithms with a
    better time complexity.
    """
    n = len(lst)
    for _ in range(n):
        made_swap = False
        for j in range(n - 1):
            # Strict > keeps equal elements in their original relative
            # order; >= would still sort correctly but lose stability.
            if lst[j] > lst[j + 1]:
                lst[j], lst[j + 1] = lst[j + 1], lst[j]
                made_swap = True
        if not made_swap:
            # Nothing moved this pass: the list is sorted, exit early.
            break
    return lst
def order_node_preview(node_preview: list) -> list:
    """
    Order the list of checked nodes by number of similar executable jobs,
    descending.

    Args:
        node_preview: the list of checked nodes

    Returns:
        A list of checked nodes sorted by number of similar executable jobs.
    """
    def sim_jobs_of(node):
        return node["sim_jobs"]

    return sorted(node_preview, key=sim_jobs_of, reverse=True)
def winhax(s):
    """this escaping is ridiculous on windows

    http://trac.ffmpeg.org/ticket/2166
    """
    # Order matters: every backslash is quadrupled first, and only then
    # does the colon-backslash sequence get its extra escape layered on
    # top of the already-expanded backslashes.
    return s.replace('\\', '\\\\\\\\').replace(':\\', '\\\\:\\')
def findTimes(num_sweep):
    """
    Finds the times at the beginning and at the end of each sweep.

    The elapsed times since the beginning of the volume scan come from
    the Rad4Alp: Specifications/Request for Proposal (RFP) document.

    Inputs
    ------
    num_sweep: int
        rank of the sweep

    Returns
    -------
    start: float
        the elapsed time since the beginning of the volume scan at the
        beginning of the sweep
    end: float
        the elapsed time since the beginning of the volume scan at the
        end of the sweep
    """
    elapsed_times = {
        9: [0, 11.4], 7: [11.4, 22.8], 5: [22.8, 39.2], 3: [39.3, 60.5],
        1: [60.5, 84.7], 19: [84.7, 97.2], 17: [97.2, 109.6],
        15: [109.6, 121.6], 13: [121.6, 133.1], 11: [133.1, 144.4],
        10: [144.4, 155.8], 8: [155.8, 172.2], 6: [172.2, 188.6],
        4: [188.6, 204.9], 2: [204.9, 229.4], 20: [229.4, 241.9],
        18: [241.9, 254.4], 16: [254.4, 266.6], 14: [266.6, 278.3],
        12: [278.3, 289.9]}
    start, end = elapsed_times[num_sweep]
    return start, end