def balanced_parts(L, N):
    """Find N numbers that sum up to L and are as close as possible to each other.

    >>> balanced_parts(10, 3)
    [4, 3, 3]
    """
    if not (1 <= N <= L):
        raise ValueError("number of partitions must be between 1 and %d" % L)
    q, r = divmod(L, N)
    return r * [q + 1] + (N - r) * [q]

def moment(values, c, exponent):
    """Returns the moment of `values` about the value c."""
    total = 0.0
    for value in values:
        total += (value - c) ** exponent
    return total / len(values)

def unquote(qstr):
    """Unquote a string from the server."""
    qstr = qstr.replace("%13", "\n")
    qstr = qstr.replace("%25", "%")
    return qstr

def count_failed_requests(out):
    """Count failed and non-2xx responses."""
    failed = 0
    non2xx = 0
    completed = 0
    for metrics, _, _ in out:
        failed += metrics.get('Failed requests', [0])[0]
        non2xx += metrics.get('Non-2xx responses', [0])[0]
        completed += metrics.get('Complete requests', [0])[0]
    return failed, non2xx, completed

def calculate_fee(order_size: float, comission: float):
    """Calculates the trading fees from the exchange.

    Parameters
    ----------
    order_size : float
        Amount of the coin after the transaction.
    comission : float
        Percentage of the transaction.
    """
    if comission:
        return round((order_size / 100) * comission, 8)
    return 0.0

def pr_name(payload):
    """Returns the name (e.g. optee_os) of the Git project."""
    return payload['repository']['name']

def normalize_list(L):
    """Normalize an array of numbers such that each element e satisfies 0 <= e <= 1."""
    if not L:
        return L
    max_ = max(L)
    if max_ > 0:
        return [float(e) / max_ for e in L]
    return L

def parse_fastq_description(description):
    """Parse the description found in a fastq read's header.

    Parameters
    ----------
    description : str
        The description string from a fastq read header.

    Returns
    -------
    description_dict : dict
        A dictionary containing the keys and values found in the fastq read header.
    """
    description_dict = {}
    descriptors = description.split(" ")
    for item in descriptors:
        if "=" in item:
            bits = item.split("=")
            description_dict[bits[0]] = bits[1]
    return description_dict

def html_newlines(text):
    r"""Replace newlines, \n, with the HTML break tag, <br>."""
    return text.replace('\n', '<br>')

def ee_collections(collection):
    """Earth Engine image collection names."""
    dic = {
        'Sentinel2': 'COPERNICUS/S2',
        'Landsat7': 'LANDSAT/LE07/C01/T1_SR',
        'CroplandDataLayers': 'USDA/NASS/CDL'
    }
    return dic[collection]

def combined(*dicts):
    """Combine multiple dicts.

    >>> (combined({'a': 'foo'}, {'b': 'bar'})
    ...  == {'a': 'foo', 'b': 'bar'})
    True
    """
    result = {}
    for d in dicts:
        result.update(d)
    return result

def imageXY2SubgridRC(pxCoord, imageShape, gridsize, factor):
    """Returns the (sub-)grid RC coordinate of an image pixel.

    RC origin is at the top left, same as the image.
    factor = subdivision factor, i.e. how many cells are within a cell.
    """
    pxPerC = imageShape[1] / (gridsize * factor)
    pxPerR = imageShape[0] / (gridsize * factor)
    # identical for square images:
    # pxPerC = int(imageShape[1] / gridsize / factor)
    # pxPerR = int(imageShape[0] / gridsize / factor)
    # int() is used to floor (truncates the fractional part) - assigns to a unique cell
    subgridC = int(pxCoord[0] / pxPerC)
    subgridR = int(pxCoord[1] / pxPerR)
    return (subgridC, subgridR)

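# A minimal usage sketch (hypothetical values, not from the source above):
# map a pixel in a 100x100 image onto a 5x5 grid subdivided by a factor of 2,
# i.e. 10x10 sub-cells of 10 px each.
print(imageXY2SubgridRC((37, 64), (100, 100), gridsize=5, factor=2))
# (3, 6): x=37 falls in column 3, y=64 in row 6
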
def get_specs(data):
    """Takes a magic format file and returns a sorted list of unique specimen names."""
    speclist = []
    for rec in data:
        try:
            spec = rec["er_specimen_name"]
        except KeyError:
            spec = rec["specimen"]
        if spec not in speclist:
            speclist.append(spec)
    speclist.sort()
    return speclist

def _update_selected_experiment_table_rows(
        last_select_click, last_clear_click, experiment_table_indices):
    """The callback to select or deselect all rows in the experiment table.

    Triggered when the "select all" or "clear all" button is clicked.
    """
    last_select_click = last_select_click if last_select_click else 0
    last_clear_click = last_clear_click if last_clear_click else 0
    # "select all" was clicked: return all row indices
    if int(last_select_click) > int(last_clear_click):
        return experiment_table_indices
    # "clear all" was clicked (or nothing yet): return no row indices
    return []

def flatten(l):
    """Converts a list of tuples to a flat list.

    e.g. flatten([(1, 2), (3, 4)]) => [1, 2, 3, 4]
    """
    return [item for pair in l for item in pair]

def pad_sentences(sentences, padding_word="<PAD/>"):
    """Pads all sentences to the same length.

    The length is defined by the longest sentence. Returns padded sentences.
    """
    sequence_length = max(len(x) for x in sentences)
    padded_sentences = []
    for sentence in sentences:
        num_padding = sequence_length - len(sentence)
        padded_sentences.append(sentence + [padding_word] * num_padding)
    return padded_sentences

def normalizeRegionString(revisionString):
    """Ensures consistency in revision strings.

    Should produce something like 'NTSC 1.02', 'PAL 1.00', etc., or 'ALL'.
    """
    revisionString = revisionString.upper()
    if 'ALL' in revisionString:
        return 'ALL'
    # Check for a game/dol version
    verIdPosition = revisionString.find('.')
    if verIdPosition == -1:
        ver = '1.00'  # the default assumption
    else:
        ver = revisionString[verIdPosition - 1:verIdPosition + 3]
    # Check for the region
    if 'PAL' in revisionString:
        region = 'PAL '
    else:
        region = 'NTSC '  # the default assumption
    return region + ver

def sieve_of_eratosthenes_modified(n):
    """Sieve of Eratosthenes implementation with a tweak: instead of a
    true/false flag, count the number of distinct prime factors of each
    composite. Returns the numbers up to n with at least three distinct
    prime factors.
    """
    primes = [0] * (n + 1)
    primes[0] = -1
    primes[1] = -1
    for i in range(2, n + 1):
        if not primes[i]:  # 0 marks a prime
            for j in range(i + i, n + 1, i):
                primes[j] += 1
    return [i for i in range(2, n + 1) if primes[i] >= 3]

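# A quick sanity check (assumed usage, not from the source): the smallest
# number with three distinct prime factors is 2 * 3 * 5 = 30.
print(sieve_of_eratosthenes_modified(70))
# [30, 42, 60, 66, 70]
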
def identity(nums):
    """Identity: Given a list of numbers, write a list comprehension that
    produces a copy of the list.

    >>> identity([1, 2, 3, 4, 5])
    [1, 2, 3, 4, 5]
    >>> identity([])
    []
    """
    return [a for a in nums]

def primes(n):
    """Despite its name, this does not return a list of primes: it returns
    the number <= n with the most divisors greater than or equal to 3
    (ties go to the smaller number).
    """
    factors = [1 for _ in range(n + 1)]
    p = 3
    max_number = 0
    max_index = 0
    while p <= n:
        for i in range(p, n + 1, p):
            factors[i] += 1
        if factors[p] > max_number:
            max_number = factors[p]
            max_index = p
        p += 1
    return max_index

    # Alternative odd-only sieve (unused):
    # sieve = bytearray([True]) * (n // 2)
    # for i in range(3, int(n ** 0.5) + 1, 2):
    #     if sieve[i // 2]:
    #         sieve[i * i // 2::i] = bytearray((n - i * i - 1) // (2 * i) + 1)
    # print(sieve)

def has_cycle(head):
    """
    :type head: ListNode
    :rtype: bool
    """
    if not head:
        return False
    slow = fast = head
    # fast moves forward twice as fast as slow;
    # if there is a cycle, fast will catch up with slow.
    while fast.next and fast.next.next:
        fast = fast.next.next
        slow = slow.next
        if fast == slow:
            return True
    return False

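# A minimal sketch of how this might be exercised; ListNode is assumed here
# (it is referenced but not defined in the source).
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


a, b, c = ListNode(1), ListNode(2), ListNode(3)
a.next, b.next, c.next = b, c, b  # c points back to b, forming a cycle
print(has_cycle(a))  # True
c.next = None
print(has_cycle(a))  # False
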
def svg_to_src(svg):
    """Convert an SVG string to a format that can be passed into a src attribute.

    :rtype: str
    """
    return 'data:image/svg+xml;utf8,' + svg

def score_tuple(word_tuple):
    """Score a tuple of words.

    For now this is just the sum of the individual scores.
    """
    return sum(w.score for w in word_tuple)

def get_tensorflow_model_name(processor, model_name):
    """Helper function to get the TensorFlow model name.

    :param processor: processor type
    :param model_name: name of the model to be used
    :return: file name for the model being used
    """
    tensorflow_models = {
        "saved_model_half_plus_two": {
            "cpu": "saved_model_half_plus_two_cpu",
            "gpu": "saved_model_half_plus_two_gpu",
            "eia": "saved_model_half_plus_two",
        },
        "saved_model_half_plus_three": {"eia": "saved_model_half_plus_three"},
    }
    if model_name in tensorflow_models:
        return tensorflow_models[model_name][processor]
    else:
        raise Exception(f"No entry found for model {model_name} in dictionary")

def int_to_float(item: int) -> float:
    """Converts an int to a float.

    Args:
        item (int): The integer to convert.

    Returns:
        float: The converted value.
    """
    return float(item)

def _format_reduction_label(s):
    """Helper function to make graph labels more readable.

    Removes single quotes and square brackets from the input string.

    Parameters
    ----------
    s : str
        Input label.

    Returns
    -------
    str
        Label with the characters ', [, and ] removed.
    """
    return s.replace("'", "").replace('[', '').replace(']', '')

def create_cast_date_column_query(dbms: str, date_format: str, date_column: str) -> str:
    """Create a query that casts a date column into yyyy-MM-dd format.

    :param dbms: data warehouse name
    :param date_format: format of the date column
    :param date_column: name of the column with periods
    :return: query to cast the date column
    """
    # Convert date format to yyyy-MM-dd
    if dbms == 'hive':
        if date_format in ['yyyy-MM-dd', 'YYYY-MM-DD']:
            casted_date_column = date_column
        else:
            casted_date_column = (
                f"from_unixtime(unix_timestamp({date_column}, '{date_format}'), 'yyyy-MM-dd')"
            )
    elif dbms == 'teradata':
        if date_format in ['yyyy-MM-dd', 'YYYY-MM-DD']:
            casted_date_column = date_column
        else:
            casted_date_column = (
                f"cast(cast({date_column} as varchar(20)) as date format '{date_format}')"
            )
    else:
        raise NotImplementedError('Data warehouses supported for now: hive and teradata')
    return casted_date_column

def fmin(V, volume):
    """Minimization function for the solver.

    Parameters
    ----------
    V : float
    volume : float

    Returns
    -------
    result : float
    """
    return (volume - V) ** 2 / V

def find_high_index(arr, key):
    """Find the highest index of key in the sorted array arr.

    Time: O(log n)
    Space: O(1)
    """
    lo, hi = 0, len(arr)
    while lo < hi:
        mi = (lo + hi) // 2
        if arr[mi] > key:
            hi = mi
        elif arr[mi] == key and (mi + 1 == len(arr) or arr[mi + 1] > key):
            return mi
        else:
            lo = mi + 1
    return -1

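# A small usage sketch (hypothetical values): the last occurrence of 5 in a
# sorted array, and -1 when the key is absent.
print(find_high_index([1, 3, 5, 5, 5, 8, 9], 5))  # 4
print(find_high_index([1, 3, 5, 5, 5, 8, 9], 7))  # -1
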
def interpret_origin(geom, origin, ndim):
    """Returns an interpreted coordinate tuple for the origin parameter.

    This is a helper function for other transform functions. The point of
    origin can be the keyword 'center' for the 2D bounding box center,
    'centroid' for the geometry's 2D centroid, a Point object, or a
    coordinate tuple (x0, y0, z0).
    """
    # get a coordinate tuple from 'origin': a keyword or a Point
    if origin == 'center':
        # bounding box center
        minx, miny, maxx, maxy = geom.bounds
        origin = ((maxx + minx) / 2.0, (maxy + miny) / 2.0)
    elif origin == 'centroid':
        origin = geom.centroid.coords[0]
    elif isinstance(origin, str):
        raise ValueError("'origin' keyword %r is not recognized" % origin)
    elif hasattr(origin, 'type') and origin.type == 'Point':
        origin = origin.coords[0]

    # origin should now be tuple-like
    if len(origin) not in (2, 3):
        raise ValueError("Expected number of items in 'origin' to be "
                         "either 2 or 3")
    if ndim == 2:
        return origin[0:2]
    else:  # 3D coordinate
        if len(origin) == 2:
            return origin + (0.0,)
        else:
            return origin

def most_frequent(data: list) -> str:
    """Determines the most frequently occurring string in the sequence."""
    uniques = list(set(data))
    d = dict(zip(uniques, map(data.count, uniques)))
    return sorted(d, key=lambda x: d[x])[-1]

def remove_empty_intent_examples(intent_results):
    """Remove those examples without an intent."""
    filtered = []
    for r in intent_results:
        # substitute None values with an empty string
        # to enable sklearn evaluation
        if r.prediction is None:
            r = r._replace(prediction="")
        if r.target != "" and r.target is not None:
            filtered.append(r)
    return filtered

def revcomp(seq: str) -> str:
    """Reverse complement a nucleotide sequence."""
    rc_nuc = {
        'A': 'T',
        'C': 'G',
        'T': 'A',
        'G': 'C',
    }
    seq_rev = seq[::-1]
    return ''.join(rc_nuc[n] for n in seq_rev)

def _compute_delta_t(
        data: dict, nodeid: str, event1: str, event1_pos: int,
        event2: str, event2_pos: int) -> float:
    """Compute the time delta between two events.

    :param dict data: data
    :param str nodeid: node id
    :param str event1: event1
    :param int event1_pos: event1 position in stream
    :param str event2: event2
    :param int event2_pos: event2 position in stream
    :rtype: float
    :return: delta t of events
    """
    # attempt to get a directly recorded diff
    try:
        return data[nodeid][event2][event2_pos]['message']['diff']
    except (TypeError, KeyError):
        return (data[nodeid][event2][event2_pos]['timestamp'] -
                data[nodeid][event1][event1_pos]['timestamp']).total_seconds()

def get_color(attr):
    """Map attr, assumed to be between -1 and 1, to an RGB triple."""
    if attr > 0:
        return int(128 * attr) + 127, 128 - int(64 * attr), 128 - int(64 * attr)
    return 128 + int(64 * attr), 128 + int(64 * attr), int(-128 * attr) + 127

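# A quick sanity check (assumption, not from the source): positive values
# shade toward red, negative values toward blue, zero is a neutral grey.
print(get_color(1.0))   # (255, 64, 64)
print(get_color(0.0))   # (128, 128, 127)
print(get_color(-1.0))  # (64, 64, 255)
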
def force_tuple(var):
    """Returns the given variable as a tuple.

    :param var: variant
    :return: tuple
    """
    if var is None:
        return ()
    if type(var) is not tuple:
        var = tuple(var)
    return var

def get_label(labels, index):
    """Gets the label if it exists, otherwise returns the label number."""
    if index < len(labels):
        return labels[index]
    else:
        return '#%d' % index

def is_same_class(obj, a_class):
    """Return True if obj is exactly an instance of a_class, otherwise False."""
    return type(obj) == a_class

import numpy as np


def get_samples(ndata, ntrainInit):
    """
    Input:
        ndata = df.shape[0], total number of samples
        ntrainInit = number of points for initial training
    Output:
        Two index arrays over ndata: sample_idx and remaining_idx
    Use:
        test_data_idx, remaining_data_idx = get_samples(ndata, ntrainInit)
    """
    nremain = ndata - ntrainInit
    dataset = np.random.permutation(ndata)
    a1data = np.empty(ntrainInit, dtype=int)  # randomly chosen data points
    a2data = np.empty(nremain, dtype=int)     # remaining data points
    a1data[:] = dataset[0:ntrainInit]
    a2data[:] = dataset[ntrainInit:ndata]
    return a1data, a2data

def store_data(file_name, data):
    """
    Description
    -----------
    Use to store data in a file.

    Parameters
    ----------
    file_name : string
        Represents the path of the file.
    data : string
        Represents the data to store in the file.

    Return
    ------
    boolean
        True if the data was written to the file.
    """
    # A context manager guarantees the file is closed even on the success
    # path, where the early return would otherwise skip close().
    with open(file_name, 'a') as fout:
        if fout.write('\n' + data):
            return True
    return False

def _format_like_example(args, example, key_order):
    """Returns args as an instance, ordered list or dict, following the `example` format.

    Note: facilitates single, multi-output or named multi-output Keras model API.

    Args:
        args: ordered tuple of arguments.
        example: example of the format to follow: single instance, tuple or dict.
        key_order: keys for arguments in case `example` is of type dict.

    Returns:
        args formatted as `example`.
    """
    if isinstance(example, dict):
        result = dict(zip(key_order, args))
    elif isinstance(example, (tuple, list)) and not len(example):  # pylint: disable=g-explicit-length-test
        # Empty single instance.
        result = []
    elif (isinstance(example, (tuple, list)) and
          isinstance(example[0], (tuple, list))):
        result = args
    else:
        result = args[0]
    return result

def _format_read_number(read, read_type=None):
    """Catch read values without a letter prefix (legacy format) and convert
    them to Rn format.

    This could be fixed by a database migration that updates the JSON blobs.

    :param read: The read number. May be e.g. 'R1', 'R2', 'I1', or the old
        format used previously, '1', '2', '3' (as string or int).
    :type read: str | int
    :return: The read number properly formatted for output - Rn or In.
    :rtype: str
    """
    try:
        read = int(read)
        if read_type is None:
            read_type = 'R'
        read = '%s%s' % (read_type, read)
    except ValueError:
        # read is already in the format 'Rn' or 'In', so return it unmodified
        pass
    return read

def filter_optional(l, filter_list=None):
    """Optionally filter elements in a list according to a second filter list.

    :param l: list to potentially filter
    :param filter_list: filter list specifying all elements which are allowed
        in the returned list
    :return: if filter_list is not None, a list containing the intersection of
        l and filter_list, else l
    """
    if filter_list is None:
        return l
    return [f for f in l if f in filter_list]

def _calcFrame(parentFrame, posSize, absolutePositioning=False):
    """Convert a vanilla posSize rect to a Cocoa frame."""
    (pL, pB), (pW, pH) = parentFrame
    (l, t), (w, h) = posSize
    if not absolutePositioning:
        if l < 0:
            l = pW + l
        if w <= 0:
            w = pW + w - l
        if t < 0:
            t = pH + t
        if h <= 0:
            h = pH + h - t
    b = pH - t - h  # flip it upside down
    return (l, b), (w, h)

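# A worked example (values are hypothetical): in a 200x100 parent, a vanilla
# rect 10 px from the left, 10 px from the top, 50 wide and -20 high (20 px
# up from the bottom) becomes a bottom-left-origin Cocoa frame.
print(_calcFrame(((0, 0), (200, 100)), ((10, 10), (50, -20))))
# ((10, 20), (50, 70)): h = 100 - 20 - 10 = 70, then b = 100 - 10 - 70 = 20
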
def relu(x):
    """Rectified linear unit.

    Args:
        x (numpy.ndarray): The input array.

    Returns:
        numpy.ndarray, the array with relu applied.
    """
    return x * (x > 0)

def _peek_eol(line):
    """
    :param unicode line: A line in a file.
    :rtype: unicode
    :return: EOL used by the line.
    """
    eol = '\n'
    if line:
        if line.endswith('\r'):
            eol = '\r'
        elif line.endswith('\r\n'):
            eol = '\r\n'
    return eol

def split_lexical_entry(name):
    """Takes a lexical_entry_rule and splits it on underscores.

    :param name: the rule name to split
    :return: [stem, pos, gloss], with missing parts set to None
    """
    stem = pos = gloss = None
    name = name.split("_")
    if len(name) > 0:
        stem = name[0]
    if len(name) > 1:
        pos = name[1]
    if len(name) > 2:
        gloss = name[2]
    return [stem, pos, gloss]

def build_mssql_trusted_connection_string(server, database):
    """Given a server and database name, build a Trusted Connection MSSQL
    connection string."""
    return ('DRIVER={SQL Server Native Client 11.0};Server=' + server +
            ';Database=' + database + ';Trusted_Connection=yes;')

import sys


def getObjPV(pmgr, objID):
    """Returns the last known PV for the obj if it exists."""
    try:
        return pmgr.objs[objID]["rec_base"]
    except (AttributeError, KeyError):
        # a missing objID raises KeyError, not AttributeError, so catch both
        print("getObjPV: Error with pmgr and/or objID, one or both may not exist!")
        sys.exit()

def dms2dd(dms):
    """DMS to decimal degrees.

    Args:
        dms (list): d must be negative for S and W.

    Return:
        float.
    """
    d, m, s = dms
    return d + m / 60. + s / 3600.

def __to_js(var, value):
    """Encapsulates a Python variable in a JavaScript var.

    :param var: The name of the JavaScript var.
    :param value: The value to set.
    """
    return '<script type="text/javascript">var %s = "%s";</script>' % (var, value)

def chunk(n, m, frames):
    """Divide the n frames into m parts."""
    if n % m == 0:
        idx = list(range(0, n, n // m))
        idx.append(n)
    else:
        d = n // m
        r = n % m
        idx = list(range(0, n - r, d))
        # distribute the remainder across the last r chunks
        offset = 1
        for j in range(m - r, m):
            idx[j] += offset
            offset += 1
        idx.append(n)
    res = []
    for i in range(len(idx) - 1):
        res.append(frames[idx[i]:idx[i + 1]])
    return res

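# A small usage sketch (hypothetical input): 10 frames into 3 parts gives
# chunk sizes 3, 4, 3 once the remainder is distributed.
print(chunk(10, 3, list(range(10))))
# [[0, 1, 2], [3, 4, 5, 6], [7, 8, 9]]
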
def all_same_nonterminal(ilist, bitpos):
    """Return (False, None) if not all of the next bits are the same
    nonterminal; otherwise return (True, nonterminal)."""
    last_nonterminal = None
    for i in ilist:
        plen = len(i.ipattern.bits)
        if bitpos >= plen:
            # fell off the end of the bits
            return (False, None)
        if i.ipattern.bits[bitpos].is_nonterminal():
            if last_nonterminal is None:
                last_nonterminal = i.ipattern.bits[bitpos]
            elif last_nonterminal != i.ipattern.bits[bitpos]:
                # differing nonterminals
                return (False, None)
        else:
            return (False, None)  # not a nonterminal
    if last_nonterminal:
        return (True, last_nonterminal)
    return (False, None)

def replace_source_mask(py, base_directory, source_mask_directory):
    """Replace source masks."""
    script = (
        f"{py} -m seisflow.scripts.structure_inversion.replace_source_mask "
        f"--base_directory {base_directory} "
        f"--source_mask_directory {source_mask_directory}; \n"
    )
    return script

def normalize_stdout(stdout):
    r"""Make subprocess output easier to consume.

    Decode bytes to str and strip unnecessary newlines produced by most commands.

    :param stdout: return value of `subprocess.check_output` or similar

    >>> normalize_stdout(b'/foo/bar\n')
    '/foo/bar'
    """
    return stdout.decode().strip()

def d_y_diffr_dy(x, y):
    """Derivative d(y/r)/dy, equivalent to the second-order derivative dr_dyy.

    :param x:
    :param y:
    :return:
    """
    return x ** 2 / (x ** 2 + y ** 2) ** (3 / 2.)

def is_dict(data):
    """Checks if data is a dictionary."""
    return isinstance(data, dict)

def set_users(db, users):
    """Add users to a key-value store db.

    :return: db
    """
    db['users'] = users
    return db

def is_continuation_week(event_weekdays):
    """Return True if event_weekdays is a run of at least three consecutive weekdays."""
    weekday_len = len(event_weekdays)
    if weekday_len < 3:
        return False
    for idx, week in enumerate(event_weekdays):
        if idx + 1 < weekday_len:
            if week + 1 != event_weekdays[idx + 1]:
                return False
    return True

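# A quick check (assumed usage), with weekdays encoded as consecutive ints:
print(is_continuation_week([1, 2, 3]))  # True  (three consecutive days)
print(is_continuation_week([1, 3, 5]))  # False (gaps between days)
print(is_continuation_week([1, 2]))     # False (fewer than three days)
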
def __toarray(X):
    """Converts DataFrames to numpy arrays."""
    if hasattr(X, "values"):
        X = X.values
    return X

def is_valid_sequence(s):
    """(str) -> bool

    The parameter is a potential DNA sequence. Return True if and only if the
    DNA sequence is valid (that is, it contains no characters other than 'A',
    'T', 'C' and 'G').

    >>> is_valid_sequence("ATTCCGGGA")
    True
    >>> is_valid_sequence("SALIMAAAA")
    False
    """
    nucs = "ATCG"
    for ch in s:
        if ch not in nucs:
            return False
    return True

def clean_line(line):
    """Parse, clean, and classify the line.

    classify: see line_type, and code
    clean: means clean the line (some in this code, some in function clean_word())
    Note that for the word analysis we convert to lower case.

    Return:
        tuple = (cleaned_line, line_ref = str_ref, line_type)  # in flux, check out the code
    """
    line_type = "tweet"  # bad, empty, retweet.... working on it
    line = line.encode(encoding='UTF-8', errors='replace')  # do this in the read?? does not seem to work
    line = str(line)
    # remove some junk
    line = line.replace("\n", " ")
    line = line.replace("/n", " ")  # but still see them
    # this is the parse for the csv
    line_parts = line.split(",")  # seems to be the way the file is delimited; this gets us just the tweet
    # if we do not get the required number of parts we "reject" the line
    if len(line_parts) < 7:
        line_type = "bad"
        return ("", "", line_type)
    # print(f"line_parts {len(line_parts)}")
    line_ref = line_parts[6]    # original reference from the download file
    line_tweet = line_parts[1]  # the tweet part of the line
    if line_tweet.startswith("RT"):  # be careful where we lower
        line_type = "retweet"  # get rid of the RT ??
    line_tweet = line_tweet.lower()
    return (line_tweet, line_ref, line_type)

def add_missing_flows(data):
    """There are some flows not given in ReCiPe that seem like they should be
    there, given the relatively coarse precision of these CFs."""
    new_cfs = {
        "managed forest": {
            "amount": 0.3,
            "flows": [
                "occupation, forest, unspecified",
                "occupation, field margin/hedgerow",
            ],
        },
        "annual crops": {
            "amount": 1.0,
            "flows": [
                "occupation, annual crop, flooded crop",
                "occupation, annual crop, irrigated, extensive",
            ],
        },
        "pasture": {
            "amount": 0.55,
            "flows": [
                "occupation, arable land, unspecified use",
                "occupation, grassland, natural, for livestock grazing",
                "occupation, heterogeneous, agricultural",
            ],
        },
        "artificial area": {"amount": 0.73, "flows": []},
        "permanent crops": {
            "amount": 0.7,
            "flows": [
                "occupation, permanent crop, irrigated",
                "occupation, permanent crop, irrigated, extensive",
                "occupation, permanent crop, non-irrigated",
                "occupation, permanent crop, non-irrigated, extensive",
            ],
        },
    }
    """The following were included in an earlier version of ReCiPe, but are
    skipped here, as we don't have enough info to use them consistently:

    * 'occupation, bare area (non-use)',
    * 'occupation, cropland fallow (non-use)',
    * 'occupation, forest, primary (non-use)',
    * 'occupation, forest, secondary (non-use)',
    * 'occupation, inland waterbody, unspecified',
    * 'occupation, lake, natural (non-use)',
    * 'occupation, river, natural (non-use)',
    * 'occupation, seabed, natural (non-use)',
    * 'occupation, seabed, unspecified',
    * 'occupation, snow and ice (non-use)',
    * 'occupation, unspecified',
    * 'occupation, unspecified, natural (non-use)',
    * 'occupation, wetland, coastal (non-use)',
    * 'occupation, wetland, inland (non-use)'
    """
    for ds in data:
        ds["exchanges"].extend(
            [
                {"name": flow, "amount": obj["amount"]}
                for obj in new_cfs.values()
                for flow in obj["flows"]
            ]
        )
    return data

def variantCombinations(items):
    """Calculates variant combinations for a given list of options. Each item
    in the items list represents a unique value with its variants.

    :param list items: list of values to be combined

    >>> c = variantCombinations([["1.1", "1.2"], ["2.1", "2.2"], ["3.1", "3.2"]])
    >>> len(c)
    8
    >>> for combination in c: print(combination)
    ['1.1', '2.1', '3.1']
    ['1.1', '2.1', '3.2']
    ['1.1', '2.2', '3.1']
    ['1.1', '2.2', '3.2']
    ['1.2', '2.1', '3.1']
    ['1.2', '2.1', '3.2']
    ['1.2', '2.2', '3.1']
    ['1.2', '2.2', '3.2']
    """
    assert isinstance(items, list) and items
    if len(items) == 1:
        result = items[0]
    else:
        result = []
        subItems = variantCombinations(items[1:])
        for masterItem in items[0]:
            for subItem in subItems:
                if isinstance(subItem, list):
                    item = [masterItem]
                    item.extend(subItem)
                    result.append(item)
                else:
                    result.append([masterItem, subItem])
    return result

def num_neg_pmi(pdict):
    """Count the number of negative PMIs in a PMI dictionary."""
    total = 0
    neg_count = 0
    for key1 in pdict:
        for key2 in pdict[key1]:
            # make sure we don't double count
            if key1 < key2:
                total += 1
                if pdict[key1][key2] < 0:
                    neg_count += 1
    return neg_count, neg_count / total

def parse_segment(segment: str) -> str:
    """Parse a pointer segment.

    Individual segments need to replace special chars, as per RFC 6901:
    https://tools.ietf.org/html/rfc6901
    """
    return segment.replace("~", "~0").replace("/", "~1")

def PatternCount(text, pattern):
    """Exercise 1.2.7 PatternCount

    Description: Counts the number of times pattern occurs in text, WITH OVERLAPS.

    Input: Strings text and pattern.
    Output: Count(text, pattern).

    Sample Input:
        GCGCG
        GCG
    Sample Output:
        2
    """
    n = 0
    for i in range(len(text) - len(pattern) + 1):
        if text[i:i + len(pattern)] == pattern:
            n += 1
    return n

def addKwdArgsToSig(sigStr, kwArgsDict):
    """Alter the passed function signature string to add the given keywords."""
    retval = sigStr
    if len(kwArgsDict) > 0:
        retval = retval.strip(' ,)')  # open up the r.h.s. for more args
        for k in kwArgsDict:
            if retval[-1] != '(':
                retval += ", "
            retval += str(k) + "=" + str(kwArgsDict[k])
        retval += ')'
    return retval

def add_periods_endtime(l):
    """Inserts end times for the time spans."""
    l.reverse()
    old = u"24:00"
    for i in l:
        i.append(old)
        old = i[0]
    l.reverse()
    return l

def tail(f, window=20):
    """Unix tail for Python.

    Taken from http://stackoverflow.com/a/136368/624900
    """
    BUFSIZ = 1024
    f.seek(0, 2)
    bytes = f.tell()
    size = window
    block = -1
    data = []
    while size > 0 and bytes > 0:
        if bytes - BUFSIZ > 0:
            # seek back one whole BUFSIZ
            f.seek(block * BUFSIZ, 2)
            # read one BUFSIZ worth of data
            data.append(f.read(BUFSIZ))
        else:
            # file too small, start from the beginning
            f.seek(0, 0)
            # only read what was not read
            data.append(f.read(bytes))
        linesFound = data[-1].count('\n')
        size -= linesFound
        bytes -= BUFSIZ
        block -= 1
    return '\n'.join(''.join(data).splitlines()[-window:])

def trim(speeds, trim_percentage):
    """Trims the given list by trim_percentage, removing entries evenly from
    both ends of the sorted list."""
    trim_amount = int(len(speeds) * trim_percentage)
    tmpspeeds = sorted(speeds)
    if trim_amount > 1:
        for i in range(0, trim_amount, 2):
            tmpspeeds.pop(0)
            tmpspeeds.pop()
    return tmpspeeds

def confirm(request_body, response_body, storage):
    """This callback function is called on a request to the `/v1/payments/confirm` endpoint.

    It stores differences between the initial balances of accounts and the
    actual balances after payments are completed.

    :param request_body: a request body
    :param response_body: a mocked response body
    :param storage: the internal storage
    :return: a response body without modifications
    """
    payment_info = storage[response_body['paymentId']]
    storage[payment_info['receiverIban']] = (
        storage.get(payment_info['receiverIban'], 0) + payment_info['amount'])
    storage[payment_info['payerIban']] = (
        storage.get(payment_info['payerIban'], 0) - payment_info['amount'])
    return response_body

def serialize_values(obj):
    """Recursively create serializable values for (custom) data types."""

    def get_val(val):
        # compare the class *name*; comparing the class object to a string
        # could never match dict value iterators
        if (isinstance(val, (list, set, filter, tuple))
                or val.__class__.__name__ == "dict_valueiterator"):
            return [get_val(x) for x in val] if val else []
        if isinstance(val, dict):
            return {key: get_val(value) for key, value in val.items()}
        try:
            return val.to_dict()
        except AttributeError:
            return val
        except Exception:  # pylint: disable=broad-except
            return val

    return get_val(obj)

def jn_first_zero(n):
    """Get an approximate location for the first zero of the spherical Bessel
    function of a given order n."""
    precomputed = [3.14159, 4.49341, 5.76346, 6.98793, 8.18256, 9.35581,
                   10.5128, 11.657, 12.7908, 13.9158, 15.0335, 16.1447,
                   17.2505, 18.3513, 19.4477, 20.5402, 21.6292, 22.715,
                   23.7978, 24.878, 25.9557, 27.0311, 28.1043, 29.1756,
                   30.245, 31.3127]
    try:
        return precomputed[n]
    except IndexError:
        # formula 9.5.14 in Handbook of Mathematical Functions
        v = n + 0.5
        return (v + 1.8557571 * v ** (1 / 3) + 1.033150 * v ** (-1 / 3)
                - 0.00397 * v ** (-1) - 0.0908 * v ** (-5 / 3)
                + 0.043 * v ** (-7 / 3))

def adapt_param(p: float, p_min: float, p_max: float,
                d: float, d_target: float, xi: float) -> float:
    """Self-adapts a param p given a measure d and control xi.

    Args:
        p (float): The param to self-adapt
        p_min (float): The lower bound of the param
        p_max (float): The upper bound of the param
        d (float): The measure that affects p
        d_target (float): The desired d value
        xi (float): The parameter controlling the adaptivity strength

    Returns:
        float: The adapted param p
    """
    new_p = max(p_min, min(p_max, p * (1 + xi * (d_target - d) / d)))
    return new_p

def dfor(value, default):
    """Null coalescing operator.

    Args:
        value (Any): value
        default (Any): default value

    Returns:
        Any: If the specified value is not None, return the specified value.
        Otherwise, return the specified default value.
    """
    if value is None:
        return default
    return value

def scale_data_and_transform(data):
    """Assume data is a list of features; change to a list of points and
    min/max scale each feature to [0, 1]."""
    scaled_data = [[] for _ in range(len(data[0]))]
    for feature_list in data:
        max_val = max(feature_list)
        min_val = min(feature_list)
        for i in range(len(feature_list)):
            if max_val == min_val:
                scaled_data[i].append(0.0)
            else:
                scaled_data[i].append(
                    (feature_list[i] - min_val) / (max_val - min_val))
    return scaled_data

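# A worked example (hypothetical data): two features over three points go in
# as rows and come out transposed as per-point scaled values.
data = [[0, 5, 10],  # feature 1
        [2, 2, 4]]   # feature 2
print(scale_data_and_transform(data))
# [[0.0, 0.0], [0.5, 0.0], [1.0, 1.0]]
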
def get_weight_shapes(num_inputs, layer_sizes, num_outputs):
    """Adapted from the original tf_model.get_weight_shapes() to convert it
    from a method to a function."""
    weight_shapes = []
    input_size = num_inputs
    for layer in layer_sizes:
        weight_shapes.append((input_size, layer))
        weight_shapes.append((layer,))
        input_size = layer
    weight_shapes.append((input_size, num_outputs))
    weight_shapes.append((num_outputs,))
    return weight_shapes

def is_comment(string, delimiter):
    """Helper method to determine whether something is a comment.

    :param string: the string being checked to see if it's a comment
    :param delimiter: the string to check for before or after the '!'
    :return: a boolean indicating whether it is a comment
    """
    testing_index = string.find('!')
    green_light_index = string.find(delimiter)
    if testing_index == -1 or green_light_index < testing_index:
        comment = False
    else:
        comment = True
    return comment

def _links(x):
    """Helper function to link states with one another."""
    return [(x[i], x[i + 1]) for i, _ in enumerate(x[:-1])]

def create_empty_append(n_k, n_iss, n_feats):
    """Create a null measure in list form.

    Parameters
    ----------
    n_k : int
        the number of perturbations.
    n_iss : int
        the number of indices of the output measure.
    n_feats : int
        the number of features (unused here).

    Returns
    -------
    measure : list
        the null measure to be filled by the computation of the spatial
        descriptor model.
    """
    # Comprehensions, rather than list multiplication, so each inner empty
    # list is an independent object instead of an alias of one shared list.
    return [[[] for _ in range(n_iss)] for _ in range(n_k)]

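# A quick shape check (assumption, not from the source): appending to one
# cell must not leak into its siblings (the classic list-multiplication
# aliasing pitfall).
m = create_empty_append(2, 3, 5)
m[0][0].append("x")
print(m)  # [[['x'], [], []], [[], [], []]]
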
def get_arrhythmia_type(fields):
    """Returns the type of arrhythmia based on the fields of the sample.

    Arguments
    ---------
    fields: fields of sample read from wfdb.rdsamp

    Returns
    -------
    Type of arrhythmia:
        'a': asystole
        'b': bradycardia
        't': tachycardia
        'f': ventricular fibrillation
        'v': ventricular tachycardia
    """
    arrhythmias = {
        'Asystole': 'a',
        'Bradycardia': 'b',
        'Tachycardia': 't',
        'Ventricular_Tachycardia': 'v',
        'Ventricular_Flutter_Fib': 'f'
    }
    arrhythmia_type = fields['comments'][0]
    return arrhythmias[arrhythmia_type]

def dashes(i=1, max_n=6, width=1):
    """Dash pattern (on/off ink lengths) for matplotlib."""
    return i * [width, width] + [max_n * 2 * width - 2 * i * width, width]

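# A sketch of how the pattern might be used (the matplotlib call is an
# assumption): i short on/off pairs followed by one long dash and a gap.
print(dashes(2))  # [1, 1, 1, 1, 8, 1]
# import matplotlib.pyplot as plt
# plt.plot(range(10), dashes=dashes(2))
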
def setdiff(list1, list2):
    """Returns list1 elements that are not in list2. Preserves the order of list1.

    Args:
        list1 (list):
        list2 (list):

    Returns:
        list: new_list

    Example:
        >>> list1 = ['featweight_rowid', 'feature_rowid', 'config_rowid',
        ...          'featweight_forground_weight']
        >>> list2 = [u'featweight_rowid']
        >>> new_list = setdiff(list1, list2)
        >>> result = ub.repr2(new_list, nl=False)
        >>> print(result)
        ['feature_rowid', 'config_rowid', 'featweight_forground_weight']
    """
    set2 = set(list2)
    return [item for item in list1 if item not in set2]

def get_wsgi_header(header):
    """Returns a WSGI-compliant HTTP header.

    See https://www.python.org/dev/peps/pep-3333/#environ-variables for
    information from the spec.
    """
    return 'HTTP_{}'.format(header.upper().replace('-', '_'))

def ValOf(x):
    """Calculate the value of a job: if the job is an int or float, return it
    directly; if it is a tuple of (index, val), return val.

    >>> ValOf(8)
    8
    >>> ValOf(20)
    20
    >>> ValOf((1, 58))
    58
    >>> ValOf(("a", 67))
    67
    """
    if isinstance(x, (int, float)):
        return x
    return x[1]

def replace_methods_stack(form, new_methods_stack):
    """Return a new form with ``methods_stack`` replaced with ``new_methods_stack``."""
    return form[:4] + (new_methods_stack,)

def _flatten(l):
    """Flatten the given list ``l``.

    [[1], [2]] -> [1, 2]
    """
    return [item for sublist in l for item in sublist]

def _ParseAppServers(app_servers):
    """Parse the app servers for name and project id.

    Args:
        app_servers: list|str|, a list of strings defining the Google Cloud
            Project IDs by friendly name.

    Returns:
        A dictionary with the friendly name as the key and the Google Cloud
        Project ID as the value.
    """
    return dict(server.split('=', 1) for server in app_servers)

def format_error_message(message, **kwargs):
    """Replaces the tokens with `kwargs`.

    :param message: The message that contains the tokens.
    :param kwargs: The args used to replace the tokens.
    :return: The formatted message.
    """
    if isinstance(message, str):
        message = message.format(**kwargs)
    elif isinstance(message, dict):
        for key, value in message.items():
            message[key] = format_error_message(value, **kwargs)
    return message

def edge_list_to_nodes(edges):
    """Create a list of nodes from a list of edges.

    :param edges: edges to create nodes for
    :type edges: list
    """
    # sum(edges, ()) concatenates the edge tuples into one flat tuple
    return list(set(sum(edges, ())))

def mutable_two(A):
    """Finds the two largest values in A using the built-in max() method and
    extra storage."""
    if len(A) < 2:
        raise ValueError('Must have at least two values')
    idx = max(range(len(A)), key=A.__getitem__)
    my_max = A[idx]
    del A[idx]
    second = max(A)
    A.insert(idx, my_max)
    return (my_max, second)

def is_merge(complete, part1, part2):
    """Checks whether part1 and part2 can be merged into complete while
    maintaining the order of characters."""
    if len(part1) + len(part2) != len(complete):
        return False
    if part1 in complete:
        ix = complete.find(part1)
        remaining = complete[0:ix] + complete[ix + len(part1):]
        if remaining == part2:
            return True
    if part2 in complete:
        ix = complete.find(part2)
        remaining = complete[0:ix] + complete[ix + len(part2):]
        if remaining == part1:
            return True
    p1ix = 0
    p2ix = 0
    ix = 0
    while ix < len(complete):
        if p1ix < len(part1) and part1[p1ix] == complete[ix]:
            p1ix += 1
            ix += 1
            continue
        elif p2ix < len(part2) and part2[p2ix] == complete[ix]:
            p2ix += 1
            ix += 1
            continue
        else:
            return False
    return True

def get_startframe(anim_data):
    """Gets the start frame from the data.

    :param anim_data: dict of data
    :type anim_data: dict
    :return: int or None
    :rtype: int or NoneType
    """
    # get the start frame to offset from
    start_frames = []
    for t in anim_data["Translation"]:
        time_value = t[0].get("time")
        if time_value is not None:
            start_frames.append(time_value)
    if start_frames:
        return min(start_frames)
    return None

def isValidSudoku(board):
    """
    :type board: List[List[str]]
    :rtype: bool
    """
    all_index_set = set()
    for rows in range(len(board)):
        for columns in range(len(board)):
            element = board[rows][columns]
            if element != ".":
                if ((rows, element) in all_index_set
                        or (element, columns) in all_index_set
                        or (rows // 3, columns // 3, element) in all_index_set):
                    return False
                all_index_set.add((rows, element))
                all_index_set.add((element, columns))
                all_index_set.add((rows // 3, columns // 3, element))
    return True

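# The single set works because the three key shapes cannot collide:
# (row, digit) is (int, str), (digit, col) is (str, int), and the box key is
# a 3-tuple. A tiny check with a hypothetical board:
board = [["."] * 9 for _ in range(9)]
board[0][0] = "5"
board[0][8] = "5"  # duplicate digit in row 0
print(isValidSudoku(board))  # False
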
def get_function_input(inputs, input_name, optional=False):
    """Given input_name, checks whether it is defined.

    Raises ValueError if a mandatory input is None.
    """
    the_input = inputs.get(input_name)
    if the_input is None and optional is False:
        err = "'{0}' is a mandatory function input".format(input_name)
        raise ValueError(err)
    return the_input

def split_nth(string, count):
    """Splits a string into equally-sized chunks of length count (the last
    chunk may be shorter)."""
    return [string[i:i + count] for i in range(0, len(string), count)]

def polygon_from_bbox(minx, miny, maxx, maxy):
    """Construct polygon coordinates in numeric list representation from a
    numeric list representing a bounding box."""
    return [[minx, miny], [maxx, miny], [maxx, maxy], [minx, maxy]]

def get_name_as_id(name):
    """Get name as a legal SVD identifier."""
    return name.replace(",", "_").replace("-", "_")

def _get_target_segment_dict(main_id, segment_dict: dict):
    """Get a segment dict that does not contain the currently investigated id."""
    result_dict = {}
    for an_id in segment_dict.keys():
        if an_id != main_id:
            result_dict[an_id] = segment_dict[an_id]
    return result_dict