def merge_data(template_id, substitutions):
    """Template push merge_data creation.

    :param template_id: Required, UUID.
    :param substitutions: Required, dictionary of template variables and their
        substitutions, e.g. {"FIRST_NAME": "Bob", "LAST_NAME": "Smith"}
    """
    md = {}
    md['template_id'] = template_id
    md['substitutions'] = {
        key: val for key, val in iter(substitutions.items()) if val is not None
    }
    return md
def array_rotate(arr, n):
    """
    Rotate the array by breaking it down into two pieces:
      1. The rotated part
      2. The remaining part
    An array is then reconstructed from these two parts.
    Space complexity = O(n)
    Time complexity = O(n)
    :param arr: The array to rotate
    :param n: number of rotation positions
    :return: rotated array
    """
    if len(arr) == 0 or n == 0 or len(arr) == n:
        return arr
    if len(arr) > n:
        ret = []
        ret.extend(arr[len(arr) - n:])
        ret.extend(arr[:len(arr) - n])
        return ret
    else:
        n = n % len(arr)
        return array_rotate(arr, n)
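# Hedged usage sketch for array_rotate (example values are mine, not from the
# source): rotating by n moves the last n elements to the front, and an n larger
# than the array length wraps around via n % len(arr).
assert array_rotate([1, 2, 3, 4, 5], 2) == [4, 5, 1, 2, 3]
assert array_rotate([1, 2, 3], 7) == array_rotate([1, 2, 3], 1)  # 7 % 3 == 1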
def _convert_id_to_name(cluster, transaction):
    """
    Helper function to convert the cluster with ids to cluster with names.
    """
    new_cluster = set()
    for element in cluster:
        graql_query = f'match $char id {element}, has name $name; get $name;'
        iterator = transaction.query(graql_query)
        answers = iterator.collect_concepts()
        for answer in answers:
            new_cluster.add(answer.value())
    return new_cluster
def validate_json(keys, json_like_object, empty=False):
    """Check if keys are present."""
    missing = ""
    for key in keys:
        try:
            value = json_like_object[key]
            if isinstance(value, str):
                if not value.strip():
                    if empty is True:
                        pass
                    else:
                        missing += key + ', '
            else:
                if value is None:
                    if empty is True:
                        pass
                    else:
                        missing += key + ', '
                else:
                    pass
        except KeyError:
            missing += key + ', '
    if missing:
        return missing[:-2]
    else:
        return True
def clean_ns(tag):
    """Return a tag and its namespace separately."""
    if '}' in tag:
        split = tag.split('}')
        return split[0].strip('{'), split[-1]
    return '', tag
def c(v):
    """convert"""
    if v == "NAN" or v == "-INF" or v == "INF":
        return None
    return v
def pair_subtract(lst: list) -> int:
    """
    Cleanly subtract the elements of a 2 element list.

    Parameters
    ----------
    lst: list
        The list to subtract elements from.

    Returns
    -------
    int
    """
    assert len(lst) == 2
    return lst[1] - lst[0]
def get_next_code(last_code):
    """Generate next code based on the last_code."""
    return (last_code * 252533) % 33554393
def _get_author_weight(authors_in_paper):
    """ Method that returns the weight of a single author in a paper"""
    # I check if there is at least 1 author otherwise the weight is 0
    if len(authors_in_paper) > 0:
        return 1. / len(authors_in_paper)
    else:
        return 0
def rmchars(chars: str, s: str) -> str:
    """Remove chars from s.

    >>> rmchars("123", "123456")
    '456'
    >>> rmchars(">=<.", ">=2.0")
    '20'
    >>> rmchars(">=<.", "")
    ''
    """
    for c in chars:
        s = s.replace(c, "")
    return s
def check_class_dict(param_type):
    """Check the class dict"""
    dict_type = str(param_type).replace("<", "")
    dict_type = dict_type.replace(">", "")
    dict_type = dict_type.split(" ")[-1]
    check_dect = dict_type != "dict" and not isinstance(param_type, dict)
    if not hasattr(param_type, '__annotations__') and check_dect:
        return True
    return False
def get_default_dict(template_dict):
    """
    Return a default dict from a template dictionary

    Args:
        :template_dict: Template dictionary

    Returns:
        :default_dict: New dictionary with defaults generated from 'template_dict'

    The template dictionary must have a specific structure as outlined below:

    .. code:: python

        template_dict = {
            'test_key1': ('default_value1', str),
            'test_key2': (1792, (int, float)),
        }

    The 'default_dict' will look like this:

    .. code:: python

        default_dict = {
            'test_key1': 'default_value1',
            'test_key2': 1792,
        }
    """
    default_dict = {}
    for key, (value, _) in template_dict.items():
        # Treat non-empty dictionary recursively
        if isinstance(value, dict) and value:
            value = get_default_dict(template_dict=value)
        default_dict[key] = value
    return default_dict
def subdivideData(d):
    """ subdivide data into categories (names, emails, passwords, subjects, bodies, recipients) """
    # Each category needs its own list; chained assignment (nm = em = ... = [])
    # would make all of the names aliases of a single shared list.
    nm, em, pw, sb, bd, rcp = [], [], [], [], [], []
    for x in range(len(d)):
        nm.append(d[x][0])
        em.append(d[x][1])
        pw.append(d[x][2])
        sb.append(d[x][3])
        bd.append(d[x][4])
        r = []
        y = 5
        while y < len(d[x]):
            r.append(d[x][y])
            y = y + 1
        rcp.append(r)
    return nm, em, pw, sb, bd, rcp
def create_events_model(areas, virus_states):
    """Create events for the model.

    Parameters
    ----------
    areas : list of strings
        List containing the names of all areas.
    virus_states : list of strings
        List containing the names of all virus variants.

    Returns
    -------
    events: dict
        Dictionary that contains the event names as keys and dictionaries that
        contain the event ids, and formulas as values.
    """
    events = {}
    for index1 in areas:
        for index2 in virus_states:
            keys = f"event_{index1}_{index2}"
            assignee = f"infectious_{index1}_vac0_{index2}"
            trigger_time_par = f"{index2}_{index1}_appears_time"
            trigger_quantity = f"{index2}_{index1}_appears_quantity"
            events[keys] = {
                "trigger_formula": f"geq(time, {trigger_time_par})",
                "assignee_id": assignee,
                "assign_formula": trigger_quantity,
            }
    return events
def turned_off_response(message):
    """Return a device turned off response."""
    return {
        "requestId": message.get("requestId"),
        "payload": {"errorCode": "deviceTurnedOff"},
    }
def merge(line):
    """
    Helper function that merges a single row or column in 2048
    """
    result = [0] * len(line)
    result_position = 0
    # iterate over all line values
    for line_value in line:
        if line_value != 0:
            # merge two tiles
            if result[result_position] == line_value:
                result[result_position] = 2 * line_value
                result_position += 1
            # put in blank spot
            elif result[result_position] == 0:
                result[result_position] = line_value
            # add to next position for a non-matching, occupied spot
            else:
                result_position += 1
                result[result_position] = line_value
    return result
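# Hedged usage sketch for merge (example rows are mine): each adjacent equal
# pair collapses once per move and zeros are pushed to the end, as in 2048.
assert merge([2, 2, 4, 4]) == [4, 8, 0, 0]
assert merge([2, 0, 2, 4]) == [4, 4, 0, 0]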
def batch_list(inputlist, batch_size):
    """
    Returns the inputlist split into batches of maximal length batch_size.
    Each element in the returned list (i.e. each batch) is itself a list.
    """
    list_of_batches = [inputlist[ii: ii + batch_size]
                       for ii in range(0, len(inputlist), batch_size)]
    return list_of_batches
def tri_recursion(k):
    """recursion of a value"""
    if k > 0:
        result = k + tri_recursion(k - 1)
        # print(result)
    else:
        result = 0
    return result
def price_fixer(price):
    """
    Removes the unnecessary strings from the price tag

    Parameters
    ----------
    price : str
        raw price description taken from the website.

    Returns
    -------
    float
        filtered price.
    """
    price = price.replace('KDV', "")
    price = price.replace('TL', "")
    price = price.replace('+', "")
    price = price.replace('.', "")
    price = price.replace(',', ".")
    price = price.replace('\n', "")
    price = price.replace(' ', "")
    return float(price)
def getGeolocalisationFromJson(image):
    """Get geolocalisation data of an image in the database

    Parameters:
        image (json): image from the validation data

    Returns:
        float: lng (degree)
        float: lat (degree)
        float: alt (degree)
        float: azimuth (degree)
        float: tilt (degree)
        float: roll (degree)
        float: focal (pixel)
        array: gcps
        int: image width
        int: image height
    """
    lng = float(image['lng'])
    lat = float(image['lat'])
    alt = float(image['alt'])
    azimuth = float(image['azimuth']) % 360
    tilt = float(image['tilt']) % 360
    roll = float(image['roll']) % 360
    focal = float(image['focal'])
    gcps = image['gcp_json']
    width = float(image['width'])
    height = float(image['height'])
    return lng, lat, alt, azimuth, tilt, roll, focal, gcps, width, height
def load_required_field(value):
    """load required_field"""
    if value == "y":
        return True
    return False
def remove_empty_buckets(json_data):
    """Removes empty buckets in place"""
    if 'bucket' not in json_data:
        return json_data
    idxs_to_remove = []
    for i, bucket in enumerate(json_data.get('bucket')):
        dataset = bucket['dataset']
        if len(dataset) == 1 and dataset[0]['point'] == []:
            idxs_to_remove.append(i)
    for i in sorted(idxs_to_remove, reverse=True):
        del json_data['bucket'][i]
    return json_data
def met_barpres(mbar):
    """
    Description:

        OOI Level 1 Barometric Pressure core data product, which is calculated
        by scaling the measured barometric pressure from mbar to Pascals.

    Implemented by:

        2014-06-25: Christopher Wingard. Initial code.

    Usage:

        Pa = met_barpres(mbar)

            where

        Pa = Barometric pressure (BARPRES_L1) [Pa]
        mbar = Barometric pressure (BARPRES_L0) [mbar]

    References:

        OOI (2012). Data Product Specification for L1 Bulk Meteorological Data
            Products. Document Control Number 1341-00360.
            https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >>
            Controlled >> 1000 System Level >>
            1341-00360_Data_Product_SPEC_BULKMET_OOI.pdf)
    """
    Pa = mbar * 100.
    return Pa
def detect_linear(errortype, atom_types, cclib_data):
    """
    Check whether a linear molecule contains the right number of frequencies
    """
    linear_options_3 = [
        ["I", "I", "I"],
        ["N", "N", "N"],
        ["N", "C", "H"],
        ["O", "C", "O"],
        ["O", "C", "S"],
        ["S", "C", "S"],
        ["F", "Be", "F"],
        ["F", "Xe", "F"],
        ["O", "C", "N"],
        ["S", "C", "N"],
    ]
    linear_options_4 = [["C", "C", "H", "H"]]

    if len(atom_types) == 3:
        for linear_3 in linear_options_3:
            if (
                sorted(atom_types) == sorted(linear_3)
                and len(cclib_data["vibrations"]["frequencies"]) != 4
            ):
                errortype = "linear_mol_wrong"
                break
    elif len(atom_types) == 4:
        for linear_4 in linear_options_4:
            if (
                sorted(atom_types) == sorted(linear_4)
                and len(cclib_data["vibrations"]["frequencies"]) != 7
            ):
                errortype = "linear_mol_wrong"
                break

    return errortype
def clip_black_by_luminance(color, threshold):
    """If the color's luminance is less than threshold, replace it with black.

    color: an (r, g, b) tuple
    threshold: a float
    """
    r, g, b = color
    if r + g + b < threshold * 3:
        return (0, 0, 0)
    return (r, g, b)
def merge_usage_periods(periods, new_period):
    """Merge a time period into an existing set of usage periods"""
    outlist = []
    for period in periods:
        if new_period[0] > period[1]:
            # No overlap - past the end
            outlist.append(period)
            continue
        if new_period[1] < period[0]:
            # No overlap - before the beginning
            outlist.append(period)
            continue
        # There must now be some overlap
        merged = True
        if new_period[0] < period[0]:
            period[0] = new_period[0]
        if new_period[1] > period[1]:
            period[1] = new_period[1]
        new_period = period
        outlist.append(new_period)
    return outlist
def findall_containing_string(list, string, inelement=False):
    """
    Description: Find all elements containing a substring and return those elements.
    Input: list - list object
           string - string to look for
           inelement - if True, return elements that contain string;
                       if False, return elements that do not contain it
    """
    # return updated list (not) containing the substring
    if inelement:
        return [element for element in list if string in element]
    else:
        return [element for element in list if string not in element]
def _bookmark_data(entry):
    """
    We encode bookmark info in a similar way to Pixiv to make it simpler to
    work with them both in the UI.
    """
    if not entry.get('bookmarked'):
        return None

    return {
        'tags': entry['bookmark_tags'].split(),
        'private': False,
    }
def _trim(strings):
    """Remove leading and trailing whitespace from each string."""
    return [x.strip() for x in strings]
def game_with_rows_all_zeroes(game):
    """ Check whether the game has a row that contains no ones or no zeroes """
    for row in game:
        if 1 not in row or 0 not in row:
            return True
    return False
def syncsort(a, b):
    """
    sorts a in ascending order (and b will tag along, so each element of b
    is still associated with the right element in a)
    """
    a, b = (list(t) for t in zip(*sorted(zip(a, b))))
    return a, b
def calc(temp):
    """calculates total value of the investment after the given time frame"""
    name, amount, interest, times, years = temp
    interest = interest / 100
    total = amount * (1 + interest / times) ** (times * years)
    return name, amount, interest * 100, times, years, total
def liss(root):
    """
    largest independent set in a given binary tree
    """
    if root is None:
        return 0
    if root.liss:
        return root.liss
    if root.left is None and root.right is None:
        root.liss = 1
        return root.liss

    # Size excluding current node
    size_excluding = liss(root.left) + liss(root.right)

    # Size including current node
    size_including = 1
    if root.left:
        size_including += liss(root.left.left) + liss(root.left.right)
    if root.right:
        size_including += liss(root.right.left) + liss(root.right.right)

    root.liss = max(size_excluding, size_including)
    return root.liss
def find_first(data):
    """
    >>> find_first([[0, 0], [0, 1]])
    (1, 1)
    >>> find_first([[0, 0], [0, 0]])
    """
    for y_index, y_value in enumerate(data):
        for x_index, x_value in enumerate(y_value):
            if x_value == 1:
                return (x_index, y_index)
    return None
def _sort_key(item):
    """
    Robust sort key that sorts items with invalid keys last.
    This is used to make sorting behave the same across Python 2 and 3.
    """
    key = item[0]
    return not isinstance(key, int), key
def dup_links(links):
    """Check for duplicated links"""
    print('Checking for duplicated links...')
    hasError = False
    seen = {}
    dupes = []
    for link in links:
        if link not in seen:
            seen[link] = 1
        else:
            if seen[link] == 1:
                dupes.append(link)
    if not dupes:
        print("No duplicate links")
    else:
        print(f"Found duplicate links: {dupes}")
        hasError = True
    return hasError
def get_charge_style(
    charge_styles, cutoffs, ewald_accuracy=None, dsf_damping=None
):
    """Get the Charge_Style section of the input file

    Parameters
    ----------
    charge_styles : list
        list of charge styles, one for each box
    cutoffs : list
        of coulombic cutoffs, one for each box. For a box with charge
        style 'none', the cutoff should be None
    ewald_accuracy : float, optional
        accuracy of ewald sum. Required if charge_style == ewald
    dsf_damping : float, optional
        value for dsf damping.
    """
    assert len(charge_styles) == len(cutoffs)
    valid_charge_styles = ["none", "cut", "ewald", "dsf"]
    for charge_style in charge_styles:
        if charge_style not in valid_charge_styles:
            raise ValueError(
                "Unsupported charge_style: {}. Supported options "
                "include {}".format(charge_style, valid_charge_styles)
            )
        if charge_style == "ewald":
            if ewald_accuracy is None:
                raise ValueError(
                    "Ewald selected as the charge style but "
                    "no ewald accuracy provided"
                )

    inp_data = """
# Charge_Style"""

    for charge_style, cutoff in zip(charge_styles, cutoffs):
        if charge_style == "none":
            inp_data += """
{charge_style}""".format(
                charge_style=charge_style
            )
        elif charge_style == "cut":
            inp_data += """
coul {charge_style} {cutoff}""".format(
                charge_style=charge_style, cutoff=cutoff
            )
        elif charge_style == "ewald":
            inp_data += """
coul {charge_style} {cutoff} {accuracy}""".format(
                charge_style=charge_style,
                cutoff=cutoff,
                accuracy=ewald_accuracy,
            )
        elif charge_style == "dsf":
            if dsf_damping is not None:
                inp_data += """
coul {charge_style} {cutoff} {damping}""".format(
                    charge_style=charge_style,
                    cutoff=cutoff,
                    damping=dsf_damping,
                )
            else:
                inp_data += """
coul {charge_style} {cutoff}""".format(
                    charge_style=charge_style, cutoff=cutoff
                )

    inp_data += """
!------------------------------------------------------------------------------
"""

    return inp_data
def orientation_string_nib2sct(s):
    """
    :return: SCT reference space code from nibabel one
    """
    opposite_character = {'L': 'R', 'R': 'L', 'A': 'P', 'P': 'A', 'I': 'S', 'S': 'I'}
    return "".join([opposite_character[x] for x in s])
def detect_read_more(bs4tag):
    """
    Not all news has a read-more link, so it must be detected beforehand to
    avoid errors. A bs4 Tag element is passed as the argument.
    It returns an empty string if there is no URL for readmore,
    else it returns the URL for readmore.
    """
    if bs4tag is None:
        return ""
    return bs4tag["href"]
def area(p):
    """Area of a polygon

    :param p: list of the points taken in any orientation,
              p[0] can differ from p[-1]
    :returns: area
    :complexity: linear
    """
    A = 0
    for i in range(len(p)):
        A += p[i - 1][0] * p[i][1] - p[i][0] * p[i - 1][1]
    return A / 2.
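# Hedged check for area (my example, not from the source): the shoelace sum for
# the unit square gives 1.0; traversing the points in the opposite orientation
# only flips the sign.
assert area([(0, 0), (1, 0), (1, 1), (0, 1)]) == 1.0
assert area([(0, 1), (1, 1), (1, 0), (0, 0)]) == -1.0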
def _roman_to_int(r):
    """
    Convert a Roman numeral to an integer.
    """
    if not isinstance(r, str):
        raise TypeError(f'Expected string, got {type(r)}')
    r = r.upper()
    nums = {'M': 1000, 'D': 500, 'C': 100, 'L': 50, 'X': 10, 'V': 5, 'I': 1}
    integer = 0
    for i in range(len(r)):
        try:
            value = nums[r[i]]
            if i + 1 < len(r) and nums[r[i + 1]] > value:
                integer -= value
            else:
                integer += value
        except KeyError:
            raise ValueError('Input is not a valid Roman numeral: %s' % r)
    return integer
def strip(L, e=0):
    """ strip all copies of e from end of list"""
    if len(L) == 0:
        return L
    i = len(L) - 1
    while i >= 0 and L[i] == e:
        i -= 1
    return L[:i + 1]
def AddRepositoryTags(prefix=''):
    """Add repository tagging into the output.

    Args:
      prefix: comment delimiter, if needed, to appear before tags

    Returns:
      list of text lines containing revision data
    """
    tags = []
    p4_id = '%sId:%s' % ('$', '$')
    p4_date = '%sDate:%s' % ('$', '$')
    tags.append('%s%s' % (prefix, p4_id))
    tags.append('%s%s' % (prefix, p4_date))
    return tags
def process_col_bool(attr_list, str_find):
    """
    Function that creates the boolean columns.

    Parameters
    ----------
    attr_list : list
        List containing the boolean attributes of an apartment.
    str_find : str
        String to find in each attribute from 'attr_list'.

    Returns
    -------
    bool
        True if any attribute in 'attr_list' contains 'str_find',
        False otherwise.
    """
    for attr in attr_list:
        if str_find in attr:
            return True
    return False
def quote_and_escape_value(value):
    # type: (str) -> str
    """
    Quote a string so it can be read from Lisp.
    """
    return '"' + value.replace('\\', '\\\\').replace('"', '\\"') + '"'
def get_added_lines(patch):
    """
    Get lines added with a patch.
    (e.g., git diff between two versions of a file)

    :param patch: the content of the patch
    :return: the lines added by the patch
    """
    added_lines = ""
    lines = patch.split('\n')
    for line in lines:
        if line.startswith("+"):
            # append each added line followed by a newline so lines stay separated
            added_lines += line[1:] + "\n"
    return added_lines
def estimate_cudnn_parameter_size(input_size, hidden_size, direction):
    """
    Compute the number of parameters needed to construct a stack of LSTMs.
    """
    single_rnn_size = 8 * hidden_size + 4 * (hidden_size * input_size) + 4 * (hidden_size * hidden_size)
    return direction * single_rnn_size
def above_threshold(student_scores, threshold):
    """
    :param student_scores: list of integer scores
    :param threshold : integer
    :return: list of integer scores that are at or above the "best" threshold.
    """
    best_students = []
    for score in student_scores:
        if score >= threshold:
            best_students.append(score)
    return best_students
def is_chinese_char(cp):
    """Checks whether CP is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean
    # characters, despite its name. The modern Korean Hangul alphabet is a
    # different block, as is Japanese Hiragana and Katakana. Those alphabets
    # are used to write space-separated words, so they are not treated
    # specially and handled like all of the other languages.
    return (0x4E00 <= cp <= 0x9FFF) or \
           (0x3400 <= cp <= 0x4DBF) or \
           (0x20000 <= cp <= 0x2A6DF) or \
           (0x2A700 <= cp <= 0x2B73F) or \
           (0x2B740 <= cp <= 0x2B81F) or \
           (0x2B820 <= cp <= 0x2CEAF) or \
           (0xF900 <= cp <= 0xFAFF) or \
           (0x2F800 <= cp <= 0x2FA1F)
def as_uri(value):
    """ puts <> around if not a CURIE """
    try:
        parts = value.split(":")
        if len(parts) == 2:
            return value
    except Exception:
        pass
    # str.join takes a single iterable, so wrap explicitly instead
    return "<" + value + ">"
def is_permuation(s1, s2):
    """
    s1: string
    s2: string
    return: True or False
    """
    # Comparing summed ASCII values gives false positives (e.g. "ad" vs "bc"),
    # so compare the sorted characters instead.
    if len(s1) != len(s2):  # O(1) early exit when lengths differ
        return False
    return sorted(s1) == sorted(s2)  # O(n log n)
def period_modified(p0, pdot, no_of_samples, tsamp, fft_size):
    """ returns period with reference to the middle epoch of observation """
    if fft_size == 0.0:
        # The shift is parenthesised so the power of two (largest power of two
        # not exceeding no_of_samples) is computed before the subtraction.
        return p0 - pdot * \
            float((1 << (no_of_samples.bit_length() - 1)) - no_of_samples) * tsamp / 2
    else:
        return p0 - pdot * float(fft_size - no_of_samples) * tsamp / 2
def parseCitationList(str2parse, occurList, commandName):
    """
    Parses one citation list that has been identified by ``parseTexDocument``
    and returns a list containing the :term:`bibtex` keys.
    """
    outStrList = []
    for occurence in occurList:
        startIndex = occurence + len(commandName) - 1
        while True:
            if str2parse[startIndex] == '{':
                startIndex += 1
                break
            startIndex += 1
        stopIndex = startIndex
        while True:
            if str2parse[stopIndex] == '}':
                break
            stopIndex += 1
        treadedStrings = str2parse[startIndex:stopIndex].split(',')
        for key in treadedStrings:
            outStrList.append(key.strip())
    return outStrList
def ftime(secs):
    """
    Format a time duration `secs` given in seconds into a human readable string.

    >>> ftime(12345)
    '3h25m45s'
    """
    units = ("d", "h", "m", "s")
    factors = (86400, 3600, 60, 1)
    res = ""
    if secs < 0.0:
        # add sign
        secs = abs(secs)
        res += "-"
    for (unit, factor) in zip(units, factors):
        value = int(secs // factor)
        secs -= value * factor
        if (value > 0) or (unit == units[-1]):
            res += "{value}{unit}".format(value=value, unit=unit)
    return res
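# Hedged usage sketch for ftime (values are mine): 12345 s is 3 h 25 m 45 s;
# negative durations keep their sign and zero-valued leading units are dropped.
assert ftime(12345) == "3h25m45s"
assert ftime(-90) == "-1m30s"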
def left_to_right_check(input_line: str, pivot: int):
    """
    Check row-wise visibility from left to right.
    Return True if number of building from the left-most hint is visible
    looking to the right, False otherwise.

    input_line - representing board row.
    pivot - number on the left-most hint of the input_line.

    >>> left_to_right_check("412453*", 4)
    True
    >>> left_to_right_check("452453*", 5)
    False
    >>> left_to_right_check("132354*", 3)
    True
    """
    row = input_line[1:6]
    visible = [1]
    base = int(row[0])
    for i in range(len(row) - 1):
        if base < int(row[i + 1]):
            visible.append(1)
            base = int(row[i])
    if len(visible) == pivot:
        return True
    return False
def idx2ht(idx, vertex_set_length):
    """Gets h_idx, t_idx from enumerated idx from h_t_idx_generator"""
    h_idx = idx // (vertex_set_length - 1)
    t_idx = idx % (vertex_set_length - 1)
    if t_idx >= h_idx:
        t_idx += 1
    return h_idx, t_idx
def toSnakeCase(text):
    """converts camel case to snake case"""
    return ''.join(['_' + c.lower() if c.isupper() else c for c in text]).lstrip('_')
def t_area(t_str: str) -> float:
    """Calculate area of a triangle (basic).

    Args:
        t_str: <str> triangle shape as a string.

    Returns:
        <float> triangle area.
    """
    rows = t_str.split('\n')[2:-1]
    base = rows[-1].count(' ')
    height = sum(map(lambda string: ' ' in string, rows))
    return (base * height) / 2
def expand_init_dict(init_dict):
    """Enhance the read-in initialization dictionary by adding model parameters
    derived from the specified initialization file"""

    # Calculate range of years of education in the (simulated) sample
    educ_min = init_dict["INITIAL_CONDITIONS"]["educ_min"]
    educ_max = init_dict["INITIAL_CONDITIONS"]["educ_max"]
    educ_range = educ_max - educ_min + 1

    # Calculate covariances of the error terms given standard deviations
    shocks_cov = init_dict["PARAMETERS"]["optim_paras"][14:17]
    shocks_cov = [shocks_cov[0] ** 2, shocks_cov[1] ** 2, shocks_cov[2] ** 2]

    init_dict["DERIVED_ATTR"] = {"educ_range": educ_range, "shocks_cov": shocks_cov}

    # Return function output
    return init_dict
def response_list(input_list):
    """xmltodict returns different structure if there's one item on the list."""
    if isinstance(input_list, dict):
        input_list = [input_list]
    return input_list
def FIELD(name: str) -> str:
    """
    Creates a reference to a field

    Args:
        name: field name

    Usage:
        >>> FIELD("First Name")
        '{First Name}'
    """
    return "{%s}" % name
def add_config(config):
    """Adds config section"""
    uml = ''
    if config is not None and len(config) >= 3:
        # Remove all empty lines
        config = '\n'.join([line.strip() for line in config.split('\n') if line.strip() != ''])
        uml += '\n\' Config\n\n'
        uml += config
        uml += '\n\n'
    return uml
def k_value(B=0.5, period=18):
    """
    k = e/2/pi/m/c*period*B
    B is peak field of undulator [T]
    period is period in mm
    """
    return 0.09337 * B * period
def simple_decomp(compressed):
    """Decompress string skipping markers within the scope of a previous one."""
    decompressed = 0
    i = 0
    while i < len(compressed):
        if compressed[i] == '(':
            marker = ''
            in_marker = True
            j = 1
            while in_marker:
                if compressed[i + j] == ')':
                    in_marker = False
                else:
                    marker += compressed[i + j]
                j += 1
            split_marker = marker.split('x')
            repeated, reps = int(split_marker[0]), int(split_marker[1])
            decompressed += repeated * reps
            i += (len(marker) + 2) + repeated
        else:
            decompressed += 1
            i += 1
    return decompressed
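# Hedged walk-through for simple_decomp (input strings are mine, in the
# "(NxM)data" marker style the function expects): "(3x3)XYZ" counts as 9
# characters, and markers inside a skipped span are not expanded again.
assert simple_decomp("(3x3)XYZ") == 9
assert simple_decomp("X(8x2)(3x3)ABCY") == 18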
def filename_parse(filename):
    """
    Parses filename to get information about the method used.
    It is assumed that u-pbe/avtz on hcn will be named hcn_pbe_avtz_u.xxx,
    where xxx is an arbitrary extension.
    """
    # Since SV-P is misspelled as SV-P_, catch that
    filename = filename.replace("SV-P_", "SV-P")
    # Same with 6-31+G-d,p
    filename = filename.replace("6-31+G-d,p_", "6-31+G-d,p")
    tokens = filename.split("/")[-1].split("_")
    mol = tokens[0]
    func = tokens[1]
    basis = tokens[2].split(".")[0]
    name = func + "/" + basis

    if func in ['rDCSD', 'df-lrmp2']:
        unrestricted = False
    #elif func in ['luCCSD', 'uCCSD', 'uDCSD']:
    elif func in ['uCCSD', 'uDCSD']:
        unrestricted = True
    else:
        unrestricted = (len(tokens) == 4)

    name = "u-" * unrestricted + name

    if func in ['rDCSD', 'uDCSD']:
        func = 'DCSD'

    return mol, func, basis, unrestricted, name
def invert_dict(dict_to_invert):
    """Invert Python dictionary.

    Examples
    --------
    >>> invert_dict({'key': 'value'})
    {'value': 'key'}
    """
    return {v: k for k, v in dict_to_invert.items()}
def use_clip_fps_by_default(f, clip, *a, **k):
    """ Will use clip.fps if no fps=... is provided in **k """

    def fun(fps):
        if fps is not None:
            return fps
        elif getattr(clip, "fps", None):
            return clip.fps
        raise AttributeError(
            "No 'fps' (frames per second) attribute specified"
            " for function %s and the clip has no 'fps' attribute. Either"
            " provide e.g. fps=24 in the arguments of the function, or define"
            " the clip's fps with `clip.fps=24`" % f.__name__
        )

    func_code = f.__code__
    names = func_code.co_varnames[1:]

    new_a = [fun(arg) if (name == "fps") else arg for (arg, name) in zip(a, names)]
    new_kw = {k: fun(v) if k == "fps" else v for (k, v) in k.items()}

    return f(clip, *new_a, **new_kw)
def convert_to_base7(num):
    """
    :type num: int
    :rtype: str
    """
    if num == 0:
        return '0'
    o = ''
    a = abs(num)
    while a != 0:
        o = str(a % 7) + o
        a //= 7  # integer division; `a /= 7` would produce floats in Python 3
    return o if num > 0 else '-' + o
def msToTime(ms):
    """ Convert milliseconds to human readable time"""
    secs = int(ms / 1000)
    mins = int(secs / 60)
    if mins < 60:
        return str(mins) + " mins"
    else:
        hrs = int(mins / 60)
        mins = int(mins % 60)
        return str(hrs) + "h " + str(mins) + "m"
def num_min_repeat(arr):
    """
    Discuss the length of the longest increasing sequence.
    length = the number of distinct elements
    Use an end to store the last element's index of the seq.
    Loop the sorted array temp (monotonically increasing), and let index = arr.index(x).
    (interviewer helped me modify the update rule)
    case 1: index > end => we can append x after end and hold the monotonicity
    case 2: index < end => we need a repetition to append x after end, num += 1
    Update end as end <- index.

    If elements are not distinct:
    <a> temp = set(arr)
    <b> we may find multi matches as indices = [i1, i2, i3, ..., iN], where i1 < i2 < ... < iN
        case 1: end < i1, use i1 to update end
        case 2: end > iN, we need a repetition, use i1 to update end
        case 3: i1 < end < iN, use the smallest i which is greater than end to update end
    """
    if not arr:
        raise Exception("Empty Array")
    # sort
    temp = arr[:]  # <a>
    temp.sort()
    # init
    num = 1
    end = arr.index(temp[0])  # pick the minimum index
    # loop
    for i in range(1, len(temp)):
        # find index
        x = temp[i]
        index = arr.index(x)  # <b>
        # need a repetition
        if index < end:
            num += 1
        # update
        end = index
    return num
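# Hedged usage sketch for num_min_repeat (my examples): for [3, 1, 2] the values
# 1 and 2 can be collected in one left-to-right pass but 3 needs a second pass,
# while an already sorted array needs only one.
assert num_min_repeat([3, 1, 2]) == 2
assert num_min_repeat([1, 2, 3]) == 1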
def saturated_vapour_pressure_average(svp_24_max, svp_24_min):
    r"""
    Average saturated vapour pressure based on two saturated vapour pressure
    values calculated using minimum and maximum air temperature respectively.
    This is preferable to calculating saturated vapour pressure using the
    average air temperature, because of the strong non-linear relationship
    between saturated vapour pressure and air temperature.

    .. math ::
        e_{s}=\frac{e^{0}\left(T_{max}\right)+e^{0}\left(T_{min}\right)}{2}

    Parameters
    ----------
    svp_24_max : float
        daily saturated vapour pressure based on maximum air temperature
        :math:`e_{s,max}`
        [mbar]
    svp_24_min : float
        daily saturated vapour pressure based on minimum air temperature
        :math:`e_{s,min}`
        [mbar]

    Returns
    -------
    svp_24 : float
        daily saturated vapour pressure
        :math:`e_{s,24}`
        [mbar]
    """
    return (svp_24_max + svp_24_min) / 2
def get_concat_level_bits(i, n):
    """Returns the bits of multiplying the current variable by the i-th index
    of the previous one.

    Take in integers i and n, returns a string containing the smt2 concat
    operation combining the bits of the multiplication of the current variable
    by the i-th index of the previous variable.

    Args:
        i: An integer, the index of the previous variable
        n: An integer, the number of bits in each bitvector
    """
    concats = []
    if i > 0:
        concats.append(f"(concat m{i}_{i} #b{'0' * i})")
    else:
        concats.append("m0_0")
    if i < (n - 1):
        for j in range(i + 1, n):
            rhs = concats[j - i - 1]
            concat = ["(concat", f"m{i}_{j}", rhs + ")"]
            concats.append(" ".join(concat))
    return concats[-1]
def validate_numeric(input):
    """Validates input to see if it's an int or integer numeric string

    Args:
        input: data corresponding to key in dictionary from POST request

    Returns:
        boolean: False if input cannot be cast as int
        int: value of input if castable as int
    """
    try:
        output = int(input)
    except ValueError:
        return False
    return output
def redshift_correct(z, wavelengths):
    """
    Redshift correction

    Correct a given numpy array of wavelengths for redshift effects using an
    accepted redshift value.

    Args:
        z (float): a redshift value
        wavelengths (array): a numpy array containing wavelength values

    Returns:
        array: a numpy array of wavelength values of the same size as the input
        wavelengths array which has been corrected to the emitted wavelength
        values for the rest-frame of the object in question.
    """
    wavelengths_corrected = wavelengths / (z + 1)
    return wavelengths_corrected
def get_clean_fips(fips):
    """
    Given a FIPS code, ensure it is returned as a properly formatted FIPS code of length 5

    Example:
        get_clean_fips(123) = "00123"
        get_clean_fips("0002") = "00002"
        get_clean_fips("00001") = "00001"

    :param fips: The FIPS code to clean
    :return: The 5-digit FIPS code as a string
    """
    as_string = str(fips)
    size = len(as_string)
    fips_length = 5
    difference = fips_length - size
    if difference > 0:
        as_string = "0" * difference + as_string
    return as_string
def get_ordinal(number):
    """Produces an ordinal suffix (1st, 2nd, 3rd, 4th) from a number"""
    # 11, 12 and 13 take "th" despite ending in 1, 2, 3
    if number % 100 in (11, 12, 13):
        return "th"
    if number % 10 == 1:
        return "st"
    elif number % 10 == 2:
        return "nd"
    elif number % 10 == 3:
        return "rd"
    else:
        return "th"
def next_greater_digit_index(digits: list, i: int) -> int:
    """
    Find the next greater digit in the right portion of number[i] - that is
    from digit at index i+1 to last digit. Let that digit be number[j] at index 'j'.
    :param digits: list of digits
    :param i: index of number[i]
    :return: next greater digit in the right portion of number[i]
    """
    j: int = -1
    current = ''
    if len(digits[i:]) == 1 and digits[1] > digits[0]:
        return i
    else:
        for index, digit in enumerate(digits[i:]):
            if digits[i - 1] < digit:
                if current == '':
                    current = digit
                    j = i + index
                elif current > digit:
                    current = digit
                    j = i + index
    return j
def format_filename(name):
    """Take a string and return a valid filename constructed from the string.
    Uses a whitelist approach: any characters not present in valid_chars are
    removed. Also spaces are replaced with underscores.

    Note: this method may produce invalid filenames such as ``, `.` or `..`
    When I use this method I prepend a date string like '2009_01_15_19_46_32_'
    and append a file extension like '.txt', so I avoid the potential of using
    an invalid filename.
    """
    import string
    valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
    filename = ''.join(c for c in name if c in valid_chars)
    filename = filename.replace(' ', '_')  # I don't like spaces in filenames.
    return filename
def size(n, abbriv='B', si=False):
    """size(n, abbriv='B', si=False) -> str

    Convert the length of a bytestream to human readable form.

    Arguments:
      n(int,str): The length to convert to human readable form
      abbriv(str):

    Example:
        >>> size(451)
        '451B'
        >>> size(1000)
        '1000B'
        >>> size(1024)
        '1.00KB'
        >>> size(1024, si=True)
        '1.02KB'
        >>> [size(1024 ** n) for n in range(7)]
        ['1B', '1.00KB', '1.00MB', '1.00GB', '1.00TB', '1.00PB', '1024.00PB']
    """
    if isinstance(n, (bytes, str)):
        n = len(n)

    base = 1000.0 if si else 1024.0
    if n < base:
        return '%d%s' % (n, abbriv)

    for suffix in ('K', 'M', 'G', 'T'):
        n /= base
        if n < base:
            return '%.02f%s%s' % (n, suffix, abbriv)

    return '%.02fP%s' % (n / base, abbriv)
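# Hedged usage sketch for size (my examples): bytes objects are measured by
# length before conversion, and si=True switches the divisor to 1000.
assert size(b"A" * 2048) == "2.00KB"
assert size(10**6, si=True) == "1.00MB"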
def as_dir(directory):
    """
    Add a forward slash if one is not at the end of a string.
    """
    if directory[-1] != '/':
        return directory + "/"
    else:
        return directory
def replace_non_digits(input_string):
    """Replaces non-digits in known dates with digits"""
    input_string = input_string.replace('O', '0')
    input_string = input_string.replace('o', '0')
    input_string = input_string.replace('l', '1')
    input_string = input_string.replace('I', '1')
    input_string = input_string.replace('B', '8')
    input_string = input_string.replace('S', '5')
    input_string = input_string.replace('Q', '0')
    return input_string
def peek_ahead(string, pos):
    """
    Return the next character from ``string`` at the given position.
    Return ``None`` at the end of ``string``.
    We never need to peek more than one character ahead.
    """
    return None if pos == len(string) else string[pos]
def UC_V(V_mm, A_catch, outUnits):
    """Convert volume from mm to m^3 or to litres. outUnits 'm3' or 'l'"""
    factorDict = {'m3': 10**3, 'l': 10**6}
    V = V_mm * factorDict[outUnits] * A_catch
    return V
def parse_type_opts(ostr):
    """
    Parse options included in type definitions

    Type definitions consist of 1) type name, 2) parent type, 3) options string,
    4) list of fields/items

    Returns a dict of options:
        String   Dict key    Dict val   Option
        ------   --------    --------   ------------
        ">*"     "pattern"   string     regular expression to match against String value
    """
    opts = {}
    if ostr[:1] == ">":
        opts["pattern"] = ostr[1:]
    elif ostr:
        print("Unknown type option", ostr)
    return opts
def extract_exp_from_diff(diff_exp):
    """
    Takes in an expression that has been run through pass_reverse_diff and
    returns just the main expression, not the derivatives
    """
    assert(diff_exp[0] == "Return")
    assert(len(diff_exp) == 2)
    diff_retval = diff_exp[1]
    if diff_retval[0] != "Tuple":
        return diff_exp
    exp = diff_retval[1]
    return ("Return", exp)
def get_updated_jobs(jobs, name_map):
    """get updated jobs."""
    new_jobs = []
    for job in jobs:
        if job["name"] in name_map.keys():
            job["cluster"] = name_map[job["name"]]
        new_jobs.append(job)
    return new_jobs
def verse(day: int):
    """Create a verse"""
    ordinal = [
        'first', 'second', 'third', 'fourth', 'fifth', 'sixth',
        'seventh', 'eighth', 'ninth', 'tenth', 'eleventh', 'twelfth'
    ]
    verse: list = [
        f'On the {ordinal[day-1]} day of Christmas,',
        'My true love gave to me,'
    ]
    presents: list = [
        'A partridge in a pear tree',
        'Two turtle doves',
        'Three French hens',
        'Four calling birds',
        'Five gold rings',
        'Six geese a laying',
        'Seven swans a swimming',
        'Eight maids a milking',
        'Nine ladies dancing',
        'Ten lords a leaping',
        'Eleven pipers piping',
        'Twelve drummers drumming'
    ]
    for n in range(day - 1, -1, -1):
        if day > 1 and n == 0:
            verse.append(f'And {presents[n].lower()}.')
        elif day == 1 and n == 0:
            verse.append(f'{presents[n]}.')
        else:
            verse.append(f'{presents[n]},')
    return '\n'.join(verse)
def filter_length(d, n):
    """Select only the words in d that have n letters.

    d: map from word to list of anagrams
    n: integer number of letters

    returns: new map from word to list of anagrams
    """
    res = {}
    for word, anagrams in d.items():
        if len(word) == n:
            res[word] = anagrams
    return res
def _parse_date(date: str) -> tuple:
    """Return date as a tuple of ints in the format (D, M, Y)."""
    date_split = date.split("/")
    return int(date_split[0]), int(date_split[1]), int(date_split[2])
def parse_headers(response, convert_to_lowercase=True):
    """
    Receives an HTTP response/request bytes object and parses the HTTP headers.
    Return a dict of all headers.
    If convert_to_lowercase is true, all headers will be saved in lowercase form.
    """
    valid_headers = (
        b"NOTIFY * HTTP/1.1\r\n",
        b"M-SEARCH * HTTP/1.1\r\n",
        b"HTTP/1.1 200 OK\r\n",
    )
    if not any([response.startswith(x) for x in valid_headers]):
        raise ValueError(
            "Invalid header: Should start with one of: {}".format(valid_headers)
        )
    lines = response.split(b"\r\n")
    headers = {}
    # Skip the first line since it's just the HTTP return code
    for line in lines[1:]:
        if not line:
            break  # Headers and content are separated by a blank line
        if b":" not in line:
            raise ValueError("Invalid header: {}".format(line))
        header_name, header_value = line.split(b":", 1)
        # Honour the convert_to_lowercase flag instead of always lowercasing
        name = header_name.decode("utf-8").strip()
        if convert_to_lowercase:
            name = name.lower()
        headers[name] = header_value.decode("utf-8").strip()
    return headers
def create_coin(coin: tuple = ('HEADS', 'TAILS',)):
    """Define your coin.

    coin - tuple with 2 values. Default (heads, tails,)
    """
    COIN = {True: coin[0], False: coin[1]}
    return COIN
def bounds(points):
    """
    >>> bounds([(0, 0)])
    ((0, 0), (0, 0))
    >>> bounds([(7, 1), (-1, 9)])
    ((-1, 1), (7, 9))
    """
    left = min(x for (x, y) in points)
    right = max(x for (x, y) in points)
    top = min(y for (x, y) in points)
    bottom = max(y for (x, y) in points)
    return ((left, top), (right, bottom))
def dict_py_to_bash(d, bash_obj_name="DICT"):
    """
    Adapted from source:
    * https://stackoverflow.com/questions/1494178/how-to-define-hash-tables-in-bash

    Converts a Python dictionary or pd.Series to a bash dictionary.
    """
    bash_placeholder = "declare -A {}=(".format(bash_obj_name)
    for k, v in d.items():
        bash_placeholder += ' ["{}"]="{}"'.format(k, v)
    bash_placeholder += ")"
    return bash_placeholder
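# Hedged usage sketch for dict_py_to_bash (my example): the result is a bash
# associative-array declaration that can be eval'd in a shell script.
assert dict_py_to_bash({"a": 1}) == 'declare -A DICT=( ["a"]="1")'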
def strip_2tuple(dict):
    """
    Strips the second value of the tuple out of a dictionary
    {key: (first, second)} => {key: first}
    """
    new_dict = {}
    for key, (first, second) in dict.items():
        new_dict[key] = first
    return new_dict
def split_text(text, n=100, character=" "):
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i: i + n]).strip() for i in range(0, len(text), n)]
def underscore_to_pascalcase(value):
    """Converts a string from underscore_case to PascalCase.

    Args:
        value: Source string value.
            Example - hello_world

    Returns:
        The string, converted to PascalCase.
        Example - hello_world -> HelloWorld
    """
    if not value:
        return value

    def __CapWord(seq):
        for word in seq:
            yield word.capitalize()

    return ''.join(__CapWord(word if word else '_' for word in value.split('_')))
def viralAdvertising(n):
    """
    Calculate the cumulative number of people who like the advert after n days.

    Args:
        n: number of days (iterations)
    """
    count = 0
    start = 5
    for i in range(0, n):
        # half of those reached like the advert; integer division keeps counts whole
        count = count + start // 2
        start = (start // 2) * 3
    return count
def get_mean_value(predicted_values):
    """
    Calculate the mean prediction value over all predicted values
    :param predicted_values: all predicted values
    :type predicted_values: list
    :return: mean prediction value
    :rtype: float
    """
    if len(predicted_values) == 0:
        return None
    sum_value = 0
    for value in predicted_values:
        sum_value += value
    return sum_value / len(predicted_values)
def construct_filter(specs, match_template):
    """
    Build parser according to specification

    :param match_template:
    :param specs: list of strings of specifications
    :return: filter string
    """
    spec_string = " & ".join(specs)
    return match_template.format(specs=spec_string)
def recFibonacci(n):
    """Recursive Fibonacci function.
    """
    if n <= 0:
        return 0
    elif n == 1:
        return 1
    else:
        return recFibonacci(n - 2) + recFibonacci(n - 1)