content
stringlengths
42
6.51k
def _substitute_username(username, email): """Use parts of the email address to format the username""" local_part, domain = email.split('@', 2) username = username.replace('%EMAILADDRESS%', email) username = username.replace('%EMAILLOCALPART%', local_part) username = username.replace('%EMAILDOMAIN%', domain) return username
def sum_range(num: int) -> int:
    """Return the sum of range(num + 1), i.e. 0 + 1 + ... + num.

    Raises:
        AssertionError: if num is not an int (preserved from original).
        ValueError: if num is negative (the original recursed forever).
    """
    assert type(num) == int, "Non-negative integers only."
    if num < 0:
        # The original recursive version hit the recursion limit here.
        raise ValueError("Non-negative integers only.")
    # Closed form avoids recursion-depth limits for large num.
    return num * (num + 1) // 2
def get_pairs(input_lst):
    """Build all position-ordered pairs of distinct values.

    :param input_lst: ['a','b','c']
    :return: [['a', 'b'], ['a', 'c'], ['b', 'c']]
    """
    pairs = []
    for idx, first in enumerate(input_lst):
        # Scan the remainder of the list (including idx itself, which the
        # inequality filter below discards — matching the original).
        for second in input_lst[idx:]:
            if first != second:
                pairs.append([first, second])
    return pairs
def instance_get_uuid(instance):
    """ Retrieve the instance uuid """
    # NOTE(review): this reads the 'id' key, not 'uuid' — presumably the
    # mapping stores the uuid under 'id'; confirm against the instance
    # schema used by callers.
    return instance['id']
def check_obj(obj_out):
    """Validate that emmaptosurf wrote enough geometry into the .obj.

    Scans obj_out (the textual output of emmaptosurf) for the first line
    starting with "Wrote" and checks that both reported counts (fields 1
    and 4) are at least 1000; too few indicates a wrong ISO level.

    Returns:
        bool: True/False from the first "Wrote" line; False when no such
        line exists (the original implicitly returned None there).
    """
    for line in obj_out.split("\n"):
        fields = line.split(" ")
        if fields[0] == "Wrote":
            # fields[1] and fields[4] are the two element counts.
            return int(fields[1]) >= 1000 and int(fields[4]) >= 1000
    return False
def issorted(comparables, lo=0, hi=None):
    """Return True if comparables[lo:hi] is in ascending order.

    Arguments:
    comparables -- an array of which the elements can be compared
    lo -- lower bound of indices (default 0)
    hi -- higher bound of indices (default None, meaning len(comparables))
    """
    if hi is None:
        hi = len(comparables)
    # Sorted iff no adjacent pair is out of order.
    return not any(
        comparables[i] > comparables[i + 1] for i in range(lo, hi - 1)
    )
def curr_psi_both_incjxns(NI, NE, skipped_exon_len, read_len, overhang_len, min_reads=0):
    """
    Compute the Psi value, using both inclusion junctions as source of
    inclusion reads.

      \Psi = DI / (DI + DE)

    NI is: number of reads in upstream inclusion jxn + number of reads in
    downstream inclusion jxn + number of body reads in the skipped exon
    itself (for a skipped exon event, for example.)

    Requires NI + NE >= min_reads; returns 'n/a' otherwise.  (The
    original docstring stated the condition backwards.)
    """
    # Too few supporting reads to call a Psi value.
    if (NI + NE) < min_reads:
        return 'n/a'
    # Inclusion density: NI normalised by the number of mappable read
    # positions across the two inclusion junctions (upstream+downstream).
    # NOTE(review): despite the original comment about body reads, no
    # term based on skipped_exon_len appears here — that parameter is
    # unused; confirm whether body reads should contribute.
    DI = float(NI) / float((read_len + 1 - (2*overhang_len)) + (read_len + 1 - (2*overhang_len)))
    # Exclusion density: NE normalised by mappable positions on the
    # single exclusion junction.
    DE = float(NE) / float(read_len + 1 - (2*overhang_len))
    # Avoid 0/0 when neither isoform has any reads.
    if DI == 0 and DE == 0:
        return 0
    psi = float(DI) / float(DI + DE)
    return psi
def int_to_lcd_binary(i):
    """Map a decimal digit 0-9 to its seven-segment LCD bit pattern.

    Raises KeyError for values outside 0-9.
    """
    segment_patterns = {
        0: 0b11111100,
        1: 0b01100000,
        2: 0b11011010,
        3: 0b11110010,
        4: 0b01100110,
        5: 0b10110110,
        6: 0b10111110,
        7: 0b11100000,
        8: 0b11111110,
        9: 0b11110110,
    }
    return segment_patterns[i]
def parseMD(md):
    """Separate an MD/cigar field into alternating int and string runs.

    Maximal digit runs become ints; non-digit runs stay strings.
    e.g. "10A5" -> [10, 'A', 5].  An empty input yields [] (the original
    state machine raised IndexError on "").
    """
    import re  # local import keeps this function self-contained
    # One regex pass replaces the original hand-rolled state machine and
    # avoids shadowing the builtin `str`.
    return [int(tok) if tok.isdigit() else tok
            for tok in re.findall(r'\d+|\D+', md)]
def _num_leading_spaces(line): """ number of leading spaces :line: the line to be examined :returns: number of leading spaces in line """ try: ix = 0 while line[ix] == ' ': ix += 1 return ix except IndexError: return len(line)
def get_tw_refs(tw_refs_by_verse, book, chapter, verse):
    """Return the list of refs for book/chapter/verse, or [] if any
    level of the nested mapping is missing."""
    try:
        return tw_refs_by_verse[book][chapter][verse]
    except KeyError:
        return []
def int1(x):
    """
    Returns 1 if x is 1 or "1", otherwise 0.

    Parameters
    ----------
    x : int or str

    Returns
    -------
    ret : 1 or 0
    """
    return 1 if int(x) == 1 else 0
def verbose_dic(dictionary):
    """Render a dict as trailing-space-separated 'key: value ' pairs,
    formatting each value with two decimals."""
    parts = []
    for key, value in dictionary.items():
        # The original spec was '{:2f}' (width 2, default 6 decimals) —
        # almost certainly a typo for '{:.2f}'.
        parts.append(f"{key}: {value:.2f} ")
    return "".join(parts)
def oneof(chars):
    """oneof(chars) represents 'a1|a2|a3|...|an', i.e. a match with any
    single character drawn from chars."""
    return ('oneof', tuple(chars))
def get_loc_search_row_color(color_conf_list, id):
    """Get Location Search row color as a double-quoted string;
    id == -1 selects the default "black"."""
    if id == -1:
        return '"black"'
    return '"{}"'.format(color_conf_list[id].color_text)
def count_prizes(palmares):
    """
    Support function to count the prizes of each kind.  Takes the
    palmares as stored in the managers database on mongodb and returns
    the overall number of trophies of each kind in a dictionary.
    """
    # Tally every known prize type in one pass; unknown types are ignored
    # (as in the original if/elif chain).
    counts = {'Coppa di Lega': 0, 'Scudetto': 0, 'Champions': 0,
              'Supercoppa': 0, 'Porta Violata': 0, 'Cartellino Facile': 0,
              "Panchina D'Oro": 0, 'Caduti': 0}
    for prize in palmares:
        kind = prize['Type']
        if kind in counts:
            counts[kind] += 1
    # "in-game" trophies vs. main competition trophies.
    ig = (counts['Porta Violata'] + counts['Cartellino Facile']
          + counts["Panchina D'Oro"] + counts['Caduti'])
    tot = (counts['Coppa di Lega'] + counts['Scudetto']
           + counts['Champions'] + counts['Supercoppa'])
    return {'tot': tot, 'sc': counts['Scudetto'], 'ch': counts['Champions'],
            'cop': counts['Coppa di Lega'], 'sup': counts['Supercoppa'],
            'tot_ig': ig, 'pv': counts['Porta Violata'],
            'cf': counts['Cartellino Facile'],
            'po': counts["Panchina D'Oro"], 'ca': counts['Caduti']}
def parse_data(data, d=None):
    """Parse incoming -D options (strings of the form KEY or KEY=VAL)
    into a dict; bare keys map to True."""
    result = {} if d is None else d
    for item in data:
        key, sep, val = item.partition('=')
        # No '=' present -> flag-style option, stored as True.
        result[key] = val if sep else True
    return result
def get_http_method(dut=None):
    """
    Common method to fetch the HTTP method
    :param dut: device-under-test handle; currently unused placeholder
    :return: the HTTP method name, presently hard-coded to "patch"
    """
    http_method = "patch" # This line will be replaced with infra provided API to fetch the HTTP method option from Command Line
    return http_method
def similarity_indices(x, eps):
    """
    Best explained by examples: for array
    [0.232, 0.002, 0.45, 1.2, 0.233, 1.2, 0.5, 0.231, 0,0,0,0,0]
    return [0, 1, 2, 3, 0, 3, 4, 0, 1,1,1,1,1] if eps=0.01.
    Other examples:
    [0, 2, 0, 2] [0, 1, 0, 1]
    [0, 0, 0, 0] [0, 0, 0, 0]
    [1, 2, 3, 4] [0, 1, 2, 3]
    """
    # For every element collect the index list of all elements within eps
    # of it; duplicate index lists are stored only once, so each distinct
    # list ends up naming one similarity group.
    ii_all = []
    for i, xi in enumerate(x):
        ii = []
        for j, xj in enumerate(x):
            if(abs(xi-xj)<eps):
                ii.append(j)
        if(not ii in ii_all):
            ii_all.append(ii)
    # Relabel: each group's position in ii_all becomes its id.
    # NOTE(review): later groups overwrite earlier ones where index lists
    # overlap — assumed intentional tie-breaking; confirm with callers.
    y = x[:]
    for i, ii in enumerate(ii_all):
        for iii in ii:
            y[iii]=i
    return y
def is_pentagonal(n: int) -> bool:
    """
    Returns True if n is pentagonal, False otherwise.

    >>> is_pentagonal(330)
    True
    >>> is_pentagonal(7683)
    False
    >>> is_pentagonal(2380)
    True
    """
    # Invert P(k) = k(3k - 1)/2: k = (1 + sqrt(1 + 24n)) / 6 must be whole.
    discriminant_root = (1 + 24 * n) ** 0.5
    candidate = (1 + discriminant_root) / 6
    return candidate % 1 == 0
def read_altitude(pressure, sealevel_pa=101325.0):
    """Calculates the altitude in meters from a pressure reading (Pa),
    using the standard barometric formula."""
    pressure_ratio = pressure / sealevel_pa
    return 44330.0 * (1.0 - pressure_ratio ** (1.0 / 5.255))
def solve(digits):
    """Parse the kilo-string using '0' as delimiters into tokens.
    Iterate through each token, if its size less than 'digits'.

    :param digits: Number of consecutive digits to multiply.
    :returns: Returns the maximum product of `digits` adjacent digits.
    """
    # 1000-digit number from Project Euler problem 8, embedded verbatim.
    kilostr = """73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450"""
    # Strip whitespace, then split on '0' — any window containing a zero
    # has product 0, so only zero-free runs need scanning.
    parsed = "".join(kilostr.split()).split('0')
    # NOTE(review): `max` shadows the builtin within this function.
    max = 0
    for seq in parsed:
        # Slide a window of `digits` over each zero-free run; runs shorter
        # than `digits` produce an empty range and are skipped naturally.
        for i in range(len(seq) - digits + 1):
            acc = 1
            for j in range(i,i+digits):
                acc *= int(seq[j])
            if acc > max:
                max = acc
    return max
def inverse(u, v):
    """inverse(u:long, v:long):long
    Return the inverse of u mod v, via the extended Euclidean algorithm.
    """
    # (u3, v3) runs the plain Euclidean remainder sequence while u1
    # tracks the Bezout coefficient of u.
    u3, v3 = int(u), int(v)
    u1, v1 = 1, 0
    while v3 > 0:
        q=divmod(u3, v3)[0]
        u1, v1 = v1, u1 - v1*q
        u3, v3 = v3, u3 - v3*q
    # Normalise the coefficient into the range [0, v).
    while u1<0:
        u1 = u1 + v
    return u1
def difference_value_calculator(repository, undeployed_value_tuple, deployed_value_tuple):
    """
    Generate a value based on difference between given undeployed and
    deployed value.
    """
    # Index 2 holds the numeric value in each tuple; `repository` is
    # accepted for interface compatibility but unused here.
    return undeployed_value_tuple[2] - deployed_value_tuple[2]
def group_by_event(data):
    """Group trace-event durations by event name.

    Returns a dict mapping trace name -> list of duration values, taken
    from data["traceEvents"]; events without a "name" key are skipped.
    """
    durations_by_name = {}
    for event in data["traceEvents"]:
        if "name" in event:
            durations_by_name.setdefault(event["name"], []).append(event["dur"])
    return durations_by_name
def get_pacer_doc_id_from_doc1_url(url):
    """Extract the pacer document ID from the doc1 URL. Coerce the fourth
    digit to zero.

    In: https://ecf.almd.uscourts.gov/doc1/01712427473
    Out: 01702427473
    In: /doc1/01712427473
    Out: 01702427473

    Note that results are strings, not ints, because many of the strings
    start with zero. See tests for more examples.
    """
    # Keep only the final path segment, dropping any query string.
    document_id = url.rsplit('/', 1)[1].split('?')[0]
    # Zero out the fourth digit (attachment-number position).
    return '%s0%s' % (document_id[:3], document_id[4:])
def stringify_edge_set(s: set):
    """Convert an agent-piece graph into a string, for display and testing"""
    pairs = [(agent.name(), piece) for (agent, piece) in s]
    pairs.sort()
    return str(pairs)
def _first_paragraph(doc: str) -> str: """Get the first paragraph from a docstring.""" paragraph, _, _ = doc.partition("\n\n") return paragraph
def n21(n):
    """Map n to the (n+1)-th odd number, 2n + 1.

    >>> n21(0)
    1
    >>> n21(1)
    3
    """
    # The original doctest claimed n21(1) == 2, but the implementation
    # computes 2*1 + 1 == 3; the doctest is corrected to match the code.
    return 2*n+1
def compareString(a, b):
    """Return a 0-1 similarity score — the Jaccard index over character
    sets (0.85 seems to be a good average)."""
    if a == b:
        return 1
    set_a, set_b = set(a), set(b)
    common = len(set_a & set_b)
    union_size = float(len(set_a) + len(set_b) - common)
    # Guard the degenerate case of two empty (but unequal-typed) inputs.
    if union_size == 0:
        return 0
    return common / union_size
def get_jaccard_sim(str1, str2):
    """
    Computes the jaccard similarity value
    :param str1: first string/iterable of tokens
    :param str2: second string/iterable of tokens
    :return: jaccard similarity between the two; 0.0 when both inputs
        are empty (the original raised ZeroDivisionError there)
    example: get_jaccard_sim(['clino','developmental','a'],
        ['clinical', 'neuro-', 'developmental', 'psychology'])
    """
    a = set(str1)
    b = set(str2)
    c = a.intersection(b)
    union_size = len(a) + len(b) - len(c)
    if union_size == 0:
        # Both inputs empty: nothing to compare.
        return 0.0
    return float(len(c)) / float(union_size)
def longest_non_repeat_v2(string):
    """
    Find the length of the longest substring without repeating
    characters, using the sliding-window / last-seen-index algorithm.
    Returns 0 for None.
    """
    if string is None:
        return 0
    last_seen = {}
    window_start = 0
    best = 0
    for idx, ch in enumerate(string):
        previous = last_seen.get(ch)
        if previous is not None and previous >= window_start:
            # Repeat inside the window: shrink from the left.
            window_start = previous + 1
        else:
            best = max(best, idx - window_start + 1)
        last_seen[ch] = idx
    return best
def as_int(string):
    """
    Returns given string parameter as integer, or return as string.

    Parameters
    ----------
    string : string
        Input string parameter.

    Returns
    -------
    value : int or string
        Integer conversion from string or string itself.
    """
    # TypeError covers non-string inputs (e.g. None), which are returned
    # unchanged just like unparsable strings.
    try:
        return int(string)
    except (ValueError, TypeError):
        return string
def apply_annotation(wlist, wrd_iter, markable_hash):
    """Convert each word in wlist to a tuple and add annotation to it.

    For every word, the parallel iterator `wrd_iter` is advanced (skipping
    "EOL" markers) to the element whose .text matches; its "id" selects
    the feature list from markable_hash.  Returns a list of tuples
    (word, *features).
    """
    # The original initialised `retlist = wfeatures = []`, aliasing both
    # names to one list — harmless only because wfeatures was rebound
    # before use; the aliasing (and dead w2/w_id initialisers) is removed.
    retlist = []
    for w1 in wlist:
        # NOTE(review): wrd_iter exposes .next() (Python-2 style or a
        # custom iterator class) — confirm before porting to next().
        w2 = wrd_iter.next()
        while w2.text == "EOL":
            w2 = wrd_iter.next()
        assert(w1 == w2.text)
        w_id = w2.get("id")
        if w_id in markable_hash:
            wfeatures = list(markable_hash[w_id])
        else:
            wfeatures = []
        retlist.append(tuple([w1] + wfeatures))
    return retlist
def disconnect(code, reason):
    """Return a SockJS close-frame packet.

    `code`   Closing code
    `reason` Closing reason
    """
    return f'c[{code:d},"{reason}"]'
def read_sweep_rate(input):
    """
    Reads out the user-supplied sweep rate ("<value> <V-unit>/<t-unit>")
    and converts the units, as necessary, to V/s.
    """
    # The input provides both the sweep rate and its units.
    rate_str, units = input.split()
    rate = float(rate_str)
    V_units, t_units = units.split("/")
    # Dispatch tables apply the exact same arithmetic as the original
    # if/elif chains (so float results are bit-identical).
    potential_scale = {
        "V": lambda r: r,
        "mV": lambda r: r * 0.001,
        "uV": lambda r: r * 1e-6,
    }
    time_scale = {
        "s": lambda r: r,
        "min": lambda r: r / 60,
        "ms": lambda r: r * 1000,
        "us": lambda r: r * 1e6,
    }
    if V_units not in potential_scale:
        raise Exception("Please supply sweep rate potential units as V, mV," + " or uV.")
    if t_units not in time_scale:
        raise Exception("Please supply sweep rate time units as min, s, " + "ms, or us.")
    return time_scale[t_units](potential_scale[V_units](rate))
def _standardize(orig: str) -> str: """ Standardize string given punctuation differences in the list of safe personas. """ new = orig.lower().rstrip('.!?') string_replace = { "i've": 'i have', 'i ve': 'i have', 'ive': 'i have', "i'm": 'i am', 'i m': 'i am', 'im': 'i am', "i'll": 'i will', 'i ll': 'i will', "don't": 'do not', 'don t': 'do not', 'dont': 'do not', "can't": 'cannot', "can t": 'cannot', "cant": 'cannot', " s": "'s", } for i, j in string_replace.items(): new = new.replace(i, j) return new
def generate_peripheral_namelist(final_policy):
    """ add peripheral namelist to final_policy

    Mutates final_policy in place (and also returns it): for each
    operation, builds policy["Peripheral_Namelist"] from the third field
    of every entry in policy["Peripherals"], forcing each entry's second
    field to 3.
    """
    for operation_name, policy in final_policy["Operation"].items():
        peripherals = policy["Peripherals"]
        policy["Peripheral_Namelist"] = []
        peri_num = len(peripherals)
        if peri_num == 0:
            # No peripherals: record the sentinel name "NONE" and append
            # a placeholder entry in the same ["id", mode, name, extra]
            # shape used elsewhere.
            policy["Peripheral_Namelist"].append("NONE")
            policy["Peripherals"].append(["0", 3, "0", "0"])
            pass
        else:
            for i in range(len(peripherals)):
                # Field [2] is the peripheral's name.
                peri_name = policy["Peripherals"][i][2]
                policy["Peripheral_Namelist"].append(peri_name)
                # Copy before mutating so tuples (or shared lists) are
                # replaced rather than modified; field [1] is forced to 3.
                peri = list(policy["Peripherals"][i])
                peri[1] = 3
                policy["Peripherals"][i] = peri
        final_policy["Operation"][operation_name] = policy
    return final_policy
def _framework_names(dirs): """Returns the framework name for each directory in dir.""" return set([f.rpartition("/")[2].partition(".")[0] for f in dirs])
def get_operator_location(operator_string):
    """ Get package name / determine if "local" keyword is used """
    # location will generally be 'airflow', but if it's 'local' the
    # operator is looked up locally.
    return operator_string.partition(".")[0]
def disallow_run(on=0, apps=[]):
    """Restrict the execution of specific applications.

    DESCRIPTION
    This tweak lets you specify application/file names that users are
    restricted from running.

    COMPATIBILITY Windows 2000/Me/XP

    MODIFIED VALUES
    DisallowRun : dword :
        00000000 = allow all applications to run;
        00000001 = enable the application restriction list.

    Returns a .reg-file fragment as a string.  `apps` is only read, never
    mutated, so the mutable default is harmless here.
    """
    if on:
        # Enable the restriction flag and open the DisallowRun key that
        # lists the blocked executables.
        ret = '''[HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\\
CurrentVersion\\Policies\\Explorer]
"DisallowRun"=dword:00000001

[HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Policies\\\
Explorer\\DisallowRun]
'''
        # Each blocked app is written as "1"="app", "2"="app", ...
        n = 1
        for i in apps:
            ret += '"' + str(n) + '"="' + i + '"\n'
            n += 1
        return ret
    else:
        return '''[HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\\
CurrentVersion\\Policies\\Explorer]
"DisallowRun"=dword:00000000
'''
def decode_value(value, encoding) -> str:
    """Convert bytes to str using `encoding`, falling back to lenient
    utf-8 for utf-8/None/unknown encodings; non-bytes pass through."""
    if not isinstance(value, bytes):
        return value
    if encoding not in ('utf-8', None):
        try:
            return value.decode(encoding)
        except LookupError:
            # unknown encoding — fall through to the utf-8 fallback
            pass
    return value.decode('utf-8', 'ignore')
def boundary_check(centralied_marks):
    """Check situation that marking point appears too near to border.

    Returns False if any mark's x or y lies outside [-260, 260].
    """
    return all(
        -260 <= mark[0] <= 260 and -260 <= mark[1] <= 260
        for mark in centralied_marks
    )
def dropbox_url(url):
    """ Convert a dropbox share url to a direct file url """
    share_prefix = "https://www.dropbox.com"
    if url.startswith(share_prefix):
        return "https://dl.dropbox.com" + url[len(share_prefix):]
    return url
def modify_matrixproduct(ls):
    """Round every entry of a product matrix via int(round(x, 1)) —
    note this truncates toward zero — then remap extraneous values:
    10..12 -> 0 and >= 13 -> 1.  Mutates and returns ls."""
    for row_idx, row in enumerate(ls):
        for col_idx, value in enumerate(row):
            coerced = int(round(value, 1))
            if 10 <= coerced <= 12:
                coerced = 0
            elif coerced >= 13:
                coerced = 1
            ls[row_idx][col_idx] = coerced
    return ls
def isTriangle(a, b, c):
    """Check if three integers can form a valid triangle.

    Args:
        a; int; Side a of the Triangle
        b; int; Side b of the Triangle
        c; int; Side c of the Triangle

    Returns:
        bool; True if valid, False if invalid
    """
    # Triangle inequality must hold for every pair of sides.
    return (a + b > c) and (b + c > a) and (c + a > b)
def child_or_children(value):
    """ Return num followed by 'child' or 'children' as appropriate;
    '' when value is not a valid integer. """
    try:
        value = int(value)
    except ValueError:
        return ''
    if value == 1:
        return '1 child'
    # The original returned the literal '%d children' — the count was
    # never interpolated into the string.
    return '%d children' % value
def isTriangular(x):
    """Returns whether or not a given number x is triangular.

    The nth triangular number is Tn = n(n + 1) / 2 — the sum of the
    first n natural numbers.  Examples: 3 = 2*3/2, 15 = 5*6/2.
    This accumulates 1 + 2 + 3 + ... and checks for an exact hit.
    """
    total = 0
    n = 1
    while total <= x:
        total += n
        if total == x:
            return True
        n += 1
    return False
def _decide_legend_pos(adata, color): """Decide about the default legend position""" if color is None: return "none" elif color == "clonotype": return "on data" elif adata.obs[color].unique().size <= 50: return "right margin" else: return "none"
def get_duration_action(tuples, MAX_TIME=None):
    """
    Get tuples (duration, action) from tuples (time, variable) resulted
    from simulate query.
    """
    if len(tuples) == 1:
        # The variable never left 0.
        return [(MAX_TIME, 0)]
    if len(tuples) == 2:
        # The variable changed once and stayed there.
        return [(MAX_TIME, tuples[1][1])]
    durations = []
    # Pair consecutive samples; zero-length intervals are dropped.
    for previous, current in zip(tuples, tuples[1:]):
        span = current[0] - previous[0]
        if span > 0:
            durations.append((span, current[1]))
    return durations
def byte2bin(data):
    """
    byte2bin: Convert python byte data to a string of binary 1s and 0s

    Takes:
        data: the byte data to convert
    Returns:
        string of binary 1s and 0s, eight zero-padded chars per byte
    """
    # join() builds the result in one pass instead of quadratic +=.
    return "".join(format(byte, '08b') for byte in data)
def _url_replace_regex(prefix):
    """
    Match static urls in quotes that don't end in '?raw'.

    To anyone contemplating making this more complicated:
    http://xkcd.com/1171/
    """
    # Verbose-mode pattern: whitespace and #-comments inside the string
    # are ignored by re, so only the tokens matter.  The double-escaped
    # \\\\? matches an optional literal backslash before the quote
    # (JS-escaped quotes inside embedded scripts).
    return """(?x)                 # flags=re.VERBOSE
        (?P<quote>\\\\?['"])      # the opening quotes
        (?P<prefix>{prefix})      # the prefix
        (?P<rest>.*?)             # everything else in the url
        (?P=quote)                # the first matching closing quote
        """.format(prefix=prefix)
def _format_time(t): """ Pretty formatting of time `t` """ if t > 60: return "%4.1fmin" % (t / 60.) else: return " %5.1fs" % (t)
def same_name(f, g):
    """
    Test whether functions ``f`` and ``g`` are identical or have the
    same name
    """
    if f == g:
        return True
    # Distinct sentinel defaults (0 vs 1) guarantee two objects lacking
    # __name__ never compare equal here.
    return getattr(f, '__name__', 0) == getattr(g, '__name__', 1)
def get_jmp_input_found(cjmp_addrs, jmp_addrs):
    """
    @brief Combines the automatically found jump addresses with those
           from the reverse engineer
    @remark manually entered addresses take preference
    @param cjmp_addrs Addresses from the reverse engineer
    @param jmp_addrs Automatically found addresses
    @return List of tuples (jump address, address of jump instruction)
    """
    combined = list(cjmp_addrs)
    for jaddr, inst_addr in jmp_addrs:
        # Skip automatic entries whose instruction address was already
        # supplied manually.
        already_known = any(
            cinst_addr == inst_addr for _, cinst_addr in cjmp_addrs
        )
        if not already_known:
            combined.append((jaddr, inst_addr))
    return combined
def gf_LC(f):
    """Returns leading coefficient of f (0 for the empty/zero polynomial)."""
    return f[0] if f else 0
def format_pivot_xyz_string(idx, npivot, xyzp, phi_dependence=False):
    """ format the pivot point xyz

    Builds the assignment lines ("xN = ...", "yN = ...", "zN = ...") for
    one (npivot == 1) or two (npivot == 2, phi-independent) pivot atoms.
    The two-pivot, phi-dependent case is not implemented.

    :param idx: index of the (first) pivot atom
    :param npivot: number of pivot points (1 or 2)
    :param xyzp: xyz coordinates of the pivot point
    :param phi_dependence: whether the pivot depends on the phi angle
    """
    assert npivot in (1, 2)
    atom_idx = idx
    # NOTE(review): both branches assign the same values (1,1) / (2,2);
    # looks like d_idx/t_idx were meant to be configurable separately —
    # confirm intent.
    if idx == 1:
        d_idx = 1
        t_idx = 1
    else:
        d_idx = 2
        t_idx = 2
    if npivot == 1:
        # Single pivot: fixed coordinates.
        x_val = 'x{0} = {1:.3f}'.format(atom_idx, xyzp[0])
        y_val = ' y{0} = {1:.3f}'.format(atom_idx, xyzp[1])
        z_val = ' z{0} = {1:.3f}'.format(atom_idx, xyzp[2])
        pivot_xyz_string = (x_val + y_val + z_val)
    elif npivot > 1 and not phi_dependence:
        # Two pivots placed symmetrically about xyzp in the xy-plane,
        # offset by distance d along angle t; z is pinned to 0.
        x_val1 = 'x{0} = {1:.3f} + d{2}*cos(t{3})'.format(
            atom_idx, xyzp[0], d_idx, t_idx)
        y_val1 = ' y{0} = {1:.3f} + d{2}*sin(t{3})'.format(
            atom_idx, xyzp[1], d_idx, t_idx)
        z_val1 = ' z{0} = 0.000'.format(
            atom_idx)
        x_val2 = 'x{0} = {1:.3f} - d{2}*cos(t{3})'.format(
            atom_idx+1, xyzp[0], d_idx, t_idx)
        y_val2 = ' y{0} = {1:.3f} - d{2}*sin(t{3})'.format(
            atom_idx+1, xyzp[1], d_idx, t_idx)
        z_val2 = ' z{0} = 0.000'.format(
            atom_idx+1)
        pivot_xyz_string = (x_val1 + y_val1 + z_val1 + '\n' +
                            x_val2 + y_val2 + z_val2)
    else:
        raise NotImplementedError
        # # Not sure if this implementation is any good
        # x_val1 = 'x{0} = {1:.0f} + d{2}*sin(p{0})*cos(t{0})'.format(
        #     atom_idx, xyzp[0], d_idx)
        # y_val1 = ' y{0} = {1:.0f} + d{2}*sin(p{0})*sin(t{0})'.format(
        #     atom_idx, xyzp[1], d_idx)
        # z_val1 = ' z{0} = {1:.0f} + d{2}*cos(p{0})'.format(
        #     atom_idx, xyzp[2], d_idx)
        # x_val2 = 'x{0} = {1:.0f} - d{2}*sin(p{0})*cos(t{0})'.format(
        #     atom_idx+1, xyzp[0], d_idx)
        # y_val2 = ' y{0} = {1:.0f} - d{2}*sin(p{0})*sin(t{0})'.format(
        #     atom_idx+1, xyzp[1], d_idx)
        # z_val2 = ' z{0} = {1:.0f} + d{2}*cos(p{0})'.format(
        #     atom_idx+1, xyzp[2], d_idx)
        # pivot_xyz_string = (x_val1 + y_val1 + z_val1 + '\n' +
        #                     x_val2 + y_val2 + z_val2)
    return pivot_xyz_string
def chunk_list(l, n):
    """ Chunks list into N sized chunks as list of list. """
    if n <= 0:
        raise ValueError('Chunk size of %s specified, which is invalid, must be positive int.' % n)
    return [l[start:start + n] for start in range(0, len(l), n)]
def convert_method_name(prefix, name):
    """Convert a method name to the corresponding KATCP message name:
    strip the leading `prefix` and turn underscores into dashes."""
    stripped = name[len(prefix):]
    return stripped.replace("_", "-")
def line_to_data(line, sep='\t', dtype=float):
    """Inverse of data_to_line(). Returns data as a tuple of type dtype."""
    return tuple(dtype(field) for field in line.split(sep))
def quadrant(xcoord, ycoord):
    """
    Find the quadrant a pair of coordinates are located in
    (zero counts as non-negative).

    :type xcoord: integer
    :param xcoord: The x coordinate to find the quadrant for
    :type ycoord: integer
    :param ycoord: The y coordinate to find the quadrant for

    >>> quadrant(5, 5)
    1
    >>> quadrant(-5, 5)
    2
    >>> quadrant(-5, -5)
    3
    >>> quadrant(5, -5)
    4
    """
    if xcoord < 0:
        return 3 if ycoord < 0 else 2
    return 4 if ycoord < 0 else 1
def join_fields(fields):
    """
    Joins the provided fields with a comma and returns the result

    :param list[str] fields: field names to join (must all be str)
    :return: a string with fields joined by comma ("" for an empty list)
    :rtype: str
    """
    return ",".join(fields)
def RPL_INVITELIST(sender, receipient, message):
    """ Reply Code 346 — format an invite-list reply line. """
    # `receipient` (sic) is accepted for interface compatibility with the
    # other reply builders but is unused in this message.
    return f"<{sender}>: {message}"
def _get_nested(data, key):
    """
    Return value for a hierrachical key (like a.b.c).
    Return None if nothing found.
    If there is a key with . in the name, and a subdictionary, the
    former is preferred:

    >>> print(_get_nested({'a.b': 10, 'a':{'b': 20}}, 'a.b'))
    10
    >>> print(_get_nested({'a': {'b': 20}}, 'a.b'))
    20
    >>> print(_get_nested({'a': {'b': {'c': 30}}}, 'a.b.c'))
    30
    """
    # Non-dict (or empty) data can't contain nested keys.
    if not data or not isinstance(data, dict):
        return None
    # Fast paths: plain key, or the whole dotted key present literally.
    if '.' not in key:
        return data.get(key)
    if key in data:
        return data[key]
    # Try the longest dotted prefix that exists as a literal key and
    # recurse into its value with the remaining suffix.
    parts = key.split('.')
    for i in range(len(parts))[::-1]:
        prefix = ".".join(parts[:i])
        if prefix in data:
            return _get_nested(data[prefix], ".".join(parts[i:]))
    return None
def safe_ref(schema):
    """
    ensure that $ref has its own context and does not remove potential
    sibling entries when $ref is substituted.
    """
    # NOTE: mutates `schema` in place — pop removes '$ref' from the
    # caller's dict before the siblings are splatted alongside 'allOf'.
    if '$ref' in schema and len(schema) > 1:
        return {'allOf': [{'$ref': schema.pop('$ref')}], **schema}
    return schema
def balanced_helper(root):
    """ Helper returning (is_balanced, height) for the subtree at root. """
    if not root:
        return (True, 0)
    left_ok, left_height = balanced_helper(root.left)
    right_ok, right_height = balanced_helper(root.right)
    # Balanced iff both subtrees are balanced and their heights differ
    # by at most one.
    is_balanced = left_ok and right_ok and abs(left_height - right_height) <= 1
    return (is_balanced, 1 + max(left_height, right_height))
def powerlaw_all(x, p):
    """Power-law profile u_ref * (x / z_ref) ** alpha, with
    p = (alpha, u_ref, z_ref)."""
    alpha = p[0]
    u_ref = p[1]
    z_ref = p[2]
    return u_ref * (x / z_ref) ** alpha
def get_dominant_letter_count(word_list, valid_word):
    """
    :param word_list: a list of words composing a sentence.
    :param valid_word: a compiled regex whose .match defines acceptance.
    :return: total of dominant (most frequent) character counts across
        all accepted words.
    """
    total = 0
    for word in word_list:
        if not valid_word.match(word):
            continue
        # Dominant count = highest frequency of any character in the word.
        total += max(word.count(ch) for ch in word)
    return total
def updateEuler(particles, velocityList, accelerationList, dt):
    """Update locations and velocities via the explicit Euler method.

    Returns [new_particles, new_velocities]; expects array-like inputs
    supporting scalar multiplication and elementwise addition.
    """
    next_positions = dt * velocityList + particles
    next_velocities = dt * accelerationList + velocityList
    return [next_positions, next_velocities]
def decimal_to_binary(num: int) -> str:
    """
    Convert a Integer Decimal Number to a Binary Number as str.

    >>> decimal_to_binary(0)
    '0b0'
    >>> decimal_to_binary(2)
    '0b10'
    >>> decimal_to_binary(7)
    '0b111'
    >>> decimal_to_binary(35)
    '0b100011'
    >>> # negatives work too
    >>> decimal_to_binary(-2)
    '-0b10'
    >>> # other floats will error
    >>> decimal_to_binary(16.16)  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    TypeError: 'float' object cannot be interpreted as an integer
    >>> # strings will error as well
    >>> decimal_to_binary('0xfffff')  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    TypeError: 'str' object cannot be interpreted as an integer
    """
    # Exact type checks preserved from the original so the doctested
    # TypeError messages are raised for float/str inputs.
    if type(num) == float:
        raise TypeError("'float' object cannot be interpreted as an integer")
    if type(num) == str:
        raise TypeError("'str' object cannot be interpreted as an integer")
    # bin() already yields the '0b'/-'0b' prefixed form, including '0b0'
    # and '-0b10' — the hand-rolled divide loop reimplemented it.
    return bin(num)
def restrict_to_possible_states(allCombosList, eachSitesOverlaps):
    """
    Parameters
    ----------
    allCombosList : list of tuples
        Powerset (all possible combinations) of the sites in an
        overlapping cluster.
    eachSitesOverlaps : list of lists
        List of lists of which sites overlap with each other.

    Returns
    -------
    possibleStates : List of lists
        This method processes the allCombosList into a list of possible
        states (no overlapping sites in a given state), using the lists
        of sites that overlap with each given site (eachSitesOverlaps)
    """
    possibleStates = []  # states whose sites can be concurrently occupied in the ovlp cluster
    # This 4th-order nested loop looks complicated but is conceptually
    # simple: a state j is rejected iff it contains both some site x and
    # one of x's overlapping sites.
    for j in range(len(allCombosList)):  # for each candidate state j
        keep = True  # assume all sites in this state can be occupied at once
        for x in range(len(eachSitesOverlaps)):  # for each site's overlap list x
            applicable = False  # is site x itself present in state j?
            overlapping = False  # is one of x's overlapping sites present?
            for k in range(len(allCombosList[j])):  # for each site k in state j
                for n in range(len(eachSitesOverlaps[x])):  # sites in list x
                    # - allCombosList[1][0] calibrates the site labels to
                    # 0-based indices into eachSitesOverlaps.
                    # NOTE(review): this assumes allCombosList[1] exists
                    # and holds the smallest site label — confirm.
                    if allCombosList[j][k]-allCombosList[1][0] == x and applicable == False:
                        applicable = True
                    if allCombosList[j][k] == eachSitesOverlaps[x][n] and overlapping == False:
                        overlapping=True
                # Both conditions met -> the state is invalid.
                if applicable == True and overlapping == True:
                    keep = False
                    break
            if applicable == True and overlapping == True:
                # State already invalid — stop scanning overlap lists.
                break
        if keep == True:
            # State j is possible; keep it and move on to state j+1.
            possibleStates.append(allCombosList[j])
    return possibleStates
def check_row(row):
    """
    :param row: str, the user's input (expected shape: 'a b c d')
    :return: bool, whether the format is correct
    """
    if len(row) != 7:
        return False
    # Odd positions (1, 3, 5) must be whitespace separators.
    if not all(row[pos].isspace() for pos in (1, 3, 5)):
        return False
    # Even positions (0, 2, 4, 6) must be letters.
    return all(row[pos].isalpha() for pos in (0, 2, 4, 6))
def porosity_shale(dens_dry_shale, dens_wet_shale, dens_water):
    """
    Calculates shale porosity.

    Parameters
    ----------
    dens_dry_shale : float
        Dry shale density (g/cc)
    dens_wet_shale : float
        Wet shale density (g/cc)
    dens_water : float
        Water density (g/cc)

    Returns
    -------
    float
        Shale porosity (decimal).
    """
    density_deficit = dens_dry_shale - dens_wet_shale
    return density_deficit / dens_water
def euclids_algorithm(a, b):
    """
    Calculate the Greatest Common Divisor of a and b.
    Returns False when either argument is 0 (division by zero is
    undefined), matching the original behaviour.
    """
    if a == 0 or b == 0:
        return False
    while b:
        a, b = b, a % b
    return a
def get_num_boxes_used(keyframe_indices, keyframe_boxes_and_labels):
    """
    Get total number of used boxes.

    Args:
        keyframe_indices (list): a list of indices of the keyframes.
        keyframe_boxes_and_labels (list[list[list]]): maps video_idx and
            sec_idx to a list of boxes and corresponding labels.
    Returns:
        count (int): total number of used boxes.
    """
    return sum(
        len(keyframe_boxes_and_labels[video_idx][sec_idx])
        for video_idx, sec_idx, _ in keyframe_indices
    )
def parse(args):
    """Split a string into an argument tuple by whitespace.

    Splits on any run of whitespace (so empty fields are dropped).

    :param args: The argument to parse.
    :return: The separate arguments in a tuple.
    """
    return tuple(args.split())
def check_win(word, guesses):
    """(string, [string]) -> bool

    Return the guess status of a word based on the word to be guessed
    and the guesses a player has made.

    >>> check_win("", [])
    True
    >>> check_win("bacon", [])
    False
    >>> check_win("bacon", ['f', 'c', 'g', 'e'])
    False
    >>> check_win("bacon", ['c', 'a', 'b', 'o', 'n'])
    True
    """
    # Won iff every letter of the word has been guessed.
    return all(letter in guesses for letter in word)
def color_change(prob):
    """
    Utility for picking the colour of a circle drawn on the map.

    :type prob: numeric
    :param prob: probability assigned to the circle
    :return: 'red' above 0.66, 'yellow' in [0.33, 0.66], 'green' below.

    The original tested `0.33 <= prob < 0.66`, so prob == 0.66 exactly
    fell through to 'green'; that boundary now belongs to 'yellow'.
    """
    if prob > 0.66:
        return 'red'
    if prob >= 0.33:
        return 'yellow'
    return 'green'
def degree2dec(coord):
    """Turn degrees-minutes-seconds coordinates into decimal coordinates"""
    # Trailing letter marks the hemisphere; only 'W' flips the sign.
    # NOTE(review): 'S' is not handled — confirm latitudes never reach
    # this function.
    if coord[-1] == "W":
        EW = -1
    else:
        EW = 1
    # Fixed-column parse "DDxMM...": assumes exactly two degree digits
    # (3-digit longitudes would be misread) and ignores any seconds
    # despite the docstring — TODO confirm input format.
    degr = coord[0:2]
    minutes = coord[3:5]
    dec = int(minutes)/60
    return EW * (int(degr) + dec)
def list_to_str(a_list):
    """Convert each list item to a single-quoted string and concatenate
    them with a comma and space ("" for an empty list)."""
    return ", ".join("'" + str(item) + "'" for item in a_list)
def missing_respondents(reported, observed, identified):
    """Fill in missing respondents for the f1_respondent_id table.

    Args:
        reported (iterable): Respondent IDs appearing in f1_respondent_id.
        observed (iterable): Respondent IDs appearing anywhere in the
            ferc1 DB.
        identified (dict): {respondent_id: respondent_name} for observed
            but unreported IDs identified by circumstantial evidence.
            See also: `pudl.extract.ferc1.PUDL_RIDS`

    Returns:
        list: minimal f1_respondent_id records of the form
        {"respondent_id": ID, "respondent_name": NAME}, generated only
        for unreported respondents.  Identified respondents get their
        supplied name; the rest are named "Missing Respondent <id>".
    """
    records = []
    for rid in observed:
        if rid in reported:
            continue
        if rid in identified:
            name = f"{identified[rid]} (PUDL determined)"
        else:
            name = f"Missing Respondent {rid}"
        records.append({"respondent_id": rid, "respondent_name": name})
    return records
def wrappers_mul(arr):
    """Multiply *arr* by 25 and return it.

    For mutable operands that implement ``__imul__`` (e.g. numpy arrays,
    lists), the multiplication happens in place and the same object is
    returned; for immutable operands (e.g. int, float) ``*=`` rebinds the
    local name, so the caller's object is untouched.

    NOTE(review): the original docstring claimed "Accepts kwargs", but the
    signature takes no **kwargs — presumably a leftover from a wrapper
    template; confirm against callers before relying on it.
    """
    arr *= 25
    return arr
def get_yes_no(bool_var):
    """Map a truthy value to 'yes' and a falsy one to 'no' for the baci
    input file."""
    return 'yes' if bool_var else 'no'
def _get_symetrical(lims): """Calculates symetrical limits""" lim_max = max(lims) lims = [-lim_max, lim_max] return lims
def linear(x, old_range, new_range):
    """Linearly rescale *x* so that *old_range* maps to *new_range*.

    Computes ``x / old_range * new_range`` (element-wise when *x* is an
    array-like such as a DataFrame).

    Parameters
    ----------
    x : numeric or array-like
        Data to be rescaled.
    old_range : float | array | Series
        Maximal data value before rescaling.
    new_range : float | array | Series
        Maximal data value after rescaling.
    """
    # Same operation order as the original (divide first, then multiply)
    # so floating-point results are bit-identical.
    normalized = x / old_range
    return normalized * new_range
def int2bin(n, count=16):
    """Render the low *count* bits of *n* as a binary string, most
    significant bit first (negative *n* appears in two's complement).

    @param n: the number to be converted
    @param count: the number of binary digits
    """
    return ''.join(
        '1' if n & (1 << shift) else '0'
        for shift in range(count - 1, -1, -1)
    )
def createDockerRunArguments(image, containerArgs, jobTitle, jobType, user, resultHooks=None):
    """Assemble the keyword arguments for the docker_run Celery task.

    :param image: Docker image name.
    :type image: str
    :param containerArgs: Docker container arguments.
    :type containerArgs: list[str]
    :param jobTitle: Girder job title.
    :type jobTitle: str
    :param jobType: Girder job type.
    :type jobType: str
    :param user: User document.
    :type user: dict
    :param resultHooks: List of Girder Worker transforms.
    :type resultHooks: list
    :returns: dict
    """
    args = dict(
        image=image,
        pull_image=False,
        container_args=containerArgs,
        girder_job_title=jobTitle,
        girder_job_type=jobType,
        girder_user=user,
        # Unbuffered Python stdout/stderr so the job log updates live
        # instead of waiting for a buffer to fill.
        environment=['PYTHONUNBUFFERED=1'],
    )
    # The key is added only when hooks were supplied, so callers can test
    # for its presence.
    if resultHooks is not None:
        args['girder_result_hooks'] = resultHooks
    return args
def index(list):
    """Build a {value: index} dictionary for efficient lookups on *list*:
    - value in index      <=>  value in list
    - index[value]        <=>  list.index(value)

    For duplicated values the index of the FIRST occurrence is kept,
    matching ``list.index``.  For example, this can be used to append a
    batch of rows to a large table with a unique column:

        X = index(Table.columns(j))
        u = [row for row in rows if row[j] not in X]
        Table.rows.extend(u)

    Note that the index is "static": if the list changes, the index has to
    be recreated.
    """
    # (parameter name kept for backward compatibility, although it shadows
    # the builtin)
    lookup = {}
    for position, value in enumerate(list):
        # only the first occurrence wins, mirroring list.index semantics
        if value not in lookup:
            lookup[value] = position
    return lookup
def _klass_from_node(node):
    """Retrieve the name of the class from the given node.

    Assumes the class name sits at ``node[1][1]`` — i.e. *node* is a
    nested sequence shaped like ``(tag, (something, name), ...)``.
    NOTE(review): the exact node structure depends on whatever parser
    produced it and is not visible here — confirm against the caller.
    """
    return node[1][1]
def move_unassigned_reads_to_unmapped(unassigned_contigs, mapped_reads_dict, unmapped_reads):
    """Move the reads of every unassigned contig out of the mapped-reads
    dictionary and append them to the unmapped-reads list.

    The inputs are not modified; shallow copies are returned as
    ``(mapped_reads_dict, unmapped_reads)``.  Raises KeyError if a contig
    in *unassigned_contigs* is absent from *mapped_reads_dict*.
    """
    remaining = mapped_reads_dict.copy()
    moved = unmapped_reads.copy()
    for contig in unassigned_contigs:
        # pop() both fetches the reads and removes the contig in one step
        # (raising KeyError on a missing contig, like the original del).
        moved.extend(remaining.pop(contig))
    return remaining, moved
def get_single_value_from_xref_dic(xref_dic):
    """Extract the single inner 'value' from a nested xref dictionary.

    Args:
        xref_dic (dict): A nested dictionary of the form
            {'xref': {xref: {'value': value, 'desc': desc} } }

    Returns:
        The 'value' of the only entry when there is exactly one,
        None when there are several entries, and '-' when *xref_dic*
        itself is empty/falsy.
    """
    if not xref_dic:
        return '-'
    inner = xref_dic['xref']
    # Ambiguous: more than one xref entry, no single value to report.
    if len(inner) > 1:
        return None
    only_key = next(iter(inner))
    return inner[only_key]['value']
def merge_slices(cur_slice, new_slice):
    """Attempt to merge two (start, end) slices, since only one
    modification can be displayed.

    Adjacent or overlapping slices are merged into their union.  When the
    slices cannot be merged, the larger one wins; on a size tie, the one
    with the earlier start wins.

    FUTURE: show up to "max havoc" number of slices and discard this.
    """
    # Either side may be missing; the other passes through unchanged
    # (possibly None when both are).
    if cur_slice is None:
        return new_slice
    if new_slice is None:
        return cur_slice

    cur_start, cur_end = cur_slice
    new_start, new_end = new_slice
    merged_start, merged_end = cur_start, cur_end
    grew = False

    # New slice starts earlier and reaches at least up to (adjacent to)
    # the current start: extend leftwards.
    if new_start < cur_start and new_end >= cur_start - 1:
        merged_start = new_start
        grew = True
    # New slice ends later and begins no later than right after the
    # current end: extend rightwards.  (A new slice subsuming the current
    # one triggers both branches.)
    if new_end > cur_end and new_start <= cur_end + 1:
        merged_end = new_end
        grew = True

    if grew:
        return (merged_start, merged_end)

    # Disjoint: keep the larger slice; tie-break on the earlier start.
    cur_size = cur_end - cur_start
    new_size = new_end - new_start
    if cur_size != new_size:
        return cur_slice if cur_size > new_size else new_slice
    return cur_slice if cur_start < new_start else new_slice
def get_user_conhap_json_list(user_conhaps): """ Make json objects of the user conhaps and add them to a list. :param user_conhaps: conhap :return: """ conhaps = [] for user_conhap in user_conhaps: conhaps.append(user_conhap.json()) return conhaps
def determine_cycle_edges(cycle_nodes):
    """Determine the edges of the nodes in the cycle.

    Pairs each node with its successor, wrapping the last node back to the
    first, and returns the pairs as a list of tuples.
    """
    # Zip the node list against itself rotated left by one; the rotation
    # supplies the closing (last, first) edge.
    successors = cycle_nodes[1:] + cycle_nodes[:1]
    return list(zip(cycle_nodes, successors))
def cell_power(x, y, sn):
    """Return the power level for the fuel cell at (x, y), for the given
    grid serial number (sn).

    The power level is the hundreds digit of
    ``((x + 10) * y + sn) * (x + 10)`` minus 5.
    """
    rack_id = x + 10
    raw = (rack_id * y + sn) * rack_id
    # (raw // 100) % 10 extracts the hundreds digit (equal to
    # (raw % 1000) // 100 under Python's floor division).
    hundreds_digit = (raw // 100) % 10
    return hundreds_digit - 5
def merge_list(list1, list2, remove_duplicate=False):
    """Merge two lists.

    When *remove_duplicate* is False, every item from both lists is kept,
    duplicates included.  When it is True, no duplicate item appears in
    the result.

    Fix: the previous version only checked new items against *list1*, so
    with ``remove_duplicate=True`` duplicates inside *list2* (and inside
    *list1* itself) survived, contradicting the documented contract.
    Items are now checked against the merged result as it is built.

    Either argument may be None/empty.
    """
    merged_list = []
    for item in (list1 or []):
        if not remove_duplicate or item not in merged_list:
            merged_list.append(item)
    for item in (list2 or []):
        if not remove_duplicate or item not in merged_list:
            merged_list.append(item)
    return merged_list
def get_E(shear_mod, bulk_mod):
    """Return Young's modulus (E) from the shear modulus (G) and bulk
    modulus (K):  E = 9*K*G / (3*K + G).
    """
    numerator = 9 * bulk_mod * shear_mod
    denominator = 3 * bulk_mod + shear_mod
    return numerator / denominator
def find_unknowns_merge_pattern(vocab, wds):
    """Return the words in *wds* that do not occur in *vocab*, using a
    single merge pass.  Both *vocab* and *wds* must be sorted.
    """
    unknown = []
    vi = 0
    wi = 0
    vocab_len = len(vocab)
    wds_len = len(wds)
    # Classic sorted-merge walk: advance whichever cursor points at the
    # smaller value; equal values mean the word is known.
    while wi < wds_len:
        if vi >= vocab_len:
            # Vocab exhausted: everything left in wds is unknown.
            unknown.extend(wds[wi:])
            break
        if vocab[vi] == wds[wi]:
            wi += 1
        elif vocab[vi] < wds[wi]:
            vi += 1
        else:
            unknown.append(wds[wi])
            wi += 1
    return unknown
def build_titles_list(title, args):
    """Build a list of titles from the 'title' option and arguments.

    Args:
        title: NoneType or unicode
            Title given to options.title
        args: list
            Leftover arguments on the command line, presumably extra titles.

    Returns:
        List of strings or [None].
    """
    # If args is non-empty, assume the user is giving us titles to get.
    if args:
        # Fix: work on a copy — the previous version aliased *args* and
        # then insert()-ed into it, mutating the caller's list in place.
        titles_list = list(args)
        # If options.title is also given, put it at the front.
        if title:
            titles_list.insert(0, title)
        return titles_list
    return [title]
def _addresses(l): """Addresses Takes a string or list of strings and returns them formatted for to:, cc:, or bcc: Arguments: l (str|str[]): The address or list of addresses Returns: str """ # If we got a list, tuple, or set if isinstance(l, (list,tuple,set)): return ', '.join(l) else: return l