content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
from typing import Iterable


def repeat_scale(scale):
    """Promote a scalar scale to a uniform 3-component scale.

    An iterable value is returned unchanged; a scalar is expanded to the
    3-tuple ``(scale, scale, scale)``.
    """
    if not isinstance(scale, Iterable):
        return scale, scale, scale
    return scale
9114410882fc92a84ff9e6c369006c41930dca8d
117,738
def midi_note_to_frequency(note):
    """Convert a MIDI note number to its frequency in Hz.

    A4 (MIDI note 69) is pinned at 440 Hz; each semitone scales the
    frequency by 2**(1/12). ``None`` maps to ``None``.
    """
    if note is None:
        return None
    semitones_from_a4 = note - 69.0
    return 440.0 * 2 ** (semitones_from_a4 / 12)
075aa353d099acfe04b2c8d8b3beee272aaaf25f
117,744
import random
import math


def accept_or_reject(delta_e, beta):
    """Metropolis acceptance test.

    A move with zero energy change is always accepted; otherwise it is
    accepted with probability exp(-beta * delta_e), which is >= 1 (i.e.
    certain acceptance) for energy-lowering moves.

    Parameters
    -----------
    delta_e : float
        change in energy
    beta : float
        inverse of temperature

    Returns
    --------
    accept : bool
        T/F value of whether to accept change
    """
    if delta_e == 0:
        return True
    acceptance_probability = math.exp(-beta * delta_e)
    return random.random() < acceptance_probability
d7856d3e50af3d5fefc1a2cc602aa00b606bf9c5
117,745
from typing import List


def get_groups(numbers: List[int]) -> List[List[int]]:
    """Take a list of increasing ints and return groups of consecutive numbers.

    Parameters
    ----------
    numbers: List[int]
        A list of integers in increasing order

    Returns
    -------
    groups: List[List[int]]
        A list of groups consisting of consecutive numbers.  An empty
        input yields an empty list.
    """
    # Guard: the original indexed numbers[0] unconditionally and raised
    # IndexError on an empty input.
    if not numbers:
        return []
    groups = [[numbers[0]]]
    for prev, value in zip(numbers, numbers[1:]):
        if value == prev + 1:
            groups[-1].append(value)   # extends the current run
        else:
            groups.append([value])     # starts a new run
    return groups
43c038482ccbc45d9d4a05462d8ca3386fe76e0f
117,746
def pct_changes(x):
    """Percentage change of a series.

    Return a series of fractional changes from a series. Accepts
    multivariate series.

    Parameters
    ----------
    x: array [float, features]
        Numpy array of floats. The series to be converted to percentages
        should be the first index.

    Returns
    -------
    percentage changes: array [float, features]
        Percentage changes of the input series on index 0.

    Notes
    -----
    Assumes the series is positive.
    """
    previous = x[:-1]
    return (x[1:] - previous) / previous
3b9ee58d6b0377a5262303a6eb34d32cb0b4c8aa
117,751
def filter_predictions(predictions, max_regions, threshold):
    """Filter predictions down to those at or above ``threshold``.

    At most ``max_regions`` entries are returned; input order is
    preserved.
    """
    kept = [entry for entry in predictions if entry["prob"] >= threshold]
    return kept[:max_regions]
c31b25ec9dbd8e3f03783c576cda56b524929e67
117,752
def gen_relocation_data(offsets):
    """Encode relocation offsets as the relocation section's payload.

    Each offset (in multiples of the pointer length) is delta-encoded
    against the previous one.  The delta is written big-endian in 1, 2 or
    4 bytes, with the top two bits of the first byte marking the width
    (0x40 / 0x80 / 0xc0).  A 4-byte little-endian total length is
    prepended and the result is zero-padded to a multiple of 16 bytes.
    """
    payload = b''
    previous = 0
    for offset in offsets:
        delta = offset - previous
        # Pick the narrowest encoding that holds the delta.
        if delta > 0x3ff:
            width, marker = 4, 0xc0
        elif delta > 0x3f:
            width, marker = 2, 0x80
        else:
            width, marker = 1, 0x40
        encoded = delta.to_bytes(width, byteorder='big')
        payload += bytes([encoded[0] | marker]) + encoded[1:]
        previous = offset
    total = (len(payload) + 4).to_bytes(4, byteorder='little', signed=False)
    out = total + payload
    # Pad to a 16-byte boundary.
    if len(out) % 16:
        out += b'\x00' * (16 - len(out) % 16)
    return out
94d3a7652ed80929bf263cbdf8bd0f07f0fc08a7
117,753
def is_aerial_duel(event_list, team):
    """Return True when the first two events are both aerial-duel events
    (event ``type_id`` 44)."""
    duel_count = sum(1 for event in event_list[:2] if event.type_id == 44)
    return duel_count == 2
db670dc60304dcfd497e79d867beee27784dc7f1
117,758
def index_1d(row, col, max_cols=9):
    """For a given row and column in a grid world, return the row that
    cell would appear on in the transition matrix.

    Parameters
    ----------
    row : int
        The row number. Indexing starts at 0.
    col : int
        The column number. Indexing starts at 0.
    max_cols : int, optional
        Number of columns in the grid.  Defaults to 9, matching the
        previously hard-coded grid width, so existing callers are
        unaffected.

    Returns
    -------
    transition_row : int
        The row on the transition matrix the row and column would
        appear on.
    """
    return row * max_cols + col
a4a23fa4677276eb8bd1f130b0bf9c217da0d4f4
117,759
def Diff(li1, li2):
    """Return the elements present in exactly one of the two lists.

    :param li1: first list
    :param li2: second list
    :return: symmetric difference as a list — elements unique to li1
        first, then elements unique to li2 (duplicates collapse)
    """
    only_first = set(li1) - set(li2)
    only_second = set(li2) - set(li1)
    return list(only_first) + list(only_second)
eb0f473fade87f2c116291cd30c97b70a550ff2f
117,761
def evaluate_profits(vrp, **kwargs):
    """Evaluate total profits acquired from visiting optional nodes.

    :param vrp: A population individual subject to evaluation; its
        ``profits`` attribute is updated as a side effect.
    :param kwargs: Keyword arguments. The following are expected:
        - (list<int>) 'node_profit': profits obtainable from visiting
          each optional node.
    :return: Total profits from the nodes visited by the individual.
    """
    node_profits = kwargs["node_profit"]
    skipped = vrp.unvisited_optional_nodes
    total = sum(node_profits[node] for node in vrp.solution
                if node not in skipped)
    vrp.profits = total
    return total
9079ed05c68a8351e009e1cdb78e0172900c903b
117,766
from datetime import datetime


def json_serialize(data):
    """Ensure data is JSON serializable for api requests.

    Args:
        data: list of database dictionaries/tuples of table rows.

    Note:
        This function mutates (and returns) the original referenced
        list.  datetime and bytearray values become ``str``; everything
        else is left untouched.
    """
    needs_cast = (datetime, bytearray)
    for index, row in enumerate(data):
        if isinstance(row, dict):
            # case I: data is a list of dictionaries
            for field, value in row.items():
                if value is not None and isinstance(value, needs_cast):
                    row[field] = str(value)
        elif isinstance(row, tuple):
            # case II: data is a list of tuples -- rebuild as a new tuple
            converted = [
                str(element)
                if element is not None and isinstance(element, needs_cast)
                else element
                for element in row
            ]
            data[index] = tuple(converted)
    return data
8c443c9de80d635069469b902fa4b32f4ad89c6a
117,776
from typing import Iterable
import glob


def find_onewire_devices() -> Iterable[str]:
    """Get the list of files corresponding to devices on the one-wire bus.

    Matches family-code-28 devices (presumably DS18B20-style temperature
    sensors — confirm against the target hardware).

    Returns:
        Iterable[str]: the matching ``w1_slave`` file paths (empty when
        no such devices are present).
    """
    pattern = '/sys/bus/w1/devices/28*/w1_slave'
    return glob.glob(pattern)
354f9927b24c41b2e121c8c44ae8f3e0c3cf9bc0
117,779
def create_filename(ordering, base_filename, id) -> str:
    """Creates a standard filename.

    Args:
        ordering (str): The ordering of the note ('' for none)
        base_filename (str): The base of the filename
        id (str): The id of the note ('' for none)

    Returns:
        str: filename of the form '<ordering>_<base>_<id>.md' with the
        empty parts omitted
    """
    stem = base_filename if ordering == '' else ordering + '_' + base_filename
    suffix = '.md' if id == '' else '_' + id + '.md'
    return stem + suffix
84877b41f1e98ac5400179b4396911e538dc416c
117,783
def _Stylize(s):
    """Stylize a given string; currently this means upper-casing it."""
    stylized = s.upper()
    return stylized
6883d996787b5a7a20da5c9fb208745bdbb1244b
117,784
def shorten_unique(names, keep_first=4, keep_last=4):
    """Shorten strings, inserting '(...)', while keeping them unique.

    Parameters
    ----------
    names: List[str]
        list of strings to be shortened
    keep_first: int
        always keep the first N letters
    keep_last: int
        always keep the last N letters

    Returns
    -------
    shortened_names: List[str]
        list with shortened strings (empty input yields an empty list)
    """
    # Guard: the original called max() on an empty sequence and raised
    # ValueError for an empty ``names``.
    if not names:
        return []
    longest = max(len(name) for name in names)
    short, cut_chars = [], 0
    # Widen the kept prefix until the shortened names are as unique as
    # the originals (or nothing is being shortened any more).
    while len(set(short)) != len(set(names)) and cut_chars <= longest:
        short = [name[:keep_first + cut_chars] + '(...)' + name[-keep_last:]
                 if len(name) > keep_first + keep_last + cut_chars else name
                 for name in names]
        cut_chars += 1
    return short
bf23e2f2eeddb5b5541613284291ba0b6a1e271f
117,795
def convert_miles_to_minutes_nyc(input_distance):
    """Convert a distance in miles to trip minutes at an average 10 mph,
    rounded to the nearest 10 minutes (minimum trip is ten minutes)."""
    mph = 10

    def round_to_base(x, base=10):
        # Nearest multiple of ``base``; a zero result is bumped to 10
        # because the shortest trip is ten minutes.
        nearest = int(base * round(float(x) / base))
        return nearest if nearest != 0 else 10

    minutes = input_distance / mph * 60
    return round_to_base(minutes)
da34c850909d0048e1659f8c1df43608948e67e0
117,796
def cell_state(forget_gate_output, input_gate_output):
    """New cell state: the sum of the partially forgotten cell state and
    the newly proposed state."""
    new_state = forget_gate_output + input_gate_output
    return new_state
16594f6fa429fba444c14e1bde9705ad93b40e3e
117,798
def switch_player(player):
    """Return the opposing player symbol ("X" <-> "O").

    Any other value raises KeyError (dict-lookup semantics).
    """
    opponent_of = {"X": "O", "O": "X"}
    return opponent_of[player]
6ca7412a4c1d6adddee38a15c2f3dc7ebed15cf7
117,800
def rename_dfa_states(dfa: dict, suffix: str):
    """Side effect on input! Renames all the states of the DFA by adding
    **suffix** at the *beginning* of each state name (despite the
    parameter name, the string is prepended — matching the historical
    behavior).

    Utility to keep automata from sharing state names.  Avoid values
    that can lead to special names like "as", "and", ...

    :param dict dfa: input DFA (mutated in place).
    :param str suffix: string to be added at beginning of each state name.
    :return: the same, mutated DFA.
    """
    renamed = {state: suffix + state for state in dfa['states']}
    dfa['accepting_states'] = {renamed[s] for s in dfa['accepting_states']
                               if s in renamed}
    dfa['states'] = set(renamed.values())
    dfa['initial_state'] = suffix + dfa['initial_state']
    dfa['transitions'] = {
        (renamed[source], symbol): renamed[destination]
        for (source, symbol), destination in dfa['transitions'].items()
    }
    return dfa
4c2935dbb6236ef5d82e2c151f27fea6c4bb4c18
117,804
import math


def computePairN(att_name, att_value, data):
    """Count ranked pairs where the protected group ranks above others.

    Sub-function used in the Pairwise oracle.

    Attributes:
        att_name: sensitive attribute (column) name
        att_value: value identifying the protected group
        data: dataframe holding the data (row order is the ranking)

    Return:
        (number of pairs of att_value > * in the input data,
         number of such pairs estimated from the group proportion,
         size of the att_value group)
    """
    total_n = len(data)
    # 1-based rank positions of the protected group's rows.
    positions = data[data[att_name] == att_value].index + 1
    group_size = len(positions)
    observed = 0
    for rank_in_group, position in enumerate(positions, start=1):
        remaining_in_group = group_size - rank_in_group
        # Rows below this position that belong to *other* groups.
        observed += total_n - position - remaining_in_group
    # Estimated fair number of cross-group "above" pairs.
    cross_pairs = group_size * (total_n - group_size)
    estimated = math.ceil((group_size / total_n) * cross_pairs)
    return int(observed), int(estimated), int(group_size)
495fdc105ab0aebf39464dffc50aa1a641973394
117,806
def _inters(orig, new):
    """Intersect two sets, treating None as "no constraint yet".

    If ``orig`` is None the ``new`` set is returned as-is; if ``new`` is
    None ``orig`` is returned unchanged; otherwise ``orig`` is
    intersected **in place** and returned.  When both operands are None,
    the result is None.
    """
    if orig is None:
        return new
    if new is None:
        return orig
    orig &= new
    return orig
31982100d97cb87b766635f622aacd1bdcead84d
117,809
import random


def makePermutation(n):
    """Generate a random permutation of 1..n-1 sandwiched between a
    leading 0 and a trailing n."""
    middle = list(range(1, n))
    random.shuffle(middle)
    return [0] + middle + [n]
a32e8f598178baf252090d80ba9dd6485761e276
117,811
def __edge_list(self):
    """Return a tuple of two arrays (sources, targets) defining all the
    edges of the graph.

    :Example:

    >>> g = UndirectedGraph(3)
    >>> g.add_edges((0, 1, 0), (1, 2, 2))
    >>> g.edge_list()
    (array([0, 1, 0]), array([1, 2, 2]))

    :return: pair of two 1d arrays
    """
    sources = self.sources()
    targets = self.targets()
    return sources, targets
81f3c52a7700dca59a70dcd6830b807688810e9c
117,813
def _parse_options(options):
    """Parse an options value from a connection string.

    :param str options: options string, e.g. ``'-c key=val -c k2=v2'``
    :return: dictionary of options keys/values
    :rtype: dict
    """
    parsed = {}
    for chunk in options.strip("'").split('-c '):
        entry = chunk.strip()
        if not entry:
            continue
        key, value = entry.split('=', 1)
        parsed[key] = value
    return parsed
5b2597375eb2b39540ce97a4c4ccb27f094f6dad
117,817
def GetMemberFilename(member, strtab_data):
    """Get the real filename of the archive member.

    Short names are stored inline (whitespace-padded).  GNU-style long
    names are stored as ``/<index>`` where index is an offset into the
    string table; the entry there ends with ``/`` followed by a newline.
    """
    if not member.is_long_name:
        return member.name.strip()
    # '/<index>' -> offset into the long-name string table.
    offset = int(member.name[1:].strip())
    entry = strtab_data[offset:].split('\n', 2)[0]
    assert (entry.endswith('/'))
    return entry[:-1]
f0d22f166f76649c2e595bf9de0d9c1b3fe5a9d5
117,818
import hashlib


def hexhash(file_path, num_bytes=32, block_size=65536):
    """Return the first ``num_bytes`` hex digits of a file's SHA-256,
    reading the file in ``block_size`` chunks."""
    hasher = hashlib.sha256()
    with open(file_path, 'rb') as stream:
        for chunk in iter(lambda: stream.read(block_size), b''):
            hasher.update(chunk)
    return hasher.hexdigest()[:num_bytes]
481ae1c1c9467de9e156da1a16399d0ac7faf2fb
117,819
def show_result(pg_version, es_version, name, output):
    """Print a one-line PASS/FAIL summary for a test and return success.

    ``output`` is a ``(success, error)`` pair; the error text is printed
    only when the test failed.
    """
    success, error = output
    verdict = "PASS" if success else "FAIL"
    print(
        "PostgreSQL {pg_version} with Elasticsearch {es_version}: "
        "Test {name} - {result}".format(
            pg_version=pg_version,
            es_version=es_version,
            name=name,
            result=verdict,
        )
    )
    if not success:
        print(error)
    return success
61ae5c2b68a5ee079acbc9b758c91b546a5af037
117,820
def wrap(s, sep=" "):
    """Wrap a string in parentheses if it contains an occurrence of sep.

    Args:
        s (string): String to possibly wrap in parentheses
        sep (str, optional): Separator whose presence indicates that the
            string must be wrapped. Defaults to " ".

    Returns:
        str: The string s wrapped in parentheses if it includes sep.
    """
    return "(" + s + ")" if sep in s else s
884c3039bd67e64789b90642b3a7e135e18dca01
117,823
def shortened_interface(name):
    """Condense an interface name.  Not canonical — mainly for brevity."""
    condensed = name.replace("GigabitEthernet", "ge")
    condensed = condensed.replace("0/0/0/", "")
    return condensed
b8fdd5de2e32727035f7fd5ecea49e02efbe99ed
117,827
import collections
import functools
import time


def call_at_most_every(seconds, count=1):
    """Call the decorated function at most count times every seconds seconds.

    The decorated function will sleep to ensure that at most count
    invocations occur within any 'seconds' second window.

    Args:
        seconds: time in seconds that this function will get called at most
            count times over.
        count: int, number of times it can be called in seconds duration.

    Returns:
        Decorated function.
    """
    def decorator(func):
        # Reuse any existing history so re-decorating the same function
        # object keeps its accumulated rate-limit state.
        try:
            call_history = getattr(func, '_call_history')
        except AttributeError:
            call_history = collections.deque(maxlen=count)
            setattr(func, '_call_history', call_history)

        @functools.wraps(func)
        def _wrapper(*args, **kwargs):
            current_time = time.time()
            # Number of recorded calls still inside the sliding window.
            window_count = sum(ts > current_time - seconds for ts in call_history)
            if window_count >= count:
                # We need to sleep until the relevant call is outside the window.
                # This should only ever be the first entry in call_history, but
                # if we somehow ended up with extra calls in the window, this
                # recovers.
                time.sleep(call_history[window_count - count] - current_time + seconds)
            # Append this call; deque will automatically trim old calls using
            # maxlen.
            call_history.append(time.time())
            return func(*args, **kwargs)
        return _wrapper
    return decorator
82c9343bcd44eac6598940abe87522a4f1f30e12
117,829
def validDBMLFile(s):
    """Return a boolean indicating whether the passed string has a valid
    `.dbml` file extension.  Case-sensitive (i.e. `.DBML` not accepted).

    Parameters:
        s (str): name of file.

    Returns:
        bool: True if s ends with '.dbml', else False.
    """
    return s.endswith('.dbml')
ffa0a9f61bf9545efa2a170ddb1875cad0b75892
117,830
from typing import Counter


def remove_duplicate_docs(docs: list):
    """Merge Recoll docs that share a URL, keeping the most relevant one.

    An epub containing several HTML files yields multiple docs that all
    refer to the same epub file; for each such URL only the doc with the
    highest relevancy rating is kept (appended after the non-duplicated
    docs).

    :param docs: the original list of documents
    :return: the docs with per-URL duplicates collapsed
    """
    url_counts = Counter(doc.url for doc in docs)
    duplicated_urls = [url for url, count in url_counts.items() if count > 1]
    for url in duplicated_urls:
        # Keep only the candidate with the highest relevancy rating.
        candidates = [doc for doc in docs if doc.url == url]
        best = max(candidates,
                   key=lambda doc: float(doc.relevancyrating.replace("%", "")))
        docs = [doc for doc in docs if doc.url != url]
        docs.append(best)
    return docs
043a0efbcd3bbf612f64f779a91576e24d35749c
117,833
def _validate_raid_1(logical_disk, unassigned_pd, no_of_pds):
    """Check if a RAID 1 logical drive of the requested size is possible.

    Finds suitable unassigned physical drives that can hold a logical
    volume of the requested size.

    :param logical_disk: The logical disk dictionary from raid config.
    :param unassigned_pd: The sorted list of unassigned physical drives
        (each entry's second field is the capacity in GB).
    :param no_of_pds: The 'number_of_physical_disks' from the target
        raid configuration, or False when unspecified.
    :returns: A list of suitable physical drives, or [] when RAID 1
        cannot be created (or no_of_pds is neither 2 nor 4).
    """
    requested_gb = logical_disk['size_gb']
    # RAID 1 with a mirrored pair: first drive large enough plus the
    # drive that follows it.
    if not no_of_pds or no_of_pds == 2:
        for index, drive in enumerate(unassigned_pd[:-1]):
            if float(drive[1]) >= requested_gb:
                return [unassigned_pd[index], unassigned_pd[index + 1]]
    # RAID 1 with four disks: the first two drives together must cover
    # the requested size.
    if not no_of_pds or no_of_pds == 4:
        if len(unassigned_pd) == 4 and float(unassigned_pd[0][1]) + float(
                unassigned_pd[1][1]) >= requested_gb:
            return unassigned_pd
    return []
e68c1de91fa957447efdacfda0ea76370ad3a4e9
117,835
def _cuota_mensual(L: float, I: float, n: float) -> float:
    """Calculate the monthly installment of an amortization schedule.

    :param L: Loan principal
    :param I: Per-period interest rate
    :param n: Number of installments
    :return: Installment amount
    """
    growth = (1 + I) ** n
    return L * (I * growth / (growth - 1))
011fd1a768e51efb27b8aac86b5e413dce59e446
117,838
def get_ec_params(alg):
    """Return a string representation of a hex encoded ASN1 object X9.62
    EC parameter.

    http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cos01/pkcs11-curr-v2.40-cos01.html
    indicates that EC parameters are byte arrays of DER encoded ASN1
    object X9.62 parameters.  The returned hex string has no leading 0x.
    """
    oid_by_curve = {
        "NIST p256": "2A8648CE3D030107",
        "NIST p224": "2B81040021",
        "NIST p384": "2B81040022",
        "NIST p521": "2B81040023",
    }
    try:
        obj = oid_by_curve[alg]
    except KeyError:
        raise RuntimeError("alg %s has no EC params mapping" % alg)
    # DER object: tag (0x06 OBJECT IDENTIFIER) + length + OID bytes, in hex.
    return "06{:02x}{}".format(len(obj) // 2, obj)
57746eab7368940b41558970531e6db29aa1023d
117,844
def has_races(path):
    """Check whether a result file reports races (more than two lines)."""
    with open(path, 'r') as handle:
        lines = handle.readlines()
    return len(lines) > 2
f56913771e1c0df9e0dc04258bd6172b236eb72b
117,850
def get_employee_details(name, df):
    """Look up one employee's details as a per-column mapping.

    Input
    -----
    name : str
        The name of the person ("Unknown" yields None).
    df : pandas dataframe
        The dataframe to search.

    Returns
    -------
    emp_dict : dict or None
        Column name -> matching values (pandas Series); None when name
        is "Unknown".
    """
    if name == "Unknown":
        return None
    matches = df["Name"] == name
    return {column: df.loc[matches, column] for column in df.columns}
e56f8b33ad029a6eda9f2d4406cdd06d8e2e1e73
117,852
import re


def clean_str(input_):
    """Replace every non-word character in ``input_`` with an underscore."""
    return re.sub(r'[^\w]', '_', input_)
d89cc81b6680767749668a683b6473309b99ac70
117,853
def is_subspan(x, y):
    """Return True if span ``x`` lies entirely within span ``y``."""
    starts_inside = y[0] <= x[0]
    ends_inside = x[1] <= y[1]
    return starts_inside and ends_inside
c9d9cab17a2f474139571bb587a098cec8a602a1
117,855
import re


def _cycler2prop_cycle(txt):
    """Turn matplotlib 1.5.0 cycler syntax back to the old syntax.

    Rewrites ``<key>.prop_cycle: cycler('<prop>', [<values>])`` entries
    into the pre-1.5 ``<key>.<prop>_cycle: <values>`` form, then removes
    all quote characters (the old syntax is unquoted).
    """
    # \2 backreference makes the closing quote match the opening one.
    txt = re.sub(r"(.*?)\.prop_cycle\s*:\s*cycler\((['\"])(.*?)\2,\s*"
                 r"\[\s*(.*)\s*\]\s*\)",
                 r"\1.\3_cycle: \4", txt)
    # Strip any remaining single or double quotes from the values.
    txt = txt.replace("'", "").replace('"', "")
    return txt
599123aa00cb974e6b2dbd939e9c8f44dc15337d
117,861
import math


def distance_calculator(x1, y1, x2, y2):
    """Return the Euclidean distance between (x1, y1) and (x2, y2).

    Uses ``math.hypot``, which avoids intermediate overflow/underflow
    that the naive sqrt-of-sum-of-squares can hit for very large or very
    small coordinates.
    """
    return math.hypot(x2 - x1, y2 - y1)
fe6926c4734292005303e72195cf9067bb8b74b2
117,865
def is_rectangular(faces):
    """Determine whether ``faces`` form a rectangular area.

    The summed face area is compared (to 4 decimal places) against the
    area of the axis-aligned rectangle spanned by the extreme vertices;
    equality means the faces exactly fill that rectangle.
    """
    total_face_area = sum(face.calc_area() for face in faces)
    corners = sorted((v for face in faces for v in face.verts),
                     key=lambda v: (v.co.x, v.co.y))
    lo, hi = corners[0], corners[-1]
    width = abs(lo.co.x - hi.co.x)
    length = abs(lo.co.y - hi.co.y)
    return round(total_face_area, 4) == round(width * length, 4)
40eab07bc198680674a3c158165c6ddb7bb9a2d0
117,866
def remove_prefix(s: str, prefix: str) -> str:
    """Remove the prefix from the string (i.e. str.removeprefix in 3.9)."""
    if s.startswith(prefix):
        return s[len(prefix):]
    return s
5a2f74477ad38c3ba1a96e03560fc1c0d6664a91
117,870
def get_comprehensive_tables(dataframes, analytics_type):
    """
    Function is used to ensure that all of the HPO sites will have all
    of the same table types. This is important if a table type is
    introduced in future iterations of the analysis script.

    :param
    dataframes (lst): list of pandas dataframes that are representations
        of the Excel analytics files

    analytics_type (str): the data quality metric the user wants to
        investigate

    :return:
    final_tables (lst): list of the tables that should be represented
        for each HPO at each date. these are extracted from the column
        labels of the Excel analytics files.
    """
    # Column labels that are bookkeeping rather than table names.
    undocumented_cols = ['Unnamed: 0', 'src_hpo_id', 'HPO', 'total',
                         'device_exposure', 'number_valid_units',
                         'number_total_units', 'number_sel_meas',
                         'number_valid_units_sel_meas']
    rate_focused_inputs = ['source_concept_success_rate', 'concept']
    final_tables = []
    for sheet in dataframes:  # for each date
        data_info = sheet.iloc[1, :]  # just the columns
        column_names = data_info.keys()
        # NOTE: older Excel outputs had naming inconsistencies
        # this was a quick fix
        # get all of the columns; ensure the columns are only logged once
        if analytics_type in rate_focused_inputs:
            # NOTE(review): Series.iteritems was removed in pandas 2.0
            # (renamed to .items) -- confirm the pinned pandas version.
            for col_label, _ in data_info.iteritems():
                if col_label[-5:] != '_rate':
                    undocumented_cols.append(col_label)
        # NOTE(review): reassigned on every iteration, so only the last
        # sheet's columns survive the loop -- confirm this is intended.
        final_tables = [x for x in column_names if x not in undocumented_cols]
    # eliminate duplicates
    final_tables = list(dict.fromkeys(final_tables))
    return final_tables
a963d03fdf72f9fe136c9a89dc3a9ca0a362f750
117,874
def is_scope_type(node_type):
    """Judge whether the given node type name denotes a scope."""
    return node_type[-5:] == 'scope'
e9c39d59c86f6de7c8f8c5ef1df145002dd8b37d
117,877
def rho_stp(s, t, p=0):
    """
    returns density as a function of:
    s = Salinity in psu,
    t = Temperature in deg C,
    p = Pressure in dbar (default = 0)

    NOTE(review): the coefficients appear to match the UNESCO EOS-80
    seawater equation of state (density at atmospheric pressure divided
    by a secant-bulk-modulus pressure correction) -- confirm against the
    published reference before relying on this for new work.
    """
    # Coefficients of the one-atmosphere density polynomial ro_st0.
    p1 = 999.842594
    p2 = 6.793952E-2
    p3 = -9.09529E-3
    p4 = 1.001685E-4
    p5 = -1.120083E-6
    p6 = 6.536332E-9
    p7 = 8.24493E-1
    p8 = -4.0899E-3
    p9 = 7.6438E-5
    p10 = -8.2467E-7
    p11 = 5.3875E-9
    p12 = -5.72466E-3
    p13 = 1.0227E-4
    p14 = -1.6546E-6
    p15 = 4.8314E-4
    # Coefficients of the secant bulk modulus polynomial k_stp.
    k1 = 19652.21
    k2 = 148.4206
    k3 = -2.327105
    k4 = 1.360477E-2
    k5 = -5.155288E-5
    k6 = 3.239908
    k7 = 1.43713E-3
    k8 = 1.16092E-4
    k9 = -5.77905E-7
    k10 = 8.50935E-5
    k11 = -6.12293E-6
    k12 = 5.2787E-8
    k13 = 54.6746
    k14 = -0.603459
    k15 = 1.09987E-2
    k16 = -6.1670E-5
    k17 = 7.944E-2
    k18 = 1.6483E-2
    k19 = -5.3009E-4
    k20 = 2.2838E-3
    k21 = -1.0981E-5
    k22 = -1.6078E-6
    k23 = 1.91075E-4
    k24 = -9.9348E-7
    k25 = 2.0816E-8
    k26 = 9.1697E-10
    # Density at atmospheric pressure (polynomial in t and s).
    ro_st0 = p1 + p2*t + p3*t**2 + p4*t**3 + p5*t**4 + p6*t**5\
        + p7*s + p8*s*t + p9*t**2*s + p10*t**3*s + p11*t**4*s\
        + p12*s**1.5 + p13*t*s**1.5 + p14*t**2*s**1.5 + p15*s**2
    # Secant bulk modulus (polynomial in t, s and p).
    k_stp = k1 + k2*t + k3*t**2 + k4*t**3 + k5*t**4\
        + k6*p + k7*t*p + k8*t**2*p + k9*t**3*p\
        + k10*p**2 + k11*t*p**2 + k12*t**2*p**2\
        + k13*s + k14*t*s + k15*t**2*s + k16*t**3*s\
        + k17*s**1.5 + k18*t*s**1.5 + k19*t**2*s**1.5\
        + k20*p*s + k21*t*p*s + k22*t**2*p*s + k23*p*s**1.5\
        + k24*p**2*s + k25*t*p**2*s + k26*t**2*p**2*s
    # Pressure correction of the surface density.
    return ro_st0/(1.0 - (p/k_stp))
abc3f6e252fc09f4a5919e8f854926e61f763796
117,878
def choices(enum):
    """Convert an Enum (or any iterable of members with ``name`` and
    ``value`` attributes) to a Django-style "choices" list of
    (name, value) pairs."""
    pairs = []
    for member in enum:
        pairs.append((member.name, member.value))
    return pairs
b9decfb2fa85f6042df1c805b2a06ce77630ba8e
117,886
def make_link_key(line: str, delimiter="\t") -> str:
    """Make a formatted string containing both contigs that make up a link.

    The first two delimiter-separated fields are sorted and joined with
    '$', so either orientation of the same link maps to the same key.
    """
    fields = sorted(line.strip().split(delimiter))
    return fields[0] + "$" + fields[1]
47c9736158beaf3f1c111e6b6df9d85ad3b10132
117,890
def hex_list(items): """ Return a string of a python-like list string, with hex numbers. [0, 5420, 1942512] --> '[0x0, 0x152C, 0x1DA30]' """ return '[{}]'.format(', '.join('0x%X' % x for x in items))
775166e908ae9202e330d76fc82c9c45a4388cca
117,893
import torch


def apply_argmax(array, argmax, axis):
    """Apply precomputed argmax indices in multi dimension arrays.

    ``array[np.argmax(array)]`` works fine in dimension 1, but not in
    higher ones; this gathers the values along ``axis`` instead.

    Examples
    --------
    >>> import torch
    >>> array = torch.randn(10, 4, 8)
    >>> argmax = torch.argmax(array, axis=1)
    >>> max_ = apply_argmax(array, argmax, axis=1)
    >>> assert torch.all(max_ == torch.max(array, axis=1).values)
    """
    index = argmax.unsqueeze(dim=axis)
    gathered = torch.gather(array, dim=axis, index=index)
    return gathered.squeeze(dim=axis)
2702b093f45302f191f4011183347ef1efb0ac3c
117,896
def FindDBLocations(DB, Aminos):
    """Find all (possibly overlapping) occurrences of this peptide in
    the database.  Return the list of DB start indices."""
    locations = []
    position = DB.find(Aminos)
    while position != -1:
        locations.append(position)
        position = DB.find(Aminos, position + 1)
    return locations
2151cab0b4622bfb936d373a52bdacdf3d86a6bc
117,902
from pathlib import Path


def convert_to_path(d, parent):
    """Convert strings to paths, recursing into nested dicts.

    Values whose keys end in ``_file`` or ``_dir`` become Paths; relative
    paths are made absolute against ``parent``.

    Parameters
    ----------
    d : dict
        If a key ends in _file or _dir, its value gets converted to a
        Path (the dict is mutated in place).
    parent : path
        Absolute path used to resolve relative paths.

    Returns
    -------
    dict
        The same dict, with _file/_dir values as absolute Paths.
    """
    for key, value in d.items():
        if value is None:
            continue
        if isinstance(value, dict):
            d[key] = convert_to_path(value, parent)
        elif key.endswith('_file') or key.endswith('_dir'):
            path = Path(value)
            if not path.is_absolute():
                path = (parent / path).resolve()
            d[key] = path
    return d
bca902336012f56fa56da2fd3c396c26861ce59f
117,905
def _load_file_contents(path):
    """Read and return the entire contents of the file at ``path``."""
    with open(path) as handle:
        return handle.read()
7820d88d4d303c15f24b7bb352ae53874f0d2867
117,910
import json


def serialize_product_id_and_amount(product_id: str, amount: int) -> str:
    """Convert information about a product and its amount into a JSON
    string.  Used to pass it as callback query data."""
    payload = {'id': product_id, 'amount': amount}
    return json.dumps(payload)
cef9603cc9dde0aba6cd437207344ef295d2bf03
117,912
def crear_matriz(filas, columnas):
    """Create an m x n matrix of zeros (m = filas rows, n = columnas
    columns); each row is an independent list."""
    return [[0] * columnas for _ in range(filas)]
d9b6d3cafdccaf719625c6defaee3dd94bd595ab
117,915
from typing import List


def check_df_columns(df, columns: List[str]) -> bool:
    """Check whether the DataFrame's columns exactly match ``columns``.

    NOTE(review): the original docstring claimed "True if the DataFrame
    contains all the columns", but the implementation tests *set
    equality*, so extra columns in ``df`` make this return False.  The
    equality behavior is preserved here; confirm which semantics callers
    actually expect before changing it.

    :param df: Pandas DataFrame to be checked
    :param columns: columns to check
    :return: True if df's columns are exactly ``columns`` (order ignored)
    """
    return set(df.columns) == set(columns)
af0f311467c1a8e6fbc98721df700ab129f2c2a1
117,917
from datetime import datetime


def timestring_to_timestamp(timestring, form):
    """Convert a string timestamp to a float epoch.

    Any fractional-seconds suffix after a '.' is discarded before
    parsing.

    Parameters
    ----------
    timestring: str
        The timestamp to transform.
    form: str
        The `format` used to parse the timestamp into a datetime object.

    Returns
    -------
    epoch: float
        Seconds since 1970-01-01 (naive arithmetic, no timezone applied).
    """
    parsed = datetime.strptime(timestring.split('.')[0], form)
    return (parsed - datetime(1970, 1, 1)).total_seconds()
de1c32f1cfd49ebf3b51c9ffc279fa085335e255
117,923
from datetime import datetime


def _get_time_difference(then):
    """Calculate the time difference between the input and the current time.

    Parameters
    ----------
    then : datetime time object
        The datetime to calculate the difference from.

    Returns
    -------
    dict of {'years': int, 'days': int, 'hours': int, 'minutes': int,
             'seconds': int}
        Elapsed years, days, hours, minutes and seconds between ``then``
        and now, where each smaller unit is the remainder left over from
        the larger one (e.g. exactly one hour elapsed gives
        1 hour, 0 minutes, 0 seconds — NOT 60 minutes / 3600 seconds).

    Notes
    -----
    Adapted from the `getDuration` answer on this stackoverflow thread:
    https://stackoverflow.com/questions/1345827/how-do-i-find-the-time-difference-between-two-datetime-objects-in-python
    """
    now = datetime.now()
    duration = now - then
    duration_in_s = duration.total_seconds()

    # Each helper returns (whole_units, remaining_seconds) via divmod;
    # passing the previous remainder chains the units together.
    def years():
        return divmod(duration_in_s, 31536000)  # seconds in a year = 31536000

    def days(seconds):
        return divmod(seconds if seconds is not None else duration_in_s, 86400)  # seconds in a day = 86400

    def hours(seconds):
        # Seconds in an hour = 3600
        return divmod(seconds if seconds is not None else duration_in_s, 3600)

    def minutes(seconds):
        # Seconds in a minute = 60
        return divmod(seconds if seconds is not None else duration_in_s, 60)

    # NOTE(review): the parameter shadows the function name here; kept
    # as-is since this is a doc-only pass.
    def seconds(seconds):
        if seconds is not None:
            return divmod(seconds, 1)
        return duration_in_s

    y = years()
    d = days(y[1])  # Use remainder to calculate next variable
    h = hours(d[1])
    m = minutes(h[1])
    s = seconds(m[1])
    return {
        'years': int(y[0]),
        'days': int(d[0]),
        'hours': int(h[0]),
        'minutes': int(m[0]),
        'seconds': int(s[0]),
    }
f69b07a9272dbb337cba334f762a3c3c2abf3669
117,927
def getVersion(root):
    """Get BEM++ version information from ``root``/VERSION.

    The file's first line must start with 'major.minor.patch'.

    :param root: directory containing the VERSION file.
    :return: (version_major, version_minor, version_patch) as ints.
    """
    # Use a context manager: the original leaked the open file handle.
    with open(root + "/VERSION") as f:
        version_strings = f.readline().rstrip().split(".")
    version_major = int(version_strings[0])
    version_minor = int(version_strings[1])
    version_patch = int(version_strings[2])
    return (version_major, version_minor, version_patch)
48d7d48de8ad6a80df0d3b743de5435390c74b7c
117,932
import re


def clean(require):
    """Replace a git requirement spec with just the requirement name."""
    without_git_spec = re.sub(r'^git\+[^#]+#egg=', '', require)
    return str(without_git_spec)
e9af8048d838cbcf5dd6672effc434165014557f
117,933
def strip_fname(name):
    """Strip the wrapper (e.g. F${}, V${}, P${}) from a name to get its ID.

    :param name: wrapped string such as ``F${id}``
    :return: element name without the wrapper text
    """
    inner = name[3:-1]
    return inner
24d5e2b1a45110598ca6cfde1bcfc7ee3c2d9d36
117,937
from pathlib import Path


def get_datapath(filename: str, data_type: str) -> Path:
    """Return the full path of a test data file.

    This function is widely used in test functions.

    Args:
        filename: Filename
        data_type: Data type, folder with same (upper-cased) name must
            exist in proc_test_data

    Returns:
        Path: Path to data file
    """
    data_type = data_type.upper()
    # BUG FIX: the path previously ended in a hard-coded "(unknown)"
    # placeholder and ignored the ``filename`` argument entirely; append
    # the requested filename instead.
    return (
        Path(__file__)
        .resolve(strict=True)
        .parent.parent.joinpath(f"data/proc_test_data/{data_type}/{filename}")
    )
d3cef30d26114ddd15b1b2983a9d92cb06cbc1b0
117,942
def return_f1(precision, recall):
    """Return the F1 score (harmonic mean) of precision and recall.

    Defined as 0 when either input is 0, to avoid division by zero.
    """
    if precision == 0 or recall == 0:
        return 0
    return 2 * precision * recall / (precision + recall)
a36b2088843b6b0323dd96957c1b846eff47c298
117,944
from typing import Union import colorsys def hsl_color(value: float, hue: Union[int, float], step: int) -> str: """ Generete a hex color from value and hue. Step set how often color light resets. """ if isinstance(hue, int): hue /= 360 value = 49 + ((value % step) * 5) value /= 100 rgb = colorsys.hls_to_rgb(hue, value, value) r_255, g_255, b_255 = [int(c * 255) for c in rgb] return '#%02x%02x%02x' % (r_255, g_255, b_255)
78af59e3af92853712d2936f6ff68561df0555ef
117,947
def timedelta_to_string(td):
    """Return a compact custom string for a timedelta.

    Whole days render as ``"Nd"``; at least an hour renders as ``"Hh Mm"``;
    otherwise just minutes as ``"Mm"``.

    Parameters
    ----------
    td : timedelta
        timedelta object to be represented as a string
    """
    total = int(td.total_seconds())
    day_count = total // 86400
    hour_count = total // 3600
    minute_count = (total % 3600) // 60
    if total >= 86400:
        return "{0}d".format(day_count)
    if total >= 3600:
        return "{0}h {1}m".format(hour_count, minute_count)
    return "{0}m".format(minute_count)
9caff8e5f9fe436635dec92222ee46d7aaaa6dde
117,948
def calculate_tickers_set(correlated_features_filtered):
    """Count the unique tickers appearing as keys or values of the mapping.

    Parameters
    ------------------------------------------------
    correlated_features_filtered : dict
        Maps a ticker to a list of correlated tickers
        (result of `collect_corr_tickers`).

    Returns
    -------
    int
        Number of distinct tickers.  (Note: despite the name, this returns
        a count, not the set itself.)
    """
    unique_tickers = set()
    for ticker, related in correlated_features_filtered.items():
        unique_tickers.add(ticker)
        unique_tickers.update(related)
    return len(unique_tickers)
1ca073fe0f9a4f6cc629644c265ac3e7a0ebbe8e
117,950
def ngram(n, iter_tokens):
    """Return a generator yielding every contiguous length-*n* slice of the
    sequence *iter_tokens*."""
    last_start = len(iter_tokens) - n + 1
    return (iter_tokens[start:start + n] for start in range(last_start))
a660dab036f70f03916b4cf0f34300ace94c06e6
117,952
def config_prop_name(cls, name, value):
    """Configure name on property.

    If *value* exposes a ``__config__`` callable, invoke it with *cls* and
    *name* so a property object can learn the attribute name it is bound to.

    Args:
        cls: Class the property will belong to (must be a type).
        name: Non-empty attribute name.
        value: Attribute value, possibly a configurable property.

    Returns:
        True if ``value.__config__`` existed and was called, else False.

    Raises:
        TypeError: if *cls* is not a type or *name* is not a string.
        ValueError: if *name* is empty.
    """
    # Validate arguments up front with guard clauses.
    if not isinstance(cls, type):
        raise TypeError('Class must be a type')
    if not isinstance(name, str):
        raise TypeError('Name must be a string')
    if not name:
        raise ValueError('Name must be non-empty')
    # EAFP: only values implementing __config__ are treated as properties.
    try:
        configure = value.__config__
    except AttributeError:
        return False
    configure(cls, name)
    return True
e5c0adba68fbd1e534c769c0211a7bf52fcd3a8c
117,955
import collections


def GroupSymbolInfosByOffset(symbol_infos):
    """Bucket SymbolInfo instances by their offset.

    Several symbols may share one offset, so this is a 1-to-many mapping.

    Args:
        symbol_infos: iterable of SymbolInfo instances

    Returns:
        a dict {offset: [symbol_info1, ...], ...}
    """
    grouped = collections.defaultdict(list)
    for info in symbol_infos:
        grouped[info.offset].append(info)
    # Return a plain dict so missing offsets raise KeyError for callers.
    return dict(grouped)
03cd92992a44ba751e49e4965bb4e78dbbebc09b
117,958
def rescale_value(original, prev_min, prev_max, min, max):
    """Linearly remap *original* from the range [prev_min, prev_max] onto
    the new range [min, max]."""
    fraction = float(original - prev_min) / (prev_max - prev_min)
    span = max - min
    return min + span * fraction
44366f334a5ea89c0802782d0303b5ea93ff8300
117,959
import requests
import time


def fetch_data_by_ip(ip):
    """Fetch geolocation data for *ip* from ip-api.com and return the parsed
    JSON body as a dict.

    This endpoint is limited to 45 requests per minute from an IP address.
    If you go over the limit your requests will be throttled (HTTP 429)
    until your rate limit window is reset. If you constantly go over the
    limit your IP address will be banned for 1 hour. Your implementation
    should always check the value of the X-Rl header, and if its is 0 you
    must not send any more requests for the duration of X-Ttl in seconds.
    """
    response = requests.get(f"http://ip-api.com/json/{ip}")
    # X-Ttl: seconds until the rate-limit window resets; X-Rl: requests left.
    ttl = int(response.headers["X-Ttl"])
    rate_limit = int(response.headers["X-Rl"])
    # Back off for the remainder of the window when the quota is exhausted
    # or this request was already throttled.
    # NOTE(review): after sleeping, the throttled (429) response itself is
    # still returned -- the request is NOT retried. Confirm callers expect
    # this rather than a fresh result.
    if rate_limit == 0 or response.status_code == 429:
        time.sleep(ttl)
    return response.json()
ce596e3dea2c8031bee2d78c8d5e90741db4097e
117,964
def str_manipulation(s):
    """
    Lower-case every upper-case letter in the word, character by character.
    ----------------------------------------------------------------------------
    :param s: (str) the word that user input.
    :return: (str) the word with upper-case letters lowered.
    """
    # Per-character lowering is kept (rather than s.lower()) to preserve the
    # original's exact character-by-character behaviour.
    return ''.join(ch.lower() if ch.isupper() else ch for ch in s)
61a91a4bbcafe2ffd913bad9055cbba115bd8311
117,966
def get_periodic_interval(current_time, cycle_length, rec_spacing, n_rec):
    """Used for linear interpolation between periodic time intervals.

    One common application is the interpolation of external forcings
    that are defined at discrete times (e.g. one value per month of a
    standard year) to the current time step.

    Arguments:
        current_time (float): Time to interpolate to.
        cycle_length (float): Total length of one periodic cycle.
        rec_spacing (float): Time spacing between each data record.
        n_rec (int): Total number of records available.

    Returns:
        :obj:`tuple` containing (n1, f1), (n2, f2): Indices and weights
        for the interpolated record array.

    Example:
        The following interpolates a record array ``data`` containing 12
        monthly values to the current time step:

        >>> year_in_seconds = 60. * 60. * 24. * 365.
        >>> current_time = 60. * 60. * 24. * 45. # mid-february
        >>> print(data.shape)
        (360, 180, 12)
        >>> (n1, f1), (n2, f2) = get_periodic_interval(current_time, year_in_seconds, year_in_seconds / 12, 12)
        >>> data_at_current_time = f1 * data[..., n1] + f2 * data[..., n2]
    """
    # Shift by half a record spacing (records are centred on their interval)
    # and add whole cycles so the value is positive before the modulo below.
    locTime = current_time - rec_spacing * 0.5 + \
        cycle_length * (2 - round(current_time / cycle_length))
    # Position within a single cycle.
    tmpTime = locTime % cycle_length
    # 1-based index of the record at or before tmpTime; tRec2 wraps around
    # to record 1 past the last record (periodic boundary).
    tRec1 = 1 + int(tmpTime / rec_spacing)
    tRec2 = 1 + tRec1 % int(n_rec)
    # Linear weights: wght2 grows as tmpTime moves from record 1 to record 2.
    wght2 = (tmpTime - rec_spacing * (tRec1 - 1)) / rec_spacing
    wght1 = 1.0 - wght2
    # Return 0-based indices paired with their interpolation weights.
    return (tRec1 - 1, wght1), (tRec2 - 1, wght2)
abdd4fa0eed806a16a65717772f6dbd2d0eebecb
117,967
def add_inverse_times_to_edges(tree):
    """
    Label every edge of *tree* with an 'inverse time' attribute equal to
    ``1 / edge['time']``, then return the (mutated) tree.
    """
    for edge_key in tree.edges:
        attrs = tree.edges[edge_key]
        attrs['inverse time'] = 1 / attrs['time']
    return tree
d1448bd79b2968276ad2fdc41384f01887a37def
117,970
def validate_process_subtype(supertype_name, supertype, subtype_name, subtype):
    """Perform process subtype validation.

    :param supertype_name: Supertype name
    :param supertype: Supertype schema
    :param subtype_name: Subtype name
    :param subtype: Subtype schema
    :return: A list of validation error strings
    """
    errors = []
    for item in supertype:
        # Ensure that the item exists in subtype and has the same schema.
        for subitem in subtype:
            if item["name"] != subitem["name"]:
                continue
            # Compare the union of keys from both schemas so a key present
            # on only one side is still checked.
            for key in set(item.keys()) | set(subitem.keys()):
                if key in ("label", "description"):
                    # Label and description can differ.
                    continue
                elif key == "required":
                    # A non-required item can be made required in subtype, but not the
                    # other way around.
                    # Note the asymmetric defaults: an unspecified 'required'
                    # counts as True on the supertype side and False on the
                    # subtype side (this branch only runs when at least one
                    # side sets the key explicitly).
                    item_required = item.get("required", True)
                    subitem_required = subitem.get("required", False)
                    if item_required and not subitem_required:
                        errors.append(
                            "Field '{}' is marked as required in '{}' and optional in '{}'.".format(
                                item["name"], supertype_name, subtype_name
                            )
                        )
                elif item.get(key, None) != subitem.get(key, None):
                    errors.append(
                        "Schema for field '{}' in type '{}' does not match supertype '{}'.".format(
                            item["name"], subtype_name, supertype_name
                        )
                    )
            # A matching subitem was found and checked; stop scanning subtype.
            break
        else:
            # for/else: the loop completed without `break`, i.e. no subitem
            # in the subtype matched this supertype field's name.
            errors.append(
                "Schema for type '{}' is missing supertype '{}' field '{}'.".format(
                    subtype_name, supertype_name, item["name"]
                )
            )
    return errors
e9bad265f5350555c4644e6845af142ec661f203
117,971
import torch


def accuracy(output: torch.Tensor, labels: torch.Tensor):
    """
    Fraction of rows whose argmax over the last dim of `output` equals the
    corresponding entry of `labels`.
    """
    predictions = output.argmax(dim=-1)
    correct = predictions.eq(labels).sum().item()
    return correct / len(labels)
bc3a95450276a266a3bbaddad728f86545497a03
117,972
def trim_hash_from_symbol(symbol):
    """If the passed symbol ends with a hash of the form h[16-hex number]
    (e.g. ``h0123456789abcdef``), trim it off and return the trimmed symbol;
    otherwise return the symbol unchanged."""
    import re

    tokens = symbol.split('::')
    last = tokens[-1]
    # Bug fix: the old code only looked at last[0] == 'h', so any trailing
    # component starting with 'h' (e.g. "hello") was wrongly stripped, and
    # an empty last component raised IndexError. Require exactly
    # 'h' followed by 16 hex digits, per the docstring.
    if re.fullmatch(r'h[0-9a-fA-F]{16}', last):
        return "::".join(tokens[:-1])  # reassemble without the hash
    return symbol
0ca332dfcca453d669f5d4d73c24326cff15c7ab
117,973
def mul(x, y):
    """Computes multiplication element-wise.

    Args:
        x, y: array_like data exposing a ``dtype`` attribute
            (e.g. numpy arrays) with identical dtypes.

    Returns:
        ndarray: the element-wise product ``x * y``.

    Raises:
        TypeError: if the dtypes of ``x`` and ``y`` differ.
    """
    # Bug fix: compare dtypes by equality, not identity. Distinct-but-equal
    # dtype objects (e.g. structured dtypes constructed separately) fail an
    # `is` check even though the types match.
    if x.dtype != y.dtype:
        raise TypeError('x and y should be same type.')
    return x * y
fa8a846b0bc61ed1c42034583861e03416ec6980
117,975
from datetime import datetime


def weekday_num(dt: datetime):
    """Return the 0-based ordinal of *dt*'s weekday within its month.

    E.g. if Tuesday is the first Tuesday of the month this returns 0,
    the second Tuesday returns 1, and so on.
    """
    # Days 1-7 -> 0, days 8-14 -> 1, etc.
    return (dt.day - 1) // 7
16685e70065337b2a3d75cf03b32b61da2bc3207
117,978
def to_instance_format(hub: str, group: str, project: str) -> str:
    """Join hub, group and project into the ``hub/group/project`` form."""
    return "/".join((hub, group, project))
40c979c1791b6fb8078738eeac1a1907377962b6
117,981
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor


def timeout(*args, **kwargs):
    """
    Limit the function execution time to the given timeout (in seconds).

    Call as ``timeout(seconds, func, *args)`` (process pool by default) or
    ``timeout(seconds, "thread"|"process", func, *args)`` to select the
    executor explicitly.

    Example:
        >>> fib = lambda n: 1 if n <= 1 else fib(n - 1) + fib(n - 2)
        >>> timeout(0.25, fib, 10)  # wait at most 0.25 seconds
        89

    Raises:
        concurrent.futures.TimeoutError: when the call does not finish
        within the limit.
    """
    limit, target, *call_args = args
    pool_cls = ProcessPoolExecutor
    # An optional "thread"/"process" selector may precede the callable.
    if target in ("thread", "process"):
        pool_cls = ThreadPoolExecutor if target == "thread" else ProcessPoolExecutor
        target, *call_args = call_args
    # NOTE(review): on timeout, leaving the `with` block still waits for the
    # running task during executor shutdown -- confirm this is acceptable.
    with pool_cls() as pool:
        future = pool.submit(target, *call_args, **kwargs)
        return future.result(timeout=limit)
70140a13742e73bfda4d164477a2d6ceb687cae9
117,982
def removeIntInxString(txt, sep='.'):
    """
    removeIntInxString(txt, sep)

    Strip a leading index such as ``"1. "`` from text like
    ``"1. Text what u need"``, returning ``"Text what u need"``.

    Parameters
    ----------
    txt : str
        String to transform.
    sep : str
        Separator between the index you don't need and the text.

    Returns
    -------
    str
        The text with each piece stripped and re-joined by *sep*; when *sep*
        does not occur, *txt* stripped of surrounding whitespace.
    """
    pieces = txt.split(sep)
    if len(pieces) <= 1:
        return txt.strip()
    # Everything after the first separator, each piece stripped, re-joined.
    return sep.join(piece.strip() for piece in pieces[1:])
e2836f90af0687e3d130a9adddb179a9344a1cbf
117,985
import re


def proc_line(line):
    """
    proc_line() strips the HTML mark-up from one relevant table row of the
    genus HTML text and returns the list of cell values.
    """
    # Drop the colour-highlighting tags entirely.
    line = re.sub(r'<font color=#FF00BF>', '', line)
    line = re.sub(r'</font>', '', line)
    # Every remaining tag becomes a bar separator.
    line = re.sub(r'<.*?>', '|', line)
    # Collapse bar runs: triples first, then doubles (order matters for the
    # exact result on long runs).
    line = re.sub(r'\|\|\|', '|', line)
    line = re.sub(r'\|\|', '|', line)
    # Remove the leading and trailing bar, then split into cell values.
    return line[1:-1].split('|')
ff2e29a09e977687f8aa9d8d32b651a3e57ee014
117,987
def getStreamLabel(sampleRate, channels, blockSize):
    """Return the canonical ``rate_channels_blocksize`` label for a stream."""
    return "{0}_{1}_{2}".format(sampleRate, channels, blockSize)
90b28873a77a3359b1801120afc974361b67fb86
117,989
def get_count(word_count_tuple):
    """Return the count element of a (word, count) tuple -- used as a custom
    sort key."""
    count = word_count_tuple[1]
    return count
61cae689e722f96df2c7ec5e957319455c6dafd0
117,992
def create_zero_list(number_of_segments):
    """Build the all-zero command list for the Iris AO.

    :param number_of_segments: int, the number of segments in your pupil
    :return: list of (0., 0., 0.) tuples, one per segment in the DM
    """
    zero_segment = (0., 0., 0.)
    return [zero_segment for _ in range(number_of_segments)]
33826fb16d2980101d517036f3239dca18c78ac3
117,994
def getOneAfterSpace(string):
    """Return the character following the first space in *string*, or ``''``
    when there is no space or the space is the final character.
    (Helper for countInitials.)
    """
    # str.find replaces the manual index loop; this also fixes an IndexError
    # the old loop raised when the string ended with a space, and drops the
    # unused `result`/`reachedSpace` locals.
    space_at = string.find(' ')
    if space_at == -1 or space_at + 1 >= len(string):
        return ''
    return string[space_at + 1]
0a398b5376fd993b0d0a4180d33ce4d56a564e5c
117,995
def validate_params(params, required_params, validate_values=False):
    """
    Make sure the iterable params contains all elements of required_params
    If validate_values is True, make sure params[k] are set. If required_params is a dictionary, make sure
    params[k] are set to the values given

    >>> validate_params(['a','b','c'], ['a','b'])
    True
    >>> validate_params(['a','b','c'], ['a','b','d'])
    False
    >>> validate_params({'a':0,'b':1,'c':2}, ['a','b'])
    True
    >>> validate_params({'a':0,'b':1,'c':2}, ['a','b','d'])
    False
    >>> validate_params({'a':0,'b':1,'c':2}, ['a','b'], validate_values=True)
    True
    >>> validate_params({'a':0,'b':1,'c':2}, ['a','b','d'], validate_values=True)
    False
    >>> validate_params({'a':None,'b':1,'c':2}, ['a','b','d'], validate_values=True)
    False
    >>> validate_params({'a':0,'b':1,'c':2}, {'a':0,'b':2}, validate_values=False)
    True
    >>> validate_params({'a':0,'b':1,'c':2}, {'a':0,'b':2}, validate_values=True)
    False
    >>> validate_params({'a':0,'b':1,'c':2}, {'a':0,'b':1}, validate_values=True)
    True
    >>> validate_params({'a':None,'b':1,'c':2}, {'a':0,'b':1}, validate_values=True)
    False
    >>> validate_params({'a':None,'b':1,'c':2}, {'a':None,'b':1}, validate_values=True)
    True
    >>> validate_params({'a':0,'b':1,'c':2}, {'a':0,'b':1, 'd':2}, validate_values=True)
    False
    >>> validate_params({'a':None,'b':1,'c':2}, {'a':[0, None],'b':1, 'c':2}, validate_values=True)
    True
    """
    # every key (or element) in required_params must be present in the given params
    for k in required_params:
        if k not in params:
            return False
        elif validate_values:
            try:
                # see if we got a dictionary of parameters
                p_val = params.get(k)
            except AttributeError:
                # if it's not a dictionary, it doesn't have values, obviously
                return False
            # now we need to check if the given parameter value is valid
            try:
                # .get only exists when required_params is a dict; a plain
                # list/iterable falls through to the outer AttributeError.
                req_vals = required_params.get(k)
                # check if there's a list of requirements
                try:
                    # membership test raises TypeError when req_vals is a
                    # non-iterable scalar -- handled just below
                    if p_val not in req_vals:
                        return False
                except TypeError:
                    # check if it matches the required value
                    if p_val != req_vals:
                        return False
            except 
AttributeError:
                # if the requirements are not specified, just make sure it's set to something
                # (None is treated as "unset" here)
                if p_val is None:
                    return False
    # and if we pass all the checks for all the required_params, it's valid
    return True
fe86e190b294bfc68ca228c56ae504335bb34f48
117,998
import random


def level_a_colour_shuffler(level_a_list):
    """
    Remap the level-A group numbers onto a shuffled (but deterministic,
    seed=2) permutation of themselves, consistently across the whole list --
    e.g. every 0 becomes the same new number. Used to vary plot colours.
    """
    shuffled_groups = list(set(level_a_list))
    # Fixed seed keeps the remapping reproducible between runs.
    random.seed(2)
    random.shuffle(shuffled_groups)
    remap = dict(enumerate(shuffled_groups))
    return [remap[group] for group in level_a_list]
5c83e33aab172251883a63559b16c63084597ac2
117,999
def append_default_extension(filename, default_extension='.png'):
    """Append *default_extension* to *filename* unless it already contains a
    dot (i.e. it already has an extension)."""
    has_extension = '.' in filename
    return filename if has_extension else filename + default_extension
70181b03aba9ec73836d6e33784b2123ade67406
118,006
def bond_idx_to_symb(idxs, idx_symb_dct):
    """ Convert bond index pairs ((a1, b1), (a2, b2), ..., (an, bn)) into the
        corresponding pairs of atom symbols via the index->symbol mapping.
    """
    symbol_pairs = []
    for first, second in idxs:
        symbol_pairs.append((idx_symb_dct[first], idx_symb_dct[second]))
    return tuple(symbol_pairs)
68e26d6dae90a1d43618049b468b385e573bbef2
118,007
def bed_merge_file_select_top_ids(in_merged_bed, id2sc_dic, rev_filter=False):
    """
    From a merged .bed file (output of mergeBed or bed_merge_file()), pick
    the best-scoring region ID in every merged row and return a dict of
    the selected IDs mapped to their scores.  With rev_filter=True lower
    scores win (e.g. p-values).  Every ID in column 4 (';'-separated) must
    appear in id2sc_dic.
    """
    selected = {}
    assert id2sc_dic, "given ID to score dictionary seems to be empty"
    with open(in_merged_bed) as handle:
        for row in handle:
            # Column 4 holds the ';'-joined IDs of the overlapping regions.
            region_ids = row.strip().split("\t")[3].split(";")
            best_id = "-"
            best_sc = 6666666 if rev_filter else -6666666
            for site_id in region_ids:
                assert site_id in id2sc_dic, "site ID \"%s\" not found in given site ID to score dictionary" % (site_id)
                site_sc = id2sc_dic[site_id]
                is_better = site_sc < best_sc if rev_filter else site_sc > best_sc
                if is_better:
                    best_sc = site_sc
                    best_id = site_id
            selected[best_id] = best_sc
    assert selected, "No IDs read into dictionary (input file \"%s\" empty or malformatted?)" % (in_merged_bed)
    return selected
80908a9a14d48fd95aad421cb2bc13c41f5cc9c5
118,012
def MergeIndexRanges(section_list):
    """Given a list of (begin, end) ranges, return the merged ranges.

    Overlapping or touching ranges are coalesced with a sweep over
    begin (+1) / end (-1) events.

    Args:
        section_list: a list of index ranges as (begin, end), begin < end

    Returns:
        a list of merged index ranges.

    Raises:
        ValueError: if any range has begin >= end.
    """
    actions = []
    for section in section_list:
        if section[0] >= section[1]:
            # Bug fix: ValueError does not lazy-format its arguments the way
            # logging calls do -- the message must be %-formatted explicitly.
            raise ValueError('Invalid range: (%d, %d)' % (section[0], section[1]))
        actions.append((section[0], 1))
        actions.append((section[1], -1))
    # Sort by position; at equal positions, starts (+1) sort before ends (-1)
    # so touching ranges merge into one.
    actions.sort(key=lambda x: (x[0], -x[1]))
    merged_indexes = []
    status = 0
    start = -1
    for action in actions:
        if start == -1:
            start = action[0]
        status += action[1]
        if status == 0:
            # All open ranges closed: emit one merged interval.
            merged_indexes.append((start, action[0]))
            start = -1
    return merged_indexes
1cf99a959108f2606237773fa836e01bc2d11e2b
118,014
def is_repeated_chars(text: str) -> bool:
    """Detect a run of one repeated character in a redaction.

    Such runs often indicate padding like XXXXXXXX or a row of spaces
    under the redaction.

    :param text: A string to check
    :returns: True when the string is longer than one character and consists
        of a single repeated character, else False.
    """
    return len(text) > 1 and len(set(text)) == 1
edc681e03bd367a6767205036c0e4081490f7b0b
118,016
def get_plane_point(plane_transform, x, y):
    """
    Map local plane coordinates (x, y) through the subspace plane transform
    into the corresponding world-space coordinates.
    """
    local_point = [x, y, 0, 1]  # homogeneous coordinates on the plane (z = 0)
    return plane_transform.dot(local_point)
4e1f4a5822a4a505a93468e70db18012581e0941
118,018
def valid_application_round_data(
    reservation_unit,
    reservation_unit2,
    service_sector,
    purpose,
    purpose2,
    valid_application_round_basket_data,
):
    """
    Valid JSON payload for creating a new application round.
    """
    # Collect the related-object id lists up front for readability.
    unit_ids = [reservation_unit.id, reservation_unit2.id]
    purpose_ids = [purpose.id, purpose2.id]
    return {
        "name": "Kevään nuorten säännöllisten vuorojen haku 2021",
        "reservation_unit_ids": unit_ids,
        "application_period_begin": "2020-01-01T08:00",
        "application_period_end": "2020-01-31T09:00",
        "reservation_period_begin": "2021-02-01",
        "reservation_period_end": "2021-06-01",
        "public_display_begin": "2020-11-10T08:00",
        "public_display_end": "2021-11-10T08:00",
        "purpose_ids": purpose_ids,
        "service_sector_id": service_sector.id,
        "status": "draft",
        "application_round_baskets": [valid_application_round_basket_data],
    }
539b3dbf7163ec2b67c28f8662cef8b3223c4800
118,019
def construct_wiki_header(wiki_meta):
    """
    Build the Jekyll front-matter header for a compomics.github.io wiki page
    from the metadata mapping *wiki_meta* (keys: title, permalink, tags,
    project, github_project).
    """
    template = """---
title: "{title}"
layout: default
permalink: "{permalink}"
tags: {tags}
project: "{project}"
github_project: "{github_project}"
---
"""
    return template.format(**wiki_meta)
dbc000d7cfa3b91edf243363f1af0d9956c42494
118,020
def is_primitive(x):
    """
    Truthy when *x* maps to a JSON primitive (i.e. string, integer,
    floating-point number, boolean, or null).
    """
    if x is None:
        return True
    return isinstance(x, (str, int, float, bool))
dce6b785cbafa779f28e6863fd17cf3acb03d257
118,025
from typing import Any from typing import Type def _is_measurement_device(instrument_handle: Any, class_type: Type) -> bool: """ Returns True if the instrument handle is of the given type, else False. This function checks whether the given handle is of the correct instrument type. All error's are catched related to importing of not installed drivers or instruments which are not connected. Args: instrument_handle: An measurement device instance. class_type: The type of the measurement class. Returns: True if of the given class_type, else False. """ try: is_present = isinstance(instrument_handle, class_type) except Exception: is_present = False return is_present
7a0d9ba51a36df8c800f35e20b4a3ae690522502
118,029