content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def find_components_without_annotation(model, components):
    """
    Find model components with empty annotation attributes.

    Parameters
    ----------
    model : cobra.Model
        A cobrapy metabolic model.
    components : {"metabolites", "reactions", "genes"}
        A string denoting `cobra.Model` components.

    Returns
    -------
    list
        The components without any annotation.
    """
    unannotated = []
    for item in getattr(model, components):
        if item.annotation is None or len(item.annotation) == 0:
            unannotated.append(item)
    return unannotated
9ad2163cb2d3e4f0b079c081c86cae760583a287
99,909
def F_harmonic(x, k=1):
    """Return the harmonic (Hooke's-law) restoring force, -k*x."""
    force = -(k * x)
    return force
0e518b0de5914ff9cae40d9d13572dfb6601ccdf
99,911
def batchify(data, batch_size):
    """Reshape a 1-D token sequence into ``batch_size`` parallel columns.

    Example with batch_size = 4: starting from the sequence
    ``tensor([0, 1, ..., 13])``, cut it into 4 equal strips (the
    remainder is dropped), then transpose so the elements consumed at
    the same time step form one row:

        tensor([[0, 3, 6,  9],    <-- step 0
                [1, 4, 7, 10],    <-- step 1
                [2, 5, 8, 11]])   <-- step 2
    """
    n_full = data.size(0) // batch_size
    trimmed = data.narrow(0, 0, n_full * batch_size)  # drop the remainder
    return trimmed.view(batch_size, -1).t().contiguous()
308f573db4141fef59143ceb49e2da0610b744de
99,912
def create_inverse_index(index):
    """Return an inverse index mapping the integer indexes to string values.

    For the `index` with mapping ``index[field][v] = i``, the inverse index
    has mapping ``inverse_index[field][i] = v``. See `Instance.to_sentence`
    method for usage of the inverse index for transformation of instances
    to sentences.
    """
    inverse = {}
    for field, mapping in index.items():
        inverse[field] = {i: value for value, i in mapping.items()}
    return inverse
8a76f33136c9167cf12b77f365a55f57e1125022
99,915
def get_wordlist(stanzas):
    """Return a sorted list of all distinct final words across all stanzas."""
    words = set()
    for stanza in stanzas:
        words.update(stanza.words)
    return sorted(words)
a2c610c90aae76276a6b5cc345f9537e1c131464
99,920
def number(number: str, fsep: str, tsep: str) -> str:
    """
    Format a number using the provided float and thousands separators.

    >>> number("1000000", ",", " ")
    '1 000 000'
    >>> number("1000000", ".", ",")
    '1,000,000'
    >>> number("-1000000", "", "")
    '−1000000'
    >>> number("4.54609", ",", " ")
    '4,54609'
    >>> number("22905", ",", ".")
    '22.905'
    """
    # Strip grouping spaces already present, then normalize the Unicode
    # minus (U+2212) so int()/float() can parse the value.
    cleaned = number.replace(" ", "").replace("−", "-")
    try:
        formatted = f"{int(cleaned):,}"
    except ValueError:
        formatted = f"{float(cleaned):,}"
    # Swap separators via a placeholder so fsep/tsep may exchange places
    # without clobbering each other.
    formatted = formatted.replace(",", "|")
    formatted = formatted.replace(".", fsep)
    formatted = formatted.replace("|", tsep)
    # Always emit the Unicode minus for negative numbers.
    return formatted.replace("-", "−")
eb68d3e7fc344ed3eab94ccb0ccaf9a493c89d27
99,921
def position_string_to_id(positions):
    """Convert position strings to their integer representations.

    Position 1 = Primary farm (ADC), 2 = Secondary farm (Mid),
    3 = Tertiary farm (Top), 4 = Farming support (Jungle),
    5 = Primary support (Support).

    Because string representations vary ("jg" == "jng" == "jungle"),
    only the first character of each string is used, which seems to be
    more or less standard.

    Args:
        positions (list(string))
    Returns:
        list(int)
    """
    first_char_to_id = {"a": 1, "m": 2, "t": 3, "j": 4, "s": 5}
    return [first_char_to_id[position[0]] for position in positions]
6a153a931e8d239adde49eac7ea358a837cb1e3e
99,925
def split_sequences2d(sequences, seq_splitter_d1, seq_splitter_d2):
    """Split each sequence into its second-level hierarchy components.

    e.g. split each string into its component words and then characters:
    'a cat' -> [['a'], ['c', 'a', 't']]. The result is a doubly nested
    list: one entry per sequence, one sub-entry per first-level part.
    """
    return [
        [seq_splitter_d2(part) for part in seq_splitter_d1(sequence)]
        for sequence in sequences
    ]
d15a4bd08a119721a2f52df604b7e1e83b866b9d
99,926
def make_absolute(route, url_prefix):
    """Create an absolute route without a trailing slash."""
    route = route.strip('/')
    if url_prefix:
        return '/' + url_prefix.strip('/') + '/' + route
    return '/' + route
2033150f12a153bf6489aa6265fe1286a2506489
99,927
def split_arguments(df_arguments):
    """Split `DataFrame` by column `Usage` into `train`-, `validation`-,
    and `test`-arguments.

    Parameters
    ----------
    df_arguments : pandas.DataFrame
        Must contain a 'Usage' column with values 'train', 'validation',
        or 'test'.

    Returns
    -------
    tuple of pandas.DataFrame
        (train, validation, test) frames, each with 'Usage' dropped and
        the index reset.
    """
    def _subset(usage):
        # Rows for one usage value, selector column dropped, index reset.
        mask = df_arguments['Usage'] == usage
        return df_arguments.loc[mask].drop(['Usage'], axis=1).reset_index(drop=True)

    return _subset('train'), _subset('validation'), _subset('test')
ebb8e3be4b299fc81149c0418ef9d8cf3cda5ae2
99,930
from typing import List


def merge_sort_in_place(the_list: List[int]) -> List[int]:
    """
    Performs a merge sort on a list in-place.

    :param the_list: list of integer values to sort
    :return: same list ordered in ascending order

    Examples:
    >>> merge_sort_in_place([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort_in_place([])
    []
    >>> merge_sort_in_place([-2, -5, -45])
    [-45, -5, -2]
    """
    # Base case: lists of length 0 or 1 are already sorted. Without this
    # guard the original recursed forever on any input.
    if len(the_list) <= 1:
        return the_list

    mid = len(the_list) // 2
    left_half = the_list[:mid]
    right_half = the_list[mid:]

    merge_sort_in_place(left_half)
    merge_sort_in_place(right_half)

    i = j = k = 0
    # Merge the two sorted halves back into the_list.
    while i < len(left_half) and j < len(right_half):
        if left_half[i] < right_half[j]:
            the_list[k] = left_half[i]
            i += 1
        else:
            the_list[k] = right_half[j]
            j += 1
        k += 1

    # Drain whatever remains of the left half.
    while i < len(left_half):
        the_list[k] = left_half[i]
        i += 1
        k += 1

    # Drain whatever remains of the right half. (The original tested `i`
    # here instead of `j`, dropping or mis-copying elements.)
    while j < len(right_half):
        the_list[k] = right_half[j]
        j += 1
        k += 1

    return the_list
e8ed322fbab1b8b640692532f6d056391a851807
99,931
import inspect
from typing import Any


def is_instanced_object(value: Any) -> bool:
    """
    Check if the input value is an instanced object, i.e. an
    instantiated class.
    """
    if inspect.isclass(value) or inspect.isroutine(value):
        return False
    return hasattr(value, "__dict__")
d1bd3569720f37bab5a01f4b253652e08e87c094
99,934
def host_to_device(request_type):
    """Check if the direction is host to device (bit 7 clear)."""
    direction_bit = request_type & 0x80
    return direction_bit == 0x00
bba689f41632067d24822d7bd9cb086b538fd6a3
99,939
import re


def is_sentence_separator(token):
    """Return True if the token is a sentence splitter."""
    return bool(re.match(r"^/?[.!?]$", token))
8218a24f1d978fe81061233a97d3a98e34c1c1f7
99,940
def will_it_float(s):
    """Helper function that converts the input into a float if it is a
    number. If the input is a string that does not parse as a number, it
    is returned unchanged.
    """
    try:
        result = float(s)
    except ValueError:
        result = s
    return result
32555a67e909a4b5e1f1cde6f383833fb4c78c81
99,941
def should_save_from_rollout_logs(
    all_rollout_logs,
    best_return,
    best_success_rate,
    epoch_ckpt_name,
    save_on_best_rollout_return,
    save_on_best_rollout_success_rate,
):
    """
    Helper function used during training to determine whether checkpoints and videos
    should be saved. It will modify input attributes appropriately (such as updating
    the best returns and success rates seen and modifying the epoch ckpt name), and
    returns a dict with the updated statistics.

    Args:
        all_rollout_logs (dict): dictionary of rollout results that should be
            consistent with the output of @rollout_with_stats

        best_return (dict): dictionary that stores the best average rollout return
            seen so far during training, for each environment

        best_success_rate (dict): dictionary that stores the best average success
            rate seen so far during training, for each environment

        epoch_ckpt_name (str): what to name the checkpoint file - this name might
            be modified by this function

        save_on_best_rollout_return (bool): if True, should save checkpoints that
            achieve a new best rollout return

        save_on_best_rollout_success_rate (bool): if True, should save checkpoints
            that achieve a new best rollout success rate

    Returns:
        save_info (dict): dictionary that contains updated input attributes
            @best_return, @best_success_rate, @epoch_ckpt_name, along with two
            additional attributes @should_save_ckpt (True if should save this
            checkpoint), and @ckpt_reason (string that contains the reason for
            saving the checkpoint)
    """
    should_save_ckpt = False
    ckpt_reason = None

    for env_name, rollout_logs in all_rollout_logs.items():
        env_return = rollout_logs["Return"]
        if env_return > best_return[env_name]:
            best_return[env_name] = env_return
            if save_on_best_rollout_return:
                # save checkpoint if we achieve a new best return
                epoch_ckpt_name += "_{}_return_{}".format(env_name, env_return)
                should_save_ckpt = True
                ckpt_reason = "return"

        env_success = rollout_logs["Success_Rate"]
        if env_success > best_success_rate[env_name]:
            best_success_rate[env_name] = env_success
            if save_on_best_rollout_success_rate:
                # save checkpoint if we achieve a new best success rate
                epoch_ckpt_name += "_{}_success_{}".format(env_name, env_success)
                should_save_ckpt = True
                ckpt_reason = "success"

    # return the modified input attributes
    return dict(
        best_return=best_return,
        best_success_rate=best_success_rate,
        epoch_ckpt_name=epoch_ckpt_name,
        should_save_ckpt=should_save_ckpt,
        ckpt_reason=ckpt_reason,
    )
ea69134880ad8f7ef75b5d3eb16a3b761a8a3716
99,946
def get_week_sec(epoch, epoch_start):
    """Return seconds since the beginning of the week.

    Parameters
    ----------
    epoch : datetime.datetime
    epoch_start : datetime.datetime

    Returns
    -------
    seconds : float
    """
    delta = epoch - epoch_start
    weeks = delta.days / 7.
    fractional_week = weeks - int(weeks)
    return fractional_week * (7 * 24 * 60 * 60) + delta.seconds
3810daaa4165286c4e063b284578255a2e4700a1
99,950
def list_database_tables(database, mydb):
    """
    Get a list of all tables in a database.

    Args:
        database - The name of the database to list out
        mydb - A connected MySQL connection
    """
    tables = []
    cursor = mydb.cursor()
    # Identifiers cannot be bound as query parameters, so escape embedded
    # backticks to keep a crafted database name from breaking out of the
    # quoting (SQL injection through the identifier).
    safe_name = database.replace('`', '``')
    cursor.execute('SHOW TABLES FROM `' + safe_name + '`;')
    while True:
        rows = cursor.fetchmany(20)
        if not rows:
            break
        for entry in rows:
            tables.append(entry[0])
    return tables
02cc943f3ac4af2a94b5fccdec1abafb8550a3d0
99,951
def mangle(cls, name):
    """Applies Python name mangling using the provided class and name."""
    return f"_{cls.__name__}__{name}"
348f9d1b1819c6283f81c8e7bf3a07a1e7c25b7f
99,954
def get_amplify_cookie_names(client_id, cookies_or_username):
    """Return mapping dict for cookie names for amplify."""
    key_prefix = f"CognitoIdentityServiceProvider.{client_id}"
    last_user_key = f"{key_prefix}.LastAuthUser"
    # Accept either a username directly or a cookie jar to look it up in.
    if isinstance(cookies_or_username, str):
        token_user_name = cookies_or_username
    else:
        token_user_name = cookies_or_username.get(last_user_key)
    user_prefix = f"{key_prefix}.{token_user_name}"
    return {
        "last_user_key": last_user_key,
        "user_data_key": f"{user_prefix}.userData",
        "scope_key": f"{user_prefix}.tokenScopesString",
        "id_token_key": f"{user_prefix}.idToken",
        "access_token_key": f"{user_prefix}.accessToken",
        "refresh_token_key": f"{user_prefix}.refreshToken",
    }
6914921fa8e85450d7216b64c6f4b5902485003f
99,956
def get_public_ip(oneandone_conn, public_ip, full_object=False):
    """
    Validates that the public ip exists by ID or a name.
    Returns the public ip if one was found.
    """
    for candidate in oneandone_conn.list_public_ips(per_page=1000):
        if public_ip not in (candidate['id'], candidate['ip']):
            continue
        return candidate if full_object else candidate['id']
947d000c4e62a18cd4908ec2072f07d6a14a1bfe
99,961
def ping() -> str:
    """Check if the grid client is up.

    Check for Ping? Get a Pong!

    Returns:
        str: pong
    """
    response = "pong"
    return response
e4b0c556630a85900d2c1df7638ee7402122815c
99,962
def is_pow2(x):
    """Check if input is a power of 2."""
    if x == 0:
        return False
    # A power of two has a single set bit, so x & (x - 1) clears it to 0.
    return not (x & (x - 1))
9c68884190260f085dade66e14c7c6c5134d1c4f
99,964
def remove_root(root, paths):
    """Return `paths` made relative to `root`.

    Only a leading ``root + '/'`` prefix is removed. (The original used
    ``str.replace``, which also removed occurrences of the root deeper
    inside a path, mangling paths such as '/x/<root>/y'.)
    """
    prefix = root + '/'
    return [
        pth[len(prefix):] if pth.startswith(prefix) else pth
        for pth in paths
    ]
2d3540eae7654d9f7b438571a544ce5698d54579
99,966
def get_messages_per_minutes(msgs, minutes):
    """Gets lists of messages for each interval in minutes.

    Args:
        msgs (list of MyMessage objects): Messages.
        minutes (int): The number of minutes in one interval.

    Returns:
        A dictionary mapping each interval start (minutes since midnight)
        to the list of all messages sent within
        [minute, minute + minutes).
    """
    buckets = {start: [] for start in range(0, 24 * 60, minutes)}
    for msg in msgs:
        minute_of_day = msg.date.hour * 60 + msg.date.minute
        # Round down to the start of the containing interval.
        bucket_start = minute_of_day - (minute_of_day % minutes)
        buckets[bucket_start].append(msg)
    return buckets
a69147050978a4c1da50fa56bd4ab58c72626b3a
99,967
def _next_rotation_id(rotated_files):
    """Given the hanoi_rotator generated files in the output directory,
    returns the rotation_id that will be given to the current file. If
    there are no existing rotated files, return 0.
    """
    if not rotated_files:
        return 0
    # Each entry carries its rotation id at index 1; continue after the
    # highest one seen so far.
    return max(entry[1] for entry in rotated_files) + 1
92222796c7427974dc4afb52d9b80da0533ce01b
99,968
def GetNestedDictValue(a_dict, nested_key):
    """Obtains nested dict's value given hierarchical key sequence.

    For example, given d['a']['b']['c'] = 'z':
      GetNestedDictValue(d, ['a', 'b', 'c']) returns 'z'

    Args:
      a_dict: nested dict.
      nested_key: hierarchical key sequence.

    Returns:
      Value if found. None if any of keys doesn't exist.
    """
    current = a_dict
    for key in nested_key:
        if isinstance(current, dict):
            current = current.get(key)
        else:
            return None
        if current is None:
            return None
    return current
80cf48ead7d8b34dc12a4c0c4cdb1f19d98f87db
99,970
def get_diagonals(arr):
    """Return the two diagonals (main and anti) of a given matrix."""
    main_diag = []
    anti_diag = []
    for idx, row in enumerate(arr):
        main_diag.append(row[idx])
        anti_diag.append(row[len(row) - 1 - idx])
    return main_diag, anti_diag
66b04f70adba62e15148a769f7195d3670071c27
99,971
import re


def extract_time(input_: str) -> list:
    """Extract 12-hour time values from a string.

    Args:
        input_: Text that may contain times such as "10:30 a.m." or "4 pm".

    Returns:
        list: Extracted time strings (empty list if none matched).
    """
    # Try the most specific pattern first, falling back to looser ones.
    # The dots in "a.m."/"p.m." are escaped here - unescaped, as in the
    # original, they matched ANY character ("a.m." also matched "axmx").
    patterns = (
        r'(\d+:\d+\s?(?:a\.m\.|p\.m\.:?))',
        r'(\d+\s?(?:a\.m\.|p\.m\.:?))',
        r'(\d+:\d+\s?(?:am|pm:?))',
        r'(\d+\s?(?:am|pm:?))',
    )
    for pattern in patterns:
        found = re.findall(pattern, input_)
        if found:
            return found
    return []
5f6ca07cd6637f0692fddb23c859e6b6765a1d6d
99,972
def get_form_field_type(field):
    """
    Returns the widget type of the given form field.

    This can be helpful if you want to render form fields in your own way
    (i.e. following Bootstrap standards).

    Usage::

        {% load libs_tags %}
        {% for field in form %}
            {% get_form_field_type field as field_type %}
            {% if "CheckboxInput" in field_type %}
                <div class="checkbox"><label>// render input here</label></div>
            {% else %}
                {{ field }}
            {% endif %}
        {% endfor %}
    """
    # str() delegates to the widget's __str__, matching previous behavior.
    return str(field.field.widget)
6e21606fddc0238a8b42ebe9e51fc8c78e5e267b
99,973
def parse_metrics(text):
    """Parse the Q4S message into the corresponding metrics.

    Args:
        text (str): Q4S message containing "Label: value" pairs.

    Returns:
        tuple: (latency, jitter, bandwidth, packetloss) as floats; any
        metric that is absent or unparseable is NaN.
    """
    def _to_float(token):
        # Unparseable values degrade to NaN rather than raising.
        try:
            return float(token)
        except ValueError:
            return float('nan')

    # One slot per Q4S label; replaces four near-identical try/except
    # branches in the original.
    metrics = {
        "Latency:": float('nan'),
        "Jitter:": float('nan'),
        "BandWidth:": float('nan'),
        "PacketLoss:": float('nan'),
    }
    words = text.split()
    # The value follows its label, so the last word cannot start a pair.
    for index, word in enumerate(words[:-1]):
        if word in metrics:
            metrics[word] = _to_float(words[index + 1])
    return (metrics["Latency:"], metrics["Jitter:"],
            metrics["BandWidth:"], metrics["PacketLoss:"])
7c14240abaed11113c735bac8f659aa6fb1e5b4c
99,978
def to_hex(s):
    """Transform a string to hexadecimal notation, 16 bytes per line."""
    pairs = ["{0:02x}".format(ord(ch)) for ch in s]
    hex_str = ' '.join(pairs)
    # Each byte renders as 3 characters ("xx "), so 48 chars = 16 bytes/row.
    lines = [hex_str[i:i + 48] for i in range(0, len(hex_str), 48)]
    return '\n'.join(lines)
0c71b398291ac9081a7194509fc65218af404cb2
99,983
def niters(request):
    """Number of test iterations (value of the ``--niters`` CLI option)."""
    option_name = '--niters'
    return request.config.getoption(option_name)
ef92b993ad57ba566b5c28587071d13427943155
99,984
def rgb_to_int(r, g, b):
    """Convert color from RGB to 24-bit integer (blue in the high byte)."""
    return (b << 16) + (g << 8) + r
cf6e76fbf3e7541cb0a1ade669bbc30f525ef468
99,991
def relative_to_absolute(base, url):
    """Transform a relative URL into an absolute URL."""
    if url.startswith('http'):
        absolute = url
    elif url.startswith('/'):
        absolute = base + url
    else:
        absolute = base + '/' + url
    return absolute
ee2b784e7db0edd44495acb08d07738a6557a8e0
99,992
import re


def get_suff(path):
    """Get the trailing numeric suffix from a parameter directory path."""
    sci_note = re.compile(r'[+-]?\d+(?:\.?\d*(?:[eE][+-]?\d+)?)?')
    matches = sci_note.findall(str(path))
    # The parameter value is the last number that appears in the path.
    return float(matches[-1])
b024c33e3515e7af5d91329e8466d5db74c97548
99,993
def transform_y(transform, y_value):
    """Apply the y-scale of a transform matrix to a y coordinate."""
    scale_y = transform[1][1]
    return int(round(y_value * scale_y))
faf381398f47c4face4254b6bff8c76e11db67d1
99,999
def get_reward(state):
    """
    Return the reward for the given state.

    Args:
        state -- type(int) state value between [0,47]

    Returns:
        reward -- type(int) reward in the corresponding state
        game_end -- type(bool) flag indicating game end (falling off the
            cliff / reaching the goal)
    """
    # Goal state: episode ends with a positive reward.
    if state == 47:
        return 10, True
    # Cliff states: episode ends with a heavy penalty.
    if 37 <= state <= 46:
        return -100, True
    # Any other state: game continues with a step cost of -1.
    return -1, False
71c7e725ea640ea7d2a604ee40c4dc5936537ddb
100,002
import ctypes


def _response_post(response):
    """Return the post-processed response as a boolean.

    The ROPUF sampler returns counter values instead of booleans, so the
    raw value is reinterpreted as a signed 16-bit integer; any positive
    count is treated as True.
    """
    counter = ctypes.c_int16(response).value
    return counter > 0
0f71ccaca350feeeccc2aa28634cadb15992c079
100,015
def only_ascii(value: str) -> str:
    """Remove non-ASCII characters."""
    kept = [char for char in value if ord(char) < 128]
    return "".join(kept)
29b3bafe122546371869ed6bab59a043a3ba1300
100,018
def kwot(s):
    """Single quote a string, doubling contained quotes as needed."""
    return "'" + s.replace("'", "''") + "'"
3f6dd553172bbef32212cfc80818b1ad5befeb10
100,019
def clip(coordinates, image_shape):
    """Clip box to valid image coordinates.

    # Arguments
        coordinates: List of floats containing coordinates in point form
            i.e. [x_min, y_min, x_max, y_max].
        image_shape: List of two integers indicating height and width of
            image respectively.

    # Returns
        Tuple of clipped coordinates.
    """
    height, width = image_shape[:2]
    x_min, y_min, x_max, y_max = coordinates
    x_min = max(x_min, 0)
    y_min = max(y_min, 0)
    x_max = min(x_max, width)
    y_max = min(y_max, height)
    return x_min, y_min, x_max, y_max
440f4f741eb7a5a1f63f6673d3be6132fe1549c5
100,022
def package_quantity(value, unit=None):
    """Package a quantity value in a standardised form.

    Note: a falsy ``unit`` (None, empty string) is omitted from the
    result.
    """
    packaged = {"quantity_value": value}
    if unit:
        packaged["unit"] = unit
    return packaged
4d9799abdbdf002cb92eb09d2fed482f94cfa7fd
100,023
def get_filename_pair(filename):
    """
    Given one *.spar/*.sdat filename, returns tuple with both filenames.

    It doesn't matter if the filename is a fully qualified path or not.
    One assumption: the extension is either all caps or all lower case.
    """
    stem = filename[:-3]
    if filename[-1:].isupper():
        pair = (stem + 'PAR', stem + 'DAT')
    else:
        pair = (stem + 'par', stem + 'dat')
    return pair
1d3a33ba68e14d9217e5e2a7833258d9c94f1667
100,026
def handy_clean(kwargs):
    """
    :param kwargs: Pop all handy shortcuts from kwargs
    :return str|None: Equivalent full --clean option
    """
    # All four shortcut flags are popped up front so kwargs is fully
    # consumed even when an earlier flag wins.
    shortcuts = [
        (kwargs.pop("cs"), "show"),
        (kwargs.pop("cl"), "local"),
        (kwargs.pop("cr"), "remote"),
        (kwargs.pop("ca"), "all"),
    ]
    for flag, option in shortcuts:
        if flag:
            return option
    return None
faa48a43416356de5e52be48e7f86a8d8bd36e8e
100,027
def check_sheet_cols(name):
    """
    Callable mini-function passed to pd.read_excel(usecols=function).

    Keep the first column (used as the index) plus every column from
    column 4 onward.
    """
    return name == 0 or name >= 4
b6fe4eda2c45d08cce9dec7a202d43512576c672
100,034
def isElement(node):
    """Return True if the supplied DOM node is an element node."""
    element_type = node.ELEMENT_NODE
    return node.nodeType == element_type
eb1161adc782945aae0efdc0ad55c9d899b811f0
100,040
def factorielle(n : int) -> int:
    """Precondition: n >= 0
    Return the factorial product n!
    """
    # Running product over 1..n (empty range for n == 0 gives 1).
    product : int = 1
    for k in range(1, n + 1):
        product = product * k
    return product
9c78d3ff1890b2a8c2b0694f5aae5a651b460f69
100,041
from typing import Optional


def padStr(s: str, field: Optional[int] = None) -> str:
    """Pad the beginning of a string with spaces, if necessary."""
    if field is None:
        return s
    # str.rjust is a no-op when the string already fills the field.
    return s.rjust(field)
fe45f39dd510ba9f51a983e4ed2f69c87b942cfa
100,049
import math
import typing


def lists_are_equal(a: typing.List[float], b: typing.List[float]) -> bool:
    """Return True if all members of the two lists are equal.

    NaNs are treated as equal."""
    if len(a) != len(b):
        return False
    for x, y in zip(a, b):
        x_nan = math.isnan(x)
        y_nan = math.isnan(y)
        # Both must be NaN or both non-NaN.
        if x_nan != y_nan:
            return False
        if not x_nan and x != y:
            return False
    return True
dbe447fe70e00c7c5bbd907e47da22fa5ccb845e
100,050
import glob


def get_lines_from_file(folder_name, interface_name, file_string):
    """Open the first file matching "folder_name/interface_name/file_string"
    and return its contents as a list of lines.
    """
    pattern = f"{folder_name}/{interface_name}/{file_string}"
    matched_path = glob.glob(pattern)[0]
    with open(matched_path, "r") as handle:
        return handle.readlines()
171437e75ac1bbac1b11f0d9b7563dae766036f3
100,051
def getMove(playerHand, money):
    """Asks player for their move, and returns 'H' for hit, 'S' for
    stand, and 'D' for double down."""
    while True:
        moves = ['(H)it', '(S)tand']
        # Doubling down is only offered on the initial two-card hand,
        # and only when the player still has money to wager.
        can_double = len(playerHand) == 2 and money > 0
        if can_double:
            moves.append('(D)ouble down')
        move = input(', '.join(moves) + '> ').upper()
        if move in ('H', 'S'):
            return move
        if move == 'D' and can_double:
            return move
42b1ab2a3b472c82184ac451d612a04e133f731e
100,052
def read_example_index(example_index_file):
    """Read example index file and parse into dictionary.

    Parameters
    ----------
    example_index_file : str
        /path/to/file.ext

    Returns
    -------
    dict
        Dictionary of example indices. Returns empty dictionary if no
        index file given.
    """
    example_index = {}
    if not example_index_file:
        return example_index
    with open(example_index_file, "r") as handle:
        for line in handle:
            # First whitespace-separated field on each line is the id.
            example_index[line.split()[0]] = 1.0
    return example_index
01b03e669f5cf11363f5ee4ff6fa31526b0e44fc
100,054
from subprocess import call


def fetch_file(url, outfile, retries=0):
    """Fetch a remote file from `url`, writing to `outfile`, with a
    particular number of `retries`.

    Bug fix: the original imported ``call`` from ``unittest.mock``, so
    the wget command was never executed and a mock call object was
    returned; ``subprocess.call`` is what was intended.

    :param url: URL to download.
    :param outfile: Path to write the downloaded file to.
    :param retries: Number of retries (currently unused by this function).
    :return: wget's exit status (0 on success).
    """
    # List argv with shell=False (the subprocess default) avoids shell
    # injection through the URL or output path.
    wget_cmd = ["wget", "-O", outfile, url]
    retcode = call(wget_cmd)
    return retcode
7f1424726918a88679a41463a7de9b5a2be9deeb
100,056
import re


def hyphenate_date(date_str):
    """Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format."""
    match = re.match(r'^(\d{4})(\d{2})(\d{2})$', date_str)
    # Non-matching input is passed through unchanged.
    return '-'.join(match.groups()) if match else date_str
bbdfeedf00ef151cd22d6b8cecf94e86e8251501
100,064
def read_stoplist(fname):
    """Read stop words from a text file, ignoring comments after '#'.

    The file contains one stop word per line. Each line is truncated at
    its first '#' before tokenizing, so both trailing and full-line
    comments are ignored. (The original tokenized the whole file first,
    which leaked the words of full-line comments into the stop list, and
    it never closed the file.)
    """
    stopwords = set()
    with open(fname) as handle:
        for line in handle:
            # Strip the comment part, then collect remaining tokens.
            for token in line.split('#', 1)[0].split():
                stopwords.add(token)
    return stopwords
b123a27a0456552587fb12406e6966bfcba2ccac
100,066
def check_not_i_within_i(antecedent_mention, mention):
    """
    Check whether one of the two mentions fully contains the other.

    "Not i-within-i", i.e.: the two mentions are not in an i-within-i
    construct, that is, one cannot be a child NP in the other's NP
    constituent. Here this is interpreted as "one mention does not fully
    contain the other". This constraint is symmetric.

    :param antecedent_mention: candidate antecedent Mention
    :param mention: Mention under consideration
    """
    b1, e1 = antecedent_mention.begin_offset, antecedent_mention.end_offset
    b2, e2 = mention.begin_offset, mention.end_offset
    # True unless one span fully contains the other (logically equivalent
    # to the disjunctive form used previously).
    one_contains_other = (b2 <= b1 and e1 <= e2) or (b1 <= b2 and e2 <= e1)
    return not one_contains_other
604727a6ca8725601a9322f93878fa5df77b9072
100,067
def factorial(n):
    """Return the factorial of the non-negative integer ``n``.

    Naive recursive implementation with memoization of the base cases.
    Bug fix: the original seeded the memo with ``0: 0``, which made
    ``factorial(0)`` return 0 instead of the correct value 1.

    NOTE(review): the memo is rebuilt on every top-level call, so it only
    short-circuits the recursion's base cases, not repeated calls.
    """
    memo = {0: 1, 1: 1}  # base cases: 0! == 1! == 1
    try:
        return memo[n]
    except KeyError:
        memo[n] = n * factorial(n - 1)
        return memo[n]
4028ca862218c3f86c3cc30b532b0a8b3177ac44
100,071
import importlib
import inspect


def get_available_classes(mod, mod_path, control_variable):
    """
    Get all classes objects available in a custom module.

    :param mod: the module
    :type mod: object
    :param mod_path: path to the module
    :type mod_path: str
    :param control_variable: module specific attribute name (please refer
        to the documentation sec XX)
    :type control_variable: str
    :return: a dictionary with the associated class objects
    :rtype: dict{str: object}
    """
    def _is_candidate(obj):
        return inspect.isclass(obj) or inspect.isfunction(obj)

    available_objects = {}
    for submodule_name in mod.__all__:
        submodule = importlib.import_module(mod_path + submodule_name)
        for _, candidate in inspect.getmembers(submodule, _is_candidate):
            # Only objects declaring the control attribute are exposed,
            # keyed by that attribute's value.
            if control_variable in candidate.__dict__:
                available_objects[candidate.__dict__[control_variable]] = candidate
    return available_objects
358044d74794df99576d3e1966530d5516a7ea8f
100,072
def read_text(filename):
    """
    Read data from text file.

    :param filename: name of text file to be read
    :type filename: str
    :return: data stored in text file
    """
    with open(filename, 'r') as handle:
        content = handle.read()
    return content
bdf3ce45a50e4297db5fe5c253676f8ddf546bb9
100,073
def generate_valid_filename(path):
    """Generate a valid (backslash-escaped) file name.

    :param path: file name
    :type path: str
    :return: valid file path
    :rtype: str
    """
    # Characters that must be escaped for shell-safe file names.
    specials = ('[', '@', '!', '#', '$', '%', '^', '&', '*', '(', ')',
                '<', '>', '?', '|', '}', '{', '~', ':', ']', ' ')
    result = str(path).strip()
    for special in specials:
        result = result.replace(special, '\\' + special)
    return result
4e6ce626943cd143734c829c1713635d870cb414
100,074
def first(iterable, default=None):
    """Return the first element of an iterable; or default."""
    for item in iterable:
        return item
    return default
45d3a81c962af677732e1d8ad0359c8bbd8a3dc9
100,084
def collatz(x):
    """Generate the next term of the Collatz sequence, given the
    previous term x."""
    return x // 2 if x % 2 == 0 else 3 * x + 1
0d5fa57bf175f833bb82b7a58f31bc94b4d3a96c
100,088
import json


def read_json(path):
    """Read a JSON file.

    Read a JSON formatted file into a dictionary object.

    Args:
        path (str): File path to JSON formatted file.

    Returns:
        dict: Content of a JSON file parsed as dictionary.
    """
    # A context manager guarantees the handle is closed even when
    # json.load raises (the original leaked the handle in that case).
    with open(path, "r") as fs:
        return json.load(fs)
ee852019a1e2d9aff4f4662aae827e05d599952c
100,089
from hashlib import md5


def md5checksum(file_name):
    """Compute MD5 checksum on a file, even if it is quite large."""
    digest = md5()
    with open(file_name, "rb") as handle:
        # Feed the hash 32 KiB at a time to bound memory usage.
        chunk = handle.read(32768)
        while chunk:
            digest.update(chunk)
            chunk = handle.read(32768)
    return digest.hexdigest()
3cbfb37d46524dae1929480e48060bbd2fcb1255
100,092
from datetime import datetime


def __log_dt_converter(dt_object):  # pragma: no cover
    """Convert datetime objects to strings automatically for the logger;
    any other value is passed through unchanged."""
    if isinstance(dt_object, datetime):
        return str(dt_object)
    return dt_object
6d9128347181e8f7e2e3ee2f3ef790a54134b0ae
100,094
def unique(iterable):
    """
    Return a generator containing all unique items in an iterable,
    preserving first-seen order.

    Args:
        iterable (iterable): An iterable of any hashable items

    Returns:
        generator: Unique items in the given iterable
    """
    seen = set()
    source = iter(iterable)  # consume the input eagerly, like a genexp

    def _generate():
        for item in source:
            if item not in seen:
                seen.add(item)
                yield item

    return _generate()
5105a786dafb6b3b31ede01a3233bc0248ff1536
100,097
def _get_tune_approach_value(qx: float, qy: float) -> float:
    """Calculate the (fractional) tune approach of qx and qy."""
    # Remove the integer part of the tune split before taking |qx - qy|.
    integer_split = int(qx) - int(qy)
    return abs(qx - qy - integer_split)
c158d6eb08446e6477b269282060f2306a3071ba
100,102
def get_catalog_record_embargo_available(cr):
    """Get access rights embargo available date as string for a catalog record.

    Args:
        cr (dict): A catalog record

    Returns:
        str: The embargo available for a dataset. If not found then ''.
    """
    research_dataset = cr.get('research_dataset', {})
    access_rights = research_dataset.get('access_rights', {})
    return access_rights.get('available', '')
4558e8f5b60a75104093684c22c6ffe7a0d7715d
100,106
def _trim_path(file_path, base_path):
    """Trim a path by removing a prefix.

    Args:
        file_path (str): Path.
        base_path (str): Prefix to remove.

    Returns:
        str: Trimmed path (unchanged when the prefix does not match).
    """
    if not file_path.startswith(base_path):
        return file_path
    return file_path[len(base_path):]
bf90a104ee50e0e6a359717d06f9d43e9e82f2e7
100,109
def step_symmetry(autocorr_peak_values):
    """
    Calculate step symmetry from autocorrelation peak values.

    Step symmetry measures based on `Moe-Nilssen (2004) - Estimation of
    gait cycle characteristics by trunk accelerometry
    <http://www.jbiomech.com/article/S0021-9290(03)00233-1/abstract>`_.

    If calculating symmetry from acceleration in the vertical axis, pass
    the detected peaks from the vertical axis autocorrelation. For the
    lateral axis, pass *both* peaks and valleys from the autocorrelation.

    Perfect step symmetry is 1.0 for the vertical axis - larger values
    are more symmetric, capped at 1.0. Perfect step symmetry is -1.0 for
    the lateral axis - smaller values are more symmetric, capped at -1.0.

    Parameters
    ----------
    autocorr_peak_values : ndarray
        Values of the autocorrelation peaks/valleys detected by
        :func:`sensormotion.peak.find_peaks`. Peak values only for the
        vertical axis; both peak and valley values for the lateral axis.

    Returns
    -------
    step_sym : float
        Step symmetry. Value is capped at 1.0 or -1.0 depending on the
        axis of interest.
    """
    # Only the second half of the (symmetric) autocorrelation is used.
    second_half = autocorr_peak_values[autocorr_peak_values.size // 2 :]
    assert len(second_half) >= 3, (
        "Not enough autocorrelation peaks detected. Plot the "
        "autocorrelation signal to visually inspect peaks"
    )

    step_peak = second_half[1]    # first dominant period: a step (left-right)
    stride_peak = second_half[2]  # second dominant period: a stride (left-left)

    # Divide the smaller-magnitude peak by the larger one, preserving sign.
    if abs(step_peak) > abs(stride_peak):
        return stride_peak / step_peak
    return step_peak / stride_peak
b95976e7aac6d26fba2e9a7694f55e6e74d0edf9
100,112
def describe_dmri_directions(img):
    """Generate a description of the number of diffusion directions."""
    n_directions = img.shape[3]
    return f"{n_directions} diffusion directions"
f5a789a8a63a458bb47e8d4d49d4fac20a6ce287
100,113
def calculate_fuel(mass: int) -> int:
    """Calculate required fuel given mass.

    Fuel is the mass divided by three (floor division) minus two.
    """
    return mass // 3 - 2
1afb2b91447146ed8fd71c8ba1d08794e363b427
100,114
def rcmip_2_hector_var(rcmip_2_hector_lut, var_name):
    """
    Get the Hector variable name corresponding to a RCMIP variable name

    Params
    -------
    rcmip_2_hector_lut : Pandas DataFrame
        RCMIP to Hector variable look up table
    var_name : str
        Name of the RCMIP variable

    Return
    ------
    Pandas Series representing RCMIP --> Hector variable conversion
    information. Attributes: hector_component, hector_variable, hector_unit,
    hector_udunits, rcmip_variable, rcmip_units, rcmip_udunits
    """
    # CO2 emissions map to specific RCMIP names; look those up by the
    # rcmip_variable column so the duplicated hector rows (ffi, luc) do
    # not collide. Everything else is looked up by hector_variable.
    special_rcmip_names = {
        'ffi_emissions': 'Emissions|CO2|MAGICC Fossil and Industrial',
        'luc_emissions': 'Emissions|CO2|MAGICC AFOLU',
    }
    if var_name in special_rcmip_names:
        mask = rcmip_2_hector_lut['rcmip_variable'] == special_rcmip_names[var_name]
    else:
        mask = rcmip_2_hector_lut['hector_variable'] == var_name
    # iloc[0] picks the first row when several match.
    return rcmip_2_hector_lut.loc[mask].iloc[0]
53879cff447fc5285cf86d003edbea870dc8f36f
100,119
def get_output_labels(opts, preds):
    """Convert predictions to labels.

    The processor in opts["pass_in"][0] supplies the label vocabulary;
    each prediction is an index into it.
    """
    labels = opts["pass_in"][0].get_labels()
    return [labels[prediction] for prediction in preds]
8121107c0e894335925546d9970297f6f72a7bbf
100,121
import re


def getRoutes(sim_type, data, letters):
    """Get raw data actual pathways and convert to node routes.

    Each pathway string is split into single characters; each character is
    converted to its 1-based index in `letters`. For 'Raw Pathways'
    simulations a leading node 1 is prepended to every route.
    """
    pathways = [data[idx] for idx in range(len(data))]
    routes = []
    for pathway in pathways:
        # re.findall('.', s) splits into characters (newlines excluded).
        node_route = [letters.index(ch) + 1 for ch in re.findall('.', pathway)]
        if sim_type == 'Raw Pathways':
            node_route = [1] + node_route
        routes.append(node_route)
    return routes
9f354f5924989831e8688b5b963d284d73f67354
100,122
from typing import MutableMapping
from typing import Any
from typing import Dict


def pop_by_prefix(
    d: MutableMapping[str, Any], key_prefix: str) -> Dict[str, Any]:
  """Returns {k: v for key_prefix+k, v in d.items()} and removes them from d."""
  # Snapshot matching keys first so we never mutate d while iterating it.
  matching = [key for key in d if key.startswith(key_prefix)]
  return {key[len(key_prefix):]: d.pop(key) for key in matching}
5fc30b8d4e7be87a0b568192bfe63fe74bdd774c
100,124
def interpolate(message, data):
    """
    interpolates the given message with given data.
    if data is not a dict, no interpolation will be done.

    :param str message: message to be interpolated.
    :param dict data: data to be used for interpolation.

    :rtype: str
    """
    if isinstance(data, dict):
        return message.format(**data)
    # Non-dict data cannot be keyword-expanded; return the message untouched.
    return message
2ddbd9e00432c5a21e8363b4e620d9f2b07c8b3d
100,125
def reverse_tiles(tiles):
    """
    Reverses tiles in a 16-bit row

    Each tile is a 4-bit nibble; the four nibbles of the low 16 bits are
    emitted in reverse order. Bits above the low 16 are ignored.
    """
    reversed_row = 0
    # Consume nibbles from least to most significant; each becomes the next
    # least-significant nibble of the (shifted) result, reversing the order.
    for shift in (0, 4, 8, 12):
        nibble = (tiles >> shift) & 0xF
        reversed_row = (reversed_row << 4) | nibble
    return reversed_row
2935d449a4a11f2f2dd8130b97cb58abcd79d359
100,127
import re
from datetime import datetime, timedelta

from dateutil.tz import tzutc


def _list_log_files_with_filter(client, resource_group_name, server_name, filename_contains=None,
                                file_last_written=None, max_file_size=None):
    """List all the log files of a given server.

    :param resource_group_name: The name of the resource group that contains the resource.
     You can obtain this value from the Azure Resource Manager API or the portal.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :param filename_contains: The pattern that file name should match.
    :type filename_contains: str
    :param file_last_written: Interger in hours to indicate file last modify time,
     default value is 72.
    :type file_last_written: int
    :param max_file_size: The file size limitation to filter files.
    :type max_file_size: int
    """
    all_files = client.list_by_server(resource_group_name, server_name)

    if file_last_written is None:
        file_last_written = 72
    # Only files modified after this UTC cutoff are kept.
    cutoff = datetime.utcnow().replace(tzinfo=tzutc()) - timedelta(hours=file_last_written)

    selected = []
    for log_file in all_files:
        if log_file.last_modified_time < cutoff:
            continue
        if filename_contains is not None and re.search(filename_contains, log_file.name) is None:
            continue
        if max_file_size is not None and log_file.size_in_kb > max_file_size:
            continue
        # NOTE(review): created_time is deliberately stripped from the
        # returned objects, matching the original behavior.
        del log_file.created_time
        selected.append(log_file)
    return selected
8ae4fddd1a2107d56cb5117d415ae31b42004deb
100,132
import requests


def get_from_url(url, headers):
    """
    Given a url, return the API response.

    Parameters:
        url: string, the url
        headers: dict, the headers to send with the request to the URL

    Returns:
        API response, in dict form

    Raises:
        RuntimeError: when the decoded response contains an 'error' key.
    """
    response = requests.get(url, headers=headers).json()
    if 'error' in response.keys():
        raise RuntimeError('API response error' + str(response))
    return response
5974a4657e95e3bffee8d6dd544b57dcd12c07c0
100,135
def v2_lstrip(iterable, strip_value):
    """Return iterable with strip_value items removed from beginning."""
    result = []
    skipping = True
    for item in iterable:
        # Skip the leading run of strip_value items; keep everything after,
        # including later occurrences of strip_value.
        if skipping and item == strip_value:
            continue
        skipping = False
        result.append(item)
    return result
2e1cfc14791cba04ccdcd5ed1ca88e2f4b975501
100,142
def rhoe(gamma, pres):
    """
    Given the pressure, return (rho * e)

    Parameters
    ----------
    gamma : float
        The ratio of specific heats
    pres : float
        The pressure

    Returns
    -------
    out : float
        The internal energy density, rho e
    """
    # Ideal-gas relation: p = (gamma - 1) * rho * e
    denominator = gamma - 1.0
    return pres / denominator
fdc0bae5b9e2890f9149ef1d68ef9a6d7228788d
100,143
def refine_city_name(location):
    """display User-friendly city name"""
    # Known slug -> display-name pairs; anything else passes through as-is.
    friendly_names = {
        'newyork': 'New York',
        'washingtondc': 'Washington D.C.',
        'sanfrancisco': 'San Francisco',
    }
    return friendly_names.get(location, location)
fcacf82f3177e2b40c977c24585c8ae9af3da06a
100,148
def find_column(df, keyword):
    """Finds the first column that contains a certain keyword.

    Mainly intended to be a utility function for some of the other methods.

    Parameters
    ----------
    df : pd.DataFrame
        The DataFrame containing the columns that need to be searched.
    keyword: str
        The keyword that needs to be present in the desired column.
    """
    # Raises IndexError when no column matches, like the original.
    matches = [column for column in df.columns if keyword in column]
    return matches[0]
c37480feebbac5b50d5919a8a7a09440bb01ec76
100,151
def to_squares(grid):
    """Return a list of squares for the grid"""
    # One bucket per 3x3 square, numbered left-to-right, top-to-bottom.
    squares = [[] for _ in range(9)]
    for row_idx, row in enumerate(grid):
        for col_idx, cell in enumerate(row):
            square_idx = (row_idx // 3) * 3 + col_idx // 3
            squares[square_idx].append(cell)
    return squares
c0f62eb2ea8ac5e64d4c85035e989b96dcf529c0
100,152
import csv


def getPapersFromSource(papersFile="data/papers.csv"):
    """Get the news sources listed in a csv file.

    Args:
        papersFile: a CSV file with a header row (e.g. a source column and
            a url column). Defaults to "data/papers.csv"

    Returns:
        A list of dicts, one per CSV row, keyed by the header column names.
        (The previous docstring claimed a dict was returned; the function
        has always returned a list of row dicts.)
    """
    # newline='' lets the csv module handle embedded newlines correctly,
    # per the csv-module documentation.
    with open(papersFile, 'r', newline='') as csvfile:
        # list(reader) replaces the redundant identity comprehension.
        return list(csv.DictReader(csvfile))
98acd43601104a4be01d21d6164da7516b7bc757
100,159
def _wrap(x, x_min, x_max): """Wrap floating point values into range Parameters ---------- x : ee.Image or ee.Number Values to wrap. x_min : float Minimum value in output range. x_max : float Maximum value in output range. Returns ------- ee.Image or ee.Number Notes ----- This formula is used to mimic the Python modulo operator. Javascript/EE mod operator has the same sign as the dividend, so negative values stay negative. Python mod operator has the same sign as the divisor, so negative values wrap to positive. """ x_range = x_max - x_min return x.subtract(x_min).mod(x_range).add(x_range).mod(x_range).add(x_min) # return np.mod((x - x_min), (x_max - x_min)) + x_min
ee9907436735e108e89ae8fb130946cf5528ba9e
100,162
def get_index_dict(nodes):
    """
    Get a dict denotes relationships between name and index of nodes.

    Args:
        nodes: A json object contain all the nodes in a mxnet json file.

    Returns:
        A dict with format: {node_name: node_index}
    """
    # Later duplicate names overwrite earlier ones, as in a plain loop.
    return {node["name"]: idx for idx, node in enumerate(nodes)}
375bb02727697ea7a40e3cf39070b2935379d8cc
100,163
def from_base36(value, default=None):
    """Reversal of to_base36 that only accepts lowercase.

    Args:
        value: base-36 numeral string; any uppercase letter is rejected.
        default: value returned when *value* is not a valid lowercase
            base-36 numeral.

    Returns:
        The decoded int, or *default* on invalid input.
    """
    # BUG FIX: str.islower() is False for strings with no cased characters,
    # so digit-only numerals like "10" (= to_base36(36)) were wrongly
    # rejected. Comparing against the lowercased form accepts them while
    # still rejecting any uppercase letter.
    if value != value.lower():
        return default
    try:
        return int(value, 36)
    except (ValueError, TypeError):
        return default
c6f674bfcf6132713998fc57fb32cab324bd3704
100,167
def sign_of_weight(x, weight):
    """
    Determines the sign of a weight, based on the value of the first digit
    of the gene.

    :param x: int, range [0, 9]
        First digit of the gene.
    :param weight: int
        Weight determined from the gene.
    :return: int
        Signed weight: positive when x >= 5, negated otherwise.
    """
    return weight if x >= 5 else -weight
b48a92e4b04d251f2b8768e38fab66112d3f4d2d
100,168
from typing import Dict


def format_commit_message(
        git_web_url: str,
        commit_hash: str,
        commit_message: str
) -> Dict:
    """Return formatted commit message as Slack's BlockKit section."""
    # Link text shows the short (6-char) hash; the URL uses the full hash.
    link = (
        f'* {commit_message} | '
        f'<{git_web_url}/commit/{commit_hash}|{commit_hash[0:6]}>'
    )
    return {
        'type': 'section',
        'text': {
            'type': 'mrkdwn',
            'text': link,
        },
    }
f521e86522449d33717160a6e3c40137395733c9
100,169
def make_url(nothing):
    """
    Get the URL of a given 'nothing'.
    """
    base = 'http://www.pythonchallenge.com/pc/def/linkedlist.php'
    return '{}?nothing={:d}'.format(base, nothing)
f55c3fe4e3347b15f77435272722a34f90f2778a
100,170
import random


def random_dependency_generator(columns, n=10):
    """
    Generates n random dependencies from a list of columns.

    Returns a dictionary with rhs's as keys, each associated with a list of
    lhs combinations, for a total of <= n dependencies. The lhs-combinations
    are neither randomly distributed nor exactly n in number; the way the
    random length is drawn favours short lhs-combinations over long ones.

    Keyword attributes:
    columns -- list of column-names
    n -- int, indicating how many dependencies shall be returned
    """
    dependencies_dict = {}
    for _ in range(n):
        # at least 2 columns are necessary to form a dependency
        sample_size = random.randint(2, len(columns))
        combination = random.sample(columns, sample_size)
        rhs = combination.pop()
        combination.sort()
        # Deduplicate lhs lists per rhs.
        lhs_list = dependencies_dict.setdefault(rhs, [])
        if combination not in lhs_list:
            lhs_list.append(combination)
    return dependencies_dict
bbdab053c582438560934f1139aafec2a3f1f4f3
100,171
def area_width_normalized_label(var, area_norm=True, width_norm=True):
    """Returns a label for histograms normalized by any combination of
    area/width. For both area and width, the label should be 1/N*dN/d(var).
    """
    # Dispatch on the (area, width) pair; bool() preserves truthiness
    # semantics for non-bool arguments.
    labels = {
        (True, True): '#frac{dN}{Nd(' + var + ')}',
        (True, False): '#frac{#Delta(' + var + ')dN}{Nd(' + var + ')}',
        (False, True): '#frac{dN}{d(' + var + ')}',
        (False, False): '#frac{#Delta(' + var + ')dN}{d(' + var + ')}',
    }
    return labels[(bool(area_norm), bool(width_norm))]
354e53e2543b9eddd45aff51a5e0f8ffc1fccb49
100,176
def get_object_or_none(klass, *args, **kwargs):
    """
    Uses get() to return an object, or returns None if the object
    does not exist.
    (Doc fix: the previous docstring claimed a Http404 was raised; the
    function has always returned None on DoesNotExist.)

    klass may be a Model. All other passed
    arguments and keyword arguments are used in the get() query.

    Note: Like with get(), an MultipleObjectsReturned will be raised if more
    than one object is found.
    """
    queryset = klass.objects
    try:
        return queryset.get(*args, **kwargs)
    except queryset.model.DoesNotExist:
        return None
3b6eaff651427122a862bd35dc62b598ec5312fd
100,178
def check_list(nlist):
    """Check to see if the list only contains [0-9]"""
    # Vacuously True for an empty list, matching the original loop.
    return all(0 <= digit <= 9 for digit in nlist)
1933d2c5a4641c45cbcf5516af30d89ca259a784
100,179
def logstashIndex(date):
    """
    Return the logstash index name for the given date

    Logstash index names are in the format: 'logstash-YYYY.MM.DD'
    """
    # The literal prefix contains no format directives, so it can live
    # inside the strftime pattern.
    return date.strftime("logstash-%Y.%m.%d")
1d2b953d04bd500b6571bb78ca5cd9c0162221db
100,180
import tarfile
import shutil


def tf_model_to_tar(tf_model, run_id: int):
    """
    Saves tensorflow model as compressed file

    :param run_id: current Metaflow run id
    :param tf_model: tensorflow model
    :return: name of the local .tar.gz archive
    """
    model_name = "intent-model-{}/1".format(run_id)
    local_tar_name = 'model-{}.tar.gz'.format(run_id)

    # Persist the model to a local directory, then archive that directory.
    tf_model.save(filepath=model_name)
    with tarfile.open(local_tar_name, mode="w:gz") as archive:
        archive.add(model_name, recursive=True)

    # Drop the uncompressed model directory; only the archive is kept.
    shutil.rmtree(model_name.split('/')[0])
    return local_tar_name
8ef02a2dacd161aa2a08f15a1ff00c30474cc73f
100,182
import torch


def load_checkpoint(path):
    """Load model checkpoint and optimizer state from file.

    The checkpoint is read from "<path>.pt" onto the CPU and must contain
    the keys "epoch", "state_dict" and "optimizer".
    """
    file_path = f"{path}.pt"
    state = torch.load(file_path, map_location=torch.device('cpu'))
    print(f"Loaded checkpoint from {file_path} (epoch {state['epoch']})")
    return state["epoch"], state["state_dict"], state["optimizer"]
a61d053de55b54c1fba275d21b3a35091cb30887
100,186