content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def allow_cors(response):
    """Attach permissive CORS headers to an outgoing response.

    Allows any origin and the Content-Type request header on every
    response this server sends, then returns the response unchanged.
    """
    cors_headers = (
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Headers', 'Content-Type'),
    )
    for header_name, header_value in cors_headers:
        response.headers.add(header_name, header_value)
    return response
a0c9765586bcd089ab235e88565a3f82fbb1d35e
70,126
def convert_link(link):
    """Convert the D3 JSON link data into a Multinet-style record."""
    source = link["source"]
    target = link["target"]
    return {
        "_from": "characters/{}".format(source),
        "_to": "characters/{}".format(target),
        "value": link["value"],
    }
3462c9079e72d60d4882b7c1067a76551aa90577
70,130
import math


def get_group_size(total_items, total_groups):
    """Return the group size (ceiling of total_items / total_groups).

    :param total_items: total number of items to distribute
    :param total_groups: number of groups to distribute them into
    :return: smallest integer group size that covers all items

    Fix: the original ``math.ceil(float(a) / b)`` goes through a float and
    loses precision for integers larger than 2**53; integer ceiling
    division (``-(-a // b)``) is exact for any magnitude.
    """
    return -(-total_items // total_groups)
684cd861913cd14051725c668b473c74b68bfd39
70,134
def compatibility_in_test(a, b):
    """Return True when ``a`` is contained in ``b`` (membership test)."""
    contained = a in b
    return contained
c55bc1cf46de5cfe2f7b63e811d316845af14545
70,139
def normalize_audios(audios, dbfs_list, target_dbfs):
    """Normalize Pydub audio segments to a common loudness.

    :param audios: A list of Pydub audio segment Objects.
    :param dbfs_list: A list of current dBFS values as float, parallel to audios.
    :param target_dbfs: A float dBFS value to normalize to.
    :return: Returns a list of normalized Pydub audio segment Objects.
    """
    # Gain for each segment is the distance from its current level to the target.
    return [
        audio.apply_gain(target_dbfs - dbfs_list[idx])
        for idx, audio in enumerate(audios)
    ]
c35689012e809f3c6b6d9fac31773a57e0624950
70,144
def balanced_accuracy(sensitivity, specificity, factor=0.5):
    """Balanced accuracy.

    Wikipedia entry https://en.wikipedia.org/wiki/Accuracy_and_precision

    Parameters
    ----------
    sensitivity : float in [0, 1]
        sensitivity.
    specificity : float in [0, 1]
        specificity.
    factor : float in [0, 1]
        Balancing factor multiplying specificity; (1 - factor) multiplies
        sensitivity. Default value 0.5.

    Returns
    -------
    bacc : float
        Balanced accuracy
    """
    weighted_sensitivity = (1 - factor) * sensitivity
    weighted_specificity = factor * specificity
    return float(weighted_sensitivity + weighted_specificity)
dad17e6a56f6a839759633936d44496243f6b3be
70,146
def is_odd(number: int) -> bool:
    """Test if a number is an odd number.

    :param number: the number to be checked.
    :return: True if the number is odd, otherwise False.

    >>> is_odd(-1)
    True
    >>> is_odd(-2)
    False
    >>> is_odd(0)
    False
    >>> is_odd(3)
    True
    >>> is_odd(4)
    False
    """
    remainder = number % 2
    return remainder != 0
dd39c7c82b8f724661562c4391a0a9d2dfdc0e9b
70,147
import math


def logistic_decay(d: float, a: float):
    """Logistic decay: a smooth version of the window decay.

    f(d) = 1 - 1 / (1 + exp(-d + a)) = exp(-d + a) / (1 + exp(-d + a))

    :param d: distance (non-negative finite value)
    :param a: the x-value of the sigmoid's midpoint
    :return: decay

    Fix: evaluated in a numerically stable way — the naive
    ``exp(-d + a)`` overflows ``math.exp`` once ``a - d`` exceeds ~709.
    """
    x = a - d
    if x >= 0:
        # exp(-x) <= 1 here, so no overflow; result approaches 1 for large x.
        return 1.0 / (1.0 + math.exp(-x))
    # x < 0: exp(x) < 1, also safe; result approaches 0 for very negative x.
    z = math.exp(x)
    return z / (1.0 + z)
fbd84e5e6b4d45fd6e1b8d828404295f3a24a247
70,148
import base64


def encode_data(non_encoded_data: str) -> str:
    """Base64-encode a UTF-8 string.

    :param str non_encoded_data: The non encoded data.
    :return: encoded_data string
    """
    raw_bytes = non_encoded_data.encode('UTF-8')
    encoded_bytes = base64.b64encode(raw_bytes)
    return encoded_bytes.decode('UTF-8')
20ae4c69a82964590db7ff9ed71e34b67a707eed
70,156
def map_documentation(x):
    """Map a SQLAlchemy Documentation model object into a generic map.

    :param x: SQLAlchemy Documentation model.
    :return: Generic map with path, name and order for the REST API.
    """
    return {field: getattr(x, field) for field in ("path", "name", "order")}
1d901ef97eacc6b65f51e1a0e75710c284501555
70,166
import hashlib


def hash_file(filepath: str, chunk_size: int = 65536) -> bytes:
    """Calculate the SHA512 of a file.

    :param filepath: Path of the file to hash
    :param chunk_size: Number of bytes per chunk of file to hash
    :return: File hash in a :class:`bytes` object
    """
    digest = hashlib.sha512()
    with open(filepath, 'rb') as src:
        while True:
            chunk = src.read(chunk_size)
            if not chunk:  # EOF
                break
            digest.update(chunk)
    return digest.digest()
cc03aaf71b68e3a7d85a69a5bef9610cc12e9dec
70,169
def merge_sort(arr):
    """Sort a list in place with merge sort and return it.

    time complexity: O(n*logn)
    space complexity: O(n)

    :param arr: list
    :return: the same list, sorted ascending
    :raises TypeError: when ``arr`` is not a list
    """
    if not isinstance(arr, list):
        raise TypeError
    if len(arr) <= 1:
        return arr
    middle = len(arr) // 2
    left_half = merge_sort(arr[:middle])
    right_half = merge_sort(arr[middle:])
    out = li = ri = 0
    # Merge the two sorted halves back into arr.
    while li < len(left_half) and ri < len(right_half):
        if left_half[li] < right_half[ri]:
            arr[out] = left_half[li]
            li += 1
        else:
            arr[out] = right_half[ri]
            ri += 1
        out += 1
    # Copy whichever half still has elements remaining.
    if li < len(left_half):
        arr[out:] = left_half[li:]
    if ri < len(right_half):
        arr[out:] = right_half[ri:]
    return arr
7297e20fa3a01b85fa7f5cebef1921e9c17c7be9
70,172
def calc_sample_freq(time):
    """Calculate the sampling frequency of the ECG signal.

    Args:
        time: array of time values (assumed uniformly spaced)

    Returns:
        fs: sampling frequency of ECG
    """
    sample_period = time[1] - time[0]
    return 1 / sample_period
671e53d6a41594ef1e33e8dd34ce068fe010b8f1
70,173
import re


def get_year(title):
    """Find the last occurrence of a 4-digit number within parentheses.

    Returns that as the suspected year of this issue's publication,
    or None when no match is found.
    """
    years = re.findall(r'\((\d{4})\)', title)
    if not years:
        return None
    return years[-1]
86d7e11636aea9a74ad010ede21d90a471d11681
70,175
import torch


def box_area(boxes: torch.Tensor) -> torch.Tensor:
    """Compute the length ("area") of 1-D bounding boxes.

    Args:
        boxes (Tensor[N, 2]): boxes in (start, end) format with
            ``0 <= start < end``.

    Returns:
        Tensor[N]: the area for each box
    """
    starts = boxes[:, 0]
    ends = boxes[:, 1]
    return ends - starts
f4ffd0043a1959608fd51ee795a0ed3f338e6083
70,177
def dms_to_dd(dms):
    """Convert EXIF (degrees, minutes, seconds) coordinates to decimal degrees.

    Each component is a (numerator, denominator) rational pair.
    https://en.wikipedia.org/wiki/Geographic_coordinate_conversion
    """
    degrees, minutes, seconds = (
        float(dms[i][0]) / float(dms[i][1]) for i in range(3)
    )
    return degrees + minutes / 60.0 + seconds / 3600.0
691c576f3e6ab53cf295578b2b83c74006a9a861
70,179
def split(message: str, step: int) -> list[str]:
    """Split a message into chunks of a given length.

    The last chunk can be shorter.

    Args:
        message (str): the message to process.
        step (int): the length of every chunk.

    Returns:
        list[str]: the list of chunks.
    """
    starts = range(0, len(message), step)
    return [message[start:start + step] for start in starts]
fa8172d9476f36096aec51898f6254ec8c137570
70,182
def hyphen_range(s):
    """Expand a string such as "a-b,c-d,f" into a sorted list of integers.

    Each comma-separated token is either a single number or an inclusive
    "a-b" range; duplicates across tokens are removed.

    :raises SyntaxError: when a token contains more than one hyphen.
    """
    s = "".join(s.split())  # removes white space
    numbers = set()
    for token in s.split(','):
        parts = token.split('-')
        if len(parts) == 1:
            numbers.add(int(parts[0]))
        elif len(parts) == 2:
            low, high = int(parts[0]), int(parts[1])
            numbers.update(range(low, high + 1))
        else:
            raise SyntaxError("hash_range is given its arguement as "
                              + s
                              + " which seems not correctly formated.")
    return sorted(numbers)
72991f1a1ce4e8d1e70cd8c9c1ba5bee2b0a002d
70,186
import requests


def login(connection, verbose=False):
    """Authenticate a user and create an HTTP session on the web server.

    The user's MicroStrategy sessions are stored server-side; this request
    returns an authorization token (X-MSTR-AuthToken) which must be
    submitted with subsequent requests. The ``loginMode`` in the request
    body selects the authentication mode: Standard (1), Anonymous (8), or
    LDAP (16) — modes must be enabled on the deployment via the System
    Administration REST APIs.

    :param connection: MicroStrategy REST API connection object
    :param verbose: Verbosity of request response; defaults to False
    :return: Complete HTTP response object
    """
    credentials = {
        'username': connection.username,
        'password': connection.password,
        'loginMode': connection.login_mode,
        'applicationType': connection.application_code,
    }
    response = requests.post(url=connection.base_url + '/auth/login',
                             data=credentials,
                             verify=connection.ssl_verify)
    if verbose:
        print(response.url)
    return response
befa855d52e444d8ff414c39a11ddd9acff2fe89
70,198
def get_evening_cars(morning_cars, requested, returned, max_cars):
    """Compute the number of cars available in the evening.

    Cars rented during the day (capped at what was available) leave,
    returned cars come back, and the lot holds at most ``max_cars``.
    """
    remaining_after_rentals = max(morning_cars - requested, 0)
    return min(remaining_after_rentals + returned, max_cars)
6637c25c0b47ac9f3a5010591b1ec457df0250df
70,199
def create_policy(env, policy_type, policy_weights_file=None):
    """Create and return a policy sized for the given environment.

    Args:
        env: gym.Environment. Gym environment.
        policy_type: policies.X. Policy type.
        policy_weights_file: str. Path to policy weights file.

    Return:
        policies.policy.
    """
    policy = policy_type(
        input_size=env.observation_space.shape[0],
        output_size=env.action_space.shape[0],
        action_high=env.action_space.high,
        action_low=env.action_space.low,
    )
    if policy_weights_file:
        policy.load_model(policy_weights_file)
    return policy
15cb5d1b0621ccae01034f9f2ddc49f42830ca9c
70,209
def equals_list2dict(equals_list):
    """Convert a list of "key=value" strings (separated by =) into a dict."""
    pairs = (entry.split('=') for entry in equals_list)
    return dict(pairs)
5f9565e91ef988e5139ee8d469ab782dbe4d7cdb
70,211
def _rescale_forecast(forecast, history, cols_rescale=['forecast', 'forecast_lower', 'forecast_upper']): """Internal function to return a rescaled forecast resulting from a scaled history. Parameters ---------- history: the history that generated the forecast Returns ------- The forecast pd dataframe with the forecast columns rescaled""" hist_mean = history['actual'].mean() hist_std = history['actual'].std() for col_rescale in cols_rescale: forecast[col_rescale] = forecast[col_rescale] * hist_std + hist_mean return forecast
903cc49584ade14565d728d1ca2ed5d0200a9ec5
70,212
import random


def my_agent(observation, configuration):
    """Connect-X agent: choose a uniformly random non-full column.

    Kaggle runs this agent with the conda_env/conda_env.yml environment;
    only pytorch, numpy, scipy, gym and the Python 3.6 standard library
    may be imported.

    Args:
        observation: gameboard observation from a kaggle_environments
            connect-x env (env.observation). ``observation.board`` is a flat
            row-major list where 0 marks an empty cell and 1/2 are player
            counters; ``observation.mark`` is this agent's counter.
        configuration: env.configuration with keys (defaults):
            'timeout': 5, 'columns': 7, 'rows': 6, 'inarow': 4,
            'steps': 1000.

    Returns:
        The agent's choice of move as a column index (int).
    """
    # A column is playable when its top cell (first board row) is empty.
    playable_columns = [
        col for col in range(configuration.columns)
        if observation.board[col] == 0
    ]
    return random.choice(playable_columns)
77f6a7581b703da35f10e495bb92eb15df8a55cd
70,214
from typing import Dict from typing import Any def _is_address_info_available(event: Dict[Any, Any]) -> bool: """Check if address info needs to be added to formatted event. Args: event: the raw event from data source. Returns: bool to indicate that address info is available. """ keys_exist = all(k in event for k in ( 'hashedFirstName', 'hashedLastName', 'countryCode', 'zipCode')) if not keys_exist: return False values_exist = all((event['hashedFirstName'], event['hashedLastName'], event['countryCode'], event['zipCode'])) if not values_exist: return False return True
ffd8d158af98f5ac1437e1a24a50bbdb7bdc3fc7
70,217
import poplib


def connect_pop3(mailserver, username, password):
    """Connect and authenticate to a POP3 mailserver; return the connection handle."""
    connection = poplib.POP3(mailserver)
    connection.user(username)
    connection.pass_(password)
    return connection
09a6be101476d50761b13f61304c967feec0a741
70,219
def _new_obj(cls, kwargs, args): """Maps kwargs to cls.__new__""" return cls.__new__(cls, *args, **kwargs)
afe9af8cb7fabe40209e67916b07fb5a58bf4b90
70,221
import ctypes


def print_structure(structure, indent=""):
    """Return a readable multi-line string representation of a ctypes.Structure.

    Nested structures and unions are rendered recursively with one extra
    space of indentation; anonymous nested structures are labelled
    "- Struct" and unions "- Union". The top-level call's output begins
    with a leading newline.
    """
    # Top-level output starts with an empty first line; nested calls do not.
    lines = [""] if not indent else []
    for name, *_ in structure._fields_:
        value = getattr(structure, name)
        if isinstance(value, ctypes.Structure):
            nested = print_structure(value, indent + " ")
            if name in structure._anonymous_:
                entry = "{}- Struct\n{}".format(indent, nested)
            else:
                entry = "{}* {}:\n{}".format(indent, name, nested)
        elif isinstance(value, ctypes.Union):
            entry = "{}- Union\n{}".format(indent, print_structure(value, indent + " "))
        else:
            entry = f"{indent}* {name}: {value}"
        lines.append(entry)
    return "\n".join(lines)
c64e5db3270bff26bb2f6d41eea3934ab61bf52e
70,222
def fromMel(x):
    """Convert ``x`` from mel-scale to Hz."""
    exponent = x / 2595.0
    return 700 * (10 ** exponent - 1)
456d23ffe4ef5714f86d4bb9c1646d1d81f7bfa3
70,223
def manhattan_cost(curr, end):
    """Estimate the cost from curr (x0, y0) to end (x1, y1) via Manhattan distance."""
    curr_x, curr_y = curr
    end_x, end_y = end
    dx = abs(curr_x - end_x)
    dy = abs(curr_y - end_y)
    return dx + dy
ad0687b3859bca592a24061fbdfaf128db60881a
70,226
def searchTitle(apps, title):
    """Search for an app by its title; return its key, or None if absent."""
    for key, info in apps.items():
        if info['title'] == title:
            return key
    return None
73dd0c039636410d17c49d6aa887b7bd260231c5
70,228
def getattr_recursive(variable, attribute):
    """Get attributes recursively, following dots in ``attribute``."""
    for part in attribute.split('.'):
        variable = getattr(variable, part)
    return variable
e91401be53c287f392a123ec3a88a19c0f4b8095
70,238
from bs4 import BeautifulSoup
import re


def extract_videos(html):
    """Parse the given html and return (title-bytes, link) for every movie found."""
    soup = BeautifulSoup(html, 'html.parser')
    watch_link = re.compile(r'/watch\?v=')
    anchors = soup.find_all('a', 'yt-uix-tile-link', href=watch_link)
    return [(anchor.text.encode('utf-8'), anchor.get('href'))
            for anchor in anchors]
bed153a7058b83881abb40c9f9594c04a9bde41a
70,243
def scapy_layers_dot11_Dot11_essid(self):
    """Return the payload of the SSID Dot11Elt (ID 0) if it exists, else None."""
    ssid_elt = self.find_elt_by_id(0)
    if ssid_elt:
        return ssid_elt.info
    return None
c1634d01def034df8bbbff8cf02c207d279c8baa
70,255
def towers_of_hanoi(n):
    """Solution to exercise C-4.14 (Towers of Hanoi).

    Move n disks from peg a to peg c using peg b as temporary storage,
    one at a time, never placing a larger disk on a smaller one.
    Returns the final contents of peg c (largest disk first).
    """
    peg_a = list(range(n, 0, -1))
    peg_b = []
    peg_c = []

    def move(count, source, destination, spare):
        # Base case: nothing left to move.
        if count <= 0:
            return
        # Park the top count-1 disks on the spare peg.
        move(count - 1, source, spare, destination)
        # Move the exposed largest disk to the destination.
        destination.append(source.pop())
        # Bring the parked disks over.
        move(count - 1, spare, destination, source)

    move(n, peg_a, peg_c, peg_b)
    return peg_c
9d8186d4f6ef85102c89ab7b3d79dca17307dba1
70,256
import requests


def get_url_json_to_dict(url):
    """Request a JSON endpoint and return the decoded dictionary.

    :param url: API JSON endpoint
    :return: A dictionary of results.
    """
    response = requests.get(url)
    return response.json()
fa74692b0a979d82f23c33acf4db96ab195eca8e
70,263
def parse_input(input):
    """Separate an interface string into its name and number.

    ex. Gi1/0/1 becomes ('Gi', '1/0/1') or GigabitEthernet1/0/1 becomes
    ('GigabitEthernet', '1/0/1')
    """
    name = input
    number = ''
    for position, character in enumerate(input):
        if character.isdigit():
            # Split at the first digit.
            name = input[:position]
            number = input[position:]
            break
    return name.strip(' '), number.strip(' ')
7c3cc5d759ce1235c9a1d5258c3981d91fddc5dd
70,264
def is_user_type_authorized(unauthorized_list, user_type):
    """Check user type authorization.

    Returns True if ``user_type`` appears in the unauthorized list.
    """
    return user_type in unauthorized_list
a0a5fd02793b414d0fa4ac6ab37ef431692b8c25
70,268
def cum_sum(seq):
    """Cumulative sums of ``seq``, starting with 0 (so the result has len(seq)+1 entries)."""
    totals = [0]
    running = 0
    for value in seq:
        running += value
        totals.append(running)
    return totals
31db5695195831d2ac69b9ac3e60d86615aca0f2
70,272
def get_next_power_2(n):
    """Return the largest power of 2 strictly smaller than ``n``.

    Returns 1 when no such power exists (i.e. ``n <= 1``). For an exact
    power of two, the next power down is returned (e.g. 8 -> 4).

    Fix: use floor division so the result is an ``int``; the original
    ``power / 2`` returned a float under Python 3.
    """
    power = 1
    while power < n:
        power *= 2
    if power > 1:
        return power // 2
    return 1
a1f1e9ce3acb03d8e8dcd48cf7b53ce4e4d1dddb
70,275
def type_to_display(type_name):
    """Convert an Avro fully qualified type name (with dots) to a display name."""
    # Keep only the component after the final dot (the whole name if no dot).
    return type_name.rpartition(".")[2]
5f9bac0a6b8c54671b167b123f44bbf965176a91
70,277
def convert_valid_prob_float(value, default):
    """Check and convert a value to a valid probability in [0, 1].

    :param value: probability value supposed to be 0 - 1 range
    :type value: float
    :param default: default value if any error
    :type default: float
    :return: valid probability value
    :rtype: float

    Fix: also catch TypeError so non-numeric inputs such as ``None`` or a
    list fall back to ``default`` instead of raising.
    """
    try:
        value = float(value)
    except (TypeError, ValueError):
        return default
    # accept probabilities in [0.0, 1.0] only
    if value < 0 or value > 1:
        return default
    return value
d9bbfa43c66fc2959b36e7a2ae573177019edfa6
70,280
import base64


def base64_decode(encoded):
    """Inverse of `base64_encode`; tolerates embedded newlines in the input."""
    raw = encoded.encode('ascii', 'surrogateescape')
    joined = b''.join(raw.splitlines())
    return base64.b64decode(joined, validate=True)
3143909a002926efacf7f11d5a92dfd964c025ba
70,283
import yaml


def parse_yaml_document_to_dict(yaml_doc):
    """Parse a YAML document into a dict.

    Args:
        yaml_doc: a str (in Python 2) or bytes (in Python 3) containing
            YAML configuration data.

    Returns:
        A dict of the key/value pairs from the given YAML document
        (an empty dict for an empty document).

    Raises:
        yaml.YAMLError: If there is a problem parsing the YAML document.
    """
    parsed = yaml.safe_load(yaml_doc)
    return parsed if parsed else {}
378eb4ecfda8878cbf4889edccc845f2af863000
70,285
def valid_phage_titer(titer, o):
    """Decide whether a phage titer stayed valid and, if not, why.

    Args:
        titer (float): Titer series to decide on.
        o (Options): Options object for lookup.

    Returns (gen, tendency): tendency is
        -1 if the phage titer was lower than mincp at some transfer point,
        0 if the phage titer was valid the whole time,
        1 if the phage titer was higher than maxcp at some transfer point.
    """
    generation = 1
    # Walk transfer points while the titer stays strictly inside bounds.
    while o.min_cp < titer[(generation * (o.tsteps + 1)) - 1] < o.max_cp:
        if generation == o.epochs:
            return generation, 0
        generation += 1
    # Out of bounds at this generation: report which bound was crossed.
    if o.min_cp > titer[(generation * (o.tsteps + 1)) - 1]:
        return generation, -1
    return generation, 1
d995917d5e0abcee4e10bc8209f8c3fee4ad2e2b
70,286
def encode_value(value, col_sep, encl='"'):
    """Stringify ``value`` and enclose it with ``encl`` when it contains the column separator."""
    text = value if isinstance(value, str) else str(value)
    if col_sep in text:
        text = '{encl}{value}{encl}'.format(encl=encl, value=text)
    return text
35894d8a7a8524fecb5d4351ffa88bda38a4fb6c
70,293
def torch_to_numpy(tensor):
    """Convert a pytorch tensor to a numpy array, detaching if it tracks gradients."""
    source = tensor.detach() if tensor.requires_grad else tensor
    return source.numpy()
ba2f4a8d060987879552b6145b018bef526f5b33
70,295
def concatenate_residue_labels(labels):
    """Concatenate residue labels row by row; this function is a generator.

    Parameters
    ----------
    labels : numpy array of shape (N, M)
        N rows of M label columns to be concatenated.
    """
    joiner = ''.join
    return (joiner(row) for row in labels)
679f408f2d6e4c37831fd1579963299f0dd13464
70,300
def filt_tracks_by_intensities(df_tracks, df_ints_by_track, int_type, bounds):
    """Filter tracks whose intensity lies strictly between the given bounds.

    Args:
        df_tracks (Pandas dataframe): track data (either color channel).
        df_ints_by_track (Pandas dataframe): intensities by track (typically
            the output of compute_track_intensities), with a 'track_ID' column.
        int_type (string): column name in df_ints_by_track holding the
            intensity values used for filtering.
        bounds (tuple): (lower, upper) exclusive intensity bounds.

    Returns:
        df_ints_filt (Pandas dataframe): filtered df_ints_by_track.
        df_tracks_filt (Pandas dataframe): filtered df_tracks.
    """
    intensities = df_ints_by_track[int_type]
    in_range = (intensities > bounds[0]) & (intensities < bounds[1])
    df_ints_filt = df_ints_by_track.loc[in_range].copy()
    keep_ids = df_ints_filt['track_ID'].unique()
    df_tracks_filt = df_tracks[df_tracks['track_ID'].isin(keep_ids)].copy()
    return df_ints_filt, df_tracks_filt
bbabdc62fb017a66d6f374544af5291c48d68167
70,303
from typing import OrderedDict


def getitemsets(schema):
    """Return an ordered dictionary of itemsets keyed by their 'name'."""
    itemsets = schema['result']['item_sets']
    return OrderedDict((itemset['name'], itemset) for itemset in itemsets)
f10b269b375171c78563893dff4c6c58be11be7a
70,312
def is_int_in_inclusive_range(value, min_value, max_value):
    """Is the given value an int in the range [min_value, max_value]?

    :param value: value being checked
    :type value: Any
    :param min_value: minimum allowed int
    :type min_value: int
    :param max_value: maximum allowed int
    :type max_value: int
    :return: True if the value is an int in the given range, False otherwise
    :rtype: bool
    """
    if not isinstance(value, int):
        return False
    return min_value <= value <= max_value
a847e49590b8bfbd034bb70bd657428dd69542ce
70,316
import torch


def get_rays_tourism(H, W, kinv, pose):
    """Generate camera rays for a phototourism image.

    Phototourism camera intrinsics are defined by H, W and kinv.

    Args:
        H: image height
        W: image width
        kinv (3, 3): inverse of camera intrinsic
        pose (4, 4): camera extrinsic

    Returns:
        rays_o (H, W, 3): ray origins
        rays_d (H, W, 3): unit ray directions
    """
    device = kinv.device
    rows, cols = torch.meshgrid(torch.arange(0., H, device=device),
                                torch.arange(0., W, device=device))
    # Homogeneous pixel coordinates (x, y, 1).
    pixels = torch.stack([cols, rows, torch.ones_like(cols)], dim=-1)
    cam_dirs = torch.matmul(pixels, kinv.T)  # (H, W, 3)
    world_dirs = torch.matmul(cam_dirs, pose[:3, :3].T)
    rays_d = world_dirs / torch.norm(world_dirs, dim=-1, keepdim=True)  # (H, W, 3)
    # All rays share the camera center, broadcast to the image grid.
    rays_o = pose[:3, -1].expand_as(rays_d)  # (H, W, 3)
    return rays_o, rays_d
06111ae31773231dad875922f8e0a764a163e6eb
70,320
def ficha(nome='<Desconhecido>', gols=0):
    """Player record sheet.

    :param nome: player's name (defaults to an "unknown" placeholder).
    :param gols: number of goals scored by the player.
    :return: a (name, goals) tuple.
    """
    return nome, gols
10d887b805a028e8e6016f3293060c2603dd33fc
70,321
def matches_any(cf_stack_name: str, stack_refs: list):
    """Check whether the stack name matches any of the stack references."""
    cf_stack_name = cf_stack_name or ""  # ensure cf_stack_name is a str
    # Split "name-version" on the last hyphen; no hyphen means no version.
    name, sep, version = cf_stack_name.rpartition("-")
    if not sep:
        name, version = cf_stack_name, ""
    return any(ref.matches(name, version) for ref in stack_refs)
17f33283983aa04e6162c86c837093f9c5dfddf5
70,323
from typing import Tuple
from typing import Union


def multiple_cnn_size(
    input: Tuple[int, int],
    kernel: Union[int, Tuple[int, int]],
    padding: Union[int, Tuple[int, int]] = 0,
    stride: Union[int, Tuple[int, int]] = 1,
    n_convs: int = 1,
) -> Tuple[int, int]:
    """Return the output size after stacking ``n_convs`` identical conv layers.

    This function can replace cnn_size.

    Args:
        input: Size of the input image.
        kernel: Kernel size; an int means a square kernel.
        padding: Padding size.
        stride: Stride.
        n_convs: Number of convolutions applied in sequence.

    Returns:
        The output size.
    """
    def as_pair(v):
        return (v, v) if isinstance(v, int) else v

    kernel = as_pair(kernel)
    padding = as_pair(padding)
    stride = as_pair(stride)

    def out_size(i, k, p, s):
        # Closed form of applying (i - k + 2p)/s + 1 repeatedly, n_convs times.
        total = i
        for j in range(n_convs):
            s_j = s ** j
            total += s_j * (s + 2 * p - k)
        return total / (s ** n_convs)

    width = out_size(input[0], kernel[0], padding[0], stride[0])
    height = out_size(input[1], kernel[1], padding[1], stride[1])
    return int(width), int(height)
0b89380f1153d61f3b561e463c0d50116902bd3b
70,328
def get_gt_web(best_matches, query_gt_dict):
    """Map each best-matching file to its ground-truth label (for deployment use).

    Args:
        best_matches: list of best matching file paths.
        query_gt_dict: dict indicating the positive and negative examples
            for the query.

    Returns:
        list of labels aligned with best_matches:
        1 (positive), -1 (negative) or 0 (unknown).
    """
    labels = []
    for match in best_matches:
        # Compare only the basename against the ground-truth lists.
        file_name = "{}".format(match.split("/")[-1])
        if file_name in query_gt_dict['positive']:
            labels.append(1)
        elif file_name in query_gt_dict['negative']:
            labels.append(-1)
        else:
            labels.append(0)
    return labels
71d7a39428da200bce01eea5118d6ec38e6716a7
70,329
def is_reply(tweet):
    """Determine whether the tweet is a reply to another tweet.

    :param tweet: Tweepy tweet object to inspect.
    """
    return tweet.in_reply_to_screen_name is not None
ab8c3947dcac1cad96e34d14ca7529a16ad06855
70,330
def dump_address_pair(pair):
    """Dump a (name, address) pair in a canonicalized form."""
    name, address = pair[0], pair[1]
    if not name:
        return address
    return '"' + name + '" <' + address + '>'
37876ae2b6233b9d32f36a1b87cd96d81fbecb87
70,337
import warnings def _recalculate_channel_slope(z_up, z_down, dx, threshold=1e-4): """Recalculate channel slope based on elevation. Parameters ---------- z_up : float Upstream elevation. z_down : float Downstream elevation. dz : float Distance. Examples -------- >>> from landlab.components.network_sediment_transporter.network_sediment_transporter import _recalculate_channel_slope >>> import pytest >>> _recalculate_channel_slope(10., 0., 10.) 1.0 >>> _recalculate_channel_slope(0., 0., 10.) 0.0001 >>> with pytest.warns(UserWarning): ... _recalculate_channel_slope(0., 10., 10.) 0.0 """ chan_slope = (z_up - z_down) / dx if chan_slope < 0.0: chan_slope = 0.0 warnings.warn( "NetworkSedimentTransporter: Negative channel slope encountered.", UserWarning, ) elif chan_slope < threshold: chan_slope = threshold return chan_slope
e67938d93a3496a02b78ea5c510fefe790aaac27
70,339
def eselect2(g, x, z):
    """For use in compute_IDC.

    Select all edges in ``g`` except those incoming to ``x`` and those
    outgoing from ``z``.

    :param g: graph
    :param x: nodes
    :param z: nodes
    :return: The set of edge indices that are neither incoming to x nor
        outgoing from z.
    """
    all_edges = set(g.es.select().indices)
    x_vertices = g.vs.select(name_in=x).indices
    z_vertices = g.vs.select(name_in=z).indices
    incoming_to_x = set(g.es.select(_to_in=x_vertices).indices)
    outgoing_from_z = set(g.es.select(_from_in=z_vertices).indices)
    return all_edges - incoming_to_x - outgoing_from_z
de1e2a88bb8139b70b532890522bb64324c7b8ce
70,341
def median(lst, lst_size=None):
    """Compute the median of ``lst``.

    When ``lst_size`` is given and exceeds ``len(lst)``, the list is
    treated as a virtual list of that size whose missing entries sort
    below the real values (presumably zeros — the even straddling case
    returns half the smallest real element, and deeper padding returns 0).
    """
    ordered = sorted(lst)
    if lst_size:
        n = lst_size
        missing = n - len(ordered)
        # Median index in the virtual list, shifted past the padding.
        mid = (n - 1) // 2 - missing
        if mid < 0:
            # Median falls inside the padding region.
            if mid == -1 and not n % 2:
                return ordered[0] / 2.
            return 0
    else:
        n = len(ordered)
        mid = (n - 1) // 2
    return ordered[mid] if n % 2 else (ordered[mid] + ordered[mid + 1]) / 2.
c11359f5bc00696766ea348c919a0dfe9376ca73
70,343
def _get_alias_name(full_alias_name: str) -> str: """ Parse short alias name from full raw value Example: srn:reference-data/AliasNameType:UWI: -> UWI :param full_alias_name: full raw alias name value :return: short alias name value :rtype: str """ return full_alias_name.split(":")[-2]
f2ab55fe3b66aa4cc0dd5567b81b07a31ea0e139
70,349
def gdv(dd, n=0):
    """Get-dict-val; return the n-th value of dict ``dd`` (insertion order)."""
    return list(dd.values())[n]
91c618aac978a39aa7278b609e81bc278e8a6626
70,350
def filter_both(predicate, iterable):
    """Split ``iterable`` into (matching, non-matching) lists by ``predicate``.

    WARN: Consumes the whole iterable in the process. This is the price for
    calling ``predicate`` only once per element. (See itertools recipes for
    similar functionality without this requirement.)

    >>> filter_both(lambda x: x%2 == 0, range(4))
    ([0, 2], [1, 3])
    """
    matched, unmatched = [], []
    for element in iterable:
        bucket = matched if predicate(element) else unmatched
        bucket.append(element)
    return matched, unmatched
0bb1dc4b387296793853ec5a308ec7747ddaa51d
70,353
import re


def remove_hyperlinks(word):
    """Remove hyperlinks (http... runs) from a word."""
    link_pattern = re.compile(r"http\S+")
    return link_pattern.sub("", word)
3401fca55cab9857129b136bf4c883e088965870
70,355
def to_id(item) -> str:
    """Return ``item.id``, or the empty string when item is None.

    :param item: the item to get the id from
    :return: the id of the item
    """
    return '' if item is None else item.id
60cacc6a133f87bb1bde86968687b76fad59491a
70,363
def dictAsKvParray(data, keyName, valueName):
    """Transform a dictionary into a list of key-value records.

    :param data: The dictionary from which the data should be obtained.
    :type data: Dict<mixed, mixed>
    :param keyName: The name assigned to the key of each record.
    :type keyName: string
    :param valueName: The name assigned to the value of each record.
    :type valueName: string
    :returns: A list of {keyName: key, valueName: value} records.
    :rtype: [{keyName: mixed, valueName: mixed}]
    """
    return [{keyName: key, valueName: value} for key, value in data.items()]
00aad99776eec512be1e510b9c783e4118b31cd9
70,364
def total_enrollment(k12):
    """Total enrollment across all grade levels (K-6, 7-8, 9-12)."""
    return k12['K6'] + k12['G7_8'] + k12['G9_12']
ee2a640cc96d7abf04a6fbb014266eceae6d0810
70,366
import math


def isPrimeTrialDiv(num: int) -> bool:
    """Is prime trial division.

    Uses the `trial division`_ algorithm for testing if a given number
    is prime.

    Args:
        num: Integer to determine if prime.

    Returns:
        True if num is a prime number, otherwise False.

    .. _trial division: https://en.wikipedia.org/wiki/Trial_division

    Fix: uses ``math.isqrt`` instead of ``int(math.sqrt(...))`` — the
    float square root can round incorrectly for very large integers,
    which would make the loop miss the largest candidate divisor.
    """
    # All numbers less than 2 are not prime:
    if num < 2:
        return False
    # See if num is divisible by any number up to its integer square root:
    for divisor in range(2, math.isqrt(num) + 1):
        if num % divisor == 0:
            return False
    return True
9e9492b2640de2d305c59b23d4118108b63b534e
70,369
import json


def remove_dupe_dicts(l):
    """Remove duplicate dictionaries from a list.

    Each element is canonicalized with ``json.dumps(sort_keys=True)`` so
    nested data structures compare reliably; uniqueness comes from a set,
    so the order of the result is unspecified.

    Args:
        l (list): a list of (nested) data structures.

    Returns:
        A list of unique values.
    """
    unique_serialized = {json.dumps(d, sort_keys=True) for d in l}
    return [json.loads(s) for s in unique_serialized]
0127e53bcab2f7c4d2d5c97984e494e0edf694f1
70,370
def doGroup(indata, group_key_func, group_data_func):
    """Group the values of ``indata`` and summarize each group.

    Values are bucketed by ``group_key_func``; each bucket is then reduced
    with ``group_data_func``, which must return a dictionary (possibly
    similar to the original value shape).

    Args:
        indata: dict whose values are grouped.
        group_key_func: maps a value to its group key.
        group_data_func: summarizes a list of grouped values.

    Returns:
        dict of groups summarized by group_data_func.
    """
    buckets = {}
    for value in indata.values():
        buckets.setdefault(group_key_func(value), []).append(value)
    return {key: group_data_func(members) for key, members in buckets.items()}
2ec01d97815b7758c089a22dde75587e7f734b0f
70,372
def crop_img_arr(img_arr, bbox):
    """Crop a bounding box from an image.

    Parameters
    ----------
    img_arr
        Image in array format (rows, cols, channels).
    bbox
        (row_start, row_stop, col_start, col_stop) crop coordinates.

    Returns
    -------
    img_arr
        Cropped image.
    """
    row_start, row_stop = bbox[0], bbox[1]
    col_start, col_stop = bbox[2], bbox[3]
    return img_arr[row_start:row_stop, col_start:col_stop, :]
7c0b3b2e894b4e43d24e196f5d11eba6da4a83b5
70,373
def _calculate_conductivity(row): """ formula from https://in-situ.com/wp-content/uploads/2015/01/Specific-Conductance-as-an-Output-Unit-for-Conductivity-Readings-Tech-Note.pdf """ r = 0.0191 return (row['water_specific_conductivity_mS/cm'] * (1.0 + (r * (row['water_temp_C'] - 25.0))))
35161d3c79e5d90513696b2e6d79f3189094f912
70,375
import re


def extract_ids(path: str) -> list:
    """Read a list of sentences in CoNLL-U format and return the sentence ids.

    Parameters
    ----------
    path: str
        Path to input CoNLL-U file.

    Returns
    -------
    list:
        List of ids.
    """
    pattern = re.compile(r"sent_id = (dante_01_.*)")
    with open(path, "r") as conllu_file:
        contents = conllu_file.read()
    return pattern.findall(contents)
118d0ddd42a1ab8ed89b47f15551d156c64b255a
70,376
def _filter_features( example, feature_whitelist): """Remove features that are not whitelisted. Args: example: Input example. feature_whitelist: A list of feature names to whitelist. Returns: An example containing only the whitelisted features of the input example. """ return { feature_name: example[feature_name] for feature_name in feature_whitelist if feature_name in example }
26f4afd9297f1b678761646494ebb83d4724ca01
70,379
def add_street_to_items(street, items):
    """Adding each element of a list to another list.

    :param street: List of elements to be added
    :param items: List where elements will be added to (mutated in place)
    :return: list with all elements (the same ``items`` object)
    """
    # list.extend replaces the original element-by-element append loop;
    # same in-place mutation, same return value.
    items.extend(street)
    return items
7b398d08e3a5204043d1e45d43b341ff12f8c1e8
70,382
from typing import Set


def select_define(defines: Set[str], family_header: str) -> str:
    """Selects valid define from set of potential defines.

    Looks for the defines in the family header to pick the correct one.

    Args:
        defines: set of defines provided by `parse_product_str`
        family_header: `{family}.h` read into a string

    Returns:
        A single valid define

    Raises:
        ValueError if exactly one define is not found.
    """
    valid_defines = [
        candidate
        for candidate in defines
        if f'defined({candidate})' in family_header
        or f'defined ({candidate})' in family_header
    ]
    if len(valid_defines) != 1:
        raise ValueError("Unable to select a valid define")
    return valid_defines[0]
fafd14907ad98f84c2f019441a181afc54855c24
70,388
def pick(dct, *keys):
    """Pick a subset of a dict."""
    subset = {}
    for key, value in dct.items():
        if key in keys:
            subset[key] = value
    return subset
b5866be6bc6edf09d8490241509b492a24986b63
70,389
import random


def is_prime(num: int, num_rounds: int = 40) -> bool:
    """Probabilistically determine if a given number is a prime.

    This function uses the `Miller-Rabin
    <https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test>`__
    primality test.

    Args:
        num: prime candidate
        num_rounds: number of iterations inside the algorithm

    Returns:
        whether the number is a prime or not
    """
    if num == 2 or num == 3:  # pragma: no cover
        return True
    if num <= 1 or num % 2 == 0:  # pragma: no cover
        return False

    # Write num - 1 as d * 2**r with d odd.
    d = num - 1
    r = 0
    while d % 2 == 0:
        d //= 2
        r += 1

    for _ in range(num_rounds):
        witness = random.randrange(2, num - 1)
        x = pow(witness, d, num)
        if x in (1, num - 1):
            continue
        found_minus_one = False
        for _ in range(r - 1):
            x = pow(x, 2, num)
            if x == num - 1:
                found_minus_one = True
                break
        if not found_minus_one:
            return False
    return True
531dd08035bd7407f1ac1ba5283005ae47078e86
70,393
import pathlib
import re


def grep_dir(path: pathlib.Path, glob_pattern: str, search_pattern: str) -> bool:
    """Recursively go through a dir and it's children and find the regex."""
    regex = re.compile(search_pattern)
    candidate_files = (
        entry for entry in path.glob(glob_pattern) if entry.is_file()
    )
    return any(regex.search(entry.read_text()) for entry in candidate_files)
e3006f0f9976cf0de75b9b5fd71553ed66c0a6e6
70,394
def isiterable(obj):
    """Check if an object supports iteration."""
    try:
        iter(obj)  # Ask for an iterator; non-iterables raise TypeError.
    except TypeError:
        return False
    else:
        return True
5e54f7b80ebde74b396a195d32e916a5ea7e05c0
70,397
import math


def list_to_puzzle(lst):
    """Convert a one dimensional puzzle list into its two dimensional
    representation.

    [1, 2, 3, 4, 5, 6, 7, 8, 0] --> [[1, 2, 3], [4, 5, 6], [7, 8, 0]]
    """
    side = int(math.sqrt(len(lst)))
    # Slice the flat list into consecutive rows of `side` elements.
    return [lst[start:start + side] for start in range(0, len(lst), side)]
872fbf969b25cb05f37457d084c785b0aa22ebcf
70,398
def timestamp_to_date(timestamp, return_as_string=False):
    """Turn a pandas timestamp into YYYY-MM-DD format."""
    if not return_as_string:
        return timestamp.date()
    return str(timestamp.strftime('%Y-%m-%d'))
d50a07ec118b58ae0f3f583f3a226e6dbb684eca
70,399
def to_iamc_template(df):
    """Format pd.DataFrame *df* in IAMC style.

    Parameters
    ----------
    df : pandas.DataFrame
        May have a 'node' column, which will be renamed to 'region'.

    Returns
    -------
    pandas.DataFrame
        The returned object has:

        - Any (Multi)Index levels reset as columns.
        - Lower-case column names 'region', 'variable', and 'unit'.

    Raises
    ------
    ValueError
        If 'time' is among the column names; or 'region', 'variable', or
        'unit' is not.
    """
    if 'time' in df.columns:
        raise ValueError('sub-annual time slices not supported by '
                         'ixmp.TimeSeries')

    # reset the index if meaningful entries are included there.
    # Bug fix: the original used reset_index(inplace=True), which mutated
    # the caller's DataFrame as a side effect; the copying form keeps the
    # input untouched.
    if not list(df.index.names) == [None]:
        df = df.reset_index()

    # rename columns to standard notation
    cols = {c: str(c).lower() for c in df.columns}
    cols.update(node='region')
    df = df.rename(columns=cols)

    required_cols = ['region', 'variable', 'unit']
    if not set(required_cols).issubset(set(df.columns)):
        missing = list(set(required_cols) - set(df.columns))
        raise ValueError("missing required columns `{}`!".format(missing))

    return df
d6c562d0168808cd8ad25b64b17d66a6afe73f5a
70,403
import re


def param_name(name):
    """Removes the integer id from a Parameterized class name."""
    found = re.findall(r'\D+(\d{5,})', name)
    if not found:
        return name
    # Truncate at the position where the numeric id starts.
    return name[:name.index(found[0])]
349731c86f25c2832c281da04358699093c77ef2
70,404
def load_name_index(file_name):
    """
    Returns names: a list where names[i] is the name of phone[i]
    """
    names = []
    with open(file_name) as timit_file:
        for line in timit_file:
            idx, label = line.split(",")
            # Validate the 1-based index field (kept so malformed input
            # still raises ValueError, as before).
            idx = int(idx) - 1
            name, junk = label.strip().split(" ")
            if name not in names:
                names.append(name)
    return names
e7710f37e098d10f8590a75f0e187f756979aeda
70,406
def eratosthenes_sieve(n):
    """
    Sieve of Eratosthenes
    Complexity: O(NloglogN)

    We can find all the prime numbers up to a specific point. This technique
    is based on the fact that the multiples of a prime number are composite
    numbers. That happens because a multiple of a prime number will always
    have 1, itself and the prime as a divisor (maybe even more) and thus,
    it's not a prime number.

    Returns the list of primes in [2, n].
    """
    # Bug fix: the original indexed primes[1] unconditionally, which raised
    # IndexError for n < 1. There are no primes below 2 anyway.
    if n < 2:
        return []

    primes = [True] * (n + 1)
    primes[0] = primes[1] = False

    for i in range(2, int(n**0.5) + 1):
        if primes[i]:
            # Start at i*i: smaller multiples were marked by smaller primes.
            for j in range(i * i, n + 1, i):
                primes[j] = False

    return [i for i, is_prime_flag in enumerate(primes) if is_prime_flag]
e4b0446d93d7ad6df8b98ed976a53f77ec21067a
70,410
def get_canonical_form(mpa):
    """
    Returns canonical form of mpa. Is either 'left', 'right' or None
    """
    cform = mpa.canonical_form
    # Left-canonical when the left boundary reaches the last site.
    if cform[0] == len(mpa) - 1:
        return 'left'
    # Right-canonical when the right boundary has shrunk to the first site.
    if cform[1] == 1:
        return 'right'
    return None
27cfb219b02567d19cb7d3e6e0b88ad962df0f23
70,413
def StartsWith(lines, pos, string):
    """Returns True iff the given position in lines starts with 'string'."""
    remainder = lines[pos.line][pos.column:]
    return remainder.startswith(string)
1c427d15b9376c87f6f8a01319724473d21a06fa
70,416
def gassmann_dry2sat(Kdry, Kmin, Kfl, phi):
    """
    Gassmann substitution from Dry Rock moduli to saturated moduli
    """
    numerator = (1.0 - Kdry / Kmin) ** 2.0
    denominator = phi / Kfl + (1.0 - phi) / Kmin - Kdry / (Kmin ** 2.0)
    return Kdry + numerator / denominator
252e95f3db7dd5b080667e72d6906623ae49af25
70,417
from typing import Union from typing import Collection from typing import Mapping def _ensure_ids( labels_or_ids: Union[Collection[int], Collection[str]], label_to_id: Mapping[str, int], ) -> Collection[int]: """Convert labels to IDs.""" return [ label_to_id[l_or_i] if isinstance(l_or_i, str) else l_or_i for l_or_i in labels_or_ids ]
0148f21c6522820cd6f9df071431257a0e2d6caa
70,418
def begin(task):
    """
    Create the initial state.
    :param task: the random word that is chosen
    :return: initial state (all elements are dash)
    """
    # One underscore per character; string repetition replaces the
    # original character-by-character accumulation loop.
    return '_' * len(task)
cde527fb1b0acac7999bf6700b31460315554e42
70,419
import random


def crd(a, p, b, d):
    """
    Returns atomic coordinate using given alat, place, basis atom coordinate
    and maximum random distortion value

    In:
     -> a - lattice constant
     -> p - cell number in a supercell
     -> b - basis coordinate
     -> d - maximum random distortion (in fractions of lattice constant)

    Out:
     -> crd - an atomic coordinate (float)
    """
    jitter = random.uniform(-d, d)
    return a * (p + b + jitter)
d894661e384557edd190bc9f88d4fbe758855048
70,421
import logging def _WarnAboutDuplicates(offsets): """Warns about duplicate offsets. Args: offsets: list of offsets to check for duplicates Returns: True if there are no duplicates, False otherwise. """ seen_offsets = set() ok = True for offset in offsets: if offset not in seen_offsets: seen_offsets.add(offset) else: ok = False logging.warning('Duplicate offset: ' + hex(offset)) return ok
50f9e6891e79afc61f53ae5adb8d6832e8ff2856
70,427
def num_paths_to_top(height, possible_steps):
    """
    Given the height of a staircase n, and a list of possible steps you can
    take at a time, calculate the number of possible paths you can take to
    reach the top of the staircase.

    For example, given a height 3, and the possible steps [1, 2, 3], there
    would be 4 possible ways to climb the staircase:
    [1,1,1]
    [1,2]
    [2,1]
    [3]

    Args:
        height: The height of the staircase
        possible_steps: A list of integer values for the number of steps we
            can climb at a time.

    Returns:
        The total number of possible paths we can take to the top.

    Note:
        Bug fix: the original recurrence added an unconditional +1 at every
        height, which over-counted whenever no single step of exactly the
        remaining height existed (e.g. it returned 8 instead of 7 for
        height 4 with steps [1, 2, 3], and 2 instead of 0 for height 3
        with steps [2]). The standard recurrence is used instead:
        f(0) = 1, f(h) = sum(f(h - s) for each usable step s).
    """
    if height < 1:
        return 0

    # memo[h] = number of distinct step sequences that climb exactly h.
    # Base case: one way to climb 0 steps (take no more steps).
    memo = {0: 1}

    def _count(remaining):
        """Memoized count of paths that climb exactly `remaining`."""
        if remaining not in memo:
            memo[remaining] = sum(
                _count(remaining - step)
                for step in possible_steps
                if step <= remaining
            )
        return memo[remaining]

    return _count(height)
a631e9719f02291e4ce5bdc029fb1c54a158ad64
70,428
def to_string(recipe, num_servings):
    """
    Return a list of strings with a description of this recipe.
    The recipe will be customized for the given number of servings.
    """
    lines = [
        "Recipe for {}, {} servings:".format(recipe['name'], num_servings),
        "",
        "Ingredients:",
        "",
    ]
    for ingredient in lines and recipe['ingredients']:
        scaled = ingredient['amount'] * num_servings / recipe['num_servings']
        lines.append(" {} - {} {}".format(
            ingredient['ingredient'], scaled, ingredient['units']))
    lines.extend(["", "Instructions:", ""])
    for number, instruction in enumerate(recipe['instructions'], start=1):
        lines.append("{}. {}".format(number, instruction))
    return lines
714cc696a1032786d582de64e0654d0f5eeb0a2e
70,432
def dict_get(_dict, keys):
    """Get dict values by keys."""
    # __getitem__ preserves the original KeyError behavior for missing keys.
    return list(map(_dict.__getitem__, keys))
9ce636167f6f4ada73e32533e78a8b16ede83f2c
70,441
def _is_primary_connection_has_ip(vapp): """Return True in case when primary interface has some ip""" network_info = vapp.get_vms_network_info() # we dont have any network, skip checks if not network_info: return True if not network_info[0]: return True # we have some networks for conn in network_info[0]: if conn['is_connected'] and conn['is_primary'] and conn['ip']: return True return False
d8c4e03c5494596685ddfc529ceb74c1fe2971a2
70,444
def total_xor(a):
    """
    Calculates the XOR total run from [0, a]
    """
    # Nothing to fold for non-positive inputs.
    if a <= 0:
        return 0
    # Cumulative XOR of 0..a follows a period-4 pattern in a.
    remainder = a % 4
    if remainder == 0:
        return a
    if remainder == 1:
        return 1
    if remainder == 2:
        return a + 1
    return 0
5e09bb24a7a9d2e96c0b19a67e2645e5d07d9710
70,446
import math


def degrees_to_xy(degrees, radius, origin):
    """Calculates a point that is at an angle from the origin; 0 is to the
    right.

    Args:
        * origin: tuple of (x,y) values

    Returns:
        * (x,y) tuple
    """
    radians = float(degrees) * math.pi / 180.0
    x_out = math.cos(radians) * radius + origin[0]
    # Negated angle: positive degrees sweep downward (screen-style y axis).
    y_out = math.sin(-radians) * radius + origin[1]
    return (x_out, y_out)
5b3d067479064d0e78e04d25e41431db2fab5a13
70,454