content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
from pathlib import Path
import glob
def get_file_path(directory: str, filename: str) -> Path:
    """Resolve *filename* (with any extension) inside *directory*.

    Args:
        directory: Directory to search in.
        filename: Base name without extension; glob metacharacters in it
            are escaped so the name is matched literally.

    Returns:
        Path of the first file matching ``<filename>.<ext>``.

    Raises:
        ValueError: If no matching file exists.
    """
    # next() with a default avoids materialising the whole match list
    # just to take element [0], and makes the "no match" case explicit.
    match = next(Path(directory).glob(glob.escape(filename) + ".*"), None)
    if match is None:
        raise ValueError("File does not exist!")
    return match
def _isnumber(obj):
"""Test is object is a number (copied from pmisc module to avoid import loops)."""
return (
(obj is not None)
and (not isinstance(obj, bool))
and isinstance(obj, (int, float, complex))
) | 8a6e436cfc5698f1060720d73c56a033ad8df559 | 91,118 |
def dummy_stake_data() -> dict:
    """
    Return dummy Verus stake data (a fixed fake record for testing).
    """
    return {
        'txid': 'qwerty123456',
        'time': 1234567890,
        'amount': 123.123,
    }
import base64
def decode_creds(authorization_token):
    """
    Decode Basic auth token into username, password.
    :param str authorization_token: Basic auth token ("Basic <base64>")
    :returns tuple: (username, password); (None, None) if the token is malformed
    """
    try:
        _, auth64 = authorization_token.split(' ')
        # Split on the first ':' only -- RFC 7617 allows the password
        # itself to contain ':' characters.
        username, password = base64.b64decode(auth64).decode().split(':', 1)
    except (AttributeError, ValueError):
        username = password = None
    return (username, password)
import re
def replace_all(repls, str):
    """
    Applies replacements as described in the repls dictionary on input str.
    Keys are matched literally (regex metacharacters are escaped).
    :param repls: Dictionary of replacements
    :param str: The string to be changed
    :return: The changed string
    """
    # Guard: with no replacements the joined pattern would be '', which
    # matches at every position and then fails the repls[''] lookup.
    if not repls:
        return str
    pattern = '|'.join(re.escape(key) for key in repls)
    return re.sub(pattern, lambda m: repls[m.group(0)], str)
def _flip(callback):
"""Flips arguments: the first one becomes the second."""
return lambda first, second: callback(second, first) | 42fbf558a550bb8a79adba3a47df52fa2385434b | 91,125 |
def cumsum(x):
    """
    Create a list of summed up values from another list.

    :param x: sequence of addable values
    :return: list where element i is sum(x[:i+1]); empty list for empty input
    """
    import itertools
    # itertools.accumulate computes the running sum in C, replacing the
    # hand-rolled index loop (and its pointless pre-filled list).
    return list(itertools.accumulate(x))
def readable_timedelta(days):
    """Express a number of days as whole weeks plus leftover days."""
    weeks, remainder = divmod(days, 7)
    return '{} week(s) and {} day(s)'.format(weeks, remainder)
def squashstr(string):
    """Squashes a string: removes all whitespace and lower-cases the result."""
    squashed = "".join(part for part in string.split())
    return squashed.lower()
def get_excluded_genes(exc_arg):
    """Load set of transcripts to be excluded.

    :param exc_arg: path to a file with one transcript ID per line,
        or a falsy value meaning "no exclusions".
    :return: set of IDs with trailing whitespace stripped; empty set if
        no file was given.
    """
    if not exc_arg:
        return set()
    # 'with' guarantees the handle is closed even if reading raises.
    with open(exc_arg, "r") as handle:
        return {line.rstrip() for line in handle}
def isc_1km_to_5km(i_sc_1km):
    """
    Return the 5km grid index cross track of a 1km pixel.
    """
    # Each 5 km cell spans five 1 km pixels, centred at offset 2.
    return (i_sc_1km - 2.0) / 5.0
def add_stokes_axis_to_wcs(wcs, add_before_ind):
    """
    Add a new Stokes axis that is uncorrelated with any other axes.
    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        The WCS to add to
    add_before_ind : int
        Index of the WCS to insert the new Stokes axis in front of.
        To add at the end, do add_before_ind = wcs.wcs.naxis
        The beginning is at position 0.
    Returns
    -------
    A new `~astropy.wcs.WCS` instance with an additional axis
    """
    # WCS.sub() takes 1-based axis numbers; a 0 entry requests a new
    # dummy axis at that position.
    inds = [i + 1 for i in range(wcs.wcs.naxis)]
    inds.insert(add_before_ind, 0)
    newwcs = wcs.sub(inds)
    # Label the freshly inserted dummy axis as the Stokes axis.
    newwcs.wcs.ctype[add_before_ind] = 'STOKES'
    newwcs.wcs.cname[add_before_ind] = 'STOKES'
    return newwcs
from pathlib import Path
def is_raster_format(image_path: str) -> bool:
    """
    Is the file at the provided path a raster image?
    Parameters:
        image_path (str): Path to file.
    Returns:
        bool: Whether the file is a raster image.
    """
    # A distinct name is used because 'format' shadows the builtin;
    # a set makes the membership test O(1).
    raster_suffixes = {".jpg", ".jpeg", ".tif", ".tiff"}
    return Path(image_path).suffix.lower() in raster_suffixes
def get_shape_id(shape: tuple) -> str:
    """
    Join shape numbers into a string with `_` delimiters.
    Parameters
    ----------
    shape : tuple
        Same as pandas.Dataframe.shape.
    Returns
    -------
    str
    """
    return "_".join(map(str, shape))
from typing import List
def get_labels() -> List[str]:
    """Get labels of parameters."""
    labels = ["Brightness", "Contrast", "Saturation"]
    # Lift/Gamma/Gain each carry one entry per RGB channel.
    for group in ("Lift", "Gamma", "Gain"):
        labels.extend("{} ({})".format(group, channel) for channel in "RGB")
    return labels
from typing import Union
from typing import List
def validate_int(int_input: Union[int, List]) -> List[int]:
    """Normalise the input to a list.
    :param int_input: int, list
        Single int or list of ints
    :return List of ints.
    :raises ValueError if input is neither an int nor a list
    """
    if isinstance(int_input, list):
        return int_input
    if isinstance(int_input, int):
        return [int_input]
    raise ValueError("Input should be of type int or list")
def get_nearest_rhythm(interval, rhythmical_durations):
    """
    Given a certain interval in seconds, gets the rhythmical duration
    that has the lower distance with it.
    :param interval: duration in seconds
    :param rhythmical_durations: dictionary returned by the get_durations
    :return: the key whose value is closest to *interval*
    """
    # min() with a key function is the idiomatic argmin over the keys.
    # On ties it keeps the first key in insertion order, matching the
    # original parallel-list index implementation.
    return min(rhythmical_durations,
               key=lambda k: abs(interval - rhythmical_durations[k]))
import decimal
def prompt_decimal(prompt):
    """Prompt repeatedly until the user enters a valid decimal."""
    while True:
        raw = input(prompt)
        try:
            return decimal.Decimal(raw)
        except decimal.InvalidOperation:
            print('Provide a decimal')
def extract_bgnbd_params(model):
    """Extracts params from the (M)BG/NBD model
    Args:
        model: the (M)BG/NBD model.
    Returns:
        The a, b, r and alpha params of the (M)BG/NBD model.
    """
    keys = ('r', 'alpha', 'a', 'b')
    values = model._unload_params(*keys)
    return dict(zip(keys, values))
def angular_difference(degrees1, degrees2):
    """
    Calculates the smaller angle between the provided bearings / headings
    """
    delta = abs(degrees1 - degrees2)
    # Going the other way round the circle may be shorter.
    return abs(delta - 360) if delta > 180 else delta
def remaining_object_heat_capacity(height_obj, density_obj, heat_cap_obj):
    """
    Equation 8.23: areal heat capacity of a greenhouse object.
    Args:
        height_obj: mean height of the greenhouse object [m]
        density_obj: density of the greenhouse object [kg m-3]
        heat_cap_obj: specific heat capacity of the object [J K-1 kg-1]
    Returns: [J K^-1 m^-2]
    """
    mass_per_area = height_obj * density_obj
    return mass_per_area * heat_cap_obj
def get_sec(time):
    """Get Seconds from time.

    *time* is expected to be a bytes-like value whose repr looks like
    b'HH:MM:SS[.fff]'; str(time)[2:-1] strips the b' prefix and trailing quote.
    """
    text = str(time)[2:-1]
    hours, minutes, seconds = text.split(':')
    return int(hours) * 3600 + int(minutes) * 60 + float(seconds)
def list_data_connections(graph):
    """Return all edges that are data connections"""
    connections = []
    for u, v, data in graph.edges_iter(data=True):
        if data.get('conn'):
            connections.append((u, v))
    return connections
from typing import Optional
from typing import Iterator
def safe_iter(obj) -> Optional[Iterator]:
    """
    Returns an iterator for the object if it's iterable,
    or None if it's not.
    :param obj: The object to get the iterator for.
    :return: The iterator, or None if it's not iterable.
    """
    try:
        return iter(obj)
    except TypeError:
        # iter() raises TypeError for non-iterables; catching only that
        # avoids silently hiding unrelated errors from a broken __iter__.
        return None
def encontrar_mejor_puntaje_equipo(salon: list, m: int) -> int:
    """ The Best Team
    Parameters:
        salon (list): A list of lists representing a square matrix with the
            number of problems solved by each student.
        m (int): Number of teams the teacher splits the classroom into.
    Returns:
        int: The summed score of the members of the best team in the game
            posed by the teacher.

    NOTE(review): only the first two column-blocks of width n/m are ever
    compared, regardless of how large m is -- confirm that is the intended
    game rule.
    """
    n = len(salon)
    primer_e = 0
    segundo_e = 0
    # Rows.
    for i in range(n):
        # Columns.
        for j in range(n):
            # First team: columns in [0, n/m).
            if j <= (n/m) - 1:
                primer_e += salon[i][j]
            # Second team: columns in [n/m, 2n/m).
            elif 2 * (n/m) - 1 >= j >= (n/m):
                segundo_e += salon[i][j]
    # Return the higher of the two team totals.
    if primer_e > segundo_e:
        return primer_e
    else:
        return segundo_e
import math
def perfect_square(n):
    """ Checks if n is a perfect square.

    Uses math.isqrt, which is exact for arbitrarily large integers,
    unlike float math.sqrt whose rounding misclassifies values near
    and above 2**52. int(n) keeps float inputs working as before.
    """
    return math.isqrt(int(n)) ** 2 == n
def breadth_first_search_graph(head, target):
    """
    Search a graph breadth-first for a node whose value equals target.
    Args:
        head: starting node; nodes expose .val and .adjacent_list
        target: the target value to find
    Returns:
        The first node found with val == target, or None.
    """
    # Bug fix: the original never compared the start node itself to the
    # target, so searches for head's own value returned None.
    if head.val == target:
        return head
    visited = {head}
    queue = [head]
    while queue:
        current = queue.pop()
        for adjacent in current.adjacent_list:
            if adjacent in visited:
                continue
            if adjacent.val == target:
                return adjacent
            # insert(0)/pop() keeps FIFO order for breadth-first traversal.
            queue.insert(0, adjacent)
            visited.add(adjacent)
    return None
import re
def is_svn_page(html):
    """Returns true if the page appears to be the index page of an svn repository"""
    title_match = re.search(r'<title>[^<]*Revision \d+:', html)
    if not title_match:
        return title_match
    # Both markers must be present; the second search's result is the
    # returned (truthy/falsy) value, as in `a and b`.
    return re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I)
def merge1(list1, list2):
    """
    Merge two sorted lists.
    Returns a new sorted list containing those elements that are in
    either list1 or list2.
    This function is recursive.
    """
    left, right = list(list1), list(list2)
    # Base cases: once one side is exhausted, the other is already sorted.
    if not left:
        return right
    if not right:
        return left
    # Recursive step: peel off the smaller head element.
    if left[0] < right[0]:
        return [left[0]] + merge1(left[1:], right)
    return [right[0]] + merge1(left, right[1:])
def _alpha_beta(state, depth, alpha, beta, value_function,
                maximizing_player_id):
  """An alpha-beta algorithm.
  Implements a min-max algorithm with alpha-beta pruning.
  See for example https://en.wikipedia.org/wiki/Alpha-beta_pruning
  Arguments:
    state: The current state node of the game.
    depth: The maximum depth for the min/max search.
    alpha: best value that the MAX player can guarantee (if the value is <= than
      alpha, the MAX player will avoid it).
    beta: the best value that the MIN currently can guarantee (if the value is
      >= than beta, the MIN player will avoid it).
    value_function: An optional function mapping a Spiel `State` to a numerical
      value, to be used as the value of the maximizing player for a node when we
      reach `maximum_depth` and the node is not terminal.
    maximizing_player_id: The id of the MAX player. The other player is assumed
      to be MIN.
  Returns:
    A tuple of the optimal value of the sub-game starting in state
    (given alpha/beta) and the move that achieved it.
  Raises:
    NotImplementedError: If we reach the maximum depth. Given we have no value
      function for a non-terminal node, we cannot break early.
  """
  if state.is_terminal():
    # Terminal node: the game's actual return is the exact value.
    return state.player_return(maximizing_player_id), None
  if depth == 0 and value_function is None:
    raise NotImplementedError(
        "We assume we can walk the full depth of the tree. "
        "Try increasing the maximum_depth or provide a value_function.")
  if depth == 0:
    # Depth cutoff: estimate the node with the heuristic value function.
    return value_function(state), None
  player = state.current_player()
  best_action = -1
  if player == maximizing_player_id:
    # MAX node: raise alpha as better children are found.
    value = -float("inf")
    for action in state.legal_actions():
      child_state = state.clone()
      child_state.apply_action(action)
      child_value, _ = _alpha_beta(child_state, depth - 1, alpha, beta,
                                   value_function, maximizing_player_id)
      if child_value > value:
        value = child_value
        best_action = action
      alpha = max(alpha, value)
      if alpha >= beta:
        break  # beta cut-off
    return value, best_action
  else:
    # MIN node: lower beta as better (for MIN) children are found.
    value = float("inf")
    for action in state.legal_actions():
      child_state = state.clone()
      child_state.apply_action(action)
      child_value, _ = _alpha_beta(child_state, depth - 1, alpha, beta,
                                   value_function, maximizing_player_id)
      if child_value < value:
        value = child_value
        best_action = action
      beta = min(beta, value)
      if alpha >= beta:
        break  # alpha cut-off
    return value, best_action
def default_qp(depth, qs):
    """Default rule for Qp as a function of Qs.
    Args:
        depth (float)
            Depth of location in m (unused by this default rule).
        qs (float)
            Quality factor for S wave.
    Returns:
        Quality factor for P wave.
    """
    return qs * 2.0
def recon_rule_preview_payload(passed_keywords: dict) -> dict:
    """Create a properly formatted payload for retrieving a rule preview from recon.
    {
        "filter": "string",
        "topic": "string"
    }
    """
    payload = {}
    # Only truthy values are copied over, matching the API's expectations.
    for key in ("filter", "topic"):
        value = passed_keywords.get(key)
        if value:
            payload[key] = value
    return payload
def subject_list_to_message(subject_list):
    """
    Converts the list of subjects into a string to send the user.
    Each line has the form "/<n>. <subject>", numbered from 0.
    """
    # enumerate() replaces the manual counter, and join() avoids the
    # quadratic repeated string concatenation.
    return "".join(
        "/{srno}. {subject}\n".format(srno=index, subject=subject)
        for index, subject in enumerate(subject_list)
    )
def is_any_thread_alive(threads) -> bool:
    """
    Checks if there are any threads running
    Arguments:
        - threads: A list of threads running
    returns: True if there are any live threads, False otherwise
    """
    # any() with a generator short-circuits on the first live thread,
    # instead of building a full list of booleans first.
    return any(t.is_alive() for t in threads)
def percentile_(data, percentile):
    """Calculates the given percentile of the object with linear interpolation.

    :param data: non-empty sequence of numbers
    :param percentile: fraction in [0, 1]
    :return: the interpolated percentile value
    """
    sorted_data = sorted(data)
    i = percentile * (len(sorted_data) - 1)
    floor = int(i // 1)
    frac = i % 1
    # Bug fix: when i is integral (e.g. percentile == 1.0) there is
    # nothing to interpolate and indexing floor+1 would run off the end.
    if frac == 0:
        return sorted_data[floor]
    return sorted_data[floor] + (sorted_data[floor + 1] - sorted_data[floor]) * frac
def find_adjacent(overlapping_information: list, existing_nodes: list):
    """
    Gets a list of directly connected subgraphs and creates the indirect connections.
    :param overlapping_information: a list of lists each containing direct connections betweeen some subgraphs.
    :param existing_nodes: a list containing each existing node once.
    :return: a list of lists each containing all reachable subgraphs with other connected subgraphs in between.
    """
    # Effectively computes connected components: each component is grown
    # from a seed node by repeatedly absorbing every direct-connection
    # group that shares at least one node with it.
    result_connections = []
    for node in existing_nodes:
        # Skip nodes already assigned to a previously built component.
        already_checked = False
        for c in result_connections:
            if node in c:
                already_checked = True
                break
        if already_checked is True:
            continue
        # Start a new component seeded with this node.
        connection_list = []
        connection_list.append(node)
        has_changed = True
        # Fixed-point iteration: keep sweeping the groups until a full
        # pass adds no new node to the component.
        while has_changed is True:
            has_changed = False
            for direct_connection in overlapping_information:
                # Does this group touch the component built so far?
                will_be_checked = False
                for n in connection_list:
                    if n in direct_connection:
                        will_be_checked = True
                        break
                if will_be_checked is True:
                    # Absorb the group's nodes not yet in the component.
                    for new_node in direct_connection:
                        if new_node not in connection_list:
                            connection_list.append(new_node)
                            has_changed = True
        result_connections.append(connection_list)
    return result_connections
def get_page_parameter(request):
    """
    Return the ``page`` parameter of the request's GET parameters. This
    number is used in combination with the ``limit`` parameter to
    perform pagination on queries to the database or Elasticsearch. It
    needs to be a positive integer. If the page is not indicated or
    invalid, the default page is returned.
    Args:
        ``request`` (django.http.HttpRequest): The request object with
        parameter ``page`` set.
    Returns:
        ``int``. The page.
    """
    DEFAULT_PAGE = 1
    raw_value = request.GET.get('page', DEFAULT_PAGE)
    try:
        page = int(raw_value)
    except ValueError:
        return DEFAULT_PAGE
    # Pages are 1-based; zero or negative values fall back to the default.
    return page if page > 0 else DEFAULT_PAGE
import math
def g(x=0):
    """Gaussian function for given parameter: x (standard normal density)."""
    coefficient = 1 / math.sqrt(2 * math.pi)
    return coefficient * math.exp(-(x ** 2) / 2)
import torch
def _get_device(cuda_device: int) -> torch.device:
"""Return a `torch.cuda` device if `torch.cuda.is_available()` and `cuda_device>=0`.
Otherwise returns a `torch.cpu` device.
"""
if cuda_device != -1 and torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
return device | cc768f3cd1a2ad225c8e0eeb0415177b9a1f964d | 91,237 |
def get_intent_from_transfer_to_action_event(response):
    """
    On a transfer to action event, MS will return the current context of the chat - including the intent name
    This method will pull that value out of the response
    """
    # Variables are stored under the 'value' key of the response.
    if 'value' not in response:
        return None
    context = response['value']
    if 'va_LastTopic' not in context:
        return None
    return context['va_LastTopic']
import string
import random
def random_str(length=10, sample=None, prefix='', suffix=''):
    """Generate a random string.

    sample is a string, a list of strings/characters, or None (defaults
    to lowercase ascii letters). Preset names may be used in place of
    literal characters:
    - 'LOWERCASE': lower case ascii letters
    - 'UPPERCASE': uppercase ascii letters
    - 'DIGITS': digit characters
    - 'SPECIAL': Special characters
    Example:
        random_str(sample=['LOWERCASE', '!@#$%'])
    prefix: A string to be prepended to the generated string
    suffix: A string to be appended to the generated string
    """
    presets = {
        'LOWERCASE': string.ascii_lowercase,
        'UPPERCASE': string.ascii_uppercase,
        'DIGITS': string.digits,
        'SPECIAL': string.punctuation,
    }
    if sample is None:
        pool = string.ascii_lowercase
    elif isinstance(sample, list):
        # Each entry is either a preset name or a run of literal chars.
        pool = ''.join(presets.get(item, str(item)) for item in sample)
    elif isinstance(sample, str):
        pool = presets.get(sample, str(sample))
    else:
        pool = ''
    body = ''.join(random.choice(pool) for _ in range(length))
    return prefix + body + suffix
def surprisetopN(algo, trainSet, raw_uid, N):
    """Derive the topN recommendations for user uid
    algo: scikit-surprise trained algorithm
    trainSet (surprise.dataset.Trainset)
    raw_uid (int or float): raw uid
    e.g. surprisetopN(algo, trainSet, 196, 3)
    Returns:
        list: (raw_iid, prediction) for the N recommended item_ids
    """
    # Surprise stores ratings under internal ids; translate the raw uid.
    inner_uid = trainSet.to_inner_uid(raw_uid)
    recom = []
    # Inner iids the user has already rated -- these are skipped below.
    profile = set(map(lambda x: x[0], trainSet.ur[inner_uid]))
    for iid in trainSet.all_items():
        if iid not in profile:  # item is unseen
            raw_iid = trainSet.to_raw_iid(iid)
            # r_ui=-1: true rating unknown, only the estimate is wanted.
            pred = algo.predict(raw_uid, raw_iid, r_ui=-1, verbose=False)
            recom.append((raw_iid, pred.est))
    # Rank unseen items by estimated rating, best first, keep the top N.
    recom = sorted(recom, key=lambda x: x[1], reverse=True)
    return recom[:N]
from typing import Dict
from typing import Any
from typing import Set
async def required_users_options(element: Dict[str, Any]) -> Set[int]:
    """
    Returns all user ids that have voted on an option and are therefore required for the single votes table.
    """
    voter_ids = element["voted_id"]
    return voter_ids
def unique(values):
    """
    Return the unique values, in order of first appearance.

    :param values: List of values to check.
    :type values: list
    :return: Unique values in order of appearance.
    :rtype: list
    """
    try:
        # Fast path: dict preserves insertion order, O(n) for hashables.
        return list(dict.fromkeys(values))
    except TypeError:
        # Unhashable elements (e.g. lists): fall back to the original
        # O(n^2) linear-membership approach.
        result = []
        for elem in values:
            if elem not in result:
                result.append(elem)
        return result
import re
def split_alpha_number(symbol):
    """
    Split a leading run of letters from the digits that follow; any
    trailing characters after the digits are ignored.
    print(split_alpha_number("TF1703.CFE"))
    ('TF', '1703')
    :param symbol: string to split
    :return: (letters, digits) tuple, or None if the pattern does not match
    """
    match = re.match(r'([A-Za-z]+)(\d+)', symbol)
    if match is None:
        return None
    return match.group(1, 2)
def get_docs(rule_class_instance):
    """Extract documents from a rule class instance
    :param rule_class_instance : rule class instance from where to extract documents
    :return : dictionary with Codacy documents info
    """
    return {
        'patternId': rule_class_instance.id,
        'title': rule_class_instance.shortdesc,
        'description': rule_class_instance.__doc__,
        'timeToFix': 5,
    }
def lookup_clutter_geotype(geotype_lookup, population_density):
    """Return geotype based on population density
    Params:
    ======
    geotype_lookup : list of (population_density_upper_bound, geotype) tuples,
        exactly three entries, sorted by population_density_upper_bound
        DESCENDING: the code below reads index 0 as the highest bound and
        index 2 as the lowest. (The original note said "ascending", which
        contradicts the unpacking order.)
    """
    # Exactly three tiers are expected: highest, middle, lowest.
    highest_popd, highest_geotype = geotype_lookup[0]
    middle_popd, middle_geotype = geotype_lookup[1]
    lowest_popd, lowest_geotype = geotype_lookup[2]
    if population_density < middle_popd:
        return lowest_geotype
    elif population_density > highest_popd:
        return highest_geotype
    else:
        return middle_geotype
def listify(value):
    """If variable, return a list of 1 value, if already a list don't change a list. """
    if value is None:
        return []
    if isinstance(value, list):
        return value
    return [value]
def _is_normally_open(location, date):
"""Checks if the location is normally opened on a given date."""
opening = location["opening_weekdays"]
opening_weekday = opening[date.weekday()]
return opening_weekday["is_open"] | 64c7acd80c762e68373a60d57a6e448e28390860 | 91,265 |
def create_identifier_custom_id_from_name(name):
    """
    Tries to turn the given name into an identifier. The identifier is always lower case. Spaces and minus signs are
    turned to underscore.
    Parameters
    ----------
    name : `str`
        The name of a custom_id.
    Returns
    -------
    custom_id : `str`
        The created custom id.
    """
    # A single translate() pass maps both ' ' and '-' to '_'.
    return name.casefold().translate(str.maketrans(' -', '__'))
import torch
def parallelize_model(model, device, num_gpus=None, gpu_ids=None, local_rank=-1):
    """Moves a model to the specified device (cpu or gpu/s)
    and implements data parallelism when multiple gpus are specified.
    Args:
        model (Module): A PyTorch model.
        device (torch.device): A PyTorch device.
        num_gpus (int): The number of GPUs to be used.
            If set to None, all available GPUs will be used.
            Defaults to None.
        gpu_ids (list): List of GPU IDs to be used.
            If None, the first num_gpus GPUs will be used.
            If not None, overrides num_gpus. if gpu_ids is an empty list
            or there is no valid gpu devices are specified,
            and device is "cuda", model will not be moved or parallelized.
            Defaults to None.
        local_rank (int): Local GPU ID within a node. Used in distributed environments.
            If not -1, num_gpus and gpu_ids are ignored.
            Defaults to -1.
    Returns:
        Module, DataParallel, DistributedDataParallel: A PyTorch Module or
            a DataParallel/DistributedDataParallel wrapper,
            when one or multiple gpus are used.
    """
    if not isinstance(device, torch.device):
        raise ValueError("device must be of type torch.device.")
    # Unwrap an already-parallelized model so we do not double-wrap it.
    model_module = (
        model.module if hasattr(model, "module") else model
    )  # Take care of distributed/parallel training
    if local_rank != -1:
        # Distributed mode: each process owns exactly one local GPU.
        model = torch.nn.parallel.DistributedDataParallel(
            model_module,
            device_ids=[local_rank],
            output_device=local_rank,
            find_unused_parameters=True,
        )
    else:
        if device.type == "cuda":
            if num_gpus is not None:
                if num_gpus < 1:
                    raise ValueError("num_gpus must be at least 1 or None")
            num_cuda_devices = torch.cuda.device_count()
            if num_cuda_devices < 1:
                raise Exception("CUDA devices are not available.")
            if gpu_ids is None:
                # Use the first num_gpus devices (or all of them).
                num_gpus = (
                    num_cuda_devices
                    if num_gpus is None
                    else min(num_gpus, num_cuda_devices)
                )
                gpu_ids = list(range(num_gpus))
            else:
                # Keep only the requested ids that actually exist.
                gpu_ids = list(
                    set(list(range(num_cuda_devices))).intersection(gpu_ids))
            if len(gpu_ids) > 0:
                model = torch.nn.DataParallel(model_module, device_ids=gpu_ids)
    return model
import hashlib
def hash_utf8(string):
    """given utf8 string return md5 hash value as hex string"""
    # hexdigest() already yields the lowercase hex string, so the
    # binascii.hexlify round-trip is unnecessary.
    return hashlib.md5(string.encode("utf-8")).hexdigest()
def gtex_location_to_gwas_location_(gtex_location: str) -> str:
    """Converts variant locations in GTEx format to GWAS catalog format.
    i.e. given 'chr1_64764_C_T_b38', returns '1:64764'.
    """
    parts = gtex_location.split("_")
    # Strip the full 'chr' prefix rather than taking the single character
    # at index 3, so multi-digit chromosomes ('chr10'..'chr22') and
    # 'chrX'/'chrY' are handled correctly.
    chrom = parts[0][3:]
    return f"{chrom}:{parts[1]}"
import stat
def node_name(text, n, show_hash):
    """Add symbols to a node's name to differentiate file types.

    :param text: the node's display name
    :param n: node object exposing .hash and .mode (stat-style mode bits)
    :param show_hash: if True, prefix the name with the node's hex hash
    :return: name suffixed with '/' for directories, '@' for symlinks

    NOTE(review): ``n.hash.encode('hex')`` is Python 2 only; under
    Python 3 this raises LookupError -- confirm the target interpreter.
    """
    prefix = ''
    if show_hash:
        prefix += "%s " % n.hash.encode('hex')
    if stat.S_ISDIR(n.mode):
        return '%s%s/' % (prefix, text)
    elif stat.S_ISLNK(n.mode):
        return '%s%s@' % (prefix, text)
    else:
        return '%s%s' % (prefix, text)
def tidyspaces(strng):
    """
    Replace multiple consecutive spaces in a string with a single space
    (leading/trailing whitespace is stripped as well).
    """
    words = strng.split()
    return " ".join(words)
def contains_unquoted_target(x: str,
                             quote: str = '"', target: str = '&') -> bool:
    """
    Checks if ``target`` exists in ``x`` outside quotes (as defined by
    ``quote``). Principal use: from
    :func:`contains_unquoted_ampersand_dangerous_to_windows`.
    """
    inside_quotes = False
    for ch in x:
        if ch == quote:
            # Toggle quoting state on every quote character.
            inside_quotes = not inside_quotes
            continue
        if ch == target and not inside_quotes:
            return True
    return False
import math
def get_uncorrelated_stride(statistical_inefficiency):
    """Returns the integer index stride between uncorrelated samples
    in a time series.
    Parameters
    ----------
    statistical_inefficiency: float
        The statistical inefficiency of the time series.
    Returns
    -------
    int
        The integer index stride between uncorrelated samples
        in a time series.
    """
    # math.ceil already returns an int on Python 3; the extra int() cast
    # in the original was redundant.
    return math.ceil(statistical_inefficiency)
def cepline(qindex, cepstrum, ax, sr=None):
    """Draw a 1-D cepstrum line on an axis.

    :param qindex: quefrency sample indices (x values)
    :param cepstrum: cepstrum amplitudes (y values)
    :param ax: matplotlib Axes to draw on
    :param sr: sample rate; if given, the x axis is shown in seconds
    :return: the Line2D object that was drawn
    """
    if sr is not None:
        # Convert sample indices to seconds for the x axis.
        qaxis = qindex/sr
        xlabel = 'Quefrency (s)'
    else:
        qaxis = qindex
        xlabel = 'Quefrency (sample)'
    line, = ax.plot(qaxis, cepstrum)
    ax.axis('tight')
    ax.set_xlabel(xlabel)
    ax.set_ylabel('Amplitude')
    return line
import re
def get_reference_id(reference):
    """
    Extract reference id ([N])
    :param reference: Any possible reference
    :return: reference id, or -1 when no [N] marker is found
    """
    ref_id = -1
    # Raw string: '\[' in a normal string is an invalid escape sequence
    # (DeprecationWarning, a SyntaxError in future Python versions).
    match = re.search(r'\[[0-9]+\]', reference)
    if match:
        ref_id = int(match.group(0).strip('[]'))
    return ref_id
def triangulateSquares(F,
                       a=[0, 1, 2], b=[2, 3, 0],
                       c=[1, 0, 2], d=[3, 2, 0]
                       ):
    """
    Convert squares to triangles: each quad face is split along one
    diagonal using the vertex-index orders *a* and *b*.
    (*c* and *d* are alternative orderings, currently unused.)
    """
    triangles = []
    for face in F:
        triangles.append([face[i] for i in a])
        triangles.append([face[i] for i in b])
    return triangles
def pytest_parser(stdout, stderr, previous_reports=None):
    """Parse a pytest run's stdout into a pass/fail summary dict.

    Example fail summary_line: =========== 4 failed, 38 passed in 0.10 seconds =============
    Example success summary_line: ========= 21 passed in 0.05 seconds =========

    :param stdout: captured standard output of the pytest run
    :param stderr: captured standard error of the pytest run
    :param previous_reports: unused here; kept for interface compatibility
    :return: dict with the raw streams and a summary of
        passed/failed/total test counts
    """
    lines = stdout.split('\n')
    # The summary is the second-to-last line (output ends with '\n').
    summary_line = lines[-2].split()
    # Token layout after split: ['===...', '21', 'passed', ...] on success,
    # ['===...', '4', 'failed,', '38', 'passed', ...] on failure.
    if summary_line[2].lower() == 'passed':
        passed_tests = int(summary_line[1])
        failed_tests = 0
    else:
        passed_tests = int(summary_line[3])
        failed_tests = int(summary_line[1])
    return {
        'stdout': stdout,
        'stderr': stderr,
        'summary': {
            'passed_tests': passed_tests,
            'failed_tests': failed_tests,
            'total_tests': passed_tests + failed_tests
        }
    }
def pad_or_truncate(some_list, target_len):
    """
    This function shortens or extends a list. When it is extended, extra 0 values are added. This can be helpful to
    simulate what happens after a storm has passed.
    :param some_list: The original list of values
    :param target_len: the wanted length of the list
    :return: a list of values with length of target_len
    """
    # A negative multiplier yields an empty padding list when truncating.
    padding = [0] * (target_len - len(some_list))
    return some_list[:target_len] + padding
def cols_with_missing_values(dataframe):
    """ query a dataframe and find the columns that have missing values"""
    has_missing = dataframe.isnull().any()
    return list(dataframe.columns[has_missing])
import fnmatch
def _fileMatchesPatterns(filename, patterns):
"""Helper function which checks if file matches one of the patterns."""
for pattern in patterns:
if fnmatch.fnmatchcase(filename, pattern):
return True
return False | 8bf47126d315c04b97031e62f55c364ebd9a039f | 91,322 |
def parse_contig_name(name):
    """ Parse the assembler contig name.

    The first whitespace-delimited word is split on '_' into nested
    identifiers of increasing specificity.
    :return: (bc, cluster, gene, isoform) -- all strings
    """
    word = name.split(' ')[0]
    fields = word.split('_')
    bc = fields[0]
    # Bug fix: the original line ended with a stray trailing comma,
    # which made 'cluster' a 1-tuple instead of a string.
    cluster = '_'.join(fields[0:3])
    gene = '_'.join(fields[0:4])
    isoform = '_'.join(fields[0:5])
    return bc, cluster, gene, isoform
def get_text_unit_field(sample, biosample_name, field_to_fetch, is_list=False):
    """
    This function will parse text and unit fields in biosamples
    :param sample: sample to parse (dict with a 'characteristics' key)
    :param biosample_name: name to use in biosample record
    :param field_to_fetch: text or unit to use
    :param is_list: does this record allow to use multiple values
    :return: list of values when is_list, else the first value or ''
    """
    characteristics = sample['characteristics']
    if is_list:
        if biosample_name not in characteristics:
            return []
        return [entry[field_to_fetch] for entry in characteristics[biosample_name]]
    if biosample_name not in characteristics:
        return ''
    return characteristics[biosample_name][0][field_to_fetch]
import io
def _is_real_file(f):
"""Checks if f is backed by a real file (has a fileno)"""
try:
return f.fileno() >= 0
except io.UnsupportedOperation:
return False
except AttributeError:
return False | 2dfa2c61c4e8af1d0455dc2b17942944ab094d15 | 91,327 |
def split_es_url(url):
    """splits ES URL into host index, type"""
    parts = url.split('/')
    return parts
def _PackageIdArgument(x):
"""Convert a string into a package ID while checking its range.
Args:
x: argument string.
Returns:
the package ID as an int, or -1 in case of error.
"""
try:
x = int(x, 0)
if x < 0 or x > 127:
x = -1
except ValueError:
x = -1
return x | ce35dd0521608c7f7200ef0bf4bca94e83f5d741 | 91,340 |
def replace(image, replacement_image):
    """
    Replace all bands in *image* with the bands of *replacement_image*,
    keeping all of *image*'s properties.

    NOTE(review): presumably *image* is an Earth Engine ee.Image --
    select([]) keeps the metadata while dropping every band -- confirm
    against the callers.
    """
    return image.select([]).addBands(replacement_image)
def get_backend_bucket_outputs(res_name, backend_name):
    """ Creates outputs for the backend bucket. """
    self_link = '$(ref.{0}.selfLink)'.format(res_name)
    return [
        {'name': 'name', 'value': backend_name},
        {'name': 'selfLink', 'value': self_link},
    ]
import torch
def broadcast_fn(fn, yp, yt):
    """Broadcast a distance function over two tensors."""
    expanded_p, expanded_t = torch.broadcast_tensors(yp, yt)
    return fn(expanded_p, expanded_t)
def process_jobid(results):
    """Process Job ID Results

    Args:
        results (Element): XML output containing the job ID

    Returns:
        str: The job ID number, taken from the <result><job> element text
    """
    job_element = results.find('./result/job')
    return job_element.text
def FormatSeconds(secs):
    """Formats seconds for easier reading.

    @type secs: number
    @param secs: Number of seconds

    @rtype: string
    @return: Formatted seconds (e.g. "2d 9h 19m 49s")

    """
    pieces = []
    remaining = round(secs, 0)
    if remaining > 0:
        # Negative values would be a bit tricky
        for suffix, unit_seconds in (("d", 86400), ("h", 3600), ("m", 60)):
            whole, remaining = divmod(remaining, unit_seconds)
            # Once a larger unit has been emitted, keep emitting zeros too.
            if whole or pieces:
                pieces.append("%d%s" % (whole, suffix))
    pieces.append("%ds" % remaining)
    return " ".join(pieces)
def _optional_dict_to_list(param_value_dict, key_string='id', value_string='stringValue'):
"""
If given a dictionary, convert it to a list of key-value dictionary entries.
If not given a dictionary, just return whatever we were given.
"""
if not isinstance(param_value_dict, dict):
return param_value_dict
value_objects = []
for param_id, value in param_value_dict.items():
value_objects.append({key_string: param_id, value_string: value})
return value_objects | 23a0576ca63f936bf7c73419c07b530086142bda | 91,361 |
def mod_eikonal(pi, pj, pr):
    """Modified eikonal factor for soft particle with momentum pr
    emitted from the dipole with momenta pi and pj.
    This is obtained starting from the eikonal and:
    - ignoring 1 / sir, which is already included in the normalisation factor;
    - multiplying by the partial fraction sjr / (sir + sjr) to regulate for sjr -> 0.
    """
    s_ij = pi.dot(pj)
    s_ij_r = pr.dot(pi + pj)
    return 2 * s_ij / s_ij_r
def hex_reflect_x(x, y, z):
    """Reflects the given hex through the y-axis
    and returns the co-ordinates of the new hex"""
    reflected = (-x, -z, -y)
    return reflected
def countComponents1(n: int, edges: list[list[int]]) -> int:
    """
    Quick-find based implementation.

    Args:
        n (int): number of nodes
        edges (list[list[int]]): list of edges

    Returns:
        int: number of connected components
    """
    # Each node starts in its own group, labelled by its own index.
    group = list(range(n))
    for edge in edges:
        left, right = edge[0], edge[1]
        absorbed, kept = group[right], group[left]
        if absorbed != kept:
            # Relabel every member of the absorbed group in one pass.
            group = [kept if g == absorbed else g for g in group]
    return len(set(group))
import re
def clean_paragraph(p):
    """ Curate paragraph object before save, in particular deal with hyphen and spaces.

    :param p: dict-like object with a "paragraph" text entry; its text is
        rewritten in place and the same object is returned.
    :return: ``p`` with hyphenation artifacts repaired.
    """
    # Attach together words (>= 2 char to avoid things like A minus, B minus...)
    # that may have been split at end of row like géographie = "géo - graphie"
    # real separator have been turned into longer hyphen during parsing to avoid confusion with those.
    # Accents accepted thks to https://stackoverflow.com/a/24676780/8086033
    # Pattern: 2+ chars from the apostrophe/hyphen/accented-latin set,
    # case-insensitive; (?![×Þß÷þø]) excludes the non-letter codepoints
    # that fall inside the À-ÿ range.
    w_expr = "(?i)(?:(?![×Þß÷þø])[-'a-zÀ-ÿ]){2,}"
    # "word - word": end-of-line hyphenation artifact -> merge into one word.
    p["paragraph"] = re.sub("{} - {}".format(w_expr, w_expr),
                            lambda x: x.group(0).replace(' - ', ''),
                            p["paragraph"])
    # reattach words that were split, like Fort-Cros = "Fort- Cros"
    # ("word- word": keep the hyphen, drop only the stray space).
    p["paragraph"] = re.sub("{}- {}".format(w_expr, w_expr),
                            lambda x: x.group(0).replace('- ', '-'),
                            p["paragraph"])
    return p
def generate_experiment_info(config):
    """
    Generate experiment info, to be displayed by GPS Trainig GUI.
    Assumes config is the config created in hyperparams.py
    """
    common = config['common']
    algorithm = config['algorithm']

    def _cost_name(cost_config):
        # One-line label for a cost entry; CostSum lists its component costs.
        label = cost_config['type'].__name__
        if label == 'CostSum':
            components = ', '.join(
                sub_cost['type'].__name__ for sub_cost in cost_config['costs'])
            label += '(%s)' % components
        return label

    cost_config = algorithm['cost']
    if isinstance(cost_config, list):
        # A list of costs: only the first entry is summarised, as before.
        algorithm_cost_type = _cost_name(cost_config[0])
    else:
        algorithm_cost_type = _cost_name(cost_config)

    if 'dynamics' in algorithm:
        alg_dyn = str(algorithm['dynamics']['type'].__name__)
    else:
        alg_dyn = 'None'

    return (
        'exp_name: ' + str(common['experiment_name']) + '\n' +
        'alg_type: ' + str(algorithm['type'].__name__) + '\n' +
        'alg_dyn: ' + alg_dyn + '\n' +
        'alg_cost: ' + str(algorithm_cost_type) + '\n' +
        'iterations: ' + str(config['iterations']) + '\n' +
        'conditions: ' + str(algorithm['conditions']) + '\n' +
        'samples: ' + str(config['num_samples']) + '\n'
    )
def product_of_list(l):
    """
    Recursively returns the product of all numbers in a list.

    Bug fix: the previous implementation consumed the caller's list with
    ``pop()``, leaving it empty after the call; the input is now untouched.

    @param {list} list of numbers
    @return {number} product of all numbers in list, or 0 for an empty list
    """
    # Recursive helper over a slice; an empty slice multiplies out to 1.
    def inner(items):
        if not items:
            return 1
        return items[0] * inner(items[1:])
    # Preserve historical behavior: an empty input yields 0, not 1.
    if not l:
        return 0
    return inner(l)
def _get_data_var_message(obj):
"""
Get message for named data variables.
"""
try:
return f" Data variable: {obj.name}" if obj.name else ""
except AttributeError:
return "" | 693cb97eb18eb8a974f3af503920650ae549bc84 | 91,395 |
def days_between(d1, d2):
    """ Count days between two date objects. Returns int """
    delta = d2 - d1
    return abs(delta.days)
def unique(in_tuple: tuple) -> tuple:
    """Return a tuple with only unique elements (order not guaranteed)."""
    deduplicated = set(in_tuple)
    return tuple(deduplicated)
def speaker_listing(talk):
    """ Return a list of the speakers' names of the talk."""
    names = []
    for speaker in talk.get_all_speakers():
        user = speaker.user
        names.append(u'{} {}'.format(user.first_name, user.last_name))
    return names
def read_metaphlan_markers_info(filename):
    """ Reads the MetaPhlAn markers_info.txt file.
    MetaPhlAn's OTU analogous are 'clades'. Currently, they have around 8900.
    A 'clade' is composed of one or many (sub)sequences of specific marker
    genes. Those marker genes come from three sources: 1) genbank: "^gi|",
    2) gene: "^GeneID:", and 3) NCBI nr: "^NC_".
    Parameters
    ----------
    filename : str
        Path to the filename 'markers_info' of MetaPhlAn.
    Returns
    -------
    A dict with an entry for each 'clade'. Their values are dicts themselves,
    with keys that refer to one of the three sequence sources. And their values
    are sets of marker gene IDs. For example:
    's__Escherichia_phage_vB_EcoP_G7C': {'GeneID': {'11117645', '11117646'}}
    """
    clades = {}
    with open(filename, 'r') as handle:
        for line in handle:
            # Classify the marker source from the line prefix and pull
            # the accession out of the first tab-separated field.
            if line.startswith('gi|'):
                source = 'gi'
                accession = line.split('\t')[0].split('|')[1]
            elif line.startswith('GeneID:'):
                source = 'GeneID'
                accession = line.split('\t')[0].split(':')[1]
            elif line.startswith('NC_'):
                source = 'NC'
                accession = line.split('\t')[0]
            else:
                continue
            # The clade name is embedded in the python-dict-like metadata.
            clade = line.split("clade': '")[1].split("'")[0]
            clades.setdefault(clade, {}).setdefault(source, {})[accession] = True
    # Convert the accession dicts (used for de-duplication) into sets.
    for per_source in clades.values():
        for source in per_source:
            per_source[source] = set(per_source[source].keys())
    return clades
def count_lines(path) -> int:
    """Return the line count of a given path."""
    with open(path) as handle:
        total = 0
        for _ in handle:
            total += 1
    return total
def f1_score(tp, fp, tn, fn):
    """ Computes F1-score, see http://en.wikipedia.org/wiki/F1_score
    :param tp: True positives (TP)
    :param fp: False positives (FP)
    :param tn: True negatives (TN) -- unused by the F1 formula, kept for a uniform signature
    :param fn: False negatives (FN)
    :return: F1-score in [0,1]
    """
    numerator = 2.0 * tp
    return numerator / (numerator + fn + fp)
import json
def load_results(results_file):
    """Load the results of conversion; {} when the file does not exist."""
    if not results_file.exists():
        return {}
    with open(results_file, "r", encoding="utf8") as handle:
        return json.load(handle)
def _run_analysis_alias(protocol_analyzer):
""" An alias function to pass to multiprocessing threads
This function is used internally in the BilayerAnalyzer.run_analysis_mp
function to pass to the multiprocessing threads.
"""
print(protocol_analyzer)
return protocol_analyzer | 5ab7f03127703a7d69139378f2f34de022aa3302 | 91,415 |
def get_unique_list(items):
    """ Return a sorted de-duplicated list of items from input list.

    Fixes two defects of the previous version: the input list is no longer
    sorted in place (the caller's data is left untouched), and an empty
    string as the first sorted item is no longer silently dropped (the old
    ``previous = ''`` sentinel collided with real values).

    :type items: List(str)
    :param items: The input list of strings
    :return: Sorted list with duplicates removed
    """
    result = []
    previous = object()  # sentinel that never compares equal to a real item
    for item in sorted(items):
        if item != previous:
            result.append(item)
            previous = item
    return result
import itertools
def flatten_list(the_list):
    """ take a list of lists and flatten it to just a list """
    if the_list is None:
        return []
    return list(itertools.chain.from_iterable(the_list))
def _get_title_maybe(chap):
"""
Chapter to title (string) or None
"""
if "tags" not in chap:
return None
return chap["tags"].get("title", None) | 6b99aa5501753d1295caaf363b73b57b3dafb9d3 | 91,428 |
def is_permutation(xs, ys):
    """Returns True iff the two lists are permutations of each other."""
    normalized_a = sorted(xs)
    normalized_b = sorted(ys)
    return normalized_a == normalized_b
import re
def check_contain_chinese(string):
    """Check if string has Chinese.

    Args:
        string (str): String to be checked.

    Returns:
        bool: True means there is, False means no.
    """
    # CJK Unified Ideographs block.
    return re.search('[\u4e00-\u9fa5]+', string) is not None
def get_task_dimensions(task):
    """
    Get the number of channels, channel size (height * width) and number of classes of a dataset.
    :param task: str, '*MNIST' or 'CIFAR*' for any string *
    :return: int, int, int; in_channels, input_size, n_classes
    """
    if 'MNIST' in task:
        # Grayscale 28x28, ten digit classes.
        return 1, 28 * 28, 10
    if 'CIFAR' in task:
        # RGB 32x32; the class count is the suffix after 'CIFAR'.
        return 3, 32 * 32, int(task[5:])
    raise ValueError('Task must be either *MNIST or CIFAR*, but %s was given' % task)
def to_bit_list(val, width=16):
    """Convert a number to a list of bits, LSB first"""
    return [(val >> n) & 1 for n in range(width)]
def SetOption(*_args, **_kw):
    """Fake SetOption: accepts any arguments and always reports success."""
    return True
def parse(stdin):
    """
    Parse an input data set into a list of ints, one per line.
    """
    lines = stdin.read().strip().split("\n")
    return list(map(int, lines))
def comp_height_opening(self, Ndisc=200):
    """Compute the height of the opening area (Hslot - Hactive)

    Parameters
    ----------
    self : Slot
        A Slot object
    Ndisc : int
        Number of point to discretize the lines

    Returns
    -------
    Hwind: float
        Height of the opening area [m]
    """
    total_height = self.comp_height(Ndisc=Ndisc)
    active_height = self.comp_height_active(Ndisc=Ndisc)
    return total_height - active_height
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.