content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
from datetime import datetime
def _slurm_time_trans(timestr, _format="%Y-%m-%dT%H:%M:%S"):
"""
Fault tolerant time string translate to time obj and time stamps
:param timestr: str.
:param _format: Optional[str]. Default as "%Y-%m-%dT%H:%M:%S" used in sacct output
:return: Tuple[Datetime, float]. Datetime obj and timestamp.
"""
try:
timeob = datetime.strptime(timestr, _format)
timets = timeob.timestamp()
except ValueError:
timeob = timestr
timets = timeob
return timeob, timets | 9a2fced939b6019a9f46d070ffc4c2ccc03c2214 | 102,623 |
from typing import Tuple
def get_transfer_distance(
    client,
    start,
    end,
    mode='driving',
) -> Tuple[int, int]:
    """Get the distance and duration of a transfer.

    Args:
        client (Client): A googlemaps API client.
        start (string): The starting point, either lat,long as a string or an address.
        end (string): The ending point, either lat,long as a string or an address.
        mode (string): The transporation method, driving by default.

    Returns:
        (int, int): Tuple of the distance value and the duration in seconds,
        taken from the first element of the distance matrix response.
        NOTE(review): the API 'distance' value appears to be meters, not
        kilometers as previously documented -- confirm with callers.
    """
    matrix = client.distance_matrix(start, end, mode=mode)
    first_element = matrix['rows'][0]['elements'][0]
    return first_element['distance']['value'], first_element['duration']['value']
def get_size_string(size):
    """Return string representation of bytes.

    Formats with one decimal place in the largest unit whose ratio
    exceeds 1; anything smaller than ~2 GB falls through to MB.
    """
    size = float(size)
    # Largest units first; MB is the unconditional floor.
    for power, unit in ((5, 'PB'), (4, 'TB'), (3, 'GB')):
        scale = 1024 ** power
        if size / scale > 1:
            return '{:.1f} {}'.format(size / scale, unit)
    return '{:.1f} MB'.format(size / 1024 ** 2)
import json
def dump_json(file, array):
    """Dumps a dict (any JSON-serializable object) to a JSON file.

    Returns the (None) result of ``json.dump`` for compatibility.
    """
    with open(file, 'w') as handle:
        return json.dump(array, handle)
def editTransSplits(splits, minTransLen):
    """
    Called by transOrigin(), this function takes the splits that could be combined to create the current peptide,
    and sorts them in the required order for the rest of the algorithm to work. We want the longest split to be first
    in each pair, and we want the tuple pairs which have both splits with length less than minTransLen to be last
    in the list.

    :param splits: a list of tuples, where each tuple is a possible pair of subsequences which could be combined to
        make the peptide.
    :param minTransLen: the minimum length that a cleavage must be for its location to be reported in the origin data
        of a trans spliced peptide.
    :return splitsNew: splits sorted so that the longest split in each pair appears first, and the pairs with both
        splits less than minTransLen at the end of the list.
    """
    preferred = []
    deferred = []
    for pair in splits:
        # Longest split first so it is checked first downstream.
        # BUGFIX: the previous code used sorted(key=len) without
        # reverse=True, which put the SHORTEST split first, contradicting
        # the documented contract.
        pair = sorted(pair, key=len, reverse=True)
        # After the descending sort pair[0] is the longest split; if even
        # it is below minTransLen, both splits are, so the pair is only
        # tried when no earlier pair matched.
        if len(pair[0]) < minTransLen:
            deferred.append(pair)
        else:
            preferred.append(pair)
    return preferred + deferred
def is_test_name(name):
    """
    Returns whether the given name should be collected as a test.

    Parameters
    ----------
    name : `str`
        The test's name.

    Returns
    -------
    is_test_name : `bool`
    """
    # Exactly 'test', or anything prefixed 'test_'.
    return name == 'test' or name.startswith('test_')
def is_triangular(number: int) -> bool:
    """
    Returns True if number is a triangular number:
    can be written as (s+1)*s/2
    https://oeis.org/A000217

    Uses the closed form: n is triangular iff 8*n + 1 is a perfect
    square. Runs in O(1) instead of summing integers up to n.
    Negative numbers are never triangular.
    """
    from math import isqrt  # local import keeps this snippet self-contained

    if number < 0:
        return False
    discriminant = 8 * number + 1
    root = isqrt(discriminant)
    return root * root == discriminant
from datetime import datetime
def _ms_to_iso8601(milliseconds):
"""Convert ms to ISO8601"""
return datetime.fromtimestamp(int(milliseconds)).isoformat() | 0fb078d13aa322b9e809d17912004720ea1b26e9 | 102,645 |
from typing import Tuple
import re
import fnmatch
def compile_patterns(patterns: Tuple[str, ...], flags=re.IGNORECASE, use_regex=False):
    """Compile regular expression (or glob) patterns with `re.compile`.

    Globs are translated to regexes first; all patterns are joined into
    one alternation.
    """
    if not use_regex:
        # Translate each glob into an equivalent regex before joining.
        patterns = tuple(fnmatch.translate(pattern) for pattern in patterns)
    return re.compile("|".join(patterns), flags=flags)
def trim_dataset(mat, batch_size):
    """
    trims dataset to a size that's divisible by BATCH_SIZE

    Drops the trailing rows that do not fill a whole batch; returns the
    input unchanged when it already divides evenly.
    """
    leftover = mat.shape[0] % batch_size
    return mat[:-leftover] if leftover else mat
def get_url_at_page_number(url: str, counter: int) -> str:
    """Retrieves the link to the next result page of a query.

    Paged URLs insert ``page/<n>/`` between the root URL and the query
    string of the first page's URL.
    """
    # Every result page shares this root.
    root_url = "https://www.hearthstonetopdecks.com/cards/"
    # Whatever followed the root on the first page (the query options).
    query_url = url.split(root_url)[1]
    # Page marker that subsequent pages carry between root and query.
    page_segment = f"page/{counter}/"
    return root_url + page_segment + query_url
def read_solver_outputs(solver, folder):
    """read solver iteration outputs from file.

    Expects ``<solver>_iter_outputs.txt`` inside *folder* (a pathlib
    Path) and returns its lines, newlines included.
    """
    output_path = folder / (solver + "_iter_outputs.txt")
    with open(output_path, "r", encoding="utf-8") as handle:
        return handle.readlines()
import io
def image_data(img, format, **save_options):
    """Save a PIL Image instance and return its byte contents."""
    buffer = io.BytesIO()
    img.save(buffer, format, **save_options)
    # getvalue() returns the full buffer without needing a seek.
    return buffer.getvalue()
def f(x, r):
    """
    Implement the right-hand-side of the differential equation
        x' = r * x - x / (1 + x**2)

    NOTE(review): an earlier docstring stated ``x / (1 + x)`` while the
    code computes ``x / (1 + x**2)`` -- confirm which form is intended.

    :param x: state value (scalar or array supporting arithmetic)
    :param r: growth-rate parameter
    :return: the derivative x' evaluated at (x, r)
    """
    return r * x - x / (1 + x**2)
def convert_to_dict(passport: str) -> dict:
    """Convert a single passport row from the input into a dict.

    The first character of *passport* is dropped, the remainder is split
    on spaces, and each "key:value" token becomes one entry.
    """
    tokens = (token.split(":") for token in passport[1:].split(" "))
    return {parts[0]: parts[1] for parts in tokens}
def parse_list(list_str):
    """Parse comma-separated list.

    Whitespace around items is stripped; empty items are dropped. A
    blank input yields an empty list.
    """
    if not list_str.strip():
        return []
    stripped = (piece.strip() for piece in list_str.split(','))
    return [token for token in stripped if token]
def check_possible_win(piece, win, board_state):
    """Returns a bool depending on if a win with a
    given piece is on the board"""
    # The win exists only if every square in the combo is marked with
    # the given piece.
    return all("{}{}".format(piece, square) in board_state for square in win)
import re
import requests
def isbn(bot, trigger):
    """Look up a book by its ISBN.

    Replies with a prompt when the argument does not look like an
    ISBN-10/13; otherwise resolves the book's Open Library URL (via
    redirect) and says it in the channel.
    """
    if not re.match(
        r"((978[\--– ])?[0-9][0-9\--– ]{10}[\--– ][0-9xX])|((978)?[0-9]{9}[0-9Xx])",
        trigger.group(2)):
        return bot.reply("I need a valid ISBN.")
    isbn_sanitized = trigger.group(2).replace(" ", "").replace("-", "")
    url = f"https://openlibrary.org/isbn/{isbn_sanitized}"
    try:
        book = requests.get(url).url
        bot.say(book)
    except requests.RequestException:
        # BUGFIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit; only network/HTTP errors belong here.
        bot.reply("Error reaching API, probably.")
import re
def split_problematic_endpoints_line(line):
    """
    If the line of host contains more than one ":",
    for example: 10.99.184.69:900010.37.170.125:9006
    this splits the line and return a list of correct endpoints

    Args:
        ``line``: the problematic line which contains more than one endpoint string.
    Returns:
        the split list of the problematic line, which has correct endpoint strings.
    """
    # NOTE(review): assumes every port in the line has the same number of
    # digits as the final one -- confirm with the upstream data format.
    colon_parts = line.strip().split(":")
    # Width (digit count) of the trailing port; reused for every endpoint.
    offset = len(colon_parts[-1])
    # Index of every ":" in the raw line.
    colon_positions = [m.start() for m in re.finditer(':', line)]
    start = 0
    split_parts = []
    for colon_position in colon_positions:
        # Each endpoint ends `offset` characters after its own colon.
        end = colon_position + offset + 1
        split_part = line[start:end]
        split_parts.append(split_part)
        start = end
    return split_parts
def get_max_label_map_index(label_map):
    """Get maximum index in label map.

    Args:
        label_map: a StringIntLabelMapProto

    Returns:
        an integer: the largest ``id`` among the map's items
    """
    # Generator avoids materializing an intermediate list.
    return max(item.id for item in label_map.item)
def list_unique(items):
    """ Given a list, return a new list with the unique items in order from the original list

    Uniqueness is judged on ``str(item)``, so e.g. 1 and "1" collide.
    """
    seen = set()
    ordered = []
    for item in items:
        key = str(item)
        if key not in seen:
            seen.add(key)
            ordered.append(item)
    return ordered
import torch
def tile_and_concat(tensor: torch.Tensor, vector: torch.Tensor) -> torch.Tensor:
    """Merge 1D and 2D tensor (use to aggregate feature maps and representation
    and compute local mutual information estimation)

    Args:
        tensor (torch.Tensor): (B, C, H, W) feature maps
        vector (torch.Tensor): (B, F) representation

    Returns:
        torch.Tensor: (B, C + F, H, W) tensor with the vector tiled over
        every spatial location and concatenated along channels.
    """
    batch, _, height, width = tensor.size()
    # Add singleton spatial dims, then broadcast over the H x W grid.
    tiled = vector[:, :, None, None].expand(batch, vector.size(1), height, width)
    return torch.cat([tensor, tiled], dim=1)
def get_response_content(content_items):
    """
    Wrap an iterable of content items in a response dict.

    :param content_items: iterable of content items
    :return:
        {
        "content":[
        citem1,citem2,...]
        }
    """
    return {'content': list(content_items)}
import json
def get_config(server, path="connection.config"):
    """
    Static method to get the configuration for the server connection

    :param server: name of server to connect to
    :type server: String
    :param path: path of the configuration file (JSON)
    :type path: String
    :return: hostname, port
    :rtype: String, Int
    """
    with open(path, 'r') as handle:
        config = json.load(handle)
    entry = config[server]
    return entry["hostname"], entry["port"]
def trimBytes(bs):
    """ Trims trailing zeros in a byte string.

    NOTE(review): despite the name, this truncates at the FIRST NUL byte,
    discarding anything after it -- confirm that matches caller intent.
    """
    # split(maxsplit=1) stops at the first NUL; no NUL returns bs whole.
    return bs.split(b'\0', 1)[0]
def get_function_names(module, prefix):
    """Get function names with a certain prefix from a module.

    NOTE(review): despite the name, this returns ALL attribute names
    with the prefix, not just functions.
    """
    return list(filter(lambda attribute: attribute.startswith(prefix), dir(module)))
def be16_to_native(data, signed=False) -> int:
    """ Unpacks a bytes-like object respecting big-endianness of outside world and returns an int according to signed.

    Args:
        data: indexable whose element [1] is the bytes-like payload;
            only its first two bytes are used. (Shape assumed from the
            original indexing -- confirm with callers.)
        signed (bool): Whether or not the payload is signed
    """
    word = data[1][:2]
    return int.from_bytes(word, byteorder='big', signed=signed)
def nonZeroMean(arr):
    """Takes the mean of an array not counting None or 0 values.

    Returns 0.0 (rounded) when no countable values exist.
    """
    meaningful = [value for value in arr if value is not None and value != 0]
    # Guard against division by zero while keeping the 0-result behavior.
    divisor = len(meaningful) or 1
    return round(sum(meaningful) / divisor, 2)
def calc_zeta_induced_quasisteady(E, x):
    """
    Induced zeta potential (quasi-steady limit): the product E * x.
    """
    return E * x
def get_entity_by_type_and_id(entity_type, entity_id):
    """Return the datastore object with the given type and id if it exists.

    Falsy, non-numeric, and zero ids yield None without touching the
    datastore.
    """
    raw = str(entity_id) if entity_id else ""
    if not raw.isdigit():
        return None
    numeric_id = int(raw)
    if numeric_id == 0:
        return None
    return entity_type.get_by_id(numeric_id)
def GetNodeText(node):
    """Returns the node text after stripping whitespace; '' when empty/None."""
    text = node.text
    if not text:
        return ''
    return text.strip()
def load_patients_ids(ids_file_path):
    """Loads patient ids from a file in which ids are separated by a new line."""
    with open(ids_file_path) as handle:
        # Iterating the handle yields lines; strip drops the newlines.
        return [raw_line.strip() for raw_line in handle]
def _get_variance(color_count: dict, average: float) -> float:
"""
Compute the variance
:param color_count: Number of each color in the tile
:param average: Average pixel color in the tile
:return: Variance
"""
variance = 0.0
for pixel in color_count:
a = pixel - average
variance += color_count[pixel] * a * a
return variance | e49ec26520a7fe8b8ca2a6eb108c567d41f2d61f | 102,717 |
def is_valid_password(password):
    """
    Validates password complexity
    - min length 6
    - min one upper case
    - min one lower case
    - min one special char (any non-alphanumeric character)

    :param password: the password in question
    :return: True when all rules hold, False otherwise
    """
    # A single character can satisfy at most one of these, so the three
    # scans are independent of each other.
    has_upper = any(char.isalnum() and char.isupper() for char in password)
    has_lower = any(char.isalnum() and char.islower() for char in password)
    has_special = any(not char.isalnum() for char in password)
    return len(password) >= 6 and has_upper and has_lower and has_special
def clean_record_id(seq_record):
    """
    cleans up the record ID if there is a "/"

    :return: a str with the portion before the first "/", or the raw id
        unchanged when no "/" is present
    """
    record_id = seq_record.id
    if "/" not in record_id:
        return record_id
    return str(record_id).partition('/')[0]
import math
def dist(p1, p2):
    """
    Calculates the euclidean distance between two cartesian points given as tuples

    :param p1: Point 1 as a tuple
    :param p2: Point 2 as a tuple
    :return: Distance between points 1 and 2
    """
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(p1, p2)))
def tuples_to_spans(tree):
    """
    Returns list of spans, that are (start, size).

    *tree* is a nested structure of tuples/lists whose leaves are
    strings; each leaf occupies one position. One (start, size) pair is
    recorded for every internal node, in post-order (children first).
    """
    result = []
    def helper(tr, pos=0):
        # Leaf: a single token occupying exactly one position.
        if isinstance(tr, str):
            return 1
        size = 0
        for x in tr:
            # Each child starts right after its preceding siblings.
            subsize = helper(x, pos=pos+size)
            size += subsize
        # Record this subtree's span after its children (post-order).
        result.append((pos, size))
        return size
    helper(tree)
    return result
def untabify(text, width):
    """Replace tabs with spaces in text.

    Each tab expands to enough spaces to reach the next multiple of
    *width* display columns; newlines reset the column counter.
    """
    def next_stop(p):
        # First tab stop strictly after column p.
        return width * ((p + width) // width)
    def pad(p):
        # Spaces needed to advance from column p to the next tab stop.
        return ' ' * (next_stop(p) - p)
    out = list(text)
    pos = 0  # current display column
    for cur, c in enumerate(out):
        if c == '\t':
            out[cur] = pad(pos)
            # Advance by the expansion width, landing exactly on the stop.
            pos += len(out[cur])
        elif c == '\n':
            pos = 0
        else:
            pos += 1
    return ''.join(out)
def split_filters(name):
    """
    Split a lookup expression into its name and "|"-separated filters.

    Examples:
        >>> split_filters("a.b.c")
        ('a.b.c', [])
        >>> split_filters("a.b.c|upper|strict")
        ('a.b.c', ['upper', 'strict'])
        >>> split_filters("|upper|strict")
        ('', ['upper', 'strict'])
        >>> split_filters("")
        ('', [])
    """
    base, separator, remainder = name.partition("|")
    if not separator:
        return base, []
    return base, remainder.split("|")
from typing import Tuple
def get_fields_for_plot(cloudnet_file_type: str) -> Tuple[list, int]:
    """Return list of variables and maximum altitude for Cloudnet quicklooks.

    Args:
        cloudnet_file_type (str): Name of Cloudnet file type, e.g., 'classification'.

    Returns:
        tuple: 2-element tuple containing feasible variables for plots
        (list) and maximum altitude (int).

    Raises:
        NotImplementedError: For an unknown file type.
    """
    # Plot variables keyed by product type.
    fields_by_type = {
        "categorize": [
            "Z",
            "v",
            "width",
            "ldr",
            "v_sigma",
            "beta",
            "lwp",
            "Tw",
            "radar_gas_atten",
            "radar_liquid_atten",
        ],
        "classification": ["target_classification", "detection_status"],
        "iwc": ["iwc", "iwc_error", "iwc_retrieval_status"],
        "lwc": ["lwc", "lwc_error", "lwc_retrieval_status"],
        "model": ["cloud_fraction", "uwind", "vwind", "temperature", "q", "pressure"],
        "lidar": ["beta", "beta_raw", "depolarisation", "depolarisation_raw"],
        "mwr": ["lwp"],
        "radar": ["Zh", "v", "width", "ldr", "sldr"],
        "disdrometer": ["rainfall_rate", "n_particles"],
        "drizzle": ["Do", "drizzle_N"],
    }
    # Most products plot up to 12 km; a couple use a lower ceiling.
    max_alt_by_type = {"lwc": 6, "drizzle": 4}
    if cloudnet_file_type not in fields_by_type:
        raise NotImplementedError(cloudnet_file_type)
    return fields_by_type[cloudnet_file_type], max_alt_by_type.get(cloudnet_file_type, 12)
def qsize(queue):
    """Get the (approximate) queue size where available.

    Parameters
    ----------
    queue : :class:`queue.Queue`
        Input queue.

    Returns
    -------
    int
        Queue size, -1 if the `qsize` method isn't implemented (OS X).
    """
    result = -1
    try:
        result = queue.qsize()
    except NotImplementedError:
        # macOS multiprocessing queues raise here; keep the sentinel.
        pass
    return result
def pad(f_bytes, mod_len):
    """ Pads the file bytes to the nearest multiple of mod_len """
    # NOTE(review): pads with the ASCII character b'0' (0x30), not NUL
    # bytes -- confirm consumers expect '0' characters.
    # NOTE(review): when len(f_bytes) is already a multiple of mod_len,
    # a FULL extra block of mod_len bytes is appended (mod_len - 0 ==
    # mod_len). That matches block-cipher-style padding, not "no-op when
    # aligned" -- verify this is intended.
    return f_bytes + (b'0' * (mod_len - len(f_bytes) % mod_len))
def swap_bytes(data_str):
    """Accepts string with hex, returns integer with byte order swapped for CAN.

    Treats the value as a 32-bit word and reverses its four bytes; bits
    above bit 31 are ignored, as before.
    """
    value = int(data_str, 16)
    byte0 = value & 0xFF
    byte1 = (value >> 8) & 0xFF
    byte2 = (value >> 16) & 0xFF
    byte3 = (value >> 24) & 0xFF
    return (byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3
def set_config_var(from_file, from_env, default):
    """
    Set a configuration value based on the hierarchy of:
    default => file => env

    :param from_file: value from the configuration file
    :param from_env: value from the environment
    :param default: default configuration value
    :return: value to use as configuration
    """
    # Highest-priority non-None source wins; env beats file beats default.
    for candidate in (from_env, from_file):
        if candidate is not None:
            return candidate
    return default
import six
def isstring(obj):
    """Python 2/3 compatible string check"""
    # six.string_types covers both str and (on Py2) unicode; isinstance
    # accepts the tuple directly.
    return isinstance(obj, six.string_types)
def ids_from_sentence(vocabulary, sentence):
    """
    Convenience method, for converting a sequence of words to ids.

    :param vocabulary: Language, object of the language to use the look up of.
    :param sentence: string, a tokenized sequence of words.
    :return: list, containing the ids (int) of the sentence in the same order.
    """
    tokens = (token.rstrip() for token in sentence.strip().split())
    return [vocabulary(token) for token in tokens if token != '']
def area_over_choked_area(
    mach,
    gamma=1.4
):
    """
    Gives A/A^* (where A^* is "A-star"), the ratio of cross-sectional flow area to the cross-sectional flow area that would result in choked (M=1) flow.

    Applicable to 1D isentropic nozzle flow.

    Args:
        mach: Mach number [-]
        gamma: The ratio of specific heats. 1.4 for air across most temperature ranges of interest.

    Returns:
        A / A^* [-], dimensionless area ratio. Note: division by ``mach``
        means mach must be nonzero.
    """
    # Shorthands for gamma +/- 1 used repeatedly in the exponents.
    gp1 = gamma + 1
    gm1 = gamma - 1
    return (
        (gp1 / 2) ** (-gp1 / (2 * gm1)) *
        (1 + gm1 / 2 * mach ** 2) ** (gp1 / (2 * gm1)) / mach
    )
def split_training_test(document_pairs):
    """
    Given a list of things, split them into training and test.
    Returns a pair of lists: training, test.
    Simplest split: every 10th thing is in the test set.
    """
    training, test = [], []
    for index, pair in enumerate(document_pairs):
        # Indices 9, 19, 29, ... go to the test set.
        bucket = test if index % 10 == 9 else training
        bucket.append(pair)
    return training, test
def from_disk(session, schema, path, depth=0, extension=None):
    """Return a DataFrame object from files read.

    Arguments:
    session -- SparkSession object
    schema -- data types for each column
    path -- location where files are stored

    Keyword arguments:
    depth -- depth of directory tree underneath path to data files
    extension -- file extension to read (json or parquet)

    NOTE(review): the default ``extension=None`` makes the path
    concatenation below raise a TypeError -- callers must always pass
    'json' or 'parquet'; consider making the parameter required.
    NOTE(review): an unsupported extension prints an error and returns
    None rather than raising -- verify callers handle a None result.
    """
    # Treat non-positive depth as a single directory level.
    depth = depth if depth > 0 else 1
    # One "*" wildcard per directory level, then the extension suffix.
    wild_card_path = path + '/'.join(['*'for _ in range(depth)]) + '.' + extension
    df = None
    if extension == 'json':
        df = session.read.json(
            path=wild_card_path,
            schema=schema,
            multiLine=True,
            encoding='UTF-8',
            mode='DROPMALFORMED')
    elif extension == 'parquet':
        df = session.read.parquet(wild_card_path)
    else:
        print(f'ERROR: {extension} files are not supported')
    return df
import random
def gen_rand_seq(length):
    """
    Generate a random DNA sequence of defined length

    Parameters
    ----------
    length : int

    Returns
    ---------
    seq : string
        Random DNA sequence over the alphabet A/T/C/G
    """
    alphabet = ('A', 'T', 'C', 'G')
    # One random.choice call per position, as before.
    return ''.join(random.choice(alphabet) for _ in range(length))
def vhost_delete_controller(client, ctrlr):
    """Delete vhost controller from configuration.

    Args:
        ctrlr: controller name to remove

    Returns:
        The RPC client's response for 'vhost_delete_controller'.
    """
    return client.call('vhost_delete_controller', {'ctrlr': ctrlr})
from pathlib import Path
import yaml
def get_fhir_version_from_sushi_config(base_path: Path) -> str:
    """
    Get the FHIR version from the SUSHI config file.

    :param base_path: directory containing ``sushi-config.yaml``
    :return: FHIR version string (the ``fhirVersion`` key)
    :raises FileNotFoundError: when the config file is missing
    """
    config_path = base_path / "sushi-config.yaml"
    if not config_path.exists():
        raise FileNotFoundError(f"Could not find {config_path}")
    with open(config_path, "r") as handle:
        config = yaml.safe_load(handle)
    return config["fhirVersion"]
def _list_product(lst):
"""Computes product of element of the list."""
result = 1
for item in lst:
result *= item
return result | e32e048c9273734226c3ba417e8f1fc417f0f8da | 102,764 |
import json
def json_to_dict(filename):
    """ Simplify json loading: parse *filename* and return the object. """
    with open(filename, "rb") as handle:
        return json.load(handle)
def produced_files(nhosts, jobid, njobtasks):
    """
    Associates file (to be written) to each job ID

    :param nhosts: total number of hosts
    :param jobid: job ID
    :param njobtasks: N of tasks per job
    :return: hosts_files: list of (host, filename) pairs for this jobID
    """
    assignments = []
    for task in range(njobtasks):
        host = task % nhosts  # round-robin host assignment
        file_id = jobid * njobtasks + task
        filename = "kron_file_host{}_job{}_id{}".format(host, jobid, file_id)
        assignments.append((host, filename))
    return assignments
def _auth_post(self, url, body, return_response=False, **kwargs):
    """Synchronous POST request. Used as standard over async currently.

    If return_response == True, the response object is returned rather than
    its .json() output.

    Any additional kwargs are forwarded onto the requests.post().
    """
    # Posting through the session applies the authenticated state.
    r = self._zegami_session.post(url, body, **kwargs)
    # Validates the HTTP status (synchronous flavour of the check).
    self._check_status(r, is_async_request=False)
    return r if return_response else r.json()
def Getp2(M_0, M_1, M_2):
    """
    Consider a two-body decay :math:`M_0\\rightarrow M_1M_2`. In the rest frame of :math:`M_0`, the momentum of
    :math:`M_1` and :math:`M_2` are definite.

    :param M_0: The invariant mass of :math:`M_0`
    :param M_1: The invariant mass of :math:`M_1`
    :param M_2: The invariant mass of :math:`M_2`
    :return: the *squared* momentum of :math:`M_1` (or :math:`M_2`).
        NOTE(review): the name ``Getp2`` and the formula indicate
        :math:`p^2`, although an earlier docstring said "the momentum" --
        confirm with callers.
    """
    M12S = M_1 + M_2
    M12D = M_1 - M_2
    # Product of the four mass sum/difference factors.
    p = (M_0 - M12S) * (M_0 + M12S) * (M_0 - M12D) * (M_0 + M12D)
    return p / (4 * M_0 * M_0)
def get_id_key(entity_name):
    """Get entity id key.

    Follows the simple convention that *id_key* is the *entity_name*
    followed by '_id'.

    Args:
        entity_name (str): entity_name
    Return:
        id_key (str)

    .. code-block:: python

        >>> get_id_key('province')
        'province_id'
    """
    return f"{entity_name}_id"
def plot_grid(flood, grid):
    """
    Plot grid over flood image.

    Parameters
    ----------
    flood : GeoDataFrame
    grid : GeoDataFrame

    Returns
    -------
    The value of ``flood.plot(...)`` -- presumably the matplotlib Axes;
    confirm against the geopandas version in use.
    """
    # Draw the grid layer first, then overlay the flood on the same axes.
    ax = grid.plot(color='yellow');
    return flood.plot(ax=ax, color='black', alpha=0.5);
from typing import IO
def _isatty(stream: IO) -> bool:
"""Returns ``True`` if the stream is part of a tty.
Borrowed from ``click._compat``."""
# noinspection PyBroadException
try:
return stream.isatty()
except Exception:
return False | a8544b17d83ca813ed94d2a25b6958ed10143913 | 102,793 |
import re
def search_matches(match_list, search_list, ignore_list=None):
    """
    Return list of football matches that match search

    Each match is a dict with a 'fixture' key; terms in *search_list*
    select fixtures, terms in *ignore_list* then filter them out.
    """
    if ignore_list is None:
        ignore_list = []
    wanted = re.compile('|'.join(search_list))
    selected = [match for match in match_list if wanted.search(match['fixture'])]
    if ignore_list:
        unwanted = re.compile('|'.join(ignore_list))
        selected = [match for match in selected if not unwanted.search(match["fixture"])]
    return selected
import ntpath
def win_to_cygwin_path(path):
    """
    Converts a Windows path to a Cygwin path.

    :param path: Windows path to convert.
        Must be an absolute path.
    :type path: str

    :returns: Cygwin path.
    :rtype: str

    :raises ValueError: Cannot convert the path.
    """
    drive, tail = ntpath.splitdrive(path)
    if not drive:
        raise ValueError("Not an absolute path!")
    # Backslashes become separators; literal forward slashes get escaped.
    translated = []
    for character in tail:
        if character == "\\":
            translated.append("/")
        elif character == "/":
            translated.append("\\/")
        else:
            translated.append(character)
    return "/cygdrive/%s%s" % (drive[0].lower(), "".join(translated))
def get_related_model(model, relation_field_name):
    """
    Get the related model of the (presumed) relation field with the given name
    on the given model.

    :param type model: The model class to inspect the field on.
    :param str relation_field_name: The field name of the (presumed) relation
        field.
    :return: The model class reached via the relation field or None if the
        field is not actually a relation field.
    :rtype: type | None
    """
    if hasattr(model._meta, 'get_field_by_name'):  # pragma: no cover
        # Older Django versions (<1.8) only allowed to find reverse relation
        # objects as well as fields via the get_field_by_name method, which
        # doesn't exist in recent versions anymore.
        field_or_rel, _, direct, _ = model._meta.get_field_by_name(relation_field_name)
        # Unlike in recent Django versions, the reverse relation objects and
        # fields also didn't provide the same attributes, which is why they
        # need to be treated differently.
        if not direct:  # direct=False means a reverse relation object
            return field_or_rel.field.model
        # For a plain (non-relation) field rel is falsy, yielding None here.
        return field_or_rel.rel and field_or_rel.rel.to
    # Modern Django: related_model is None for non-relation fields.
    return model._meta.get_field(relation_field_name).related_model
def _decode(encoded):
"""Decode a string into utf-8
:param encoded: Encoded string
:type encoded: string
:return: Decoded string
:rtype: string
"""
if not encoded:
return ""
decoded = encoded.decode('utf-8')
if decoded.endswith('\n'):
decoded = decoded[:-1]
return decoded | c2d77b450e11a8914c0be62faefa3376b52ab43d | 102,805 |
import csv
def rd_csv(fname):
    """
    read rows from csv file as list of dicts (header row supplies keys)
    """
    with open(fname) as handle:
        return [dict(record) for record in csv.DictReader(handle)]
import torch
def rgba_to_rgb(rgba: torch.Tensor):
    """
    Collapse an RGBA tensor to RGB by alpha-multiplying.

    The last channel (alpha) scales the remaining channels:
    [... 4 H W] -> [... 3 H W]
    """
    color = rgba[..., :-1, :, :]
    alpha = rgba[..., -1:, :, :]
    return color * alpha
def ensure_string(data):
    """Ensure the data are decoded to a string if they are bytes.

    Parameters
    ----------
    data : str or bytes

    Raises
    ------
    TypeError
        When *data* is neither str nor bytes.
    """
    if isinstance(data, bytes):
        # Undecodable bytes become U+FFFD replacement characters.
        return data.decode("ascii", "replace")
    if isinstance(data, str):
        return data
    raise TypeError(f"Data are neither bytes nor string: {type(data)}")
def get_oldest(fromlist):
    """
    get_oldest(fromlist) where fromlist is a list of DataObjects

    Get the oldest timestamp out of all the timestamps in the DataObject list.
    """
    oldest_timestamp = fromlist[0].data[0][1] #take the first timestamp from the first DataObject in the fromlist list
    # NOTE(review): the seed comes from ``data[0][1]`` but the loop compares
    # ``oldest_sample`` attributes -- if those can differ, the result may
    # not equal any object's oldest_sample. Confirm the invariant that
    # fromlist[0].data[0][1] == fromlist[0].oldest_sample.
    for obj in fromlist:
        if obj.oldest_sample < oldest_timestamp:
            oldest_timestamp = obj.oldest_sample
    return oldest_timestamp
import textwrap
def fill(text, width):
    """Wraps each paragraph in text (a string) so every line
    is at most width characters long, and returns a single string
    containing the wrapped paragraphs.

    Paragraphs already fitting within *width* are left untouched;
    empty paragraphs are dropped.
    """
    width = int(width)
    # Normalize all newline conventions before splitting on blank lines.
    normalized = text.replace("\r\n", "\n").replace("\r", "\n")
    kept = []
    for paragraph in normalized.split("\n\n"):
        if not paragraph:
            continue
        longest = max(len(line) for line in paragraph.split("\n"))
        if longest > width:
            paragraph = textwrap.fill(paragraph, width, replace_whitespace=False)
        kept.append(paragraph)
    return "\n\n".join(kept)
def create_labels(sizes):
    """create labels starting at 0 with specified sizes

    Element i of *sizes* contributes the label i repeated sizes[i] times.
    """
    labels = []
    for label, size in enumerate(sizes):
        labels.extend([label] * size)
    return labels
def filtered(alert, rules):
    """Determine if an alert meets an exclusion rule

    :param alert: The alert to test
    :param rules: An array of exclusion rules to test against
    :returns: Boolean - True if the alert should be dropped
    """
    for rule in rules:
        if rule(alert):
            return True
    return False
def build_like(operator="AND", **kwargs):
    """Generates an SQL WHERE string.

    Will replace None's with IS NULL's.

    Keyword Args:
        Containing SQL search string
        Eg: ``{"foo": "x", "bar": None}``

    Returns:
        Tuple containing string that can
        be used after LIKE in SQL statement,
        along with a list of the values.
        Eg. ("foo like ? AND bar IS NULL", [ "x%" ])
    """
    values = []
    clauses = []
    for column, target in kwargs.items():
        if target is None:
            clauses.append(column + " IS NULL")
        else:
            clauses.append(column + " LIKE ?")
            # Prefix-match semantics: append the SQL wildcard.
            values.append(target + '%')
    if not clauses:
        return (None, [])
    return ((" %s " % operator).join(clauses), values)
def merge_dicts(*dicts):
    """return dict created by updating each dict in sequence, last key wins"""
    merged = {}
    for mapping in dicts:
        merged.update(mapping)
    return merged
import time
def get_current_timestamp() -> float:
    """
    Return the current Unix timestamp in seconds.

    :return: seconds since the epoch as a float
    """
    return time.time()
def _all_done(fs, download_results):
"""
Checks if all futures are ready or done
"""
if download_results:
return all([f.done for f in fs])
else:
return all([f.success or f.done for f in fs]) | b2d2f2dc2c9a325935f1f74d0df967132e19ec85 | 102,850 |
import random
import string
def random_string(length=6):
    """Return a random upper-case string of given length.

    :param length: length of string to return.
    """
    letters = string.ascii_uppercase
    # One random.choice call per character, as before.
    picks = [random.choice(letters) for _ in range(length)]
    return ''.join(picks)
def subj_object_provider(subj_name, subj_dict):
    """
    Convenience function version of the subject_dict, to be turned into a
    nipype node.

    Parameters
    ----------
    subj_name : str
        subject ID
    subj_dict : dict
        Dictionary of explore.hf subject objects

    Returns
    -------
    subj_obj : explore.hf subject object
        Corresponding subject object
    """
    return subj_dict[subj_name]
def generate_symbol(registers, polynomials, K):
    """Convolutionally encode one bit of data, output the encoded symbol.

    NOTE: expects ``registers`` ordered so the leftmost bit is most recent.
    For each polynomial, XORs together the register stages selected by
    the polynomial's set bits, producing one symbol bit.
    """
    symbol_bits = []
    for poly in polynomials:
        parity = 0
        for stage in range(K):
            # Bit k of the polynomial connects memory stage k.
            if (poly >> stage) & 1:
                parity ^= registers[stage]
        symbol_bits.append(str(parity))
    return ''.join(symbol_bits)
def trailing_whitespace(editor, item):
    """Fix a pylint trailing-whitespace error in place.

    Strips trailing whitespace from the flagged line and writes it back
    through the editor; returns the (line, column) of the repair.
    """
    line = item.line_no
    cleaned = editor.lines[line].rstrip()
    editor.replace_range((line, line + 1), [cleaned])
    return (line, 0)
import requests
def get_dois(issn, from_year, to_year):
    """
    Return the list of DOIs published in ``issn`` within the given
    year range, fetched from the Crossref API (empty list on any
    non-200 response).
    """
    url = (
        f"https://api.crossref.org/journals/{issn}/works"
        f"?rows=500&filter=from-pub-date:{from_year}-01-01,"
        f"until-pub-date:{to_year}-01-01&mailto=team@ourresearch.org"
    )
    response = requests.get(url)
    if response.status_code != 200:
        return []
    return [item["DOI"] for item in response.json()["message"]["items"]]
def _get_exported_include_tree(dep):
"""
Generate the exported thrift source includes target use for the given
thrift library target.
"""
return dep + "-thrift-includes" | 86cf5901413cac9a05d87509be0c86c0f5fb6fb5 | 102,873 |
def attribute_lca_map(tree, leaf_graph):
    r"""
    Lowest common ancestor of `i` and `j` for each edge :math:`(i, j)` of the leaf graph of the given tree.
    Complexity: :math:`\mathcal{O}(n\log(n)) + \mathcal{O}(m)` where :math:`n` is the number of nodes in `tree` and
    :math:`m` is the number of edges in :attr:`leaf_graph`.
    :param tree: input tree (Concept :class:`~higra.CptHierarchy`)
    :param leaf_graph: graph on the leaves of the input tree (deduced from :class:`~higra.CptHierarchy` on `tree`)
    :return: a 1d array
    """
    # Build the LCA preprocessing structure once, then answer all edge
    # queries of the leaf graph in a single batched call.
    lca = tree.lowest_common_ancestor_preprocess()
    res = lca.lca(leaf_graph)
    return res
import random
def _loader(uris_data: list, target: int, original: int) -> list:
""" Given a target load size, sample from the URIs list to generate the load required"""
if target > original:
load = _loader(uris_data, target - original, original)
target = original
else:
load = []
load += random.sample(uris_data, target)
return load | 1becba902a0d6f310efb0f3e77266e87f9e23900 | 102,877 |
async def check_archon(command, spectro: str):
    """Check the archon CCD controller status.

    :param command: actor command object used to send the status query.
    :param spectro: spectrograph name (currently unused by this check).
    :return: ``True`` when the controller reports IDLE, otherwise an
        error-message string describing the failure.
    """
    # Check that the configuration of archon controller has been loaded.
    archon_cmd = await (await command.actor.send_command("archon", "status"))
    if archon_cmd.status.did_fail:
        return "Failed getting status from the controller"
    else:
        replies = archon_cmd.replies
        # NOTE(review): indexing ``replies[-2]`` assumes the status payload
        # is always the second-to-last reply -- confirm against the archon
        # actor's reply ordering.
        check_idle = replies[-2].body["status"]["status_names"][0]
        if check_idle != "IDLE":
            return "archon is not initialized"
        else:
            return True
def to_tf(input):
    """
    Reorder a channels-first (N, C, H, W) array into TensorFlow's
    channels-last (N, H, W, C) layout.
    :param input: np array, channels first
    :return: the same data with channels last
    """
    channels_last_axes = (0, 2, 3, 1)
    return input.transpose(*channels_last_axes)
def get_character(context, character_name):
    """
    Return the first character in ``context`` whose name matches.

    :param context: context holding a ``characters`` sequence
    :param character_name: name of character
    :type character_name: string
    :raises IndexError: if no character has that name
    .. versionadded:: 0.8
    """
    matches = list(filter(lambda person: person.name == character_name,
                          context.characters))
    return matches[0]
import itertools
def get_nonredun_dict_key_pairs(in_dict):
    """Take a dict and return a list of all unique two-key combinations of
    keys of the dict.

    Each pair is a sorted two-element list; duplicates produced by the
    two orderings of a permutation are collapsed. A single-key dict is
    returned unchanged (quirk preserved from the original interface).
    """
    # Don't modify the dict if there is only one key.
    if len(in_dict) == 1:
        return in_dict
    unique_pairs = []
    for first, second in itertools.permutations(in_dict.keys(), 2):
        pair = sorted([first, second])
        if pair not in unique_pairs:
            unique_pairs.append(pair)
    return unique_pairs
from typing import List
import csv
from io import StringIO
def split_token(token: str) -> List[str]:
    """
    Splits tokens separated by ";".
    If the resulting values have semicolons, the values must be quoted.
    >>> split_token('"foo";"bar;far"')
    ['foo', 'bar;far']
    """
    rows = csv.reader(StringIO(token), delimiter=";")
    first_row = next(rows)
    return first_row
def _format_column(column, places=4):
"""
Given a column from the table, converts it to a nicely formatted string
and returns it. The column can be float, int, bool or a string. Strings
are returned untouched.
"""
if isinstance(column, float):
column = ("%%.%dg" % places) % column
elif isinstance(column, int) or isinstance(column, bool):
column = str(column)
#else:
# It's a string, leave it alone.
return column | cc79f4b6db8d80020b68cd246073444d683822ae | 102,898 |
def sign(a):
    """Return the sign of ``a``: 1 if positive, -1 if negative, 0 if zero."""
    is_positive = a > 0
    is_negative = a < 0
    # Boolean subtraction yields the usual -1/0/1 integer sign.
    return is_positive - is_negative
def gen_unknown_filter(skip_unknown=False):
    """
    Generate a SPARQL FILTER clause excluding unknown edge types, e.g.
    FILTER( ?controlType = "ACTIVATION"^^xsd:string || ?controlType = "INHIBITION"^^xsd:string)

    Returns the empty string unless ``skip_unknown`` is exactly True.
    """
    # Deliberate `== True` (not truthiness) to preserve the original
    # comparison semantics for non-boolean arguments.
    if skip_unknown == True:
        return ('FILTER( ?controlType = "ACTIVATION"^^xsd:string'
                ' || ?controlType = "INHIBITION"^^xsd:string)')
    return ''
def build_kwargs(registry_id):
    """
    Build a kwargs dict which may contain the optional registryId.

    :param registry_id: Optional string containing the registryId.
    :return: kwargs dict with registryId when given, otherwise empty.
    """
    return {'registryId': registry_id} if registry_id else {}
def rindex_list(elem, l):
    """Return the index of the rightmost occurrence of ``elem`` in list ``l``."""
    # Reverse the list, find the first match, then map back to the
    # original index; ValueError propagates when elem is absent.
    offset_from_end = l[::-1].index(elem)
    return len(l) - offset_from_end - 1
def convertToClass(predictions):
    """
    Threshold network outputs at 0.5 and return the resulting 0/1 class
    labels.
    """
    above_threshold = predictions >= 0.5
    # Multiplying the boolean result by 1 converts it to integer labels.
    return above_threshold * 1
import json
def jsonify(topic, msg):
    """JSON-encode ``msg`` and prepend ``topic`` separated by a space."""
    payload = json.dumps({'message': msg})
    return topic + ' ' + payload
import torch
def earth_mover_distance(input: torch.Tensor, target: torch.Tensor, r: float = 2):
    """
    Batch Earth Mover's Distance between per-sample class distributions.

    Args:
        input: B x num_classes predicted distribution
        target: B x num_classes reference distribution
        r: float exponent penalizing the gap between the two CDFs
    Returns:
        Scalar tensor: mean EMD over the batch.
    """
    batch_size, num_classes = input.size()
    # Compare cumulative distributions rather than raw probabilities.
    cdf_gap = torch.cumsum(input, dim=-1) - torch.cumsum(target, dim=-1)
    penalized = torch.abs(cdf_gap) ** r
    per_sample = (penalized.sum(dim=-1) / num_classes) ** (1. / r)
    return per_sample.sum() / batch_size
import pickle
def read_pickled_file(filename):
    """
    Return the unpickled contents of ``filename``.

    :param filename: path to a pickle file.
    :return: the deserialized object, or ``None`` if the file does not
        exist, cannot be opened, or cannot be parsed by pickle.
    """
    try:
        with open(filename, 'rb') as f:
            return pickle.load(f)
    except Exception:
        # A bare ``except:`` here would also swallow KeyboardInterrupt and
        # SystemExit; catching Exception keeps the "return None on failure"
        # contract without hiding interpreter-level signals.
        return None
import random
def rand100() -> int:
    """Return a uniformly random integer in the half-open range [0, 100)."""
    return random.randrange(100)
def _insertion_cost(distances, previous_node, next_node, inserted_node):
"""Calculates insertion costs of inserting a node into a tour.
Args:
distances: A distance matrix.
previous_node: The node before the inserted node. Can be a vector.
next_node: The node after the inserted node. Can be a vector.
inserted_node: The node to insert.
Returns:
The extra tour cost(s) when inserting the node at the given location(s).
"""
return (distances[previous_node, inserted_node]
+ distances[inserted_node, next_node]
- distances[previous_node, next_node]) | 6235c4fce7b9d7c6f095c7aeec5ae312a9327b48 | 102,927 |
def convert_size(free, total, mode):
    """
    Convert free/total sizes according to ``mode``:

    - ``free``  -- returns free
    - ``total`` -- returns total
    - ``used``  -- returns difference between total and free
    - ``pfree`` -- returns free as percentage of total
    - ``pused`` -- returns used as percentage of total

    :param mode: one of SIZE_CONVERSION_MODES
    """
    if total == 0:
        return 0  # even if free is not 0, it is better to alert authorities
    conversions = {
        'free': lambda: free,
        'total': lambda: total,
        'used': lambda: total - free,
        'pfree': lambda: (free / total) * 100,
        'pused': lambda: ((total - free) / total) * 100,
    }
    convert = conversions.get(mode)
    # Unknown modes fall through to None, matching the original behaviour.
    return convert() if convert is not None else None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.