content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def get_failing_item(state):
    """Return the name of the first failing item, or None if all pass.

    `state` maps item names to truthy (passing) / falsy (failing) values.
    """
    return next((name for name, ok in state.items() if not ok), None)
|
59d0f1cba66664faf16c51c168ec2a085f123ba7
| 87,564
|
def sigma_k (n, k):
    """ Sum of divisors of integer n to the power k.

    Computes the sum of the positive divisors of the
    integer n raised to the power k (sigma_k from number theory).
    Negative n is treated as |n|.
    """
    n = abs (n)
    # `total` instead of `sum`: the original shadowed the builtin.
    total = n ** k  # n divides itself
    # Proper divisors of n are all <= n // 2.
    for d in range(1, 1 + (n // 2)):
        if n % d == 0:
            total += d ** k
    return total
|
df2adc9a1f450bfa67de6145fb7040e3ac655f7d
| 87,569
|
def merge_vcfs(vcf_file, merged_mut_file):
    """Write to MERGED_MUT_FILE the vcf lines called by both mutect and radia.

    VCF_FILE is a dict mapping caller name ('mutect' / 'radia') to a dict of
    {(chrom, pos, ref, alt): vcf line split on tabs}.  Only mutations present
    in both callers are emitted (the radia record is used).  The output file
    is opened in append mode since the header is already written.
    """
    shared = set(vcf_file['mutect']) & set(vcf_file['radia'])
    with open(merged_mut_file, 'a') as outfile:
        for mut_key in shared:
            print('\t'.join(vcf_file['radia'][mut_key]), file=outfile)
    return None
|
300a3a89a146ec101a87ccbd56b11fc661457f9d
| 87,570
|
import re
def word_counter(s, ignore_case=True):
    """
    Returns a tuple made up of:
    - A dictionary of all the words from `s` and how many times they appear
    - A tuple of the number of unique words and the total number of words

    Words are split on whitespace, '.' and ','.  When `ignore_case` is True
    all words are lowercased before counting.
    """
    # Local import keeps this snippet's module-level deps unchanged.
    from collections import Counter
    tokens = [t for t in re.split(r"\s|\.|,", s) if t]
    if ignore_case:
        tokens = [t.lower() for t in tokens]
    # dict(...) preserves the original plain-dict return type.
    counts = dict(Counter(tokens))
    return counts, (len(counts), len(tokens))
|
e16e34b97196d0ca15443a64161c9a35b14b6c0b
| 87,572
|
import torch
def compute_distances(x, n_particles, n_dimensions, remove_duplicates=True):
    """Compute all pairwise particle distances for configurations `x`.

    Parameters
    ----------
    x : torch.Tensor
        Particle positions; reshaped to `[n_batch, n_particles, n_dimensions]`.
    n_particles : int
        Number of particles per configuration.
    n_dimensions : int
        Spatial dimensionality.
    remove_duplicates : bool
        If True, keep only the upper-triangular (i < j) distances.

    Returns
    -------
    torch.Tensor
        Shape `[n_batch, n_particles * (n_particles - 1) // 2]` if
        `remove_duplicates`, else `[n_batch, n_particles, n_particles]`.
    """
    positions = x.reshape(-1, n_particles, n_dimensions)
    all_dists = torch.cdist(positions, positions)
    if not remove_duplicates:
        return all_dists
    # Upper-triangle indices enumerate pairs in the same row-major order
    # as the original boolean-mask indexing.
    rows, cols = torch.triu_indices(n_particles, n_particles, offset=1)
    return all_dists[:, rows, cols]
|
e6d39e9855c57f5a926054df032cd70ba58d38e9
| 87,573
|
import random
def fill_array_rand(size, randStart, randEnd):
    """
    Build a list of `size` random integers drawn uniformly (inclusive)
    from [randStart, randEnd].

    :param size: number of elements to generate
    :param randStart: inclusive lower bound
    :param randEnd: inclusive upper bound
    :returns: list of `size` random integers
    """
    values = []
    for _ in range(size):
        values.append(random.randint(randStart, randEnd))
    return values
|
c00d0b0f30852ef723fa16f42f8a6a548068e45d
| 87,577
|
def is_registered_path(tmod, pin, pout):
    """Return True when the pin->pout path goes through a $dff cell,
    i.e. the path is sequential and needs no combinational_sink_port.
    """
    for cell_name, cell_type in tmod.all_cells:
        if cell_type != "$dff":
            continue
        if tmod.port_conns(pin) != tmod.cell_conn_list(cell_name, "D"):
            continue
        if tmod.port_conns(pout) == tmod.cell_conn_list(cell_name, "Q"):
            return True
    return False
|
c5c35a1f2928dc8b6862bc4ebbfdac69fcb3eba9
| 87,583
|
import pickle
def create_posgresurl(fname='SQLAuth.sql', sqldb_dict=None):
    """
    Creates a PostgreSQL url that can be used to connect to a database.

    Parameters
    ----------
    fname: str, optional
        Path to the pickled SQL-credentials file.  Only read when
        `sqldb_dict` is not provided.  Default 'SQLAuth.sql'.
    sqldb_dict: dict, optional
        SQL credentials with keys 'username', 'password', 'host:port'
        and 'dbname'.  If None, credentials are unpickled from `fname`.

    Returns
    -------
    str
        PostgreSQL url to connect to a database.

    Raises
    ------
    IOError
        If the credentials file cannot be loaded.
    """
    if not sqldb_dict:
        try:
            # `with` closes the handle (the original leaked it), and the
            # narrowed except no longer swallows KeyboardInterrupt/SystemExit.
            with open(fname, 'rb') as cred_file:
                sqldb_dict = pickle.load(cred_file)
        except Exception as err:
            raise IOError('ERROR: Could not load SQL credentials. Check path.') from err
    return 'postgres://{}:{}@{}/{}'.format(sqldb_dict['username'], sqldb_dict['password'], sqldb_dict['host:port'],
                                           sqldb_dict['dbname'])
|
c38f5c777eb59645166eece3f477c35355a93f53
| 87,584
|
def domainmatch(ptrs, domainsuffix):
    """Case-insensitively test whether any validated PTR domain name equals
    `domainsuffix` or ends with it as a whole label.

    Examples:
    >>> domainmatch(['FOO.COM'], 'foo.com')
    1
    >>> domainmatch(['moo.foo.com'], 'FOO.COM')
    1
    >>> domainmatch(['moo.bar.com'], 'foo.com')
    0
    """
    suffix = domainsuffix.lower()
    dotted = '.' + suffix
    return any(p.lower() == suffix or p.lower().endswith(dotted) for p in ptrs)
|
d69e68f984836222923e44d1b2671b487288dc31
| 87,586
|
def get_deep_name(name, mode='_'):
    """
    Transform a dotted attribute name according to `mode`.

    Arguments:
    - name: attribute name (may contain dots)
    - mode: '_' replaces dots with underscores; 'last' keeps only the
      part after the rightmost dot.

    Raises ValueError for any other mode.
    """
    if mode == '_':
        return name.replace('.', '_')
    if mode == 'last':
        return name.rsplit('.', 1)[-1]
    raise ValueError("Argument mode can be '_', or 'last' but not " + mode)
|
3bcf0b85602f8241005a333876216da3f5a19623
| 87,590
|
def str_to_bytes(string):
    """Return `string` as bytes, UTF-8 encoding it when it is text."""
    return string if isinstance(string, bytes) else string.encode('utf-8')
|
21ab441dc714f74532bb4fca6912880d9d6b32a2
| 87,593
|
from typing import Dict
def _parse_job_run_statuses(list_jobs_string: str) -> Dict[str, str]:
"""Parse the list_jobs string to return job run status."""
job_statuses = {}
# first three is table start and header row, last one is table end
rows = list_jobs_string.splitlines()[3:-1]
for r in rows:
segments = [s.strip() for s in r.split("|")]
job_statuses[segments[1]] = segments[3]
return job_statuses
|
c2fcc384d2e79d994803f3a548000d8ef88b6e93
| 87,598
|
def flip(d):
    """Return a new dict with keys and values of `d` swapped."""
    return dict((value, key) for key, value in d.items())
|
59ca5f0ef6e6c19ff98aeb57d88fd5e277c03c7d
| 87,602
|
def v_from_line(line):
    """
    Extract the V values from an element line.

    Returns:
        v: list whose slot 0 holds the original line, so the V values
           themselves are indexed starting at 1.
    """
    tokens = line.split('/')[0].split()
    values = tokens[3:]  # V data starts at token index 4 -> slot 1 here
    values[0] = line  # keep the raw line in slot 0
    return values
|
36170854d8003f11824afb86f6390779eef0fd42
| 87,603
|
import re
def extract_redundancy_factor(oclass):
    """Extract the redundancy factor from an object class.

    Args:
        oclass (str): the object class, e.g. "EC_16P2GX" or "RP_3G1".

    Returns:
        int: the redundancy factor (parity count for EC, replicas - 1
        for RP, 0 otherwise).
    """
    # Bug fix: the original pattern "EC_[0-9]+P([0-9])+" put the '+'
    # outside the group, so only the LAST parity digit was captured
    # (e.g. "EC_16P12" yielded 2 instead of 12).
    match = re.search("EC_[0-9]+P([0-9]+)", oclass)
    if match:
        return int(match.group(1))
    match = re.search("RP_([0-9]+)", oclass)
    if match:
        return int(match.group(1)) - 1
    return 0
|
5b619d9d9d4194f5926b9ec65861053c1152ae48
| 87,604
|
def safe_eval(expr, local_var):
    """Eval expr with the given local variables.

    NOTE(review): despite the name, this is NOT safe for untrusted input --
    ``eval`` executes arbitrary code with this module's globals.  Only call
    it with trusted expressions; consider ``ast.literal_eval`` if only
    literal values are expected.

    :param expr: expression string to evaluate.
    :param local_var: mapping used as the local namespace.
    :return: the evaluated expression's value.
    """
    return eval(expr, globals(), local_var)
|
d03e07192112b2fb18c2cd94317acb6a05014d11
| 87,605
|
def sanitize(s, strict=True):
    """
    Sanitize a string.

    Spaces are converted to underscore; if strict=True they are then removed
    along with every other non-alphanumeric character.

    Parameters
    ----------
    s : str
        String to sanitize (non-str input is converted with str()).
    strict : bool
        If True, only ASCII alphanumerics survive.  If False, '-', '_'
        and '.' are also kept.
    """
    keep = set(
        'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        'abcdefghijklmnopqrstuvwxyz'
        '0123456789'
    )
    if not strict:
        keep.update('-_.')
    underscored = str(s).replace(' ', '_')
    return ''.join(ch for ch in underscored if ch in keep)
|
3150b7d118443224630dcfd2925799bf8daea602
| 87,606
|
import re
def validate_email(email):
    """Return True when `email` matches a basic lowercase address pattern."""
    pattern = r'^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,4})$'
    return bool(re.match(pattern, email))
|
1235eb7e5154e54b7ed5500b7bb2a737321b4718
| 87,609
|
from typing import Any
def convert_attrs(value: Any) -> Any:
    """Convert Token.attrs given as ``None`` or ``[[key, value], ...]`` to a
    dict, for compatibility with upstream markdown-it.  Falsy input becomes
    an empty dict; an existing mapping is returned unchanged.
    """
    if not value:
        return {}
    return dict(value) if isinstance(value, list) else value
|
13c3a22ebb9bd394058b9ca654d45411dd3e6d1c
| 87,611
|
def dictstrip(d):
    """Return a copy of `d` without entries whose values are falsey."""
    return dict((key, val) for key, val in d.items() if val)
|
7cd8cea481e899623df3d1a8196a58cf94c85713
| 87,614
|
def int_float_verbose(x_int:int, y_float:float, *, verbose=False):
    """
    Pointless demonstration converter.
    [[arguments]]
    {x_int} A pointless int.
    {y_float} A pointless float.
    [[end]]
    [[options]]
    {verbose} Allows control of
    the int float pair's verbosity.
    Example:
    for a in code:
    print(example(a))
    Hopefully this all survives.
    [[end]]
    """
    verbosity = "verbose" if verbose else "silent"
    return (x_int, y_float, verbosity)
|
c06a861a45de909a6a75232a577247fd8272c9c2
| 87,618
|
def applyAndPack(images, action):
    """
    `images` is a collection of (`title`, image) pairs.  Apply `action` to
    each image and re-pack the pair as (`title`, `action`(image)).
    """
    return [(title, action(image)) for title, image in images]
|
29e73a7f7fb26683fc9cb5d811c79296dac4a4b6
| 87,619
|
import math
def to_rad(angle) -> float:
    """Convert an angle given in degrees to radians."""
    return angle * (math.pi / 180)
|
e6b5d004e5f99e98d536f9550139378c019bd147
| 87,621
|
def add_move_for_straight_line_pieces(move_matrix, row, col, game_board, piece, moves):
    """
    Adds moves for Rook, Bishop and Queen
    :param move_matrix: How the piece moves (list of (d_row, d_col) ray directions)
    :param row: Which row the piece is on
    :param col: Which column the piece is on
    :param game_board: game_board array
    :param piece: The piece
    :param moves: Current moves (mutated in place and also returned)
    :return: Array of legal moves
    """
    # Walk each ray direction, stepping at most 7 squares (the longest
    # slide on an 8x8 board).
    for m in move_matrix:
        for i in range(1, 8):
            target_row = row + (i * m[0])
            target_col = col + (i * m[1])
            if 0 <= target_row < 8 and 0 <= target_col < 8:
                # NOTE(review): game_board appears to be a 2-D array of
                # square objects indexed by a (row, col) tuple -- confirm.
                target = game_board[target_row, target_col].get_content()
                if target is not None:
                    # Occupied square: a capture is legal only when the
                    # letter case differs (case presumably encodes colour).
                    if (
                        piece.get_type().islower() and target.get_type().isupper()
                    ) or (
                        piece.get_type().isupper() and target.get_type().islower()
                    ):
                        moves.append([i * m[0], i * m[1]])
                    # Any occupied square blocks the rest of this ray.
                    break
                # Empty square: record the move and keep sliding.
                moves.append([i * m[0], i * m[1]])
            else:
                # Off the board: stop scanning this direction.
                break
    return moves
|
9b7f681acf41801220ca8b1d4e72d8f7ad5733d2
| 87,622
|
def latlon_to_cell(lat, lon):
    """
    Return the cell containing the given lat/lon pair.

    NOTE: the cell x holds the minimum (contained) lon value, while the
    cell y holds the maximum (NOT contained) lat value: cell 120/-20
    covers lon 120..120.99999 and lat -20.00001..-19 exclusive of -20.

    :param lat: latitude
    :param lon: longitude
    :return: cell as an (x, y) pair
    """
    cell_x = int(lon)
    cell_y = int(lat) - 1
    return cell_x, cell_y
|
fd3088134710cd6e29b634d56b26c002a7f9a238
| 87,624
|
import csv
def parse_domains(filename):
    """
    Parse a CSV file of domains in the format:
    Rank, URL, Linking Root Domains, External Links, mozRank, mozTrust
    (download from https://moz.com/top500/domains/csv).

    Returns a list of (domain_name, tld) tuples; the header row is skipped.
    """
    print('Parsing domains from {0}'.format(filename))
    parsed = []
    with open(filename, 'r') as csvfile:
        for record in csv.reader(csvfile):
            url = record[1]
            if url == 'URL':
                continue  # header row
            name, tld = url.rstrip('/').rsplit('.', maxsplit=1)
            parsed.append((name, tld))
    return parsed
|
0f7a861e63922759f322f418136fbd0d142e5c6c
| 87,635
|
def _same_dimension(x, y):
"""Determines if two `tf.Dimension`s are the same.
Args:
x: a `tf.Dimension` object.
y: a `tf.Dimension` object.
Returns:
True iff `x` and `y` are either both _unknown_ (i.e. `None`), or both have
the same value.
"""
if x is None:
return y is None
else:
return y is not None and x.value == y.value
|
66d99621a5130d42833d044a5c80c9d87a3f9c2b
| 87,636
|
from typing import Dict
def deep_merge(parent: Dict, child: Dict) -> Dict:
    """Deeply merge `child` into `parent`, in place.

    Nested dicts are merged recursively; any other overlapping value in
    `child` replaces the one in `parent`.  The mutated `parent` is also
    returned for convenience.
    """
    for key, incoming in child.items():
        existing = parent.get(key)
        if isinstance(existing, dict) and isinstance(incoming, dict):
            deep_merge(existing, incoming)
        else:
            parent[key] = incoming
    return parent
|
41b81a98faea32e771883d945e7ed71e4f1fda32
| 87,640
|
def get_point(track, time_idx):
    """
    Look up the GPX track point for a given time index.

    :param track: track DataFrame
    :param time_idx: time index
    :return: the point at `time_idx`, or None when no point exists there
    """
    try:
        point = track.loc[time_idx]
    except KeyError:
        return None
    return point
|
4203bcbbc7a1342664e6e42c3ef39e14b62546ca
| 87,644
|
import hashlib
def entropy_to_masterkey(entropy: bytes) -> bytes:
    """Derive the Icarus master key from entropy.

    Algorithm per CIP-0003:
    https://github.com/cardano-foundation/CIPs/blob/master/CIP-0003/Icarus.md
    96 bytes of PBKDF2-HMAC-SHA512 (4096 rounds, empty password, entropy as
    salt), then the ed25519 scalar clamping of bytes 0 and 31.
    """
    raw = hashlib.pbkdf2_hmac('sha512', b'', entropy, 4096, 96)
    key = bytearray(raw)
    # Clamp: clear the low 3 bits of byte 0; clear the top 3 bits and set
    # bit 6 of byte 31.
    key[0] &= 0b11111000
    key[31] = (key[31] & 0b00011111) | 0b01000000
    return key
|
f153b885f7f38711fb8c4918bb380dbead20de10
| 87,647
|
def get_csv_length(path):
    """
    Get the number of lines of the specified file.

    :param path: path to the file to analyse
    :type path: str
    :return: the number of lines in the csv
    :rtype: int
    """
    with open(path) as csv_file:
        # Stream line by line instead of materialising the whole file
        # (the original wrapped readlines() -- already a list -- in list()).
        return sum(1 for _ in csv_file)
|
900d865c7fddc823cb4c81ffc2531a23e71d5b71
| 87,654
|
def _cg_a_mult(OtOr, X, y, v):
"""
Compute the multiplication Av, where A = X'X + X'yX + λ.
"""
XtXv = OtOr @ v
XtyXv = X.T @ (y * (X @ v))
return XtXv + XtyXv
|
63cef7d64e5e184260f78e043c2d1421a9dcc253
| 87,655
|
import asyncio
def aio_run(coroutine, loop=None, close_after_complete=False):
    """
    Decorator to run an asyncio coroutine as a normal blocking
    function.
    Arguments:
        - `coroutine`: The asyncio coroutine or task to be executed.
        - `loop`: An optional :class:`asyncio.AbstractEventLoop`
          subclass instance.
        - `close_after_complete`: Close `loop` after the coroutine
          returned. Defaults to ``False``.
    Returns the result of the asyncio coroutine.

    NOTE(review): ``asyncio.get_event_loop()`` is deprecated when no loop
    is running (Python 3.10+) -- behaviour may differ across versions;
    ``asyncio.run`` is the modern equivalent when no custom loop is needed.

    Example:
    .. code-block::
        @asyncio.coroutine
        def coroutine(timeout):
            yield from asyncio.sleep(timeout)
            return True
        # Call coroutine in a blocking manner
        result = aio_run(coroutine(1.0))
        print(result)
    """
    # Create a new event loop (if required)
    if loop is None:
        loop_ = asyncio.get_event_loop()
        # Closed? Set a new one
        if loop_.is_closed():
            loop_ = asyncio.new_event_loop()
            asyncio.set_event_loop(loop_)
    else:
        loop_ = loop
    # Run the coroutine and get the result (blocks until completion)
    result = loop_.run_until_complete(coroutine)
    # Close loop (if requested); a closed loop cannot be reused
    if close_after_complete:
        loop_.close()
    # Return the result
    return result
|
aaec2754c405931f7a1ae1bd951e1a29c6d11170
| 87,659
|
def evidence(var, e, outcomeSpace):
    """
    Return a copy of `outcomeSpace` in which variable `var`'s domain is
    collapsed to the single observed value `e`.

    argument
    `var`, a valid variable identifier.
    `e`, the observed value for var.
    `outcomeSpace`, dictionary with the domain of each variable.
    """
    restricted = outcomeSpace.copy()  # shallow copy; other domains shared
    restricted[var] = (e,)
    return restricted
|
0338e6a1419ee3495b16743dfd428c50ccb2c49c
| 87,661
|
import copy
def _manual_join_columns(columns_to_join):
"""Separate manually joined columns from columns_to_join
If columns_to_join contains 'metadata', 'system_metadata', or
'pci_devices' those columns are removed from columns_to_join and added
to a manual_joins list to be used with the _instances_fill_metadata method.
The columns_to_join formal parameter is copied and not modified, the return
tuple has the modified columns_to_join list to be used with joinedload in
a model query.
:param:columns_to_join: List of columns to join in a model query.
:return: tuple of (manual_joins, columns_to_join)
"""
manual_joins = []
columns_to_join_new = copy.copy(columns_to_join)
for column in ('metadata', 'system_metadata', 'pci_devices'):
if column in columns_to_join_new:
columns_to_join_new.remove(column)
manual_joins.append(column)
return manual_joins, columns_to_join_new
|
36cf892b1cc7bef4e0a50d7964578406f1691b0a
| 87,663
|
from pathlib import Path
def create_workdir(tmp_dir: Path, name: str):
    """
    Create and return a per-job working directory `tmp_dir / name`.

    Raises FileExistsError if the directory already exists.
    """
    job_dir = tmp_dir / name
    job_dir.mkdir()
    return job_dir
|
8add8073cc8b3ec70b92fc6c03a56a366320c2d2
| 87,664
|
from pathlib import Path
def the_path_that_matches(pattern: str, in_directory):
    """
    Find one and only one path matching the specified glob pattern.

    Parameters
    ----------
    pattern : str
        Pattern to search for.
    in_directory : str or Path
        Directory in which to search for the pattern.

    Returns
    -------
    Path
        The single path found.

    Raises
    ------
    IOError
        If `in_directory` is not a directory, or the search finds
        no paths or more than one path.
    """
    directory = Path(in_directory)
    # Validate the directory BEFORE globbing -- the original globbed first,
    # so a nonexistent directory surfaced as a confusing glob result.
    if not directory.is_dir():
        raise IOError(f"{in_directory} either doesn't exist or isn't a directory at all!")
    matches = list(directory.glob(pattern))
    if len(matches) >= 2:
        raise IOError(f"The directory {in_directory} exists but contains more than one path that matches '{pattern}': {[match.name for match in matches]}")
    if not matches:
        raise IOError(f"The directory {in_directory} exists but contains no paths that match pattern '{pattern}'")
    return matches[0]
|
b6c7f37a24220d02c03a8b38700f65405c9371f5
| 87,666
|
from pathlib import Path
import inspect
def here(frames_back: int = 0) -> Path:
    """
    Return the directory containing the file from which this function is
    called.

    Args:
        frames_back: extra stack frames to skip beyond the caller.

    Returns: the directory as a Path instance.
    """
    caller = inspect.stack()[frames_back + 1]
    return Path(caller.filename).parent
|
a261aeac42b8fe4143783e41a74c1025e937e17d
| 87,668
|
def analyzeMap(map, value, list=None):
    """
    Analyze a map of interactions to determine the overall
    connectivity.

    Parameters
        map : A dictionary of lists which contain the connections
              to the key (dictionary)
        value : The key value to analyze (variable)
        list : accumulator used by the recursion; leave as None

    Returns
        list : A connectivity list of the map (list)

    Example
        Given map {1: [2], 4: [5], 7: [5,9], 9: [14]} list will return
        For 1: [1,2]
        For 4,5,7,9,14: [4,5,7,9,14]
        For all other X: [X]
    """
    # Bug fix: the default was a MUTABLE list ([]), shared across top-level
    # calls, so a second call returned [] or stale results.
    if list is None:
        list = []
    if value in list:
        return []
    else:
        list.append(value)
    # Follow forward edges from `value`.
    if value in map:
        for entry in map[value]:
            newlist = analyzeMap(map, entry, list)
            for newitem in newlist:
                if newitem not in list:
                    list.append(newitem)
    # Follow reverse edges: keys that point at `value`.
    for key in map:
        for entry in map[key]:
            if entry == value and key not in list:
                newlist = analyzeMap(map, key, list)
                for newitem in newlist:
                    if newitem not in list:
                        list.append(newitem)
    return list
|
bcb6297c16798a147fd3743ad5590435c0d3dca8
| 87,673
|
import torch
def _pdist(a, b):
"""Compute pair-wise squared distance between points in `a` and `b`.
Parameters
----------
a : array_like
An NxM matrix of N samples of dimensionality M.
b : array_like
An LxM matrix of L samples of dimensionality M.
Returns
-------
ndarray
Returns a matrix of size len(a), len(b) such that eleement (i, j)
contains the squared distance between `a[i]` and `b[j]`.
"""
if len(a) == 0 or len(b) == 0:
return torch.zeros(len(a), len(b), dtype=a.dtype, device=a.device)
a = a[:, None, :]
b = b[None, :, :]
return torch.sum(torch.pow(a - b, 2), dim=-1)
|
47025e93442418293cc51c7e6d335fa4861d7711
| 87,674
|
import binascii
def bin_compare_region(fn1, start1, end1, fn2, start2, end2):
    """compare region from two files
    :param fn1: the first file to compare.
    :param start1: start address of the file fn1.
    :param end1: end address of file fn1
    :param fn2: the second file to compare.
    :param start2: start address of the file fn2.
    :param end2: end address of file fn2.
    :returns rtn: True/False.
    """
    rtn = True
    # Compare only the overlap of the two regions.
    size_bytes = min(end1 - start1, end2 - start2)
    with open(fn1, 'rb') as f1, open(fn2, 'rb') as f2:
        f1.seek(start1)
        f2.seek(start2)
        total, bufsize = 0, 16
        while total < size_bytes:
            # Bug fix: clamp the final read so bytes OUTSIDE the requested
            # region are never compared (the original always read a full
            # 16-byte chunk even when fewer bytes remained in the region).
            chunk = min(bufsize, size_bytes - total)
            temp1, temp2 = f1.read(chunk), f2.read(chunk)
            total += chunk
            if temp1 != temp2:
                print("index: 0x%x, 0x%x" % (start1 + total, start2 + total))
                print("%-30s" % fn1, binascii.hexlify(temp1))
                print("%-30s" % fn2, binascii.hexlify(temp2))
                rtn = False
    return rtn
|
7e587e49e2a572ebe8c4b699399e1861546e6202
| 87,683
|
import torch
def euclidean_distances(x, y):
    """ Computes pairwise Euclidean distances.
    @param x: a [n x d] torch.FloatTensor of datapoints
    @param y: a [m x d] torch.FloatTensor of datapoints
    @return: a [n x m] torch.FloatTensor of pairwise distances
    """
    diffs = x.unsqueeze(1) - y.unsqueeze(0)
    return torch.norm(diffs, dim=2)
|
e6cd06f107b5fa3c94f2794d5e72802f3f3c19d2
| 87,687
|
import torch
def Variable(initial_value, name=None, trainable=True):
    """
    Create a new variable (a ``torch.nn.Parameter``) holding initial_value.

    Parameters
    ----------
    initial_value : tensor
        A Tensor, or Python object convertible to a Tensor.
    name : str
        Accepted for TF-API compatibility but unused by the torch backend.
    trainable : bool
        Whether gradients should be tracked for this variable.

    Returns
    -------
    Variable
    """
    return torch.nn.Parameter(data=initial_value, requires_grad=trainable)
|
889c23d94af8ec1c70aad4811433ea68e2fa167a
| 87,688
|
def generate_evergreen_project_name(owner, project, branch):
    """Build an evergreen project name based on the project owner, name and branch."""
    return f"{owner}-{project}-{branch}"
|
254a9ab85d4f1805bfef4c1750e714f983bd0a65
| 87,689
|
def _parseFloats(x,useMax=True):
"""
Parses float strings. By default, if multiple floats, returns maximum. Can instead return the first.
"""
try:
return float(x)
except ValueError:
if useMax:
return max(map(float,x.split(',')))
else:
list(map(float,x.split(',')))[0]
|
e2afba15e050d90827843f752160cbfe94689826
| 87,691
|
def underline(s):
    """Return `s` wrapped in IRC underline control characters (0x1F)."""
    return '\x1F' + s + '\x1F'
|
5ee6f8ad8ed3032ea2a64dd14524f075f0297b91
| 87,700
|
def sort_genomic_ranges(rngs):
    """Sort ranges by chromosome, then start, then end."""
    def genomic_order(rng):
        return (rng.chr, rng.start, rng.end)
    return sorted(rngs, key=genomic_order)
|
5cacfafc713c816997cb607c84069fe133badc5d
| 87,703
|
def strong(text: str) -> str:
    """
    Wrap *text* in strong (bold) HTML tags.

    >>> strong("foo")
    '<b>foo</b>'
    """
    return "<b>" + text + "</b>"
|
fa63f2b38cbb8841cd495e47a0085115ad3a75b9
| 87,704
|
def _intersect(rect1, rect2):
"""
Check whether two rectangles intersect.
:param rect1, rect2: a rectangle represented with a turple(x,y,w,h,approxPoly_corner_count)
:return whether the two rectangles intersect
"""
# check x
x_intersect = False
if rect1[0] <= rect2[0] and rect2[0] - rect1[0] < rect1[2]:
x_intersect = True
if rect2[0] <= rect1[0] and rect1[0] - rect2[0] < rect2[2]:
x_intersect = True
# check y
y_intersect = False
if rect1[1] <= rect2[1] and rect2[1] - rect1[1] < rect1[3]:
y_intersect = True
if rect2[1] <= rect1[1] and rect1[1] - rect2[1] < rect2[3]:
y_intersect = True
return x_intersect and y_intersect
|
24892f225ff2794e8f1f37714ba69c724dec4651
| 87,706
|
def is_unknown(val):
    """Return True when `val` is one of the known "unknown" strings
    ('unknown', 'n/a', any case).  Non-string input (no .lower()) is
    treated as not-unknown and yields False.
    """
    try:
        lowered = val.lower()
    except AttributeError:  # not a str
        return False
    return lowered in ('unknown', 'n/a')
|
9b15ba939dc76769188f741768c31d5f1bf437e2
| 87,709
|
def convert_case(s):
    """
    Convert a snake_case string to CamelCase.
    Empty segments (from doubled underscores) are dropped.
    """
    camel = ''
    for part in s.split("_"):
        if part:
            camel += part.title()
    return camel
|
82ad5907d932173721eb55791456ad2a5df4986a
| 87,710
|
def n_bonacci(N, n):
    """
    Computes the n-bonacci number for the given input

    Parameters
    ----------
    N : int
        the sequence number (multiplier of the previous term)
    n : int
        the number to compute the series from

    Returns
    -------
    int
        the n-bonacci number for the given input
    """
    if n <= 1:
        return n
    # Bug fix: the recursive calls previously swapped the argument order
    # (n_bonacci(n - 1, N)), so the multiplier and the index traded places
    # at every level of recursion.
    return N * n_bonacci(N, n - 1) + n_bonacci(N, n - 2)
|
5952d50bf508a336233ce0a06416d73c2fea5217
| 87,715
|
def is_nullable(column_elt):
    """
    Identify whether a column is nullable.
    @param IN column_elt Column XML element
    @return True if it's nullable, False otherwise
    """
    nullable_attr = column_elt.get("nullable")
    if nullable_attr:
        return nullable_attr.lower() == "true"
    raise Exception("Missing column 'nullable' attribute")
|
b522aa6392de433e2ef4af83547d6c1c36636984
| 87,716
|
def sextractor_output(fname):
    """Derive the SExtractor detection FITS table name from a FITS image name."""
    detection_name = fname.replace(".fits", ".fits.stars")
    return detection_name
|
5001f163a64531a7c005f204878ae58c37c27595
| 87,718
|
def format_words(words, max_rows=10, max_cols=8, sep=" "):
    """Format a list of strings into rows of `max_cols` columns.

    When `len(words) > max_rows * max_cols`, the last displayed line is
    replaced by an ellipsis stating how many words were omitted (that
    line counts against the row limit).

    Parameters
    ----------
    words : list of str
        The words to be formatted.
    max_rows : int, optional
        Maximum number of rows to display.
    max_cols : int, optional
        Number of words per row.
    sep : str, optional
        Separator placed between columns.

    Returns
    -------
    out_str : str
        `words` formatted into a single string of rows and columns.
    """
    rows = [sep.join(words[start:start + max_cols])
            for start in range(0, len(words), max_cols)]
    if len(rows) <= max_rows:
        return "\n".join(rows)
    kept = rows[:max_rows - 1]
    omitted = len(words) - max_cols * len(kept)
    return "\n".join(kept) + f"\n...({omitted} more)"
|
a2fec921e019a51562a8e02f4c42b72e0a082301
| 87,722
|
from math import floor, log10
def _round_sig(x, sig=3):
"""
Rounds x to sig significant digits
:param x: The value to round
:param sig: Number of significant digits
:return: rounded value
"""
# Check for zero to avoid math value error with log10(0.0)
if abs(x) < 1e-12:
return 0
else:
return round(x, sig-int(floor(log10(abs(x))))-1)
|
c74b7c32c3c87ff0d6f0285f9873ff06f3f619da
| 87,727
|
def build_header(name, value):
    """
    Construct a valid "Name: value\\r\\n" header string from a raw name
    and value, stripping leading spaces and trailing newlines from the
    value and any trailing colons from the name.
    """
    cleaned_value = value.lstrip(" ").rstrip("\r\n")
    cleaned_name = name.rstrip(":")
    return f"{cleaned_name}: {cleaned_value}\r\n"
|
0173a5742fb15c2a9dedaee57446ce3cd66ed769
| 87,729
|
def parse_sql_script(path) -> str:
    """Read a SQL script from disk.

    Args:
        path: The path and filename of the SQL script.

    Returns:
        The script's full contents as a string.
    """
    with open(path, "r") as script:
        contents = script.read()
    return contents
|
ed74b7605d63ebf2cd0c0ab66e343e24ceb74757
| 87,732
|
def cli(ctx, workflow_id, version=""):
    """Display information needed to run a workflow.
    Output:
    A description of the workflow and its inputs.
    For example::
    {'id': '92c56938c2f9b315',
    'inputs': {'23': {'label': 'Input Dataset', 'value': ''}},
    'name': 'Simple',
    'url': '/api/workflows/92c56938c2f9b315'}
    """
    # Thin wrapper: delegates to the Galaxy instance on the CLI context.
    # NOTE(review): `ctx.gi` is presumed to be a configured BioBlend-style
    # client -- confirm against the surrounding CLI framework.
    return ctx.gi.workflows.show_workflow(workflow_id, version=version)
|
0b60f7ef3623cd66e365d22dd85bf25fba522f13
| 87,733
|
def _validate_str(val):
"""Check to see that a string is a str type"""
if not isinstance(val, str):
raise ValueError("Passed value {} is not a string".format(val))
return val
|
16d95096555e414065edeb528db5e866322af48a
| 87,735
|
import copy
def deep_move_rectangle(rect, dx, dy):
    """Return a new, deep copy of `rect` with its corner shifted.

    rect: Rectangle object.
    dx: How many units to shift the corner (right is positive)
    dy: How many units to shift the corner (up is positive)
    """
    moved = copy.deepcopy(rect)
    moved.corner.x = moved.corner.x + dx
    moved.corner.y = moved.corner.y + dy
    return moved
|
c0c4fb3b36d6424d34ea8bd11019f32cb6290bac
| 87,736
|
def _create_application_version_request(app_metadata, application_id, template):
    """
    Construct the request body to create an application version.

    :param app_metadata: Object containing app metadata
    :type app_metadata: ApplicationMetadata
    :param application_id: The Amazon Resource Name (ARN) of the application
    :type application_id: str
    :param template: A packaged YAML or JSON SAM template
    :type template: str
    :return: SAR CreateApplicationVersion request body (falsy fields dropped)
    :rtype: dict
    """
    app_metadata.validate(['semantic_version'])
    fields = {
        'ApplicationId': application_id,
        'SemanticVersion': app_metadata.semantic_version,
        'SourceCodeUrl': app_metadata.source_code_url,
        'TemplateBody': template,
    }
    return {key: val for key, val in fields.items() if val}
|
652352e144fdd8cb2e8ce15367b5d6797d11dbac
| 87,738
|
def stopped(header):
    """Return True when the run described by `header` stopped successfully."""
    try:
        exit_status = header.stop['exit_status']
    except KeyError:
        # Missing key means the interpreter died before writing a status.
        exit_status = 'Python crash before exit'
    return exit_status == 'success'
|
9ab7d6b6773d6eedc067f05ee56d71eb08382616
| 87,742
|
def get_sep(file_path: str) -> str:
    """Infer the field separator from the file extension (tsv/csv only).

    Args:
        file_path: Path of file.

    Returns: the separator character.

    Raises:
        ValueError: when the extension is neither .csv nor .tsv.
    """
    if file_path.endswith('.tsv'):
        return '\t'
    if file_path.endswith('.csv'):
        return ','
    raise ValueError('Input file is not a .csv or .tsv')
|
e6ab42dafb51c3b4f7f8fd1556b3edb89c6144f6
| 87,744
|
def get_max_y(file_name):
    """Return the hand-picked max y-axis value for a result file, so plots
    of different runs share the same scale.  'mlp' files get 0.5; any
    other file is assumed to be the CNN test and gets 3.5.
    """
    return 0.5 if 'mlp' in file_name else 3.5
|
0855c0d4370165784f1674b072fced52b8cd0a46
| 87,746
|
import re
def regex_from_word(word):
    """Generate a regex pattern string that matches `word` either at the
    start of the string or preceded by whitespace.  Special regex
    characters in `word` are escaped.
    """
    # Raw string fix: the original used "\s" inside a normal string
    # literal, an invalid escape sequence (SyntaxWarning on modern Python).
    return r"(\s{word})|(^{word})".format(
        word=re.escape(word),
    )
|
25857addc5a3d3f90c592ee9a52ccaf26db5662e
| 87,749
|
def get_inner_text(dom_node):
    """
    Return a string amalgamating all text interior to `dom_node`,
    recursively collected from every descendant node, with surrounding
    whitespace stripped.
    """
    pieces = []
    for child in dom_node.childNodes:
        if child.nodeType in (dom_node.TEXT_NODE, dom_node.CDATA_SECTION_NODE):
            pieces.append(child.data)
        else:
            pieces.append(get_inner_text(child))
    return "".join(pieces).strip()
|
5c980818804fdeb3d33845df2d10aa3893f001c8
| 87,759
|
from typing import Any
def get_origin(obj: Any) -> Any:
    """Return the original object behind properties and wrappers.

    Properties resolve to their getter, functools-style wrappers to
    ``__wrapped__``, pytest wrappers to ``__pytest_wrapped__.obj``;
    non-callables are returned as-is.

    Examples:
        >>> class A:
        ...     @property
        ...     def x(self):
        ...         pass
        >>> get_origin(A.x).__name__
        'x'
    """
    if isinstance(obj, property):
        return get_origin(obj.fget)
    if not callable(obj):
        return obj
    _missing = object()
    wrapped = getattr(obj, "__wrapped__", _missing)
    if wrapped is not _missing:
        return get_origin(wrapped)
    pytest_wrapped = getattr(obj, "__pytest_wrapped__", _missing)
    if pytest_wrapped is not _missing:
        return get_origin(pytest_wrapped.obj)
    return obj
|
95d9bb91918c91b2ee34192f8ef2e9431bf4c89c
| 87,763
|
def half(x):
    """Return half of *x*.

    >>> half(6.8)
    3.4
    """
    result = x / 2
    return result
|
f5d57f0686e98e2d21130e3605a5fe0980a19986
| 87,765
|
def _not_cal(not_sign, right):
"""
Reverse number
Args:
not_sign (bool): if has not sign
right (bool): right number
Returns:
bool
"""
if not_sign:
right = not right
return right
|
4bc0b13d474dd1a80c439c693872b1b5e9e91bfb
| 87,778
|
def _filter_special(text):
"""
Remove special characters from a str
Args:
text: str
Return:
lowercase str ascii-friendly
"""
text = text.lower()
text = ''.join([i for i in text if not i.isdigit()])
text = text.translate({ord(c): 'a' for c in 'áàäâ'})
text = text.translate({ord(c): 'e' for c in 'éèëê'})
text = text.translate({ord(c): 'i' for c in 'íìïî'})
text = text.translate({ord(c): 'o' for c in 'óòöô'})
text = text.translate({ord(c): 'u' for c in 'úùüû'})
text = text.replace('ñ', 'n')
text = text.replace('\n', '')
text = text.replace('.', '')
# text = re.sub(r"[^a-zA-Z0-9]+", ' ', text)
return text
|
3839336b45c0d862a9c2733570a1ef5c5510f998
| 87,780
|
import math
def vector_len(v):
    """Return the Euclidean length of a 2D vector ``(x, y)``.

    Uses math.hypot, which avoids intermediate overflow/underflow for
    extreme component values (unlike sqrt(x*x + y*y)).
    """
    x, y = v
    return math.hypot(x, y)
|
554c8b8602aff50e5a1fd579f87e5d756bde12f8
| 87,781
|
def f_lin(x, a, b):
    """Linear function: return f(x) = a*x + b.

    Parameters
    ==========
    x : float or ndarray
    a : float
        The line slope.
    b : float
        The origin ordinate.

    Returns
    =======
    f(x) = a*x + b : float or ndarray
    """
    return b + a * x
|
a8921fad8f17784702fc1d67c80ba1eb5d80d7d2
| 87,783
|
def get_number(input_string: str) -> tuple:
    """
    Parse a leading (possibly negative) decimal number from a string.

    Parameters
    ----------
    input_string : str
        String beginning with the number (optionally preceded by '-').

    Returns
    -------
    tuple
        (value as float, number of characters consumed — including any
        leading '-' and the decimal point).
    """
    number_string = ""
    if input_string.startswith("-"):
        number_string += "-"
        input_string = input_string[1:]
    seen_dot = False
    for character in input_string:
        if character.isdigit():
            number_string += character
        elif character == "." and not seen_dot:
            # Bug fix: accept only the first decimal point. The original
            # appended every '.', so "1.2.3" crashed float(); now it
            # parses as 1.2 and stops at the second dot.
            seen_dot = True
            number_string += "."
        else:
            break
    return (float(number_string), len(number_string))
|
31f291fb7045fde48d624c1fa79d7d606166e564
| 87,787
|
import re
def normalize_array_description(description):
    """
    Normalize an array object description by removing a leading "<...>"
    segment (greedy: everything from the first '<' to the last '>').

    :param str description: Array object description
    :return: Normalized description with surrounding whitespace stripped
    :rtype: str
    """
    without_prefix = re.sub("^(<.*>)", "", description)
    return without_prefix.strip()
|
71784ad25bd56d1063c476fefbb3d7909c37e364
| 87,788
|
def read_file(filename):
    """Read a two-column text file and return the columns as two lists.

    Lines whose first non-blank character is '#' are treated as comments;
    blank lines are skipped (the original crashed on them with an
    IndexError when indexing l[0]).

    Returns (x, y): lists of floats from columns one and two.
    """
    x, y = [], []
    with open(filename, "r") as f:
        for line in f:
            stripped = line.strip()
            # Robustness fix: skip empty lines and comments safely.
            if not stripped or stripped.startswith("#"):
                continue
            cols = stripped.split()
            x.append(float(cols[0]))
            y.append(float(cols[1]))
    return x, y
|
92dc761852546626338016739dd42921f3b32c69
| 87,792
|
import re
def strip_line(single_line):
    """Strip the line and collapse whitespace runs to single spaces,
    except inside double-quoted segments; '#'-prefixed (comment) lines
    are returned unchanged after stripping."""
    single_line = single_line.strip()
    if single_line.startswith('#'):
        return single_line
    pieces = []
    for idx, segment in enumerate(single_line.split('"')):
        # Odd-indexed segments lie between quote marks: keep verbatim.
        if idx % 2 == 1:
            pieces.append(segment)
        else:
            pieces.append(re.sub(r'\s+', ' ', segment))
    return '"'.join(pieces)
|
b7e9b4712824e60fc024abbd0bce97a98a8e6e33
| 87,793
|
def get_marshmallow_from_dataclass_field(dfield):
    """Return the marshmallow field stored in a dataclass field's metadata."""
    metadata = dfield.metadata
    return metadata["marshmallow_field"]
|
bbf1cb4f2523d7566c44735f4cb7d3031f5f031b
| 87,794
|
from typing import Union
def pascals_to_bars(pa: float, unit: str) -> Union[float, str]:
    """
    Convert a pressure in pascals to bars (1 bar = 100000 Pa).

    Wikipedia reference: https://en.wikipedia.org/wiki/Bar_(unit)
    Wikipedia reference: https://en.wikipedia.org/wiki/Pascal_(unit)

    >>> pascals_to_bars(45000, "Pa")
    0.45
    >>> pascals_to_bars("1200000", "Pa")
    12.0
    >>> pascals_to_bars(0, "Pa")
    0.0
    >>> pascals_to_bars(3.1, "mmHg")
    'Invalid unit'
    >>> pascals_to_bars("pass", "Pa")
    Traceback (most recent call last):
    ...
    ValueError: could not convert string to float: 'pass'
    """
    # Guard clause replaces the original if/else shape.
    if unit != "Pa":
        return "Invalid unit"
    return float(pa) / 100000
|
f66700f8d54512bb0e42e3d8fc2a081b6aa457db
| 87,799
|
import typing
def _remove_keys(
parameters: typing.Dict[str, typing.Any], exclude_labels: typing.List[str]
) -> dict:
"""
Remove keys from a dictionary without changing the original.
Attempts to remove keys that don't exist in the dictinary are silently
ignored.
Args:
parameters: Dictionary to be adjusted.
Returns:
Modified dictionary.
"""
this_copy = parameters.copy()
for k in exclude_labels:
try:
del this_copy[k]
except KeyError:
# silently ignore missing keys by suppressing this exception
pass
return this_copy
|
17fdefa40cf558bb3b293f647edf6445dc9b58fd
| 87,800
|
def parser(content, objconf, skip=False, **kwargs):
    """Parse the pipe content into a stream of token dicts.

    Args:
        content (str): The content to tokenize
        objconf (obj): The pipe configuration (an Objectify instance)
        skip (bool): Don't parse the content; pass kwargs['stream'] through

    Returns:
        Iter[dict]: The stream of items

    Examples:
        >>> from meza.fntools import Objectify
        >>> objconf = Objectify({'delimiter': '//', 'token_key': 'token'})
        >>> content = 'Once//twice//thrice//no more'
        >>> result = parser(content, objconf)
        >>> next(result) == {'token': 'Once'}
        True
    """
    if skip:
        return kwargs["stream"]
    # Split, trim, optionally dedupe, optionally sort case-insensitively.
    tokens = [piece.strip() for piece in content.split(objconf.delimiter) if piece]
    if objconf.dedupe:
        tokens = set(tokens)
    if objconf.sort:
        tokens = sorted(tokens, key=str.lower)
    return ({objconf.token_key: token} for token in tokens)
|
33715c4b6f5b352ddd78efd538e288b456fa695f
| 87,802
|
import re
def parse_port_name(name):
    """
    Parse a port name into (base name, cell index, bit index).

    >>> parse_port_name("A_PORT")
    ('A_PORT', None, None)
    >>> parse_port_name("A_PORT_0")
    ('A_PORT', 0, None)
    >>> parse_port_name("A_PORT_b31")
    ('A_PORT', None, 31)
    >>> parse_port_name("A_PORT_0_b15")
    ('A_PORT', 0, 15)
    """
    # Peel off a trailing bit suffix ("_b<N>") if present.
    bit = None
    bit_match = re.match(r"(?P<name>.*)(_b(?P<bit>[0-9]+))$", name)
    if bit_match:
        name = bit_match.group("name")
        bit = int(bit_match.group("bit"))
    # Peel off a trailing cell suffix ("_<N>") if present.
    cell = None
    cell_match = re.match(r"(?P<name>.*)(_(?P<cell>[0-9]+))$", name)
    if cell_match:
        name = cell_match.group("name")
        cell = int(cell_match.group("cell"))
    return name, cell, bit
|
643dbcad09f890419d36b47123a6478387944548
| 87,809
|
def recall_at_r(I, gt, r):
    """
    Compute the average Recall@r over all queries.

    Args:
        I (np.ndarray): Retrieval result, shape (#queries, ANY), integer
            database-item indices.
        gt (np.ndarray): Groundtruth, shape (#queries, ANY), integer.
            Only gt[:, 0] is used.
        r (int): Top-r

    Returns:
        The average recall@r over all queries
    """
    assert r <= I.shape[1]
    assert len(I) == len(gt)
    hits = (I[:, :r] == gt[:, :1]).sum()
    return hits / float(len(I))
|
bd46d614c0eeff5d711d6b66500d61b570acab35
| 87,813
|
def continuous_plotting_label(x):
    """Build an x-axis label for a continuous-feature plot.

    Strings pass through unchanged; interval-like objects (with .left and
    .right, e.g. a pandas Interval — TODO confirm against callers) become
    a "low-high" range label with rounded integer bounds.
    """
    if type(x) is str:
        return x
    low = int(round(x.left))
    high = int(round(x.right))
    return f"{low}-{high}"
|
d2b7f7db3a4390ac51e684ff71c097d1fabae548
| 87,814
|
async def convert_dict_to_sorted_list(dict):
    """Convert a dictionary into a list of (key, value) tuples sorted by key.

    NOTE: the parameter name shadows the builtin ``dict``; it is kept
    for backward compatibility with keyword callers.
    """
    # sorted() builds and sorts the list in one step, replacing the
    # mutate-then-return pattern.
    return sorted(dict.items())
|
e6883beda85e077ebeb10bea40e30b0280ae94de
| 87,823
|
def join_logs(schema_log, data_log):
    """Combine the schema and data validation logs into one report string."""
    parts = [
        '\n> SCHEMA VALIDATION\n\n',
        schema_log,
        '\n\n\n> DATA VALIDATION\n\n',
        data_log,
        '\n',
    ]
    return ''.join(parts)
|
8e7e2ea7a436d0948058cdd0f5af1a01e5d7447c
| 87,827
|
import pickle
def read_pickle_statefile(state_file):
    """Load and return the object stored in a pickled state file."""
    with open(state_file, 'rb') as handle:
        state = pickle.load(handle)
    return state
|
07ce077a8ef6fcb0285d8398f3e23a3d799ccd01
| 87,837
|
import sqlite3
def query_from_table(db_connection, table_name, columns = '*'):
    """
    Query rows from a table.

    Arguments:
    - db_connection: sqlite3 database connection.
    - table_name: Database table name.
    - columns: A list of columns to query. By default it is '*' and
      queries all columns.

    Returns:
        List of result rows; an empty list if the query failed.
    """
    if columns != '*':
        columns = ', '.join(columns)
    # NOTE(security): identifiers (table/column names) cannot be bound as
    # sqlite3 parameters and are interpolated into the SQL string here —
    # callers must not pass untrusted values for table_name or columns.
    cursor = db_connection.cursor()
    query = []
    try:
        query_str = 'SELECT ' + columns + ' FROM ' + table_name
        cursor.execute(query_str)
        query = cursor.fetchall()
    except sqlite3.OperationalError as error:
        # Bug fix: the original printed the exception *class* object, not
        # the caught error; report the actual message instead.
        print('Query: statement failed with OperationalError:', error)
    return query
|
b7324f1fe8cf7131bf41e14070540a2a416f1265
| 87,840
|
import torch
from typing import Tuple
def accuracy(preds: torch.Tensor, targets: torch.Tensor, cls_dim: int = 1,
             ) -> Tuple[float, float, float, float]:
    """Calculate accuracy for predictions against one-hot binary targets.

    The class dimension is assumed to be cls_dim. When the predictions
    have size 1 along cls_dim they are treated as single-node binary
    output: sigmoided and thresholded at 0.5 to obtain the class.

    :param preds: predictions tensor
    :param targets: targets tensor
    :param cls_dim: dimension holding class information for multi-class
        classification; e.g. cls_dim == 1 means shape (batch, num_classes, *)
    :return: tuple of (accuracy, #true pos, #true neg, total number of samples)
    """
    if preds.size()[cls_dim] > 1:
        class_pred = torch.argmax(preds, dim=cls_dim)
    else:
        # Single output node: sigmoid + 0.5 threshold gives the class.
        class_pred = (torch.sigmoid(preds) > 0.5).squeeze(cls_dim).float()
    class_gt = torch.argmax(targets, dim=cls_dim).float()
    true_pos = float(torch.sum(class_pred * class_gt))
    true_neg = float(torch.sum((1 - class_pred) * (1 - class_gt)))
    total = float(class_gt.size()[0])
    acc = (true_pos + true_neg) / total
    return acc, true_pos, true_neg, total
|
ba6388b74fa539e51693843fcc5fbfc57ab7881e
| 87,844
|
import yaml
def read_file(filename):
    """Read a YAML file.

    Args:
        filename (str): Name of the file.

    Returns:
        data (dict): A dictionary.

    Examples:
        >>> data = read_file('share/traj.yaml')
        >>> type(data)
        <class 'dict'>
        >>> json.dumps(data, sort_keys=True)
        '{"trajectory": {"q0": [443, 444], "q1": [205, 206], "t": [0.0416667, 0.0833333]}}'
    """
    with open(filename, "r") as stream:
        return yaml.load(stream, yaml.SafeLoader)
|
4321aa67ed4cdbd0b43bd77b436e77df6af36652
| 87,851
|
def set_action(registers, num_a, _, reg_c):
    """Perform the 'set' opcode: store the literal num_a into register reg_c.

    The third operand is ignored. Mutates *registers* in place and
    returns it.
    """
    registers[reg_c] = num_a
    return registers
|
da6e9e59d1f437172f70e258ed068445def26de3
| 87,855
|
def split_extension(file_name, special=('tar.bz2', 'tar.gz')):
    """
    Find the file extension of a file name, including support for
    special-case multipart file extensions (like .tar.gz).

    Parameters
    ------------
    file_name : str
        File name
    special : sequence of str
        Multipart extensions
        eg: ('tar.bz2', 'tar.gz')

    Returns
    ----------
    extension : str
        Last characters after a period (the whole name when there is no
        period), or a value from 'special'
    """
    # Tuple default fixes the mutable-default-argument pitfall of the
    # original list default; behavior is otherwise unchanged.
    file_name = str(file_name)
    special = tuple(special)
    for ending in special:
        if file_name.endswith(ending):
            return ending
    return file_name.split('.')[-1]
|
7d8ee13b27f0ec5fce10f30817e4ed960a447b64
| 87,860
|
import math
def prime_divisors(n):
    """
    Return the distinct prime divisors of a positive integer.

    Parameters
    ----------
    n : int
        Positive integer whose prime divisors are wanted.

    Returns
    -------
    list of int
        Distinct prime divisors of n in ascending order; empty for n < 2.
    """
    divisors = []
    if n < 2:
        return divisors
    # Bug fix: the original used float division (n = n / 2), which loses
    # precision and silently corrupts results for large integers; use
    # integer division instead. Recording each prime once also removes
    # the original's separate adjacent-duplicate cleanup pass.
    if n % 2 == 0:
        divisors.append(2)
        while n % 2 == 0:
            n //= 2
    factor = 3
    while factor * factor <= n:
        if n % factor == 0:
            divisors.append(factor)
            while n % factor == 0:
                n //= factor
        factor += 2
    if n > 2:
        # Remaining residue is itself prime.
        divisors.append(n)
    return divisors
|
3ebc2e88cd974cac2f9f4cfebd4751a448660c49
| 87,861
|
import math
def calculate_epsilon(steps_done,
                      EGREEDY_EPSILON = 0.9,
                      EGREEDY_EPSILON_FINAL = 0.02,
                      EGREEDY_DECAY = 1000):
    """
    Exponentially decay epsilon with the number of steps taken.

    Input:
        steps_done (int) : number of steps completed
    Returns:
        float - decayed epsilon, floored at EGREEDY_EPSILON_FINAL
    """
    decayed = EGREEDY_EPSILON * math.exp(-1. * steps_done / EGREEDY_DECAY)
    return decayed if decayed > EGREEDY_EPSILON_FINAL else EGREEDY_EPSILON_FINAL
|
2e36d31628dc25f46643bb49c7ed29c32717bcca
| 87,868
|
def has_isolated_transposition(source, image):
    """Check whether some pair a, b exists with a -> b and b -> a under
    the mapping defined element-wise by source -> image."""
    mapping = dict(zip(source, image))
    return any(mapping[img] == src for src, img in zip(source, image))
|
b856370161ff918bbbf679b1dd366aa2f563ff51
| 87,872
|
import time
def rt_parse_ticket_time(t):
    """
    Parse an RT ticket creation timestamp string into a time struct.

    :param t: timestamp string like 'Mon Jan 02 03:04:05 2023'
    """
    fmt = "%a %b %d %H:%M:%S %Y"
    return time.strptime(t, fmt)
|
8c83dea901f91fa598c8eb8c549d5d5d937e8bf2
| 87,877
|
def _get_sig_data(word: dict):
"""HELPER: extracts if the sig is in word dictionary
:param word: dictionary from the json word
:type word: dict
:return: if sig, ':' and '$' in the word dictionary, it returns it. otherwise it returns False.
:rtype: list
"""
if "sig" in word:
if ":" in word["sig"] and "$" in word["sig"]:
return [word["sig"][word["sig"].find(":") + 1:word["sig"].find("=")],
word["sig"][word["sig"].find("$") + 1:]]
return False
|
e337e285c42030e3ddaecd3a5ca294f424cb2141
| 87,880
|
def kms_key_exists(event):
    """Return True if the event's responseElements contain a
    server-side-encryption KMS key id, False otherwise."""
    # Direct membership test replaces the if/return True/return False.
    return 'x-amz-server-side-encryption-aws-kms-key-id' in event['detail']['responseElements']
|
c5498a3849541f9c56060d09395977dfd7799693
| 87,882
|
from typing import Tuple
import re
def _parse_pyside_uri(uri: str) -> Tuple[str, str]:
"""
Try to parse PySide URI and extract html file name and anchor
"""
uri_re = re.compile(
r"https://doc.qt.io/qtforpython(-5)?/(?P<path>(PySide[26])(/\w+)+)\.html#(?P<anchor>(\w+\.)+(\w+))"
)
matched = uri_re.match(uri)
if matched is None:
raise ValueError(f"Cannot parse '{uri}' uri")
path = matched.group("path")
uri_anchor = matched.group("anchor")
class_string = path.split("/")[-1]
anchor = "" if uri_anchor.endswith(class_string) else uri_anchor.split(".")[-1]
return class_string.lower() + ".html", anchor
|
cc5ec88a2f3e08ad0f3d7f654fa0b1a89c95a61b
| 87,883
|
def _remove_private_key(content):
"""
Remove most of the contents of a private key file for logging.
"""
prefix = '-----BEGIN PRIVATE KEY-----'
suffix = '-----END PRIVATE KEY-----'
start = content.find(prefix)
if start < 0:
# no private key
return content
# Keep prefix, subsequent newline, and 4 characters at start of key
trim_start = start + len(prefix) + 5
end = content.find(suffix, trim_start)
if end < 0:
end = len(content)
# Keep suffix and previous 4 characters and newline at end of key
trim_end = end - 5
if trim_end <= trim_start:
# strangely short key, keep all content
return content
return content[:trim_start] + '...REMOVED...' + content[trim_end:]
|
0ef09f64e60acd02c6a280934fb8074781c0185e
| 87,884
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.