content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def get_occ_cmap():
    """Return the matplotlib colormap name for occupancy rendering.

    UNKNOWN/OCCUPIED/FREE map onto the builtin 'binary' ramp
    (grey, black, white).
    """
    # A named builtin colormap suffices; no explicit BoundaryNorm needed.
    return 'binary'
import math
def round_molden(num, p=6):
    """Round ``num`` the way Molden formats numbers in its [Atoms] section.

    The digit at decimal place ``p`` decides the mode: 6, 8 or 9 trigger
    ordinary rounding at place ``p``; anything else (including 7)
    truncates toward zero at place ``p``.
    """
    scale = 10 ** p
    deciding_digit = math.floor(abs(num) * scale) % 10
    if deciding_digit > 5 and deciding_digit != 7:
        return round(num, p)
    # Truncate toward zero: floor for positives, ceil for negatives.
    chop = math.floor if num >= 0 else math.ceil
    return chop(num * scale) / scale
import re
def preprocess(text: str):
    """Normalize text before feeding it to the model.

    Lowercases, collapses runs of whitespace, blanks HTML numeric
    entities, and replaces URLs / @mentions with placeholder tokens.

    :param text: raw input text.
    :return: the normalized text.
    """
    # FIX: regex patterns are now raw strings; the previous plain strings
    # contained invalid escape sequences (\s, \( ...) that raise
    # DeprecationWarning/SyntaxWarning on modern Python (W605).
    space_pattern = r'\s+'
    url_regex = (r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|'
                 r'[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
    symbol_regex = r'&#[^\s]+'
    mention_regex = r'@[^\s]+'
    parsed_text = text.lower()
    parsed_text = re.sub(space_pattern, ' ', parsed_text)
    parsed_text = re.sub(symbol_regex, ' ', parsed_text)
    parsed_text = re.sub(url_regex, 'URLHERE', parsed_text)
    parsed_text = re.sub(mention_regex, 'MENTIONHERE', parsed_text)
    return parsed_text
def getColData(data, colnums = None, colnames = None, copy=False):
    """
    Select column(s) from a DataFrame, by position or by name.
    Inputs:
      > data: The DataFrame
      > colnums: The number(s) of the column(s)
      > colnames: The name(s) of the column(s)
      > copy (False by default): should we copy the data?
    Output:
      > DataFrame (or Series) of the specified column data; the original
        DataFrame when neither selector is given.
    """
    if colnums is not None:
        selected = data.iloc[:, colnums]
    elif colnames is not None:
        selected = data[colnames]
    else:
        print("Not doing anything with the data in getColData")
        return data
    return selected.copy() if copy is True else selected
def as_sender_tuple(sender):
    """Normalize a sender to an (address, address) tuple.
    >>> as_sender_tuple('joe@testco.com')
    ('joe@testco.com', 'joe@testco.com')
    >>> as_sender_tuple(('joe@testco.com', 'joe@testco.com'))
    ('joe@testco.com', 'joe@testco.com')
    >>> as_sender_tuple(['joe@testco.com', 'joe@testco.com'])
    ('joe@testco.com', 'joe@testco.com')
    """
    # A bare string is duplicated; any other iterable is coerced as-is.
    if isinstance(sender, str):
        return (sender, sender)
    return tuple(sender)
def listify_multiline_string(string):
    """
    Return a list constructed by splitting the given multiline string,
    stripping whitespace, and filtering out empty values.
    :param string: The multiline string to convert into a list.
    :return: The resulting list.
    """
    stripped = (line.strip() for line in string.splitlines())
    # FIX: previously returned a lazy `filter` object even though the
    # docstring promises a list; materialize it.
    return [line for line in stripped if line]
def nodeCopy(s:str):
    """
    nodeCopy(s) -> bool
    Copy all selected nodes into a file or the clipboard.
    @param s: Name of the clipboard to copy into; the special value
        '%clipboard%' targets the operating system clipboard.
    @return: True if any nodes were selected, False otherwise.
    """
    # Stub implementation: no node graph is attached here, so report
    # that nothing was selected.
    return False
def tennis_2d_ip(env_obs, subtask_obs):
    """Return True if the ball bounces off the floor for the 2nd time.

    The third entry of ``achieved_goal`` acts as the bounce flag
    (set to 1.0 on the second bounce); ``subtask_obs`` is unused.
    """
    bounce_flag = env_obs["achieved_goal"][2]
    return bounce_flag == 1.0
def getHeight(root):
    """Return the height of a binary tree.

    An empty tree has height 0; a single node has height 1.
    """
    if not root:
        return 0
    left_height = getHeight(root.left)
    right_height = getHeight(root.right)
    return max(left_height, right_height) + 1
def resolve_slice(slice_, n):
    """Clamp `slice_` to a sequence of length `n`, returning an explicit
    (start, stop, step) slice."""
    start, stop, step = slice_.indices(n)
    return slice(start, stop, step)
def nid_to_slurm_nid_name(nid):
    """
    Return the slurm nid name ("nid" + zero-padded six-digit number)
    for the given nid number.
    """
    return f"nid{nid:06d}"
def validate_subnet_mask(subnet_mask):
    """Checks that the argument is a valid IPv4 subnet mask.
    :param str subnet_mask: The subnet mask to check.
    :return: True if the subnet mask is valid.
    :rtype: bool
    :raises ValueError: if the subnet mask is invalid.
    .. seealso::
        https://codereview.stackexchange.com/questions/209243/verify-a-subnet-mask-for-validity-in-python
    """
    if subnet_mask is None or not subnet_mask.strip():
        raise ValueError("Invalid subnet mask: {0}.".format(subnet_mask))
    subnet_mask = subnet_mask.strip()
    a, b, c, d = (int(octet) for octet in subnet_mask.split("."))
    # FIX: reject out-of-range octets up front (e.g. "256.0.0.0"); the bit
    # arithmetic below does not reliably catch them.
    for octet in (a, b, c, d):
        if not 0 <= octet <= 255:
            raise ValueError("Invalid subnet mask: {0}".format(subnet_mask))
    mask = a << 24 | b << 16 | c << 8 | d
    if mask < 1:
        raise ValueError("Invalid subnet mask: {0}".format(subnet_mask))
    # Count the number of consecutive 0 bits at the right.
    # https://wiki.python.org/moin/BitManipulation#lowestSet.28.29
    m = mask & -mask
    right0bits = -1
    while m:
        m >>= 1
        right0bits += 1
    # All bits to the left of the lowest set bit must be 1s.
    if mask | ((1 << right0bits) - 1) != 0xffffffff:
        raise ValueError("Invalid subnet mask: {0}".format(subnet_mask))
    return True
import math
def divisors(n: int) -> list[int]:
    """Find all positive integer divisors of a given positive integer.
    Parameters
    ----------
    n : int
        Number whose divisors are to be found.
    Returns
    -------
    list[int]
        Sorted list of divisors of :math:`n`.
    """
    if n == 1:
        return [1]
    found = [1, n]
    # FIX: use math.isqrt for an exact integer square root; math.sqrt is a
    # float and can misplace the loop bound for very large n.
    root = math.isqrt(n)
    for k in range(2, root + 1):
        if n % k:
            continue
        found.append(k)
        if k != n // k:  # avoid adding the square root twice
            found.append(n // k)
    return sorted(found)
def greenwich_sidereal_time(year,doy):
    """
    Approximate sidereal time at Greenwich
    @param year : year of date
    @type year : int
    @param doy : day of year (fractional part is the time of day)
    @type doy : float
    @return: Greenwich sidereal time in hours, in [0, 24)
    """
    year_from_1966 = year-1966
    # Julian centuries (36525 days) elapsed since the 1966 reference epoch,
    # counting leap days via int((years + 1)/4).
    dt = (year_from_1966*365 + int((year_from_1966 + 1)/4.) + int(doy)-1)/36525.
    # NOTE(review): constants resemble the classic GMST polynomial
    # (offset in revolutions + 8640184.67 s/century drift) -- confirm source.
    dst = 0.278329562 + (8640184.67*dt+0.0929*dt**2)/86400
    gst0 = dst % 1 # GST on Jan. 0 of current year
    # Advance by the fractional day converted from solar to sidereal
    # (1 sidereal day = 0.997269566 solar days), then wrap to 24 hours.
    return 24*(gst0 + (doy % 1)/0.997269566) % 24
def _copy_bdd(u, level_map, old_bdd, bdd, cache):
    """Recurse to copy nodes from `old_bdd` to `bdd`.
    @param u: node in `old_bdd`; a negative value denotes the
        complemented form of node `abs(u)`
    @type level_map: `dict` that maps old to new levels
    @type old_bdd, bdd: `BDD`
    @type cache: `dict` memoizing old node id -> new (positive) node
    @return: node in `bdd` corresponding to `u`
    """
    # terminal ?
    if abs(u) == 1:
        return u
    # non-terminal
    # memoized ?
    r = cache.get(abs(u))
    if r is not None:
        assert r > 0, r
        # complement ?
        if u < 0:
            r = -r
        return r
    # recurse
    # successors of the regular (positive) node: level and the two children
    # -- NOTE(review): assumed (level, low, high) order; confirm in BDD._succ.
    jold, v, w = old_bdd._succ[abs(u)]
    p = _copy_bdd(v, level_map, old_bdd, bdd, cache)
    q = _copy_bdd(w, level_map, old_bdd, bdd, cache)
    # copied children keep the sign convention of the originals
    assert p * v > 0, (p, v)
    assert q > 0, q
    # map this level
    jnew = level_map[jold]
    # fresh variable node at the mapped level, then combine children via ITE
    g = bdd.find_or_add(jnew, -1, 1)
    r = bdd.ite(g, q, p)
    # memoize
    assert r > 0, r
    cache[abs(u)] = r
    # complement ?
    if u < 0:
        r = -r
    return r
import torch
import typing
def unvectorize(vector: torch.Tensor, reference_state_dict: typing.Dict[str, torch.Tensor]):
    """Rebuild a state dict from a flat vector.

    Keys are consumed in sorted order and each tensor takes the shape of
    the corresponding entry in ``reference_state_dict``.
    """
    if len(vector.shape) > 1: raise ValueError('vector has more than one dimension.')
    state_dict = {}
    remaining = vector
    for key in sorted(reference_state_dict):
        if remaining.nelement() == 0: raise ValueError('Ran out of values.')
        template = reference_state_dict[key]
        count = template.nelement()
        state_dict[key] = remaining[:count].reshape(template.shape)
        remaining = remaining[count:]
    if remaining.nelement() > 0: raise ValueError('Excess values.')
    return state_dict
from typing import Awaitable
import asyncio
async def delay_task(delay: float, task: Awaitable):
    """Sleep for `delay` seconds, then await `task` and return its result."""
    await asyncio.sleep(delay)
    result = await task
    return result
def build_user_agent(octavia_version: str, workspace_id: str) -> str:
    """Build the API client's user-agent string.
    Args:
        octavia_version (str): Current octavia version.
        workspace_id (str): Current workspace id.
    Returns:
        str: "octavia-cli/<version>/<workspace_id>".
    """
    return "/".join(["octavia-cli", octavia_version, workspace_id])
import numpy
def expsech2_dens_with_hole(R,z,Rd,Rm,zd,Sigma0):
    """rho(R,z) = Sigma0/(4 zd) * exp(-Rm/R - R/Rd) * sech(z/[2 zd])^2.

    Returns 0 on the axis (R == 0), where the hole term -Rm/R diverges.
    """
    if R == 0.:
        return 0.
    radial = numpy.exp(-Rm/R - R/Rd)
    return Sigma0/(4*zd) * radial / numpy.cosh(z/(2*zd))**2
def extended_gcd(a, b):
    """Extended Euclidean algorithm.

    Returns (g, x, y) such that a*x + b*y = g = gcd(a, b).
    """
    if a == 0:
        # Base case: gcd(0, b) = b = 0*0 + 1*b.
        return b, 0, 1
    g, x_prev, y_prev = extended_gcd(b % a, a)
    return g, y_prev - (b // a) * x_prev, x_prev
def label_from_id(id_string):
    """
    Return a label string constructed from the supplied Id string.
    Underscore characters in the Id are replaced by spaces, and the
    first character is capitalized.
    >>> label_from_id("entity_id") == "Entity id"
    True
    """
    temp = id_string.replace('_', ' ').strip()
    # FIX: guard against empty/whitespace-only ids, which previously
    # raised IndexError on temp[0].
    if not temp:
        return ""
    return temp[0].upper() + temp[1:]
import pytz
def get_obj_type(obj):
    """Return the string representation of obj's type.

    None maps to 'null' and pytz timezone instances to 'pytz_timezone';
    everything else uses the type's __name__.
    """
    type_name = type(obj).__name__
    if type_name == 'NoneType':
        return 'null'
    if isinstance(obj, pytz.BaseTzInfo):
        return 'pytz_timezone'
    return type_name
def fixed_negative_float(response: str) -> float:
    """
    Convert a Keysight response such as '-0.-1' (returned after sending
    '-0.1') to a float by dropping a stray minus sign in the decimal part.
    """
    pieces = response.split('.')
    if len(pieces) > 2:
        raise ValueError('String must of format `a` or `a.b`')
    integer_part = pieces[0]
    fraction_part = pieces[1].replace("-", "") if len(pieces) > 1 else '0'
    return float(f"{integer_part}.{fraction_part}")
def dtrunc(x: float) -> float:
    """Truncate a float toward zero, returning the result as a float."""
    return float(int(x))
def read_taxdump(nodes_fp, names_fp=None):
    """Read NCBI taxdump.
    Parameters
    ----------
    nodes_fp : str
        file path to NCBI nodes.dmp
    names_fp : str, optional
        file path to NCBI names.dmp
    Returns
    -------
    dict of dict
        taxid : {
            'parent' : str
                parent taxid
            'rank' : str
                taxonomic rank
            'name' : str
                taxon name, empty if names_fp is None
            'children' : set of str
                child taxids
        }
    """
    taxdump = {}
    # format of nodes.dmp: taxid | parent taxid | rank | more info...
    with open(nodes_fp, 'r') as f:
        for line in f:
            # drop the "\t|" field separators, then split into tab columns
            x = line.rstrip('\r\n').replace('\t|', '').split('\t')
            taxdump[x[0]] = {'parent': x[1], 'rank': x[2], 'name': '',
                             'children': set()}
    # format of names.dmp: taxid | name | unique name | name class |
    if names_fp is not None:
        with open(names_fp, 'r') as f:
            for line in f:
                x = line.rstrip('\r\n').replace('\t|', '').split('\t')
                # only the "scientific name" entry is kept as the taxon name
                if x[3] == 'scientific name':
                    taxdump[x[0]]['name'] = x[1]
    # identify children taxids
    for tid in taxdump:
        pid = taxdump[tid]['parent']
        if tid != pid:  # skip root whose taxid equals its parent
            taxdump[pid]['children'].add(tid)
    return taxdump
def count_bits(number):
    """Return the number of set bits in a non-negative integer.

    Solution to the Codewars "Bit Counting" kata:
    https://www.codewars.com/kata/526571aae218b8ee490006f4/train/python
    """
    # FIX: the previous divmod loop never terminated for negative input;
    # reject it explicitly and use the idiomatic bin().count for the rest.
    if number < 0:
        raise ValueError("number must be non-negative")
    return bin(number).count("1")
import json
def to_json(quiz_input):
    """Serialize a quiz object to a JSON string.

    :param quiz_input: object exposing quiz attributes (code, uri, name,
        description, score, max_score, question_count, questions; each
        question exposing its own fields plus an ``options`` list).
    :return: JSON document of the form {"quiz": {...}}.
    """
    dictionary = dict()
    try:
        dictionary["code"] = quiz_input.code
        dictionary["uri"] = quiz_input.uri
        dictionary["name"] = quiz_input.name
        dictionary["description"] = quiz_input.description
        # FIX: removed the duplicated `dictionary["score"]` assignment.
        dictionary["score"] = quiz_input.score
        dictionary["max_score"] = quiz_input.max_score
        dictionary["question_count"] = quiz_input.question_count
        questions = []
        for question in quiz_input.questions:
            question_dict = dict()
            question_dict["uri"] = question.uri
            question_dict["code"] = question.code
            question_dict["text"] = question.text
            question_dict["score"] = question.score
            question_dict["max_score"] = question.max_score
            question_dict["option_count"] = question.option_count
            # FIX: collect options INSIDE the questions loop; previously the
            # option loop ran after it, so only the last question received
            # its options.
            options = []
            for option in question.options:
                option_dict = dict()
                option_dict["uri"] = option.uri
                option_dict["code"] = option.code
                option_dict["text"] = option.text
                option_dict["score"] = option.score
                option_dict["is_correct"] = option.is_correct
                option_dict["is_selected"] = option.is_selected
                options.append(option_dict)
            question_dict["options"] = options
            questions.append(question_dict)
        dictionary["questions"] = questions
    except TypeError:
        print("The parameter send is not an instance of a Quiz")
    parent_dict = dict(quiz=dictionary)
    return json.dumps(parent_dict)
def _create_group_to_col_position(column_groups):
"""Get mapping from column groups to column positions.
Args:
column_names (list): The column groups to display in the estimatino table.
Returns:
group_to_col_index(dict): The mapping from column group titles to column
positions.
"""
if column_groups is not None:
group_to_col_index = {group: [] for group in list(set(column_groups))}
for i, group in enumerate(column_groups):
group_to_col_index[group].append(i)
else:
group_to_col_index = None
return group_to_col_index | 4ff10a8b3076f940dffd4ea81ea25509b61565be | 40,045 |
def clean_data(data):
    """Takes a list of rows and chooses the most recent one to include in
    the list returned. (Stub: currently always returns an empty list.)"""
    return []
from pathlib import Path
def rinextype(fn: Path) -> str:
    """
    Classify a RINEX-related file as 'obs', 'nav' or 'nc' by file
    extension only; the file itself is not inspected (that comes later).
    """
    # For compressed files, classify on the inner name (the stem).
    inner = fn.stem.lower() if fn.suffix in ('.gz', '.zip', '.Z') else fn.name.lower()
    if inner.endswith(('obs', 'o', 'o.rnx', 'o.crx')):
        return 'obs'
    if inner.endswith(('nav', 'e', 'g', 'n', 'n.rnx')):
        return 'nav'
    if fn.suffix.endswith('.nc'):
        return 'nc'
    raise ValueError(f"I dont know what type of file you're trying to read: {fn}")
import numpy as np
def f_norm_scale(array_original):
    """
    Normalize each column of a 2-D numpy array to the range [-1, +1].
    Developed by : Sai G.S. Pai (ETH Singapore)
    INPUTS:
        array_original : original 2-D array to normalize (rows = samples,
            columns = features)
    OUTPUTS:
        array_norm : normalized array of the same shape
    NOTE:
        Requires numpy. A constant column divides by zero (same behavior
        as the original element-wise loop: numpy emits a warning and
        produces nan/inf).
    """
    col_min = array_original.min(axis=0)
    col_max = array_original.max(axis=0)
    half_range = (col_max - col_min) / 2
    midpoint = (col_max + col_min) / 2
    # PERF FIX: vectorized replacement for the original per-element Python
    # loops; broadcasting applies each column's midpoint/half-range to
    # every row at once.
    return (array_original - midpoint) / half_range
import torch
def bbox_to_pv(bbox_list):
    """
    Calculate position-velocity features from pedestrian bounding boxes.

    bbox_list is indexed [time][pedestrian], each entry a (1, 4) tensor of
    (x1, y1, x2, y2) corners -- assumed from the squeeze(dim=0) below;
    TODO confirm layout against the caller.
    Returns a (num_pedestrians, num_timesteps, 8) float32 tensor of
    [xc, yc, w, h, dx, dy, dw, dh] per step, where the d* terms are
    absolute frame-to-frame differences (zero for the first frame).
    """
    num_steps = len(bbox_list)
    pv_3d = []
    for i in range(len(bbox_list[0])):
        p_1d = []
        for t in range(num_steps):
            bbox = torch.squeeze(bbox_list[t][i], dim=0)
            # to plain floats
            b = [coord.item() for coord in bbox]
            # bbox center and size
            xc = (b[0] + b[2]) / 2
            yc = (b[1] + b[3]) / 2
            w = abs(b[2] - b[0])
            h = abs(b[3] - b[1])
            p_1d.append([xc, yc, w, h])
        # FIX: velocities were previously appended AFTER a full list of
        # zero placeholders, so indices 0..T-1 (the only ones read below)
        # were all zeros and the computed diffs were silently discarded.
        v_1d = [[0.0, 0.0, 0.0, 0.0]]
        for t in range(1, num_steps):
            v_1d.append([abs(p_1d[t][j] - p_1d[t - 1][j]) for j in range(4)])
        pv_1d = [torch.tensor(p_1d[t] + v_1d[t], dtype=torch.float32)
                 for t in range(num_steps)]
        pv_3d.append(torch.stack(pv_1d))
    # stack pedestrians into the leading dimension
    return torch.stack(pv_3d)
def _interval_example_data(data):
"""Docstring"""
avg_price_with_interval = (data.groupby('fruit')['total_price'].agg(
['mean', 'std', 'count'])
.assign(
lower_ci=lambda x: x['mean'] - 1.96 * x['std'] / x['count']**.5,
upper_ci=lambda x: x['mean'] + 1.96 * x['std'] / x['count']**.5)
.reset_index())
"""Print break"""
return avg_price_with_interval | b2ddb056435947de51821136503a7f66c928d023 | 40,054 |
import math
def law_of_sines(a, b, c):
    """
    Return the angle (in degrees) of the corner opposite side c in a
    triangle with sides a, b and c. (Despite the function's name, this
    applies the law of cosines.)
    """
    cos_gamma = (c**2 - b**2 - a**2) / (-2.0 * a * b)
    return math.degrees(math.acos(cos_gamma))
def _sanitize_filename(filename):
"""
Get a filename that lacks the / character (so it doesn't express a path by
accident) and also lacks spaces (just for tab-completion convenience).
"""
return filename.replace('/', '_').replace(' ', '_') | ce19efdb439762d305987447651b8ffd9c6aaff8 | 40,056 |
def tuple_getter(tensor, idx_tuple):
    """Index `tensor` successively by each element of `idx_tuple`;
    an empty tuple returns `tensor` unchanged."""
    current = tensor
    remaining = list(idx_tuple)
    while remaining:
        current = current[remaining.pop(0)]
    return current
def ascii_chars_to_image(ascii_chars, width=250):
    """Wrap a flat string of ASCII 'pixels' into newline-separated rows of
    `width` characters, turning the string into an image."""
    rows = [ascii_chars[start:start + width]
            for start in range(0, len(ascii_chars), width)]
    return "\n".join(rows)
from bs4 import BeautifulSoup
def replace_soup_tag(soup,
                     new_name, new_namespace=None, new_nsprefix=None, new_attrs={}, new_sourceline=None,
                     new_sourcepos=None, new_kwattrs={},
                     old_name=None, old_attrs={}, old_recursive=True, old_text=None, old_limit=None, old_kwargs={}, **kwargs):
    """Replace Old tag with New tag.
    - Args named ``old_XXX`` specify "How to find old tags" (forwarded to ``soup.find_all``)
    - Args named ``new_XXX`` specify "How to create new tags" (forwarded to ``new_tag``)
    Args:
        old_name (str)    : A filter on tag name.
        old_attrs (dict)  : A dictionary of filters on attribute values.
        old_recursive (bool) : If this is True, ``.find_all`` will perform a recursive search of this PageElement's children. Otherwise, only the direct children will be considered.
        old_text (str)    : A filter on the tag's text content.
        old_limit (int)   : Stop looking after finding this many results.
        old_kwargs (dict) : A dictionary of filters on attribute values.
        new_name (str)    : The name of the new Tag.
        new_namespace (str)  : The URI of the new Tag's XML namespace, if any.
        new_nsprefix (str)   : The prefix for the new Tag's XML namespace, if any.
        new_attrs (dict)     : A dictionary of this Tag's attribute values; can be used instead of `kwattrs` for attributes like 'class' that are reserved words in Python.
        new_sourceline (str) : The line number where this tag was (purportedly) found in its source document.
        new_sourcepos (str)  : The character position within ``sourceline`` where this tag was (purportedly) found.
        new_kwattrs (dict)   : Keyword arguments for the new Tag's attribute values.
    Examples:
        >>> from bs4 import BeautifulSoup
        >>> from pycharmers.utils import replace_soup_tag
        >>> section = BeautifulSoup(\"\"\"
        ... <h2>AAA</h2>
        ... <div>
        ...   <p>aaaaaaaaaaaaaaaaaaaaaa</p>
        ... </div>
        ... <h3>BBB</h3>
        ... <div>
        ...   <p>bbbbbbbbbbbbbbbbbbbbbb</p>
        ... </div>
        >>> \"\"\")
        >>> section = replace_soup_tag(soup=section, old_name="h3", new_name="h2")
        >>> section
        <html><body><h2>AAA</h2>
        <div>
        <p>aaaaaaaaaaaaaaaaaaaaaa</p>
        </div>
        <h2>BBB</h2>
        <div>
        <p>bbbbbbbbbbbbbbbbbbbbbb</p>
        </div>
        </body></html>
    """
    for old in soup.find_all(name=old_name, attrs=old_attrs, recursive=old_recursive, text=old_text, limit=old_limit, **old_kwargs):
        # Build the replacement tag from a throwaway soup, move the old
        # tag's children into it, then swap it into the tree in place.
        new = BeautifulSoup(markup="", features="lxml").new_tag(name=new_name, namespace=new_namespace, nsprefix=new_nsprefix, attrs=new_attrs, sourceline=new_sourceline, sourcepos=new_sourcepos, **new_kwattrs)
        new.extend(list(old.children))
        old.replace_with(new)
    return soup
def repos_dict(repos):
    """Returns {"repo1": "branch", "repo2": "pull"}: for each repo, prefer
    the branch value and fall back to the pull value when the branch is
    falsy."""
    result = {}
    for repo, (branch, pull) in repos.items():
        result[repo] = branch or pull
    return result
def find_diff(d1, d2, path=""):
    """
    Compare two nested dictionaries and return a human-readable diff.
    Derived from https://stackoverflow.com/questions/27265939/comparing-python-dictionaries-and-nested-dictionaries
    :param d1: Dict 1
    :param d2: Dict 2
    :param path: Breadcrumb of the keys leading to this level
    :return: diff description string ("" when d1's entries all match d2)
    """
    return_str = ""
    for k in d1:
        if k not in d2:
            return_str += "{0} {1}\n".format(path, ":")
            return_str += "{0} {1}\n".format(k + " as key not in d2", "\n")
        else:
            # FIX: build the child path locally instead of mutating `path`,
            # which previously corrupted the breadcrumb for later keys.
            child_path = k if path == "" else path + "->" + k
            if type(d1[k]) is dict:
                return_str += find_diff(d1[k], d2[k], child_path)
            elif type(d1[k]) == list:
                # Compare lists as index-keyed dicts.
                # FIX: the recursion result was previously discarded.
                return_str += find_diff(
                    dict(zip(map(str, range(len(d1[k]))), d1[k])),
                    dict(zip(map(str, range(len(d2[k]))), d2[k])),
                    child_path)
            else:
                if d1[k] != d2[k]:
                    return_str += "{0} {1}\n".format(path, ":")
                    return_str += "{0} {1} {2} {3}\n".format(" - ", k, " : ", d1[k])
                    return_str += "{0} {1} {2} {3}\n".format(" + ", k, " : ", d2[k])
    return return_str
def nice_price(price):
    """Format a gp amount as a human-friendly string, using K/M/B
    suffixes for thousands/millions/billions."""
    if price >= 1000000000:
        return f'{price / 1000000000:,.2f} B gp'
    if price >= 1000000:
        return f'{price / 1000000:,.1f} M gp'
    if price >= 1000:
        return f'{price / 1000:,.1f} K gp'
    return f'{price:,.0f} gp'
def title(text):
    """Return `text` converted to title case."""
    return str.title(text)
def input_(text=''):
    """
    Prompt repeatedly until a non-empty response is entered; a newline is
    printed after each rejected attempt (including EOF/Ctrl-C).
    """
    while True:
        try:
            response = input(text)
        except (EOFError, KeyboardInterrupt):
            print()
            continue
        if response:
            return response
        print()
import zipfile
def unzip(path_zip, path_save):
    """Extract every member of the archive at `path_zip` into the
    directory `path_save`."""
    archive = zipfile.ZipFile(path_zip, 'r')
    with archive as zf:
        zf.extractall(path_save)
    return None
def _is_stdlib(s):
"""Imports from stdlib like import scala.concurrent.duration.Duration"""
prefixes = {
'java.',
'javax.',
'javaw.',
'scala.'
}
for p in prefixes:
if s.startswith('import ' + p):
return True
return False | e12edc1c8932fe4ed43931a64c619356c432d475 | 40,073 |
def byLength(word1, word2):
    """
    Comparator ordering strings by length.
    Returns:
        Negative if word2 is longer than word1
        Positive if word1 is longer than word2
        Zero if they have equal length
    """
    length_difference = len(word1) - len(word2)
    return length_difference
def invert(array):
    """Map each value in `array` to the list of indices (in order) at
    which it occurs."""
    index_map = {}
    for position, value in enumerate(array):
        if value not in index_map:
            index_map[value] = []
        index_map[value].append(position)
    return index_map
def hash_distance(left_hash, right_hash):
    """Compute the Hamming distance between two equal-length hash strings."""
    if len(left_hash) != len(right_hash):
        raise ValueError('Hamming distance requires two strings of equal length')
    return sum(1 for a, b in zip(left_hash, right_hash) if a != b)
import os
def expandpath(path):
    """Expand a path to an absolute path.

    Expands any user directory prefix such as ~/ to the user's home
    directory, then converts the result to an absolute path.
    """
    expanded = os.path.expanduser(path)
    return os.path.abspath(expanded)
def reduce_dict(dictionary, keys):
    """Project `dictionary` onto `keys`, defaulting missing entries to []."""
    return {key: dictionary.get(key, []) for key in keys}
def _node_size(node):
"""Computes `node`'s size."""
if node:
return 1 + _node_size(node.left) + _node_size(node.right)
else:
return 0 | 95c25a3380cfe880f27e67be4bc0bfe5e7e0682c | 40,079 |
import re
def parse_ksql_query(query):
    """Read the parameters of the provided KSQL query.
    Args:
        query (str): the query given as string.
    Returns:
        (str, str, str, str): the table name, the metrics-value column,
        the column compared against the metric name, and the metric name.
    """
    query_pattern = "SELECT (\\w+) FROM (\\w+) WHERE (\\w+) *= *'(.+)';"
    match = re.match(query_pattern, query, re.IGNORECASE)
    assert match is not None, f"The query {query!r} has an invalid format."
    value_column, table, comparison_column, metric_name = match.groups()
    assert value_column != comparison_column, (
        "The column for the metric names and the one for the metrics values"
        " cannot be the same."
    )
    return table, value_column, comparison_column, metric_name
import math
def _on_base(base_run_id) -> str:
"""
Exists Runner
:param base_run_id: retrosheet base_run_oid
:return: '1' or '0'(1:True, 0:False)
"""
if type(base_run_id) == float and math.isnan(base_run_id):
return '0'
elif type(base_run_id) == str and len(base_run_id) > 0:
return '1'
return '0' | 13116740f95a1b26c72e4861a3dd6f611e8e3cb6 | 40,081 |
def clean_line(string, stop_char):
    """
    # clean_line :: String char -> String
    Return the prefix of `string` up to and including the LAST occurrence
    of `stop_char`; return '' when `stop_char` does not occur.
    ex:
        clean_line("ab#cd", '#')     --> "ab#"
        clean_line("a#b#c", '#')     --> "a#b#"
        clean_line("no stops", '#')  --> ""
    """
    last_index = string.rfind(stop_char)
    # rfind returns -1 when absent, so the slice below becomes string[:0].
    return string[:last_index + 1]
import math
def from_string(s):
    """Parse a range descriptor into a (low, high) tuple.

    Accepts 'AToB' -> (A, B), 'UptoB' -> (0, B), 'AOrMore' -> (A, inf),
    and a bare number N -> (N, N).
    """
    bounds = s.split('To')
    if len(bounds) == 2:
        return int(bounds[0]), int(bounds[1])
    if s.startswith('Upto'):
        return 0, int(s.replace('Upto', ''))
    if s.endswith('OrMore'):
        return int(s.replace('OrMore', '')), math.inf
    value = int(s)
    return value, value
import torch
def matrix_cosine_similarity(x: torch.Tensor, y: torch.Tensor, eps: float=1e-8):
    """
    Pairwise cosine similarity between every row of x and every row of y,
    batched.
    :param x (batch_size, length_1, dim)
    :param y (batch_size, length_2, dim)
    :param eps: lower bound applied to each norm for numerical stability
    :return
        (batch_size, length_1, length_2)
    """
    length_1, length_2 = x.size(1), y.size(1)
    # shape: (batch_size, length_1, length_2) -- all pairwise dot products
    dot_product = x.bmm(y.permute(0, 2, 1))
    # shape: (batch_size, length_1), (batch_size, length_2)
    x_norm, y_norm = x.norm(dim=-1, p=None), y.norm(dim=-1, p=None)
    # added eps for numerical stability (clamps near-zero norms from below)
    x_norm = torch.max(x_norm, eps * x_norm.new_ones(x_norm.size()))
    y_norm = torch.max(y_norm, eps * y_norm.new_ones(y_norm.size()))
    # broadcast each norm vector to the full pairwise grid
    expanded_x_norm = x_norm.unsqueeze(-1).repeat(1, 1, length_2)
    expanded_y_norm = y_norm.unsqueeze(1).repeat(1, length_1, 1)
    # shape: (batch_size, length_1, length_2) -- product of the two norms
    norm = expanded_x_norm * expanded_y_norm
    similarity = dot_product / norm
    return similarity
import pkg_resources
def parse_version(version_string):
    """
    Parse string as a version object for comparison.
    Example: parse_version('1.9.2') > parse_version('1.9.alpha')
    See docs for pkg_resources.parse_version as this is just a wrapper.
    NOTE(review): pkg_resources is deprecated in modern setuptools;
    packaging.version.parse is the suggested replacement -- confirm the
    slight return-type differences before switching.
    """
    return pkg_resources.parse_version(version_string)
def reverse(intList):
    """
    Return a NEW list with the elements of intList in reverse order;
    the input list is left unchanged.
    """
    return intList[::-1]
def parse_db_arguments(string):
    """Return a list of db arguments parsed from string.

    Splits the string on commas and strips surrounding whitespace from
    each resulting argument.
    """
    return [argument.strip() for argument in string.split(',')]
def get_grid_coordinates(img_num, grid_size, w, h):
    """Map an image index within the sprite sheet to its pixel
    coordinates, returned as an "X,Y,W,H" string."""
    row, col = divmod(img_num, grid_size)
    return "%s,%s,%s,%s" % (col * w, row * h, w, h)
import re
def filter_malicious(text):
    """Strip characters that could carry Slack formatting: anything that
    is not a word character, space, euro sign, hyphen, comma,
    parenthesis, or colon is removed."""
    allowed_only = re.compile(r"[^\w €\-,\(\)\:]")
    return allowed_only.sub("", text)
def shrink_string(_str, strip_chars=None, nullable=True):
    """
    Strip `_str` (optionally of `strip_chars` instead of whitespace).
    Non-string input yields None when `nullable`, otherwise ''.
    :param _str: value to shrink
    :param strip_chars: characters to strip (default: whitespace)
    :param nullable: whether non-strings map to None instead of ''
    :return: the stripped string, None, or ''
    """
    if not isinstance(_str, str):
        return None if nullable else ""
    return _str.strip() if strip_chars is None else _str.strip(strip_chars)
import argparse
def cli() -> argparse.Namespace:
    """Build the command line interface and parse the process arguments."""
    parser = argparse.ArgumentParser(
        allow_abbrev=False,
        description='Aggregate third-party packages and standard library modules imported across all Python files in a given directory.'  # noqa: E501
    )
    parser.add_argument('DIR_TO_SCAN', help='target directory to scan')
    parser.add_argument('-x', default=None, dest='DIR_TO_EXCLUDE',
                        help='directory to exclude during scanning')
    # Both flags are simple store-True switches sharing the same shape.
    flag_kwargs = dict(action='store_const', const=True, default=False)
    parser.add_argument('--ignore-std-lib', dest='IGNORE_STD_LIB',
                        help='whether to leave standard library modules out of the report',
                        **flag_kwargs)
    parser.add_argument('--alphabetical', dest='ALPHABETICAL',
                        help='whether to sort the report alphabetically',
                        **flag_kwargs)
    return parser.parse_args()
def divide_integer_evenly(n, m):
"""Returns a list of `m` integers summing to `n`, with elements as even as
possible. For example:
```
divide_integer_evenly(10, 4) -> [3, 3, 2, 2]
divide_integer_evenly(20, 3) -> [7, 6, 6]
```
"""
lengths = [n // m] * m
for i in range(n - sum(lengths)):
lengths[i] += 1
return lengths | 90f9ac9533d859834048abcddc4d8acab44c3189 | 40,095 |
import numpy
def AB_zeropoints(filters):
    """Compute AB magnitude zeropoints for the given filters.

    :param filters: mapping of filter name -> dict with 'wave' (nm) and
        'throughput' numpy arrays on a uniform wavelength grid.
    :return: dict mapping filter name -> AB zeropoint magnitude.
    """
    # define AB source in flambda
    ABsource = 3631e-23  # 3631 Jy -> erg/s/Hz/cm^2
    c = 29979245800.0  # cm/s
    nm_to_cm = 1.0e-7
    zps = {}
    # FIX: dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3; use items().
    for filter_name, filter_ in filters.items():
        fwave = filter_['wave']
        throughput = filter_['throughput']
        # erg/s/Hz/cm^2 * cm/s / nm^2 -> erg/s/cm^2/nm
        ABflambda = ABsource * c / fwave**2 / nm_to_cm
        AB_photons = ABflambda * fwave * throughput
        dlambda = fwave[1] - fwave[0]  # assuming linear wavelength bins!
        AB_sumphotons = (AB_photons * dlambda).sum()
        zps[filter_name] = -2.5 * numpy.log10(AB_sumphotons)
    return zps
import re
def temp_dir(request, tmpdir_factory):
    """
    Similar to the pytest built-in tmpdir fixture, but returns a string, and with a less horrible name.
    """
    # Sanitize the test name (any non-word char -> '_') and cap it at 30 chars.
    sanitized = re.sub(r'\W', '_', request.node.name)
    return str(tmpdir_factory.mktemp(sanitized[:30], numbered=True))
def map_to_range(
    old_min: float,
    old_max: float,
    new_min: float,
    new_max: float,
    value: float,
) -> float:
    """Linearly rescale *value* from [old_min, old_max] onto [new_min, new_max]."""
    # Position of value within the input range, as a fraction in [0, 1].
    fraction = (value - old_min) / (old_max - old_min)
    return fraction * (new_max - new_min) + new_min
def add_field_defaults_to_node(node):
    """
    Since we test using POST, all fields must be present, even if the field will just have the default
    value set. Rather than manually setting a bunch of default values on every node, we just assign it here.
    """
    # These four fields are always (re)assigned, mirroring the POST payload.
    node["license_description"] = None
    node["copyright_holder"] = ""
    node["questions"] = []
    node["extra_fields"] = {}
    # These two are only filled in when missing.
    node.setdefault("files", [])
    node.setdefault("description", "")
    # Recurse, replacing each child with its defaulted version.
    if "children" in node:
        node["children"] = [add_field_defaults_to_node(child) for child in node["children"]]
    return node
def unpack_experiment_groups(experiment_groups):
    """Flatten experiment groups into a list of {'name', 'dir'} dicts,
    one entry per directory, preserving group order.
    """
    return [
        {"name": group["name"], "dir": directory}
        for group in experiment_groups
        for directory in group["dirs"]
    ]
import os
def _get_cloud_sdk_platform_dir():
    """Returns the path of google-cloud-sdk/platform directory."""
    # Walk six levels up from this module's real file location
    # (one dirname for the file itself, five more for the package nesting).
    path = os.path.realpath(__file__)
    for _ in range(6):
        path = os.path.dirname(path)
    if not os.path.exists(path):
        raise OSError('Cannot locate Google Cloud SDK. Please make sure you are '
                      'using testbed from Google Cloud SDK.')
    return path
def leaf_edges(network):
    """Return the leaf edges of the given "compas" network as a list.

    For each leaf vertex the first (and only) incident edge is kept.
    """
    return [network.connected_edges(vertex)[0] for vertex in network.leaves()]
def add(x, y):
    """Return the sum of ``x`` and ``y``."""
    total = x + y
    return total
from typing import Union
from typing import List
def is_valid_label(label: Union[str, List[str]]) -> bool:
    """Test whether label has a valid value.

    :param label: a phrase label (either a string or a list of strings)
    :type label: Union[str, List[str]]
    :return: whether the label is valid
    :rtype: bool
    """
    # A list is valid when every element is a string (an empty list passes).
    if isinstance(label, list):
        return all(isinstance(element, str) for element in label)
    return isinstance(label, str)
import json
def weather(horizon):
    """
    Read the saved forecasts file and return a JSON string for the
    requested horizon.

    Horizon can be either "all" or an integer between 1 and 90 representing
    the desired timestamp, e.g. http://127.0.0.1:5000/latest/all or
    http://127.0.0.1:5000/latest/54

    Parameters
    ----------
    horizon : string
        Either "all" or an integer key between 1 and 90.

    Returns
    -------
    output
        JSON string to output to the page.
    """
    with open(r'.\RESULTS\saved_forecasts_PRD.json', 'r') as jsonfile:
        forecasts = json.load(jsonfile)
    selected = forecasts if horizon == "all" else forecasts[horizon]
    return json.dumps(selected)
def get_name(first_name, last_name):
    """Return a full name neatly formatted"""
    full_name = " ".join([first_name, last_name])
    return full_name.title()
import argparse
def get_arg_parser():
    """
    Get an argument parser which allows setup and retract sub-commands
    Ex:
    This loads the lookup in the table my_table located in the dataset my_dataset of
    the project my_project
    dc732_suppress -p my_project setup my_dataset.my_table
    This retracts rows with concepts in my_dataset.my_table from all tables in datasets d1 and d2
    of the project my_project and backs up data in my_sandbox
    dc732_suppress -p my_project retract -s my_sandbox -c my_dataset.my_table -d d1 d2
    :return: the parser
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-p', '--project_id',
        dest='project_id',
        required=True,
        help='Identifies the project containing the datasets to retract from')
    subparsers = parser.add_subparsers(dest='cmd')

    # `setup` loads the suppressed-concept lookup table.
    setup_cmd = subparsers.add_parser('setup')
    setup_cmd.add_argument(
        'concept_lookup_dest_table',
        help='Table where suppressed concepts should be loaded')

    # `retract` removes rows matching the lookup from the given datasets.
    retract_cmd = subparsers.add_parser('retract')
    retract_cmd.add_argument(
        '-s', '--sandbox_dataset_id',
        dest='sandbox_dataset_id',
        required=True,
        help='Identifies the dataset where output is stored')
    retract_cmd.add_argument(
        '-c', '--concept_lookup_table',
        dest='concept_lookup_table',
        required=True,
        help='Table where suppressed concepts are loaded')
    retract_cmd.add_argument(
        '-d', '--dataset_ids',
        dest='dataset_ids',
        nargs='+',
        required=True,
        help='Identifies dataset(s) to retract from')
    return parser
import os
def find_corr_map(pair_path_list, label_path):
    """
    Get label paths from image paths, assuming the file names are identical.

    :param pair_path_list: list of lists of image paths
    :param label_path: path of the label folder
    :return: list of lists of label paths, mirroring the input structure
    """
    mapped = []
    for pair_path in pair_path_list:
        labels = []
        for img_path in pair_path:
            filename = os.path.split(img_path)[1]
            labels.append(os.path.join(label_path, filename))
        mapped.append(labels)
    return mapped
import sqlite3
def fetch_shows():
    """
    Return the distinct show ids selected by users in the `lists` table,
    ordered from highest to lowest id.

    :return: list of show ids
    """
    con = sqlite3.connect('harvester.db')
    try:
        cursor = con.cursor()
        cursor.execute('select distinct show_id from lists order by show_id desc')
        rows = cursor.fetchall()
    finally:
        # The original never closed the connection; always release it,
        # even if the query raises.
        con.close()
    return [row[0] for row in rows]
def fix_unsigned(data, is_xarray=True):
    """
    Fix data written as signed integer bytes with an "_Unsigned" attribute
    that xarray has automatically converted to floating point.

    The scale_factor and add_offset are removed, the values are cast to the
    corresponding unsigned integer type, and the data is scaled once again.

    Returns a 64-bit numpy floating point array.
    Could be used to rewrite fix_event_locations, but should be equivalent.
    """
    encoding = data.encoding
    # Turn the signed dtype string (e.g. '<i2') into its unsigned twin ('<u2').
    unsigned_dtype = encoding['dtype'].str.replace('i', 'u')
    scale = encoding['scale_factor']
    offset = encoding['add_offset']
    # Undo the scaling, reinterpret the raw values as unsigned integers,
    # then promote to float64 before re-applying the scaling.
    raw = ((data - offset) / scale).data.astype(unsigned_dtype).astype('float64')
    return raw * scale + offset
def capitalize(header_name):
    """Capitalize a header field name, e.g. 'content-type' becomes
    'Content-Type'.

    .. deprecated:: 0.4
    """
    parts = header_name.split("-")
    return "-".join(part.capitalize() for part in parts)
def get_token_time(token_index, sentence, duration):
    """
    Linearly interpolate to guess the time a token was uttered.
    """
    # max(..., 1) guards against division by zero on empty sentences.
    denominator = max(len(sentence), 1)
    return token_index / denominator * duration
def is_palindrome(num):
    """
    Returns true if num is a palindrome.
    """
    text = str(num)
    return text == text[::-1]
def _dist(i_a, i_b):
"""
Just a random distance metric used to decide if to compute mutual
information for nearby epochs
:param i_a: information for epoch a
:param i_b: information for epoch b
:return: some notion of distance
"""
d = max(
max(abs(i_a[0] - i_b[0])),
max(abs(i_a[1] - i_b[1])),
)
return d | 32f158d8f3c79c70e90b8ecc66e0a604afa9f153 | 40,121 |
from typing import List
import random
def gen_ints(a: int, b: int, n: int) -> List[int]:
    """Return ``n`` distinct, randomly ordered ints from the half-open range [a, b)."""
    assert a < b, "a must be smaller than b"
    population = range(a, b)
    return random.sample(population, k=n)
import time
def make_timestamp():
    """
    Returns the localtime year-month-day-hr-min-sec as a string
    """
    # First six fields of struct_time: year, month, day, hour, minute, second.
    fields = time.localtime()[:6]
    return "-".join(str(field) for field in fields)
import pandas as pd
import numpy as np
from pathlib import Path
import os
def get_normative_structures(structure: str) -> dict:
    """Returns a structure from the ReproNimCDE if available.

    Looks up ``structure`` as a substring of the "Atlas Segmentation Label"
    column of the bundled ReproNimCDEs.xlsx spreadsheet and, when found,
    maps it to an UBERON URI and a laterality value.

    :param structure: atlas segmentation label (or fragment) to look up
    :return: dict with key "isAbout" (UBERON URI or "<UNKNOWN...>" marker)
        and, unless it resolved to None, "hasLaterality"
    """
    normative = {"isAbout": "<UNKNOWN>", "hasLaterality": None}
    location = Path(os.path.dirname(__file__))
    # Two header rows -> MultiIndex columns keyed by (level0, level1) tuples.
    df = pd.read_excel(location / "mapping_data/ReproNimCDEs.xlsx", header=[0, 1])
    labels = df[("Atlas Segmentation Label", "Unnamed: 6_level_1")].str
    # str.find: -1 means "not contained"; anything >= 0 is a match position.
    start_indices = labels.find(structure).values
    indices = np.nonzero(start_indices > -1)
    indices = indices[0]
    if len(indices):
        # Of all matching rows, take the one where the match starts earliest.
        idx = indices[start_indices[indices].argsort()[0]]
        uberon = df[("Structure", "Preferred")].iloc[idx]
        if str(uberon) != "nan":
            uberon = uberon.replace("UBERON:", "http://purl.obolibrary.org/obo/UBERON_")
            normative["isAbout"] = uberon
        laterality = df[("Laterality", "ILX:0106135")].iloc[idx]
        normative["hasLaterality"] = laterality
    else:
        normative["isAbout"] = f"<UNKNOWN - {structure}>"
    # Fall back to guessing laterality from common FreeSurfer-style markers.
    # NOTE(review): assumes "lh"/"rh" substrings only appear as hemisphere
    # markers in label names — confirm against the atlas naming scheme.
    if normative["hasLaterality"] is None:
        if "lh" in structure or "Left" in structure:
            normative["hasLaterality"] = "Left"
        elif "rh" in structure or "Right" in structure:
            normative["hasLaterality"] = "Right"
        else:
            normative["hasLaterality"] = f"<UNKNOWN - {structure}>"
    # The spreadsheet may encode "no laterality" as the literal string "None".
    if normative["hasLaterality"] == "None":
        normative["hasLaterality"] = None
    # Drop keys whose value resolved to None.
    return {k: v for k, v in normative.items() if v is not None}
def default_path(path):
    """
    Converts path to default form (with no slash or backslash at the end)

    :param path: string - path to convert
    :return: string - result path
    """
    # rstrip removes every trailing '/' or '\' in one pass. Unlike the
    # original index-based while loop, it does not raise IndexError on an
    # empty string or on a string made up entirely of separators.
    return path.rstrip('/\\')
def merge_dict(*dicts):
    """Merge dictionaries, with later dictionaries winning on key conflicts.

    :param dicts: Dictionaries to merge
    :type dicts: Tuple of dicts
    :return: Dictionary containing fields of all input dictionaries
    :type return: dict

    Copyright (C) 2022 Machine Learning Group of the University of Oldenburg.
    Licensed under the Academic Free License version 3.0
    """
    merged = dicts[0].copy()
    for extra in dicts[1:]:
        merged.update(extra)  # Python 2 friendly
    return merged
from typing import Any
def main(backend, user_messenger, **kwargs) -> Any:
    """Main entry point of the program.

    Args:
        backend (qiskit.providers.Backend): Backend to submit the circuits to.
        user_messenger (qiskit.providers.ibmq.runtime.UserMessenger): Used to communicate with the
            program consumer.
        kwargs: User inputs.

    Returns:
        Final result of the program.
    """
    result = "Done"
    return result
def may_view_cronjobs_not_logged_in(app, identity, model, permission):
    """ Cronjobs are run anonymously from a thread and need to be excluded
    from the permission rules, so access is always granted here.
    """
    return True
import time
import hashlib
def user_sign_api(data, private_key):
    """Attach a whole-second timestamp and an MD5 signature to ``data``.

    The signature is the MD5 hex digest of the timestamp concatenated with
    the private key.

    :param data: payload dict to sign (mutated in place)
    :param private_key: secret appended to the timestamp before hashing
    :return: the signed dict, or None when ``data`` is not a dict
    """
    # Whole-second timestamp, equivalent to str(time.time()).split('.')[0].
    client_time = str(int(time.time()))
    # sign = md5(timestamp + key)
    digest = hashlib.md5((client_time + private_key).encode('utf-8')).hexdigest()
    if isinstance(data, dict):
        data['time'] = client_time
        data['sign'] = digest
        return data
def eval_metric(results, params):
    """Compute exact-match accuracy (%) of predicted vs. gold answers.

    Note: the original docstring said "BLEU Evaluate", but this computes
    plain exact-match accuracy, not BLEU.

    :param results: iterable of dicts with 'pred_answer' and 'gold_answer'
    :param params: unused; kept for interface compatibility
    :return: percentage of results whose prediction equals the gold answer
    :raises ZeroDivisionError: if ``results`` is empty (unchanged behavior)
    """
    crr_cnt, total_cnt = 0, 0
    for result in results:
        total_cnt += 1
        if result['pred_answer'] == result['gold_answer']:
            crr_cnt += 1
    return crr_cnt * 100. / total_cnt
def unpack_singleton(x):
    """
    Return the sole element of a length-1 sequence; otherwise return the
    sequence unchanged.

    :param x: a list
    :return: the original list or its only element
    """
    return x[0] if len(x) == 1 else x
def paren_join(items, sep):
    """Join items by sep with parens around individual items but not the whole."""
    if len(items) == 1:
        return items[0]
    inner = (") " + sep + " (").join(items)
    return "(" + inner + ")"
def countSegments(s):
    """Count contiguous runs of non-space characters in ``s``.

    :type s: str
    :rtype: int
    """
    # A segment starts wherever a non-space character follows a space
    # (or the start of the string). Only ' ' counts as a separator.
    total = 0
    previous = " "
    for ch in s:
        if ch != " " and previous == " ":
            total += 1
        previous = ch
    return total
import re
def clean(word):
    """Removes any non A-Z characters and any vowels from a word"""
    letters_only = re.sub(r'[^A-Za-z]', '', word)      # drop special (non A-Z) characters
    return re.sub(r'[AaEeIiOoUu]', '', letters_only)   # drop vowels
def filter_hmm_hit_list(hmm_hit_list, e_value_cutoff="1e-25", hmm_coverage=0.3, max_align_overlap=0.5):
    """
    Filters HMM hits by E-Value, Coverage and Overlap between hits.

    Hits are expected to expose e_value, target_protein, ali_from, ali_to,
    ali_length and hmm_coverage attributes.

    NOTE(review): only *adjacent* list entries are compared for overlap, so
    this assumes the list is already sorted by target protein and alignment
    position — confirm upstream sorting.

    :param hmm_hit_list: List of HMM hit objects.
    :param e_value_cutoff: The E-Value cutoff for hits.
    :param hmm_coverage: The HMM coverage cutoff for hits.
    :param max_align_overlap: The maximum overlap percentage between overlapping HMM hits.
    :return: List of filtered HMM hit objects.
    """
    hmm_hit_list = [hit for hit in hmm_hit_list if hit.e_value < float(e_value_cutoff)]  # Filters hits by E-value.
    # Walk adjacent pairs; when both hits align to the same protein and
    # their alignments overlap too much, drop the hit with the worse
    # (larger) E-value and rewind the index to re-check the new pair.
    i = 0
    while i < (len(hmm_hit_list) - 1):
        hit_one = hmm_hit_list[i]  # Current Row in hit table.
        hit_two = hmm_hit_list[i + 1]  # Row below.
        if hit_one.target_protein == hit_two.target_protein:
            # Positive when hit_one's alignment end passes hit_two's start.
            overlap_between_hits = hit_one.ali_to - hit_two.ali_from
            if overlap_between_hits > 0:
                # If the overlap is greater than 50% of either alignment.
                if ((float(overlap_between_hits) / float(hit_one.ali_length)) > max_align_overlap) or (
                        (float(overlap_between_hits) / float(hit_two.ali_length)) > max_align_overlap):
                    if hit_one.e_value < hit_two.e_value:
                        hmm_hit_list.remove(hit_two)
                    else:
                        hmm_hit_list.remove(hit_one)
                    i -= 1  # Resets list index.
        i += 1
    hmm_hit_list = [hit for hit in hmm_hit_list if hit.hmm_coverage > hmm_coverage]  # Filters by Query Coverage.
    return hmm_hit_list
import torch
def divide_img(img, patch_size):
    """Divide a batch of images into a tensor of image patches.

    Args:
        img: batch of images, torch.Tensor, e.g. [batch_size, channels, 32, 32]
        patch_size: patch size, tuple e.g. (4, 4)

    Returns:
        torch.Tensor of stacked flattened image patches in row-major patch
        order, e.g. [batch_size, 64, channels, 4, 4]
    """
    height, width = img.shape[2], img.shape[3]
    ph, pw = patch_size
    patches = []
    for row in range(height // ph):
        top = row * ph
        for col in range(width // pw):
            left = col * pw
            patches.append(img[:, :, top:top + ph, left:left + pw])
    # Stack gives [num_patches, batch, C, ph, pw]; move batch to the front.
    return torch.stack(patches).permute(1, 0, 2, 3, 4)
def dict_hex_finder(single_hex_dict: dict):
"""Pulls the 'hex' key, 'num_files' key, and the file list out of the dict.
- Args:
- single_hex_dict (dict): dict with one hex, plus various other keys
- Returns:
- 0 [str]: hex
- 1 [list]: files
- 2 [int]: number of files
"""
hex_val, files, num_files = '', '', 0
for k, v in single_hex_dict.items():
if k == 'num_files':
num_files = v
elif k == 'action':
pass
elif k == 'sub-action':
pass
else:
hex_val = k
files = v
return hex_val, files, num_files | a9446c57881a3c29d44a53b8fffbe269c5cb17f4 | 40,142 |
import mimetypes
def to_mime(file_path):
    """
    Return the mime type guessed from a given path (None when unknown).

    :param file_path: Path to analyse
    :type file_path: str
    :return: Mime type
    :rtype: str
    """
    mime_type, _encoding = mimetypes.guess_type(file_path)
    return mime_type
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.