content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
from bs4 import BeautifulSoup
import requests
def get_soup(url: str) -> BeautifulSoup:
    """Fetch *url* over HTTP and parse the response body into a BeautifulSoup tree.

    Raises requests.ConnectionError when the server does not answer 200 OK.
    """
    response = requests.get(url)
    if response.status_code != requests.codes.ok:
        # Keep the diagnostic on stdout, then signal the failure to the caller.
        print(f"url request error: response.status_code is {response.status_code}")
        raise requests.ConnectionError
    return BeautifulSoup(response.text, "html.parser")
|
34f172c2d6d2d7928d93f3a11768cc45272fc399
| 699,833
|
def quaternion_upper_hemispher(q):
    """Map a quaternion to the canonical representative of {q, -q}.

    The quaternion q and -q represent the same rotation because a rotation
    of theta in the direction v is equivalent to a rotation of 2*pi - theta
    in the direction -v.  Uniqueness is forced by staying in the "upper
    half" of S^3: require a >= 0; if a == 0 require b >= 0; if a == b == 0
    require c >= 0; and if a == b == c == 0 only d = 1 is allowed (points
    such as (0,0,0,-1) and (0,0,0,1) are the same rotation).

    :param q: quaternion as an indexable of four components (a, b, c, d);
        assumed to support elementwise unary negation (e.g. a numpy array)
        and item assignment — TODO confirm with callers.  May be mutated in
        place in the all-zero case.
    :return: the canonicalized quaternion.
    """
    a, b, c, d = q
    if a < 0:
        q = -q
    if a == 0:
        if b < 0:
            q = -q
        if b == 0:
            if c < 0:
                q = -q
            if c == 0:
                # a == b == c == 0: only d = 1 is allowed.
                # Fix: removed a stray debug print(q) that polluted stdout.
                q[3] = 1
    return q
|
1a95442fa0016aa02968c294110b540391d35550
| 699,834
|
def get_leaf_artists(root_artist, matchfunc):
    """Return all the leaf artists lying under a matching artist.

    Args:
        root_artist: Artist.
        matchfunc: callable: artist -> bool.
    Return:
        All the descendant leaf Artists that match `matchfunc` themselves
        or have a matching ancestor.
    Note
    ----
    Similar to `Artist.findobj`, but only leaf artists are gathered.
    * https://matplotlib.org/_modules/matplotlib/artist.html#Artist.findobj
    """
    def _collect(node, inherited):
        # matchfunc is always evaluated, mirroring the original behavior.
        hit = matchfunc(node) or inherited
        kids = node.get_children()
        if not kids:
            return [node] if hit else []
        gathered = []
        for kid in kids:
            gathered.extend(_collect(kid, hit))
        return gathered
    return _collect(root_artist, False)
|
63b7304137cfc502cd22b17be6b4869a7fff879b
| 699,835
|
def intersect(hrect, r2, centroid):
    """Return whether hyperrectangle ``hrect`` intersects the hypersphere
    defined by ``centroid`` and squared radius ``r2``.

    ``hrect`` is a (2, d) array: row 0 holds the per-axis minima and row 1
    the per-axis maxima.
    """
    lo = hrect[0, :]
    hi = hrect[1, :]
    # Closest point of the box to the centroid: clamp each coordinate.
    nearest = centroid.copy()
    below = nearest < lo
    nearest[below] = lo[below]
    above = nearest > hi
    nearest[above] = hi[above]
    return ((nearest - centroid) ** 2).sum() < r2
|
6050742ae4527f5baba3c6cb8a484b04d32c0b3c
| 699,836
|
import re
def remove_trailing_commas(json_like):
    """
    Removes trailing commas from `json_like` and returns the result.
    Examples
    --------
    >>> remove_trailing_commas('{"foo":"bar","baz":["blah",],}')
    '{"foo":"bar","baz":["blah"]}'
    """
    # Lookahead: the comma is only rewritten when the rest of the text
    # contains balanced string literals, i.e. the comma is outside a string.
    outside_string = r'(?=([^"\\]*(\\.|"([^"\\]*\\.)*[^"\\]*"))*[^"]*$)'
    # Fix objects {} first
    without_object_commas = re.sub(r'(,)\s*}' + outside_string, "}", json_like)
    # Now fix arrays/lists [] and return the result
    return re.sub(r'(,)\s*\]' + outside_string, "]", without_object_commas)
|
0d51b1cb7508ab00ec353a1446210b6e44c64c58
| 699,837
|
def is_method_of(method, obj):
    """Return True if *method* is a bound method of *obj*.

    *method* should be a method on a class instance; *obj* should be an
    instance of a class.
    """
    # 'im_self' carried the bound instance before Python 3; '__self__' since.
    bound_to = getattr(method, "im_self", getattr(method, "__self__", None))
    return isinstance(bound_to, obj.__class__)
|
554ab48effb7ce996846192786ce2141abf671a4
| 699,838
|
import os
def make_fig_save_dirs(save_dir_root, pdf=False):
    """Create directories for saving figures in svg and png formats.

    Args:
        save_dir_root (string): root directory for saving figures
        pdf (bool): unused; kept for backward compatibility.
    Returns:
        dir_svg (string): valid directory
        dir_png (string): valid directory
    """
    dir_png = os.path.join(save_dir_root, 'png')
    dir_svg = os.path.join(save_dir_root, 'svg')
    # exist_ok avoids the check-then-create race of the old
    # os.path.exists()/os.makedirs() pair.
    os.makedirs(dir_png, exist_ok=True)
    os.makedirs(dir_svg, exist_ok=True)
    return dir_svg, dir_png
|
4f7164ca441b4cbdceed3478c6cb5121cfb1fb5d
| 699,839
|
import os
def get_workflow_config_path(repo):
    """Return the full path for the git workflow config file of *repo*."""
    git_dir = repo.git_dir
    return os.path.join(git_dir, 'config_workflow')
|
bc3c90c597e78b3922db1318608a4d184b603c17
| 699,840
|
def callback(func):
    """
    A decorator adding an optional 'callback' keyword argument: when given,
    the callable is invoked with the wrapped function's return value.
    Used to add callbacks to the API calls.
    :param func: The function to decorate
    :return: The wrapped function
    """
    def wrapped(*args, **kwargs):
        on_result = kwargs.pop('callback', None)
        outcome = func(*args, **kwargs)
        # Truthiness check, as in the original: a falsy callback is ignored.
        if on_result:
            on_result(outcome)
        return outcome
    return wrapped
|
a2599306d091bc48df5ab1ca90e34f4529c85cab
| 699,841
|
def remove_dups_stringlist(str):
    """Remove duplicate items from a comma-separated string, keeping order."""
    # NOTE: the parameter shadows the builtin ``str``; name kept so keyword
    # callers are unaffected.
    unique_in_order = dict.fromkeys(str.split(','))
    return ','.join(unique_in_order)
|
ac54bbfa9c48f730ffd29db0a2cd210b1f3a7c79
| 699,842
|
import torch
def dice_loss_norm(input, target):
    """Soft dice loss, negated and averaged over classes.

    input is a torch variable of size BatchxnclassesxHxW representing log
    probabilities for each class; target is a 1-hot representation of the
    groundtruth and should have the same size as the input.
    """
    assert input.size() == target.size(), "Input sizes must be equal."
    assert input.dim() == 4, "Input must be a 4D Tensor."
    eps = 0.0000001

    def _per_class_sum(t):
        # Collapse width, height, then batch — leaving one value per class.
        # Same reduction order as the original sequential torch.sum calls.
        return t.sum(dim=3).sum(dim=2).sum(dim=0)

    overlap = _per_class_sum(input * target)   # p*g
    p_sq = _per_class_sum(input * input)       # p^2
    g_sq = _per_class_sum(target * target)     # g^2
    dice = 2 * ((overlap + eps) / (p_sq + g_sq + eps))
    # Negative mean over classes: better overlap -> lower loss.
    return -1 * torch.sum(dice) / dice.size(0)
|
e3640a6660df0a572b1b8deccf9d508d7f7e40a9
| 699,843
|
import os
def files_in_current_dir(dir_name):
    """list all files in a dir.
    Args:
        dir_name (str): path to a dir
    Returns:
        list: absolute paths of the regular files in the dir.
    """
    found = []
    for entry in os.listdir(dir_name):
        full_path = os.path.abspath(os.path.join(dir_name, entry))
        if os.path.isfile(full_path):
            found.append(full_path)
    return found
|
01842f7b688049ceed7ec5a78446df338abf1d7d
| 699,844
|
def make_usage(template, command_map, alias_map):
    """Generate the usage doc based on configured commands and aliases"""
    def describe(cmd):
        handler = command_map[cmd]
        # Some commands (but not all) have aliases
        names = sorted(a for a in alias_map if alias_map[a] == cmd)
        alias_text = " ".join(names) if names else ""
        alias_text = f"\n Aliases: {alias_text}"
        return f" {cmd:8s} {handler.__doc__}{alias_text}\n"
    entries = [describe(cmd) for cmd in sorted(command_map)]
    return template.format(COMMANDS="\n".join(entries))
|
2cee786c095a897dc583fedff17aeef974ba8d5b
| 699,845
|
import threading
def setInterval(interval):
    """
    Decorator generator. Calls the decorated methods every `interval` seconds.
    Args:
        interval (int): Period
    Returns:
        (function) Decorated Function.
    """
    def decorator(function):
        """
        Helper method.
        """
        def wrapper(*args, **kwargs):
            """
            Manages the thread.

            Returns the threading.Event used as the stop flag: the caller
            stops the periodic invocation with ``returned_event.set()``.
            """
            stopped = threading.Event()
            def loop():  # executed in another thread
                # Event.wait(interval) doubles as the sleep between calls and
                # returns True as soon as the event is set, ending the loop.
                while not stopped.wait(interval):  # until stopped
                    function(*args, **kwargs)
            t = threading.Thread(target=loop)
            t.daemon = True  # stop if the program exits
            t.start()
            return stopped
        return wrapper
    return decorator
|
c6bbced84a081fad88056bce5ec1f9f762336cbb
| 699,846
|
def save_group(batch_dir, group):
    """
    Create the group directory under *batch_dir* (a pathlib.Path) and write
    the group info file into it.  Returns the created directory.
    """
    name = group["group_name"]
    target = batch_dir / name
    target.mkdir()
    # save group information
    with open(target / "group_info.txt", "w") as fh:
        fh.write(f"group_name: {name}\n")
        fh.write(f"group_info: {group['group_info']}\n")
    return target
|
aabf6e1a4e4ee08df4cfab490982f4eafa694b7e
| 699,848
|
def log2(instructions):
    """Integer only algorithm to calculate the number of bits needed to store a number"""
    width = 1
    capacity = 2
    # Double the capacity until it reaches `instructions`.
    while capacity < instructions:
        width += 1
        capacity *= 2
    return width
|
8933e1c5d2cf6811cd15351f76658a7ed2be707f
| 699,849
|
def codepipeline_approval(message):
    """Build the Slack Block Kit blocks for a CodePipeline manual-approval
    message: a headline with a console button, a facts section, and the
    review-approve action button.
    """
    console_link = message['consoleLink']
    approval = message['approval']
    pipeline_name = approval['pipelineName']
    action_name = approval['actionName']
    approval_review_link = approval['approvalReviewLink']
    expires = approval['expires']
    headline = {
        'type': 'section',
        'text': {
            'type': 'plain_text',
            'text': f'Pipeline "{pipeline_name}" is waiting for approval.',
        },
        'accessory': {
            'type': 'button',
            'text': {'type': 'plain_text', 'text': 'Open in :aws: Console', 'emoji': True},
            'url': console_link,
        },
    }
    facts = {
        'type': 'section',
        'fields': [
            {'type': 'mrkdwn', 'text': f'*Action name*:\n{action_name}'},
            {'type': 'mrkdwn', 'text': f'*Expires:* {expires}'},
        ],
    }
    actions = {
        'type': 'actions',
        'elements': [
            {
                'type': 'button',
                'text': {'type': 'plain_text', 'emoji': False, 'text': 'Review approve'},
                'style': 'primary',
                'url': approval_review_link,
            },
        ],
    }
    return (headline, facts, actions)
|
3d3aaa51b916d77d67c6c071c3e9403df691c1d9
| 699,850
|
def fetch_intron(start, cigar):
    """
    Collect the intron intervals ('N' operations) from a CIGAR list.

    :param start: reference position where the alignment begins.
    :param cigar: iterable of (operation, length) pairs using pysam-style
        codes: 0=M, 1=I, 2=D, 3=N, 4=S.
    :return: list of (start+1, end) tuples, one per 'N' operation.
    """
    introns = []
    position = start
    for op, length in cigar:
        if op == 3:
            # Skipped region (intron): record its bounds, 1-based inclusive.
            introns.append((position + 1, position + length))
            position += length
        elif op in (0, 2):
            # Match/deletion consume the reference.
            position += length
        # Insertions (1), soft clips (4) and other ops do not advance.
        # NOTE(review): the original author questioned whether soft clips
        # should advance the position; behavior kept as-is.
    return introns
|
f52f383077d8e4ef99721279405c2cd47cb94e2b
| 699,851
|
def _formatted_hour_min(seconds):
"""Turns |seconds| seconds into %H:%m format.
We don't use to_datetime() or to_timedelta(), because we want to
show hours larger than 23, e.g.: 24h:00m.
"""
time_string = ''
hours = int(seconds / 60 / 60)
minutes = int(seconds / 60) % 60
if hours:
time_string += '%dh' % hours
if minutes:
if hours:
time_string += ':'
time_string += '%dm' % minutes
return time_string
|
87fb84b5b8f190102309facfb1e33cffa24bcdbf
| 699,852
|
from typing import Counter
def number_of_pairs(gloves):
    """
    Given an array describing the color of each glove,
    return the number of pairs you can constitute,
    assuming that only gloves of the same color can form pairs.
    Examples:
        input = ["red", "green", "red", "blue", "blue"]
        result = 2 (1 red pair + 1 blue pair)
    """
    # Each color contributes floor(count / 2) pairs.
    return sum(tally // 2 for tally in Counter(gloves).values())
|
e499f0e924b0154684ad2ecda51d7ed0ed63d183
| 699,854
|
def Rmax_Q11(Vmax):
    """ Estimation of the radius of maximum wind according to the formula proposed
    by Quiring et al. (2011); Vmax and Rmax are in nautical miles.
    Expression herein converted in km"""
    # 0.5399568: presumably km/h -> knots conversion factor — TODO confirm.
    vmax_converted = Vmax * 0.5399568
    # 1.852 converts nautical miles to kilometres.
    return (49.67 - 0.24 * vmax_converted) * 1.852
|
e320acfd64abc9e7ae30ca70979cf057239bae09
| 699,855
|
def get_option_value(elem):
    """ Get the value attribute, or if it doesn't exist the text
    content.
    <option value="foo">bar</option> => "foo"
    <option>bar</option> => "bar"
    :param elem: a soup element
    :raises ValueError: when neither source yields a non-empty value
    """
    value = elem.get("value")
    if value is None:
        value = elem.text.strip()
    if not value:
        raise ValueError(u"Error parsing value from {}.".format(elem))
    return value
|
b2a549d8b5ec3c895ff2b3c2978437a25afe99b1
| 699,856
|
def scatter_scale(target, max_size=50.0):
    """Return a scaled list of sizes: each entry x maps to max_size * (1 - x)."""
    def to_size(x):
        return max_size * (1 - x)
    return target.apply(to_size)
|
0ea6b68b4f8e3f579b98becf3e12b1bec7de95e2
| 699,857
|
def clearDiskCache():
    """
    clearDiskCache() -> None
    Clear the disk cache of all files.

    NOTE(review): stub/mock implementation — performs no work and returns
    None; presumably replaced by a native implementation at runtime.
    """
    return None
|
4f1ad8928b637505d96172a002507a8a658e363f
| 699,858
|
def EI(sections, normal=None):  # {{{
    """Calculate the bending stiffnes of a cross-section.
    The cross-section is composed out of rectangular nonoverlapping sections
    that can have different Young's moduli.
    Each section is represented by a 4-tuple (width, height, offset, E).
    The offset is the distance from the top of the section to the top of the
    highest section. This should always be a positive value.
    E is the Young's modulus of the material of this section.
    Arguments:
        sections: Iterable of section properties.
        normal: The Young's modulus to which the total cross-section will be
            normalized. (Not used anymore, retained for compatibility.)
    Returns:
        Tuple of EI, top and bottom. Top and bottom are with respect to the
        neutral line.
    Examples:
        >>> E = 210000
        >>> B = 100
        >>> H = 20
        >>> sections = ((B, H, 0, E),)
        >>> EI(sections)
        (14000000000.0, 10.0, -10.0)
        >>> B = 100
        >>> h = 18
        >>> t = 1
        >>> H = h + 2 * t
        >>> E = 210000
        >>> sections = ((B, t, 0, E), (B, t, h+t, E))
        >>> EI(sections)
        (3794000000.0, 10.0, -10.0)
        >>> E1, E2 = 200000, 71000
        >>> t1, t2 = 1.5, 2.5
        >>> H = 31
        >>> B = 100
        >>> sections = ((B, t1, 0, E1), (B, t2, H-t2, E2))
        >>> EI(sections)
        (9393560891.143106, 11.530104712041885, -19.469895287958117)
    """
    # The `normal` parameter is ignored: the first section's E is always used
    # as the normalization modulus (see docstring note about compatibility).
    normal = sections[0][-1]
    # Scale each width by E/normal so the whole section acts as one material.
    normalized = tuple((w * E / normal, h, offs) for w, h, offs, E in sections)
    # Total (transformed) area and first moment about the top edge.
    A = sum(w * h for w, h, _ in normalized)
    S = sum(w * h * (offs + h / 2) for w, h, offs in normalized)
    # yn is the depth of the neutral line measured from the top.
    yn = S / A
    # Find any geometry that straddles yn.
    to_split = tuple(g for g in sections if g[2] < yn and g[1] + g[2] > yn)
    geom = tuple(g for g in sections if g not in to_split)
    # split that geometry.
    # The new tuple has the format (width, height, top, bottom)
    new_geom = []
    for w, h, offs, E in to_split:
        h1 = yn - offs
        h2 = h - h1
        new_geom.append((w, h1, h1, 0, E))
        new_geom.append((w, h2, 0, -h2, E))
    # Convert the remaining geometry to reference yn.
    for w, h, offs, E in geom:
        new_geom.append((w, h, yn - offs, yn - offs - h, E))
    # Integrate E*y^2 dA per rectangle about yn: E*w*(top^3 - bot^3)/3.
    EI = sum(E * w * (top ** 3 - bot ** 3) / 3 for w, h, top, bot, E in new_geom)
    top = max(g[-3] for g in new_geom)
    bot = min(g[-2] for g in new_geom)
    return EI, top, bot
|
24b5ca79f0a3f041586e2f9d7fe8d7953cd96780
| 699,859
|
def filter_labels_by_class(obj_labels, classes):
    """Filters object labels by classes.
    Args:
        obj_labels: List of object labels
        classes: List of classes to keep, e.g. ['Car', 'Pedestrian', 'Cyclist']
    Returns:
        obj_labels: List of filtered labels
        class_mask: Mask of labels to keep
    """
    # One boolean per label: keep iff its .type is in the wanted classes.
    class_mask = [(obj.type in classes) for obj in obj_labels]
    # NOTE(review): indexing with a boolean list requires obj_labels to be a
    # numpy array — a plain Python list would raise TypeError here. Confirm
    # callers always pass an ndarray.
    return obj_labels[class_mask], class_mask
|
854a32da802c794b0622a0a36895590823b7c780
| 699,860
|
def _update_entries(data_arr, first_one, second_one):
    """
    Replaces 2 entries in the array, and returns the modified array.
    This only operates on arrays returned by the likes of _augment_array().
    """
    # Entries are assumed to be subscriptable with a 1-based "intended index"
    # at position 0 (presumably the _augment_array() format) — TODO confirm.
    temp1 = data_arr[first_one]
    temp2 = data_arr[second_one]
    # Place temp2's value at slot first_one: store the bare value when it
    # lands on its intended (0-based) slot, else pair it with the distance.
    intended_index = temp2[0] - 1
    difference = abs(intended_index - first_one)
    if difference == 0:
        data_arr[first_one] = temp2[0]
    else:
        data_arr[first_one] = (temp2[0],) + (difference,)
    # Symmetric update: place temp1's value at slot second_one.
    intended_index = temp1[0] - 1
    difference = abs(intended_index - second_one)
    if difference == 0:
        data_arr[second_one] = temp1[0]
    else:
        data_arr[second_one] = (temp1[0],) + (difference,)
    return data_arr
|
505f0334152819408b0e1eb358f635f5793e93ff
| 699,861
|
def probably_reconstruction(file) -> bool:
    """Decide if a path may be a reconstruction file."""
    has_json_suffix = file.endswith("json")
    return has_json_suffix and "reconstruction" in file
|
fc5c20fe8fddc9f8ffaab0e746100a534e6a5f57
| 699,863
|
import re
def error_086_ext_link_two_brackets(text):
    """Fix some cases and return (new_text, replacements_count) tuple."""
    # case: [[http://youtube.com/|YouTube]]
    def _rewrite(match):
        """Turn a double-bracketed piped link into single-bracket form."""
        url = match.group(1)
        label = match.group(2)
        # Wikipedia links use underscores for spaces; others percent-escape.
        space_repl = "_" if "wikipedia.org" in url.lower() else "%20"
        url = url.replace(" ", space_repl)
        return "[" + url + " " + label + "]"
    piped = r"\[\[(https?://[^|\[\]\n]+)\|([^|\[\]\n]+)\]\]"
    (text, n_piped) = re.subn(piped, _rewrite, text, flags=re.I)
    # case: [[http://youtube.com YouTube]]
    doubled = r"\[(\[https?://[^\[\]\n]+\])\]"
    (text, n_doubled) = re.subn(doubled, "\\1", text, flags=re.I)
    return (text, n_piped + n_doubled)
|
6e5acc412be1de2b5cbc580984b70cc66cf7fba6
| 699,864
|
import re
def get_ip(ip_str):
    """
    input format: SH-IDC1-10-5-30-[137,152] or SH-IDC1-10-5-30-[137-142,152] or SH-IDC1-10-5-30-[152, 137-142]
    output format 10.5.30.137
    """
    # The octets are the 2nd..5th runs of digits (the 1st is the "1" in IDC1).
    digit_runs = re.findall(r'\d+', ip_str)
    return ".".join(digit_runs[1:5])
|
0a49bf6ae1bdaed6a88e793a9d9d1cd2e1064de3
| 699,865
|
from typing import Counter
def check_author_count(counter: Counter) -> bool:
    """
    Takes a set of documents and counts the number of authors. If less than
    2, returns False otherwise True.
    :param counter: a Counter object for author counts.
    :return: a boolean indicating whether or not the document set can be
        analyzed (True for yes, False for no).
    """
    # Fix: the old `len(counter) == 1` test let an *empty* counter through,
    # contradicting the documented "less than 2 -> False" contract.
    return len(counter) >= 2
|
31b697cc0e5a395ebb0702c40e86f5b21760021d
| 699,866
|
def default_destinations(iata_code_original_destination):
    """Get three default destinations different from original destination of query."""
    # Paris, London, Rome, New York
    candidates = ['CDG', 'LHR', 'FCO', 'JFK']
    # Drop the original destination when it is one of the defaults;
    # otherwise drop New York so exactly three suggestions remain.
    if iata_code_original_destination in candidates:
        to_drop = iata_code_original_destination
    else:
        to_drop = 'JFK'
    candidates.remove(to_drop)
    return candidates
|
904ebb69bdb3bb893580b201bbc50060a194ed7b
| 699,867
|
def _ToCamelCase(name):
"""Converts hyphen-case name to CamelCase."""
parts = name.split('-')
return ''.join(x.title() for x in parts)
|
c164572c386e16c9fdf193eac24f350ee8218cfc
| 699,868
|
def tree_intersection(tree_one, tree_two):
    """Return the values present in both binary trees.

    Args:
        tree_one (binarytree): first tree.
        tree_two (binarytree): second tree.
    Returns:
        list: values of ``tree_two`` (in its breadth-first order) that also
        occur in ``tree_one``; duplicates in ``tree_two`` are kept, matching
        the original implementation.
    """
    # Materialize tree_one's values once: the old code re-ran
    # tree_one.breadth_first() for every item of tree_two (O(n*m)).
    # Assumes tree values are hashable — TODO confirm with callers.
    one_values = set(tree_one.breadth_first())
    return [item for item in tree_two.breadth_first() if item in one_values]
|
b2277a8121ce24446ef6ff1957d46c0e0853e79e
| 699,869
|
def name_records_summary(self):
    """
    Build a per-record-id summary: the record's name plus per-module
    occurrence counts.

    Return:
        dict: {record_id: {'name': ..., 'counts': {module: n, ...}}}
    """
    summary = {}
    for module, records in self.records.items():
        for record in records:
            rec_id = record['id']
            entry = summary.setdefault(
                rec_id, {'name': self.name_records[rec_id], 'counts': {}})
            entry['counts'][module] = entry['counts'].get(module, 0) + 1
    return summary
|
4c23d37bb8aac839afcb2a2cb724d352db8e5d8c
| 699,871
|
import hashlib
def same_files(fname, fname2):
    """
    Two files are the same if they have the same SHA-512 hash.
    :param fname String: The first filename.
    :param fname2 String: The second filename.
    :return: True when both files hash identically; False when they differ
        or either file cannot be read (IOError).
    """
    def _digest(path):
        # Read in 1 MiB chunks so large files are not slurped into memory
        # at once (the old code read each whole file with fin.read()).
        h = hashlib.sha512()
        with open(path, 'rb') as fin:
            for chunk in iter(lambda: fin.read(1 << 20), b''):
                h.update(chunk)
        return h.hexdigest()
    try:
        return _digest(fname) == _digest(fname2)
    except IOError:
        return False
|
502b7b6942d8766edad78d7aa0c7de9ddd85a7cb
| 699,872
|
import os
import glob
import itertools
import re
def get_machine_id_list_for_test(target_dir,
                                 dir_name="test",
                                 ext="wav"):
    """
    target_dir : str
        base directory path of "dev_data" or "eval_data"
    dir_name : str (default="test")
        directory containing test data
    ext : str (default="wav")
        file extension of audio files
    return :
        machine_id_list : list [ str ]
        sorted unique machine IDs ("id_NN") extracted from the test file names
    """
    # create test files
    pattern = os.path.abspath(
        "{dir}/{dir_name}/*.{ext}".format(dir=target_dir, dir_name=dir_name, ext=ext))
    # extract id
    ids = set()
    for path in sorted(glob.glob(pattern)):
        ids.update(re.findall('id_[0-9][0-9]', path))
    return sorted(ids)
|
8b3964886ad3b2dbda86d508985898f3875a3f7f
| 699,873
|
def line_animation(line, idx, limit=10, step=1):
    """Animate a long string: return the next offset and the visible window.

    The line is suffixed with ' | ' and treated as circular; the window is
    `limit` characters starting at `idx`, wrapping to the head when it runs
    past the end.
    """
    looped = line + ' | '
    total = len(looped)
    window = looped[idx:limit + idx]
    # How many characters the window is short of `limit`.
    shortfall = abs(len(window) - limit)
    if limit + idx > total:
        # Wrap around: pad the window from the head of the string.
        window = window + looped[0:shortfall]
    idx += step
    if idx > (total + limit) - 1:
        idx = 0
    return idx, window
|
85f84c524c45ea81b2b83d14bff743791d126137
| 699,874
|
def format_date(value):
    """Return value as string, or None for falsy input."""
    if not value:
        return None
    return str(value)
|
76887bb3be2c858102f1b37e0a1ed8ba42235353
| 699,875
|
def bbox2points(bbox):
    """
    Convert a YOLO-format bounding box (center x, center y, width, height)
    to cv2 rectangle corner points (xmin, ymin, xmax, ymax).
    """
    cx, cy, w, h = bbox
    half_w = w / 2
    half_h = h / 2
    xmin = int(round(cx - half_w))
    ymin = int(round(cy - half_h))
    xmax = int(round(cx + half_w))
    ymax = int(round(cy + half_h))
    return xmin, ymin, xmax, ymax
|
e6c3de2477e16a74c4c4c6d00d97eca473073f1d
| 699,876
|
def level_to_rgb(level, background):
    """
    Converts internal concept of level to a gray color for text in RGB.

    :param level: int in range 1..15 (inclusive); any other value yields None.
    :param background: RGB tuple of the background; only its first channel
        is used as the base gray value.
    :return: (R, G, B) gray tuple — level 1 is lightest (base + 15) and each
        additional level is one point darker — or None for invalid levels.
    """
    if level not in range(1, 16):
        return None
    gray_code = background[0] + 15 - level + 1
    # Fix: removed a leftover debug print that polluted stdout on every call.
    return (gray_code, gray_code, gray_code)
|
5183e72f4422031f2aed9881be1cce869c8b2606
| 699,877
|
import os
def extension(path: str) -> str:
    """
    Return the extension of *path* (including the dot), or '' if none.
    """
    base = os.path.basename(path)
    _, ext = os.path.splitext(base)
    return ext
|
16771d82f62815531ffd0394902fd84647ded58c
| 699,878
|
def get_message_relay(celery_app):
    """
    Return the registered 'send_message' task from *celery_app*'s task map.
    """
    task_name = 'eduid_msg.tasks.send_message'
    return celery_app.tasks[task_name]
|
d024b6c95013d8fbd854d833e74dbedf0220a7df
| 699,879
|
import json
def get_boxnote_text(filepath):
    """Convert a boxnote to plain text.
    Parameters:
        filepath (str): the path to the boxfile
    Returns: the text of the boxfile as a string
    """
    # `with` guarantees the handle is closed even when json parsing raises
    # (the old open/close pair leaked the handle on error).
    with open(filepath, encoding="utf8") as f:
        return json.loads(f.read())["atext"]["text"]
|
ba41e36d534931b9e2d1a401d7317ee4f8956f13
| 699,880
|
def func_args_realizer(args):
    """
    Using an ast.FunctionDef node's argument names, build the source of a
    list literal that pairs each name with its runtime value.
    def whee(bob, frank=1):
        pass
    whee(1, 3) => [('bob', 1), ('frank', 3)]
    whee(1) => [('bob', 1), ('frank', 1)]
    """
    pairs = ["('{0}', {0})".format(name) for name in args]
    return "[ {0} ]".format(', '.join(pairs))
|
d8f4bc8b7a79796e9512b6c6c2ad884e79389ebc
| 699,881
|
def categorical_log_prob(self, logits):
    """ torch RelaxedOneHotCategorical log_prob is weird (uses that of TransformedDistribution)
    need to use log_prob from base_dist instead
    """
    base = self.base_dist
    return base.log_prob(logits)
|
50b9124ddbdd04696b4b152d55cb0ec4ffd86b05
| 699,883
|
from typing import Dict
def filter_val_not_none(d: Dict) -> Dict:
    """
    Return a new dictionary composed of all key-value pairs (k, v) in d where v is not None.
    :param d: original dictionary
    :return: d, without key-value pairs where value is None
    >>> filter_val_not_none({"a": 5, "b": 10, "c": None}) == {"a": 5, "b": 10}
    True
    """
    kept = {}
    for key, val in d.items():
        if val is not None:
            kept[key] = val
    return kept
|
21aaf90a4407a690ce76d09dfb54d93c1293c953
| 699,884
|
def get_card_ids(db) -> list:
    """
    Gets a list of all card IDs in the database.
    :return: List of card IDs.
    """
    cards = db['cards']
    return cards.distinct('_id')
|
48e6e1880253603233ccdc55fb38269f75375f8f
| 699,885
|
import time
def datetime_to_integer(datetime):
    """Convert datetime object to an integer value (seconds since the epoch).

    'datetime' is the datetime object.
    """
    # Fix: time.mktime returns a float; truncate to match the documented
    # "integer value" contract (and the function's name).
    return int(time.mktime(datetime.timetuple()))
|
8d4d94fac947c3dd9e82ee3d60a1a57a6440457d
| 699,886
|
import operator
def sort_load_list_by_time(load_list):
    """Given the standard load list return a list ordered by time.
    The list contains a tuple of the load_id and the actual load_set.
    """
    by_time = operator.itemgetter(1)
    return sorted(load_list, key=by_time)
|
1ae00f8ffe0cf4ef7ab899b9e6d3c9a0051c7e91
| 699,887
|
def crop_to_ratio(im, desired_ratio=4 / 3):
    """ Crop (either) the rows or columns of an image to match (as best as possible) the
    desired ratio.
    Arguments:
        im (np.array): Image to be processed.
        desired_ratio (float): The desired ratio of the output image expressed as
            width/height so 3:2 (= 3/2) or 16:9 ( = 16/9).
    Returns:
        An image (np.array) with the desired ratio.
    """
    rows = im.shape[0]
    cols = im.shape[1]
    if cols / rows < desired_ratio:
        # Too tall: trim rows, splitting the excess between top and bottom.
        keep_rows = int(round(cols / desired_ratio))
        excess = rows - keep_rows
        top = excess // 2
        bottom = excess - top
        return im[top:rows - bottom, :]
    # Too wide (or exact): trim columns, splitting between left and right.
    keep_cols = int(round(rows * desired_ratio))
    excess = cols - keep_cols
    left = excess // 2
    right = excess - left
    return im[:, left:cols - right]
|
dd2301708aa514b2d9b87758ce38d7bd9f9d874c
| 699,888
|
def grompp_npt(job):
    """Return the GROMACS grompp command line for the npt step.

    The *job* argument is unused; kept for signature compatibility.
    """
    mdp = "npt.mdp"
    return f"gmx grompp -f {mdp} -o npt.tpr -c em.gro -p init.top --maxwarn 1"
|
7afdf17586250a62106c67b2594d1aa057fef09e
| 699,889
|
def uri_leaf(uri):
    """
    Get the "leaf" - fragment id or last segment - of a URI. Useful e.g. for
    getting a term from a "namespace like" URI.
    >>> uri_leaf("http://purl.org/dc/terms/title") == 'title'
    True
    >>> uri_leaf("http://www.w3.org/2004/02/skos/core#Concept") == 'Concept'
    True
    >>> uri_leaf("http://www.w3.org/2004/02/skos/core#") # returns None
    """
    # Fix: the separator checks ran *after* the loop (an apparent dedent
    # bug), so plain path URIs were split on ':' and the function's own
    # doctests failed.  Check each separator in priority order instead:
    # fragment ('#') wins over path ('/'), which wins over scheme (':').
    for char in ('#', '/', ':'):
        if uri.endswith(char):
            # URI ends in a separator: there is no leaf.
            return None
        if char in uri:
            leaf = uri.rsplit(char, 1)[-1]
            return leaf or None
    # No separator at all: no namespace-like structure, hence no leaf.
    return None
|
3045806ac56124331c58b6daffb5c1b5c202c0eb
| 699,890
|
import yaml
def load_yaml_config(filename):
    """Load a YAML configuration file and return its contents as a dict."""
    with open(filename, "rt", encoding='utf-8') as file:
        return yaml.safe_load(file)
|
771dbf8fdaca1575bc9bdb472d6aa1405c689e7a
| 699,891
|
import uuid
def build_request_body(method, params):
    """Build a JSON-RPC 2.0 request body based on the parameters given."""
    request_id = str(uuid.uuid4())
    return {
        "jsonrpc": "2.0",
        "method": method,
        "params": params,
        "id": request_id,
    }
|
372df70bd17e78f01de5f0e537988072ac9716cc
| 699,892
|
def part1(input_data):
    """
    >>> part1(["939","7,13,x,x,59,x,31,19"])
    295
    """
    earliest = int(input_data[0])
    # Ignore bus_ids with 'x'
    running = [int(tok) for tok in input_data[1].split(',') if tok != 'x']
    # Waiting time for bus b: b - (earliest mod b) minutes.
    def wait(bus):
        return bus - (earliest % bus)
    best = min(running, key=wait)
    return best * wait(best)
|
99c1928c5833f3c9773a28323f2bd2a903f626f3
| 699,893
|
import uuid
def read_bootid():
    """
    Mocks read_bootid as this is a Linux-specific operation.

    Returns a random 32-character hexadecimal string.
    """
    boot_id = uuid.uuid4()
    return boot_id.hex
|
02ef132a4aa157a4c111bf8938a4a3716a9c2f29
| 699,894
|
def list_group(flatten_list, offset_list):
    """Inverse of list_flatten: slice the flat list back into groups.

    Each offset in *offset_list* selects flatten_list[offset[0]:offset[1]].
    """
    return [flatten_list[bounds[0]:bounds[1]] for bounds in offset_list]
|
6ae7f52d267ea682c704a4a494bdfb63f8ccf23a
| 699,895
|
import numpy
def normalized(vector):
    """
    Get unit vector for a given one.
    :param vector:
        Numpy vector as coordinates in Cartesian space, or an array of such.
    :returns:
        Numpy array of the same shape and structure where all vectors are
        normalized. That is, each coordinate component is divided by its
        vector's length.
    """
    sq_len = numpy.sum(vector * vector, axis=-1)
    # Keep a trailing singleton axis so the division broadcasts per vector.
    lengths = numpy.sqrt(sq_len.reshape(sq_len.shape + (1,)))
    return vector / lengths
|
424595a59e52ed8b328a629b28dd0206c599484f
| 699,896
|
def sample_conditional_random(generator, m, n, **kwargs):
    """
    Sample `m * n` points from condition space completely randomly.

    Extra keyword arguments are accepted for interface compatibility and
    ignored.
    """
    distribution = generator.condition_distribution
    return distribution.sample(m * n).eval()
|
bc50e32e47bffe10c11df02cf365c5396b722d4a
| 699,897
|
import torch
def fetch_optimizer(lr, wdecay, epsilon, num_steps, params):
    """ Create the optimizer and learning rate scheduler """
    optimizer = torch.optim.AdamW(params, lr=lr, weight_decay=wdecay, eps=epsilon)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=lr,
        total_steps=num_steps + 100,
        pct_start=0.05,
        cycle_momentum=False,
        anneal_strategy='linear',
    )
    return optimizer, scheduler
|
e6d028f4adf58c303e1e7f0cb0b5233cf4f6026c
| 699,898
|
def load_bmrbm(table_fname, resname_col, atom_col, shift_col):
    """Load a BMRBM table and return a dictionary.

    Column arguments are 1-based.  The result maps residue number ->
    {atom name: chemical shift}.  Blank lines are skipped.
    """
    shifts = {}
    with open(table_fname, "r") as table:
        for raw_line in table:
            # Split so that the whitespaces don't matter
            fields = raw_line.split()
            if not fields:
                # line is empty
                continue
            # Caller's columns start at 1, so subtract 1 everywhere.
            resnum = int(fields[resname_col - 1])
            atom = fields[atom_col - 1]
            shift = float(fields[shift_col - 1])
            shifts.setdefault(resnum, {})[atom] = shift
    return shifts
|
85abb1fb714f407b91ecb9b3b04163b2badbec8f
| 699,899
|
def yp_raw_competitors(data_path):
    """
    Path of the file containing the list of business objects.
    File Type: JSON
    """
    filename = 'yp_competitors.json'
    return f'{data_path}/{filename}'
|
7c33229d9cec5900a2e7b4fd868f91f576761c50
| 699,900
|
import re
def to_num(string):
    """Convert string to number (or None) if possible.

    "None" -> None; digits with a decimal point -> float; plain digits ->
    int; anything else — including non-string input — is returned unchanged.
    """
    if type(string) != str:
        return string
    if string == "None":
        return None
    # Fix: raw strings — "\d" in a plain literal is an invalid escape
    # sequence (DeprecationWarning today, SyntaxError in future Pythons).
    if re.match(r"\d+\.\d*$", string):
        return float(string)
    elif re.match(r"\d+$", string):
        return int(string)
    else:
        return string
|
79a0740e298e33198dca2d7b7fcd53700f121869
| 699,901
|
def button_action (date, action, value) :
    """Create an HTML button for time-tracking actions.

    Known actions: "approve", "deny", "edit again".
    Returns '' when *date* is falsy; otherwise an <input type="button">
    snippet whose onClick fills the daily-record form ('@action' becomes
    'daily_record_<action>', 'date' becomes *date*) and submits it once.
    """
    # NOTE(review): the next line is a no-op expression statement (leftover
    # annotation of the valid actions), kept byte-identical.
    ''"approve", ''"deny", ''"edit again"
    if not date :
        return ''
    return \
        '''<input type="button" value="%s"
onClick="
if(submit_once()) {
document.forms.edit_daily_record ['@action'].value =
'daily_record_%s';
document.forms.edit_daily_record ['date'].value = '%s'
document.edit_daily_record.submit ();
}
">
''' % (value, action, date)
|
314f950d1601987d23490fc48c2f5365a39d4533
| 699,902
|
def get_tidy_invocation(f, clang_tidy_binary, checks, build_path,
                        quiet, config):
    """Gets a command line for clang-tidy."""
    args = [clang_tidy_binary]
    # Show warnings in all in-project headers by default.
    args.append('-header-filter=src/')
    if checks:
        args.append('-checks=' + checks)
    args.append('-p=' + build_path)
    if quiet:
        args.append('-quiet')
    if config:
        args.append('-config=' + config)
    args.append(f)
    return args
|
50e95d612f08ec5762bd2d0689a4fcbe9c699a11
| 699,903
|
def get_control_colors():
    """
    Returns control colors available in DCC
    :return: list(tuple(float, float, float))
    """
    # Base behavior: no colors — presumably overridden per-DCC elsewhere.
    return []
|
3629fdd069353e4916ba23e172299e96e389b824
| 699,904
|
import torch
def _pre_conv(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    """
    This is a block of local computation done at the beginning of the convolution. It
    basically does the matrix unrolling to be able to do the convolution as a simple
    matrix multiplication.
    Because all the computation are local, we add the @allow_command and run it directly
    on each share of the additive sharing tensor, when running mpc computations

    Args:
        input: 4-D tensor of shape (batch, channels_in, rows, cols).
        weight: 4-D kernel tensor of shape
            (channels_out, channels_in // groups, kernel_rows, kernel_cols).
        bias: optional per-output-channel bias; only its length is validated here.
        stride, padding, dilation: int or pair of ints, as in torch.nn.Conv2d.
        groups: number of blocked connections from input to output channels.

    Returns:
        Tuple of (im_reshaped, weight_reshaped, batch_size, nb_channels_out,
        nb_rows_out, nb_cols_out): the unrolled input matrix, the flattened
        kernel matrix, and 0-dim tensors describing the output geometry.
    """
    assert len(input.shape) == 4
    assert len(weight.shape) == 4
    # Change to tuple if not one
    stride = torch.nn.modules.utils._pair(stride)
    padding = torch.nn.modules.utils._pair(padding)
    dilation = torch.nn.modules.utils._pair(dilation)
    # Extract a few useful values
    batch_size, nb_channels_in, nb_rows_in, nb_cols_in = input.shape
    nb_channels_out, nb_channels_kernel, nb_rows_kernel, nb_cols_kernel = weight.shape
    if bias is not None:
        assert len(bias) == nb_channels_out
    # Check if inputs are coherent
    assert nb_channels_in == nb_channels_kernel * groups
    assert nb_channels_in % groups == 0
    assert nb_channels_out % groups == 0
    # Compute output shape (standard conv2d output-size formula)
    nb_rows_out = int(
        ((nb_rows_in + 2 * padding[0] - dilation[0] * (nb_rows_kernel - 1) - 1) / stride[0]) + 1
    )
    nb_cols_out = int(
        ((nb_cols_in + 2 * padding[1] - dilation[1] * (nb_cols_kernel - 1) - 1) / stride[1]) + 1
    )
    # Apply padding to the input
    if padding != (0, 0):
        padding_mode = "constant"
        input = torch.nn.functional.pad(
            input, (padding[1], padding[1], padding[0], padding[0]), padding_mode
        )
        # Update shape after padding
        nb_rows_in += 2 * padding[0]
        nb_cols_in += 2 * padding[1]
    # We want to get relative positions of values in the input tensor that are used
    # by one filter convolution.
    # It basically is the position of the values used for the top left convolution.
    pattern_ind = []
    for ch in range(nb_channels_in):
        for r in range(nb_rows_kernel):
            for c in range(nb_cols_kernel):
                pixel = r * nb_cols_in * dilation[0] + c * dilation[1]
                pattern_ind.append(pixel + ch * nb_rows_in * nb_cols_in)
    # The image tensor is reshaped for the matrix multiplication:
    # on each row of the new tensor will be the input values used for each filter convolution
    # We will get a matrix [[in values to compute out value 0],
    #                       [in values to compute out value 1],
    #                       ...
    #                       [in values to compute out value nb_rows_out*nb_cols_out]]
    im_flat = input.reshape(batch_size, -1)
    im_reshaped = []
    for cur_row_out in range(nb_rows_out):
        for cur_col_out in range(nb_cols_out):
            # For each new output value, we just need to shift the receptive field
            offset = cur_row_out * stride[0] * nb_cols_in + cur_col_out * stride[1]
            tmp = [ind + offset for ind in pattern_ind]
            im_reshaped.append(im_flat[:, tmp])
    im_reshaped = torch.stack(im_reshaped).permute(1, 0, 2)
    # The convolution kernels are also reshaped for the matrix multiplication
    # We will get a matrix [[weights for out channel 0],
    #                       [weights for out channel 1],
    #                       ...
    #                       [weights for out channel nb_channels_out]].TRANSPOSE()
    # NOTE(review): for groups > 1 this keeps only nb_channels_out // groups rows;
    # confirm grouped convolutions are assembled correctly by the caller.
    weight_reshaped = weight.reshape(nb_channels_out // groups, -1).t()
    return (
        im_reshaped,
        weight_reshaped,
        torch.tensor(batch_size),
        torch.tensor(nb_channels_out),
        torch.tensor(nb_rows_out),
        torch.tensor(nb_cols_out),
    )
|
be27cb73d5dd7480da477b46c3fa64a32feee744
| 699,905
|
def dataset_pre_0_3(client):
    """Return paths of dataset metadata for pre 0.3.4."""
    # Project schema version >= 2 means the layout is no longer pre-0.3.
    if int(client.project.version) >= 2:
        return []
    return (client.path / 'data').rglob(client.METADATA)
|
892732100f46c8ad727b91d63d8563181d7a9dbb
| 699,906
|
import copy
def sanitize_slicing(slice_across, slice_relative_position):
    """
    Return standardized format for `slice_across` and `slice_relative_position`:
    - either `slice_across` and `slice_relative_position` are both `None` (no slicing)
    - or `slice_across` and `slice_relative_position` are both lists,
    with the same number of elements
    Parameters
    ----------
    slice_relative_position : float, or list of float, or None
    slice_across : str, or list of str, or None
        Direction(s) across which the data should be sliced
    """
    # Skip None and empty lists
    if slice_across is None or slice_across == []:
        return None, None
    # Convert to lists
    if not isinstance(slice_across, list):
        slice_across = [slice_across]
    if slice_relative_position is None:
        # Default: slice through the middle (relative position 0)
        slice_relative_position = [0]*len(slice_across)
    if not isinstance(slice_relative_position, list):
        slice_relative_position = [slice_relative_position]
    # Check that the lengths are matching
    if len(slice_across) != len(slice_relative_position):
        # Fixed message: the original concatenated 'have' 'the' without a space.
        raise ValueError(
            'The argument `slice_relative_position` is erroneous: \nIt should '
            'have the same number of elements as `slice_across`.')
    # Return a copy. This is because the rest of the `openPMD-viewer` code
    # sometimes modifies the objects returned by `sanitize_slicing`.
    # Using a copy avoids directly modifying objects that the user may pass
    # to this function (and live outside of openPMD-viewer, e.g. directly in
    # a user's notebook)
    return copy.copy(slice_across), copy.copy(slice_relative_position)
|
1f7c3a0f70ecfc2bc66434d3acde684b499bb35c
| 699,907
|
def is_dependency_valid(dep: dict, dep_dict: dict) -> bool:
    """
    :param dep: a dependency that may or may not be valid or up-to-date
    :param dep_dict: a dictionary mapping dependency identifiers to their up-to-date dependency counterparts
    :return: a boolean indicating whether the dependency is out-of-date or not
    """
    # Guard clauses: empty dep or missing identifying fields -> invalid.
    if not dep:
        return False
    if 'org' not in dep or 'name' not in dep:
        return False
    identifier = '{}-{}'.format(dep['org'], dep['name'])
    return identifier in dep_dict
|
8a5a384ae94152921749d1d93cfe48ce6723e320
| 699,908
|
def equations(abs1, abs2, abs3, solvent):
    """Compute pigment concentrations from absorbances.

    Contains absorption constants (coef) and formulas.

    :param abs1: absorbance used for the carotenoid term
    :param abs2: absorbance used in the chlorophyll b terms
    :param abs3: absorbance used in the chlorophyll a terms
    :param solvent: row index into the coefficient table (0-7)
    :return: [chl_a, chl_b, chl_ab, car]
    """
    # Removed the unused local `separator` that the original defined.
    coef = [[10.05, 0.97, 16.36, 2.43, 7.62, 15.39, 1.43, 35.87, 205],
            [9.93, 0.75, 16.23, 2.42, 7.51, 15.48, 1.3, 33.12, 213],
            [10.36, 1.28, 17.49, 2.72, 7.64, 16.21, 1.38, 40.05, 211],
            [13.36, 5.19, 27.43, 8.12, 5.24, 22.24, 2.13, 97.64, 209],
            [11.24, 2.04, 20.13, 4.19, 7.05, 18.09, 1.90, 63.14, 214],
            [12.25, 2.79, 21.50, 5.10, 7.15, 18.71, 1.82, 85.02, 198],
            [16.72, 9.16, 34.09, 15.28, 1.44, 24.93, 1.63, 104.96, 221],
            [16.82, 9.28, 36.92, 16.54, 0.28, 27.64, 1.91, 95.15, 225]]
    # Unpack the solvent-specific coefficients once, for readability.
    c1, c2, c3, c4, c5, c6, c7, c8, c9 = coef[solvent]
    chl_a = c1 * abs3 - c2 * abs2
    chl_b = c3 * abs2 - c4 * abs3
    chl_ab = c5 * abs3 + c6 * abs2
    car = (1000 * abs1 - c7 * chl_a - c8 * chl_b) / c9
    return [chl_a, chl_b, chl_ab, car]
|
126dc09f0823bf4ae1e881ef3b6284ca76e81fb3
| 699,910
|
def get_traitset_map(pop):
    """
    Utility method which returns a map of culture ID's (hashes) and the trait set
    corresponding to a random individual of that culture (actually, the first one
    we encounter).
    """
    traits_by_culture = {}
    for node in pop.agentgraph.nodes():
        node_traits = pop.agentgraph.node[node]['traits']
        culture_id = pop.get_traits_packed(node_traits)
        # Keep only the first representative seen for each culture.
        traits_by_culture.setdefault(culture_id, node_traits)
    return traits_by_culture
|
c80f1f05e0dd5268990e62e6b87726d5349b53f7
| 699,911
|
def prefix(txt, pref):
    """
    Place a prefix in front of the text.
    """
    # format() applies str() to both arguments, like the original concatenation.
    return "{}{}".format(pref, txt)
|
e9b4efd78f9132f7855cccba84c8a2d4b58ae8bb
| 699,912
|
def _fix_basebox_url(url):
"""
Kinda fix a basebox URL
"""
if not url.startswith('http'):
url = 'http://%s' % url
if not url.endswith('/meta'):
url += '/meta'
return url
|
5c1b446809089ae9239c232588b3f6176ec79549
| 699,913
|
def isinstance_all(iterable, class_or_tuple):
    """
    Check if all items of an iterable are instance of
    a class ou tuple of classes
    >>> isinstance_all(['Hello', 'World'], str)
    True
    >>> isinstance_all([1, 'Hello'], (str, int))
    True
    >>> isinstance_all([True, 'Hello', 5], int)
    False
    """
    for item in iterable:
        if not isinstance(item, class_or_tuple):
            return False
    return True
|
1ea1bf7d66e5436ac429123fef4b33ba92195292
| 699,914
|
import os
def root_dir():
    """
    find the root directory for web static files
    Returns:
        root path for static files
    """
    # <package>/web/static, relative to this module's parent package.
    package_dir = os.path.dirname(os.path.dirname(__file__))
    return os.path.join(package_dir, "web", "static")
|
3e3f40d501ece43f2f0b58cb25f36ef288b84c74
| 699,915
|
import imp
import sys
def main_is_frozen():
    """Return ``True`` if we're running from a frozen program.

    Checks the ``sys.frozen`` attribute set by py2exe/PyInstaller/cx_Freeze,
    then falls back to ``imp.is_frozen`` for tools/freeze bundles.
    """
    if hasattr(sys, "frozen"):  # new py2exe / PyInstaller / cx_Freeze
        return True
    try:
        # `imp` was removed in Python 3.12; treat its absence as "not frozen"
        # (modern freezers all set sys.frozen anyway).
        import imp
    except ImportError:
        return False
    return imp.is_frozen("__main__")  # tools/freeze
|
37871436e0967709f368d5b6c1913c8218bed283
| 699,916
|
import sys
def create_nonce(context, ag):
    """
    Creates a nonce to this actor for passing to the second actor; the second actor will use the
    nonce to message back to the first actor.
    :param context: actor context (not used by this function).
    :param ag: client object exposing an ``actors`` API on which ``addNone()`` is called.
    :return: the response object returned by the nonce-creation call.
    """
    # NOTE(review): `addNone` looks like a possible typo for `addNonce` —
    # confirm against the client library's API before changing.
    try:
        rsp = ag.actors.addNone()
    except Exception as e:
        # Any failure is fatal: report it and terminate the process.
        print("Got exception {} trying to create nonce.".format(e))
        sys.exit()
    return rsp
|
88203803f88d03a6ba4c260000a43379939929a6
| 699,917
|
import random
def make_data(n, m):
    """make_data: prepare matrix of m times n random processing times"""
    # Keys are (machine, job) pairs, both 1-indexed; values in [1, 10].
    return {
        (i, j): random.randint(1, 10)
        for i in range(1, m + 1)
        for j in range(1, n + 1)
    }
|
3a51402c3807ab8ca0f1f3386663299a3e254bf1
| 699,918
|
def to_gigabytes(number):
    """Convert a number from KiB to GiB
    This is used mainly for the gauge, everything else uses the dynamic
    `unit` function.
    """
    kib_per_gib = 1024 ** 2
    return number / kib_per_gib
|
d930541627f32415e432fea57da4c0bc2fa7909f
| 699,919
|
import torch
def differential(f, A, E):
    """ Computes the differential of f at A when acting on E: (df)_A(E) """
    dim = A.size(0)
    # Build the 2x2 block matrix [[A, E], [0, A]]; applying f to it puts
    # the differential in the upper-right block.
    block = torch.zeros(2 * dim, 2 * dim, dtype=A.dtype, device=A.device, requires_grad=False)
    block[:dim, :dim] = A
    block[dim:, dim:] = A
    block[:dim, dim:] = E
    return f(block)[:dim, dim:]
|
d424c9fbe7344b3ba4293c1e3e8e192dfa253f66
| 699,920
|
def get_distance(vectors):
    """Calculate the Euclidean distance between two vectors,
    represented as the first two columns in a DataFrame.

    Args:
        vectors (DataFrame): DataFrame containing the
            two columns representing the vectors.

    Returns:
        float: Euclidean distance of the vectors.
    """
    # Vectorized: length of the difference vector, sqrt(sum(diff**2)).
    # Replaces the original O(n) per-element .iloc loop.
    diff = vectors.iloc[:, 0] - vectors.iloc[:, 1]
    return float((diff ** 2).sum() ** 0.5)
|
761681dd2f63cdd18b5d34bb28dfb8805d4e27d7
| 699,921
|
import tarfile
import pathlib
def _strip_paths(tarinfo: tarfile.TarInfo) -> tarfile.TarInfo:
"""Ensure source filesystem absolute paths are not reflected in tar file."""
original_path = pathlib.Path(tarinfo.name)
tarinfo.name = f"{original_path.name}"
return tarinfo
|
06e262f93b3c5d0b8beab36b2ae7320a2464ad5b
| 699,922
|
def sequentially_executed(nb):
    """Return True if notebook appears freshly executed from top-to-bottom."""
    counts = []
    for cell in nb.get("cells", []):
        # Only non-empty cells that have actually been executed count.
        if cell["source"] and cell.get("execution_count", None) is not None:
            counts.append(cell["execution_count"])
    # Fresh top-to-bottom execution yields exactly 1, 2, ..., k.
    # Returns True if there are no executed code cells, which is fine?
    return counts == list(range(1, len(counts) + 1))
|
d4d6f24f966726c6a6521888bea6fe13f57d1a21
| 699,923
|
def my_func_3(x, y):
    """
    Return x raised to the integer power y.

    Keyword arguments:
    x -- the number
    y -- the exponent
    (number, number) -> number
    >>> my_func_3(2, 2)
    4
    """
    if y == 0:
        return 1
    product = 1
    for _ in range(abs(y)):
        product *= x
    # A negative exponent inverts the positive-power result.
    if y < 0:
        return 1 / product
    return product
|
fd9f4d5dc31b530cef2ee495b16c5781f74530b5
| 699,924
|
def flavors_ram(nova):
    """Get a dict of flavor IDs to the RAM limits.
    :param nova: A Nova client.
    :return: A dict of flavor IDs to the RAM limits.
    """
    # Dict comprehension instead of dict(generator); IDs are stringified.
    return {str(flavor.id): flavor.ram for flavor in nova.flavors.list()}
|
796be050d98dac01a36da15a9de41466ebfbd75d
| 699,925
|
import re
def re_cap(*regexes):
    """
    Capture first of the supplied regex
    :param regexes: list or regex strings
    :return: captured string | None
    """
    def capture(text):
        # Try each pattern in order; return the first group of the first hit.
        for pattern in regexes:
            found = re.search(pattern, text)
            if found:
                return found.group(1)
        return None
    return capture
|
8029a2d475c43b0873e676ba4049e970a2077664
| 699,926
|
def score2act(score):
    """
    Convert the Z-score predicted by PADDLE to a fold-activation value.
    Only useful as a rough reference for activation in S. cerevisiae, as the
    fold-activation will vary between different experimental conditions.
    """
    # Empirically fitted base for the exponential score->activation mapping.
    base = 1.5031611623938073
    return base ** score
|
044cf53813623e5427e7bda2e69afba47f435af0
| 699,927
|
import os
import shutil
def make_saliency_dir(date_time: str) -> str:
    """Make directories for saving saliency map result."""
    save_dir = f"./data/saliency_map/{date_time}"
    # Start from a clean directory tree for this run.
    if os.path.exists(save_dir):
        shutil.rmtree(save_dir)
    os.makedirs(save_dir)
    for subdir in ("input_image", "state", "saliency", "overlay"):
        os.makedirs(os.path.join(save_dir, subdir))
    # Trailing slash kept for callers that concatenate file names directly.
    return save_dir + "/"
|
84a2e493002263385911fb6f9967796e24cfda5e
| 699,928
|
import os
import time
def intro():
    """intro screen, self explanatory

    Prints the title sequence and controls, then blocks on user input.
    Returns False once the player confirms; exits the process on "quit"/"q".
    """
    # POSIX-only terminal clear; has no effect (prints garbage) on Windows.
    os.system('clear')
    print("\n\n Welcome to the grand game of 'Mushroom Picking'!\n\n")
    time.sleep(1)
    print("""'Tales border on the thin line between reality and myth' - Mc' Dingus\n\n
    You wake up in your wooden shed, the first thing your senses pick up is a message:\n
    "Johny my boy, I'd like some mushrooms for my soup, could you get me some from
    The Dark Forest? Thanks. Oh actually bring me... 23 of the small ones and that funny one~"\n
    In that instant you know what date it is, it's your grandmas death aniversary~!
    You take your basket, put on your special camo outfit and go mushroom picking.\n\nHint: Don't starve.
    """)
    time.sleep(2)
    print("Mdlkxzmcp, RafaÅ‚ Sebestjanski and Pawel Potaczek present:::\n\n")
    time.sleep(1)
    # NOTE(review): the banner below appears mojibake (UTF-8 box art decoded as
    # cp1252); preserved byte-for-byte to avoid changing runtime output.
    print (""" â–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆ  â–ˆâ–ˆâ–ˆâ–ˆâ–ˆ   â–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆ  â–ˆâ–ˆâ–ˆâ–ˆ â–ˆ  â–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆ     â–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆ  â–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆ  â–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆ  â–ˆâ–ˆâ–ˆâ–ˆâ–ˆ    â–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆ
â–’â–ˆâ–ˆâ–’  â–ˆâ–ˆâ–’â–“â–ˆâ–ˆ   â–“â–ˆâ–ˆâ–’â–“â–ˆ   â–€ â–“â–ˆâ–ˆâ–’ â–ˆâ–Œâ–’â–ˆâ–ˆâ–’  â–ˆâ–ˆâ–’   â–’â–ˆâ–ˆâ–’  â–ˆâ–ˆâ–’â–“â–ˆâ–ˆâ–’â–“â–ˆâ–ˆâ–’â–“â–ˆ   â–€ â–“â–ˆâ–ˆâ–’ â–ˆâ–ˆâ–’â–“â–ˆ   â–“â–ˆâ–ˆâ–’
â–’â–ˆâ–ˆâ–‘  â–ˆâ–ˆâ–’â–’â–ˆâ–ˆâ–ˆâ–ˆ â–“â–ˆâ–ˆâ–‘â–’â–ˆâ–ˆâ–ˆ   â–’â–ˆâ–ˆâ–’ â–ˆâ–ˆâ–’â–’â–ˆâ–ˆâ–‘  â–ˆâ–ˆâ–’   â–’â–ˆâ–ˆâ–‘  â–ˆâ–ˆâ–’â–’â–ˆâ–ˆâ–’â–’â–ˆâ–ˆâ–’â–’â–ˆâ–ˆâ–ˆ   â–’â–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆ  â–’â–ˆâ–ˆ  â–’â–ˆâ–ˆâ–’
â–’â–ˆâ–ˆ   â–ˆâ–ˆâ–‘â–‘â–“â–ˆâ–’  â–‘â–ˆâ–ˆâ–’â–’â–“â–ˆ  â–„ â–‘â–ˆâ–ˆâ–‘ â–ˆâ–ˆâ–‘â–’â–ˆâ–ˆ   â–ˆâ–ˆâ–‘   â–’â–ˆâ–ˆ   â–ˆâ–ˆâ–‘â–‘â–ˆâ–ˆâ–‘â–’â–ˆâ–ˆâ–‘â–’â–“â–ˆ  â–„ â–’â–ˆâ–ˆâ–’  â–ˆâ–ˆâ–‘â–‘â–ˆâ–ˆ  â–‘â–ˆâ–ˆâ–‘
â–‘ â–ˆâ–ˆâ–ˆâ–ˆâ–“â–’â–‘â–‘â–’â–ˆâ–“â–’ â–‘â–ˆâ–“â–’â–‘â–’â–ˆâ–ˆâ–ˆâ–ˆâ–’â–‘â–ˆâ–ˆâ–ˆâ–ˆâ–“â–’â–‘â–‘ â–ˆâ–ˆâ–ˆâ–ˆâ–“â–’â–‘   â–‘ â–ˆâ–ˆâ–ˆâ–ˆâ–“â–’â–‘â–‘â–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆ â–‘â–’â–ˆâ–ˆâ–ˆâ–ˆâ–’â–‘â–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆ â–‘â–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–ˆâ–’
â–‘ â–’â–‘â–’â–‘â–’â–‘  â–’â–‘â–’  â–‘â–’â–“â–’â–‘â–‘ â–’â–‘ â–‘â–‘â–’â–“â–’â–‘â–’â–‘â–‘ â–’â–‘â–’â–‘â–’â–‘    â–‘ â–’â–‘â–’â–‘â–’â–‘ â–‘ â–’â–‘â–“ â–‘â–‘â–‘ â–’â–‘ â–‘â–‘ â–’â–‘â–’â–‘â–’â–‘ â–‘ â–’â–‘ â–’â–‘
  â–‘ â–’ â–’â–‘  â–‘ â–‘  â–‘ â–‘â–’ â–‘ â–‘  â–‘ â–‘â–‘â–’ â–‘ â–‘  â–‘ â–’ â–’â–‘ """)
    time.sleep(3)
    print("\nCONTROL:\nA - move left\nS - move down\nD - move right\nW - move up\n")
    print("OBJECTS:\n@ - player\n# - tree\nA - mountain\n~ - water\n= - bridge\nqp - mushroosm\n% - shoes\n& - meat")
    print("u - basket\nS - extra life")
    # Block until the player answers; only "yes"-like or "quit"-like replies
    # break out of the loop.
    while True:
        ready = input("\nAre you ready for the greatest adventure of your life? ").lower()
        if ready in ("yes", "y", "ye"):
            # NOTE(review): returns False on confirmation — presumably a flag
            # consumed by the caller's loop; confirm intended polarity.
            return False
        elif ready in ("quit", "q"):
            quit()
        else:
            time.sleep(1)
            continue
|
84a4ce483ab714495facd962f6b3e589a36d9f90
| 699,929
|
def make_grpc_unary_method(channel, service_name, method_descriptor, symbol_database_instance):
    # type (Channel, str, MethodDescriptor, Any) -> Callable
    """Make a grpc unary-unary callable on the channel.
    Args:
        channel: grpc channel
        service_name: name of service
        method_descriptor: method descriptor
        symbol_database_instance: symbol db instance
    """
    request_cls = symbol_database_instance.GetPrototype(method_descriptor.input_type)
    response_cls = symbol_database_instance.GetPrototype(method_descriptor.output_type)
    full_method_name = "/{}/{}".format(service_name, method_descriptor.name)
    return channel.unary_unary(
        full_method_name,
        request_serializer=request_cls.SerializeToString,
        response_deserializer=response_cls.FromString,
    )
|
7d3d6b3708185f7f243756804a2030db12fc1daf
| 699,930
|
def set_crs(gdf, crs):
    """
    Set CRS in GeoDataFrame when current projection is not defined.
    Parameters
    ----------
    gdf : geopandas.GeoDataFrame
        the geodataframe to set the projection
    crs : value accepted by GeoDataFrame.set_crs (e.g. EPSG code or string)
        the coordinate reference system to assign
    Returns
    -------
    gdf : geopandas.GeoDataFrame
        the geodataframe with the projection defined
    """
    return gdf.set_crs(crs)
|
77fc8f303882116fb450149c61332879fb28f6db
| 699,931
|
def dataset_type(dataset):
    """
    Parameters
    ----------
    dataset : dataset script object
    Returns
    -------
    str : The type of dataset.
    Example
    -------
    >>> for dataset in reload_scripts():
    ...     if dataset.name=='aquatic-animal-excretion':
    ...         print(dataset_type(dataset))
    ...
    tabular
    """
    spatial_types = {"RasterDataset", "VectorDataset"}
    # A single spatial table makes the whole dataset spatial.
    for table in dataset.tables.values():
        if getattr(table, 'dataset_type', None) in spatial_types:
            return "spatial"
    return "tabular"
|
4a63021ce725c116b0ed23c851da5983df5c79b5
| 699,932
|
def GetDistinguishableNames(keys, delimiter, prefixes_to_remove):
  """Reduce keys to a concise and distinguishable form.
  Example:
    GetDistinguishableNames(['Day.NewYork.BigApple', 'Night.NewYork.BigMelon'],
                            '.', ['Big'])
    results in {'Day.NewYork.BigApple': 'Day.Apple',
                'Night.NewYork.BigMelon': 'Night.Melon'}.
  If a key has all parts commonly shared with others, then include
  the last shared part in the names. E.g.,
    GetDistinguishableNames(['Day.NewYork', 'Day.NewYork.BigMelon'],
                            '.', ['Big'])
    results in {'Day.NewYork': 'NewYork',
                'Day.NewYork.BigMelon': 'NewYork.Melon'}.
  Args:
    keys: The list of strings, each is delimited by parts.
    delimiter: The delimiter to separate parts of each string.
    prefixes_to_remove: The list of prefix strings to be removed from the parts.
  Returns:
    short_names: A dictionary of shortened keys.
  """
  def RemovePrefix(part, prefixes_to_remove):
    # Strip only the first matching prefix; later prefixes are not tried.
    for prefix in prefixes_to_remove:
      if part.startswith(prefix):
        return part[len(prefix):]
    return part
  key_part_lists = [key.split(delimiter) for key in keys]
  shortest_length = min(len(part_list) for part_list in key_part_lists)
  # common_part[i] = True if all parts at position i are the same across keys.
  common_part = [True] * shortest_length
  for part_list in key_part_lists[1:]:
    for i in range(shortest_length):
      if part_list[i] != key_part_lists[0][i]:
        common_part[i] = False
  # The prefix list to add if one of the key happens to be the concatenation of
  # all common parts.
  prefix_list = ([key_part_lists[0][shortest_length - 1]]
                 if all(common_part) else [])
  short_names = {}
  for key, part_list in zip(keys, key_part_lists):
    # Keep a part if it is beyond the shared span or differs between keys.
    short_names[key] = delimiter.join(prefix_list + [
        RemovePrefix(part, prefixes_to_remove)
        for n, part in enumerate(part_list)
        if n >= shortest_length or not common_part[n]])
  return short_names
|
13cc78b172d0ae074fa3bfa3d9ff93f5877c557d
| 699,933
|
import binascii
import re
def humanhexlify(data, n=-1):
    """Hexlify given data with 1 space char btw hex values for easier reading for humans
    :param data: binary data to hexlify
    :param n: If n is a positive integer then shorten the output of this function to n hexlified bytes.
    Input like
        'ab\x04ce'
    becomes
        '61 62 04 63 65'
    With n=3 input like
        data='ab\x04ce', n=3
    becomes
        '61 62 04 ...'
    """
    truncated = 0 < n < len(data)
    if truncated:
        data = data[:n]
    hx = binascii.hexlify(data)
    # Slice the hex string into two-character byte pairs and join with spaces.
    pairs = [hx[i:i + 2] for i in range(0, len(hx), 2)]
    joined = b' '.join(pairs)
    if truncated:
        return joined + b' ...'
    return joined
|
883323524ecc8b9f55138d290a38666e5c06bac3
| 699,934
|
import re
def yes_workload_no_snippet_target_line(patterns, painted_lines, split_text):
    """Find line to use for scan process in yes workload, no code snippet use case"""
    candidate_lines = []
    candidate_numbers = []
    for line_number in painted_lines:
        line = split_text[line_number]
        # Skip structural / non-executable lines: defs, classes, imports, comments.
        looks_like_def = re.findall("def:*", line)
        looks_like_class = re.findall("class:*", line)
        has_import = re.findall("import", line)
        has_comment = re.findall("#", line)
        if looks_like_def or looks_like_class or has_import or has_comment:
            continue
        for pattern in patterns:
            if re.findall(pattern, line, re.MULTILINE):
                candidate_lines.append(line)
                candidate_numbers.append(line_number)
    return candidate_lines, candidate_numbers
|
bcfdee07500c564c3b2bd82e2fefa24533e06e67
| 699,935
|
def make_arc_consistent(Xj, Xk, csp):
    """Make arc between parent (Xj) and child (Xk) consistent under the csp's constraints,
    by removing the possible values of Xj that cause inconsistencies."""
    for parent_value in csp.domains[Xj]:
        # Keep parent_value only if some child value satisfies the constraint
        # (any() short-circuits like the original break).
        has_support = any(
            csp.constraints(Xj, parent_value, Xk, child_value)
            for child_value in csp.domains[Xk]
        )
        if not has_support:
            csp.prune(Xj, parent_value, None)
    return csp.curr_domains[Xj]
|
12f75686cf18fdb9b976f36c7e985593bc0aaf10
| 699,936
|
import sys
def task_batch_local():
    """Run batch mode locally"""
    # Invoke the example module with the current interpreter.
    command = "%s -m surround_tensorboard_example --mode batch" % sys.executable
    return {'basename': 'batchLocal', 'actions': [command]}
|
b578760a2a5732d6c59c92bf674fbee9b7812af0
| 699,937
|
import os
def make_bin_path(base_path, middle=None):
    """Creates a path to the data binaries.

    Args:
        base_path: dataset base path; a single trailing "/" is ignored.
        middle: optional sub-directory appended under "<base_path>-bin".

    Returns:
        "<base_path>-bin", or "<base_path>-bin/<middle>" when middle is given.
    """
    # endswith() guard avoids the original's IndexError on an empty base_path.
    if base_path.endswith("/"):
        base_path = base_path[:-1]
    bin_root = f'{base_path}-bin'
    if middle is None:
        return bin_root
    return os.path.join(bin_root, middle)
|
817fe8e9d9d564333b47511ea0675972d0f04fad
| 699,938
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.