| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def generation(this, genct, thisgen, lastgen):
""" Prints a notice that a generation is done.
"""
print('Generation {0} complete.'.format(genct), file=this.out)
| 5,340,400
|
def extract_zip(filepath):
"""
Function purpose: unzip file (always inside a new folder)
filepath: filepath to zipfile
"""
abs_path = os.path.abspath(filepath)
root_folder = os.path.split(abs_path)[0]
zip_name = os.path.split(abs_path)[1][:-4]
zip_folder_path = os.path.join(root_folder, zip_name)
with zipfile.ZipFile(abs_path) as zip_file:
zip_file.extractall(zip_folder_path)
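A self-contained usage sketch (it builds a throwaway archive first, since no sample file ships with this snippet); the contents land in a sibling folder named after the zip:
import os
import tempfile
import zipfile

tmp = tempfile.mkdtemp()
archive = os.path.join(tmp, "sample.zip")
with zipfile.ZipFile(archive, "w") as zf:
    zf.writestr("hello.txt", "hi")          # one small member to extract
extract_zip(archive)                         # unpacks into <tmp>/sample/
assert os.path.isfile(os.path.join(tmp, "sample", "hello.txt"))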
| 5,340,401
|
def get_label_parts(label):
"""returns the parts of an absolute label as a list"""
return label[2:].replace(":", "/").split("/")
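A quick illustration, assuming Bazel-style absolute labels of the form //pkg/subpkg:target:
# '//tools/build:compiler' -> drop the leading '//', turn ':' into '/', split on '/'
assert get_label_parts("//tools/build:compiler") == ["tools", "build", "compiler"]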
| 5,340,402
|
def get_args():
"""get command-line arguments"""
parser = argparse.ArgumentParser(
description='Argparse Python script',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-c',
'--cdhit',
help='Output file from CD-HIT (clustered proteins)',
metavar='str',
type=str,
required=True)
parser.add_argument(
'-p',
'--proteins',
help='Proteins FASTA',
metavar='str',
type=str, required=True)
parser.add_argument(
'-o',
'--outfile',
help='Output file',
metavar='str',
type=str,
default='unclustered.fa')
return parser.parse_args()
| 5,340,403
|
def test_invalid_arguments():
"""Test error messages raised by invalid arguments."""
n_ch, n_times = 2, 100
data = np.random.RandomState(0).randn(n_ch, n_times)
info = create_info(n_ch, 100., 'eeg')
raw = RawArray(data, info, first_samp=0)
# negative floats PTP
with pytest.raises(ValueError,
match="Argument 'flat' should define a positive "
"threshold. Provided: '-1'."):
annotate_amplitude(raw, peak=None, flat=-1)
with pytest.raises(ValueError,
match="Argument 'peak' should define a positive "
"threshold. Provided: '-1'."):
annotate_amplitude(raw, peak=-1, flat=None)
# negative PTP threshold for one channel type
with pytest.raises(ValueError,
match="Argument 'flat' should define positive "
"thresholds. Provided for channel type "
"'eog': '-1'."):
annotate_amplitude(raw, peak=None, flat=dict(eeg=1, eog=-1))
with pytest.raises(ValueError,
match="Argument 'peak' should define positive "
"thresholds. Provided for channel type "
"'eog': '-1'."):
annotate_amplitude(raw, peak=dict(eeg=1, eog=-1), flat=None)
# test both PTP set to None
with pytest.raises(ValueError,
match="At least one of the arguments 'peak' or 'flat' "
"must not be None."):
annotate_amplitude(raw, peak=None, flat=None)
# bad_percent outside [0, 100]
with pytest.raises(ValueError,
match="Argument 'bad_percent' should define a "
"percentage between 0% and 100%. Provided: "
"-1.0%."):
annotate_amplitude(raw, peak=dict(eeg=1), flat=None, bad_percent=-1)
# min_duration negative
with pytest.raises(ValueError,
match="Argument 'min_duration' should define a "
"positive duration in seconds. Provided: "
"'-1.0' seconds."):
annotate_amplitude(raw, peak=dict(eeg=1), flat=None, min_duration=-1)
# min_duration equal to the raw duration
with pytest.raises(
ValueError,
match=re.escape("Argument 'min_duration' should define a "
"positive duration in seconds shorter than the "
"raw duration (1.0 seconds). Provided: "
"'1.0' seconds.")
):
annotate_amplitude(raw, peak=dict(eeg=1), flat=None, min_duration=1.)
# min_duration longer than the raw duration
with pytest.raises(
ValueError,
match=re.escape("Argument 'min_duration' should define a "
"positive duration in seconds shorter than the "
"raw duration (1.0 seconds). Provided: "
"'10.0' seconds.")
):
annotate_amplitude(raw, peak=dict(eeg=1), flat=None, min_duration=10)
| 5,340,404
|
def assert_array_equal(x: xarray.coding.strings.StackedBytesArray, y: numpy.ndarray):
"""
usage.xarray: 2
"""
...
| 5,340,405
|
def alignImages():
"""
How good the alignment has to be.
"""
MAX_FEATURES = 1000
GOOD_MATCH_PERCENT = 1
tempSearch = 'temp\\'
typeSearch = '.png'
nameSearch = 'align'
imFilename = tempSearch + nameSearch + typeSearch
im1 = cv2.imread(imFilename, cv2.IMREAD_COLOR)
refFilename = "resources\\ref.png"
im2 = cv2.imread(refFilename, cv2.IMREAD_COLOR)
# Convert images to grayscale
im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
im1Gray = cv2.resize(im1Gray, None, fx=2, fy=2, interpolation=cv2.INTER_LINEAR)
im2Gray = cv2.resize(im2Gray, None, fx=2, fy=2, interpolation=cv2.INTER_LINEAR)
# Detect ORB features and compute descriptors.
orb = cv2.ORB_create(MAX_FEATURES)
keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)
# Match features.
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
matches = matcher.match(descriptors1, descriptors2, None)
# Sort matches by score
#matches.sort(key=lambda x: x.distance, reverse=False)
# Remove not so good matches
numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
matches = matches[:numGoodMatches]
# Draw top matches
imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)
# Extract location of good matches
points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = np.zeros((len(matches), 2), dtype=np.float32)
for i, match in enumerate(matches):
points1[i, :] = keypoints1[match.queryIdx].pt
points2[i, :] = keypoints2[match.trainIdx].pt
# Find homography
h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
# Use homography
height, width, channels = im2.shape
im1Reg = cv2.warpPerspective(im1, h, (width, height))
outFilename = tempSearch+"webcam.png"
#os.remove(outFilename)
cv2.imwrite(outFilename, im1Reg) #saves the file as webcam.png in the temp folder
"""
Check whether the aligned image looks like the order card; if not, run the method again.
"""
if(SearchBot.check('webcam','ref') < 0.64):
alignImages()
| 5,340,406
|
def combined_spID(*species_identifiers):
"""Return a single column unique species identifier
Creates a unique species identifier based on one or more columns of a
data frame that represent the unique species ID.
Args:
species_identifiers: A tuple containing one or more pieces of a unique
species identifier or lists of these pieces.
Returns:
A single unique species identifier or a list of single identifiers
"""
# Make standard input data types capable of element wise summation
input_type = type(species_identifiers[0])
assert input_type in [list, tuple, str, pandas.core.series.Series, numpy.ndarray]
if input_type is not str:
species_identifiers = [pandas.Series(identifier) for identifier in species_identifiers]
single_identifier = species_identifiers[0]
if len(species_identifiers) > 1:
for identifier in species_identifiers[1:]:
single_identifier += identifier
if input_type == numpy.ndarray:
single_identifier = numpy.array(single_identifier)
else:
single_identifier = input_type(single_identifier)
return single_identifier
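A small usage sketch with made-up genus/species columns; the pieces are concatenated element-wise and returned in the input's container type:
import pandas
import numpy

genus = ["Quercus", "Acer"]
species = ["alba", "rubrum"]
print(combined_spID(genus, species))   # ['Quercusalba', 'Acerrubrum']
print(combined_spID("Quercusalba"))    # a single string is returned unchanged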
| 5,340,407
|
def use_meter_tide_correction(MainProg):
"""
Function for using internal CG5 tide correction option.
update the Campaign().corr_g list
"""
tide_correction_meter(MainProg.campaigndata)
| 5,340,408
|
def live(
lang: str = typer.Argument(
None, callback=lang_callback, autocompletion=complete_existing_lang
)
):
"""
Serve with livereload a docs site for a specific language.
This only shows the actual translated files, not the placeholders created with
build-all.
Takes an optional LANG argument with the name of the language to serve; if
omitted, the default language is used.
"""
if lang is None:
lang = default_lang
lang_path: Path = docs_root_path / lang
os.chdir(lang_path)
mkdocs.commands.serve.serve(dev_addr="127.0.0.1:8008")
| 5,340,409
|
def qr(A, prec=1e-10):
"""
computes a faster and economic qr decomposition similar to:
http://www.iaa.ncku.edu.tw/~dychiang/lab/program/mohr3d/source/Jama%5CQRDecomposition.html
"""
m = len(A)
if m <= 0:
return [], A
n = len(A[0])
Rdiag = [0] * n
QR = copy.deepcopy(A)
for k in range(n):
# Compute 2-norm of k-th column without under/overflow.
nrm = 0.0
for i in range(k, m):
nrm = sqrt(nrm ** 2 + QR[i][k] ** 2)
if abs(nrm) > prec:
# Form k-th Householder vector.
if k < m and QR[k][k] < 0:
nrm = -nrm
for i in range(k, m):
QR[i][k] /= nrm
if k < m:
QR[k][k] += 1.0
# Apply transformation to remaining columns.
for j in range(k + 1, n):
s = 0.0
for i in range(k, m):
s += QR[i][k] * QR[i][j]
if k < m:
s = -s / QR[k][k]
for i in range(k, m):
QR[i][j] += s * QR[i][k]
Rdiag[k] = -nrm
# compute R
R = [[0] * n for z in range(min(m, n))]
for i in range(m):
for j in range(i, n):
if i < j:
R[i][j] = QR[i][j]
if i == j:
R[i][i] = Rdiag[i]
# compute Q
w = min(m, n)
Q = [[0] * w for i in range(m)]
for k in range(w - 1, -1, -1):
if k < w:
Q[k][k] = 1.0
for j in range(k, w):
if k < m and abs(QR[k][k]) > prec:
s = 0.0
for i in range(k, m):
s += QR[i][k] * Q[i][j]
s = -s / QR[k][k]
for i in range(k, m):
Q[i][j] += s * QR[i][k]
return Q, R
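A sanity check on a tall 3x2 matrix (assuming the module-level sqrt/copy imports this snippet relies on): the economy-size factors should reproduce A, and Q should have orthonormal columns.
import numpy as np

A = [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]
Q, R = qr(A)
Q, R = np.array(Q), np.array(R)
assert np.allclose(Q @ R, np.array(A))   # A = Q R
assert np.allclose(Q.T @ Q, np.eye(2))   # orthonormal columns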
| 5,340,410
|
def test_extract_zip(tmpdir):
"""Test Case"""
with pytest.raises(TypeError, match=r'[E001]'):
extract_zip(zip_file=123, unzip_dir=1254)
temp_model_dir = tmpdir.mkdir("sub")
zipped_file = str(temp_model_dir) + "/" + WORD_TOKENIZER_FILE_NAME
download_from_url(WORD_TOKENIZER_WEIGHTS_URL, str(temp_model_dir), WORD_TOKENIZER_FILE_NAME)
unzip_dir = tmpdir.mkdir("unzipped")
unzip_dir = str(unzip_dir)
model_path = unzip_dir + "/" + "word_tokenizer.h5"
vocab_path = unzip_dir + "/" + "vocab.txt"
extract_zip(zipped_file, unzip_dir)
assert Path(model_path).exists() is True
assert Path(vocab_path).exists() is True
| 5,340,411
|
def on_task_requested(summary, deduped):
"""When a task is created."""
fields = _extract_job_fields(summary.tags)
fields['deduped'] = deduped
_jobs_requested.increment(fields=fields)
| 5,340,412
|
def split_range(r, n):
"""
Computes the indices of segments after splitting a range of r values
into n segments.
Parameters
----------
r : int
Size of the range vector.
n : int
The number of splits.
Returns
-------
segments : list
The list of lists of first and last indices of segments.
Example
-------
>>> split_range(8, 2)
[[0, 4], [4, 8]]
"""
step = int(r / n)
segments = []
for i in range(n):
new_segment = [step * i, step * (i + 1)]
segments.append(new_segment)
# correct the gap in the missing index due to the truncated step
segments[-1][-1] = r
return segments
| 5,340,413
|
def is_connected_to_mongo():
"""
Make sure user is connected to mongo; returns True if connected, False otherwise.
Check below url to make sure you are looking for the right port.
"""
maxSevSelDelay = 1 # server selection timeout in milliseconds (how long to look for mongo)
try: # make sure this address is running
url = "mongodb://127.0.0.1:27017" # standard mongo port
client = pymongo.MongoClient(url, serverSelectionTimeoutMS=maxSevSelDelay) # check the url for specified amt of time
client.admin.command("serverStatus") # connect via serverStatus (will not cause error if connected)
except pymongo.errors.ServerSelectionTimeoutError as err: # error if serverStatus does not go through
return False # not connected
return True
| 5,340,414
|
def make_xyY_boundary_data(
color_space_name=cs.BT2020, white=cs.D65, y_num=1024, h_num=1024):
"""
Returns
-------
ndarray
small x and small y for each large Y and hue.
the shape is (N, M, 2).
N is a number of large Y.
M is a number of Hue.
"2" are small x and small y.
"""
mtime = MeasureExecTime()
mtime.start()
out_buf = np.zeros((y_num, h_num, 2))
y_list = np.linspace(0, 1.0, y_num)
for idx, large_y in enumerate(y_list):
print(f"idx = {idx} / {y_num}")
out_buf[idx] = make_xyY_boundary_data_specific_Y(
large_y=large_y, color_space_name=color_space_name,
white=white, h_num=h_num)
mtime.lap()
mtime.end()
fname = f"./_debug_lut/xyY_LUT_YxH_{y_num}x{h_num}.npy"
np.save(fname, out_buf)
| 5,340,415
|
def _fill_three_digit_hex_color_code(*, hex_color_code: str) -> str:
"""
Fill 3 digits hexadecimal color code until it becomes 6 digits.
Parameters
----------
hex_color_code : str
Three-digit hexadecimal color code (not including '#').
e.g., 'aaa', 'fff'
Returns
-------
filled_color_code : str
Result color code. e.g., 'aaaaaa', 'ffffff'
"""
filled_color_code: str = ''
for char in hex_color_code:
filled_color_code += char * 2
return filled_color_code
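For reference, a couple of assertions showing the digit-doubling behaviour:
assert _fill_three_digit_hex_color_code(hex_color_code='fa3') == 'ffaa33'
assert _fill_three_digit_hex_color_code(hex_color_code='fff') == 'ffffff'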
| 5,340,416
|
def normalize(text: str, convert_digits=True) -> str:
"""
Normalize Persian text: unify space characters, replace Arabic letters and
(optionally) digits with their Persian equivalents, and fix punctuation spacing.
Arguments:
text [type:string]
convert_digits [type:bool]: also convert Arabic/English digits to Persian digits
Returns:
normalized text [type:string]
"""
# replacing all spaces,hyphens,... with white space
space_pattern = (
r"[\xad\ufeff\u200e\u200d\u200b\x7f\u202a\u2003\xa0\u206e\u200c\x9d]"
)
space_pattern = re.compile(space_pattern)
text = space_pattern.sub(" ", text)
# remove keshide,
text = re.sub(r"[ـ\r]", "", text)
# remove Aarab
text = re.sub(r"[\u064B\u064C\u064D\u064E\u064F\u0650\u0651\u0652]", "", text)
# replace arabic alphabets with equivalent persian alphabet
regex_list = [
(r"ء", r"ئ"),
(r"ﺁ|آ", r"آ"),
(r"ٲ|ٱ|إ|ﺍ|أ", r"ا"),
(r"ﺐ|ﺏ|ﺑ", r"ب"),
(r"ﭖ|ﭗ|ﭙ|ﺒ|ﭘ", r"پ"),
(r"ﭡ|ٺ|ٹ|ﭞ|ٿ|ټ|ﺕ|ﺗ|ﺖ|ﺘ", r"ت"),
(r"ﺙ|ﺛ", r"ث"),
(r"ﺝ|ڃ|ﺠ|ﺟ", r"ج"),
(r"ڃ|ﭽ|ﭼ", r"چ"),
(r"ﺢ|ﺤ|څ|ځ|ﺣ", r"ح"),
(r"ﺥ|ﺦ|ﺨ|ﺧ", r"خ"),
(r"ڏ|ډ|ﺪ|ﺩ", r"د"),
(r"ﺫ|ﺬ|ﻧ", r"ذ"),
(r"ڙ|ڗ|ڒ|ڑ|ڕ|ﺭ|ﺮ", r"ر"),
(r"ﺰ|ﺯ", r"ز"),
(r"ﮊ", r"ژ"),
(r"ݭ|ݜ|ﺱ|ﺲ|ښ|ﺴ|ﺳ", r"س"),
(r"ﺵ|ﺶ|ﺸ|ﺷ", r"ش"),
(r"ﺺ|ﺼ|ﺻ", r"ص"),
(r"ﺽ|ﺾ|ﺿ|ﻀ", r"ض"),
(r"ﻁ|ﻂ|ﻃ|ﻄ", r"ط"),
(r"ﻆ|ﻇ|ﻈ", r"ظ"),
(r"ڠ|ﻉ|ﻊ|ﻋ", r"ع"),
(r"ﻎ|ۼ|ﻍ|ﻐ|ﻏ", r"غ"),
(r"ﻒ|ﻑ|ﻔ|ﻓ", r"ف"),
(r"ﻕ|ڤ|ﻖ|ﻗ", r"ق"),
(r"ڭ|ﻚ|ﮎ|ﻜ|ﮏ|ګ|ﻛ|ﮑ|ﮐ|ڪ|ك", r"ک"),
(r"ﮚ|ﮒ|ﮓ|ﮕ|ﮔ", r"گ"),
(r"ﻝ|ﻞ|ﻠ|ڵ", r"ل"),
(r"ﻡ|ﻤ|ﻢ|ﻣ", r"م"),
(r"ڼ|ﻦ|ﻥ|ﻨ", r"ن"),
(r"ވ|ﯙ|ۈ|ۋ|ﺆ|ۊ|ۇ|ۏ|ۅ|ۉ|ﻭ|ﻮ|ؤ", r"و"),
(r"ﺔ|ﻬ|ھ|ﻩ|ﻫ|ﻪ|ۀ|ە|ة|ہ", r"ه"),
(r"ﭛ|ﻯ|ۍ|ﻰ|ﻱ|ﻲ|ں|ﻳ|ﻴ|ﯼ|ې|ﯽ|ﯾ|ﯿ|ێ|ے|ى|ي", r"ی"),
(r"¬", r""),
(r"•|·|●|·|・|∙|。|ⴰ", r"."),
(r",|٬|٫|‚|,", r"،"),
(r"ʕ|\?", r"؟"),
(r"|ِ||ُ||َ||ٍ||ٌ||ً", r""),
]
for pattern, replac in regex_list:
text = re.sub(pattern, replac, text)
# replace arabic and english digits with equivalent persian digits
num_dict = dict()
if convert_digits:
num_dict[u"0"] = u"۰"
num_dict[u"1"] = u"۱"
num_dict[u"2"] = u"۲"
num_dict[u"3"] = u"۳"
num_dict[u"4"] = u"۴"
num_dict[u"5"] = u"۵"
num_dict[u"6"] = u"۶"
num_dict[u"7"] = u"۷"
num_dict[u"8"] = u"۸"
num_dict[u"9"] = u"۹"
num_dict[u"%"] = u"٪"
num_dict[u"٠"] = u"۰"
num_dict[u"١"] = u"۱"
num_dict[u"٢"] = u"۲"
num_dict[u"٣"] = u"۳"
num_dict[u"٤"] = u"۴"
num_dict[u"٥"] = u"۵"
num_dict[u"٦"] = u"۶"
num_dict[u"٧"] = u"۷"
num_dict[u"٨"] = u"۸"
num_dict[u"٩"] = u"۹"
num_pattern = re.compile(r"(" + "|".join(num_dict.keys()) + r")")
text = num_pattern.sub(lambda x: num_dict[x.group()], text)
punctuation_after, punctuation_before = r"\.:!،؛؟»\]\)\}", r"«\[\(\{"
regex_list = [
# replace quotation with «»
('"([^\n"]+)"', r"«\1»"),
# replace single quotation with «»
("'([^\n\"]+)'", r"«\1»"),
# replace ٬ with «»
('٬([^\n"]+)٬', r"«\1»"),
# replace Double Angle Bracket with «»
('《([^\n"]+)》', r"«\1»"),
# replace dot with momayez
("([\d+])\.([\d+])", r"\1٫\2"),
# replace 3 dots
(r" ?\.\.\.", " … "),
# fix ی space
(r"([^ ]ه) ی ", r"\1ی "),
# put zwnj after می, نمی
(r"(^| )(ن?می) ", r"\1\2"),
# put zwnj before تر, تری, ترین, گر, گری, ها, های
(
r"(?<=[^\n\d "
+ punctuation_after
+ punctuation_before
+ "]{2}) (تر(ین?)?|گری?|های?)(?=[ \n"
+ punctuation_after
+ punctuation_before
+ "]|$)",
r"\1",
),
# join ام, ایم, اش, اند, ای, اید, ات
(
r"([^ ]ه) (ا(م|یم|ش|ند|ی|ید|ت))(?=[ \n" + punctuation_after + "]|$)",
r"\1\2",
),
# remove space before and after quotation
('" ([^\n"]+) "', r'"\1"'),
# remove space before punctuations
(" ([" + punctuation_after + "])", r"\1"),
# remove space after punctuations
("([" + punctuation_before + "]) ", r"\1"),
# put space after . and :
(
"(["
+ punctuation_after[:3]
+ "])([^ "
+ punctuation_after
+ "\w\d\\/۰۱۲۳۴۵۶۷۸۹])",
r"\1 \2",
),
# put space after punctuation
(
"([" + punctuation_after[3:] + "])([^ " + punctuation_after + "])",
r"\1 \2",
),
# put space before punctuations
(
"([^ " + punctuation_before + "])([" + punctuation_before + "])",
r"\1 \2",
),
# Remove repeating characters (keep 2 repeats)
(r"(ئآابپتثجچحخدذرزژسشصضطظعغفقکگلمنوهیچ)\1+", r"\1\1"),
]
for pattern, replac in regex_list:
text = re.sub(pattern, replac, text)
# fix "؟ " in links
text = re.sub(r"([a-zA-z]+)(؟ )", r"\1?", text)
# fix "، " in English numbers
text = re.sub(r"([0-9+])، ([0-9+])", r"\1,\2", text)
# fix "٫" in English numbers
text = re.sub(r"([0-9+])٫([0-9+])", r"\1.\2", text)
# fix "، " in farsi digits
text = re.sub(r"([۰-۹+])، ([۰-۹+])", r"\1٫\2", text)
return text
| 5,340,417
|
def register_name_for(entity):
"""
gets the admin page register name for given entity class.
it raises an error if the given entity does not have an admin page.
:param type[pyrin.database.model.base.BaseEntity] entity: the entity class of
admin page to get its
register name.
:raises AdminPageNotFoundError: admin page not found error.
:rtype: str
"""
return get_component(AdminPackage.COMPONENT_NAME).register_name_for(entity)
| 5,340,418
|
def test_open_tigl():
"""Test the function 'open_tigl'"""
# Create a TIGL handle for a valid TIXI handle
tixi_handle = cpsf.open_tixi(CPACS_IN_PATH)
tigl_handle = cpsf.open_tigl(tixi_handle)
assert tigl_handle
# Raise an error for an invalid TIXI handle
with pytest.raises(AttributeError):
tixi_handle = cpsf.open_tigl('invalid_TIGL_handle')
| 5,340,419
|
def strip_function_tags(tree):
"""Removes all function tags from the tree."""
for subtree in tree.all_subtrees():
subtree.label_suffix = ''
| 5,340,420
|
def index():
"""Show all the posts, most recent first."""
db = get_db()
posts = db.execute(
# "SELECT p.id, title, body, created, author_id, username"
# " FROM post p"
# " JOIN user u ON p.author_id = u.id"
# " ORDER BY created DESC"
"SELECT *, l.author_id as love_author, count(distinct l.id) as likes"
" FROM post p"
" LEFT JOIN user u ON p.author_id = u.id"
" LEFT JOIN love l ON p.id = l.post_id"
" GROUP BY p.id"
" ORDER BY created DESC"
# "SELECT p.id, title, body, created, author_id, username, count(distinct love.id)"
# " FROM post p"
# " LEFT JOIN love on p.id=love.post_id"
# " JOIN user u ON p.author_id = u.id"
# " GROUP BY p.id"
).fetchall()
return render_template("blog/index.html", posts=posts)
| 5,340,421
|
def get_user_information(fbid, extra_fields=[]):
""" Gets user basic information: first_name, last_name, gender,
profile_pic, locale, timezone
:usage:
>>> # Set the user fbid you want the information
>>> fbid = "<user fbid>"
>>> # Call the function passing the fbid of user.
>>> user_information = fbbotw.get_user_information(fbid=fbid)
:param str fbid: User id to get the information.
:param list extra_fields: Extra fields that your app is allowed to \
request. eg. 'locale', 'timezone', 'gender'
:return dict:
>>> user_information = {
"id": "user_id",
"name": "User Full Name",
"first_name": "User First Name",
"last_name": "User Last Name",
"profile_pic": "https://cdn_to_pic.com/123",
}
:facebook docs: `/user-profile <https://developers.facebook.com/docs/\
messenger-platform/user-profile>`_
"""
user_info_url = GRAPH_URL.format(fbid=fbid)
payload = dict()
fields = [
'name', 'first_name', 'last_name', 'profile_pic'
] + extra_fields
payload['fields'] = (
",".join(fields)
)
payload['access_token'] = PAGE_ACCESS_TOKEN
user_info = requests.get(user_info_url, payload).json()
return user_info
| 5,340,422
|
def parseTeam(teamString):
"""Parse strings for data from official Pokemon Showdown format.
Keyword arguments:\n
teamString -- a team string, copied from pokepaste or pokemon showdown
"""
pokemonList = teamString.split('\n\n')
teamList = []
#print(pokemonList)
for pokemon in pokemonList:
currentPokemonDict = {}
moveCounter = 1
currentPokemon = pokemon.split('\n')
if 'Ability' not in pokemon:
continue
for attribute in currentPokemon:
if 'Happiness:' in attribute or 'IVs:' in attribute or 'Shiny:' in attribute:
pass
if '@' in attribute:
attribute = attribute.split('@')
currentPokemonDict['Species'] = attribute[0].strip().replace(' ','')
if '(' in currentPokemonDict['Species']:
currentPokemonDict['Species'] = re.search(r'\(([^)]+)', currentPokemonDict['Species']).group(1)
if len(currentPokemonDict['Species']) == 1:
temp = attribute[0].split('(')[0]
currentPokemonDict['Species'] = temp.strip()
currentPokemonDict['Item'] = attribute[1].strip().replace(' ','')
if 'Nature' in attribute:
attribute = attribute.strip()
attribute = attribute.split(' ')
currentPokemonDict['Nature'] = attribute[0].strip()
if '- ' in attribute:
currentPokemonDict['Move'+str(moveCounter)] = attribute.split('- ')[1].strip().replace(' ','')
moveCounter += 1
if 'EVs' in attribute:
currentPokemonDict['HPEVs'] = 0
currentPokemonDict['AtkEVs'] = 0
currentPokemonDict['DefEVs'] = 0
currentPokemonDict['SpAEVs'] = 0
currentPokemonDict['SpDEVs'] = 0
currentPokemonDict['SpeEVs'] = 0
attribute = attribute.split(':')
attribute = attribute[1].split('/')
for item in attribute:
item = item.strip()
item = item.split(' ')
currentPokemonDict[item[1]+'EVs'] = int(item[0])
teamList.append(currentPokemonDict)
return teamList
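A minimal made-up Showdown export run through the parser (note that internal spaces in species and item names are stripped):
sample = (
    "Garchomp @ Choice Scarf\n"
    "Ability: Rough Skin\n"
    "EVs: 4 HP / 252 Atk / 252 Spe\n"
    "Jolly Nature\n"
    "- Earthquake\n"
    "- Outrage\n"
)
team = parseTeam(sample)
print(team[0]['Species'], team[0]['Item'], team[0]['Nature'], team[0]['AtkEVs'])
# Garchomp ChoiceScarf Jolly 252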
| 5,340,423
|
def url_scheme(url, path):
"""Treat local URLs as 'file://'."""
if not urlparse(url).scheme:
url = "file://" + os.path.join(path, url)
return url
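Two illustrative calls (POSIX-style paths assumed, plus the urlparse/os imports the snippet already uses): a bare filename gains a file:// scheme, while a full URL is returned untouched.
assert url_scheme("data.csv", "/srv/files") == "file:///srv/files/data.csv"
assert url_scheme("https://example.com/x", "/srv/files") == "https://example.com/x"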
| 5,340,424
|
def mix_style(style_codes,
content_codes,
num_layers=1,
mix_layers=None,
is_style_layerwise=True,
is_content_layerwise=True):
"""Mixes styles from style codes to those of content codes.
Each style code or content code consists of `num_layers` codes, each of which
is typically fed into a particular layer of the generator. This function mixes
styles by partially replacing the codes of `content_codes` from some certain
layers with those of `style_codes`.
For example, if both style code and content code are with shape [10, 512],
meaning to have 10 layers and each employs a 512-dimensional latent code. And
the 1st, 2nd, and 3rd layers are the target layers to perform style mixing.
Then the top half of the content code (with shape [3, 512]) will be replaced
by the top half of the style code (also with shape [3, 512]).
NOTE: This function also supports taking single-layer latent codes as inputs,
i.e., setting `is_style_layerwise` or `is_content_layerwise` as False. In this
case, the corresponding code will be first repeated for `num_layers` before
performing style mixing.
Args:
style_codes: Style codes, with shape [num_styles, *code_shape] or
[num_styles, num_layers, *code_shape].
content_codes: Content codes, with shape [num_contents, *code_shape] or
[num_contents, num_layers, *code_shape].
num_layers: Total number of layers in the generative model. (default: 1)
mix_layers: Indices of the layers to perform style mixing. `None` means to
replace all layers, in which case the content code will be completely
replaced by style code. (default: None)
is_style_layerwise: Indicating whether the input `style_codes` are
layer-wise codes. (default: True)
is_content_layerwise: Indicating whether the input `content_codes` are
layer-wise codes. (default: True)
num_layers
Returns:
Codes after style mixing, with shape [num_styles, num_contents, num_layers,
*code_shape].
Raises:
ValueError: If input `content_codes` or `style_codes` is with invalid shape.
"""
if not is_style_layerwise:
style_codes = style_codes[:, np.newaxis]
style_codes = np.tile(
style_codes,
[num_layers if axis == 1 else 1 for axis in range(style_codes.ndim)])
if not is_content_layerwise:
content_codes = content_codes[:, np.newaxis]
content_codes = np.tile(
content_codes,
[num_layers if axis == 1 else 1 for axis in range(content_codes.ndim)])
if not (style_codes.ndim >= 3 and style_codes.shape[1] == num_layers and
style_codes.shape[1:] == content_codes.shape[1:]):
raise ValueError(f'Shapes of style codes and content codes should be '
f'[num_styles, num_layers, *code_shape] and '
f'[num_contents, num_layers, *code_shape] respectively, '
f'but {style_codes.shape} and {content_codes.shape} are '
f'received!')
layer_indices = parse_indices(mix_layers, min_val=0, max_val=num_layers - 1)
if not layer_indices:
layer_indices = list(range(num_layers))
num_styles = style_codes.shape[0]
num_contents = content_codes.shape[0]
code_shape = content_codes.shape[2:]
s = style_codes[:, np.newaxis]
s = np.tile(s, [num_contents if axis == 1 else 1 for axis in range(s.ndim)])
c = content_codes[np.newaxis]
c = np.tile(c, [num_styles if axis == 0 else 1 for axis in range(c.ndim)])
from_style = np.zeros(s.shape, dtype=bool)
from_style[:, :, layer_indices] = True
results = np.where(from_style, s, c)
assert results.shape == (num_styles, num_contents, num_layers, *code_shape)
return results
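A toy shape check (parse_indices is assumed to come from the same module as mix_style): 2 style codes mixed into 3 content codes over 4 layers gives a result of shape (num_styles, num_contents, num_layers, *code_shape).
import numpy as np

styles = np.random.randn(2, 4, 8)      # [num_styles, num_layers, code_dim]
contents = np.random.randn(3, 4, 8)    # [num_contents, num_layers, code_dim]
mixed = mix_style(styles, contents, num_layers=4, mix_layers=[0, 1])
assert mixed.shape == (2, 3, 4, 8)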
| 5,340,425
|
def genKinship(args):
"""
%prog genKinship genotype.mean
Calculate kinship matrix file
"""
p = OptionParser(genKinship.__doc__)
p.add_option('--type', default='1', choices=('1', '2'),
help='specify the way to calculate the relateness, 1: centered; 2: standardized')
p.set_slurm_opts(jn=True)
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
geno_mean, = args
# generate a fake bimbam phenotype based on genotype
f = open(geno_mean)
num_SMs = len(f.readline().split(',')[3:])
mean_prefix = geno_mean.replace('.mean', '')
tmp_pheno = '%s.tmp.pheno' % mean_prefix
f1 = open(tmp_pheno, 'w')
for i in range(num_SMs):
f1.write('sm%s\t%s\n' % (i, 20))
f.close()
f1.close()
# the location of gemma executable file
gemma = op.abspath(op.dirname(__file__)) + '/../apps/gemma'
cmd = '%s -g %s -p %s -gk %s -outdir . -o gemma.centered.%s' \
% (gemma, geno_mean, tmp_pheno, opts.type, mean_prefix)
print('The kinship command running on the local node:\n%s' % cmd)
h = Slurm_header
header = h % (opts.time, opts.memory, opts.prefix, opts.prefix, opts.prefix)
header += cmd
f = open('%s.kinship.slurm' % mean_prefix, 'w')
f.write(header)
f.close()
print('slurm file %s.kinship.slurm has been created, you can sbatch your job file.' % mean_prefix)
| 5,340,426
|
def train_many_models(extractor, param_grid, data_dir, output_dir=None,
**kwargs):
"""
Train many extractor models, then for the best-scoring model, write
train/test block-level classification performance as well as the model itself
to disk in ``output_dir``.
Args:
extractor (:class:`Extractor`): Instance of the ``Extractor`` class to
be trained.
param_grid (dict or List[dict]): Dictionary with parameters names (str)
as keys and lists of parameter settings to try as values, or a list
of such dictionaries, in which case the grids spanned by each are
explored. See documentation for :class:`GridSearchCV` for details.
data_dir (str): Directory on disk containing subdirectories for all
training data, including raw html and gold standard blocks files
output_dir (str): Directory on disk to which the trained model files,
errors, etc. are to be written. If None, outputs are not saved.
**kwargs:
scoring (str or Callable): default 'f1'
cv (int): default 5
n_jobs (int): default 1
verbose (int): default 1
Returns:
:class:`Extractor`: The trained extractor model with the best-scoring
set of params.
See Also:
Documentation for grid search :class:`GridSearchCV` in ``scikit-learn``:
http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html
"""
# set up directories and file naming
output_dir, fname_prefix = _set_up_output_dir_and_fname_prefix(output_dir, extractor)
# prepare and split the data
logging.info('preparing and splitting the data...')
data = prepare_all_data(data_dir)
training_data, test_data = train_test_split(
data, test_size=0.2, random_state=42)
train_html, train_labels, train_weights = extractor.get_html_labels_weights(training_data)
test_html, test_labels, test_weights = extractor.get_html_labels_weights(test_data)
# filter docs we can't get features from
train_blocks = np.array([extractor.blockifier.blockify(doc)
for doc in train_html])
train_mask = [extractor._has_enough_blocks(blocks) for blocks in train_blocks]
train_blocks = train_blocks[train_mask]
train_labels = np.concatenate(train_labels[train_mask])
train_weights = np.concatenate(train_weights[train_mask])
test_labels = np.concatenate(test_labels)
test_weights = np.concatenate(test_weights)
# get features
# TODO: This only 'fit's one doc at a time. No feature fitting actually
# happens for now, but this might be important if the features change
train_features = np.concatenate([extractor.features.fit_transform(blocks)
for blocks in train_blocks])
# fit many models
gscv = GridSearchCV(
extractor.model, param_grid, fit_params={'sample_weight': train_weights},
scoring=kwargs.get('scoring', 'f1'), cv=kwargs.get('cv', 5),
n_jobs=kwargs.get('n_jobs', 1), verbose=kwargs.get('verbose', 1))
gscv = gscv.fit(train_features, train_labels)
logging.info('Score of the best model, on left-out data: %s', gscv.best_score_)
logging.info('Params of the best model: %s', gscv.best_params_)
# evaluate best model on train and test data
extractor.model = gscv.best_estimator_
train_eval = evaluate_model_predictions(
train_labels, extractor.predict(train_html[train_mask]), weights=train_weights)
test_eval = evaluate_model_predictions(
test_labels, extractor.predict(test_html), weights=test_weights)
# pickle the final model
_write_model_to_disk(output_dir, fname_prefix, extractor)
return extractor
| 5,340,427
|
def broadcast(connect_queue, disconnect_queue, event_queue):
"""
Broadcast enqueued events to the connected websockets.
:param connect_queue:
A ``Queue`` instance for new connections.
:param disconnect_queue:
A ``Queue`` instance for disconnected websockets.
:param event_queue:
A ``Queue`` instance for events to broadcast.
"""
sockets = []
for event in event_queue:
while True:
try:
sockets.append(connect_queue.get_nowait())
except Empty:
break
while True:
try:
sockets.remove(disconnect_queue.get_nowait())
except Empty:
break
for socket in sockets:
try:
socket.send(event)
except Exception:
logger.exception('Sending message failed')
| 5,340,428
|
def cleanup_tmps(**kwargs):
"""Remove the temporal directory."""
tmpdir = os.getcwd()
os.chdir('/')
rmtree(tmpdir)
| 5,340,429
|
def test_new_justified_is_later_than_store_justified(spec, state):
"""
J: Justified
F: Finalized
fork_1_state (forked from genesis):
epoch
[0] <- [1] <- [2] <- [3] <- [4]
F J
fork_2_state (forked from fork_1_state's epoch 2):
epoch
└──── [3] <- [4] <- [5] <- [6]
F J
fork_3_state (forked from genesis):
[0] <- [1] <- [2] <- [3] <- [4] <- [5]
F J
"""
# The 1st fork, from genesis
fork_1_state = state.copy()
# The 3rd fork, from genesis
fork_3_state = state.copy()
test_steps = []
# Initialization
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
yield 'anchor_state', state
yield 'anchor_block', anchor_block
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
on_tick_and_append_step(spec, store, current_time, test_steps)
assert store.time == current_time
# ----- Process fork_1_state
# Skip epoch 0
next_epoch(spec, fork_1_state)
# Fill epoch 1 with previous epoch attestations
fork_1_state, store, _ = yield from apply_next_epoch_with_attestations(
spec, fork_1_state, store, False, True, test_steps=test_steps)
# Fork `fork_2_state` at the start of epoch 2
fork_2_state = fork_1_state.copy()
assert spec.get_current_epoch(fork_2_state) == 2
# Skip epoch 2
next_epoch(spec, fork_1_state)
# Fill epoch 3 & 4 with previous epoch attestations
for _ in range(2):
fork_1_state, store, _ = yield from apply_next_epoch_with_attestations(
spec, fork_1_state, store, False, True, test_steps=test_steps)
assert fork_1_state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0
assert fork_1_state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
assert store.justified_checkpoint == fork_1_state.current_justified_checkpoint
# ------ fork_2_state: Create a chain to set store.best_justified_checkpoint
# NOTE: The goal is to make `store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch`
all_blocks = []
# Proposed an empty block at epoch 2, 1st slot
block = build_empty_block_for_next_slot(spec, fork_2_state)
signed_block = state_transition_and_sign_block(spec, fork_2_state, block)
yield from tick_and_add_block(spec, store, signed_block, test_steps)
assert fork_2_state.current_justified_checkpoint.epoch == 0
# Skip to epoch 4
for _ in range(2):
next_epoch(spec, fork_2_state)
assert fork_2_state.current_justified_checkpoint.epoch == 0
# Propose a block at epoch 4, 5th slot
# Propose a block at epoch 5, 5th slot
for _ in range(2):
next_epoch(spec, fork_2_state)
next_slots(spec, fork_2_state, 4)
signed_block = state_transition_with_full_attestations_block(spec, fork_2_state, True, True)
yield from tick_and_add_block(spec, store, signed_block, test_steps)
assert fork_2_state.current_justified_checkpoint.epoch == 0
# Propose a block at epoch 6, SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2 slot
next_epoch(spec, fork_2_state)
next_slots(spec, fork_2_state, spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2)
signed_block = state_transition_with_full_attestations_block(spec, fork_2_state, True, True)
assert fork_2_state.finalized_checkpoint.epoch == 0
assert fork_2_state.current_justified_checkpoint.epoch == 5
# Check SAFE_SLOTS_TO_UPDATE_JUSTIFIED
time = store.genesis_time + fork_2_state.slot * spec.config.SECONDS_PER_SLOT
on_tick_and_append_step(spec, store, time, test_steps)
assert spec.compute_slots_since_epoch_start(spec.get_current_slot(store)) >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED
# Run on_block
yield from add_block(spec, store, signed_block, test_steps)
assert store.finalized_checkpoint.epoch == 0
assert store.justified_checkpoint.epoch == 3
assert store.best_justified_checkpoint.epoch == 5
# ------ fork_3_state: Create another chain to test the
# "Update justified if new justified is later than store justified" case
all_blocks = []
for _ in range(3):
next_epoch(spec, fork_3_state)
# epoch 3
_, signed_blocks, fork_3_state = next_epoch_with_attestations(spec, fork_3_state, True, True)
all_blocks += signed_blocks
assert fork_3_state.finalized_checkpoint.epoch == 0
# epoch 4, attest the first 5 blocks
_, blocks, fork_3_state = next_slots_with_attestations(spec, fork_3_state, 5, True, True)
all_blocks += blocks.copy()
assert fork_3_state.finalized_checkpoint.epoch == 0
# Propose a block at epoch 5, 5th slot
next_epoch(spec, fork_3_state)
next_slots(spec, fork_3_state, 4)
signed_block = state_transition_with_full_block(spec, fork_3_state, True, True)
all_blocks.append(signed_block.copy())
assert fork_3_state.finalized_checkpoint.epoch == 0
# Propose a block at epoch 6, 5th slot
next_epoch(spec, fork_3_state)
next_slots(spec, fork_3_state, 4)
signed_block = state_transition_with_full_block(spec, fork_3_state, True, True)
all_blocks.append(signed_block.copy())
assert fork_3_state.finalized_checkpoint.epoch == 3
assert fork_3_state.current_justified_checkpoint.epoch == 4
# FIXME: pending on the `on_block`, `on_attestation` fix
# # Apply blocks of `fork_3_state` to `store`
# for block in all_blocks:
# if store.time < spec.compute_time_at_slot(fork_2_state, block.message.slot):
# time = store.genesis_time + block.message.slot * spec.config.SECONDS_PER_SLOT
# on_tick_and_append_step(spec, store, time, test_steps)
# # valid_attestations=False because the attestations are outdated (older than previous epoch)
# yield from add_block(spec, store, block, test_steps, allow_invalid_attestations=False)
# assert store.finalized_checkpoint == fork_3_state.finalized_checkpoint
# assert (store.justified_checkpoint
# == fork_3_state.current_justified_checkpoint
# != store.best_justified_checkpoint)
# assert (store.best_justified_checkpoint
# == fork_2_state.current_justified_checkpoint)
yield 'steps', test_steps
| 5,340,430
|
def analyze_iw(aoi, doi, dictionary, size, aoiId):
"""
Function that pre-processes sentinel-2 imagery and runs the LCC change detection algorithm
Parameters:
aoi(ee.Feature): area of interest with property 'landcover'
doi(ee.Date): date of interest
dictionary (ee.Dictionary): appropriate dictionary of lda coefficients
size (float): minimum size (ac) of changes to output
aoiId (str): unique identifier for the area of interest
Returns:
tuple: ee.FeatureCollection with properties 'id', and 'landcover',
ee.Image with bands
"""
# cast dictionary to ee.Dictionary for use in subsequent GEE ops
dictionary = ee.Dictionary(dictionary)
# grab the landcover property from aoi and then cast to geometry
lc = ee.Feature(aoi).get('mode')
aoi = aoi.geometry()
# TODO: This isn't working to add a unique ID
# function to add unique id and landcover type to output feature properties
def add_props(ft):
ftId = aoiId + '_' + '1'
print(ftId)
return ft.set({'id': ftId, 'landcover': lc})
try:
sq_meters = ee.Number(size).multiply(4047)
projdate = ee.Date(doi)
today = projdate.advance(6, 'month')
today_dt = str(datetime.fromtimestamp(int(today.getInfo()['value'])/1e3))[:10]
print('today', today_dt)
proj_dt = str(datetime.fromtimestamp(int(projdate.getInfo()['value']) / 1e3))[:10]
print('proj_dt:', proj_dt)
prior = ee.Date.fromYMD(projdate.get('year').subtract(1), projdate.get('month'), projdate.get('day'))
prior_dt = str(datetime.fromtimestamp(int(prior.getInfo()['value']) / 1e3))[:10]
print('prior_dt:', prior_dt)
rgbn = ['B2', 'B3', 'B4', 'B8', 'B11', 'B12']
print(today.get('year').getInfo())
if(prior.get('year').getInfo() >= 2019):
masked = SR.filterDate(prior, today).filterBounds(aoi).map(clouds.maskSR)
elif(today.get('year').getInfo() >= 2019):
s2 = S2.filterDate(prior, '2018-12-25').filterBounds(aoi).map(clouds.maskTOA)
sr = SR.filterDate('2018-12-26', today).filterBounds(aoi).map(clouds.maskSR)
masked = s2.select(rgbn).merge(sr.select(rgbn))
else:
masked = S2.filterDate(prior, today).filterBounds(aoi).map(clouds.maskTOA)
# if(projdate.get('year').getInfo() >= 2019):
# filtered = SR.filterDate(prior, today).filterBounds(aoi)
# masked = filtered.map(clouds.maskSR)
# else:
# filtered = S2.filterDate(prior, today).filterBounds(aoi)
# masked = filtered.map(clouds.maskTOA)
#masked = S2.filterDate(prior, today).filterBounds(aoi).map(mask)
corrected = terrain.c_correct(masked, rgbn, aoi, DEM)
after = corrected.filterDate(projdate, today)
count = after.size()
print('after size:', count.getInfo())
reference = after.sort('system:time_start', False)
time0 = ee.Image(reference.first()).get('system:time_start')
recent_date = str(datetime.fromtimestamp(int(time0.getInfo()) / 1e3))[:10]
before = corrected.filterDate(prior, projdate)
count = before.size()
print('before size:', count.getInfo())
reference = before.sort('system:time_start', False)
time0 = reference.first().get('system:time_start')
past_date = str(datetime.fromtimestamp(int(time0.getInfo()) / 1e3))[:10]
# run the IW algorithm between the before and after collections within the user defined AOI.
# by default, ag fields are masked by 'yes'
print('running the iw algorithm')
iwout = iw.runIW(before,
after,
aoi,
scl = 30,
tScl = 6,
ag = 'yes').clip(aoi)
print('performing LDA analysis')
# calculate LDA score to discriminate change/no-change pixels in iwout. Requires thresholds from habitat dictionary
scored = stats.ldaScore(
iwout,
['cv_z', 'rcvmax_z', 'ndvi_z', 'ndsi_z', 'ndwi_z', 'nbr_z'],
dictionary)
# scored = stats.ldaScore(iwout, 0 ['cv_z', 'rcvmax_z', 'ndvi_z', 'ndsi_z', 'ndwi_z', 'nbr_z'],
# [cvz, rcvz, ndviz, ndsiz, ndwiz, nbrz]).clip(aoi)
# create a binary [0, 1] image representing change and no-change pixels. Erode and dilate changed areas
selected = scored.gte(dictionary.toImage(['lda']))\
.focal_min(1, 'square', 'pixels')\
.focal_max(1, 'square', 'pixels')
# mask image to retain only pixels equal to '1'
selected = selected.updateMask(selected)
#maskSelected = selected.updateMask(selected.eq(0))
# mask out no-change areas (i.e. selected = 0) here. Creates fewer polygons which should save memory
# selected = selected.updateMask(selected.eq(1))
#print('selected is a ', type(selected))
scale = 10
tileScale = 6
# convert binary image to polygons. Note: this creates polygons for both contiguous change and contiguous no-change areas
polys = selected.reduceToVectors(
geometry=aoi,
scale=scale,
tileScale=tileScale,
eightConnected=True,
bestEffort=True,
maxPixels=1e13)
#print('polys is a ', type(polys))
count = polys.size().getInfo()
print(count)
#print('polys size:', count.getInfo(displaySize))
# return only polygons corresponding to change pixels
polys = polys.map(sz)
polys = polys.map(add_props)
# filter out change polygons smaller than the user defined minimum area
polys = polys.filter(ee.Filter.gte('area', sq_meters))
# indicator = True
return "OK", past_date, recent_date, polys, iwout.select([
'cv_z', 'nbr_z', 'ndsi_z', 'ndwi_z', 'ndvi_z', 'rcvmax_z'])
except Exception as error:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print ("")
print ("*******************************")
print ("Unexpected error in analyze.py")
print (exc_type, fname, exc_tb.tb_lineno)
#print("sys.exc_info:", sys.exc_info()[0])
print ("Error:", error)
print ("*******************************")
print ("")
return "error"
| 5,340,431
|
def _build_client_update(model: model_lib.Model,
use_experimental_simulation_loop: bool = False):
"""Creates client update logic for FedSGD.
Args:
model: A `tff.learning.Model` used to compute gradients.
use_experimental_simulation_loop: Controls the reduce loop function for
input dataset. An experimental reduce loop is used for simulation.
Returns:
A `tf.function`.
"""
dataset_reduce_fn = dataset_reduce.build_dataset_reduce_fn(
use_experimental_simulation_loop)
@tf.function
def client_update(initial_weights, dataset):
model_weights = model_utils.ModelWeights.from_model(model)
tf.nest.map_structure(lambda a, b: a.assign(b), model_weights,
initial_weights)
def reduce_fn(state, batch):
"""Runs forward_pass on batch and sums the weighted gradients."""
accumulated_gradients, num_examples_sum = state
with tf.GradientTape() as tape:
output = model.forward_pass(batch)
gradients = tape.gradient(output.loss, model_weights.trainable)
num_examples = tf.cast(output.num_examples, tf.float32)
accumulated_gradients = tuple(
accumulator + num_examples * gradient
for accumulator, gradient in zip(accumulated_gradients, gradients))
# We may be able to optimize the reduce function to avoid doubling the
# number of required variables here (e.g. keeping two copies of all
# gradients). If you're looking to optimize memory usage this might be a
# place to look.
return (accumulated_gradients, num_examples_sum + num_examples)
def _zero_initial_state():
"""Create a tuple of (gradient accumulators, num examples)."""
return tuple(
tf.nest.map_structure(tf.zeros_like,
model_weights.trainable)), tf.constant(
0, dtype=tf.float32)
gradient_sums, num_examples_sum = dataset_reduce_fn(
reduce_fn=reduce_fn,
dataset=dataset,
initial_state_fn=_zero_initial_state)
# We now normalize to compute the average gradient over all examples.
average_gradient = tf.nest.map_structure(
lambda gradient: gradient / num_examples_sum, gradient_sums)
model_output = model.report_local_unfinalized_metrics()
stat_output = collections.OrderedDict(num_examples=num_examples_sum)
average_gradient, has_non_finite_delta = (
tensor_utils.zero_all_if_any_non_finite(average_gradient))
if has_non_finite_delta > 0:
client_weight = tf.constant(0.0)
else:
client_weight = num_examples_sum
return client_works.ClientResult(
update=average_gradient,
update_weight=client_weight), model_output, stat_output
return client_update
| 5,340,432
|
def test_capped_setup(hass, aioclient_mock):
"""Test the default setup."""
config = {'platform': 'startca',
'api_key': 'NOTAKEY',
'total_bandwidth': 400,
'monitored_variables': [
'usage',
'usage_gb',
'limit',
'used_download',
'used_upload',
'used_total',
'grace_download',
'grace_upload',
'grace_total',
'total_download',
'total_upload',
'used_remaining']}
result = '<?xml version="1.0" encoding="ISO-8859-15"?>'\
'<usage>'\
'<version>1.1</version>'\
'<total> <!-- total actual usage -->'\
'<download>304946829777</download>'\
'<upload>6480700153</upload>'\
'</total>'\
'<used> <!-- part of usage that counts against quota -->'\
'<download>304946829777</download>'\
'<upload>6480700153</upload>'\
'</used>'\
'<grace> <!-- part of usage that is free -->'\
'<download>304946829777</download>'\
'<upload>6480700153</upload>'\
'</grace>'\
'</usage>'
aioclient_mock.get('https://www.start.ca/support/usage/api?key='
'NOTAKEY',
text=result)
yield from async_setup_component(hass, 'sensor', {'sensor': config})
state = hass.states.get('sensor.startca_usage_ratio')
assert state.attributes.get('unit_of_measurement') == '%'
assert state.state == '76.24'
state = hass.states.get('sensor.startca_usage')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '304.95'
state = hass.states.get('sensor.startca_data_limit')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '400'
state = hass.states.get('sensor.startca_used_download')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '304.95'
state = hass.states.get('sensor.startca_used_upload')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '6.48'
state = hass.states.get('sensor.startca_used_total')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '311.43'
state = hass.states.get('sensor.startca_grace_download')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '304.95'
state = hass.states.get('sensor.startca_grace_upload')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '6.48'
state = hass.states.get('sensor.startca_grace_total')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '311.43'
state = hass.states.get('sensor.startca_total_download')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '304.95'
state = hass.states.get('sensor.startca_total_upload')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '6.48'
state = hass.states.get('sensor.startca_remaining')
assert state.attributes.get('unit_of_measurement') == 'GB'
assert state.state == '95.05'
| 5,340,433
|
def ipv4_addr_check():
"""Prompt user for IPv4 address, then validate. Re-prompt if invalid."""
while True:
try:
return ipaddress.IPv4Address(input('Enter valid IPv4 address: '))
except ValueError:
print('Bad value, try again.')
| 5,340,434
|
def test_account_reg_base_query(session, reg_num, reg_type, client_ref, registering, status, start_ts, end_ts):
"""Assert that account registration query base is as expected."""
params: AccountRegistrationParams = AccountRegistrationParams(account_id='PS12345',
collapse=True,
account_name='Unit Testing',
sbc_staff=False)
params.registration_number = reg_num
params.registration_type = reg_type
params.client_reference_id = client_ref
params.registering_name = registering
params.status_type = status
params.start_date_time = start_ts
params.end_date_time = end_ts
query = registration_utils.build_account_reg_base_query(params)
if params.registration_number:
assert query.find(registration_utils.QUERY_ACCOUNT_REG_NUM_CLAUSE) != -1
if params.registration_type:
assert query.find(registration_utils.QUERY_ACCOUNT_REG_TYPE_CLAUSE) != -1
if params.client_reference_id:
assert query.find(registration_utils.QUERY_ACCOUNT_CLIENT_REF_CLAUSE) != -1
if params.registering_name:
assert query.find(registration_utils.QUERY_ACCOUNT_REG_NAME_CLAUSE) != -1
if params.status_type:
assert query.find(registration_utils.QUERY_ACCOUNT_STATUS_CLAUSE) != -1
if params.start_date_time and params.end_date_time:
date_clause = registration_utils.build_reg_date_clause(params, True)
assert query.find(date_clause) != -1
| 5,340,435
|
def text(message):
"""Sent by a client when the user entered a new message.
The message is sent to all people in the room."""
room = session.get('room')
emit('message', {'msg': session.get('name') + ': ' + message['msg']}, room=room)
if room in active_bots.keys():
b = active_bots[room]
# Respond to user
response = b.respond(message['msg'])
response_delay = (len(response) / 5.0) * 100
# Write the input
emit('delay', {'msg': b.name() + ': ' + response, 'delay': response_delay}, room=room)
| 5,340,436
|
def getPlugins():
"""
List the plugins located in the plugins folder.
"""
plugins = []
pluginList = os.listdir(PluginFolder)
for pluginName in pluginList:
location = os.path.join(PluginFolder, pluginName)
if not os.path.isdir(location) or not MainModule + ".py" in os.listdir(location):
continue
info = imp.find_module(MainModule, [location])
plugins.append({"name": pluginName, "info": info})
return plugins
| 5,340,437
|
def energybalance_erg(ratio,crew,erg,w0=4.3801,dt=0.03,doplot=1,doprint=0,theconst=1.0):
"""
calculates one stroke with ratio as input, using force profile in time domain
"""
# w0 = initial flywheel angular velo
# initialising output values
dv = 100.
vavg = 0.0
vend = 0.0
power = 0.0
# stroke parameters
tempo = crew.tempo
mc = crew.mc
recprofile = crew.recprofile
d = crew.strokelength
Nrowers = 1
drag = erg.drag
inertia = erg.inertia
cord = erg.cord
cordlength = erg.cordlength
r = erg.r # sprocket radius
# nr of time steps
aantal = 1+int(round(60./(tempo*dt)))
time = linspace(0,60./tempo,aantal)
# flywheel angular velo
wf = zeros(len(time))+w0
wfdot = zeros(len(time))
# crew velo
vc = zeros(len(time))
vpull = zeros(len(time))
Fhandle = zeros(len(time))
Fres = zeros(len(time))
Fleg = zeros(len(time))
ydotdot = zeros(len(time))
ydot = zeros(len(time)) # +wf[0]*r
Pf = zeros(len(time))
Phandle = zeros(len(time))
Ebungee = zeros(len(time))
Pbungee = zeros(len(time))
handlepos = 0
vhand = ydot[0]
# initial handle and boat velocities
vc[0] = ydot[0]
# calculate average drive speed
tdrive = ratio*max(time)
vdriveavg = crew.strokelength/tdrive
idrivemax = int(round(tdrive/dt))
## powerconst = 2.58153699 # for sin^(1/3)
## powerconst = 2 # for sin
# powerconst = 1.5708 # for sin^2
# macht = 2.
# vhandmax = np.pi*d/(powerconst*tdrive)
# vhand = vhandmax*(np.sin(np.pi*(time)/tdrive))**(macht)
# powerconst = 3.1733259127
# vhandmax = np.pi*d/(powerconst*tdrive)
# vhand = vhandmax*(1-np.cos(2*np.pi*(time)/tdrive))
macht = 0.5
x = np.linspace(0,1,100)
y = (x-x**2)**(macht)
s = np.cumsum(np.diff(x)*y[1:])[-1]
powerconst = 1/s
vhandmax = powerconst*d/tdrive
vhand = vhandmax*((time/tdrive)-(time/tdrive)**2)**macht
# stroke
for i in range(1,idrivemax):
now = dt*i
timerel = now/tdrive
time2 = (dt*(i+1))/tdrive
vi = vhand[i-1]
vj = vhand[i]
vpull[i] = vhand[i]
Tdrag = drag*wf[i-1]**2
handlepos += dt*vi
ydot[i] = crew.vcm(vi, handlepos)
# ydot[i] = vi*(1-timerel)
# ydot[i] = vi
ydotdot[i] = (ydot[i]-ydot[i-1])/dt
wnext = vj/r
wnext2 = wf[i-1]-dt*Tdrag/inertia
# if wnext > 0.99*wf[i-1]:
if wnext > wnext2:
wf[i] = wnext
Tacceler = inertia*(wnext-wf[i-1])/dt
else:
wf[i] = wf[i-1]-dt*Tdrag/inertia
Tacceler = 0
Tdrag = 0
wfdot[i] = (wf[i]-wf[i-1])/dt
Fhandle[i] = ((Tdrag+Tacceler)/r)+cord*(cordlength+handlepos)
Fres[i] = Nrowers*mc*ydotdot[i]
Fleg[i] = Fres[i]+Fhandle[i]
Ebungee[i] = 0.5*(cord*(cordlength+handlepos)**2 - cord*cordlength**2)
Pbungee[i] = (Ebungee[i]-Ebungee[i-1])/dt
vc[i] = ydot[i]
# recovery
trecovery = max(time)-time[idrivemax]
ratio = time[idrivemax]/max(time)
aantalstroke = idrivemax
if (recprofile == 1): # old method (sine)
vhandmax = -np.pi*d/(2*trecovery)
vhand = vhandmax*np.sin(np.pi*(time-time[i])/trecovery)
for k in range(idrivemax,aantal):
Tdrag = drag*wf[k-1]**2 # drag torque
wf[k] = wf[k-1]-dt*Tdrag/inertia
ydot[k] = crew.vcm(vhand, handlepos)
# ydot[k] = vhand
vc[k] = ydot[k]
ydotdot[k] = (ydot[k]-ydot[k-1])/dt
handlepos = handlepos+vhand[k]*dt
Ebungee[k] = 0.5*(cord*(cordlength+handlepos)**2 - cord*cordlength**2)
Pbungee[k] = (Ebungee[k]-Ebungee[k-1])/dt
else:
vavgrec = d/trecovery
vcrecovery = zeros(aantal)
for k in range(idrivemax,aantal):
vhand = crew.vhandle(vavgrec,trecovery,time[k]-time[idrivemax])
vpull[k] = vhand
vcrecovery[k] = crew.vcm(vhand, handlepos)
# vcrecovery[k] = vhand
Tdrag = drag*wf[k-1]**2 # drag torque
wf[k] = wf[k-1]-dt*Tdrag/inertia
wfdot[k] = (wf[k]-wf[k-1])/dt
ydot[k] = vcrecovery[k]
vc[k] = ydot[k]
ydotdot[k] = (ydot[k]-ydot[k-1])/dt
handlepos = d+d*crew.dxhandle(vavgrec,trecovery,time[k]-time[idrivemax])
Fhandle[k] = cord*(cordlength+handlepos)
Fres[k] = Nrowers*mc*ydotdot[k]
Fleg[k] = Fres[k]+Fhandle[k]
Ebungee[k] = 0.5*(cord*(cordlength+handlepos)**2 - cord*cordlength**2)
Pbungee[k] = (Ebungee[k]-Ebungee[k-1])/dt
ydot[0] = ydot[0]/2.
ydotdot[1]=(ydot[1]-ydot[0])/dt
Pq = (Nrowers*mc)*ydotdot*ydot
Pleg = Fleg*ydot
Phandle = Fhandle*vpull
Parm = Phandle-Fhandle*ydot
Plegdiss = 0.5*theconst*(abs(Pleg)-Pleg)
Plegsource = abs(Pleg)
Parmdiss = 0.5*theconst*(abs(Parm)-Parm)
Parmsource = abs(Parm)
# sources
Elegsource = cumsum(Plegsource)*dt
Earmsource = cumsum(Parmsource)*dt
Eleg = cumsum(Pleg)*dt
Earm = cumsum(Parm)*dt
Ehandle = cumsum(Phandle)*dt
# sinks
# drag power
Pw = drag*wf**3.
Ew = cumsum(Pw)*dt
Elegdiss = cumsum(Plegdiss)*dt
Earmdiss = cumsum(Parmdiss)*dt
# storage
Pwheel = inertia*wf*wfdot
Ewheel = cumsum(Pwheel)*dt
Ewheel = Ewheel - Ewheel[0]
Ebungee = cumsum(Pbungee)*dt
Pqrower = abs(Pq)
Pdiss = 0.5*theconst*(Pqrower-Pq)
Eq = cumsum(Pq)*dt
Eqrower = cumsum(Pqrower)*dt
Ediss = cumsum(Pdiss)*dt
# printing
if (doprint==1):
print(("Ediss rower ",Ediss[aantal-1]))
print(("E drag ",Ew[aantal-1]))
print(("Eleg ",Eqrower[aantal-1]))
print(("Ehandle ",Ehandle[aantal-1]))
print(("Ebungee ",Ebungee[aantal-1]))
print("")
print(("P handle ",Ehandle[aantal-1]/time[aantal-1]))
print(("P drag ",Ew[aantal-1]/time[aantal-1]))
print("")
# plotting
if (doplot==1):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, ydot,'r-',label = 'Crew velocity')
pyplot.plot(time, vpull,'k-',label = 'Handle velocity')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel('v (m/s)')
pyplot.show()
if (doplot==2):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, Fhandle,'r-',label = 'Handle force')
pyplot.plot(time, Fleg,'b-',label = 'Leg force')
pyplot.plot(time, Fres,'g-',label = 'Accelerating force')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel('force (N)')
pyplot.show()
if (doplot==3):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, Phandle, 'r-', label = 'Handle Power')
pyplot.plot(time, Pleg,'b-',label = 'Leg power')
pyplot.plot(time, Pq,'k-',label = 'Kinetic power')
pyplot.plot(time, Parm,'y-',label = 'Arm power')
pyplot.plot(time, Pq+Phandle-Parm-Pleg,'b+', label = 'should be zero')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel('power (W)')
pyplot.show()
if (doplot==4):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, Ewheel,'g-',label = 'Flywheel energy stored')
pyplot.plot(time, Eq+Ebungee,'k-',label = 'Kinetic energy')
pyplot.plot(time, Ew,'r-',label = 'Drag dissipation')
pyplot.plot(time, Ediss,'b-',label = 'Rower body dissipation')
pyplot.plot(time, Ewheel+Eq+Ew+Ediss+Ebungee, 'b+', label = 'Sinks+Kinetic')
pyplot.plot(time, Ew+Ediss, 'r+', label = 'Sinks')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel('Energy (J)')
pyplot.show()
if (doplot==5):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, Pleg, 'y-', label = 'Leg power')
pyplot.plot(time, Plegdiss,'g-',label = 'Leg dissipation')
pyplot.plot(time, Plegsource,'g+',label = 'Leg source')
pyplot.plot(time, Parm, 'r-', label = 'Arm power')
pyplot.plot(time, Parmdiss,'k-',label = 'Arm dissipation')
pyplot.plot(time, Parmsource,'k+',label = 'Arm source')
pylab.legend(loc='upper left')
pyplot.xlabel("time (s)")
pyplot.ylabel('power (W)')
pyplot.show()
if (doplot==6):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, Elegsource+Ehandle, 'bo', label = 'Leg power')
pyplot.plot(time, Elegdiss,'g-',label = 'Leg dissipation')
pyplot.plot(time, Earm, 'r-', label = 'Arm power')
pyplot.plot(time, Ehandle, 'k+', label = 'Handle power')
pyplot.plot(time, Earmdiss,'k-',label = 'Arm dissipation')
pyplot.plot(time, Eqrower+Ewheel+Ebungee, 'y+', label = 'Eqrower+Ewheel+Ecord')
pyplot.plot(time, Elegsource+Earmsource,'b+', label = 'Sources')
pylab.legend(loc='upper left')
pyplot.xlabel("time (s)")
pyplot.ylabel('energy (J)')
pyplot.show()
if (doplot==7):
pyplot.clf()
pyplot.plot(time, Ew+Ediss, 'r-', label = 'Total Sinks')
# pyplot.plot(time, Elegsource+Earmsource,'go',label = 'Total Sources')
pyplot.plot(time, Eqrower+Ehandle,'y-',label = 'Total Sources 2')
pyplot.plot(time, Ewheel+Eq+Ew+Ediss+Ebungee, 'b+', label = 'Sinks+Kinetic')
pylab.legend(loc='lower right')
pyplot.xlabel("time (s)")
pyplot.ylabel('energy (J)')
pyplot.show()
if (doplot==8):
pyplot.clf()
pyplot.plot(time, ydot, 'r-', label = 'Crew velocity')
pylab.legend(loc='lower right')
pyplot.xlabel("time (s)")
pyplot.ylabel("v (m/s)")
pyplot.show()
if (doplot==9):
pyplot.clf()
wref = wf
pyplot.plot(time,wref,'r-',label='flywheel speed')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel("Flywheel speed (rad/sec)")
pyplot.show()
dw = wf[len(time)-1]-wf[0]
wavg = mean(wf)
wend = wf[len(time)-1]
energy = max(Ew+Ediss)
energyd = max(Ew)
energy = energy/Nrowers
energyd = energyd/Nrowers
power = energy*tempo/60.
powerd = energyd*tempo/60.
return [dw,wend,wavg,ratio,energy,power,powerd]
| 5,340,438
|
def _read_blockstream(file):
"""Read a block from a file."""
while True:
size = file.read(1)[0]
if size == 0:
break
for _ in range(size):
yield file.read(1)[0]
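
# Hedged usage sketch (not part of the original source): the generator reads
# length-prefixed blocks until a zero-length block, so an in-memory stream is
# enough to exercise it.
import io

_demo_stream = io.BytesIO(bytes([3, 10, 20, 30, 2, 40, 50, 0]))
assert list(_read_blockstream(_demo_stream)) == [10, 20, 30, 40, 50]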
| 5,340,439
|
def get_gpu_count():
"""get avaliable gpu count
Returns:
gpu_count: int
"""
gpu_count = 0
env_cuda_devices = os.environ.get('CUDA_VISIBLE_DEVICES', None)
if env_cuda_devices is not None:
assert isinstance(env_cuda_devices, str)
try:
if not env_cuda_devices:
return 0
gpu_count = len(
[x for x in env_cuda_devices.split(',') if int(x) >= 0])
logger.info(
'CUDA_VISIBLE_DEVICES found gpu count: {}'.format(gpu_count))
        except Exception:
logger.info(
'Cannot find available GPU devices, using CPU or other devices now.'
)
gpu_count = 0
else:
try:
gpu_count = str(subprocess.check_output(["nvidia-smi",
"-L"])).count('UUID')
logger.info('nvidia-smi -L found gpu count: {}'.format(gpu_count))
        except Exception:
logger.info(
'Cannot find available GPU devices, using CPU or other devices now.'
)
gpu_count = 0
return gpu_count
| 5,340,440
|
def get_EXP3_policy(Q, eta, G_previous):
"""
Obtain EXP-3 policy based on a given Q-function. Also, return updated
values of G, to be used in future calls to this function.
Inputs:
1) Q: a num_states x num_actions matrix, in which Q[s][a] specifies
the Q-function in state s and action a.
2) eta: a scalar; this is the eta parameter defined in the EPMC algorithm.
3) G_previous: num_states x num_actions matrix; this is a matrix of the
G-values defined in the EPMC algorithm. These values are from the
previous iteration.
Outputs:
1) policy: a policy, specified by a num_states x num_actions matrix, in
which policy[s][a] is the probability of taking action a in state s.
2) G: num_states x num_actions updated G matrix, as defined in the EPMC
algorithm.
"""
num_actions = Q.shape[1]
# Update the policy:
policy = np.exp((eta / num_actions) * G_previous)
policy = (policy.T / policy.sum(axis=1)).T
policy = eta / num_actions + (1 - eta) * policy
# Update G:
G = G_previous + Q / policy
return policy, G
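
# Hedged usage sketch (illustrative, not from the original module): with a zero
# G matrix the EXP-3 policy starts uniform, and each row of `policy` sums to 1
# because eta/num_actions * num_actions + (1 - eta) = 1.
_Q_demo = np.array([[1.0, 0.5], [0.2, 0.8]])
_policy_demo, _G_demo = get_EXP3_policy(_Q_demo, eta=0.1, G_previous=np.zeros_like(_Q_demo))
# Here _policy_demo == [[0.5, 0.5], [0.5, 0.5]] and _G_demo == _Q_demo / 0.5.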
| 5,340,441
|
def table_content(db, table):
"""
    Return a 2-dimensional array containing all table values.
========================================================
>>> table_content("sys", "host_ip")
[[1, 2, 3],
[2, 3, 4],
[3, 4, 5]]
========================================================
"""
#XXX: uses : `select * from table`
return execute_and_fetch(_SELECT_TABLE.format(db, table))
| 5,340,442
|
def process_sort_params(sort_keys, sort_dirs, default_keys=None,
default_dir='asc'):
"""Process the sort parameters to include default keys.
Creates a list of sort keys and a list of sort directions. Adds the default
keys to the end of the list if they are not already included.
When adding the default keys to the sort keys list, the associated
direction is:
1) The first element in the 'sort_dirs' list (if specified), else
2) 'default_dir' value (Note that 'asc' is the default value since this is
the default in sqlalchemy.utils.paginate_query)
:param sort_keys: List of sort keys to include in the processed list
:param sort_dirs: List of sort directions to include in the processed list
:param default_keys: List of sort keys that need to be included in the
processed list, they are added at the end of the list
if not already specified.
:param default_dir: Sort direction associated with each of the default
keys that are not supplied, used when they are added
to the processed list
:returns: list of sort keys, list of sort directions
:raise exception.InvalidInput: If more sort directions than sort keys
are specified or if an invalid sort
direction is specified
"""
if default_keys is None:
default_keys = ['created_at', 'id']
# Determine direction to use for when adding default keys
if sort_dirs and len(sort_dirs):
default_dir_value = sort_dirs[0]
else:
default_dir_value = default_dir
# Create list of keys (do not modify the input list)
if sort_keys:
result_keys = list(sort_keys)
else:
result_keys = []
# If a list of directions is not provided, use the default sort direction
# for all provided keys.
if sort_dirs:
result_dirs = []
# Verify sort direction
for sort_dir in sort_dirs:
if sort_dir not in ('asc', 'desc'):
msg = _("Unknown sort direction, must be 'desc' or 'asc'.")
raise exception.InvalidInput(reason=msg)
result_dirs.append(sort_dir)
else:
result_dirs = [default_dir_value for _sort_key in result_keys]
# Ensure that the key and direction length match
while len(result_dirs) < len(result_keys):
result_dirs.append(default_dir_value)
# Unless more direction are specified, which is an error
if len(result_dirs) > len(result_keys):
msg = _("Sort direction array size exceeds sort key array size.")
raise exception.InvalidInput(reason=msg)
# Ensure defaults are included
for key in default_keys:
if key not in result_keys:
result_keys.append(key)
result_dirs.append(default_dir_value)
return result_keys, result_dirs
| 5,340,443
|
def namify(idx):
"""
Helper function that pads a given file number and return it as per the dataset image name format.
"""
len_data = 6 #Ilsvr images are in the form of 000000.JPEG
len_ = len(str(idx))
need = len_data - len_
assert len_data >= len_, "Error! Image idx being fetched is incorrect. Invalid value."
pad = '0'*need
return pad+str(idx)
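
# Hedged usage sketch (illustrative only): namify zero-pads an integer index to
# the six-character ILSVRC file-name format.
assert namify(42) == '000042'
assert namify(123456) == '123456'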
| 5,340,444
|
def subreddit_count():
"""
Get number of redditors, percentage of active redditors and growth in new redditors
"""
print("Getting Subreddit Stats now ...")
for key, subreddit_names in {**interested_stocks_subreddits, **interested_crypto_subreddits}.items():
for subreddit_name in subreddit_names:
subreddit = reddit.subreddit(subreddit_name)
subscribers = subreddit.subscribers
print("Looking at r/{} now with {} subscribers.".format(subreddit, subscribers))
active = subreddit.accounts_active
percentage_active = round((active / subscribers)*100, 2)
db.execute("SELECT subscribers FROM subreddit_count WHERE subreddit=? ORDER BY subscribers DESC LIMIT 1",
(subreddit_name, ))
try:
prev_subscribers = db.fetchone()[0]
growth = round((subscribers / prev_subscribers) * 100 - 100, 2)
except TypeError:
growth = 0
if key in interested_stocks_subreddits.keys() and key != "SUMMARY":
price_df = yf.Ticker(key).history(period="1y", interval="1d").reset_index().iloc[::-1]
price_df["% Price Change"] = price_df["Close"].shift(-1)
price_df["% Price Change"] = 100 * (price_df["Close"] - price_df["% Price Change"]) / price_df[
"% Price Change"]
price_df["Date"] = price_df["Date"].astype(str)
price_df = price_df.round(2)
change_price = price_df[price_df['Date'] == date_updated]["% Price Change"].values
                if len(change_price) == 1:
change_price = change_price[0]
else:
change_price = 0
else:
change_price = 0
db.execute("INSERT OR IGNORE INTO subreddit_count VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
(date_updated, key, subreddit_name, subscribers, active, percentage_active,
growth, change_price))
conn.commit()
| 5,340,445
|
def delete_meeting(request, club_name, meeting_id):
"""Meeting is deleted by the host"""
meeting = Meeting.objects.get(id=meeting_id)
MeetingAttendance.objects.filter(user=request.user, meeting=meeting).delete()
meeting.delete()
return redirect('meeting_list', club_name)
| 5,340,446
|
def log_dir(request):
"""Retrieve user-provided logging directory on the command line."""
yield request.config.getoption("--log-dir")
| 5,340,447
|
def get_best_gain(mapping, candidate_mappings, weight_dict, instance_len, cur_match_num, lol1=None):
"""
Hill-climbing method to return the best gain swap/move can get
Arguments:
mapping: current node mapping
candidate_mappings: the candidates mapping list
weight_dict: the weight dictionary
instance_len: the number of the nodes in AMR 2
cur_match_num: current triple match number
Returns:
the best gain we can get via swap/move operation
"""
largest_gain = 0
# True: using swap; False: using move
use_swap = True
# the node to be moved/swapped
node1 = None
# store the other node affected. In swap, this other node is the node swapping with node1. In move, this other
# node is the node node1 will move to.
node2 = None
# unmatched nodes in AMR 2
unmatched = set(range(instance_len))
# exclude nodes in current mapping
# get unmatched nodes
for nid in mapping:
if nid in unmatched:
unmatched.remove(nid)
for i, nid in enumerate(mapping):
# current node i in AMR 1 maps to node nid in AMR 2
for nm in unmatched:
if nm in candidate_mappings[i]:
# remap i to another unmatched node (move)
# (i, m) -> (i, nm)
if veryVerbose:
print("Remap node", i, "from ", nid, "to", nm, file=DEBUG_LOG)
mv_gain = move_gain(mapping, i, nid, nm, weight_dict, cur_match_num)
if veryVerbose:
print("Move gain:", mv_gain, file=DEBUG_LOG)
new_mapping = mapping[:]
new_mapping[i] = nm
new_match_num = compute_match(new_mapping, weight_dict)
if new_match_num != cur_match_num + mv_gain:
print(mapping, new_mapping, file=ERROR_LOG)
print("Inconsistency in computing: move gain", cur_match_num, mv_gain, new_match_num,
file=ERROR_LOG)
if mv_gain > largest_gain:
largest_gain = mv_gain
node1 = i
node2 = nm
use_swap = False
# compute swap gain
if True:
for i, m in enumerate(mapping):
for j in range(i + 1, len(mapping)):
m2 = mapping[j]
if (m2 not in candidate_mappings[i]) and (m not in candidate_mappings[j]):
continue
# swap operation (i, m) (j, m2) -> (i, m2) (j, m)
# j starts from i+1, to avoid duplicate swap
if veryVerbose:
print("Swap node", i, "and", j, file=DEBUG_LOG)
print("Before swapping:", i, "-", m, ",", j, "-", m2, file=DEBUG_LOG)
print(mapping, file=DEBUG_LOG)
print("After swapping:", i, "-", m2, ",", j, "-", m, file=DEBUG_LOG)
sw_gain = swap_gain(mapping, i, m, j, m2, weight_dict, cur_match_num)
if veryVerbose:
print("Swap gain:", sw_gain, file=DEBUG_LOG)
new_mapping = mapping[:]
new_mapping[i] = m2
new_mapping[j] = m
                    if veryVerbose:
                        print(new_mapping, file=DEBUG_LOG)
new_match_num = compute_match(new_mapping, weight_dict)
if new_match_num != cur_match_num + sw_gain:
print(mapping, new_mapping, file=ERROR_LOG)
print("Inconsistency in computing: swap gain", cur_match_num, sw_gain, new_match_num,
file=ERROR_LOG)
if sw_gain > largest_gain:
largest_gain = sw_gain
node1 = i
node2 = j
use_swap = True
# generate a new mapping based on swap/move
cur_mapping = mapping[:]
if node1 is not None:
if use_swap:
if veryVerbose:
print("Use swap gain", file=DEBUG_LOG)
temp = cur_mapping[node1]
cur_mapping[node1] = cur_mapping[node2]
cur_mapping[node2] = temp
else:
if veryVerbose:
print("Use move gain", file=DEBUG_LOG)
cur_mapping[node1] = node2
else:
if veryVerbose:
print("no move/swap gain found", file=DEBUG_LOG)
if veryVerbose:
print("Original mapping", mapping, file=DEBUG_LOG)
print("Current mapping", cur_mapping, file=DEBUG_LOG)
return largest_gain, cur_mapping
| 5,340,448
|
def best_predictor(user, restaurants, feature_fns):
"""Find the feature within FEATURE_FNS that gives the highest R^2 value
for predicting ratings by the user; return a predictor using that feature.
Arguments:
user -- A user
restaurants -- A dictionary from restaurant names to restaurants
feature_fns -- A sequence of functions that each takes a restaurant
"""
reviewed = list(user_reviewed_restaurants(user, restaurants).values())
"*** YOUR CODE HERE ***"
| 5,340,449
|
def mock_environ():
"""Mock for `os.environ.copy`"""
return {"SOME_ENV_VAR": "42"}
| 5,340,450
|
def get_bedtools_coverage_cmd(bam_filename, gff_filename,
output_filename,
require_paired=False):
"""
Get bedtools command for getting the number of reads
from the BAM filename that are strictly contained within
each interval of the GFF.
"""
args = {"bam_filename": bam_filename,
"gff_filename": gff_filename}
# Do not include strandedness flag since that doesn't handle
# paired-end cases
intersect_cmd = "bedtools intersect -abam %(bam_filename)s " \
"-b %(gff_filename)s -f 1 -ubam " %(args)
coverage_cmd = "%s | bedtools coverage -abam - -b %s -counts > %s" \
%(intersect_cmd, gff_filename, output_filename)
return coverage_cmd
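
# Hedged usage sketch (the file names below are hypothetical): the helper only
# builds the shell pipeline string, so it can be inspected without BAM/GFF
# files or bedtools installed.
_demo_cmd = get_bedtools_coverage_cmd("sample.bam", "events.gff", "counts.txt")
# _demo_cmd pipes `bedtools intersect -abam sample.bam -b events.gff -f 1 -ubam`
# into `bedtools coverage ... > counts.txt`.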
| 5,340,451
|
def test_encode_datetime():
""" encode_datetime() should return a ECMA-262 compliant datetime string. """
val = datetime.datetime(2006, 11, 21, 16, 30)
assert encode_datetime(val) == '2006-11-21T16:30:00'
| 5,340,452
|
def get_notes_mapping_dict(notes_list: List) -> Tuple[Dict, np.array]:
"""
Function get list of midi notes and returns mapping for each note
:param notes_list:
:return:
"""
assert len(notes_list) > 0, 'Empty notes list !!'
full_list = sorted(set(notes_list))
notes2idx = {note_e: i for i, note_e in enumerate(full_list)}
idx2note = np.array(full_list)
return notes2idx, idx2note
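
# Hedged usage sketch (illustrative only): duplicate MIDI pitches collapse to a
# sorted vocabulary, giving a note->index dict and the inverse index->note array.
_n2i_demo, _i2n_demo = get_notes_mapping_dict([60, 64, 60, 67, 64])
# _n2i_demo == {60: 0, 64: 1, 67: 2} and list(_i2n_demo) == [60, 64, 67]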
| 5,340,453
|
def p_y_given_x(X, mean_x, variance_x):
"""
Calculates the probablity of class
value being y, given label is x.
PARAMETERS
==========
X: list
Input of unknown class values
given by user.
mean_x: ndarray(dtype=int,ndim=1,axis=1)
Mean for given label.
variance_x: ndarray(dtype=int,ndim=1,axis=1)
Variance for given label.
RETURNS
=======
p: float
Probability, according to gaussian
distribution, for given mean and variance.
"""
p = 1 / (np.sqrt(2 * np.pi * variance_x)) * \
np.exp((-(X - mean_x)**2) / (2 * variance_x))
return p
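
# Hedged usage sketch (illustrative only): for a standard normal (mean 0,
# variance 1) the density at X = 0 is 1/sqrt(2*pi) ~= 0.3989.
_p_demo = p_y_given_x(np.array([0.0]), np.array([0.0]), np.array([1.0]))
# _p_demo ~= array([0.39894228])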
| 5,340,454
|
def run_train(cfg, wandb):
"""Train function starts here
Args:
cfg (obj `DictConfig`): This is the config from hydra.
"""
data_directory = cfg.data.data_directory
train_batch_size = cfg.data.train_batch_size
max_seq_len = cfg.task.max_seq_len # Maximum length per sequence
max_predictions_per_seq = cfg.task.max_predictions_per_seq # Maximum predictions (Mask) per sequence
dtype = cfg.trainer.dtype
is_training = cfg.model.is_training
use_dropout = cfg.model.use_dropout
loss_type = cfg.optimizer.loss_type
use_constant_lr = cfg.optimizer.use_constant_lr
num_layers = cfg.model.num_layers
return_all_layer_outputs = False
training_loss_names = None
if loss_type and loss_type == 'joint':
return_all_layer_outputs = True
training_loss_names = {'loss_{}'.format(i + 1) for i in range(num_layers)}
learning_rate = cfg.optimizer.learning_rate
warmup_rate = cfg.optimizer.warmup_rate
decay_function = cfg.optimizer.decay_function
steps_per_epoch = cfg.trainer.steps_per_epoch
epochs = cfg.trainer.epochs
distribution_strategy = cfg.trainer.strategy
num_gpus = cfg.trainer.num_gpus
tpu_address = cfg.trainer.tpu_address
model_checkpoint_dir = cfg.trainer.model_checkpoint_dir
# Get dataset and tokenizer
tokenizer_layer = get_tokenizer()
# We split text by words (whitespace), inside MLM function.
masked_lm_map_fn = mlm_fn(tokenizer_layer, max_seq_len, max_predictions_per_seq)
train_dataset = get_dataset(data_directory, masked_lm_map_fn, train_batch_size)
# validation_dataset = get_validation_data(all_questions, eval_batch_size, tokenizer_layer, max_seq_len)
# Get Model
model_fn = get_model(return_all_layer_outputs, is_training, use_dropout, tokenizer_layer.vocab_size.numpy())
# Get Optimizer
# steps_per_epoch is number of examples seen during one epoch (with batch size)
# total examples per epoch = steps_per_epoch * batch_size
examples_per_epoch = steps_per_epoch # Assume steps_per_epoch = 100000, and epochs = 5, examples = 500000
optimizer_fn = get_optimizer(
learning_rate, examples_per_epoch, epochs, warmup_rate, decay_function, use_constant_lr
)
# Get loss
loss_fn = get_loss(loss_type)
# Get trainer
trainer = get_trainer(
distribution_strategy=distribution_strategy, num_gpus=num_gpus, tpu_address=tpu_address, dtype=dtype
)
# Train
history = trainer.run(
model_fn=model_fn,
optimizer_fn=optimizer_fn,
train_dataset=train_dataset,
train_loss_fn=loss_fn,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
model_checkpoint_dir=model_checkpoint_dir,
batch_size=train_batch_size,
training_loss_names=training_loss_names,
repeat_dataset=True,
wandb=wandb,
)
return history
| 5,340,455
|
def test_cross_validation_manual_vs_scikit(estimator, build_dataset,
with_preprocessor):
"""Tests that if we make a manual cross-validation, the result will be the
same as scikit-learn's cross-validation (some code for generating the
folds is taken from scikit-learn).
"""
if any(hasattr(estimator, method) for method in ["predict", "score"]):
input_data, labels, preprocessor, _ = build_dataset(with_preprocessor)
estimator = clone(estimator)
estimator.set_params(preprocessor=preprocessor)
set_random_state(estimator)
n_splits = 3
kfold = KFold(shuffle=False, n_splits=n_splits)
n_samples = input_data.shape[0]
        fold_sizes = (n_samples // n_splits) * np.ones(n_splits, dtype=int)
fold_sizes[:n_samples % n_splits] += 1
current = 0
scores, predictions = [], np.zeros(input_data.shape[0])
for fold_size in fold_sizes:
start, stop = current, current + fold_size
current = stop
test_slice = slice(start, stop)
train_mask = np.ones(input_data.shape[0], bool)
train_mask[test_slice] = False
y_train, y_test = labels[train_mask], labels[test_slice]
estimator.fit(input_data[train_mask], y_train)
if hasattr(estimator, "score"):
scores.append(estimator.score(input_data[test_slice], y_test))
if hasattr(estimator, "predict"):
predictions[test_slice] = estimator.predict(input_data[test_slice])
if hasattr(estimator, "score"):
assert all(scores == cross_val_score(estimator, input_data, labels,
cv=kfold))
if hasattr(estimator, "predict"):
assert all(predictions == cross_val_predict(estimator, input_data,
labels,
cv=kfold))
| 5,340,456
|
def wrf_ll_to_ij(lon, lat, map_proj, truelat1=-999.,truelat2=-999.,stand_lon=999., \
ref_lat=-999,ref_lon=-999,pole_lat=90,pole_lon=0,knowni=-999,\
knownj=-999,dx=-999, dy=-999, latinc=-999., loninc=-999):
"""
Converts lon/lat values to i/j index values.
lon,lat - lat,lon values to convert
map_proj -- map projection
"""
lon2 = _promote_scalar(lon)
lat2 = _promote_scalar(lat)
map_proj2 = _promote_scalar(map_proj)
truelat12 = _promote_scalar(truelat1)
truelat22 = _promote_scalar(truelat2)
stand_lon2 = _promote_scalar(stand_lon)
ref_lat2 = _promote_scalar(ref_lat)
ref_lon2 = _promote_scalar(ref_lon)
pole_lat2 = _promote_scalar(pole_lat)
pole_lon2 = _promote_scalar(pole_lon)
knowni2 = _promote_scalar(knowni)
knownj2 = _promote_scalar(knownj)
dx2 = _promote_scalar(dx)
dy2 = _promote_scalar(dy)
latinc2 = _promote_scalar(latinc)
loninc2 = _promote_scalar(loninc)
return fplib.wrf_ll_to_ij(lon2,lat2,map_proj2,truelat12,truelat22,stand_lon2, \
ref_lat2,ref_lon2,pole_lat2,pole_lon2,knowni2, knownj2,\
dx2, dy2, latinc2,loninc2)
| 5,340,457
|
def create_specimen_resource(specimen_identifier: List[dict],
patient_reference: dict,
specimen_type: str,
received_datetime: str = None,
collection_datetime: str = None,
note: str = None) -> dict:
"""
Create specimen resource following the FHIR format
(http://www.hl7.org/implement/standards/fhir/specimen.html)
"""
specimen_type_system = 'http://terminology.hl7.org/CodeSystem/v2-0487'
specimen_resource = {
"resourceType": "Specimen",
"identifier": specimen_identifier,
"subject": patient_reference,
"type": create_codeable_concept(specimen_type_system, specimen_type)
}
if received_datetime:
specimen_resource["receivedTime"] = received_datetime
if collection_datetime:
specimen_resource["collection"] = {
"collectedDateTime": collection_datetime
}
if note:
specimen_resource["note"] = [{"text": note}]
return specimen_resource
| 5,340,458
|
def get_attribute_slots(
tracker: "Tracker", object_attributes: List[Text]
) -> List[Dict[Text, Text]]:
"""
Copied from rasa_sdk.knowledge_base.utils and overridden
as we also need to return the entity role for range queries.
If the user mentioned one or multiple attributes of the provided object_type in
an utterance, we extract all attribute values from the tracker and put them
in a list. The list is used later on to filter a list of objects.
For example: The user says 'What Italian restaurants do you know?'.
The NER should detect 'Italian' as 'cuisine'.
We know that 'cuisine' is an attribute of the object type 'restaurant'.
Thus, this method returns [{'name': 'cuisine', 'value': 'Italian'}] as
list of attributes for the object type 'restaurant'.
Args:
tracker: the tracker
object_attributes: list of potential attributes of object
Returns: a list of attributes
"""
attributes = []
for attr in object_attributes:
attr_val = tracker.get_slot(attr) if attr in tracker.slots else None
if attr_val is not None:
entities = tracker.latest_message.get("entities", [])
role = [e['role'] for e in entities if e['entity'] == attr and e['value'] == attr_val and 'role' in e]
role = role[0] if len(role) else None
attributes.append({"name": attr, "value": attr_val, "role": role})
return attributes
| 5,340,459
|
def pearson_correlation(self, preferences):
"""
Returns the Pearson Correlation of two user_s, A and B by
performing the PPMC calculation on the scatter plot of (a, b)
ratings on the shared set of critiqued titles.
"""
# Store the length to save traversals of the len computation.
# If they have no rankings in common, return 0.
length = len(preferences)
if length == 0:
return 0
# Loop through the preferences of each user_ once and compute the
# various summations that are required for our final calculation.
sumA = sumB = sumSquareA = sumSquareB = sumProducts = 0
for a, b in preferences.values():
sumA += a
sumB += b
sumSquareA += pow(a, 2)
sumSquareB += pow(b, 2)
sumProducts += a*b
# Calculate Pearson Score
numerator = (sumProducts*length) - (sumA*sumB)
denominator = sqrt(((sumSquareA*length) - pow(sumA, 2)) * ((sumSquareB*length) - pow(sumB, 2)))
# Prevent division by zero.
if denominator == 0:
return 0
return abs(numerator / denominator)
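
# Hedged usage sketch (illustrative only): `self` is unused in the body, so the
# method can be exercised directly with None. Perfectly correlated ratings give 1.
_prefs_demo = {'t1': (1, 2), 't2': (2, 4), 't3': (3, 6)}
assert pearson_correlation(None, _prefs_demo) == 1.0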
| 5,340,460
|
def update_stakeholder(id: int, name: str = None, company: str = None, role: str = None,
attitude: str = None, archived: bool = None) -> Stakeholder or None:
"""
Provide a POST API endpoint for updating a specific stakeholder.
:param id: ID of the stakeholder.
:param name: Name of the stakeholder.
:param company: Company of the stakeholder.
:param role: Role of the stakeholder.
:param attitude: Attitude of the stakeholder.
:return:
"""
try:
stakeholder = Stakeholder.query.get(id)
if not name:
raise KeyError('Name must not be empty')
stakeholder.name = name
stakeholder.company = company if company is not None else stakeholder.company
stakeholder.role = role if role is not None else stakeholder.role
stakeholder.attitude = attitude if attitude is not None else stakeholder.attitude
stakeholder.archived = archived if archived is not None else stakeholder.archived
db.session.commit()
return stakeholder
except AttributeError:
raise OperationalError(f"Could not load stakeholder with id {id}", {}, '')
except TypeError:
return None
| 5,340,461
|
def register():
"""Sign up user."""
if current_user.is_authenticated:
return redirect(url_for("homepage"))
form = RegistrationForm()
if form.validate_on_submit():
user = User(
username=form.username.data,
name=form.name.data,
email=form.email.data,
)
user.set_password(form.password.data)
user.set_is_admin()
db.session.add(user)
db.session.commit()
flash("Your account has been created, you are now able to log in.")
return redirect(url_for("users.login"))
return render_template("register.html", title="Register", form=form)
| 5,340,462
|
def _collect_all_tags(nb=None):
""" a fancy generated note showing #tag per [[]] (and [[]] per #tag)
-> nb/all tags.md
"""
ns = []
ns.append(', '.join([t.tag for t in nb.tags]))
ns.append('\n--- ')
for t in nb.tags:
t_w = f'{t} - '
for n in nb.get_tagged(t):
if not n.name == 'all tags':
t_w += f' [[{n.name}]], '
ns.append(t_w.rstrip(', '))
ns.append('\n--- ')
for n in nb.notes.values():
if not n.name == 'all tags':
fn_w = f'[[{n.name}]] - '
if n.tags:
for t in n.tags:
fn_w += f' #{t}, '
ns.append(fn_w.rstrip(', '))
nb.generate_note(
'all tags',
'\n'.join(str(ft) for ft in ns),
overwrite=True,
pnbp=True
)
| 5,340,463
|
def txt_as_matrix(buff, border):
"""\
Returns the text QR code as list of [0,1] lists.
:param io.StringIO buff: Buffer to read the matrix from.
"""
res = []
code = buff.getvalue().splitlines()
len_without_border = len(code) - border
for l in islice(code, border, len_without_border):
res.append([int(clr) for clr in islice(l, border, len_without_border)])
return res
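
# Hedged usage sketch (illustrative only): a 4x4 text code with a 1-module
# border reduces to the inner 2x2 matrix of 0/1 ints.
from io import StringIO

_buff_demo = StringIO("0000\n0110\n0110\n0000\n")
assert txt_as_matrix(_buff_demo, border=1) == [[1, 1], [1, 1]]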
| 5,340,464
|
def create_logismosb_node(name="LOGISMOSB"):
"""
    Create a LOGISMOS-B Nipype node and set its inputs from the machine learning config.
    :param name: name to give the node
    :return: the configured Node
"""
node = Node(LOGISMOSB(), name=name)
config = read_machine_learning_config()
return set_inputs(node, config)
| 5,340,465
|
def segfault():
"""
(gdb) r <<< $(python2 -c "import pwn;print(pwn.cyclic(128, n=8))")
...
Can you ROP your way out of this?
Program received signal SIGSEGV, Segmentation fault.
0x0000000000400b6e in vuln ()
(gdb) info stack
#0 0x0000000000400b6e in vuln ()
#1 0x6161616161616164 in ?? ()
#2 0x6161616161616165 in ?? ()
...
"""
ofs = pwn.cyclic_find(pwn.p64(0x6161616161616164), n=8)
payload = b"A" * ofs + create_ropgadget()
pr.sendlineafter("Can you ROP your way out of this?\n", payload);
pr.interactive()
| 5,340,466
|
def test_get_source():
"""Tests if source of objects is gotten properly."""
assert source_utils.get_source(pytest.Cache)
| 5,340,467
|
def calc_tract_accessibility(tracts, pois, G, weight='length',
func=acc_cumulative_gaussian,k=5,
random_seed=None, func_kws={},
pois_weight_column=None,iter_cap=1_000):
"""
Calculate accessibility by census tract using given accessibility function.
Parameters
----------
tracts : GeoDataframe
Area GeoDataFrame containing census tract information
pois : GeoDataFrame
Point GeoDataFrame containing points of interest
G : NetworkX graph structure
Network Graph.
weight : string
        Graph's weight attribute for shortest paths (such as length or travel time)
func : function
Access score function to use. Options are: acc_cumulative,
acc_soft_threshold, and acc_cumulative_gaussian
func_kws : dictionary
arguments for the access score function
k : int
number of sampled points per tract
pois_weight_column : string
Column in the pois GeoDataFrame with location weights.
random_seed : int
random seed.
iter_cap : int
Parameter to limit memory usage. If the code raises memory error, lowering this
parameter might help.
Returns
-------
Dictionary in the form {tract index: average accessibility score}
"""
assert 0<k and type(k)==int, '"k" must be a positive integer'
# get places on the gdf
X = np.array([n.coords[0][0] for n in pois['geometry']])
Y = np.array([n.coords[0][1] for n in pois['geometry']])
#set places to nodes
nodes = ox.get_nearest_nodes(G,X,Y, method='balltree')
attrs = {}.fromkeys(G.nodes,0)
if pois_weight_column is None:
pois_weight_column = 'temp'
pois = pois.copy()
pois[pois_weight_column] = 1
for node, val in zip(nodes,pois[pois_weight_column]):
attrs[node] += val
nx.set_node_attributes(G,attrs,pois_weight_column)
# get igraph object for fast computations
Gig = get_full_igraph(G)
#create a dictionary for cross-references
node_dict = {}
for node in Gig.vs:
node_dict[int(node['osmid'])] = node
#get nodes to target (for faster shortest paths)
n_targets = [n for n in G.nodes if G.nodes[n][pois_weight_column]>0]
nig_targets = [node_dict[n] for n in n_targets]
vals = [G.nodes[n][pois_weight_column] for n in n_targets]
loop = tracts.iterrows()
X,Y = [],[]
for tract in tracts.iterrows():
tract = tract[1]
poly = tract['geometry']
# get k points within the polygon
X_,Y_ = random_points_in_polygon(k,poly,seed=random_seed)
#match points to graph
X+=X_
Y+=Y_
###here
X = np.array(X)
Y = np.array(Y)
trackt_ns = ox.get_nearest_nodes(G,X,Y,method='balltree')
ig_nodes = [node_dict[n] for n in trackt_ns]
#initiate total accessibility as zero
#calc distances to nodes
acc=[]
if len(ig_nodes)>=iter_cap*k:
loop = list(tracts.iterrows())
loop = [_[1] for _ in loop]
sects = [ig_nodes[x:x+iter_cap*k] for x in range(0,int((len(ig_nodes)//(iter_cap*k)+1)*(iter_cap*k))+1,iter_cap*k)]
loops = [loop[x:x+iter_cap] for x in range(0,int((len(loop)//(iter_cap)+1)*iter_cap)+1,iter_cap)]
# print(len(loops),len(sects))
for section,l in zip(sects,loops):
distances = Gig.shortest_paths_dijkstra(source=section, target=nig_targets, weights=weight)
n=0
for tract in l:
total_acc=0
for ds in distances[n:n+k]:
new = np.array(vals)*func(np.array(ds), **func_kws)
total_acc += new.sum()
acc.append(total_acc/k)
n+=k
else:
distances = Gig.shortest_paths_dijkstra(source=ig_nodes, target=nig_targets, weights=weight)
n=0
for tract in loop:
total_acc=0
for ds in distances[n:n+k]:
new = np.array(vals)*func(np.array(ds), **func_kws)
total_acc += new.sum()
acc.append(total_acc/k)
n+=k
return {i:a for i,a in zip(tracts.index,acc)}
| 5,340,468
|
def _exp_func(x, a, b, c):
"""Exponential function of a single variable, x.
Parameters
----------
x : float or numpy.ndarray
Input data.
a : float
First parameter.
b : float
Second parameter.
c : float
Third parameter.
Returns
-------
float or numpy.ndarray
a * exp(b * x) + c
"""
return a * np.exp(b * x) + c
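
# Hedged usage sketch (assumes SciPy is available; not part of the original
# module): _exp_func is the kind of model function typically handed to curve_fit.
import numpy as np
from scipy.optimize import curve_fit

_x_demo = np.linspace(0.0, 1.0, 50)
_y_demo = 2.0 * np.exp(1.5 * _x_demo) + 0.5
_popt_demo, _ = curve_fit(_exp_func, _x_demo, _y_demo, p0=(1.0, 1.0, 0.0))
# _popt_demo should be close to (2.0, 1.5, 0.5).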
| 5,340,469
|
def ilevenshtein(seq1, seqs, max_dist=-1):
"""Compute the Levenshtein distance between the sequence `seq1` and the series
of sequences `seqs`.
`seq1`: the reference sequence
`seqs`: a series of sequences (can be a generator)
`max_dist`: if provided and > 0, only the sequences which distance from
the reference sequence is lower or equal to this value will be returned.
The return value is a series of pairs (distance, sequence).
The sequence objects in `seqs` are expected to be of the same kind than
the reference sequence in the C implementation; the same holds true for
`ifast_comp`.
"""
for seq2 in seqs:
dist = levenshtein(seq1, seq2, max_dist=max_dist)
if dist != -1:
yield dist, seq2
| 5,340,470
|
def save_ages(history,outdir,index=0):
"""saves all cell ages for each tissue in history"""
if not os.path.exists(outdir): # if the folder doesn't exist create it
os.makedirs(outdir)
filename = "%s/ages_%03d"%(outdir,index)
    with open(filename, "w") as wfile:
        for tissue in history:
            for age in tissue.age:
                wfile.write("%.3e " % age)
            wfile.write("\n")
| 5,340,471
|
def seek_inactive(x, start, length, direction=-1, abstol=0):
""" Seek inactive region to the left of start
Example
-------
>>> # _______ |
>>> seek_inactive([3, 2, 1, 1, 1, 2, 3, 4, 2], start=7, length=3)
(1, slice(2, 4))
When no sufficiently long sequence is found we return the end
>>> # _ |
>>> seek_inactive([3, 2, 1, 1, 1, 2, 3, 4, 2], start=7, length=5)
(3, slice(0, 0))
"""
end = -1 if direction == -1 else len(x)
ind = start
for i in range(start, end, direction):
if abs(x[i] - x[ind]) > abstol:
ind = i
if abs(ind - i) >= length - 1:
return x[ind], slice(ind, i, direction)
if direction == 1:
return x[-1], slice(-1, -1)
else:
return x[0], slice(0, 0)
| 5,340,472
|
def test_Coding_inverted():
"""Reverse oriented coding transcript."""
crossmap = Coding(_exons, _cds, True)
# Boundary between 5' and CDS.
invariant(
crossmap.coordinate_to_coding, 43,
crossmap.coding_to_coordinate, (-1, 0, -1, 0))
invariant(
crossmap.coordinate_to_coding, 42,
crossmap.coding_to_coordinate, (1, 0, 0, 0))
# Boundary between CDS and 3'.
invariant(
crossmap.coordinate_to_coding, 32,
crossmap.coding_to_coordinate, (6, 0, 0, 0))
invariant(
crossmap.coordinate_to_coding, 31,
crossmap.coding_to_coordinate, (1, 0, 1, 0))
| 5,340,473
|
async def async_setup_entry(
hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the Firmata sensors."""
new_entities = []
board = hass.data[DOMAIN][config_entry.entry_id]
for sensor in board.sensors:
pin = sensor[CONF_PIN]
pin_mode = sensor[CONF_PIN_MODE]
differential = sensor[CONF_DIFFERENTIAL]
api = FirmataAnalogInput(board, pin, pin_mode, differential)
try:
api.setup()
except FirmataPinUsedException:
_LOGGER.error(
"Could not setup sensor on pin %s since pin already in use",
sensor[CONF_PIN],
)
continue
name = sensor[CONF_NAME]
sensor_entity = FirmataSensor(api, config_entry, name, pin)
new_entities.append(sensor_entity)
if new_entities:
async_add_entities(new_entities)
| 5,340,474
|
def export(est, export_dir, input_image_size=None):
"""Export graph to SavedModel and TensorFlow Lite.
Args:
est: estimator instance.
export_dir: string, exporting directory.
input_image_size: int, input image size.
Raises:
ValueError: the export directory path is not specified.
"""
if not export_dir:
raise ValueError('The export directory path is not specified.')
if not input_image_size:
input_image_size = FLAGS.input_image_size
tf.logging.info('Starting to export model.')
image_serving_input_fn = imagenet_input.build_image_serving_input_fn(
input_image_size)
est.export_saved_model(
export_dir_base=export_dir,
serving_input_receiver_fn=image_serving_input_fn)
| 5,340,475
|
def word_cross_product_phi(t1, t2):
"""Basis for cross-product features. This tends to produce pretty
dense representations.
Parameters
----------
t1, t2 : `nltk.tree.Tree`
As given by `str2tree`.
Returns
-------
defaultdict
Maps each (w1, w2) in the cross-product of `t1.leaves()` and
`t2.leaves()` to its count. This is a multi-set cross-product
(repetitions matter).
"""
return Counter([(w1, w2) for w1, w2 in product(t1.leaves(), t2.leaves())])
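
# Hedged usage sketch (assumes NLTK is installed, which the type hints already
# imply): the feature dict counts every leaf pairing across the two trees.
from nltk.tree import Tree

_t1_demo = Tree.fromstring("(S (NP dogs) (VP bark))")
_t2_demo = Tree.fromstring("(S (NP cats) (VP meow))")
word_cross_product_phi(_t1_demo, _t2_demo)
# -> Counter({('dogs', 'cats'): 1, ('dogs', 'meow'): 1,
#             ('bark', 'cats'): 1, ('bark', 'meow'): 1})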
| 5,340,476
|
def cube_filter_highpass(array, mode='laplacian', verbose=True, **kwargs):
"""
Apply ``frame_filter_highpass`` to the frames of a 3d or 4d cube.
Parameters
----------
array : numpy ndarray
Input cube, 3d or 4d.
mode : str, optional
``mode`` parameter to the ``frame_filter_highpass`` function. Defaults
to a Laplacian high-pass filter.
verbose : bool, optional
If ``True`` timing and progress bar are shown.
**kwargs : dict
Passed through to the ``frame_filter_highpass`` function.
Returns
-------
filtered : numpy ndarray
High-pass filtered cube.
"""
array_out = np.empty_like(array)
if array.ndim == 3:
for i in Progressbar(range(array.shape[0]), verbose=verbose):
array_out[i] = frame_filter_highpass(array[i], mode=mode, **kwargs)
elif array.ndim == 4:
for i in Progressbar(range(array.shape[1]), verbose=verbose):
for lam in range(array.shape[0]):
array_out[lam][i] = frame_filter_highpass(array[lam][i],
mode=mode, **kwargs)
else:
raise TypeError('Input array is not a 3d or 4d cube')
return array_out
| 5,340,477
|
def train(
mat_dir,
output_dir,
ckpt_dir,
blm_idx,
lr,
weight_decay,
device,
epoch,
):
""" Train refinement stage
Args:
mat_dir (str): mat file directories
output_dir (str): Output dir
ckpt_dir (str): Checkpoint directory for trained models
blm_idx (int): Index for targeted bilateral landmark of the mandible
lr (float): Learning rate
weight_decay (float): Weight_decay
device (str): Cuda or CPU
epoch (int): Total training epoch
"""
# Load training data (prediction from global, local stages)
gs_tr_mat = sio.loadmat(os.path.join(mat_dir, 'global_train'))
gs_t1_mat = sio.loadmat(os.path.join(mat_dir, 'global_test1'))
gs_t2_mat = sio.loadmat(os.path.join(mat_dir, 'global_test2'))
ls_tr_mat = sio.loadmat(os.path.join(mat_dir, 'local_train'))
ls_t1_mat = sio.loadmat(os.path.join(mat_dir, 'local_test1'))
ls_t2_mat = sio.loadmat(os.path.join(mat_dir, 'local_test2'))
gt_tr_mat = sio.loadmat(os.path.join(mat_dir, 'gt_train'))
gt_t1_mat = sio.loadmat(os.path.join(mat_dir, 'gt_test1'))
gt_t2_mat = sio.loadmat(os.path.join(mat_dir, 'gt_test2'))
gs_tr = {'x':torch.tensor(gs_tr_mat['x']), 'y':torch.tensor(gs_tr_mat['y'])}
gs_t1 = {'x':torch.tensor(gs_t1_mat['x']), 'y':torch.tensor(gs_t1_mat['y'])}
gs_t2 = {'x':torch.tensor(gs_t2_mat['x']), 'y':torch.tensor(gs_t2_mat['y'])}
ls_tr = {'x':torch.tensor(ls_tr_mat['x']), 'y':torch.tensor(ls_tr_mat['y'])}
ls_t1 = {'x':torch.tensor(ls_t1_mat['x']), 'y':torch.tensor(ls_t1_mat['y'])}
ls_t2 = {'x':torch.tensor(ls_t2_mat['x']), 'y':torch.tensor(ls_t2_mat['y'])}
gt_tr = {'x':torch.tensor(gt_tr_mat['x']), 'y':torch.tensor(gt_tr_mat['y'])}
gt_t1 = {'x':torch.tensor(gt_t1_mat['x']), 'y':torch.tensor(gt_t1_mat['y'])}
gt_t2 = {'x':torch.tensor(gt_t2_mat['x']), 'y':torch.tensor(gt_t2_mat['y'])}
# Load models
model = RefineModel([blm_idx])
model.to(device)
params_to_optimize = model.get_train_params(lr)
    # Optimizer and learning rate scheduler.
optimizer = torch.optim.Adam(params_to_optimize, lr=lr)
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer,
lambda x: (1 - x / (SZ_TRAINING * 1e7)) ** weight_decay
)
# Training
model.train()
for step in range(SZ_TRAINING * epoch):
n_train_data = gs_tr['x'].shape[0]
for b_idx in range(n_train_data):
inputs = torch.cat([
gs_tr['x'][b_idx], ls_tr['x'][b_idx],
gs_tr['y'][b_idx], ls_tr['y'][b_idx],
], dim=0).to(device)
targets = torch.cat([
gt_tr['x'][b_idx][[blm_idx]],
gt_tr['y'][b_idx][[blm_idx]]],
dim=0
).float().to(device)
outputs = model(inputs)
loss = model.calc_loss(preds=outputs, gt=targets)
optimizer.zero_grad()
loss.backward()
optimizer.step()
for p in model.parameters(): # Regulization
p.data.clamp_(0) # Make parameters larger than 0
lr_scheduler.step()
# Validation
model.eval()
n_test_data = gs_t1['x'].shape[0]
test_loss = 0
rs_t1_rst = []
with torch.no_grad():
for b_idx in range(n_test_data):
inputs = torch.cat([
gs_t1['x'][b_idx], ls_t1['x'][b_idx],
gs_t1['y'][b_idx], ls_t1['y'][b_idx],
], dim=0).to(device)
targets = torch.cat([
gt_t1['x'][b_idx], gt_t1['y'][b_idx]
], dim=0).float().to(device)
rs_t1_rst.append(model(inputs).detach().clone().cpu().unsqueeze(0))
rs_t1_rst = torch.cat(rs_t1_rst, dim=0)
val_dict = validation(blm_idx, rs_t1_rst, gt_t1)
# Save best model
if step == 0:
best_val_stat = {
'MRE': val_dict['MRE'],
'SDR': val_dict['SDR'],
'step': step
}
else:
best_val_mre = best_val_stat['MRE']['average']
cur_val_mre = val_dict['MRE']['average']
if best_val_mre > cur_val_mre:
best_val_stat['MRE'] = val_dict['MRE']
best_val_stat['SDR'] = val_dict['SDR']
best_val_stat['step'] = step
torch.save({
'model': model.state_dict(),},
os.path.join(ckpt_dir,
'best_{}.pth'.format(S_LM_NAME_DICT[blm_idx])
)
)
# Logging
print('Landmark: {}, Step: {}, MRE: {:04f}, SDR: {:04f}'.format(
S_LM_NAME_DICT[blm_idx],
step,
val_dict['MRE']['average'],
val_dict['SDR'][20]['average']
))
log_rs(
step=step,
output_dir=output_dir,
best_val_stat=best_val_stat,
val_dict=val_dict,
)
| 5,340,478
|
def translate_text(text: str, url: str, model_id) -> TranslatedObject:
"""Translates a text with the url of a translation server. The url is the url that comes up when you start the
translation model"""
assert type(text) == str, "Text has to be of type string"
assert type(url) == str, "Url has to be of type string"
model_ids = get_valid_model_ids()
if model_id not in model_ids:
raise ModelIDNotFoundException(model_id, model_ids)
# text = re.sub(r"([?.!,:;¿])", r" \1 ", text)
# text = re.sub(r'[" "]+', " ", text)
text = mt_en.tokenize(text, return_str=True)
url = f"{url}/translator/translate"
headers = {"Content-Type": "application/json"}
data = [{"src": text, "id": model_id}]
response = requests.post(url, json=data, headers=headers)
translation = response.text
jsn = json.loads(translation)
tokens = jsn[0][0]['tgt']
input_text = jsn[0][0]['src']
score = jsn[0][0]['pred_score']
# text = re.sub(r" ([?.!,:،؛؟¿])", r"\1", text)
# text = mt_nl.detokenize(tokens)
text = tokens
return TranslatedObject(input_text, text, score)
| 5,340,479
|
def quantized_avg_pool_run(shape, dtype1, shape_list, dtype2, ksize, strides,
padding, data_format, quant_algo,
scale_mode, scale_sqrt, attrs):
"""run function"""
if not isinstance(shape_list, (list, tuple, type(None))):
raise RuntimeError("shape_list should be a list, tuple or None!")
op_attrs = [ksize, strides, padding, data_format,
quant_algo, scale_mode, scale_sqrt]
if shape_list is None:
mod = utils.op_build_test(quantized_avg_pool, [shape], [dtype1],
op_attrs=[None] + op_attrs,
kernel_name='quantized_avgpool', attrs=attrs)
else:
mod = utils.op_build_test(quantized_avg_pool,
[shape, shape_list], [dtype1, dtype2],
op_attrs=op_attrs,
kernel_name='quantized_avgpool', attrs=attrs)
expect, inputs, out_buf = gen_data(shape, dtype1, shape_list, dtype2, ksize,
strides, padding, data_format, quant_algo,
scale_mode, scale_sqrt)
output = utils.mod_launch(mod, (*inputs, *out_buf), expect=expect)
rtol, atol = get_rtol_atol("quantized_avgpool", dtype1)
if expect.dtype in ("int8", "uint8"):
cmp_res = compare_int(output, expect)
else:
cmp_res = compare_tensor(output, expect, rtol=rtol, atol=atol)
return inputs, output, expect, cmp_res
| 5,340,480
|
def map_repositories(packages):
"""Map repositories from PES data to RHSM repository id"""
repositories_mapping = _get_repositories_mapping()
repo_without_mapping = set()
for pkg, repo in packages.items():
if repo not in repositories_mapping:
repo_without_mapping.add(pkg)
continue
packages[pkg] = repositories_mapping[repo]
for pkg in repo_without_mapping:
del packages[pkg]
if repo_without_mapping:
report_skipped_packages('packages will not be installed or upgraded due to repositories unknown to leapp:',
repo_without_mapping)
| 5,340,481
|
def dry(message, func, *args, **kw):
"""Wraps a function that performs a destructive operation, so that
nothing will happen when a dry run is requested.
Runs func with the given arguments and keyword arguments. If this
is a dry run, print the message rather than running the function."""
if message is not None:
info(message)
if tasks.environment.dry_run:
return
return func(*args, **kw)
| 5,340,482
|
def move_cups(current: int, cups: CircularLinkedList) -> int: # return the new current cup
"""
1. The crab picks up the three cups that are immediately clockwise of the
current cup. They are removed from the circle; cup spacing is adjusted
as necessary to maintain the circle.
2. The crab selects a destination cup: the cup with a label equal to the
current cup's label minus one. If this would select one of the cups that
was just picked up, the crab will keep subtracting one until it finds a
cup that wasn't just picked up. If at any point in this process the value
goes below the lowest value on any cup's label, it wraps around to the
highest value on any cup's label instead.
3. The crab places the cups it just picked up so that they are immediately
clockwise of the destination cup. They keep the same order as when they
were picked up.
4. The crab selects a new current cup: the cup which is immediately
clockwise of the current cup.
Note that the current cup is specified by its label.
"""
# Pick up some cups from the next available location...
adjacent = cups.next(current)
picked_up = cups.to_list(location=adjacent, length=3)
# find the destination cup...
target = current - 1
counter = 0
while (target in picked_up) or (target not in cups):
target -= 1
counter += 1
if target < 0:
target = max(cups)
if counter > len(cups):
raise AssertionError("Stuck!")
# move the cups...
cups.move(dst=target, src=adjacent, length=3)
# return the new current cup...
return cups.next(current)
| 5,340,483
|
def hire(name, address, salary, manager, is_active, Session=Session):
"""Add an employee to the bank."""
# get manager_id
if manager:
firstname, lastname = split_name(manager)
with Session() as session:
stmt = select(Employee).where(and_(
Employee.firstname == firstname,
Employee.lastname == lastname))
logger.debug(f"Executing statement: {stmt}")
manager = session.execute(stmt).scalar_one()
manager_id = manager.id if manager else None
logger.info(f"New hire's manager_id is {manager_id}")
try:
with Session() as session:
new_employee = Employee(
name, address, salary, manager_id, is_active)
logger.debug(f"Adding new employee {new_employee}")
session.add(new_employee)
session.commit()
logger.info(f"New hire's id is {new_employee.id}")
    except exc.SQLAlchemyError as e:
        logger.error(f"Failed to create new employee {name}: {e}")
        return None
    return new_employee
| 5,340,484
|
def get_colormap(n=18, randomize=True):
""" "Get expanded colormap"""
n_colors = np.ceil(n / 6) + 1
cols = []
for col in COLORS:
pal = sns.light_palette(col, n_colors=n_colors)
for rgb in pal[1:]:
cols.append(rgb)
if randomize:
shuffle(cols) # shuffle to break grouping
return ListedColormap(cols)
| 5,340,485
|
def partition2(n):
""" Coin partitions. Let partition(n) represent the number of different ways in which n coins can be separated into piles.
For example, five coins can be separated into piles in exactly seven different ways, so partition(5)=7. """
# dynamic programming table, table cell (i,j), parition size = i + 1, target n = i + 1, cell value = partition(n)
dp = {} # using dict as dynamic programming table is really slow
for i in range(n):
dp[(0,i)] = 1 # One way to partition any n using piles of size 1
dp[(i,0)] = 1 # One way to partition n=1
for i in range(1,n):
for j in range(1,n):
value = dp[(i-1,j)] # Include ways to partition n using piles <i
if i == j:
value += 1 # One way to make n using piles of the same size
elif j > i:
value += dp[(i,j-i-1)] # Include ways to make j-i using piles of size <i
dp[(i,j)] = value
if i == j:
print(i+1,value)
if value % N == 0:
print('result',i+1,value)
return value
return dp[(n-1,n-1)]
| 5,340,486
|
def all_gather_batch(tensors):
"""
Performs all_gather operation on the provided tensors.
"""
# Queue the gathered tensors
world_size = get_world_size()
# There is no need for reduction in the single-proc case
if world_size == 1:
return tensors
tensor_list = []
output_tensor = []
for tensor in tensors:
tensor_all = [torch.ones_like(tensor) for _ in range(world_size)]
dist.all_gather(
tensor_all,
tensor,
async_op=False # performance opt
)
tensor_list.append(tensor_all)
for tensor_all in tensor_list:
output_tensor.append(torch.cat(tensor_all, dim=0))
return output_tensor
| 5,340,487
|
def get_duration_and_elevation(table):
""""Return an array of duration and elevation gain from an html table"""
try:
hiking_duration = str(table.contents[0].text.strip()) #av.note: want this to be numeric
    except Exception:
hiking_duration = ""
try:
elevation_gain_ft = str(
table.contents[2]
.text.strip()
.replace("ft", "")
.replace(",", "")
.replace("with three different ascents", "")
.replace("with multiple ascents", "")
.replace("with two ascents", "")
.replace("with two different ascents", "")
.strip()
) #av.note: want this to be numeric
    except Exception:
elevation_gain_ft = ""
return hiking_duration, elevation_gain_ft
| 5,340,488
|
def display_nft_of_the_day(export: str) -> None:
"""Shows NFT of the day "https://www.coingecko.com/en/nft" [Source: CoinGecko]
NFT (Non-fungible Token) refers to digital assets with unique characteristics.
Examples of NFT include crypto artwork, collectibles, game items, financial products, and more.
Parameters
----------
export: str
Export dataframe data to csv,json,xlsx
"""
df = gecko.get_nft_of_the_day()
if gtff.USE_TABULATE_DF:
print(
tabulate(
df,
headers=df.columns,
floatfmt=".2f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
else:
        console.print(df.to_string(), "\n")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"nftday",
df,
)
| 5,340,489
|
def show_comparison(model, X_test, y_test, A_test, protected_features, postprocess_preds):
    """
    Returns a FairlearnDashboard comparing the models on the trade-off between disparity and accuracy.
    """
    dashboard = FairlearnDashboard(sensitive_features=A_test, sensitive_feature_names=protected_features,
                                   y_true=y_test,
                                   y_pred={"Unmitigated": model.predict(X_test),
                                           "ThresholdOptimizer": postprocess_preds})
    return dashboard
| 5,340,490
|
def my_edge(bw, threshold):
"""
2018.11.26
检测图像边缘
返回检测到的边缘二值图像
阈值用于消去检测到的噪声
时间复杂度:
Args:
bw: a grey-scale image with 8-bit depth
threshold: a decimal between 0 and 1
Returns:
bw_edge_binary: 二值化的边缘图像
Raises:
"""
m, n = bw.shape
bw0 = bw.astype(np.int16)
bw_edge_rows = np.zeros([m, n])
bw_edge_cols = np.zeros([m, n])
for i in range(m-1):
bw_edge_rows[i, :] = abs(bw0[i+1, :] - bw0[i, :])
bw_edge_rows[m-1, :] = 0
for j in range(n-1):
bw_edge_cols[:, j] = abs(bw0[:, j+1] - bw0[:, j])
bw_edge_cols[:, n-1] = 0
bw_edge = np.sqrt(bw_edge_cols*bw_edge_cols + bw_edge_rows*bw_edge_rows)
index_threshold = bw_edge.max()*threshold
bw_edge_binary = np.zeros([m, n])
for i in range(m):
for j in range(n):
if bw_edge[i, j] > index_threshold:
bw_edge_binary[i, j] = 1
return bw_edge_binary
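
# Hedged usage sketch (illustrative only): a bright square on a dark background
# yields a binary edge map with ones around the square's border.
_bw_demo = np.zeros((8, 8), dtype=np.uint8)
_bw_demo[2:6, 2:6] = 255
_edges_demo = my_edge(_bw_demo, threshold=0.5)
# _edges_demo is an 8x8 array of 0.0/1.0 values marking the intensity transitions.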
| 5,340,491
|
def csc_matvec(csc, x):
"""
Matrix vector multiplication
using csc format
"""
if not sparse.isspmatrix_csc(csc):
raise Exception("Matrix must be in csc format")
nrow, ncol = csc.shape
nnz = csc.data.shape[0]
if x.size != ncol:
print(x.size, ncol)
raise ValueError("wrong dimension!")
xx = np.require(x, requirements="C")
if csc.dtype == np.float32:
y = np.zeros((nrow), dtype=np.float32)
libsparsetools.scsc_matvec(c_int(nrow), c_int(ncol), c_int(nnz),
csc.indptr.ctypes.data_as(POINTER(c_int)),
csc.indices.ctypes.data_as(POINTER(c_int)),
csc.data.ctypes.data_as(POINTER(c_float)),
xx.ctypes.data_as(POINTER(c_float)),
y.ctypes.data_as(POINTER(c_float)))
elif csc.dtype == np.float64:
y = np.zeros((nrow), dtype=np.float64)
libsparsetools.dcsc_matvec(c_int(nrow), c_int(ncol), c_int(nnz),
csc.indptr.ctypes.data_as(POINTER(c_int)),
csc.indices.ctypes.data_as(POINTER(c_int)),
csc.data.ctypes.data_as(POINTER(c_double)),
xx.ctypes.data_as(POINTER(c_double)),
y.ctypes.data_as(POINTER(c_double)))
else:
raise ValueError("Not implemented")
return y
| 5,340,492
|
def cmd_IMC2chan(command):
"""
@imc2chan
Usage:
@imc2chan <IMCServer> : <IMCchannel> <channel>
Links an IMC channel to an existing evennia
channel. You can link as many existing
evennia channels as you like to the
IMC channel this way. Running the command with an
existing mapping will re-map the channels.
Use 'imcchanlist' to get a list of IMC channels and
servers. Note that both are case sensitive.
"""
source_object = command.source_object
if not settings.IMC2_ENABLED:
s = """IMC is not enabled. You need to activate it in game/settings.py."""
source_object.emit_to(s)
return
args = command.command_argument
if not args or len(args.split()) != 2 :
source_object.emit_to("Usage: @imc2chan IMCServer:IMCchannel channel")
return
#identify the server-channel pair
imcdata, channel = args.split()
if not ":" in imcdata:
source_object.emit_to("You need to supply an IMC Server:Channel pair.")
return
imclist = IMC2_CHANLIST.get_channel_list()
imc_channels = filter(lambda c: c.name == imcdata, imclist)
if not imc_channels:
source_object.emit_to("IMC server and channel '%s' not found." % imcdata)
return
else:
imc_server_name, imc_channel_name = imcdata.split(":")
#find evennia channel
try:
chanobj = comsys.get_cobj_from_name(channel)
except CommChannel.DoesNotExist:
source_object.emit_to("Local channel '%s' not found (use real name, not alias)." % channel)
return
#create the mapping.
outstring = ""
mapping = IMC2ChannelMapping.objects.filter(channel__name=channel)
if mapping:
mapping = mapping[0]
outstring = "Replacing %s. New " % mapping
else:
mapping = IMC2ChannelMapping()
mapping.imc2_server_name = imc_server_name
mapping.imc2_channel_name = imc_channel_name
mapping.channel = chanobj
mapping.save()
outstring += "Mapping set: %s." % mapping
source_object.emit_to(outstring)
| 5,340,493
|
async def delete_all_groups_for_user(
user_id: int, query: CreateActionLogQuery, db: Session = Depends(get_db)
) -> Response:
"""
When a user removes his/her profile, make the user leave all groups.
This API is run asynchronously, and returns a `201 Created` instead of
`200 OK`.
**Potential error codes in response:**
* `250`: if an unknown error occurred.
"""
def leave_all_groups(user_id_, query_, db_):
environ.env.rest.group.delete_all_groups_for_user(user_id_, query_, db_)
try:
task = BackgroundTask(leave_all_groups, user_id_=user_id, query_=query, db_=db)
return Response(background=task, status_code=HTTP_201_CREATED)
except Exception as e:
log_error_and_raise_unknown(sys.exc_info(), e)
| 5,340,494
|
def encode_base58(s: bytes) -> str:
    """
    Encode bytes as a Base58 string (used, e.g., to transmit a public key)
    """
count = 0
for c in s:
if c == 0:
count += 1
else:
break
num = int.from_bytes(s, 'big')
prefix = '1' * count
result = ''
while num > 0:
num, mod = divmod(num, 58)
result = BASE58_ALPHABET[mod] + result
return prefix + result
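
# Hedged usage note (assumes the module-level BASE58_ALPHABET is the standard
# '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'): each leading
# zero byte becomes a leading '1', and the rest is the big-endian value written
# in base 58, e.g. encode_base58(b'\x00\x00\x01') -> '112'.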
| 5,340,495
|
def get_config():
"""Get config from env vars.
Return:
dict: Keys are: policy_url, dane_id, policy_file_dir, crypto_path,
policy_name, ssids.
"""
config = {}
for x in ["policy_url", "policy_file_dir", "dane_id",
"crypto_path", "policy_name", "app_uid", "roles",
"trust_infile_path"]:
config[x] = os.getenv(x.upper())
for k, v in config.items():
if v is None:
print("Missing essential configuration: {}".format(k.upper()))
if None in config.values():
time.sleep(30)
sys.exit(1)
return config
| 5,340,496
|
def unix_only(f):
"""Only execute on unix systems"""
f.__test__ = os.name == "posix"
return f
| 5,340,497
|
def save_chapter(
body,
source_lang,
target_lang,
title,
public=False,
user=None):
"""Save chapter to database
Parameters:
body (string): input text
source_lang (string): source language
target_lang (string): target language
title (string): title of the chapter
public: visible to all users if true
user (User object): user that created the chapter
Returns:
Chapter: Chapter object created from the given parameters
boolean: True if text was analyzed, False if not
"""
# save chapter
chapter = Chapter()
chapter.body = body
chapter.created_by = user
chapter.title = title
chapter.source_lang = source_lang
chapter.target_lang = target_lang
chapter.public = public
chapter.save()
fulltext = title + ' ' + body
doc = spacy_analyze(fulltext, source_lang)
if doc:
word_properties = analyze_text(doc)
word_list = translate_words(
word_properties,
source_lang,
target_lang
)
# save word properties related to chapter
for w in word_list:
properties = word_properties.get(w.lemma)
wp = WordProperties()
if properties:
if properties['pos'] == w.pos:
wp.frequency = properties['count']
token_list = properties.get('orig')
if token_list:
wp.token = ', '.join(token_list)
wp.chapter = chapter
wp.word = w
wp.save()
return (chapter, True)
return (chapter, False)
| 5,340,498
|
def frequency(g, k, h):
"""
Computes the frequency for a given wave number and water depth
(linear dispersion relationship)
:param k: the wave number
:param g: -- gravitational acceleration
:param h: -- the water depth
:returns omega: -- wave frequency
"""
return np.sqrt(g * k * np.tanh(k * h))
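
# Hedged usage sketch (illustrative only): in deep water tanh(k*h) -> 1, so the
# dispersion relation reduces to omega ~= sqrt(g*k).
_omega_demo = frequency(g=9.81, k=0.1, h=500.0)
# _omega_demo ~= 0.99 rad/s, essentially sqrt(9.81 * 0.1)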
| 5,340,499
|