Dataset columns (name | dtype | observed range):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40..40 |
| size | int64 | 3 .. 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3..972 |
| max_stars_repo_name | string | length 6..130 |
| max_stars_repo_head_hexsha | string | length 40..78 |
| max_stars_repo_licenses | list | length 1..10 |
| max_stars_count | int64 | 1 .. 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24..24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24..24, nullable |
| max_issues_repo_path | string | length 3..972 |
| max_issues_repo_name | string | length 6..130 |
| max_issues_repo_head_hexsha | string | length 40..78 |
| max_issues_repo_licenses | list | length 1..10 |
| max_issues_count | int64 | 1 .. 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24..24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24..24, nullable |
| max_forks_repo_path | string | length 3..972 |
| max_forks_repo_name | string | length 6..130 |
| max_forks_repo_head_hexsha | string | length 40..78 |
| max_forks_repo_licenses | list | length 1..10 |
| max_forks_count | int64 | 1 .. 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24..24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24..24, nullable |
| content | string | length 3 .. 1.03M |
| avg_line_length | float64 | 1.13 .. 941k |
| max_line_length | int64 | 2 .. 941k |
| alphanum_fraction | float64 | 0 .. 1 |
hexsha: 178907a0d85d7faa09009d64b47aa951a1ef96e7 | size: 9,038 | ext: py | lang: Python
max_stars:  scratch/losses/plot_predictions.py | finn-dodgson/DeepHalos | 86e0ac6c24ac97a0a2a0a60a7ea3721a04bd050c | ["MIT"] | count: 2 | 2021-07-26T10:56:33.000Z | 2021-12-20T17:30:53.000Z
max_issues: scratch/losses/plot_predictions.py | finn-dodgson/DeepHalos | 86e0ac6c24ac97a0a2a0a60a7ea3721a04bd050c | ["MIT"] | count: 1 | 2021-11-25T21:01:19.000Z | 2021-12-05T01:40:53.000Z
max_forks:  scratch/losses/plot_predictions.py | finn-dodgson/DeepHalos | 86e0ac6c24ac97a0a2a0a60a7ea3721a04bd050c | ["MIT"] | count: 1 | 2021-11-27T02:35:10.000Z | 2021-11-27T02:35:10.000Z
content:
import numpy as np
import matplotlib.pyplot as plt
from pickle import load
############# PLOT ##############
def cauchy_selection_loss_numpy(y_true, y_predicted):
    y_max = 1
    y_min = -1
    gamma = 1
    r = (y_true - y_predicted)/gamma
    epsilon = 10**-6
    tail_term = np.log(1 + np.square(r))
    selection_term = np.log(np.arctan((y_max - y_predicted)/gamma) - np.arctan((y_min - y_predicted)/gamma) + epsilon)
    loss = tail_term + selection_term
    return np.mean(loss, axis=-1)
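# The function above appears to implement, per sample,
#   L = log(1 + ((y_true - y_pred) / gamma)**2)
#       + log(arctan((y_max - y_pred) / gamma) - arctan((y_min - y_pred) / gamma) + epsilon),
# i.e. a Cauchy (Lorentzian) negative log-likelihood plus a normalisation term that
# restricts the distribution to the bounded target range [y_min, y_max].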
def loss(y_true, y_predicted, scaler):
    y_predicted1 = scaler.transform(y_predicted.reshape(-1, 1)).flatten()
    y_true1 = scaler.transform(y_true.reshape(-1, 1)).flatten()
    return cauchy_selection_loss_numpy(y_true1, y_predicted1)
p_tr10 = np.load("predicted_training_10.npy")
t_tr10 = np.load("true_training_10.npy")
p10 = np.load("predicted_val_10.npy")
t10 = np.load("true_val_10.npy")
p_tr20 = np.load("predicted_training_20.npy")
t_tr20 = np.load("true_training_20.npy")
p20 = np.load("predicted_val_20.npy")
t20 = np.load("true_val_20.npy")
p_tr35 = np.load("predicted_training_35.npy")
t_tr35 = np.load("true_training_35.npy")
p35 = np.load("predicted_val_35.npy")
t35 = np.load("true_val_35.npy")
p_tr50 = np.load("predicted_training_50.npy")
t_tr50 = np.load("true_training_50.npy")
p50 = np.load("predicted_val_50.npy")
t50 = np.load("true_val_50.npy")
p_tr100 = np.load("predicted_training_100.npy")
t_tr100 = np.load("true_training_100.npy")
p100 = np.load("predicted_val_100.npy")
t100 = np.load("true_val_100.npy")
scaler_training_set = load(open('../mse/scaler_output.pkl', 'rb'))
f, axes = plt.subplots(4, 2, figsize=(12, 7), sharey=True, sharex=True)
axes[0,0].scatter(t_tr10, p_tr10, s=0.1, label="Epoch 10, L=%.3f" % loss(t_tr10, p_tr10, scaler_training_set))
axes[0, 1].scatter(t10, p10, s=0.2, label="Epoch 10, L=%.3f" % loss(t10, p10, scaler_training_set))
axes[0,0].set_title("Training set")
axes[0,1].set_title("Validation set")
axes[1,0].scatter(t_tr20, p_tr20, s=0.1, label="Epoch 20, L=%.3f" % loss(t_tr20, p_tr20, scaler_training_set))
axes[1, 1].scatter(t20, p20, s=0.2, label="Epoch 20, L=%.3f" % loss(t20, p20, scaler_training_set))
axes[2,0].scatter(t_tr35, p_tr35, s=0.1, label="Epoch 35, L=%.3f" % loss(t_tr35, p_tr35, scaler_training_set))
axes[2,1].scatter(t35, p35, s=0.2, label="Epoch 35, L=%.3f" % loss(t35, p35, scaler_training_set))
axes[3,0].scatter(t_tr50, p_tr50, s=0.1, label="Epoch 50, L=%.3f" % loss(t_tr50, p_tr50, scaler_training_set))
axes[3,1].scatter(t50, p50, s=0.2, label="Epoch 50, L=%.3f" % loss(t50, p50, scaler_training_set))
# axes[3,0].scatter(t_tr100, p_tr100, s=0.1, label="Epoch 100, MSE=%.3f" % mse(t_tr50, p_tr50))
# axes[3,1].scatter(t100, p100, s=0.2, label="Epoch 100, MSE=%.3f" % mse(t50, p50))
for ax in axes.flatten():
    ax.plot([t_tr20.min(), t_tr20.max()], [t_tr20.min(), t_tr20.max()], color="grey")
    ax.legend(loc=2, fontsize=13)
plt.subplots_adjust(left=0.08, top=0.94, bottom=0.12, wspace=0, hspace=0)
axes[3,1].set_ylim(9.5, 14.8)
f.text(0.5, 0.01,r"$\log(M_{\mathrm{truth}}/M_\odot)$")
f.text(0.01, 0.4,r"$\log(M_{\mathrm{predicted}}/M_\odot)$", rotation=90)
# f, axes = plt.subplots(1, 3, figsize=(10, 5), sharey=True)
# plt.subplots_adjust(wspace=0, top=0.92, left=0.1)
# truth_values = [-1, 0, 1]
# xs = [np.linspace(-1.5, -0.5, 10000), np.linspace(-0.5, 0.5, 100000), np.linspace(0.5, 1.5, 100000)]
# for i in range(3):
# axes[i].plot(xs[i], squared_error_numpy(truth_values[i], xs[i]) -
# squared_error_numpy(truth_values[i], truth_values[i]), label=r"$\mathcal{L}_\mathrm{MSE}$")
# axes[i].plot(xs[i], L.loss_range(truth_values[i], xs[i]) -
# L.loss_range(truth_values[i], truth_values[i]), label=r"$\mathcal{L}_C$")
# axes[i].plot(xs[i], L.loss(truth_values[i], xs[i]) -
# L.loss(truth_values[i], truth_values[i]), label=r"$\mathcal{L}_B$")
# axes[i].axvline(x=truth_values[i], ls="--", color="grey")
# axes[i].set_xlabel("x")
#
# axes[1].legend(loc="best")
# plt.ylim(-0.1, 5)
# axes[0].set_ylabel("Loss")
tr_bound = np.loadtxt("/Users/lls/Desktop/cauchy_selec_bound/test/training.log", delimiter=",", skiprows=1)
tr_gamma = np.loadtxt("/Users/lls/Desktop/cauchy_selec_gamma_bound/training.log", delimiter=",", skiprows=1)
# tr_bound = tr  # stray line: `tr` is never defined in this script, so the assignment is left commented out
# f, axes = plt.subplots(1, 3, sharex=True, figsize=(12, 5))  # superseded by the single-panel figure below
f, axes = plt.subplots(1, 1)
axes.plot(tr_bound[:,0], tr_bound[:, 1], color="C0", label="$\gamma = 0.2$")
axes.plot(tr_gamma[:,0], tr_gamma[:, 1], color="C1", label="$\gamma$ trainable")
axes.plot(tr_bound[:,0], tr_bound[:, 4], color="C0", ls="--")
axes.plot(tr_gamma[:,0], tr_gamma[:, 5], color="C1", ls="--")
axes.set_ylabel("Loss", fontsize=14)
# axes[1].plot(tr_bound[:,0], tr_bound[:, 2], color="C0")
# # axes[1].plot(tr_gamma[:,0], tr_gamma[:, 3], color="C1")
# axes[1].plot(tr_bound[:,0], tr_bound[:, 5], color="C0", ls="--")
# # axes[1].plot(tr_gamma[:,0], tr_gamma[:, 7], color="C1", ls="--")
# axes[1].set_ylabel("MAE", fontsize=14)
#
# axes[2].plot(tr_bound[:,0], tr_bound[:, 3], color="C0")
# # axes[2].plot(tr_gamma[:,0], tr_gamma[:, 4], color="C1")
# axes[2].plot(tr_bound[:,0], tr_bound[:, 6], color="C0", ls="--")
# # axes[2].plot(tr_gamma[:,0], tr_gamma[:, 8], color="C1", ls="--")
# axes[2].set_ylabel("MSE", fontsize=14)
# axes[1].set_xlabel("Epoch", fontsize=14)
for ax in np.atleast_1d(axes):  # handles both a single Axes and an array of Axes
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(14)
    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(14)
plt.subplots_adjust(left=0.07, bottom=0.14, wspace=0.35, right=0.96, top=0.91)
############## PLOT #################
scaler_training_set = load(open('../mse/scaler_output.pkl', 'rb'))
epochs = ["15", "25"]
f, axes = plt.subplots(2, len(epochs), figsize=(13, 7), sharey=True, sharex=True)
for i, epoch in enumerate(epochs):
    p = np.load("predicted_training_" + epoch + ".npy")
    t = np.load("true_training_" + epoch + ".npy")
    if i == 0:
        axes[0, i].scatter(t, p, s=0.01, color="C0", label="Training set, L=%.3f" % loss(t, p, scaler_training_set))
    else:
        axes[0, i].scatter(t, p, s=0.01, color="C0", label="L=%.3f" % loss(t, p, scaler_training_set))
    axes[0, i].set_title("Epoch " + epoch, fontsize=13)
    p = np.load("predicted_val_" + epoch + ".npy")
    t = np.load("true_val_" + epoch + ".npy")
    if i == 0:
        axes[1, i].scatter(t, p, s=0.1, color="C1", label="Validation set, L=%.3f" % loss(t, p, scaler_training_set))
    else:
        axes[1, i].scatter(t, p, s=0.1, color="C1", label="L=%.3f" % loss(t, p, scaler_training_set))
for ax in axes.flatten():
    ax.plot([t.min(), t.max()], [t.min(), t.max()], color="grey")
    ax.legend(loc=2, fontsize=13)
plt.subplots_adjust(left=0.08, top=0.94, bottom=0.12, wspace=0, hspace=0)
axes[0,0].set_ylim(10.3, 13.1)
# f.text(0.5, 0.01,r"$\log(M_{\mathrm{truth}}/M_\odot)$")
axes[1, 1].set_xlabel(r"$\log(M_{\mathrm{truth}}/M_\odot)$", fontsize=16)
f.text(0.01, 0.4,r"$\log(M_{\mathrm{predicted}}/M_\odot)$", rotation=90, fontsize=16)
def plot_diff_predicted_true_mass_ranges(predictions, truths, mass_bins, xbins, figure=None,
                                         figsize=(10, 5.1),
                                         col_truth="dimgrey", lw=1.8,
                                         density=True):
    if figure is None:
        f, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=figsize, sharey=True, sharex=True)
    else:
        f, (ax1, ax2, ax3) = figure[0], figure[1]
    ax1.axvline(x=0, color=col_truth, ls="--")
    ax2.axvline(x=0, color=col_truth, ls="--")
    ax3.axvline(x=0, color=col_truth, ls="--")
    pred_low = (truths >= mass_bins[0]) & (truths < mass_bins[1])
    pred_mid = (truths >= mass_bins[1]) & (truths < mass_bins[2])
    pred_high = (truths >= mass_bins[2]) & (truths < mass_bins[3])
    _ = ax1.hist(predictions[pred_low] - truths[pred_low], bins=xbins,
                 histtype="step", density=density, lw=lw, color="C0")
    ax1.set_title(r"$ %.2f \leq \log(M_{\mathrm{true}}) \leq %.2f$" % (mass_bins[0], mass_bins[1]))
    _ = ax2.hist(predictions[pred_mid] - truths[pred_mid], bins=xbins,
                 histtype="step", density=density, lw=lw, color="C0")
    ax2.set_title(r"$%.2f \leq \log(M_{\mathrm{true}}) \leq %.2f$" % (mass_bins[1], mass_bins[2]))
    dd = ax3.hist(predictions[pred_high] - truths[pred_high], bins=xbins,
                  histtype="step", density=density, lw=lw, color="C0")
    ax3.set_title(r"$%.2f \leq \log(M_{\mathrm{true}}) \leq %.2f$" % (mass_bins[2], mass_bins[3]))
    plt.subplots_adjust(wspace=0, bottom=0.14, left=0.08)
    ax1.set_ylabel(r"$n_{\mathrm{particles}}$")
    ax1.set_xlabel(r"$\log(M_{\mathrm{predicted}}/M_{\mathrm{true}})$")
    ax2.set_xlabel(r"$\log(M_{\mathrm{predicted}}/M_{\mathrm{true}})$")
    ax3.set_xlabel(r"$\log(M_{\mathrm{predicted}}/M_{\mathrm{true}})$")
    ax3.set_xticks([-4, -3, -2, -1, 0, 1, 2, 3, 4])
    return f, (ax1, ax2, ax3)
avg_line_length: 41.64977 | max_line_length: 118 | alphanum_fraction: 0.628347
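For reference, a standalone sketch (not part of the repository) of how the plot_diff_predicted_true_mass_ranges helper above might be driven; the synthetic predictions, mass bins and histogram bins below are made up for illustration:

import numpy as np
import matplotlib.pyplot as plt

truths = np.random.uniform(10, 14, size=5000)               # synthetic true log-masses
predictions = truths + np.random.normal(0, 0.3, size=5000)  # synthetic predictions with scatter
mass_bins = [10.0, 11.3, 12.6, 14.0]                        # three illustrative mass ranges
xbins = np.linspace(-2, 2, 50)
fig, (ax1, ax2, ax3) = plot_diff_predicted_true_mass_ranges(predictions, truths, mass_bins, xbins)
plt.show()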
hexsha: f2685fdf37bdbbf4b97d87c067d4a4aa0ce664d7 | size: 777 | ext: py | lang: Python
max_stars:  tools/sqlmap/waf/modsecurity.py | glaudsonml/kurgan-ai | c0ad4450f9fb2004f35b8a0201bfe894e01adc8f | ["Apache-2.0"] | count: 35 | 2017-05-22T14:42:01.000Z | 2020-09-07T21:24:41.000Z
max_issues: tools/sqlmap/waf/modsecurity.py | tmaxter/kurgan-ai | c0ad4450f9fb2004f35b8a0201bfe894e01adc8f | ["Apache-2.0"] | count: null | null | null
max_forks:  tools/sqlmap/waf/modsecurity.py | tmaxter/kurgan-ai | c0ad4450f9fb2004f35b8a0201bfe894e01adc8f | ["Apache-2.0"] | count: 5 | 2017-12-19T03:36:54.000Z | 2021-04-14T18:05:08.000Z
content:
#!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.enums import HTTP_HEADER
from lib.core.settings import WAF_ATTACK_VECTORS
__product__ = "ModSecurity: Open Source Web Application Firewall (Trustwave)"
def detect(get_page):
retval = False
for vector in WAF_ATTACK_VECTORS:
page, headers, code = get_page(get=vector)
retval = code == 501 and re.search(r"Reference #[0-9A-Fa-f.]+", page, re.I) is None
retval |= re.search(r"Mod_Security|NOYB", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
retval |= "This error was generated by Mod_Security" in page
if retval:
break
return retval
avg_line_length: 28.777778 | max_line_length: 104 | alphanum_fraction: 0.685972
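As a standalone illustration (not part of sqlmap), the Server-header test used in detect() above can be exercised with a made-up header value:

import re

server_header = "Apache/2.4 (Mod_Security enabled)"  # hypothetical value for illustration
print(re.search(r"Mod_Security|NOYB", server_header, re.I) is not None)  # True -> WAF detected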
hexsha: 94c151527ff4d9456770ff0a0a85d594c08cbb81 | size: 655 | ext: py | lang: Python
max_stars:  search_in_rotated_sorted_array.py | sarveshbhatnagar/CompetetiveProgramming | 09ff483eb17c3292c9b7fa03e622a240406ce8ca | ["MIT"] | count: null | null | null
max_issues: search_in_rotated_sorted_array.py | sarveshbhatnagar/CompetetiveProgramming | 09ff483eb17c3292c9b7fa03e622a240406ce8ca | ["MIT"] | count: null | null | null
max_forks:  search_in_rotated_sorted_array.py | sarveshbhatnagar/CompetetiveProgramming | 09ff483eb17c3292c9b7fa03e622a240406ce8ca | ["MIT"] | count: null | null | null
content:
from typing import List
class Solution:
def search(self, nums: List[int], target: int) -> int:
l = 0
r = len(nums)-1
while l <= r:
mid = (l+r) // 2
if target == nums[mid]:
return mid
# left portion sorted...
if nums[l] <= nums[mid]:
if target > nums[mid] or target < nums[l]:
l = mid + 1
else:
r = mid - 1
else:
if target < nums[mid] or target > nums[r]:
r = mid-1
else:
l = mid + 1
return -1
avg_line_length: 23.392857 | max_line_length: 58 | alphanum_fraction: 0.370992
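A quick illustrative check of the rotated-array search above (the input array and targets are arbitrary examples):

if __name__ == "__main__":
    print(Solution().search([4, 5, 6, 7, 0, 1, 2], 0))  # 4  (index of target 0)
    print(Solution().search([4, 5, 6, 7, 0, 1, 2], 3))  # -1 (target not present)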
hexsha: a8639b178dc1448f1a94b7cd466a1b82e40dbdaa | size: 397 | ext: py | lang: Python
max_stars:  QuestionApp/QuestionApp/wsgi.py | darpankakadia/Survey | 6f29836b1bb003bc74a18acc85e8b587d492ba42 | ["MIT"] | count: null | null | null
max_issues: QuestionApp/QuestionApp/wsgi.py | darpankakadia/Survey | 6f29836b1bb003bc74a18acc85e8b587d492ba42 | ["MIT"] | count: null | null | null
max_forks:  QuestionApp/QuestionApp/wsgi.py | darpankakadia/Survey | 6f29836b1bb003bc74a18acc85e8b587d492ba42 | ["MIT"] | count: null | null | null
content:
"""
WSGI config for QuestionApp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "QuestionApp.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
avg_line_length: 26.466667 | max_line_length: 78 | alphanum_fraction: 0.793451
hexsha: a830a0598bd668f49de5eee39ca7ee9fbf4b8ad5 | size: 1,022 | ext: py | lang: Python
max_stars:  archive.py | towerjoo/certbot-route53 | e2f406cb233a0266893573b989cee491637e577d | ["MIT"] | count: null | null | null
max_issues: archive.py | towerjoo/certbot-route53 | e2f406cb233a0266893573b989cee491637e577d | ["MIT"] | count: null | null | null
max_forks:  archive.py | towerjoo/certbot-route53 | e2f406cb233a0266893573b989cee491637e577d | ["MIT"] | count: 1 | 2019-10-23T06:30:40.000Z | 2019-10-23T06:30:40.000Z
content:
import os
from zipfile import ZipFile
ROOT = os.path.abspath(os.path.dirname(__file__))
domains = ["tradexport.com",
"tradexport.cn",
"zeno.group",
"zeno-dev.com",
]
finalfiles = []
for domain in domains:
folder = os.path.join(ROOT, "letsencrypt/archive/", domain)
no = 1
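    # Walk fullchain1.pem, fullchain2.pem, ... until the first missing index; `no` ends up
    # as the highest index present in the folder (presumably the most recent renewal).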
while True:
name = "fullchain{}.pem".format(no)
files = os.listdir(folder)
if name not in files:
no -= 1
break
no += 1
fullchain = "fullchain{}.pem".format(no)
privkey = "privkey{}.pem".format(no)
finalfiles.append(
{
"name":"{}.fullchain".format(domain),
"path": os.path.join(folder, fullchain),
},
)
finalfiles.append(
{
"name": "{}.privkey".format(domain),
"path": os.path.join(folder, privkey),
}
)
with ZipFile('certs.zip', 'w') as myzip:
for f in finalfiles:
myzip.write(f["path"], f["name"])
avg_line_length: 23.767442 | max_line_length: 63 | alphanum_fraction: 0.517613
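As an illustrative follow-up (not part of the script), the resulting archive can be inspected to confirm it holds one fullchain and one privkey entry per domain:

from zipfile import ZipFile

with ZipFile("certs.zip") as archive:
    print(archive.namelist())  # e.g. ['tradexport.com.fullchain', 'tradexport.com.privkey', ...]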
hexsha: 9b762526c01e6d1b2aea021d37d61ace989c1c11 | size: 1,017 | ext: py | lang: Python
max_stars:  tests/test_delaunay.py | Jeremiah-England/Shapely | 769b203f2b7cbeeb0a694c21440b4025a563f807 | ["BSD-3-Clause"] | count: 2,382 | 2015-01-04T03:16:59.000Z | 2021-12-10T15:48:56.000Z
max_issues: tests/test_delaunay.py | Jeremiah-England/Shapely | 769b203f2b7cbeeb0a694c21440b4025a563f807 | ["BSD-3-Clause"] | count: 1,009 | 2015-01-03T23:44:02.000Z | 2021-12-10T16:02:42.000Z
max_forks:  tests/test_delaunay.py | Jeremiah-England/Shapely | 769b203f2b7cbeeb0a694c21440b4025a563f807 | ["BSD-3-Clause"] | count: 467 | 2015-01-19T23:18:33.000Z | 2021-12-09T18:31:28.000Z
content:
import unittest
from shapely.geometry import Polygon, LineString, Point
from shapely.ops import triangulate
from shapely.geos import geos_version
@unittest.skipIf(geos_version < (3, 4, 0),
"Delaunay triangulation not supported")
class DelaunayTriangulation(unittest.TestCase):
"""
Only testing the number of triangles and their type here.
This doesn't actually test the points in the resulting geometries.
"""
def setUp(self):
self.p = Polygon([(0,0), (1,0), (1,1), (0,1)])
def test_polys(self):
polys = triangulate(self.p)
self.assertEqual(len(polys), 2)
for p in polys:
self.assertTrue(isinstance(p, Polygon))
def test_lines(self):
polys = triangulate(self.p, edges=True)
self.assertEqual(len(polys), 5)
for p in polys:
self.assertTrue(isinstance(p, LineString))
def test_point(self):
p = Point(1,1)
polys = triangulate(p)
self.assertEqual(len(polys), 0)
avg_line_length: 29.911765 | max_line_length: 70 | alphanum_fraction: 0.640118
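For reference, a small interactive sketch (assuming Shapely with GEOS >= 3.4) of the same triangulation behaviour the tests above check:

from shapely.geometry import Polygon
from shapely.ops import triangulate

square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
print(len(triangulate(square)))              # 2 triangles for the unit square
print(len(triangulate(square, edges=True)))  # 5 edges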
hexsha: 8351a5c39eced17b39a08bd2454ccb87c292b155 | size: 2,090 | ext: py | lang: Python
max_stars:  attre2vec/sampling.py | attre2vec/attre2vec | f36a2581f3d17887d6201a76624d4ced93d6503f | ["MIT"] | count: null | null | null
max_issues: attre2vec/sampling.py | attre2vec/attre2vec | f36a2581f3d17887d6201a76624d4ced93d6503f | ["MIT"] | count: null | null | null
max_forks:  attre2vec/sampling.py | attre2vec/attre2vec | f36a2581f3d17887d6201a76624d4ced93d6503f | ["MIT"] | count: null | null | null
content:
"""Functions for sampling negative and positive neighbor edges."""
import numpy as np
def _get_neighborhood(xe, rws, graph):
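    # For each query edge, collect every edge visited by its random walks (rws) that is
    # still present in the graph; these form the edge's (positive) neighbourhood.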
neighborhood = {}
for edge in xe:
neighbors = list(set(
e
for walk in rws[edge].values()
for edges in walk
for e in edges
if e in graph.edges()
))
neighborhood[tuple(edge)] = neighbors
return neighborhood
def _sample_neighbors(xe, nh, num_samples):
samples = []
xe_idxs = []
for idx, edge in enumerate(xe):
neighbors = np.array(nh[edge])
if neighbors.shape[0] == 0:
continue
sample_idxs = np.random.choice(
a=neighbors.shape[0],
size=num_samples,
replace=neighbors.shape[0] < num_samples,
)
samples.extend(neighbors[sample_idxs])
xe_idxs.extend([idx] * num_samples)
samples, xe_idxs = np.array(samples), np.array(xe_idxs)
return samples, xe_idxs
def _sample_negatives(xe, nh, graph, num_samples):
samples = []
xe_idxs = []
all_edges = list(graph.edges())
for idx, edge in enumerate(xe):
enh = set(nh[edge])
enh.add(edge)
i = 0
while i < num_samples:
sample_idx = np.random.randint(low=0, high=len(all_edges))
if all_edges[sample_idx] in enh:
continue
samples.append(all_edges[sample_idx])
xe_idxs.append(idx)
i += 1
samples, xe_idxs = np.array(samples), np.array(xe_idxs)
return samples, xe_idxs
def precompute_samples(xe, rws, graph, num_pos, num_neg):
"""Computes positive and negative samples for given edges."""
nh = _get_neighborhood(xe=xe, rws=rws, graph=graph)
xe_plus, idxs_plus = _sample_neighbors(
xe=xe, nh=nh, num_samples=num_pos,
)
xe_minus, idxs_minus = _sample_negatives(
xe=xe, nh=nh, graph=graph, num_samples=num_neg,
)
return {
'plus': {'xe': xe_plus, 'idxs': idxs_plus},
'minus': {'xe': xe_minus, 'idxs': idxs_minus},
}
avg_line_length: 26.455696 | max_line_length: 70 | alphanum_fraction: 0.589474
hexsha: 7791fb94151b74ded1f0205e233a075a26d0e999 | size: 12,202 | ext: py | lang: Python
max_stars:  webdataset/autodecode.py | sudrich/webdataset | 315977952b74a87848983518c64c9ad43e66c71f | ["BSD-3-Clause"] | count: 1 | 2021-09-14T15:11:36.000Z | 2021-09-14T15:11:36.000Z
max_issues: webdataset/autodecode.py | jacobbieker/webdataset | 9033c068e5b7bb6d331da8bb3f714980c7dd4738 | ["BSD-3-Clause"] | count: null | null | null
max_forks:  webdataset/autodecode.py | jacobbieker/webdataset | 9033c068e5b7bb6d331da8bb3f714980c7dd4738 | ["BSD-3-Clause"] | count: null | null | null
content:
#
# Copyright (c) 2017-2021 NVIDIA CORPORATION. All rights reserved.
# This file is part of the WebDataset library.
# See the LICENSE file for licensing terms (BSD-style).
#
"""Automatically decode webdataset samples."""
import io
import json
import os
import pickle
import re
import tempfile
from functools import partial
import numpy as np
from .checks import checkmember, checknotnone
"""Extensions passed on to the image decoder."""
image_extensions = "jpg jpeg png ppm pgm pbm pnm".split()
################################################################
# handle basic datatypes
################################################################
def torch_loads(data):
"""Load data using torch.loads, importing torch only if needed.
:param data: data to be decoded
"""
import io
import torch
stream = io.BytesIO(data)
return torch.load(stream)
def basichandlers(key, data):
"""Handle basic file decoding.
This function is usually part of the post= decoders.
This handles the following forms of decoding:
- txt -> unicode string
- cls cls2 class count index inx id -> int
- json jsn -> JSON decoding
- pyd pickle -> pickle decoding
- pth -> torch.loads
- ten tenbin -> fast tensor loading
- mp messagepack msg -> messagepack decoding
- npy -> Python NPY decoding
:param key: file name extension
:param data: binary data to be decoded
"""
extension = re.sub(r".*[.]", "", key)
if extension in "txt text transcript":
return data.decode("utf-8")
if extension in "cls cls2 class count index inx id".split():
try:
return int(data)
except ValueError:
return None
if extension in "json jsn":
return json.loads(data)
if extension in "pyd pickle".split():
return pickle.loads(data)
if extension in "pth".split():
return torch_loads(data)
if extension in "ten tb".split():
from . import tenbin
return tenbin.decode_buffer(data)
if extension in "mp msgpack msg".split():
import msgpack
return msgpack.unpackb(data)
if extension in "npy".split():
import numpy.lib.format
stream = io.BytesIO(data)
return numpy.lib.format.read_array(stream)
################################################################
# Generic extension handler.
################################################################
def call_extension_handler(key, data, f, extensions):
"""Call the function f with the given data if the key matches the extensions.
:param key: actual key found in the sample
:param data: binary data
:param f: decoder function
:param extensions: list of matching extensions
"""
extension = key.lower().split(".")
for target in extensions:
target = target.split(".")
if len(target) > len(extension):
continue
if extension[-len(target):] == target:
return f(data)
return None
def handle_extension(extensions, f):
"""Return a decoder function for the list of extensions.
Extensions can be a space separated list of extensions.
Extensions can contain dots, in which case the corresponding number
of extension components must be present in the key given to f.
Comparisons are case insensitive.
Examples:
handle_extension("jpg jpeg", my_decode_jpg) # invoked for any file.jpg
handle_extension("seg.jpg", special_case_jpg) # invoked only for file.seg.jpg
"""
extensions = extensions.lower().split()
return partial(call_extension_handler, f=f, extensions=extensions)
################################################################
# handle images
################################################################
imagespecs = {
"l8": ("numpy", "uint8", "l"),
"rgb8": ("numpy", "uint8", "rgb"),
"rgba8": ("numpy", "uint8", "rgba"),
"l": ("numpy", "float", "l"),
"rgb": ("numpy", "float", "rgb"),
"rgba": ("numpy", "float", "rgba"),
"torchl8": ("torch", "uint8", "l"),
"torchrgb8": ("torch", "uint8", "rgb"),
"torchrgba8": ("torch", "uint8", "rgba"),
"torchl": ("torch", "float", "l"),
"torchrgb": ("torch", "float", "rgb"),
"torch": ("torch", "float", "rgb"),
"torchrgba": ("torch", "float", "rgba"),
"pill": ("pil", None, "l"),
"pil": ("pil", None, "rgb"),
"pilrgb": ("pil", None, "rgb"),
"pilrgba": ("pil", None, "rgba"),
}
class ImageHandler:
"""Decode image data using the given `imagespec`.
The `imagespec` specifies whether the image is decoded
    to numpy/torch/pil, decoded to uint8/float, and decoded
to l/rgb/rgba:
- l8: numpy uint8 l
- rgb8: numpy uint8 rgb
- rgba8: numpy uint8 rgba
- l: numpy float l
- rgb: numpy float rgb
- rgba: numpy float rgba
- torchl8: torch uint8 l
- torchrgb8: torch uint8 rgb
- torchrgba8: torch uint8 rgba
- torchl: torch float l
- torchrgb: torch float rgb
- torch: torch float rgb
- torchrgba: torch float rgba
- pill: pil None l
- pil: pil None rgb
- pilrgb: pil None rgb
- pilrgba: pil None rgba
"""
def __init__(self, imagespec, extensions=image_extensions):
"""Create an image handler.
:param imagespec: short string indicating the type of decoding
:param extensions: list of extensions the image handler is invoked for
"""
checkmember(imagespec, list(imagespecs.keys()), "unknown image specification")
self.imagespec = imagespec.lower()
self.extensions = extensions
def __call__(self, key, data):
"""Perform image decoding.
:param key: file name extension
:param data: binary data
"""
import PIL.Image
extension = re.sub(r".*[.]", "", key)
if extension.lower() not in self.extensions:
return None
imagespec = self.imagespec
atype, etype, mode = imagespecs[imagespec]
with io.BytesIO(data) as stream:
img = PIL.Image.open(stream)
img.load()
img = img.convert(mode.upper())
if atype == "pil":
return img
elif atype == "numpy":
result = np.asarray(img)
checkmember(result.dtype, [np.uint8])
if etype == "uint8":
return result
else:
return result.astype("f") / 255.0
elif atype == "torch":
import torch
result = np.asarray(img)
checkmember(result.dtype, [np.uint8])
if etype == "uint8":
result = np.array(result.transpose(2, 0, 1))
return torch.tensor(result)
else:
result = np.array(result.transpose(2, 0, 1))
return torch.tensor(result) / 255.0
return None
def imagehandler(imagespec, extensions=image_extensions):
"""Create an image handler.
    This is just a lower case alias for ImageHandler.
:param imagespec: textual image spec
:param extensions: list of extensions the handler should be applied for
"""
return ImageHandler(imagespec, extensions)
################################################################
# torch video
################################################################
def torch_video(key, data):
"""Decode video using the torchvideo library.
:param key: file name extension
:param data: data to be decoded
"""
extension = re.sub(r".*[.]", "", key)
if extension not in "mp4 ogv mjpeg avi mov h264 mpg webm wmv".split():
return None
import torchvision.io
with tempfile.TemporaryDirectory() as dirname:
fname = os.path.join(dirname, f"file.{extension}")
with open(fname, "wb") as stream:
stream.write(data)
return torchvision.io.read_video(fname, pts_unit="sec")
################################################################
# torchaudio
################################################################
def torch_audio(key, data):
"""Decode audio using the torchaudio library.
:param key: file name extension
:param data: data to be decoded
"""
extension = re.sub(r".*[.]", "", key)
if extension not in ["flac", "mp3", "sox", "wav", "m4a", "ogg", "wma"]:
return None
import torchaudio
with tempfile.TemporaryDirectory() as dirname:
fname = os.path.join(dirname, f"file.{extension}")
with open(fname, "wb") as stream:
stream.write(data)
return torchaudio.load(fname)
################################################################
# special class for continuing decoding
################################################################
class Continue:
"""Special class for continuing decoding.
This is mostly used for decompression, as in:
def decompressor(key, data):
if key.endswith(".gz"):
return Continue(key[:-3], decompress(data))
return None
"""
def __init__(self, key, data):
"""__init__.
:param key:
:param data:
"""
self.key, self.data = key, data
def gzfilter(key, data):
"""Decode .gz files.
    This decodes compressed files and then continues decoding.
:param key: file name extension
:param data: binary data
"""
import gzip
if not key.endswith(".gz"):
return None
decompressed = gzip.open(io.BytesIO(data)).read()
return Continue(key[:-3], decompressed)
################################################################
# decode entire training samples
################################################################
default_pre_handlers = [gzfilter]
default_post_handlers = [basichandlers]
class Decoder:
"""Decode samples using a list of handlers.
For each key/data item, this iterates through the list of
handlers until some handler returns something other than None.
"""
def __init__(self, handlers, pre=None, post=None, only=None):
"""Create a Decoder.
:param handlers: main list of handlers
:param pre: handlers called before the main list (.gz handler by default)
:param post: handlers called after the main list (default handlers by default)
        :param only: a list of extensions; when given, only keys with those extensions are decoded (all other keys are skipped)
"""
self.only = only if only is None else set(only)
if pre is None:
pre = default_pre_handlers
if post is None:
post = default_post_handlers
assert all(callable(h) for h in handlers), f"one of {handlers} not callable"
assert all(callable(h) for h in pre), f"one of {pre} not callable"
assert all(callable(h) for h in post), f"one of {post} not callable"
self.handlers = pre + handlers + post
def decode1(self, key, data):
"""Decode a single field of a sample.
:param key: file name extension
:param data: binary data
"""
key = "." + key
for f in self.handlers:
result = f(key, data)
if isinstance(result, Continue):
key, data = result.key, result.data
continue
if result is not None:
return result
return data
def decode(self, sample):
"""Decode an entire sample.
:param sample: the sample, a dictionary of key value pairs
"""
result = {}
assert isinstance(sample, dict), sample
for k, v in list(sample.items()):
if self.only is not None and k not in self.only:
continue
if k[0] == "_":
if isinstance(v, bytes):
v = v.decode("utf-8")
result[k] = v
continue
checknotnone(v)
result[k] = self.decode1(k, v)
return result
def __call__(self, sample):
"""Decode an entire sample.
:param sample: the sample
"""
assert isinstance(sample, dict), (len(sample), sample)
return self.decode(sample)
avg_line_length: 29.47343 | max_line_length: 94 | alphanum_fraction: 0.560974
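A minimal usage sketch (not taken from the library's documentation) of the decoding pipeline defined above; the sample dict and its byte values are made up:

decoder = Decoder([imagehandler("rgb8")])
sample = {"txt": b"hello world", "cls": b"7", "__key__": b"sample-000"}
decoded = decoder(sample)
# 'txt' -> 'hello world' (basichandlers), 'cls' -> 7, '__key__' is passed through as a str.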
hexsha: 79dd2b3d772d74898d1b3135ad3df1ea37a81361 | size: 5,733 | ext: py | lang: Python
max_stars:  src/utils.py | vineeths96/Spoken-Keyword-Spotting | 8cd903171d837e27dfef3b779187a743a818e0e5 | ["MIT"] | count: 33 | 2020-07-20T09:09:12.000Z | 2022-03-29T01:01:43.000Z
max_issues: src/utils.py | vineeths96/Spoken-Keyword-Spotting | 8cd903171d837e27dfef3b779187a743a818e0e5 | ["MIT"] | count: 12 | 2020-09-30T09:38:32.000Z | 2022-02-10T02:06:31.000Z
max_forks:  src/utils.py | vineeths96/Spoken-Keyword-Spotting | 8cd903171d837e27dfef3b779187a743a818e0e5 | ["MIT"] | count: 9 | 2020-11-27T13:41:10.000Z | 2022-03-23T05:22:48.000Z
content:
import os
import numpy as np
import tensorflow as tf
from scipy.io import wavfile
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from sklearn.metrics import accuracy_score, recall_score, matthews_corrcoef
from sklearn.metrics import precision_score, f1_score, confusion_matrix
from sklearn.metrics._plot.confusion_matrix import ConfusionMatrixDisplay
from python_speech_features import logfbank
from parameters import *
def getDataset(df, batch_size, cache_file=None, shuffle=True, parse_param=PARSE_PARAMS, scale=False):
"""
Creates a Tensorflow Dataset containing filterbanks, labels
:param df: Dataframe with filenames and labels
:param batch_size: Batch size of the input
:param cache_file: Whether to cache the dataset during run
:param shuffle: Whether to shuffle the dataset
:param parse_param: Window parameters
:param scale: Whether to scale filterbank levels
:return: TF Dataset, Steps per epoch
"""
data = tf.data.Dataset.from_tensor_slices((df["files"].tolist(), df["labels"].tolist()))
data = data.map(
lambda filename, label: tuple(
tf.py_function(_parse_fn, inp=[filename, label, parse_param, scale], Tout=[tf.float32, tf.int32])
),
num_parallel_calls=os.cpu_count(),
)
if cache_file:
data = data.cache("../input/" + cache_file)
if shuffle:
data = data.shuffle(buffer_size=df.shape[0])
data = data.batch(batch_size).prefetch(buffer_size=1)
steps = df.shape[0] // batch_size
return data, steps
def _loadfile(filename):
"""
Return a np array containing the wav
:param filename: Filename of wav
"""
_, wave = wavfile.read(filename)
# Pad with noise if audio is short
if len(wave) < AUDIO_LENGTH:
silence_part = np.random.normal(0, 5, AUDIO_LENGTH - len(wave))
wave = np.append(np.asarray(wave), silence_part)
return np.array(wave, dtype=np.float32)
def _logMelFilterbank(wave, parse_param=PARSE_PARAMS):
"""
Computes the log Mel filterbanks
:param wave: Audio as an array
:param parse_param: Window Parameter
:return: Filterbanks
"""
fbank = logfbank(
wave,
samplerate=AUDIO_SR,
winlen=float(parse_param[0]),
winstep=float(parse_param[1]),
highfreq=AUDIO_SR / 2,
nfilt=int(parse_param[2]),
)
fbank = np.array(fbank, dtype=np.float32)
return fbank
def _normalize(data):
"""
Normalizes the data (z-score)
:param data: Data to be normalized
    :return: Normalized data
"""
mean = np.mean(data, axis=0)
sd = np.std(data, axis=0)
    # If the std dev is zero anywhere (e.g. constant input), substitute a small value
    # to avoid division by zero; works for both scalar and per-band std devs.
    sd = np.where(sd == 0, 1e-7, sd)
return (data - mean) / sd
def _parse_fn(filename, label, parse_param=PARSE_PARAMS, scale=False):
"""
Calculates filterbank energies for a given file
:param filename: File name
:param label: Class label
:param parse_param: Window parameters
:param scale: Whether to normalize the filterbanks
:return: Filterbanks, Label
"""
wave = _loadfile(filename.numpy())
fbank = _logMelFilterbank(wave, parse_param)
if scale:
fbank = _normalize(fbank)
return fbank, np.asarray(label, dtype=np.int32)
def plot_confusion_matrix(y_pred, y_true, labels, display_labels):
"""
Plots the confusion matrix for given data
:param y_pred: Predicted labels
:param y_true: True labels
:param labels: Class labels integer
:param display_labels: Class labels to display
    :return: The matplotlib.pyplot module, for further customisation
"""
cm = confusion_matrix(y_pred=y_pred, y_true=y_true, labels=labels)
ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=display_labels).plot(
cmap=plt.cm.Blues, values_format="d"
)
plt.grid(False)
return plt
def OC_Statistics(y_pred, y_true, file_name):
"""
Print performance statistics for One Class problem
:param y_pred: Predicted labels
:param y_true: True labels
:param file_name: Plot filename
:return: None
"""
print("Accuracy: {:.4f}".format(accuracy_score(y_true, y_pred)))
print("Precision: {:.4f}".format(precision_score(y_true, y_pred)))
print("Recall: {:.4f}".format(recall_score(y_true, y_pred)))
print("F1-score: {:.4f}".format(f1_score(y_true, y_pred)))
print("Matthews Correlation Coefficient: {:.4f}".format(matthews_corrcoef(y_true, y_pred)))
sns.set(font_scale=1.50)
plot_confusion_matrix(y_pred=y_pred, y_true=y_true, labels=[-1, 1], display_labels=["Other", "Marvin"])
plt.tight_layout()
plt.savefig(f"../docs/results/{file_name}.png", dpi=300)
plt.show()
def plot_history(history):
"""
Plots and saves training history
:param history: Training history
:return: None
"""
sns.set()
loss = history.history["loss"]
val_loss = history.history["val_loss"]
acc = history.history["sparse_categorical_accuracy"]
val_acc = history.history["val_sparse_categorical_accuracy"]
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
ax1.plot(loss, label="Training")
ax1.plot(val_loss, label="Validation")
ax1.xaxis.set_major_locator(MaxNLocator(integer=True))
ax1.set_title("Model loss")
ax1.set_xlabel("Epoch")
ax1.set_ylabel("Loss")
ax1.legend()
ax2.plot(acc, label="Training")
ax2.plot(val_acc, label="Validation")
ax2.xaxis.set_major_locator(MaxNLocator(integer=True))
ax2.set_title("Accuracy")
ax2.set_xlabel("Epoch")
ax2.set_ylabel("Accuracy")
ax2.legend()
plt.tight_layout()
plt.savefig("../docs/results/model_training.png", dpi=300)
fig.show()
avg_line_length: 28.241379 | max_line_length: 109 | alphanum_fraction: 0.683063
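A hypothetical driver (not from the repository; the file path, label and batch size are placeholders) showing how getDataset above is intended to be called:

import pandas as pd

df = pd.DataFrame({"files": ["../input/audio/marvin_0001.wav"],  # placeholder wav path
                   "labels": [1]})
train_data, steps = getDataset(df, batch_size=1, shuffle=False)
for fbank, label in train_data.take(1):
    print(fbank.shape, label.numpy())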
hexsha: 294133a4c509c2941350447cf7d0b1267232b5e3 | size: 67,297 | ext: py | lang: Python
max_stars:  ironic/tests/unit/drivers/modules/test_ipxe.py | dangervon/ironic | 01dd06a17673ec5157dda2ecfc51feb9d2f8e5c2 | ["Apache-2.0"] | count: null | null | null
max_issues: ironic/tests/unit/drivers/modules/test_ipxe.py | dangervon/ironic | 01dd06a17673ec5157dda2ecfc51feb9d2f8e5c2 | ["Apache-2.0"] | count: null | null | null
max_forks:  ironic/tests/unit/drivers/modules/test_ipxe.py | dangervon/ironic | 01dd06a17673ec5157dda2ecfc51feb9d2f8e5c2 | ["Apache-2.0"] | count: null | null | null
content:
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for iPXE driver."""
import os
from unittest import mock
from oslo_config import cfg
from oslo_utils import uuidutils
from ironic.common import boot_devices
from ironic.common import boot_modes
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.glance_service import image_service
from ironic.common import pxe_utils
from ironic.common import states
from ironic.common import utils as common_utils
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers import base as drivers_base
from ironic.drivers.modules import agent_base
from ironic.drivers.modules import boot_mode_utils
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import ipxe
from ironic.drivers.modules import pxe_base
from ironic.drivers.modules.storage import noop as noop_storage
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
CONF = cfg.CONF
INST_INFO_DICT = db_utils.get_test_pxe_instance_info()
DRV_INFO_DICT = db_utils.get_test_pxe_driver_info()
DRV_INTERNAL_INFO_DICT = db_utils.get_test_pxe_driver_internal_info()
# NOTE(TheJulia): This code is essentially a bulk copy of the
# test_pxe file with some contextual modifications to enforce
# use of ipxe while also explicitly having it globally disabled
# in the conductor.
@mock.patch.object(ipxe.iPXEBoot, '__init__', lambda self: None)
class iPXEBootTestCase(db_base.DbTestCase):
driver = 'fake-hardware'
boot_interface = 'ipxe'
driver_info = DRV_INFO_DICT
driver_internal_info = DRV_INTERNAL_INFO_DICT
def setUp(self):
super(iPXEBootTestCase, self).setUp()
self.context.auth_token = 'fake'
self.config_temp_dir('tftp_root', group='pxe')
self.config_temp_dir('images_path', group='pxe')
self.config_temp_dir('http_root', group='deploy')
self.config(group='deploy', http_url='http://myserver')
instance_info = INST_INFO_DICT
self.config(enabled_boot_interfaces=[self.boot_interface,
'ipxe', 'fake'])
self.node = obj_utils.create_test_node(
self.context,
driver=self.driver,
boot_interface=self.boot_interface,
# Avoid fake properties in get_properties() output
vendor_interface='no-vendor',
instance_info=instance_info,
driver_info=self.driver_info,
driver_internal_info=self.driver_internal_info)
self.port = obj_utils.create_test_port(self.context,
node_id=self.node.id)
def test_get_properties(self):
expected = pxe_base.COMMON_PROPERTIES
expected.update(agent_base.VENDOR_PROPERTIES)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(expected, task.driver.get_properties())
@mock.patch.object(image_service.GlanceImageService, 'show', autospec=True)
def test_validate_good(self, mock_glance):
mock_glance.return_value = {'properties': {'kernel_id': 'fake-kernel',
'ramdisk_id': 'fake-initr'}}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.boot.validate(task)
@mock.patch.object(image_service.GlanceImageService, 'show', autospec=True)
def test_validate_good_whole_disk_image(self, mock_glance):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.driver_internal_info['is_whole_disk_image'] = True
task.driver.boot.validate(task)
@mock.patch.object(image_service.GlanceImageService, 'show', autospec=True)
@mock.patch.object(noop_storage.NoopStorage, 'should_write_image',
autospec=True)
def test_validate_skip_check_write_image_false(self, mock_write,
mock_glance):
mock_write.return_value = False
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.boot.validate(task)
self.assertFalse(mock_glance.called)
def test_validate_fail_missing_deploy_kernel(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
del task.node.driver_info['deploy_kernel']
self.assertRaises(exception.MissingParameterValue,
task.driver.boot.validate, task)
def test_validate_fail_missing_deploy_ramdisk(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
del task.node.driver_info['deploy_ramdisk']
self.assertRaises(exception.MissingParameterValue,
task.driver.boot.validate, task)
@mock.patch.object(image_service.GlanceImageService, 'show', autospec=True)
@mock.patch('ironic.drivers.modules.deploy_utils.get_boot_option',
autospec=True)
def test_validate_with_boot_iso(self, mock_boot_option, mock_glance):
self.node.instance_info = {
'boot_iso': "glance://image"
}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.boot.validate(task)
mock_boot_option.assert_called_with(task.node)
mock_glance.assert_not_called()
@mock.patch('ironic.drivers.modules.deploy_utils.get_boot_option',
return_value='ramdisk', autospec=True)
def test_validate_with_boot_iso_and_image_source(self, mock_boot_option):
i_info = self.node.instance_info
i_info['image_source'] = "http://localhost:1234/image"
i_info['boot_iso'] = "http://localhost:1234/boot.iso"
self.node.instance_info = i_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.boot.validate,
task)
mock_boot_option.assert_called_once_with(task.node)
@mock.patch('ironic.drivers.modules.deploy_utils.get_boot_option',
return_value='local', autospec=True)
def test_validate_no_image_source_for_local_boot(self, mock_boot_option):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
del task.node['instance_info']['image_source']
task.driver.boot.validate(task)
mock_boot_option.assert_called_with(task.node)
@mock.patch('ironic.drivers.modules.deploy_utils.get_boot_option',
return_value='netboot', autospec=True)
def test_validate_fail_missing_image_source(self, mock_boot_option):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
del task.node['instance_info']['image_source']
self.assertRaises(exception.MissingParameterValue,
task.driver.boot.validate, task)
mock_boot_option.assert_called_with(task.node)
def test_validate_fail_no_port(self):
new_node = obj_utils.create_test_node(
self.context,
uuid='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
driver=self.driver, boot_interface=self.boot_interface,
instance_info=INST_INFO_DICT, driver_info=DRV_INFO_DICT)
with task_manager.acquire(self.context, new_node.uuid,
shared=True) as task:
self.assertRaises(exception.MissingParameterValue,
task.driver.boot.validate, task)
def test_validate_fail_trusted_boot_with_secure_boot(self):
instance_info = {"boot_option": "netboot",
"secure_boot": "true",
"trusted_boot": "true"}
properties = {'capabilities': 'trusted_boot:true'}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.instance_info['capabilities'] = instance_info
task.node.properties = properties
task.node.driver_internal_info['is_whole_disk_image'] = False
self.assertRaises(exception.InvalidParameterValue,
task.driver.boot.validate, task)
def test_validate_fail_invalid_trusted_boot_value(self):
properties = {'capabilities': 'trusted_boot:value'}
instance_info = {"trusted_boot": "value"}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.properties = properties
task.node.instance_info['capabilities'] = instance_info
self.assertRaises(exception.InvalidParameterValue,
task.driver.boot.validate, task)
@mock.patch.object(image_service.GlanceImageService, 'show',
autospec=True)
def test_validate_fail_no_image_kernel_ramdisk_props(self, mock_glance):
instance_info = {"boot_option": "netboot"}
mock_glance.return_value = {'properties': {}}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.instance_info['capabilities'] = instance_info
self.assertRaises(exception.MissingParameterValue,
task.driver.boot.validate,
task)
@mock.patch('ironic.drivers.modules.deploy_utils.get_boot_option',
return_value='netboot', autospec=True)
@mock.patch.object(image_service.GlanceImageService, 'show',
autospec=True)
def test_validate_fail_glance_image_doesnt_exists(self, mock_glance,
mock_boot_option):
mock_glance.side_effect = exception.ImageNotFound('not found')
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.boot.validate, task)
mock_boot_option.assert_called_with(task.node)
@mock.patch('ironic.drivers.modules.deploy_utils.get_boot_option',
return_value='netboot', autospec=True)
@mock.patch.object(image_service.GlanceImageService, 'show',
autospec=True)
def test_validate_fail_glance_conn_problem(self, mock_glance,
mock_boot_option):
exceptions = (exception.GlanceConnectionFailed('connection fail'),
exception.ImageNotAuthorized('not authorized'),
exception.Invalid('invalid'))
mock_glance.side_effect = exceptions
for exc in exceptions:
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.boot.validate, task)
mock_boot_option.assert_called_with(task.node)
def test_validate_inspection(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.boot.validate_inspection(task)
def test_validate_inspection_no_inspection_ramdisk(self):
driver_info = self.node.driver_info
del driver_info['deploy_ramdisk']
self.node.driver_info = driver_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.UnsupportedDriverExtension,
task.driver.boot.validate_inspection, task)
# TODO(TheJulia): Many of the interfaces mocked below are private PXE
# interface methods. As time progresses, these will need to be migrated
# and refactored as we begin to separate PXE and iPXE interfaces.
@mock.patch.object(manager_utils, 'node_get_boot_mode', autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
@mock.patch.object(pxe_utils, 'get_image_info', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'build_pxe_config_options', autospec=True)
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
def _test_prepare_ramdisk(self, mock_pxe_config,
mock_build_pxe, mock_cache_r_k,
mock_deploy_img_info,
mock_instance_img_info,
dhcp_factory_mock,
set_boot_device_mock,
get_boot_mode_mock,
uefi=False,
cleaning=False,
ipxe_use_swift=False,
whole_disk_image=False,
mode='deploy',
node_boot_mode=None,
persistent=False):
mock_build_pxe.return_value = {}
kernel_label = '%s_kernel' % mode
ramdisk_label = '%s_ramdisk' % mode
mock_deploy_img_info.return_value = {kernel_label: 'a',
ramdisk_label: 'r'}
if whole_disk_image:
mock_instance_img_info.return_value = {}
else:
mock_instance_img_info.return_value = {'kernel': 'b'}
mock_pxe_config.return_value = None
mock_cache_r_k.return_value = None
provider_mock = mock.MagicMock()
dhcp_factory_mock.return_value = provider_mock
get_boot_mode_mock.return_value = node_boot_mode
driver_internal_info = self.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = whole_disk_image
self.node.driver_internal_info = driver_internal_info
if mode == 'rescue':
mock_deploy_img_info.return_value = {
'rescue_kernel': 'a',
'rescue_ramdisk': 'r'}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
dhcp_opts = pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True, ip_version=4)
dhcp_opts += pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True, ip_version=6)
task.driver.boot.prepare_ramdisk(task, {'foo': 'bar'})
mock_deploy_img_info.assert_called_once_with(task.node, mode=mode,
ipxe_enabled=True)
provider_mock.update_dhcp.assert_called_once_with(
task, dhcp_opts)
if self.node.provision_state == states.DEPLOYING:
get_boot_mode_mock.assert_called_once_with(task)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.PXE,
persistent=persistent)
if ipxe_use_swift:
if whole_disk_image:
self.assertFalse(mock_cache_r_k.called)
else:
mock_cache_r_k.assert_called_once_with(
task, {'kernel': 'b'},
ipxe_enabled=True)
mock_instance_img_info.assert_called_once_with(
task, ipxe_enabled=True)
elif not cleaning and mode == 'deploy':
mock_cache_r_k.assert_called_once_with(
task,
{'deploy_kernel': 'a', 'deploy_ramdisk': 'r',
'kernel': 'b'},
ipxe_enabled=True)
mock_instance_img_info.assert_called_once_with(
task, ipxe_enabled=True)
elif mode == 'deploy':
mock_cache_r_k.assert_called_once_with(
task, {'deploy_kernel': 'a', 'deploy_ramdisk': 'r'},
ipxe_enabled=True)
elif mode == 'rescue':
mock_cache_r_k.assert_called_once_with(
task, {'rescue_kernel': 'a', 'rescue_ramdisk': 'r'},
ipxe_enabled=True)
mock_pxe_config.assert_called_once_with(
task, {}, CONF.pxe.ipxe_config_template,
ipxe_enabled=True)
def test_prepare_ramdisk(self):
self.node.provision_state = states.DEPLOYING
self.node.save()
self._test_prepare_ramdisk()
def test_prepare_ramdisk_rescue(self):
self.node.provision_state = states.RESCUING
self.node.save()
self._test_prepare_ramdisk(mode='rescue')
def test_prepare_ramdisk_uefi(self):
self.node.provision_state = states.DEPLOYING
self.node.save()
properties = self.node.properties
properties['capabilities'] = 'boot_mode:uefi'
self.node.properties = properties
self.node.save()
self._test_prepare_ramdisk(uefi=True)
@mock.patch.object(os.path, 'isfile', lambda path: True)
@mock.patch.object(common_utils, 'file_has_content', lambda *args: False)
@mock.patch('ironic.common.utils.write_to_file', autospec=True)
@mock.patch('ironic.common.utils.render_template', autospec=True)
def test_prepare_ramdisk_ipxe_with_copy_file_different(
self, render_mock, write_mock):
self.node.provision_state = states.DEPLOYING
self.node.save()
render_mock.return_value = 'foo'
self._test_prepare_ramdisk()
write_mock.assert_called_once_with(
os.path.join(
CONF.deploy.http_root,
os.path.basename(CONF.pxe.ipxe_boot_script)),
'foo', 0o644)
render_mock.assert_called_once_with(
CONF.pxe.ipxe_boot_script,
{'ipxe_for_mac_uri': 'pxelinux.cfg/',
'ipxe_fallback_script': None})
@mock.patch.object(os.path, 'isfile', lambda path: False)
@mock.patch('ironic.common.utils.file_has_content', autospec=True)
@mock.patch('ironic.common.utils.write_to_file', autospec=True)
@mock.patch('ironic.common.utils.render_template', autospec=True)
def test_prepare_ramdisk_ipxe_with_copy_no_file(
self, render_mock, write_mock, file_has_content_mock):
self.node.provision_state = states.DEPLOYING
self.node.save()
render_mock.return_value = 'foo'
self._test_prepare_ramdisk()
self.assertFalse(file_has_content_mock.called)
write_mock.assert_called_once_with(
os.path.join(
CONF.deploy.http_root,
os.path.basename(CONF.pxe.ipxe_boot_script)),
'foo', 0o644)
render_mock.assert_called_once_with(
CONF.pxe.ipxe_boot_script,
{'ipxe_for_mac_uri': 'pxelinux.cfg/',
'ipxe_fallback_script': None})
@mock.patch.object(os.path, 'isfile', lambda path: True)
@mock.patch.object(common_utils, 'file_has_content', lambda *args: True)
@mock.patch('ironic.common.utils.write_to_file', autospec=True)
@mock.patch('ironic.common.utils.render_template', autospec=True)
def test_prepare_ramdisk_ipxe_without_copy(
self, render_mock, write_mock):
self.node.provision_state = states.DEPLOYING
self.node.save()
self._test_prepare_ramdisk()
self.assertFalse(write_mock.called)
@mock.patch.object(common_utils, 'render_template', lambda *args: 'foo')
@mock.patch('ironic.common.utils.write_to_file', autospec=True)
def test_prepare_ramdisk_ipxe_swift(self, write_mock):
self.node.provision_state = states.DEPLOYING
self.node.save()
self.config(group='pxe', ipxe_use_swift=True)
self._test_prepare_ramdisk(ipxe_use_swift=True)
write_mock.assert_called_once_with(
os.path.join(
CONF.deploy.http_root,
os.path.basename(CONF.pxe.ipxe_boot_script)),
'foo', 0o644)
@mock.patch.object(common_utils, 'render_template', lambda *args: 'foo')
@mock.patch('ironic.common.utils.write_to_file', autospec=True)
def test_prepare_ramdisk_ipxe_swift_whole_disk_image(
self, write_mock):
self.node.provision_state = states.DEPLOYING
self.node.save()
self.config(group='pxe', ipxe_use_swift=True)
self._test_prepare_ramdisk(ipxe_use_swift=True, whole_disk_image=True)
write_mock.assert_called_once_with(
os.path.join(
CONF.deploy.http_root,
os.path.basename(CONF.pxe.ipxe_boot_script)),
'foo', 0o644)
def test_prepare_ramdisk_cleaning(self):
self.node.provision_state = states.CLEANING
self.node.save()
self._test_prepare_ramdisk(cleaning=True)
@mock.patch.object(manager_utils, 'node_set_boot_mode', autospec=True)
def test_prepare_ramdisk_set_boot_mode_on_bm(
self, set_boot_mode_mock):
self.node.provision_state = states.DEPLOYING
properties = self.node.properties
properties['capabilities'] = 'boot_mode:uefi'
self.node.properties = properties
self.node.save()
self._test_prepare_ramdisk(uefi=True)
set_boot_mode_mock.assert_called_once_with(mock.ANY, boot_modes.UEFI)
@mock.patch.object(manager_utils, 'node_set_boot_mode', autospec=True)
def test_prepare_ramdisk_set_boot_mode_on_ironic(
self, set_boot_mode_mock):
self.node.provision_state = states.DEPLOYING
self.node.save()
self._test_prepare_ramdisk(node_boot_mode=boot_modes.LEGACY_BIOS)
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_internal_info = task.node.driver_internal_info
self.assertIn('deploy_boot_mode', driver_internal_info)
self.assertEqual(boot_modes.LEGACY_BIOS,
driver_internal_info['deploy_boot_mode'])
self.assertEqual(set_boot_mode_mock.call_count, 0)
@mock.patch.object(manager_utils, 'node_set_boot_mode', autospec=True)
def test_prepare_ramdisk_set_default_boot_mode_on_ironic_bios(
self, set_boot_mode_mock):
self.node.provision_state = states.DEPLOYING
self.node.save()
self.config(default_boot_mode=boot_modes.LEGACY_BIOS, group='deploy')
self._test_prepare_ramdisk()
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_internal_info = task.node.driver_internal_info
self.assertIn('deploy_boot_mode', driver_internal_info)
self.assertEqual(boot_modes.LEGACY_BIOS,
driver_internal_info['deploy_boot_mode'])
self.assertEqual(set_boot_mode_mock.call_count, 1)
@mock.patch.object(manager_utils, 'node_set_boot_mode', autospec=True)
def test_prepare_ramdisk_set_default_boot_mode_on_ironic_uefi(
self, set_boot_mode_mock):
self.node.provision_state = states.DEPLOYING
self.node.save()
self.config(default_boot_mode=boot_modes.UEFI, group='deploy')
self._test_prepare_ramdisk(uefi=True)
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_internal_info = task.node.driver_internal_info
self.assertIn('deploy_boot_mode', driver_internal_info)
self.assertEqual(boot_modes.UEFI,
driver_internal_info['deploy_boot_mode'])
self.assertEqual(set_boot_mode_mock.call_count, 1)
@mock.patch.object(manager_utils, 'node_set_boot_mode', autospec=True)
def test_prepare_ramdisk_conflicting_boot_modes(
self, set_boot_mode_mock):
self.node.provision_state = states.DEPLOYING
properties = self.node.properties
properties['capabilities'] = 'boot_mode:uefi'
self.node.properties = properties
self.node.save()
self._test_prepare_ramdisk(uefi=True,
node_boot_mode=boot_modes.LEGACY_BIOS)
set_boot_mode_mock.assert_called_once_with(mock.ANY, boot_modes.UEFI)
@mock.patch.object(manager_utils, 'node_set_boot_mode', autospec=True)
def test_prepare_ramdisk_conflicting_boot_modes_set_unsupported(
self, set_boot_mode_mock):
self.node.provision_state = states.DEPLOYING
properties = self.node.properties
properties['capabilities'] = 'boot_mode:uefi'
self.node.properties = properties
self.node.save()
set_boot_mode_mock.side_effect = exception.UnsupportedDriverExtension(
extension='management', driver='test-driver'
)
self.assertRaises(exception.UnsupportedDriverExtension,
self._test_prepare_ramdisk,
uefi=True, node_boot_mode=boot_modes.LEGACY_BIOS)
@mock.patch.object(manager_utils, 'node_set_boot_mode', autospec=True)
def test_prepare_ramdisk_set_boot_mode_not_called(
self, set_boot_mode_mock):
self.node.provision_state = states.DEPLOYING
self.node.save()
properties = self.node.properties
properties['capabilities'] = 'boot_mode:uefi'
self.node.properties = properties
self.node.save()
self._test_prepare_ramdisk(uefi=True, node_boot_mode=boot_modes.UEFI)
self.assertEqual(set_boot_mode_mock.call_count, 0)
@mock.patch.object(pxe_utils, 'clean_up_pxe_env', autospec=True)
@mock.patch.object(pxe_utils, 'get_image_info', autospec=True)
def _test_clean_up_ramdisk(self, get_image_info_mock,
clean_up_pxe_env_mock, mode='deploy'):
with task_manager.acquire(self.context, self.node.uuid) as task:
kernel_label = '%s_kernel' % mode
ramdisk_label = '%s_ramdisk' % mode
image_info = {kernel_label: ['', '/path/to/' + kernel_label],
ramdisk_label: ['', '/path/to/' + ramdisk_label]}
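            # Each entry presumably maps the label to an (image reference, cached path)
            # pair; the reference is left empty here and only the path matters for the
            # clean-up call being exercised.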
get_image_info_mock.return_value = image_info
task.driver.boot.clean_up_ramdisk(task)
clean_up_pxe_env_mock.assert_called_once_with(
task, image_info, ipxe_enabled=True)
get_image_info_mock.assert_called_once_with(
task.node, mode=mode, ipxe_enabled=True)
def test_clean_up_ramdisk(self):
self.node.provision_state = states.DEPLOYING
self.node.save()
self._test_clean_up_ramdisk()
def test_clean_up_ramdisk_rescue(self):
self.node.provision_state = states.RESCUING
self.node.save()
self._test_clean_up_ramdisk(mode='rescue')
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_netboot(
self, get_image_info_mock, cache_mock,
dhcp_factory_mock, switch_pxe_config_mock,
set_boot_device_mock):
provider_mock = mock.MagicMock()
dhcp_factory_mock.return_value = provider_mock
image_info = {'kernel': ('', '/path/to/kernel'),
'ramdisk': ('', '/path/to/ramdisk')}
instance_info = {"boot_option": "netboot"}
get_image_info_mock.return_value = image_info
with task_manager.acquire(self.context, self.node.uuid) as task:
dhcp_opts = pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True)
dhcp_opts += pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True, ip_version=6)
pxe_config_path = pxe_utils.get_pxe_config_file_path(
task.node.uuid, ipxe_enabled=True)
task.node.properties['capabilities'] = 'boot_mode:uefi'
task.node.instance_info['capabilities'] = instance_info
task.node.driver_internal_info['root_uuid_or_disk_id'] = (
"30212642-09d3-467f-8e09-21685826ab50")
task.node.driver_internal_info['is_whole_disk_image'] = False
task.driver.boot.prepare_instance(task)
get_image_info_mock.assert_called_once_with(
task, ipxe_enabled=True)
cache_mock.assert_called_once_with(task, image_info,
ipxe_enabled=True)
provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
switch_pxe_config_mock.assert_called_once_with(
pxe_config_path, "30212642-09d3-467f-8e09-21685826ab50",
'uefi', False, False, False, False, ipxe_enabled=True,
anaconda_boot=False)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.PXE,
persistent=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_netboot_bios(
self, get_image_info_mock, cache_mock,
dhcp_factory_mock, switch_pxe_config_mock,
set_boot_device_mock):
provider_mock = mock.MagicMock()
dhcp_factory_mock.return_value = provider_mock
image_info = {'kernel': ('', '/path/to/kernel'),
'ramdisk': ('', '/path/to/ramdisk')}
instance_info = {"boot_option": "netboot",
"boot_mode": "bios"}
get_image_info_mock.return_value = image_info
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.properties['capabilities'] = 'boot_mode:bios'
task.node.instance_info['capabilities'] = instance_info
task.node.driver_internal_info['root_uuid_or_disk_id'] = (
"30212642-09d3-467f-8e09-21685826ab50")
task.node.driver_internal_info['is_whole_disk_image'] = False
dhcp_opts = pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True)
dhcp_opts += pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True, ip_version=6)
pxe_config_path = pxe_utils.get_pxe_config_file_path(
task.node.uuid, ipxe_enabled=True)
task.driver.boot.prepare_instance(task)
get_image_info_mock.assert_called_once_with(
task, ipxe_enabled=True)
cache_mock.assert_called_once_with(task, image_info,
ipxe_enabled=True)
provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
switch_pxe_config_mock.assert_called_once_with(
pxe_config_path, "30212642-09d3-467f-8e09-21685826ab50",
'bios', False, False, False, False, ipxe_enabled=True,
anaconda_boot=False)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.PXE,
persistent=True)
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_ramdisk(
self, get_image_info_mock, cache_mock,
dhcp_factory_mock, switch_pxe_config_mock,
set_boot_device_mock, mock_create_pxe_config):
provider_mock = mock.MagicMock()
dhcp_factory_mock.return_value = provider_mock
image_info = {'kernel': ('', '/path/to/kernel'),
'ramdisk': ('', '/path/to/ramdisk')}
i_info_caps = {"boot_option": "ramdisk",
"boot_mode": "bios"}
kernel_arg = "meow"
get_image_info_mock.return_value = image_info
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.properties['capabilities'] = 'boot_mode:bios'
i_info = task.node.instance_info
i_info['capabilities'] = i_info_caps
i_info['kernel_append_params'] = kernel_arg
task.node.instance_info = i_info
task.node.save()
dhcp_opts = pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True)
dhcp_opts += pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True, ip_version=6)
pxe_config_path = pxe_utils.get_pxe_config_file_path(
task.node.uuid, ipxe_enabled=True)
task.driver.boot.prepare_instance(task)
get_image_info_mock.assert_called_once_with(
task, ipxe_enabled=True)
cache_mock.assert_called_once_with(task, image_info,
ipxe_enabled=True)
provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
switch_pxe_config_mock.assert_called_once_with(
pxe_config_path, None,
'bios', False, iscsi_boot=False, ramdisk_boot=True,
ipxe_enabled=True, anaconda_boot=False)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.PXE,
persistent=True)
expected_params = {
'aki_path': 'http://myserver/' + task.node.uuid + '/kernel',
'ari_path': 'http://myserver/' + task.node.uuid + '/ramdisk',
'pxe_append_params': 'meow ipa-debug=1 ipa-global-request-id'
'=' + task.context.request_id,
'tftp_server': mock.ANY,
'ipxe_timeout': 0}
mock_create_pxe_config.assert_called_once_with(
task, expected_params, mock.ANY, ipxe_enabled=True)
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_ramdisk_bios(
self, get_image_info_mock, cache_mock,
dhcp_factory_mock, switch_pxe_config_mock,
set_boot_device_mock, mock_create_pxe_config):
provider_mock = mock.MagicMock()
dhcp_factory_mock.return_value = provider_mock
image_info = {'kernel': ('', '/path/to/kernel'),
'ramdisk': ('', '/path/to/ramdisk')}
i_info_caps = {"boot_option": "ramdisk"}
kernel_arg = "meow"
get_image_info_mock.return_value = image_info
with task_manager.acquire(self.context, self.node.uuid) as task:
i_info = task.node.instance_info
i_info['capabilities'] = i_info_caps
i_info['kernel_append_params'] = kernel_arg
task.node.instance_info = i_info
task.node.save()
dhcp_opts = pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True)
dhcp_opts += pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True, ip_version=6)
pxe_config_path = pxe_utils.get_pxe_config_file_path(
task.node.uuid, ipxe_enabled=True)
task.driver.boot.prepare_instance(task)
get_image_info_mock.assert_called_once_with(
task, ipxe_enabled=True)
cache_mock.assert_called_once_with(task, image_info,
ipxe_enabled=True)
provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
switch_pxe_config_mock.assert_called_once_with(
pxe_config_path, None,
'uefi', False, iscsi_boot=False, ramdisk_boot=True,
ipxe_enabled=True, anaconda_boot=False)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.PXE,
persistent=True)
expected_params = {
'aki_path': 'http://myserver/' + task.node.uuid + '/kernel',
'ari_path': 'http://myserver/' + task.node.uuid + '/ramdisk',
'pxe_append_params': 'meow ipa-debug=1 ipa-global-request-id'
'=' + task.context.request_id,
'tftp_server': mock.ANY,
'ipxe_timeout': 0}
mock_create_pxe_config.assert_called_once_with(
task, expected_params, mock.ANY, ipxe_enabled=True)
@mock.patch('os.path.isfile', return_value=False, autospec=True)
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_netboot_active(
self, get_image_info_mock, cache_mock,
dhcp_factory_mock, switch_pxe_config_mock,
set_boot_device_mock, create_pxe_config_mock, isfile_mock):
provider_mock = mock.MagicMock()
dhcp_factory_mock.return_value = provider_mock
image_info = {'kernel': ('', '/path/to/kernel'),
'ramdisk': ('', '/path/to/ramdisk')}
instance_info = {"boot_option": "netboot"}
get_image_info_mock.return_value = image_info
self.node.provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.properties['capabilities'] = 'boot_mode:bios'
task.node.instance_info['capabilities'] = instance_info
task.node.driver_internal_info['root_uuid_or_disk_id'] = (
"30212642-09d3-467f-8e09-21685826ab50")
task.node.driver_internal_info['is_whole_disk_image'] = False
dhcp_opts = pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True)
dhcp_opts += pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True, ip_version=6)
pxe_config_path = pxe_utils.get_pxe_config_file_path(
task.node.uuid, ipxe_enabled=True)
task.driver.boot.prepare_instance(task)
get_image_info_mock.assert_called_once_with(
task, ipxe_enabled=True)
cache_mock.assert_called_once_with(task, image_info,
ipxe_enabled=True)
provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
create_pxe_config_mock.assert_called_once_with(
task, mock.ANY, CONF.pxe.ipxe_config_template,
ipxe_enabled=True)
switch_pxe_config_mock.assert_called_once_with(
pxe_config_path, "30212642-09d3-467f-8e09-21685826ab50",
'bios', False, False, False, False, ipxe_enabled=True,
anaconda_boot=False)
self.assertFalse(set_boot_device_mock.called)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_netboot_missing_root_uuid(
self, get_image_info_mock, cache_mock,
dhcp_factory_mock, switch_pxe_config_mock,
set_boot_device_mock):
provider_mock = mock.MagicMock()
dhcp_factory_mock.return_value = provider_mock
image_info = {'kernel': ('', '/path/to/kernel'),
'ramdisk': ('', '/path/to/ramdisk')}
get_image_info_mock.return_value = image_info
instance_info = {"boot_option": "netboot"}
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.properties['capabilities'] = 'boot_mode:bios'
task.node.instance_info['capabilities'] = instance_info
task.node.driver_internal_info['is_whole_disk_image'] = False
dhcp_opts = pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True, ip_version=4)
dhcp_opts += pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True, ip_version=6)
task.driver.boot.prepare_instance(task)
get_image_info_mock.assert_called_once_with(
task, ipxe_enabled=True)
cache_mock.assert_called_once_with(task, image_info,
ipxe_enabled=True)
provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
self.assertFalse(switch_pxe_config_mock.called)
self.assertFalse(set_boot_device_mock.called)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_netboot_missing_root_uuid_default(
self, get_image_info_mock, cache_mock,
dhcp_factory_mock, switch_pxe_config_mock,
set_boot_device_mock):
provider_mock = mock.MagicMock()
dhcp_factory_mock.return_value = provider_mock
image_info = {'kernel': ('', '/path/to/kernel'),
'ramdisk': ('', '/path/to/ramdisk')}
get_image_info_mock.return_value = image_info
instance_info = self.node.instance_info
instance_info['capabilities'] = {"boot_option": "netboot"}
self.node.instance_info = instance_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.driver_internal_info['is_whole_disk_image'] = False
dhcp_opts = pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True, ip_version=4)
dhcp_opts += pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True, ip_version=6)
task.driver.boot.prepare_instance(task)
get_image_info_mock.assert_called_once_with(
task, ipxe_enabled=True)
cache_mock.assert_called_once_with(task, image_info,
ipxe_enabled=True)
provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
self.assertFalse(switch_pxe_config_mock.called)
self.assertFalse(set_boot_device_mock.called)
# NOTE(TheJulia): The log mock below is attached to the iPXE interface
# which directly logs the warning that is being checked for.
@mock.patch.object(pxe_base.LOG, 'warning', autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_whole_disk_image_missing_root_uuid(
self, get_image_info_mock, cache_mock,
dhcp_factory_mock, set_boot_device_mock,
clean_up_pxe_mock, log_mock):
provider_mock = mock.MagicMock()
dhcp_factory_mock.return_value = provider_mock
get_image_info_mock.return_value = {}
instance_info = {"boot_option": "netboot",
"boot_mode": "bios"}
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.properties['capabilities'] = 'boot_mode:bios'
task.node.instance_info['capabilities'] = instance_info
task.node.driver_internal_info['is_whole_disk_image'] = True
dhcp_opts = pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True)
dhcp_opts += pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True, ip_version=6)
task.driver.boot.prepare_instance(task)
get_image_info_mock.assert_called_once_with(
task, ipxe_enabled=True)
cache_mock.assert_called_once_with(task, {}, ipxe_enabled=True)
provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
self.assertTrue(log_mock.called)
clean_up_pxe_mock.assert_called_once_with(task, ipxe_enabled=True)
set_boot_device_mock.assert_called_once_with(
task, boot_devices.DISK, persistent=True)
@mock.patch('os.path.isfile', lambda filename: False)
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
@mock.patch.object(deploy_utils, 'is_iscsi_boot', lambda task: True)
@mock.patch.object(noop_storage.NoopStorage, 'should_write_image',
lambda task: False)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_netboot_iscsi_bios(
self, get_image_info_mock, cache_mock,
dhcp_factory_mock, switch_pxe_config_mock,
set_boot_device_mock, create_pxe_config_mock):
http_url = 'http://192.1.2.3:1234'
self.config(http_url=http_url, group='deploy')
provider_mock = mock.MagicMock()
dhcp_factory_mock.return_value = provider_mock
vol_id = uuidutils.generate_uuid()
obj_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=0, volume_id='1234', uuid=vol_id,
properties={'target_lun': 0,
'target_portal': 'fake_host:3260',
'target_iqn': 'fake_iqn',
'auth_username': 'fake_username',
'auth_password': 'fake_password'})
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.driver_internal_info = {
'boot_from_volume': vol_id}
task.node.properties['capabilities'] = 'boot_mode:bios'
task.node.instance_info['capabilities'] = {'boot_mode': 'bios'}
dhcp_opts = pxe_utils.dhcp_options_for_instance(task,
ipxe_enabled=True)
dhcp_opts += pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True, ip_version=6)
pxe_config_path = pxe_utils.get_pxe_config_file_path(
task.node.uuid, ipxe_enabled=True)
task.driver.boot.prepare_instance(task)
self.assertFalse(get_image_info_mock.called)
self.assertFalse(cache_mock.called)
provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
create_pxe_config_mock.assert_called_once_with(
task, mock.ANY, CONF.pxe.ipxe_config_template,
ipxe_enabled=True)
switch_pxe_config_mock.assert_called_once_with(
pxe_config_path, None, boot_modes.LEGACY_BIOS, False,
ipxe_enabled=True, iscsi_boot=True, ramdisk_boot=False,
anaconda_boot=False)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.PXE,
persistent=True)
@mock.patch('os.path.isfile', lambda filename: False)
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
@mock.patch.object(deploy_utils, 'is_iscsi_boot', lambda task: True)
@mock.patch.object(noop_storage.NoopStorage, 'should_write_image',
lambda task: False)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_netboot_iscsi(
self, get_image_info_mock, cache_mock,
dhcp_factory_mock, switch_pxe_config_mock,
set_boot_device_mock, create_pxe_config_mock):
http_url = 'http://192.1.2.3:1234'
self.config(http_url=http_url, group='deploy')
provider_mock = mock.MagicMock()
dhcp_factory_mock.return_value = provider_mock
vol_id = uuidutils.generate_uuid()
obj_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=0, volume_id='1234', uuid=vol_id,
properties={'target_lun': 0,
'target_portal': 'fake_host:3260',
'target_iqn': 'fake_iqn',
'auth_username': 'fake_username',
'auth_password': 'fake_password'})
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.driver_internal_info = {
'boot_from_volume': vol_id}
dhcp_opts = pxe_utils.dhcp_options_for_instance(task,
ipxe_enabled=True)
dhcp_opts += pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True, ip_version=6)
pxe_config_path = pxe_utils.get_pxe_config_file_path(
task.node.uuid, ipxe_enabled=True)
task.driver.boot.prepare_instance(task)
self.assertFalse(get_image_info_mock.called)
self.assertFalse(cache_mock.called)
provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
create_pxe_config_mock.assert_called_once_with(
task, mock.ANY, CONF.pxe.ipxe_config_template,
ipxe_enabled=True)
switch_pxe_config_mock.assert_called_once_with(
pxe_config_path, None, boot_modes.UEFI, False,
ipxe_enabled=True, iscsi_boot=True, ramdisk_boot=False,
anaconda_boot=False)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.PXE,
persistent=True)
@mock.patch('os.path.isfile', lambda filename: False)
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_netboot_ramdisk(
self, get_image_info_mock, cache_mock,
dhcp_factory_mock, switch_pxe_config_mock,
set_boot_device_mock, create_pxe_config_mock):
http_url = 'http://192.1.2.3:1234'
self.config(http_url=http_url, group='deploy')
provider_mock = mock.MagicMock()
dhcp_factory_mock.return_value = provider_mock
self.node.instance_info = {'boot_iso': 'http://1.2.3.4:1234/boot.iso',
'capabilities': {'boot_option': 'ramdisk'}}
image_info = {'kernel': ('', '/path/to/kernel'),
'deploy_kernel': ('', '/path/to/kernel'),
'ramdisk': ('', '/path/to/ramdisk'),
'deploy_ramdisk': ('', '/path/to/ramdisk')}
get_image_info_mock.return_value = image_info
self.node.provision_state = states.DEPLOYING
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
dhcp_opts = pxe_utils.dhcp_options_for_instance(task,
ipxe_enabled=True)
dhcp_opts += pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True, ip_version=6)
pxe_config_path = pxe_utils.get_pxe_config_file_path(
task.node.uuid, ipxe_enabled=True)
task.driver.boot.prepare_instance(task)
self.assertTrue(get_image_info_mock.called)
self.assertTrue(cache_mock.called)
provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
create_pxe_config_mock.assert_called_once_with(
task, mock.ANY, CONF.pxe.ipxe_config_template,
ipxe_enabled=True)
switch_pxe_config_mock.assert_called_once_with(
pxe_config_path, None, boot_modes.UEFI, False,
ipxe_enabled=True, iscsi_boot=False, ramdisk_boot=True,
anaconda_boot=False)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.PXE,
persistent=True)
@mock.patch('os.path.isfile', lambda filename: False)
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_netboot_ramdisk_with_kernel_arg(
self, get_image_info_mock, cache_mock,
dhcp_factory_mock, switch_pxe_config_mock,
set_boot_device_mock, create_pxe_config_mock):
http_url = 'http://192.1.2.3:1234'
self.config(http_url=http_url, group='deploy')
self.config(enabled_deploy_interfaces='ramdisk')
provider_mock = mock.MagicMock()
dhcp_factory_mock.return_value = provider_mock
self.node.instance_info = {'ramdisk_kernel_arguments': 'cat meow'}
image_info = {'kernel': ('', '/path/to/kernel'),
'deploy_kernel': ('', '/path/to/kernel'),
'ramdisk': ('', '/path/to/ramdisk'),
'deploy_ramdisk': ('', '/path/to/ramdisk')}
get_image_info_mock.return_value = image_info
self.node.provision_state = states.DEPLOYING
self.node.deploy_interface = 'ramdisk'
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
dhcp_opts = pxe_utils.dhcp_options_for_instance(task,
ipxe_enabled=True)
dhcp_opts += pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True, ip_version=6)
pxe_config_path = pxe_utils.get_pxe_config_file_path(
task.node.uuid, ipxe_enabled=True)
task.driver.boot.prepare_instance(task)
self.assertTrue(get_image_info_mock.called)
self.assertTrue(cache_mock.called)
uuid = self.node.uuid
expected_params = {
'aki_path': 'http://192.1.2.3:1234/' + uuid + '/kernel',
'ari_path': 'http://192.1.2.3:1234/' + uuid + '/ramdisk',
'ramdisk_opts': 'cat meow',
'pxe_append_params': 'nofb nomodeset vga=normal ipa-debug=1 '
'ipa-global-request-'
'id=' + task.context.request_id,
'tftp_server': mock.ANY,
'ipxe_timeout': 0
}
provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
create_pxe_config_mock.assert_called_once_with(
task, expected_params, CONF.pxe.ipxe_config_template,
ipxe_enabled=True)
switch_pxe_config_mock.assert_called_once_with(
pxe_config_path, None, boot_modes.UEFI, False,
ipxe_enabled=True, iscsi_boot=False, ramdisk_boot=True,
anaconda_boot=False)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.PXE,
persistent=True)
@mock.patch.object(boot_mode_utils, 'configure_secure_boot_if_needed',
autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
def test_prepare_instance_localboot(self, clean_up_pxe_config_mock,
set_boot_device_mock,
secure_boot_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
instance_info = task.node.instance_info
instance_info['capabilities'] = {'boot_option': 'local'}
task.node.instance_info = instance_info
task.node.save()
task.driver.boot.prepare_instance(task)
clean_up_pxe_config_mock.assert_called_once_with(
task, ipxe_enabled=True)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.DISK,
persistent=True)
secure_boot_mock.assert_called_once_with(task)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
def test_prepare_instance_localboot_active(self, clean_up_pxe_config_mock,
set_boot_device_mock):
self.node.provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
instance_info = task.node.instance_info
instance_info['capabilities'] = {'boot_option': 'local'}
task.node.instance_info = instance_info
task.node.save()
task.driver.boot.prepare_instance(task)
clean_up_pxe_config_mock.assert_called_once_with(
task, ipxe_enabled=True)
self.assertFalse(set_boot_device_mock.called)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_localboot_with_fallback(
self, get_image_info_mock, cache_mock,
dhcp_factory_mock, switch_pxe_config_mock,
clean_up_pxe_config_mock, set_boot_device_mock):
self.config(enable_netboot_fallback=True, group='pxe')
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.instance_info = task.node.instance_info
task.node.instance_info['capabilities'] = {'boot_option': 'local'}
task.node.driver_internal_info['root_uuid_or_disk_id'] = (
"30212642-09d3-467f-8e09-21685826ab50")
task.node.driver_internal_info['is_whole_disk_image'] = False
pxe_config_path = pxe_utils.get_pxe_config_file_path(
task.node.uuid, ipxe_enabled=True)
task.driver.boot.prepare_instance(task)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.DISK,
persistent=True)
switch_pxe_config_mock.assert_called_once_with(
pxe_config_path, "30212642-09d3-467f-8e09-21685826ab50",
'uefi', True, False, False, False, ipxe_enabled=True,
anaconda_boot=False)
# No clean up
self.assertFalse(clean_up_pxe_config_mock.called)
# No netboot configuration beyond the PXE files
self.assertFalse(get_image_info_mock.called)
self.assertFalse(cache_mock.called)
self.assertFalse(dhcp_factory_mock.return_value.update_dhcp.called)
@mock.patch.object(boot_mode_utils, 'deconfigure_secure_boot_if_needed',
autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_env', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_clean_up_instance(self, get_image_info_mock,
clean_up_pxe_env_mock,
secure_boot_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
image_info = {'kernel': ['', '/path/to/kernel'],
'ramdisk': ['', '/path/to/ramdisk']}
get_image_info_mock.return_value = image_info
task.driver.boot.clean_up_instance(task)
clean_up_pxe_env_mock.assert_called_once_with(
task, image_info, ipxe_enabled=True)
get_image_info_mock.assert_called_once_with(
task, ipxe_enabled=True)
secure_boot_mock.assert_called_once_with(task)
@mock.patch.object(ipxe.iPXEBoot, '__init__', lambda self: None)
class iPXEValidateRescueTestCase(db_base.DbTestCase):
def setUp(self):
super(iPXEValidateRescueTestCase, self).setUp()
for iface in drivers_base.ALL_INTERFACES:
impl = 'fake'
if iface == 'network':
impl = 'flat'
if iface == 'rescue':
impl = 'agent'
if iface == 'boot':
impl = 'ipxe'
config_kwarg = {'enabled_%s_interfaces' % iface: [impl],
'default_%s_interface' % iface: impl}
self.config(**config_kwarg)
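            # For example, on the 'boot' pass this amounts to
            # self.config(enabled_boot_interfaces=['ipxe'], default_boot_interface='ipxe'),
            # so the node created below defaults to the iPXE boot interface.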
self.config(enabled_hardware_types=['fake-hardware'])
driver_info = DRV_INFO_DICT
driver_info.update({'rescue_ramdisk': 'my_ramdisk',
'rescue_kernel': 'my_kernel'})
instance_info = INST_INFO_DICT
instance_info.update({'rescue_password': 'password'})
n = {
'driver': 'fake-hardware',
'instance_info': instance_info,
'driver_info': driver_info,
'driver_internal_info': DRV_INTERNAL_INFO_DICT,
}
self.node = obj_utils.create_test_node(self.context, **n)
def test_validate_rescue(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.boot.validate_rescue(task)
def test_validate_rescue_no_rescue_ramdisk(self):
driver_info = self.node.driver_info
del driver_info['rescue_ramdisk']
self.node.driver_info = driver_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaisesRegex(exception.MissingParameterValue,
'Missing.*rescue_ramdisk',
task.driver.boot.validate_rescue, task)
def test_validate_rescue_fails_no_rescue_kernel(self):
driver_info = self.node.driver_info
del driver_info['rescue_kernel']
self.node.driver_info = driver_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaisesRegex(exception.MissingParameterValue,
'Missing.*rescue_kernel',
task.driver.boot.validate_rescue, task)
| 51.926698
| 79
| 0.635333
|
ce53db6900caf72fd8cdb5a89e4d1c86ab3f9a2c
| 14,997
|
py
|
Python
|
lib/axis/tb/test_axis_switch_4x4_64.py
|
junganghu/verilog-ethernet
|
cd6b87e984ff7cbeaf11f9468124019f5e654bdb
|
[
"MIT"
] | 1
|
2021-04-29T08:37:07.000Z
|
2021-04-29T08:37:07.000Z
|
lib/axis/tb/test_axis_switch_4x4_64.py
|
zslwyuan/verilog-ethernet
|
cd6b87e984ff7cbeaf11f9468124019f5e654bdb
|
[
"MIT"
] | null | null | null |
lib/axis/tb/test_axis_switch_4x4_64.py
|
zslwyuan/verilog-ethernet
|
cd6b87e984ff7cbeaf11f9468124019f5e654bdb
|
[
"MIT"
] | 1
|
2021-09-25T05:45:18.000Z
|
2021-09-25T05:45:18.000Z
|
#!/usr/bin/env python
"""
Copyright (c) 2016-2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
import math
module = 'axis_switch'
testbench = 'test_%s_4x4_64' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("../rtl/axis_register.v")
srcs.append("../rtl/arbiter.v")
srcs.append("../rtl/priority_encoder.v")
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
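# With the names above this expands to roughly:
#   iverilog -o test_axis_switch_4x4_64.vvp ../rtl/axis_switch.v ../rtl/axis_register.v \
#            ../rtl/arbiter.v ../rtl/priority_encoder.v test_axis_switch_4x4_64.v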
def bench():
# Parameters
S_COUNT = 4
M_COUNT = 4
DATA_WIDTH = 64
KEEP_ENABLE = (DATA_WIDTH>8)
    KEEP_WIDTH = (DATA_WIDTH//8)  # integer division so the intbv slice width stays an int under Python 3
ID_ENABLE = 1
ID_WIDTH = 8
DEST_WIDTH = math.ceil(math.log(M_COUNT+1, 2))
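    # With M_COUNT = 4 this evaluates to ceil(log2(5)) = 3 bits, wide enough to
    # address every output port and to carry the out-of-range destinations (4 and 5)
    # used in the bad-decoding test below.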
USER_ENABLE = 1
USER_WIDTH = 1
M_BASE = [0, 1, 2, 3]
M_TOP = [0, 1, 2, 3]
M_CONNECT = [0b1111]*M_COUNT
S_REG_TYPE = 0
M_REG_TYPE = 2
ARB_TYPE = "ROUND_ROBIN"
LSB_PRIORITY = "HIGH"
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
s_axis_tdata_list = [Signal(intbv(0)[DATA_WIDTH:]) for i in range(S_COUNT)]
s_axis_tkeep_list = [Signal(intbv(1)[KEEP_WIDTH:]) for i in range(S_COUNT)]
s_axis_tvalid_list = [Signal(bool(0)) for i in range(S_COUNT)]
s_axis_tlast_list = [Signal(bool(0)) for i in range(S_COUNT)]
s_axis_tid_list = [Signal(intbv(0)[ID_WIDTH:]) for i in range(S_COUNT)]
s_axis_tdest_list = [Signal(intbv(0)[DEST_WIDTH:]) for i in range(S_COUNT)]
s_axis_tuser_list = [Signal(intbv(0)[USER_WIDTH:]) for i in range(S_COUNT)]
s_axis_tdata = ConcatSignal(*reversed(s_axis_tdata_list))
s_axis_tkeep = ConcatSignal(*reversed(s_axis_tkeep_list))
s_axis_tvalid = ConcatSignal(*reversed(s_axis_tvalid_list))
s_axis_tlast = ConcatSignal(*reversed(s_axis_tlast_list))
s_axis_tid = ConcatSignal(*reversed(s_axis_tid_list))
s_axis_tdest = ConcatSignal(*reversed(s_axis_tdest_list))
s_axis_tuser = ConcatSignal(*reversed(s_axis_tuser_list))
m_axis_tready_list = [Signal(bool(0)) for i in range(M_COUNT)]
m_axis_tready = ConcatSignal(*reversed(m_axis_tready_list))
# Outputs
s_axis_tready = Signal(intbv(0)[S_COUNT:])
s_axis_tready_list = [s_axis_tready(i) for i in range(S_COUNT)]
m_axis_tdata = Signal(intbv(0)[M_COUNT*DATA_WIDTH:])
m_axis_tkeep = Signal(intbv(0xf)[M_COUNT*KEEP_WIDTH:])
m_axis_tvalid = Signal(intbv(0)[M_COUNT:])
m_axis_tlast = Signal(intbv(0)[M_COUNT:])
m_axis_tid = Signal(intbv(0)[M_COUNT*ID_WIDTH:])
m_axis_tdest = Signal(intbv(0)[M_COUNT*DEST_WIDTH:])
m_axis_tuser = Signal(intbv(0)[M_COUNT*USER_WIDTH:])
m_axis_tdata_list = [m_axis_tdata((i+1)*DATA_WIDTH, i*DATA_WIDTH) for i in range(M_COUNT)]
m_axis_tkeep_list = [m_axis_tkeep((i+1)*KEEP_WIDTH, i*KEEP_WIDTH) for i in range(M_COUNT)]
m_axis_tvalid_list = [m_axis_tvalid(i) for i in range(M_COUNT)]
m_axis_tlast_list = [m_axis_tlast(i) for i in range(M_COUNT)]
m_axis_tid_list = [m_axis_tid((i+1)*ID_WIDTH, i*ID_WIDTH) for i in range(M_COUNT)]
m_axis_tdest_list = [m_axis_tdest((i+1)*DEST_WIDTH, i*DEST_WIDTH) for i in range(M_COUNT)]
m_axis_tuser_list = [m_axis_tuser((i+1)*USER_WIDTH, i*USER_WIDTH) for i in range(M_COUNT)]
# sources and sinks
source_pause_list = []
source_list = []
source_logic_list = []
sink_pause_list = []
sink_list = []
sink_logic_list = []
for k in range(S_COUNT):
s = axis_ep.AXIStreamSource()
p = Signal(bool(0))
source_list.append(s)
source_pause_list.append(p)
source_logic_list.append(s.create_logic(
clk,
rst,
tdata=s_axis_tdata_list[k],
tkeep=s_axis_tkeep_list[k],
tvalid=s_axis_tvalid_list[k],
tready=s_axis_tready_list[k],
tlast=s_axis_tlast_list[k],
tid=s_axis_tid_list[k],
tdest=s_axis_tdest_list[k],
tuser=s_axis_tuser_list[k],
pause=p,
name='source_%d' % k
))
for k in range(M_COUNT):
s = axis_ep.AXIStreamSink()
p = Signal(bool(0))
sink_list.append(s)
sink_pause_list.append(p)
sink_logic_list.append(s.create_logic(
clk,
rst,
tdata=m_axis_tdata_list[k],
tkeep=m_axis_tkeep_list[k],
tvalid=m_axis_tvalid_list[k],
tready=m_axis_tready_list[k],
tlast=m_axis_tlast_list[k],
tid=m_axis_tid_list[k],
tdest=m_axis_tdest_list[k],
tuser=m_axis_tuser_list[k],
pause=p,
name='sink_%d' % k
))
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
s_axis_tdata=s_axis_tdata,
s_axis_tkeep=s_axis_tkeep,
s_axis_tvalid=s_axis_tvalid,
s_axis_tready=s_axis_tready,
s_axis_tlast=s_axis_tlast,
s_axis_tid=s_axis_tid,
s_axis_tdest=s_axis_tdest,
s_axis_tuser=s_axis_tuser,
m_axis_tdata=m_axis_tdata,
m_axis_tkeep=m_axis_tkeep,
m_axis_tvalid=m_axis_tvalid,
m_axis_tready=m_axis_tready,
m_axis_tlast=m_axis_tlast,
m_axis_tid=m_axis_tid,
m_axis_tdest=m_axis_tdest,
m_axis_tuser=m_axis_tuser
)
@always(delay(4))
def clkgen():
clk.next = not clk
def wait_normal():
while s_axis_tvalid:
yield clk.posedge
def wait_pause_source():
while s_axis_tvalid:
source_pause_list[0].next = True
source_pause_list[1].next = True
source_pause_list[2].next = True
source_pause_list[3].next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
source_pause_list[0].next = False
source_pause_list[1].next = False
source_pause_list[2].next = False
source_pause_list[3].next = False
yield clk.posedge
def wait_pause_sink():
while s_axis_tvalid:
sink_pause_list[0].next = True
sink_pause_list[1].next = True
sink_pause_list[2].next = True
sink_pause_list[3].next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
sink_pause_list[0].next = False
sink_pause_list[1].next = False
sink_pause_list[2].next = False
sink_pause_list[3].next = False
yield clk.posedge
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
# testbench stimulus
yield clk.posedge
print("test 1: 0123 -> 0123")
current_test.next = 1
test_frame0 = axis_ep.AXIStreamFrame(b'\x01\x00\x00\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=0, dest=0)
test_frame1 = axis_ep.AXIStreamFrame(b'\x01\x01\x01\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=1, dest=1)
test_frame2 = axis_ep.AXIStreamFrame(b'\x01\x02\x02\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=2, dest=2)
test_frame3 = axis_ep.AXIStreamFrame(b'\x01\x03\x03\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=3, dest=3)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source_list[0].send(test_frame0)
source_list[1].send(test_frame1)
source_list[2].send(test_frame2)
source_list[3].send(test_frame3)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink_list[0].wait()
rx_frame0 = sink_list[0].recv()
assert rx_frame0 == test_frame0
yield sink_list[1].wait()
rx_frame1 = sink_list[1].recv()
assert rx_frame1 == test_frame1
yield sink_list[2].wait()
rx_frame2 = sink_list[2].recv()
assert rx_frame2 == test_frame2
yield sink_list[3].wait()
rx_frame3 = sink_list[3].recv()
assert rx_frame3 == test_frame3
yield delay(100)
yield clk.posedge
print("test 2: 0123 -> 3210")
current_test.next = 2
test_frame0 = axis_ep.AXIStreamFrame(b'\x02\x00\x03\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=0, dest=3)
test_frame1 = axis_ep.AXIStreamFrame(b'\x02\x01\x02\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=1, dest=2)
test_frame2 = axis_ep.AXIStreamFrame(b'\x02\x02\x01\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=2, dest=1)
test_frame3 = axis_ep.AXIStreamFrame(b'\x02\x03\x00\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=3, dest=0)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source_list[0].send(test_frame0)
source_list[1].send(test_frame1)
source_list[2].send(test_frame2)
source_list[3].send(test_frame3)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink_list[0].wait()
rx_frame0 = sink_list[0].recv()
assert rx_frame0 == test_frame3
yield sink_list[1].wait()
rx_frame1 = sink_list[1].recv()
assert rx_frame1 == test_frame2
yield sink_list[2].wait()
rx_frame2 = sink_list[2].recv()
assert rx_frame2 == test_frame1
yield sink_list[3].wait()
rx_frame3 = sink_list[3].recv()
assert rx_frame3 == test_frame0
yield delay(100)
yield clk.posedge
print("test 3: 0000 -> 0123")
current_test.next = 3
test_frame0 = axis_ep.AXIStreamFrame(b'\x02\x00\x00\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=0, dest=0)
test_frame1 = axis_ep.AXIStreamFrame(b'\x02\x00\x01\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=0, dest=1)
test_frame2 = axis_ep.AXIStreamFrame(b'\x02\x00\x02\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=0, dest=2)
test_frame3 = axis_ep.AXIStreamFrame(b'\x02\x00\x03\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=0, dest=3)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source_list[0].send(test_frame0)
source_list[0].send(test_frame1)
source_list[0].send(test_frame2)
source_list[0].send(test_frame3)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink_list[0].wait()
rx_frame0 = sink_list[0].recv()
assert rx_frame0 == test_frame0
yield sink_list[1].wait()
rx_frame1 = sink_list[1].recv()
assert rx_frame1 == test_frame1
yield sink_list[2].wait()
rx_frame2 = sink_list[2].recv()
assert rx_frame2 == test_frame2
yield sink_list[3].wait()
rx_frame3 = sink_list[3].recv()
assert rx_frame3 == test_frame3
yield delay(100)
yield clk.posedge
print("test 4: 0123 -> 0000")
current_test.next = 4
test_frame0 = axis_ep.AXIStreamFrame(b'\x02\x00\x00\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=0, dest=0)
test_frame1 = axis_ep.AXIStreamFrame(b'\x02\x01\x00\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=1, dest=0)
test_frame2 = axis_ep.AXIStreamFrame(b'\x02\x02\x00\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=2, dest=0)
test_frame3 = axis_ep.AXIStreamFrame(b'\x02\x03\x00\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=3, dest=0)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source_list[0].send(test_frame0)
yield clk.posedge
source_list[1].send(test_frame1)
source_list[2].send(test_frame2)
source_list[3].send(test_frame3)
yield clk.posedge
yield wait()
yield sink_list[0].wait()
rx_frame0 = sink_list[0].recv()
assert rx_frame0 == test_frame0
yield sink_list[0].wait()
rx_frame1 = sink_list[0].recv()
assert rx_frame1 == test_frame1
yield sink_list[0].wait()
rx_frame2 = sink_list[0].recv()
assert rx_frame2 == test_frame2
yield sink_list[0].wait()
rx_frame3 = sink_list[0].recv()
assert rx_frame3 == test_frame3
yield delay(100)
yield clk.posedge
print("test 1: bad decoding")
current_test.next = 1
test_frame0 = axis_ep.AXIStreamFrame(b'\x01\x00\x00\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=0, dest=0)
test_frame1 = axis_ep.AXIStreamFrame(b'\x01\x01\x01\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=1, dest=1)
test_frame2 = axis_ep.AXIStreamFrame(b'\x01\x02\x04\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=2, dest=4)
test_frame3 = axis_ep.AXIStreamFrame(b'\x01\x03\x05\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=3, dest=5)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source_list[0].send(test_frame0)
source_list[1].send(test_frame1)
source_list[2].send(test_frame2)
source_list[3].send(test_frame3)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink_list[0].wait()
rx_frame0 = sink_list[0].recv()
assert rx_frame0 == test_frame0
yield sink_list[1].wait()
rx_frame1 = sink_list[1].recv()
assert rx_frame1 == test_frame1
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
| 33.179204
| 111
| 0.624325
|
0876e221d0f9e16669aa3f8f008cf68a1b12568d
| 1,130
|
py
|
Python
|
eduu/plugins/hastebin.py
|
Keys-007/EduuRobot
|
13d75fee3b3c08cd6f1f0c6dcec5df0f542ba851
|
[
"MIT"
] | 1
|
2022-01-01T07:19:42.000Z
|
2022-01-01T07:19:42.000Z
|
eduu/plugins/hastebin.py
|
Keys-007/EduuRobot
|
13d75fee3b3c08cd6f1f0c6dcec5df0f542ba851
|
[
"MIT"
] | null | null | null |
eduu/plugins/hastebin.py
|
Keys-007/EduuRobot
|
13d75fee3b3c08cd6f1f0c6dcec5df0f542ba851
|
[
"MIT"
] | null | null | null |
# SPDX-License-Identifier: MIT
# Copyright (c) 2018-2022 Amano Team
from pyrogram import Client, filters
from pyrogram.types import Message
from eduu.config import prefix
from eduu.utils import commands, http
from eduu.utils.localization import use_chat_lang
@Client.on_message(filters.command("hastebin", prefix))
@use_chat_lang(context="pastes")
async def hastebin(c: Client, m: Message, strings):
if m.reply_to_message:
if m.reply_to_message.document:
tfile = m.reply_to_message
to_file = await tfile.download()
with open(to_file, "rb") as fd:
mean = fd.read().decode("UTF-8")
if m.reply_to_message.text:
mean = m.reply_to_message.text
url = "https://hastebin.com/documents"
r = await http.post(url, data=mean.encode("UTF-8"))
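        # hastebin is expected to answer with a small JSON body holding the paste key,
        # e.g. {"key": "abcdef"} (illustrative value only), from which the share URL
        # below is assembled.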
url = f"https://hastebin.com/{r.json()['key']}"
await m.reply_text(url, disable_web_page_preview=True)
else:
await m.reply_text(strings("reply_to_document_or_text"))
commands.add_command(
"hastebin", "tools", "hastebin_description", context_location="pastes"
)
| 32.285714
| 74
| 0.678761
|
2c9ca39645d26c375b32a4b1284c00f94778087f
| 71,280
|
py
|
Python
|
google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py
|
sararob/python-aiplatform
|
e64cd5588848a4dcd9117ff905e9569576541b69
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py
|
sararob/python-aiplatform
|
e64cd5588848a4dcd9117ff905e9569576541b69
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py
|
sararob/python-aiplatform
|
e64cd5588848a4dcd9117ff905e9569576541b69
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation as gac_operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import pagers
from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
from google.cloud.aiplatform_v1beta1.types import specialist_pool
from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool
from google.cloud.aiplatform_v1beta1.types import specialist_pool_service
from google.cloud.location import locations_pb2 # type: ignore
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import SpecialistPoolServiceGrpcTransport
from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport
class SpecialistPoolServiceClientMeta(type):
"""Metaclass for the SpecialistPoolService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[SpecialistPoolServiceTransport]]
_transport_registry["grpc"] = SpecialistPoolServiceGrpcTransport
_transport_registry["grpc_asyncio"] = SpecialistPoolServiceGrpcAsyncIOTransport
def get_transport_class(
cls,
label: str = None,
) -> Type[SpecialistPoolServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
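    # Hypothetical call for illustration: get_transport_class("grpc") returns
    # SpecialistPoolServiceGrpcTransport, while calling it without a label falls back
    # to the first registered transport (also gRPC here).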
class SpecialistPoolServiceClient(metaclass=SpecialistPoolServiceClientMeta):
"""A service for creating and managing Customer SpecialistPools.
When customers start Data Labeling jobs, they can reuse/create
Specialist Pools to bring their own Specialists to label the
data. Customers can add/remove Managers for the Specialist Pool
on Cloud console, then Managers will get email notifications to
manage Specialists and tasks on CrowdCompute console.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
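    # Sketch of the conversion above (endpoint values are illustrative only):
    #   "aiplatform.googleapis.com"         -> "aiplatform.mtls.googleapis.com"
    #   "aiplatform.sandbox.googleapis.com" -> "aiplatform.mtls.sandbox.googleapis.com"
    #   "localhost:7469"                    -> returned unchanged (no googleapis.com suffix)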
DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SpecialistPoolServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SpecialistPoolServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
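    # Hypothetical usage sketch (the key file name is made up):
    #   client = SpecialistPoolServiceClient.from_service_account_file("svc-key.json")
    # which loads the credentials from the file and forwards them to the constructor.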
@property
def transport(self) -> SpecialistPoolServiceTransport:
"""Returns the transport used by the client instance.
Returns:
SpecialistPoolServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def specialist_pool_path(
project: str,
location: str,
specialist_pool: str,
) -> str:
"""Returns a fully-qualified specialist_pool string."""
return "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(
project=project,
location=location,
specialist_pool=specialist_pool,
)
@staticmethod
def parse_specialist_pool_path(path: str) -> Dict[str, str]:
"""Parses a specialist_pool path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/specialistPools/(?P<specialist_pool>.+?)$",
path,
)
return m.groupdict() if m else {}
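    # Round-trip sketch with made-up identifiers:
    #   specialist_pool_path("my-proj", "us-central1", "pool-1")
    #     -> "projects/my-proj/locations/us-central1/specialistPools/pool-1"
    #   and parse_specialist_pool_path() on that string recovers
    #   {"project": "my-proj", "location": "us-central1", "specialist_pool": "pool-1"}.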
@staticmethod
def common_billing_account_path(
billing_account: str,
) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(
folder: str,
) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(
folder=folder,
)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(
organization: str,
) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(
organization=organization,
)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(
project: str,
) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(
project=project,
)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(
project: str,
location: str,
) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project,
location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
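# A minimal sketch of the resolution logic above (environment values are
# illustrative): with client certificates disabled, no explicit api_endpoint,
# and GOOGLE_API_USE_MTLS_ENDPOINT left at "auto", the regular endpoint is
# returned and the cert source is None.
import os
from google.api_core import client_options as client_options_lib
from google.cloud import aiplatform_v1beta1

os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "false"
os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "auto"
options = client_options_lib.ClientOptions()
endpoint, cert_source = (
    aiplatform_v1beta1.SpecialistPoolServiceClient.get_mtls_endpoint_and_cert_source(options)
)
assert endpoint == aiplatform_v1beta1.SpecialistPoolServiceClient.DEFAULT_ENDPOINT
assert cert_source is None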
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, SpecialistPoolServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the specialist pool service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, SpecialistPoolServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, SpecialistPoolServiceTransport):
# transport is a SpecialistPoolServiceTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def create_specialist_pool(
self,
request: Union[
specialist_pool_service.CreateSpecialistPoolRequest, dict
] = None,
*,
parent: str = None,
specialist_pool: gca_specialist_pool.SpecialistPool = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Creates a SpecialistPool.
.. code-block:: python
from google.cloud import aiplatform_v1beta1
def sample_create_specialist_pool():
# Create a client
client = aiplatform_v1beta1.SpecialistPoolServiceClient()
# Initialize request argument(s)
specialist_pool = aiplatform_v1beta1.SpecialistPool()
specialist_pool.name = "name_value"
specialist_pool.display_name = "display_name_value"
request = aiplatform_v1beta1.CreateSpecialistPoolRequest(
parent="parent_value",
specialist_pool=specialist_pool,
)
# Make the request
operation = client.create_specialist_pool(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest, dict]):
The request object. Request message for
[SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool].
parent (str):
Required. The parent Project name for the new
SpecialistPool. The form is
``projects/{project}/locations/{location}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool):
Required. The SpecialistPool to
create.
This corresponds to the ``specialist_pool`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data
labeling jobs. It includes a group of specialist
managers and workers. Managers are responsible for
managing the workers in this pool as well as
customers' data labeling jobs associated with this
pool. Customers create specialist pool as well as
start data labeling jobs on Cloud, managers and
workers handle the jobs using CrowdCompute console.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, specialist_pool])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a specialist_pool_service.CreateSpecialistPoolRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, specialist_pool_service.CreateSpecialistPoolRequest):
request = specialist_pool_service.CreateSpecialistPoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if specialist_pool is not None:
request.specialist_pool = specialist_pool
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_specialist_pool]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
gca_specialist_pool.SpecialistPool,
metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata,
)
# Done; return the response.
return response
def get_specialist_pool(
self,
request: Union[specialist_pool_service.GetSpecialistPoolRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> specialist_pool.SpecialistPool:
r"""Gets a SpecialistPool.
.. code-block:: python
from google.cloud import aiplatform_v1beta1
def sample_get_specialist_pool():
# Create a client
client = aiplatform_v1beta1.SpecialistPoolServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetSpecialistPoolRequest(
name="name_value",
)
# Make the request
response = client.get_specialist_pool(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest, dict]):
The request object. Request message for
[SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool].
name (str):
Required. The name of the SpecialistPool resource. The
form is
``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1beta1.types.SpecialistPool:
SpecialistPool represents customers'
own workforce to work on their data
labeling jobs. It includes a group of
specialist managers and workers.
Managers are responsible for managing
the workers in this pool as well as
customers' data labeling jobs associated
with this pool. Customers create
specialist pool as well as start data
labeling jobs on Cloud, managers and
workers handle the jobs using
CrowdCompute console.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a specialist_pool_service.GetSpecialistPoolRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, specialist_pool_service.GetSpecialistPoolRequest):
request = specialist_pool_service.GetSpecialistPoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_specialist_pool]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_specialist_pools(
self,
request: Union[specialist_pool_service.ListSpecialistPoolsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListSpecialistPoolsPager:
r"""Lists SpecialistPools in a Location.
.. code-block:: python
from google.cloud import aiplatform_v1beta1
def sample_list_specialist_pools():
# Create a client
client = aiplatform_v1beta1.SpecialistPoolServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListSpecialistPoolsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_specialist_pools(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest, dict]):
The request object. Request message for
[SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools].
parent (str):
Required. The name of the SpecialistPool's parent
resource. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers.ListSpecialistPoolsPager:
Response message for
[SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a specialist_pool_service.ListSpecialistPoolsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, specialist_pool_service.ListSpecialistPoolsRequest):
request = specialist_pool_service.ListSpecialistPoolsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_specialist_pools]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListSpecialistPoolsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
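# A minimal paging sketch (the parent value is illustrative, and `client` is
# assumed to be a SpecialistPoolServiceClient as constructed above). Iterating
# the returned pager yields SpecialistPool items and fetches further pages
# lazily.
for pool in client.list_specialist_pools(
    parent="projects/my-project/locations/us-central1"
):
    print(pool.display_name)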
def delete_specialist_pool(
self,
request: Union[
specialist_pool_service.DeleteSpecialistPoolRequest, dict
] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Deletes a SpecialistPool as well as all Specialists
in the pool.
.. code-block:: python
from google.cloud import aiplatform_v1beta1
def sample_delete_specialist_pool():
# Create a client
client = aiplatform_v1beta1.SpecialistPoolServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteSpecialistPoolRequest(
name="name_value",
)
# Make the request
operation = client.delete_specialist_pool(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest, dict]):
The request object. Request message for
[SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool].
name (str):
Required. The resource name of the SpecialistPool to
delete. Format:
``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a specialist_pool_service.DeleteSpecialistPoolRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, specialist_pool_service.DeleteSpecialistPoolRequest):
request = specialist_pool_service.DeleteSpecialistPoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_specialist_pool]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
def update_specialist_pool(
self,
request: Union[
specialist_pool_service.UpdateSpecialistPoolRequest, dict
] = None,
*,
specialist_pool: gca_specialist_pool.SpecialistPool = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Updates a SpecialistPool.
.. code-block:: python
from google.cloud import aiplatform_v1beta1
def sample_update_specialist_pool():
# Create a client
client = aiplatform_v1beta1.SpecialistPoolServiceClient()
# Initialize request argument(s)
specialist_pool = aiplatform_v1beta1.SpecialistPool()
specialist_pool.name = "name_value"
specialist_pool.display_name = "display_name_value"
request = aiplatform_v1beta1.UpdateSpecialistPoolRequest(
specialist_pool=specialist_pool,
)
# Make the request
operation = client.update_specialist_pool(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest, dict]):
The request object. Request message for
[SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool].
specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool):
Required. The SpecialistPool which
replaces the resource on the server.
This corresponds to the ``specialist_pool`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The update mask applies to
the resource.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data
labeling jobs. It includes a group of specialist
managers and workers. Managers are responsible for
managing the workers in this pool as well as
customers' data labeling jobs associated with this
pool. Customers create specialist pool as well as
start data labeling jobs on Cloud, managers and
workers handle the jobs using CrowdCompute console.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([specialist_pool, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a specialist_pool_service.UpdateSpecialistPoolRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, specialist_pool_service.UpdateSpecialistPoolRequest):
request = specialist_pool_service.UpdateSpecialistPoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if specialist_pool is not None:
request.specialist_pool = specialist_pool
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_specialist_pool]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("specialist_pool.name", request.specialist_pool.name),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
gca_specialist_pool.SpecialistPool,
metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata,
)
# Done; return the response.
return response
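# A minimal sketch of a masked update (resource name and display name are
# illustrative; `client` is assumed to be an existing SpecialistPoolServiceClient).
# The update_mask lists the fields that the request is allowed to replace.
from google.cloud import aiplatform_v1beta1
from google.protobuf import field_mask_pb2

pool = aiplatform_v1beta1.SpecialistPool(
    name="projects/my-project/locations/us-central1/specialistPools/123",
    display_name="renamed_pool",
)
operation = client.update_specialist_pool(
    specialist_pool=pool,
    update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
)
updated_pool = operation.result()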
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
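# A minimal sketch of context-manager use; per the warning above, only do this
# when the transport is not shared with other client instances. The resource
# name is illustrative.
from google.cloud import aiplatform_v1beta1

with aiplatform_v1beta1.SpecialistPoolServiceClient() as client:
    client.get_specialist_pool(
        name="projects/my-project/locations/us-central1/specialistPools/123"
    )
# The underlying transport is closed when the block exits.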
def list_operations(
self,
request: operations_pb2.ListOperationsRequest = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.ListOperationsResponse:
r"""Lists operations that match the specified filter in the request.
Args:
request (:class:`~.operations_pb2.ListOperationsRequest`):
The request object. Request message for
`ListOperations` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.ListOperationsResponse:
Response message for ``ListOperations`` method.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = operations_pb2.ListOperationsRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.list_operations,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
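# A minimal sketch of the operations mixin above (`client` is assumed to be an
# existing SpecialistPoolServiceClient; the resource name is illustrative). The
# request is not a proto-plus type, so a plain dict is expanded into
# operations_pb2.ListOperationsRequest.
response = client.list_operations(
    request={"name": "projects/my-project/locations/us-central1"}
)
for operation in response.operations:
    print(operation.name, operation.done)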
def get_operation(
self,
request: operations_pb2.GetOperationRequest = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.Operation:
r"""Gets the latest state of a long-running operation.
Args:
request (:class:`~.operations_pb2.GetOperationRequest`):
The request object. Request message for
`GetOperation` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.Operation:
An ``Operation`` object.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = operations_pb2.GetOperationRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.get_operation,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def delete_operation(
self,
request: operations_pb2.DeleteOperationRequest = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a long-running operation.
This method indicates that the client is no longer interested
in the operation result. It does not cancel the operation.
If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`.
Args:
request (:class:`~.operations_pb2.DeleteOperationRequest`):
The request object. Request message for
`DeleteOperation` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
None
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = operations_pb2.DeleteOperationRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.delete_operation,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def cancel_operation(
self,
request: operations_pb2.CancelOperationRequest = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Starts asynchronous cancellation on a long-running operation.
The server makes a best effort to cancel the operation, but success
is not guaranteed. If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`.
Args:
request (:class:`~.operations_pb2.CancelOperationRequest`):
The request object. Request message for
`CancelOperation` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
None
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = operations_pb2.CancelOperationRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.cancel_operation,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def wait_operation(
self,
request: operations_pb2.WaitOperationRequest = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.Operation:
r"""Waits until the specified long-running operation is done or reaches at most
a specified timeout, returning the latest state.
If the operation is already done, the latest state is immediately returned.
If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
timeout is used. If the server does not support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`.
Args:
request (:class:`~.operations_pb2.WaitOperationRequest`):
The request object. Request message for
`WaitOperation` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.Operation:
An ``Operation`` object.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = operations_pb2.WaitOperationRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.wait_operation,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def set_iam_policy(
self,
request: iam_policy_pb2.SetIamPolicyRequest = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
r"""Sets the IAM access control policy on the specified function.
Replaces any existing policy.
Args:
request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`):
The request object. Request message for `SetIamPolicy`
method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.policy_pb2.Policy:
Defines an Identity and Access Management (IAM) policy.
It is used to specify access control policies for Cloud
Platform resources.
A ``Policy`` is a collection of ``bindings``. A
``binding`` binds one or more ``members`` to a single
``role``. Members can be user accounts, service
accounts, Google groups, and domains (such as G Suite).
A ``role`` is a named list of permissions (defined by
IAM or configured by users). A ``binding`` can
optionally specify a ``condition``, which is a logic
expression that further constrains the role binding
based on attributes about the request and/or target
resource.
**JSON Example**
::
{
"bindings": [
{
"role": "roles/resourcemanager.organizationAdmin",
"members": [
"user:mike@example.com",
"group:admins@example.com",
"domain:google.com",
"serviceAccount:my-project-id@appspot.gserviceaccount.com"
]
},
{
"role": "roles/resourcemanager.organizationViewer",
"members": ["user:eve@example.com"],
"condition": {
"title": "expirable access",
"description": "Does not grant access after Sep 2020",
"expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')",
}
}
]
}
**YAML Example**
::
bindings:
- members:
- user:mike@example.com
- group:admins@example.com
- domain:google.com
- serviceAccount:my-project-id@appspot.gserviceaccount.com
role: roles/resourcemanager.organizationAdmin
- members:
- user:eve@example.com
role: roles/resourcemanager.organizationViewer
condition:
title: expirable access
description: Does not grant access after Sep 2020
expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
For a description of IAM and its features, see the `IAM
developer's
guide <https://cloud.google.com/iam/docs>`__.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.SetIamPolicyRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.set_iam_policy,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
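# A minimal sketch of set_iam_policy (resource name, role and member are
# illustrative; `client` is assumed to be an existing SpecialistPoolServiceClient).
from google.iam.v1 import iam_policy_pb2, policy_pb2

request = iam_policy_pb2.SetIamPolicyRequest(
    resource="projects/my-project/locations/us-central1/specialistPools/123",
    policy=policy_pb2.Policy(
        bindings=[
            policy_pb2.Binding(
                role="roles/viewer",
                members=["user:eve@example.com"],
            )
        ]
    ),
)
new_policy = client.set_iam_policy(request=request)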
def get_iam_policy(
self,
request: iam_policy_pb2.GetIamPolicyRequest = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
r"""Gets the IAM access control policy for a function.
Returns an empty policy if the function exists and does not have a
policy set.
Args:
request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`):
The request object. Request message for `GetIamPolicy`
method.
retry (google.api_core.retry.Retry): Designation of what errors, if
any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.policy_pb2.Policy:
Defines an Identity and Access Management (IAM) policy.
It is used to specify access control policies for Cloud
Platform resources.
A ``Policy`` is a collection of ``bindings``. A
``binding`` binds one or more ``members`` to a single
``role``. Members can be user accounts, service
accounts, Google groups, and domains (such as G Suite).
A ``role`` is a named list of permissions (defined by
IAM or configured by users). A ``binding`` can
optionally specify a ``condition``, which is a logic
expression that further constrains the role binding
based on attributes about the request and/or target
resource.
**JSON Example**
::
{
"bindings": [
{
"role": "roles/resourcemanager.organizationAdmin",
"members": [
"user:mike@example.com",
"group:admins@example.com",
"domain:google.com",
"serviceAccount:my-project-id@appspot.gserviceaccount.com"
]
},
{
"role": "roles/resourcemanager.organizationViewer",
"members": ["user:eve@example.com"],
"condition": {
"title": "expirable access",
"description": "Does not grant access after Sep 2020",
"expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')",
}
}
]
}
**YAML Example**
::
bindings:
- members:
- user:mike@example.com
- group:admins@example.com
- domain:google.com
- serviceAccount:my-project-id@appspot.gserviceaccount.com
role: roles/resourcemanager.organizationAdmin
- members:
- user:eve@example.com
role: roles/resourcemanager.organizationViewer
condition:
title: expirable access
description: Does not grant access after Sep 2020
expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
For a description of IAM and its features, see the `IAM
developer's
guide <https://cloud.google.com/iam/docs>`__.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.GetIamPolicyRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.get_iam_policy,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def test_iam_permissions(
self,
request: iam_policy_pb2.TestIamPermissionsRequest = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
r"""Tests the specified IAM permissions against the IAM access control
        policy for a resource.
        If the resource does not exist, this will return an empty set
of permissions, not a NOT_FOUND error.
Args:
request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`):
The request object. Request message for
`TestIamPermissions` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.iam_policy_pb2.TestIamPermissionsResponse:
Response message for ``TestIamPermissions`` method.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.TestIamPermissionsRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.test_iam_permissions,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
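# A minimal sketch of test_iam_permissions (resource and permission strings are
# illustrative; `client` is assumed to be an existing SpecialistPoolServiceClient).
# The response echoes back the subset of permissions the caller actually holds.
from google.iam.v1 import iam_policy_pb2

response = client.test_iam_permissions(
    request=iam_policy_pb2.TestIamPermissionsRequest(
        resource="projects/my-project/locations/us-central1/specialistPools/123",
        permissions=["aiplatform.specialistPools.get"],
    )
)
print(list(response.permissions))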
def get_location(
self,
request: locations_pb2.GetLocationRequest = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> locations_pb2.Location:
r"""Gets information about a location.
Args:
request (:class:`~.location_pb2.GetLocationRequest`):
The request object. Request message for
`GetLocation` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.location_pb2.Location:
Location object.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = locations_pb2.GetLocationRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.get_location,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_locations(
self,
request: locations_pb2.ListLocationsRequest = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> locations_pb2.ListLocationsResponse:
r"""Lists information about the supported locations for this service.
Args:
request (:class:`~.location_pb2.ListLocationsRequest`):
The request object. Request message for
`ListLocations` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.location_pb2.ListLocationsResponse:
Response message for ``ListLocations`` method.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = locations_pb2.ListLocationsRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.list_locations,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
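# A minimal sketch of the locations mixin (the project id is illustrative;
# `client` is assumed to be an existing SpecialistPoolServiceClient). A dict
# request is expanded into locations_pb2.ListLocationsRequest.
response = client.list_locations(request={"name": "projects/my-project"})
for location in response.locations:
    print(location.location_id, location.display_name)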
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("SpecialistPoolServiceClient",)
| 40.801374 | 192 | 0.605822 |
6dbc1bd370a67e18966117b85ea21571800fa0f7 | 2,206 | py | Python | ml_web_project/ml_project/views.py | DJayDixit/ml-web-project | 33623b537c22f325f86d221a372449a14348dbe1 | ["MIT"] | null | null | null |
ml_web_project/ml_project/views.py | DJayDixit/ml-web-project | 33623b537c22f325f86d221a372449a14348dbe1 | ["MIT"] | null | null | null |
ml_web_project/ml_project/views.py | DJayDixit/ml-web-project | 33623b537c22f325f86d221a372449a14348dbe1 | ["MIT"] | null | null | null |
from django.shortcuts import render
# from apps import MlProjectConfig
import pickle
import pandas as pd
class HousePrice():
    def load_model(self, core_cpi_file, year_gni_file, price_file):
        # Load the three pickled regressors; context managers make sure the
        # pickle files are closed again after reading.
        with open(core_cpi_file, "rb") as core_cpi_pickle:
            core_cpi_model = pickle.load(core_cpi_pickle)
        with open(year_gni_file, "rb") as year_gni_pickle:
            year_gni_model = pickle.load(year_gni_pickle)
        with open(price_file, "rb") as price_pickle:
            price_model = pickle.load(price_pickle)
        return price_model, core_cpi_model, year_gni_model
def calculate(self, date_month, flat_type_number, house_area_sqm, core_cpi_file, year_gni_file, price_file):
date = {"date": [date_month]}
date = pd.DataFrame(date)
date.reset_index(drop=True, inplace=True)
models = self.load_model(core_cpi_file, year_gni_file, price_file)
core_cpi_predict = models[1]
year_gni_predict = models[2]
price_predict = models[0]
core_cpi = core_cpi_predict.predict(date)
year_gni = year_gni_predict.predict(date)
flat_type = flat_type_number
area_sqm = house_area_sqm
size = flat_type + area_sqm
affordability = size*(core_cpi+year_gni)
validate = {
"affordability": affordability, "Core CPI": core_cpi, "year_gni":year_gni,
"area_sqm":[area_sqm], "date":[date.iloc[0]["date"]], "flat_type": [flat_type]
}
validate_df = pd.DataFrame(validate)
pred = price_predict.predict(validate_df)
return pred
if __name__ == "__main__":
price = HousePrice()
core_cpi_file = r"C:\Users\jaydi\Documents\GitHub\ml-web-project\ml_web_project\ml_project\core_cpi_predict.pickle"
year_gni_file = r"C:\Users\jaydi\Documents\GitHub\ml-web-project\ml_web_project\ml_project\year_gni_predict.pickle"
price_file = r"C:\Users\jaydi\Documents\GitHub\ml-web-project\ml_web_project\ml_project\price_predict.pickle"
calc = price.calculate(2022.06, 2, 65, core_cpi_file, year_gni_file, price_file)
print(calc)
| 38.701754 | 119 | 0.69447 |
8cbe196ffa1191e928886794a437ce52f6cca02e | 1,109 | py | Python | tests/emukit/bayesian_optimization/test_cost_sensitive_bayesian_optimization.py | DavidJanz/emukit | 7421cb7f4ed831b6581f3686806521ff7fb97e74 | ["Apache-2.0"] | 6 | 2019-06-02T21:23:27.000Z | 2020-02-17T09:46:30.000Z |
tests/emukit/bayesian_optimization/test_cost_sensitive_bayesian_optimization.py | Tony-Chiong/emukit | a068c8d5e06b2ae8b038f67bf2e4f66c4d91651a | ["Apache-2.0"] | 4 | 2019-05-17T13:30:21.000Z | 2019-06-21T13:49:19.000Z |
tests/emukit/bayesian_optimization/test_cost_sensitive_bayesian_optimization.py | Tony-Chiong/emukit | a068c8d5e06b2ae8b038f67bf2e4f66c4d91651a | ["Apache-2.0"] | null | null | null |
import GPy
import numpy as np
from emukit.bayesian_optimization.loops.cost_sensitive_bayesian_optimization_loop import (
CostSensitiveBayesianOptimizationLoop)
from emukit.core import ContinuousParameter, ParameterSpace
from emukit.core.loop.user_function import UserFunctionWrapper
from emukit.model_wrappers import GPyModelWrapper
def test_cost_sensitive_bayesian_optimization_loop():
space = ParameterSpace([ContinuousParameter('x', 0, 1)])
x_init = np.random.rand(10, 1)
def function_with_cost(x):
return np.sin(x), x
user_fcn = UserFunctionWrapper(function_with_cost)
y_init, cost_init = function_with_cost(x_init)
gpy_model_objective = GPy.models.GPRegression(x_init, y_init)
gpy_model_cost = GPy.models.GPRegression(x_init, cost_init)
model_objective = GPyModelWrapper(gpy_model_objective)
model_cost = GPyModelWrapper(gpy_model_cost)
loop = CostSensitiveBayesianOptimizationLoop(space, model_objective, model_cost)
loop.run_loop(user_fcn, 10)
assert loop.loop_state.X.shape[0] == 20
assert loop.loop_state.cost.shape[0] == 20
| 32.617647 | 90 | 0.786294 |
dc8cb41e336c52557fb8981fe2873d410533474c | 3,334 | py | Python | tests/features/basic_batch_test_prediction_steps.py | osroca/bigmler | 8e4ccaea8fa5329674c7e6909381a9688a8bb24b | ["Apache-2.0"] | 1 | 2021-08-30T20:18:57.000Z | 2021-08-30T20:18:57.000Z |
tests/features/basic_batch_test_prediction_steps.py | osroca/bigmler | 8e4ccaea8fa5329674c7e6909381a9688a8bb24b | ["Apache-2.0"] | null | null | null |
tests/features/basic_batch_test_prediction_steps.py | osroca/bigmler | 8e4ccaea8fa5329674c7e6909381a9688a8bb24b | ["Apache-2.0"] | 1 | 2021-08-30T20:19:05.000Z | 2021-08-30T20:19:05.000Z |
import os
import time
import csv
import json
from lettuce import step, world
from subprocess import check_call, CalledProcessError
from bigml.api import check_resource
@step(r'I check that the batch prediction has been created')
def i_check_create_batch_prediction(step):
batch_prediction_file = "%s%sbatch_prediction" % (world.directory, os.sep)
try:
batch_prediction_file = open(batch_prediction_file, "r")
batch_prediction = check_resource(batch_prediction_file.readline().strip(),
world.api.get_batch_prediction)
world.batch_predictions.append(batch_prediction['resource'])
world.batch_prediction = batch_prediction
batch_prediction_file.close()
assert True
    except Exception as exc:
assert False, str(exc)
@step(r'I check that the source has been created from the test file')
def i_check_create_test_source(step):
test_source_file = "%s%ssource_test" % (world.directory, os.sep)
try:
test_source_file = open(test_source_file, "r")
test_source = check_resource(test_source_file.readline().strip(),
world.api.get_source)
world.sources.append(test_source['resource'])
world.test_source = test_source
test_source_file.close()
assert True
    except Exception as exc:
assert False, str(exc)
@step(r'I check that the dataset has been created from the test file')
def i_check_create_test_dataset(step):
test_dataset_file = "%s%sdataset_test" % (world.directory, os.sep)
try:
test_dataset_file = open(test_dataset_file, "r")
test_dataset = check_resource(test_dataset_file.readline().strip(),
world.api.get_dataset)
world.datasets.append(test_dataset['resource'])
world.test_dataset = test_dataset
test_dataset_file.close()
assert True
    except Exception as exc:
assert False, str(exc)
@step(r'I check that the batch centroid prediction has been created')
def i_check_create_batch_centroid(step):
batch_prediction_file = "%s%sbatch_centroid" % (world.directory, os.sep)
try:
batch_prediction_file = open(batch_prediction_file, "r")
batch_centroid = check_resource(batch_prediction_file.readline().strip(),
world.api.get_batch_centroid)
world.batch_centroids.append(batch_centroid['resource'])
world.batch_centroid = batch_centroid
batch_prediction_file.close()
assert True
    except Exception as exc:
assert False, str(exc)
@step(r'I check that the batch anomaly scores prediction has been created')
def i_check_create_batch_anomaly_scores(step):
batch_prediction_file = "%s%sbatch_anomaly_score" % (world.directory, os.sep)
try:
batch_prediction_file = open(batch_prediction_file, "r")
batch_anomaly_score = check_resource(batch_prediction_file.readline().strip(),
world.api.get_batch_anomaly_score)
world.batch_anomaly_scores.append(batch_anomaly_score['resource'])
world.batch_anomaly_score = batch_anomaly_score
batch_prediction_file.close()
assert True
    except Exception as exc:
assert False, str(exc)
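# The step functions above repeat the same open/check/store pattern; a
# hypothetical helper (not part of bigmler) that factors it out could look like
# this, with each step reduced to a single call such as
# _check_created_resource("batch_prediction", world.api.get_batch_prediction,
#                         world.batch_predictions, "batch_prediction").
def _check_created_resource(filename, getter, collection, attribute):
    resource_path = "%s%s%s" % (world.directory, os.sep, filename)
    try:
        with open(resource_path, "r") as resource_file:
            resource = check_resource(resource_file.readline().strip(), getter)
        collection.append(resource['resource'])
        setattr(world, attribute, resource)
        assert True
    except Exception as exc:
        assert False, str(exc)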
| 40.168675 | 86 | 0.682064 |
edcae9e12719b800ebdc405caf8f70e82cbdf5dc | 824 | py | Python | memover/mover.py | Alecktos/Directory-Tree-File-Mover | ac642ba0599534cdd248e56e8db842dbf1972496 | ["MIT"] | 1 | 2021-11-23T21:17:24.000Z | 2021-11-23T21:17:24.000Z |
memover/mover.py | Alecktos/Directory-Tree-File-Mover | ac642ba0599534cdd248e56e8db842dbf1972496 | ["MIT"] | null | null | null |
memover/mover.py | Alecktos/Directory-Tree-File-Mover | ac642ba0599534cdd248e56e8db842dbf1972496 | ["MIT"] | null | null | null |
from . import episode_mover
from . import file_handler
from . import file_matcher
from . import movie_mover
from . import subtitles
from .media_file_extractor import get_type, Type
def move_media_by_name(name, source_path, show_destination_path, movie_destination_path):
paths = file_matcher.search_files(name, source_path)
for path in paths:
move_media_by_path(path, show_destination_path, movie_destination_path)
def move_media_by_path(path, show_destination_path, movie_destination_path):
subtitles.rename_and_move(path)
media_type = get_type(path)
if media_type is Type.MOVIE:
movie_mover.move(movie_destination_path, path)
else:
episode_mover.move(show_destination_path, path)
if file_handler.path_is_directory(path):
file_handler.delete_directory(path)
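# A minimal usage sketch (paths and the search term are illustrative); matching
# files under the source directory are moved to the show or movie destination
# depending on their detected media type.
from memover import mover

mover.move_media_by_name(
    "some.show.S01E01",
    "/downloads",
    "/media/shows",
    "/media/movies",
)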
| 32.96 | 89 | 0.783981 |
2933f5bbbce9d70a1be70f1b1dfc20317315b391 | 51,857 | py | Python | src/sage/misc/misc.py | robertwb/sage | 1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f | ["BSL-1.0"] | 2 | 2018-06-30T01:37:35.000Z | 2018-06-30T01:37:39.000Z |
src/sage/misc/misc.py | boothby/sage | 1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f | ["BSL-1.0"] | null | null | null |
src/sage/misc/misc.py | boothby/sage | 1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f | ["BSL-1.0"] | null | null | null |
"""
Miscellaneous functions
AUTHORS:
- William Stein
- William Stein (2006-04-26): added workaround for Windows where most
users' home directory has a space in it.
- Robert Bradshaw (2007-09-20): Ellipsis range/iterator.
TESTS:
The following test, verifying that :trac:`16181` has been resolved, needs
to stay at the beginning of this file so that its context is not
poisoned by other tests::
sage: sage.misc.misc.inject_variable('a', 0)
sage: a
0
Check the fix from :trac:`8323`::
sage: 'name' in globals()
False
sage: 'func' in globals()
False
Test deprecation::
sage: sage.misc.misc.srange(5)
doctest:...: DeprecationWarning:
Importing srange from here is deprecated. If you need to use it, please import it directly from sage.arith.srange
See http://trac.sagemath.org/20094 for details.
[0, 1, 2, 3, 4]
sage: sage.misc.all.srange(5)
doctest:...: DeprecationWarning:
Importing srange from here is deprecated. If you need to use it, please import it directly from sage.arith.srange
See http://trac.sagemath.org/20334 for details.
[0, 1, 2, 3, 4]
sage: sage.misc.misc.sxrange(5)
doctest:...: DeprecationWarning:
Importing sxrange from here is deprecated. If you need to use it, please import it directly from sage.arith.srange
See http://trac.sagemath.org/20094 for details.
<generator object at 0x...>
sage: sage.misc.misc.cancel_alarm()
doctest:...: DeprecationWarning:
Importing cancel_alarm from here is deprecated. If you need to use it, please import it directly from cysignals.alarm
See http://trac.sagemath.org/20002 for details.
"""
#*****************************************************************************
# Copyright (C) 2006 William Stein <wstein@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function, absolute_import
from six.moves import range
__doc_exclude=["cached_attribute", "cached_class_attribute", "lazy_prop",
"generic_cmp", "to_gmp_hex", "todo",
"typecheck", "prop", "strunc",
"assert_attribute", "LOGFILE"]
from warnings import warn
import os
import stat
import sys
import signal
import time
import resource
import math
import sage.misc.prandom as random
from .lazy_string import lazy_string
from sage.misc.lazy_import import lazy_import
lazy_import('sage.arith.srange', ('xsrange', 'srange', 'ellipsis_range', 'ellipsis_iter'), deprecation=20094)
lazy_import('sage.arith.srange', 'xsrange', 'sxrange', deprecation=20094)
lazy_import('cysignals.alarm', ('alarm', 'cancel_alarm'), deprecation=20002)
from sage.env import DOT_SAGE, HOSTNAME
LOCAL_IDENTIFIER = '%s.%s'%(HOSTNAME , os.getpid())
def sage_makedirs(dir):
"""
Python version of ``mkdir -p``: try to create a directory, and also
create all intermediate directories as necessary. Succeed silently
if the directory already exists (unlike ``os.makedirs()``).
Raise other errors (like permission errors) normally.
EXAMPLES::
sage: from sage.misc.misc import sage_makedirs
sage: sage_makedirs(DOT_SAGE) # no output
The following fails because we are trying to create a directory in
place of an ordinary file (the main Sage executable)::
sage: sage_executable = os.path.join(SAGE_ROOT, 'sage')
sage: sage_makedirs(sage_executable)
Traceback (most recent call last):
...
OSError: ...
"""
try:
os.makedirs(dir)
except OSError:
if not os.path.isdir(dir):
raise
#################################################
# Now that the variable DOT_SAGE has been set,
# we make sure that the DOT_SAGE directory
# has restrictive permissions, since otherwise
# possibly just anybody can easily see every
# command you type, since it is in the history,
# and every worksheet you create, etc.
# We do the following:
# 1. If there is no DOT_SAGE, we create it.
# 2. Check to see if the permissions on DOT_SAGE are
# sufficiently restrictive. If not, we change them.
sage_makedirs(DOT_SAGE)
if hasattr(os, 'chmod'):
_mode = os.stat(DOT_SAGE)[stat.ST_MODE]
_desired_mode = 0o40700 # drwx------
if _mode != _desired_mode:
# On Cygwin, if the sage directory is not in a filesystem mounted with
# 'acl' support, setting the permissions may fail silently, so only
# print the message after we've changed the permissions and confirmed
# that the change succeeded
os.chmod(DOT_SAGE, _desired_mode)
if os.stat(DOT_SAGE)[stat.ST_MODE] == _desired_mode:
print("Setting permissions of DOT_SAGE directory so only you "
"can read and write it.")
#################################################
# Next we create the Sage temporary directory.
#################################################
@lazy_string
def SAGE_TMP():
"""
EXAMPLES::
sage: from sage.misc.misc import SAGE_TMP
sage: SAGE_TMP
l'.../temp/...'
"""
d = os.path.join(DOT_SAGE, 'temp', HOSTNAME, str(os.getpid()))
sage_makedirs(d)
return d
@lazy_string
def SPYX_TMP():
"""
EXAMPLES::
sage: from sage.misc.misc import SPYX_TMP
sage: SPYX_TMP
l'.../temp/.../spyx'
"""
return os.path.join(SAGE_TMP, 'spyx')
@lazy_string
def SAGE_TMP_INTERFACE():
"""
EXAMPLES::
sage: from sage.misc.misc import SAGE_TMP_INTERFACE
sage: SAGE_TMP_INTERFACE
l'.../temp/.../interface'
"""
d = os.path.join(SAGE_TMP, 'interface')
sage_makedirs(d)
return d
SAGE_DB = os.path.join(DOT_SAGE, 'db')
sage_makedirs(SAGE_DB)
try:
# Create the matplotlib config directory.
sage_makedirs(os.environ["MPLCONFIGDIR"])
except KeyError:
pass
#################################################################
# Functions to help with interfacing with CXX code that
# uses the GMP library
#################################################################
def to_gmp_hex(n):
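    """
    Return the hexadecimal digits of ``n`` as a plain string, with the
    ``0x`` prefix and any trailing ``L`` (Python 2 long marker) stripped,
    so the result can be passed directly to GMP-based C/C++ code.
    """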
return hex(n).replace("L","").replace("0x","")
#################################################################
# timing
#################################################################
def cputime(t=0, subprocesses=False):
"""
Return the time in CPU seconds since Sage started, or with
optional argument ``t``, return the time since ``t``. This is how
much time Sage has spent using the CPU. If ``subprocesses=False``
this does not count time spent in subprocesses spawned by Sage
(e.g., Gap, Singular, etc.). If ``subprocesses=True`` this
function tries to take all subprocesses with a working
``cputime()`` implementation into account.
The measurement for the main Sage process is done via a call to
:func:`resource.getrusage()`, so it avoids the wraparound problems in
:func:`time.clock()` on Cygwin.
INPUT:
- ``t`` - (optional) time in CPU seconds, if ``t`` is a result
from an earlier call with ``subprocesses=True``, then
``subprocesses=True`` is assumed.
- subprocesses -- (optional), include subprocesses (default:
``False``)
OUTPUT:
- ``float`` - time in CPU seconds if ``subprocesses=False``
- :class:`GlobalCputime` - object which holds CPU times of
subprocesses otherwise
EXAMPLES::
sage: t = cputime()
sage: F = gp.factor(2^199-1)
sage: cputime(t) # somewhat random
0.010999000000000092
sage: t = cputime(subprocesses=True)
sage: F = gp.factor(2^199-1)
sage: cputime(t) # somewhat random
0.091999
sage: w = walltime()
sage: F = gp.factor(2^199-1)
sage: walltime(w) # somewhat random
0.58425593376159668
.. note ::
Even with ``subprocesses=True`` there is no guarantee that the
CPU time is reported correctly because subprocesses can be
started and terminated at any given time.
"""
if isinstance(t, GlobalCputime):
subprocesses=True
if not subprocesses:
try:
t = float(t)
except TypeError:
t = 0.0
u,s = resource.getrusage(resource.RUSAGE_SELF)[:2]
return u+s - t
else:
if t == 0:
ret = GlobalCputime(cputime())
for s in sage.interfaces.quit.expect_objects:
S = s()
if S and S.is_running():
try:
ct = S.cputime()
ret.total += ct
ret.interfaces[s] = ct
except NotImplementedError:
pass
return ret
else:
if not isinstance(t, GlobalCputime):
t = GlobalCputime(t)
ret = GlobalCputime(cputime() - t.local)
for s in sage.interfaces.quit.expect_objects:
S = s()
if S and S.is_running():
try:
ct = S.cputime() - t.interfaces.get(s, 0.0)
ret.total += ct
ret.interfaces[s] = ct
except NotImplementedError:
pass
return ret
class GlobalCputime:
"""
Container for CPU times of subprocesses.
AUTHOR:
- Martin Albrecht - (2008-12): initial version
EXAMPLE:
Objects of this type are returned if ``subprocesses=True`` is
passed to :func:`cputime`::
sage: cputime(subprocesses=True) # indirect doctest, output random
0.2347431
We can use it to keep track of the CPU time spent in Singular for
example::
sage: t = cputime(subprocesses=True)
sage: P = PolynomialRing(QQ,7,'x')
sage: I = sage.rings.ideal.Katsura(P)
sage: gb = I.groebner_basis() # calls Singular
sage: cputime(subprocesses=True) - t # output random
0.462987
For further processing we can then convert this container to a
float::
sage: t = cputime(subprocesses=True)
sage: float(t) #output somewhat random
2.1088339999999999
.. seealso::
:func:`cputime`
"""
def __init__(self, t):
"""
Create a new CPU time object which also keeps track of
subprocesses.
EXAMPLE::
sage: from sage.misc.misc import GlobalCputime
sage: ct = GlobalCputime(0.0); ct
0.0...
"""
self.total = t
self.local = t
self.interfaces = {}
def __repr__(self):
"""
EXAMPLE::
sage: cputime(subprocesses=True) # indirect doctest, output random
0.2347431
"""
return str(self.total)
def __add__(self, other):
"""
EXAMPLE::
sage: t = cputime(subprocesses=True)
sage: P = PolynomialRing(QQ,7,'x')
sage: I = sage.rings.ideal.Katsura(P)
sage: gb = I.groebner_basis() # calls Singular
sage: cputime(subprocesses=True) + t # output random
2.798708
"""
if not isinstance(other, GlobalCputime):
other = GlobalCputime(other)
ret = GlobalCputime(self.total + other.total)
return ret
def __sub__(self, other):
"""
EXAMPLE::
sage: t = cputime(subprocesses=True)
sage: P = PolynomialRing(QQ,7,'x')
sage: I = sage.rings.ideal.Katsura(P)
sage: gb = I.groebner_basis() # calls Singular
sage: cputime(subprocesses=True) - t # output random
0.462987
"""
if not isinstance(other, GlobalCputime):
other = GlobalCputime(other)
ret = GlobalCputime(self.total - other.total)
return ret
def __float__(self):
"""
EXAMPLE::
sage: t = cputime(subprocesses=True)
sage: float(t) #output somewhat random
2.1088339999999999
"""
return float(self.total)
def walltime(t=0):
"""
    Return the wall time in seconds, or with optional argument t, return
the wall time since time t. "Wall time" means the time on a wall
clock, i.e., the actual time.
INPUT:
- ``t`` - (optional) float, time in CPU seconds
OUTPUT:
- ``float`` - time in seconds
EXAMPLES::
sage: w = walltime()
sage: F = factor(2^199-1)
sage: walltime(w) # somewhat random
0.8823847770690918
"""
return time.time() - t
#def clock(cmd):
# t=cputime()
# eval(compile(cmd,"clock",'single'))
# return cputime(t)
#################################################################
# simple verbosity system
#################################################################
LEVEL=0 # default
verbose_files = []
def verbose(mesg="", t=0, level=1, caller_name=None):
"""
Print a message if the current verbosity is at least level.
INPUT:
- ``mesg`` - str, a message to print
    - ``t`` - int, optional; if included, will also print
      cputime(t), which is the time since time t. Thus t should have
      been obtained with t=cputime()
- ``level`` - int, (default: 1) the verbosity level of
what we are printing
- ``caller_name`` - string (default: None), the name
of the calling function; in most cases Python can deduce this, so
it need not be provided.
OUTPUT: possibly prints a message to stdout; also returns
cputime()
EXAMPLE::
sage: set_verbose(1)
sage: t = cputime()
sage: t = verbose("This is Sage.", t, level=1, caller_name="william") # not tested
VERBOSE1 (william): This is Sage. (time = 0.0)
sage: set_verbose(0)
"""
if level>LEVEL:
return cputime()
frame = sys._getframe(1).f_code
file_name = frame.co_filename
lineno = frame.co_firstlineno
if 'all' in verbose_files or level<=0:
show = True
else:
show = False
for X in verbose_files:
if file_name.find(X) != -1:
show = True
break
if not show:
return cputime()
if t != 0 and mesg=="":
mesg = "Finished."
# see recipe 14.7 in Python Cookbook
if caller_name is None:
caller_name = frame.co_name
if caller_name == "?: ":
caller_name = ""
short_file_name = os.path.split(frame.co_filename)[1]
if '<' in short_file_name and '>' in short_file_name:
s = "verbose %s (%s) %s"%(level, caller_name, mesg)
else:
s = "verbose %s (%s: %s, %s) %s"%(level, lineno, short_file_name, caller_name, mesg)
if t!=0:
s = s + " (time = %s)"%cputime(t)
print(s)
sys.stdout.flush()
#open(LOGFILE,"a").write(s+"\n")
return cputime()
def todo(mesg=""):
caller_name = sys._getframe(1).f_code.co_name
raise NotImplementedError("{}: todo -- {}".format(caller_name, mesg))
def set_verbose(level, files='all'):
"""
Set the global Sage verbosity level.
INPUT:
- ``level`` - an integer between 0 and 2, inclusive.
- ``files`` (default: 'all'): list of files to make verbose, or
'all' to make ALL files verbose (the default).
OUTPUT: changes the state of the verbosity flag and possibly
appends to the list of files that are verbose.
EXAMPLES::
sage: set_verbose(2)
sage: verbose("This is Sage.", level=1) # not tested
VERBOSE1 (?): This is Sage.
sage: verbose("This is Sage.", level=2) # not tested
VERBOSE2 (?): This is Sage.
sage: verbose("This is Sage.", level=3) # not tested
[no output]
sage: set_verbose(0)
"""
if isinstance(level, str):
set_verbose_files([level])
global LEVEL
LEVEL = level
if isinstance(files, str):
files = [files]
set_verbose_files(files)
def set_verbose_files(file_name):
"""
"""
if not isinstance(file_name, list):
file_name = [file_name]
global verbose_files
verbose_files = file_name
def get_verbose_files():
"""
"""
return verbose_files
def unset_verbose_files(file_name):
"""
"""
if not isinstance(file_name, list):
file_name = [file_name]
for X in file_name:
verbose_files.remove(X)
def get_verbose():
"""
Return the global Sage verbosity level.
    OUTPUT: the current verbosity level - an integer between 0 and 2, inclusive.
EXAMPLES::
sage: get_verbose()
0
sage: set_verbose(2)
sage: get_verbose()
2
sage: set_verbose(0)
"""
global LEVEL
return LEVEL
def generic_cmp(x,y):
"""
Compare x and y and return -1, 0, or 1.
This is similar to x.__cmp__(y), but works even in some cases
when a .__cmp__ method isn't defined.
"""
if x<y:
return -1
elif x==y:
return 0
return 1
def cmp_props(left, right, props):
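    """
    Compare ``left`` and ``right`` by calling, for each name in ``props``,
    the corresponding zero-argument method on both objects and comparing
    the results; return the first nonzero comparison, or 0 if all agree.
    """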
for a in props:
c = cmp(left.__getattribute__(a)(), right.__getattribute__(a)())
if c: return c
return 0
def union(x, y=None):
"""
Return the union of x and y, as a list. The resulting list need not
be sorted and can change from call to call.
INPUT:
- ``x`` - iterable
    - ``y`` - iterable (may optionally be omitted)
OUTPUT: list
EXAMPLES::
sage: answer = union([1,2,3,4], [5,6]); answer
[1, 2, 3, 4, 5, 6]
sage: union([1,2,3,4,5,6], [5,6]) == answer
True
sage: union((1,2,3,4,5,6), [5,6]) == answer
True
sage: union((1,2,3,4,5,6), set([5,6])) == answer
True
"""
if y is None:
return list(set(x))
return list(set(x).union(y))
def uniq(x):
"""
    Return a sorted list of the unique elements of the list x.
EXAMPLES::
sage: v = uniq([1,1,8,-5,3,-5,'a','x','a'])
sage: v # potentially random ordering of output
['a', 'x', -5, 1, 3, 8]
sage: set(v) == set(['a', 'x', -5, 1, 3, 8])
True
"""
v = sorted(set(x))
return v
def coeff_repr(c, is_latex=False):
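    """
    Return a string representation of the coefficient ``c`` for use in a
    linear combination: a representation containing ``+`` or ``-`` is wrapped
    in parentheses (``\\left(...\\right)`` when ``is_latex`` is True) so that
    it composes safely with the monomial it multiplies.
    """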
if not is_latex:
try:
return c._coeff_repr()
except AttributeError:
pass
if isinstance(c, (int, long, float)):
return str(c)
if is_latex and hasattr(c, '_latex_'):
s = c._latex_()
else:
s = str(c).replace(' ','')
if s.find("+") != -1 or s.find("-") != -1:
if is_latex:
return "\\left(%s\\right)"%s
else:
return "(%s)"%s
return s
def repr_lincomb(terms, is_latex=False, scalar_mult="*", strip_one=False, repr_monomial = None, latex_scalar_mult = None):
"""
Compute a string representation of a linear combination of some
formal symbols.
INPUT:
- ``terms`` -- list of terms, as pairs (support, coefficient)
- ``is_latex`` -- whether to produce latex (default: ``False``)
- ``scalar_mult`` -- string representing the multiplication (default:``'*'``)
- ``latex_scalar_mult`` -- latex string representing the multiplication
(default: ``''`` if ``scalar_mult`` is ``'*'``; otherwise ``scalar_mult``)
- ``coeffs`` -- for backward compatibility
OUTPUT:
- ``str`` - a string
EXAMPLES::
sage: repr_lincomb([('a',1), ('b',-2), ('c',3)])
'a - 2*b + 3*c'
sage: repr_lincomb([('a',0), ('b',-2), ('c',3)])
'-2*b + 3*c'
sage: repr_lincomb([('a',0), ('b',2), ('c',3)])
'2*b + 3*c'
sage: repr_lincomb([('a',1), ('b',0), ('c',3)])
'a + 3*c'
sage: repr_lincomb([('a',-1), ('b','2+3*x'), ('c',3)])
'-a + (2+3*x)*b + 3*c'
sage: repr_lincomb([('a', '1+x^2'), ('b', '2+3*x'), ('c', 3)])
'(1+x^2)*a + (2+3*x)*b + 3*c'
sage: repr_lincomb([('a', '1+x^2'), ('b', '-2+3*x'), ('c', 3)])
'(1+x^2)*a + (-2+3*x)*b + 3*c'
sage: repr_lincomb([('a', 1), ('b', -2), ('c', -3)])
'a - 2*b - 3*c'
sage: t = PolynomialRing(RationalField(),'t').gen()
sage: repr_lincomb([('a', -t), ('s', t - 2), ('', t^2 + 2)])
'-t*a + (t-2)*s + (t^2+2)'
Examples for ``scalar_mult``::
sage: repr_lincomb([('a',1), ('b',2), ('c',3)], scalar_mult='*')
'a + 2*b + 3*c'
sage: repr_lincomb([('a',2), ('b',0), ('c',-3)], scalar_mult='**')
'2**a - 3**c'
sage: repr_lincomb([('a',-1), ('b',2), ('c',3)], scalar_mult='**')
'-a + 2**b + 3**c'
Examples for ``scalar_mult`` and ``is_latex``::
sage: repr_lincomb([('a',-1), ('b',2), ('c',3)], is_latex=True)
'-a + 2b + 3c'
sage: repr_lincomb([('a',-1), ('b',-1), ('c',3)], is_latex=True, scalar_mult='*')
'-a - b + 3c'
sage: repr_lincomb([('a',-1), ('b',2), ('c',-3)], is_latex=True, scalar_mult='**')
'-a + 2**b - 3**c'
sage: repr_lincomb([('a',-2), ('b',-1), ('c',-3)], is_latex=True, latex_scalar_mult='*')
'-2*a - b - 3*c'
Examples for ``strip_one``::
sage: repr_lincomb([ ('a',1), (1,-2), ('3',3) ])
'a - 2*1 + 3*3'
sage: repr_lincomb([ ('a',-1), (1,1), ('3',3) ])
'-a + 1 + 3*3'
sage: repr_lincomb([ ('a',1), (1,-2), ('3',3) ], strip_one = True)
'a - 2 + 3*3'
sage: repr_lincomb([ ('a',-1), (1,1), ('3',3) ], strip_one = True)
'-a + 1 + 3*3'
sage: repr_lincomb([ ('a',1), (1,-1), ('3',3) ], strip_one = True)
'a - 1 + 3*3'
Examples for ``repr_monomial``::
sage: repr_lincomb([('a',1), ('b',2), ('c',3)], repr_monomial = lambda s: s+"1")
'a1 + 2*b1 + 3*c1'
"""
# Setting scalar_mult: symbol used for scalar multiplication
if is_latex:
if latex_scalar_mult is not None:
scalar_mult = latex_scalar_mult
elif scalar_mult == "*":
scalar_mult = ""
if repr_monomial is None:
if is_latex:
repr_monomial = lambda monomial: monomial._latex_() if hasattr(monomial, '_latex_') else str(monomial)
else:
repr_monomial = str
s = ""
first = True
if scalar_mult is None:
scalar_mult = "" if is_latex else "*"
for (monomial,c) in terms:
if c != 0:
coeff = coeff_repr(c)
negative = False
if len(coeff)>0 and coeff[0] == "-":
negative = True
try:
if c < 0:
negative = True
except NotImplementedError:
# comparisons may not be implemented for some coefficients
pass
if negative:
coeff = coeff_repr(-c, is_latex)
else:
coeff = coeff_repr(c, is_latex)
if coeff == "1":
coeff = ""
if coeff != "0":
if negative:
if first:
sign = "-" # add trailing space?
else:
sign = " - "
else:
if first:
sign = ""
else:
sign= " + "
b = repr_monomial(monomial)
if len(b) > 0:
if coeff != "":
if b =="1" and strip_one:
b = ""
else:
b = scalar_mult + b
s += "%s%s%s"%(sign, coeff, b)
first = False
if first:
return "0" # this can happen only if are only terms with coeff_repr(c) == "0"
#elif s == "":
#return "1" # is empty string representation invalid?
else:
return s
def strunc(s, n = 60):
"""
Truncate at first space after position n, adding '...' if
nontrivial truncation.
"""
n = int(n)
s = str(s)
if len(s) > n:
i = n
while i < len(s) and s[i] != ' ':
i += 1
return s[:i] + " ..."
#return s[:n-4] + " ..."
return s
def newton_method_sizes(N):
r"""
Returns a sequence of integers
`1 = a_1 \leq a_2 \leq \cdots \leq a_n = N` such that
`a_j = \lceil a_{j+1} / 2 \rceil` for all `j`.
This is useful for Newton-style algorithms that double the
precision at each stage. For example if you start at precision 1
and want an answer to precision 17, then it's better to use the
intermediate stages 1, 2, 3, 5, 9, 17 than to use 1, 2, 4, 8, 16,
17.
INPUT:
- ``N`` - positive integer
EXAMPLES::
sage: newton_method_sizes(17)
[1, 2, 3, 5, 9, 17]
sage: newton_method_sizes(16)
[1, 2, 4, 8, 16]
sage: newton_method_sizes(1)
[1]
AUTHORS:
- David Harvey (2006-09-09)
"""
N = int(N)
if N < 1:
raise ValueError("N (={}) must be a positive integer".format(N))
output = []
while N > 1:
output.append(N)
N = (N + 1) >> 1
output.append(1)
output.reverse()
return output
#################################################################
# Generally useful
#################################################################
def assert_attribute(x, attr, init=None):
"""
If the object x has the attribute attr, do nothing. If not, set
x.attr to init.
"""
if attr in x.__dict__: return
if attr[:2] == "__":
z = str(x.__class__).split("'")
if len(z) > 1:
z = z[1]
else:
z = z[0]
attr = "_" + z[len(x.__module__)+1:] + attr
x.__dict__[attr] = init
def compose(f, g):
"""
Return the composition of one-variable functions: `f \circ g`
See also :func:`self_compose()` and :func:`nest()`
INPUT:
- `f` -- a function of one variable
- `g` -- another function of one variable
OUTPUT:
A function, such that compose(f,g)(x) = f(g(x))
EXAMPLES::
sage: def g(x): return 3*x
sage: def f(x): return x + 1
sage: h1 = compose(f,g)
sage: h2 = compose(g,f)
sage: _ = var ('x')
sage: h1(x)
3*x + 1
sage: h2(x)
3*x + 3
::
sage: _ = function('f g')
sage: _ = var ('x')
sage: compose(f,g)(x)
f(g(x))
"""
return lambda x: f(g(x))
def self_compose(f, n):
"""
Return the function `f` composed with itself `n` times.
See :func:`nest()` if you want `f(f(...(f(x))...))` for
known `x`.
INPUT:
- `f` -- a function of one variable
- `n` -- a nonnegative integer
OUTPUT:
A function, the result of composing `f` with itself `n` times
EXAMPLES::
sage: def f(x): return x^2 + 1
sage: g = self_compose(f, 3)
sage: x = var('x')
sage: g(x)
((x^2 + 1)^2 + 1)^2 + 1
::
sage: def f(x): return x + 1
sage: g = self_compose(f, 10000)
sage: g(0)
10000
::
sage: x = var('x')
sage: self_compose(sin, 0)(x)
x
"""
from sage.rings.all import Integer
typecheck(n, (int, long, Integer), 'n')
if n < 0:
raise ValueError("n must be a nonnegative integer, not {}.".format(n))
return lambda x: nest(f, n, x)
def nest(f, n, x):
"""
Return `f(f(...f(x)...))`, where the composition occurs n times.
See also :func:`compose()` and :func:`self_compose()`
INPUT:
- `f` -- a function of one variable
- `n` -- a nonnegative integer
- `x` -- any input for `f`
OUTPUT:
`f(f(...f(x)...))`, where the composition occurs n times
EXAMPLES::
sage: def f(x): return x^2 + 1
sage: x = var('x')
sage: nest(f, 3, x)
((x^2 + 1)^2 + 1)^2 + 1
::
sage: _ = function('f')
sage: _ = var('x')
sage: nest(f, 10, x)
f(f(f(f(f(f(f(f(f(f(x))))))))))
::
sage: _ = function('f')
sage: _ = var('x')
sage: nest(f, 0, x)
x
"""
from sage.rings.all import Integer
typecheck(n, (int, long, Integer), 'n')
if n < 0:
raise ValueError("n must be a nonnegative integer, not {}.".format(n))
for i in range(n):
x = f(x)
return x
#################################################################
# The A \ b operator
#################################################################
class BackslashOperator:
"""
Implements Matlab-style backslash operator for solving systems::
A \\ b
The preparser converts this to multiplications using
``BackslashOperator()``.
EXAMPLES::
sage: preparse("A \ matrix(QQ,2,1,[1/3,'2/3'])")
"A * BackslashOperator() * matrix(QQ,Integer(2),Integer(1),[Integer(1)/Integer(3),'2/3'])"
sage: preparse("A \ matrix(QQ,2,1,[1/3,2*3])")
'A * BackslashOperator() * matrix(QQ,Integer(2),Integer(1),[Integer(1)/Integer(3),Integer(2)*Integer(3)])'
sage: preparse("A \ B + C")
'A * BackslashOperator() * B + C'
sage: preparse("A \ eval('C+D')")
"A * BackslashOperator() * eval('C+D')"
sage: preparse("A \ x / 5")
'A * BackslashOperator() * x / Integer(5)'
sage: preparse("A^3 \ b")
'A**Integer(3) * BackslashOperator() * b'
"""
def __rmul__(self, left):
"""
EXAMPLES::
sage: A = random_matrix(ZZ, 4)
sage: B = random_matrix(ZZ, 4)
sage: temp = A * BackslashOperator()
sage: temp.left is A
True
sage: X = temp * B
sage: A * X == B
True
"""
self.left = left
return self
def __mul__(self, right):
"""
EXAMPLES::
sage: A = matrix(RDF, 5, 5, 2)
sage: b = vector(RDF, 5, range(5))
sage: v = A \ b
sage: v.zero_at(1e-19) # On at least one platform, we get a "negative zero"
(0.0, 0.5, 1.0, 1.5, 2.0)
sage: v = A._backslash_(b)
sage: v.zero_at(1e-19)
(0.0, 0.5, 1.0, 1.5, 2.0)
sage: v = A * BackslashOperator() * b
sage: v.zero_at(1e-19)
(0.0, 0.5, 1.0, 1.5, 2.0)
"""
return self.left._backslash_(right)
#################################################################
# is_iterator function
#################################################################
def is_iterator(it):
"""
    Test whether ``it`` is an iterator.
    The mantra ``if hasattr(it, 'next')`` was often used to test whether ``it`` is an
    iterator. This is not quite correct, since ``it`` could have a ``next``
    method with different semantics.
EXAMPLES::
sage: it = iter([1,2,3])
sage: is_iterator(it)
True
sage: class wrong():
... def __init__(self): self.n = 5
... def next(self):
... self.n -= 1
... if self.n == 0: raise StopIteration
... return self.n
sage: x = wrong()
sage: is_iterator(x)
False
sage: list(x)
Traceback (most recent call last):
...
TypeError: iteration over non-sequence
sage: class good(wrong):
... def __iter__(self): return self
sage: x = good()
sage: is_iterator(x)
True
sage: list(x)
[4, 3, 2, 1]
sage: P = Partitions(3)
sage: is_iterator(P)
False
sage: is_iterator(iter(P))
True
"""
# see trac #7398 for a discussion
try:
return it is iter(it)
except Exception:
return False
#################################################################
# Useful but hard to classify
#################################################################
def random_sublist(X, s):
"""
Return a pseudo-random sublist of the list X where the probability
of including a particular element is s.
INPUT:
- ``X`` - list
- ``s`` - floating point number between 0 and 1
OUTPUT: list
EXAMPLES::
sage: S = [1,7,3,4,18]
sage: random_sublist(S, 0.5)
[1, 3, 4]
sage: random_sublist(S, 0.5)
[1, 3]
"""
return [a for a in X if random.random() <= s]
def some_tuples(elements, repeat, bound):
r"""
    Return an iterator over at most ``bound`` ``repeat``-tuples of
    ``elements``.
TESTS::
sage: from sage.misc.misc import some_tuples
sage: l = some_tuples([0,1,2,3], 2, 3)
sage: l
<itertools.islice object at ...>
sage: len(list(l))
3
sage: l = some_tuples(range(50), 3, 10)
sage: len(list(l))
10
.. TODO::
        Currently, this only returns an iterator over the first elements of the
Cartesian product. It would be smarter to return something more
"random like" as it is used in tests. However, this should remain
deterministic.
"""
from itertools import islice, product
return islice(product(elements, repeat=repeat), bound)
def powerset(X):
r"""
Iterator over the *list* of all subsets of the iterable X, in no
particular order. Each list appears exactly once, up to order.
INPUT:
- ``X`` - an iterable
OUTPUT: iterator of lists
EXAMPLES::
sage: list(powerset([1,2,3]))
[[], [1], [2], [1, 2], [3], [1, 3], [2, 3], [1, 2, 3]]
sage: [z for z in powerset([0,[1,2]])]
[[], [0], [[1, 2]], [0, [1, 2]]]
Iterating over the power set of an infinite set is also allowed::
sage: i = 0
sage: L = []
sage: for x in powerset(ZZ):
....: if i > 10:
....: break
....: else:
....: i += 1
....: L.append(x)
sage: print(" ".join(str(x) for x in L))
[] [0] [1] [0, 1] [-1] [0, -1] [1, -1] [0, 1, -1] [2] [0, 2] [1, 2]
You may also use subsets as an alias for powerset::
sage: subsets([1,2,3])
<generator object powerset at 0x...>
sage: list(subsets([1,2,3]))
[[], [1], [2], [1, 2], [3], [1, 3], [2, 3], [1, 2, 3]]
The reason we return lists instead of sets is that the elements of
sets must be hashable and many structures on which one wants the
powerset consist of non-hashable objects.
AUTHORS:
- William Stein
- Nils Bruin (2006-12-19): rewrite to work for not-necessarily
finite objects X.
"""
yield []
pairs = []
for x in X:
pairs.append((2**len(pairs),x))
for w in range(2**(len(pairs)-1), 2**(len(pairs))):
yield [x for m, x in pairs if m & w]
subsets = powerset
#################################################################
# Type checking
#################################################################
def typecheck(x, C, var="x"):
"""
    Check that x is an instance of C. If not, raise a TypeError with an
    error message.
"""
if not isinstance(x, C):
raise TypeError("{} (={}) must be of type {}.".format(var, x, C))
#################################################################
# This will likely eventually be useful.
#################################################################
# From the Python Cookbook Ver 2, Recipe 20.4
class cached_attribute(object):
"""
Computes attribute value and caches it in the instance.
"""
def __init__(self, method, name=None):
# record the unbound-method and the name
self.method = method
self.name = name or method.__name__
def __get__(self, inst, cls):
if inst is None:
# instance attribute accessed on class, return self
return self
# compute, cache and return the instance's attribute value
result = self.method(inst)
setattr(inst, self.name, result)
return result
class lazy_prop(object):
def __init__(self, calculate_function):
self._calculate = calculate_function
self.__doc__ = calculate_function.__doc__
def __call__(self, obj, _=None):
if obj is None:
return self
value = self._calculate(obj)
setattr(obj, self._calculate.__name__, value)
return value
def prop(f):
return property(f, None, None, f.__doc__)
#################################################################
# Misc.
#################################################################
def exists(S, P):
"""
If S contains an element x such that P(x) is True, this function
returns True and the element x. Otherwise it returns False and
None.
Note that this function is NOT suitable to be used in an
if-statement or in any place where a boolean expression is
expected. For those situations, use the Python built-in
any(P(x) for x in S)
INPUT:
- ``S`` - object (that supports enumeration)
- ``P`` - function that returns True or False
OUTPUT:
- ``bool`` - whether or not P is True for some element
x of S
- ``object`` - x
EXAMPLES: lambda functions are very useful when using the exists
function::
sage: exists([1,2,5], lambda x : x > 7)
(False, None)
sage: exists([1,2,5], lambda x : x > 3)
(True, 5)
The following example is similar to one in the MAGMA handbook. We
check whether certain integers are a sum of two (small) cubes::
sage: cubes = [t**3 for t in range(-10,11)]
sage: exists([(x,y) for x in cubes for y in cubes], lambda v : v[0]+v[1] == 218)
(True, (-125, 343))
sage: exists([(x,y) for x in cubes for y in cubes], lambda v : v[0]+v[1] == 219)
(False, None)
"""
for x in S:
if P(x): return True, x
return False, None
def forall(S, P):
"""
    If P(x) is true for every x in S, return True and None. If there is
some element x in S such that P is not True, return False and x.
Note that this function is NOT suitable to be used in an
if-statement or in any place where a boolean expression is
expected. For those situations, use the Python built-in
all(P(x) for x in S)
INPUT:
- ``S`` - object (that supports enumeration)
- ``P`` - function that returns True or False
OUTPUT:
- ``bool`` - whether or not P is True for all elements
of S
- ``object`` - x
EXAMPLES: lambda functions are very useful when using the forall
function. As a toy example we test whether certain integers are
greater than 3.
::
sage: forall([1,2,5], lambda x : x > 3)
(False, 1)
sage: forall([1,2,5], lambda x : x > 0)
(True, None)
Next we ask whether every positive integer less than 100 is a
product of at most 2 prime factors::
sage: forall(range(1,100), lambda n : len(factor(n)) <= 2)
(False, 30)
The answer is no, and 30 is a counterexample. However, every
    positive integer less than 100 is a product of at most 3 primes.
::
sage: forall(range(1,100), lambda n : len(factor(n)) <= 3)
(True, None)
"""
for x in S:
if not P(x): return False, x
return True, None
#################################################################
# which source file?
#################################################################
import inspect
def sourcefile(object):
"""
Work out which source or compiled file an object was defined in.
"""
return inspect.getfile(object)
#################################################################
# debug tracing
#################################################################
import pdb
set_trace = pdb.set_trace
#################################################################
# Word wrap lines
#################################################################
def word_wrap(s, ncols=85):
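    """
    Word wrap the string ``s`` to at most ``ncols`` columns per line, breaking
    at spaces where possible. Empty lines and lines starting with ``sage:``
    are left untouched; when a single word is longer than ``ncols`` the line
    is broken hard and a trailing backslash marks the break.
    """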
t = []
if ncols == 0:
return s
for x in s.split('\n'):
if len(x) == 0 or x.lstrip()[:5] == 'sage:':
t.append(x)
continue
while len(x) > ncols:
k = ncols
while k > 0 and x[k] != ' ':
k -= 1
if k == 0:
k = ncols
end = '\\'
else:
end = ''
t.append(x[:k] + end)
x = x[k:]
k=0
while k < len(x) and x[k] == ' ':
k += 1
x = x[k:]
t.append(x)
return '\n'.join(t)
def getitem(v, n):
r"""
Variant of getitem that coerces to an int if a TypeError is
raised.
(This is not needed anymore - classes should define an
__index__ method.)
Thus, e.g., ``getitem(v,n)`` will work even if
`v` is a Python list and `n` is a Sage integer.
EXAMPLES::
sage: v = [1,2,3]
The following used to fail in Sage <= 1.3.7. Now it works fine::
sage: v[ZZ(1)]
2
This always worked.
::
sage: getitem(v, ZZ(1))
2
"""
try:
return v[n]
except TypeError:
return v[int(n)]
def pad_zeros(s, size=3):
"""
EXAMPLES::
sage: pad_zeros(100)
'100'
sage: pad_zeros(10)
'010'
sage: pad_zeros(10, 5)
'00010'
sage: pad_zeros(389, 5)
'00389'
sage: pad_zeros(389, 10)
'0000000389'
"""
return "0"*(size-len(str(s))) + str(s)
import sage.server.support
def embedded():
"""
Return True if this copy of Sage is running embedded in the Sage
notebook.
EXAMPLES::
sage: sage.misc.misc.embedded() # output True if in the notebook
False
"""
return sage.server.support.EMBEDDED_MODE
#############################################
# Operators
#############################################
class AttrCallObject(object):
def __init__(self, name, args, kwds):
"""
TESTS::
sage: f = attrcall('core', 3); f
*.core(3)
sage: TestSuite(f).run()
"""
self.name = name
self.args = args
self.kwds = kwds
def __call__(self, x, *args):
"""
Gets the ``self.name`` method from ``x``, calls it with
``self.args`` and ``args`` as positional parameters and
``self.kwds`` as keyword parameters, and returns the result.
EXAMPLES::
sage: core = attrcall('core', 3)
sage: core(Partition([4,2]))
[4, 2]
sage: series = attrcall('series', x)
sage: series(sin(x), 4)
1*x + (-1/6)*x^3 + Order(x^4)
"""
return getattr(x, self.name)(*(self.args+args), **self.kwds)
def __repr__(self):
"""
Returns a string representation of this object. The star in the
output represents the object passed into self.
EXAMPLES::
sage: attrcall('core', 3)
*.core(3)
sage: attrcall('hooks', flatten=True)
*.hooks(flatten=True)
sage: attrcall('hooks', 3, flatten=True)
*.hooks(3, flatten=True)
"""
s = "*.%s(%s"%(self.name, ", ".join(map(repr, self.args)))
if self.kwds:
if len(self.args) > 0:
s += ", "
s += ", ".join("%s=%s"%keyvalue for keyvalue in self.kwds.items())
s += ")"
return s
def __eq__(self, other):
"""
Equality testing
EXAMPLES::
sage: attrcall('core', 3, flatten = True) == attrcall('core', 3, flatten = True)
True
sage: attrcall('core', 2) == attrcall('core', 3)
False
sage: attrcall('core', 2) == 1
False
"""
return self.__class__ == other.__class__ and self.__dict__ == other.__dict__
def __ne__(self, other):
"""
        Inequality testing
EXAMPLES::
sage: attrcall('core', 3, flatten = True) != attrcall('core', 3, flatten = True)
False
sage: attrcall('core', 2) != attrcall('core', 3)
True
sage: attrcall('core', 2) != 1
True
"""
return not self == other
def __hash__(self):
"""
Hash value
This method tries to ensure that, when two ``attrcall``
objects are equal, they have the same hash value.
.. warning:: dicts are not hashable, so we instead hash their
items; however the order of those items might differ. The
proper fix would be to use a frozen dict for ``kwds``, when
frozen dicts will be available in Python.
EXAMPLES::
sage: x = attrcall('core', 3, flatten = True, blah = 1)
sage: hash(x) # random # indirect doctest
210434060
sage: type(hash(x))
<type 'int'>
sage: y = attrcall('core', 3, blah = 1, flatten = True)
sage: hash(y) == hash(x)
True
sage: y = attrcall('core', 3, flatten = True, blah = 2)
sage: hash(y) != hash(x)
True
sage: hash(attrcall('core', 2)) != hash(attrcall('core', 3))
True
sage: hash(attrcall('core', 2)) != hash(1)
True
Note: a missing ``__hash__`` method here used to break the
unique representation of parents taking ``attrcall`` objects
as input; see :trac:`8911`.
"""
return hash((self.args, tuple(self.kwds.items())))
def attrcall(name, *args, **kwds):
"""
Returns a callable which takes in an object, gets the method named
name from that object, and calls it with the specified arguments
and keywords.
INPUT:
- ``name`` - a string of the name of the method you
want to call
- ``args, kwds`` - arguments and keywords to be passed
to the method
EXAMPLES::
sage: f = attrcall('core', 3); f
*.core(3)
sage: [f(p) for p in Partitions(5)]
[[2], [1, 1], [1, 1], [3, 1, 1], [2], [2], [1, 1]]
"""
return AttrCallObject(name, args, kwds)
def call_method(obj, name, *args, **kwds):
"""
Call the method ``name`` on ``obj``.
This has to exist somewhere in Python!!!
    .. SEEALSO:: :func:`operator.methodcaller`, :func:`attrcall`
EXAMPLES::
sage: from sage.misc.misc import call_method
sage: call_method(1, "__add__", 2)
3
"""
return getattr(obj, name)(*args, **kwds)
def is_in_string(line, pos):
r"""
Returns True if the character at position pos in line occurs
within a string.
EXAMPLES::
sage: from sage.misc.misc import is_in_string
sage: line = 'test(\'#\')'
sage: is_in_string(line, line.rfind('#'))
True
sage: is_in_string(line, line.rfind(')'))
False
"""
i = 0
in_single_quote = False
in_double_quote = False
in_triple_quote = False
def in_quote():
return in_single_quote or in_double_quote or in_triple_quote
while i < pos:
# Update quote parsing
# We only do this if this quote isn't backquoted itself,
# which is the case if the previous character isn't
# a backslash, or it is but both previous characters
# are backslashes.
if line[i-1:i] != '\\' or line[i-2:i] == '\\\\':
if line[i:i+3] in ['"""', "'''"]:
if not in_quote():
in_triple_quote = True
elif in_triple_quote:
in_triple_quote = False
elif line[i] == "'":
if not in_quote():
in_single_quote = True
elif in_single_quote:
in_single_quote = False
elif line[i] == '"':
if not in_quote():
in_double_quote = True
elif in_double_quote:
in_double_quote = False
i += 1
return in_quote()
def get_main_globals():
"""
Return the main global namespace.
EXAMPLES::
sage: from sage.misc.misc import get_main_globals
sage: G = get_main_globals()
sage: bla = 1
sage: G['bla']
1
sage: bla = 2
sage: G['bla']
2
sage: G['ble'] = 5
sage: ble
5
This is analogous to :func:`globals`, except that it can be called
from any function, even if it is in a Python module::
sage: def f():
....: G = get_main_globals()
....: assert G['bli'] == 14
....: G['blo'] = 42
sage: bli = 14
sage: f()
sage: blo
42
ALGORITHM:
The main global namespace is discovered by going up the frame
stack until the frame for the :mod:`__main__` module is found.
Should this frame not be found (this should not occur in normal
operation), an exception "ValueError: call stack is not deep
enough" will be raised by ``_getframe``.
See :meth:`inject_variable_test` for a real test that this works
within deeply nested calls in a function defined in a Python
module.
"""
import sys
depth = 0
while True:
G = sys._getframe(depth).f_globals
if G.get("__name__", None) == "__main__":
break
depth += 1
return G
def inject_variable(name, value):
"""
Inject a variable into the main global namespace.
INPUT:
- ``name`` -- a string
- ``value`` -- anything
EXAMPLES::
sage: from sage.misc.misc import inject_variable
sage: inject_variable("a", 314)
sage: a
314
A warning is issued the first time an existing value is overwritten::
sage: inject_variable("a", 271)
doctest:...: RuntimeWarning: redefining global value `a`
sage: a
271
sage: inject_variable("a", 272)
sage: a
272
    That's because warn seems not to reissue the same warning twice:
sage: from warnings import warn
sage: warn("blah")
doctest:...: UserWarning: blah
sage: warn("blah")
Use with care!
"""
assert isinstance(name, str)
# Using globals() does not work, even in Cython, because
# inject_variable is called not only from the interpreter, but
# also from functions in various modules.
G = get_main_globals()
if name in G:
warn("redefining global value `%s`"%name, RuntimeWarning, stacklevel = 2)
G[name] = value
def inject_variable_test(name, value, depth):
"""
A function for testing deep calls to inject_variable
TESTS::
sage: from sage.misc.misc import inject_variable_test
sage: inject_variable_test("a0", 314, 0)
sage: a0
314
sage: inject_variable_test("a1", 314, 1)
sage: a1
314
sage: inject_variable_test("a2", 314, 2)
sage: a2
314
sage: inject_variable_test("a2", 271, 2)
doctest:...: RuntimeWarning: redefining global value `a2`
sage: a2
271
"""
if depth == 0:
inject_variable(name, value)
else:
inject_variable_test(name, value, depth - 1)
| 27.671825
| 122
| 0.524539
|
a6ff74c47d8a202ce51eca532ad87f7c65ee1341
| 3,224
|
py
|
Python
|
deblurrer/scripts/datasets/kaggle_blur.py
|
ElPapi42/DeepDeblurring
|
8649f607ddf70a14c067cf902fbba341f99635af
|
[
"MIT"
] | 2
|
2020-06-23T21:56:53.000Z
|
2021-02-02T10:21:35.000Z
|
deblurrer/scripts/datasets/kaggle_blur.py
|
ElPapi42/DeepDeblurring
|
8649f607ddf70a14c067cf902fbba341f99635af
|
[
"MIT"
] | 6
|
2020-05-27T15:07:07.000Z
|
2021-02-11T12:24:46.000Z
|
deblurrer/scripts/datasets/kaggle_blur.py
|
ElPapi42/DeepDeblurring
|
8649f607ddf70a14c067cf902fbba341f99635af
|
[
"MIT"
] | 2
|
2020-09-28T21:04:31.000Z
|
2021-02-11T12:26:56.000Z
|
#!/usr/bin/python
# coding=utf-8
"""
Downloads the kaggle blur dataset training data.
The data must be downloaded to "/datasets/kaggle_blur"
The module must define the data extraction logic.
# You can run this on Google Colab to get faster download speeds
"""
import os
import shutil
import pathlib
from kaggle import api
import pandas as pd
def refactor_folder(path):
"""
    Refactor the dataset folder to be structured as sharp/blurred images.
Args:
path (str): The path where the function will operate
"""
old_sharp_path = os.path.join(path, 'old_sharp')
old_defocus_path = os.path.join(path, 'defocused_blurred')
old_motion_path = os.path.join(path, 'motion_blurred')
new_sharp_path = os.path.join(path, 'sharp')
new_blur_path = os.path.join(path, 'blur')
# Rename sharp folder to old_sharp
os.rename(
new_sharp_path,
old_sharp_path,
)
# Create final dataset folders
os.mkdir(new_sharp_path)
os.mkdir(new_blur_path)
# rename everything from old_sharp to sharp only keeping the image id
images = os.listdir(old_sharp_path)
for sharp_image in images:
os.rename(
os.path.join(old_sharp_path, sharp_image),
os.path.join(new_sharp_path, '{path}.jpg'.format(path=sharp_image.split('_')[0])),
)
# Duplicates the sharp images, with its own id
images = os.listdir(new_sharp_path)
image_count = len(images)
for source_image in images:
shutil.copy2(
os.path.join(new_sharp_path, source_image),
os.path.join(new_sharp_path, '{path}.jpg'.format(path=str(int(source_image.split('.')[0]) + image_count))),
)
# Rename everything from defocused_blurred to blur only keeping the id
images = os.listdir(old_defocus_path)
for defocus_image in images:
os.rename(
os.path.join(old_defocus_path, defocus_image),
os.path.join(new_blur_path, '{path}.jpg'.format(path=defocus_image.split('_')[0])),
)
# Rename everything from motion_blurred to blur and assigning a new id
images = os.listdir(old_motion_path)
for motion_image in images:
os.rename(
os.path.join(old_motion_path, motion_image),
os.path.join(new_blur_path, '{path}.jpg'.format(path=str(int(motion_image.split('_')[0]) + image_count))),
)
def run(path):
"""
Run the script.
Args:
path (str): Path to download dataset files.
"""
# Logs
print('Downloading kaggle_blur')
download_path = pathlib.Path(path)/'kaggle_blur'
file_name = download_path/'blur-dataset.zip'
if (not file_name.exists()):
api.dataset_download_cli(
'kwentar/blur-dataset',
path=download_path,
unzip=True,
)
refactor_folder(download_path)
if (__name__ == '__main__'):
folder_path = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__),
),
),
),
),
'datasets',
)
run(folder_path)
| 26.866667
| 119
| 0.626861
|
44fe0468c9ea960fdf806e4d82a7f61598d6dbbb
| 4,132
|
py
|
Python
|
benchmark/startQiskit_noisy2225.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_noisy2225.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_noisy2225.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=40
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=24
prog.cz(input_qubit[0],input_qubit[3]) # number=25
prog.h(input_qubit[3]) # number=26
prog.h(input_qubit[3]) # number=21
prog.cz(input_qubit[0],input_qubit[3]) # number=22
prog.h(input_qubit[3]) # number=23
prog.h(input_qubit[3]) # number=27
prog.cz(input_qubit[0],input_qubit[3]) # number=28
prog.h(input_qubit[3]) # number=29
prog.cx(input_qubit[0],input_qubit[3]) # number=30
prog.x(input_qubit[3]) # number=31
prog.h(input_qubit[3]) # number=33
prog.cz(input_qubit[0],input_qubit[3]) # number=34
prog.h(input_qubit[3]) # number=35
prog.cx(input_qubit[0],input_qubit[3]) # number=18
prog.rx(-0.364424747816416,input_qubit[3]) # number=36
prog.y(input_qubit[3]) # number=20
prog.h(input_qubit[3]) # number=37
prog.cz(input_qubit[0],input_qubit[3]) # number=38
prog.h(input_qubit[3]) # number=39
prog.cx(input_qubit[0],input_qubit[3]) # number=12
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=19
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy2225.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 34.433333
| 140
| 0.653437
|
77c5a2836f081cbc63744765fd6b0b64f50b6d8b
| 4,529
|
py
|
Python
|
tests/unit/ppr/test_draft_summary.py
|
doug-lovett/registry-schemas
|
3a54f4f1b2264f1f5e3ac060583914c117dd34b6
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/ppr/test_draft_summary.py
|
doug-lovett/registry-schemas
|
3a54f4f1b2264f1f5e3ac060583914c117dd34b6
|
[
"Apache-2.0"
] | 10
|
2021-01-25T22:24:29.000Z
|
2021-12-17T21:40:15.000Z
|
tests/unit/ppr/test_draft_summary.py
|
doug-lovett/registry-schemas
|
3a54f4f1b2264f1f5e3ac060583914c117dd34b6
|
[
"Apache-2.0"
] | 21
|
2021-01-25T22:27:45.000Z
|
2022-01-19T19:43:37.000Z
|
# Copyright © 2020 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Suite to ensure the PPR Draft Summary schema is valid."""
import copy
from registry_schemas import validate
from registry_schemas.example_data.ppr import DRAFT_SUMMARY
def test_valid_draft_summary():
"""Assert that the schema is performing as expected for a draft summary list."""
is_valid, errors = validate(DRAFT_SUMMARY, 'draftSummary', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert is_valid
def test_valid_draft_summary_empty():
"""Assert that the schema is performing as expected for an empty draft summary list."""
draft = copy.deepcopy(DRAFT_SUMMARY)
del draft[2]
del draft[1]
del draft[0]
is_valid, errors = validate(draft, 'draftSummary', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert is_valid
def test_invalid_draft_summary_missing_type():
"""Assert that an invalid draft summary fails - type is missing."""
draft = copy.deepcopy(DRAFT_SUMMARY)
del draft[0]['type']
is_valid, errors = validate(draft, 'draftSummary', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_draft_summary_missing_regtype():
"""Assert that an invalid draft summary fails - registration type is missing."""
draft = copy.deepcopy(DRAFT_SUMMARY)
del draft[0]['registrationType']
is_valid, errors = validate(draft, 'draftSummary', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_draft_summary_missing_docid():
"""Assert that an invalid draft summary fails - document ID is missing."""
draft = copy.deepcopy(DRAFT_SUMMARY)
del draft[0]['documentId']
is_valid, errors = validate(draft, 'draftSummary', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_draft_summary_missing_path():
"""Assert that an invalid draft summary fails - path is missing."""
draft = copy.deepcopy(DRAFT_SUMMARY)
del draft[0]['path']
is_valid, errors = validate(draft, 'draftSummary', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_draft_summary_missing_timestamp():
"""Assert that an invalid draft summary fails - create date time is missing."""
draft = copy.deepcopy(DRAFT_SUMMARY)
del draft[0]['createDateTime']
is_valid, errors = validate(draft, 'draftSummary', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_draft_summary_docid():
"""Assert that an invalid draft summary fails - document Id is too long."""
draft = copy.deepcopy(DRAFT_SUMMARY)
draft[0]['documentId'] = 'XXXXXXXXXXXXX'
is_valid, errors = validate(draft, 'draftSummary', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_draft_summary_type():
"""Assert that an invalid draft summary fails - type is too long."""
draft = copy.deepcopy(DRAFT_SUMMARY)
draft[0]['type'] = 'XXXXXXXXXXX'
is_valid, errors = validate(draft, 'draftSummary', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_draft_summary_regtype():
"""Assert that an invalid draft summary fails - registration type is too long."""
draft = copy.deepcopy(DRAFT_SUMMARY)
draft[0]['registrationType'] = 'XXX'
is_valid, errors = validate(draft, 'draftSummary', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
| 26.958333
| 91
| 0.681828
|
5f586c2bdd14140474896436317e8096ed2c8c80
| 424
|
py
|
Python
|
tinyfunds/users/migrations/0005_user_total_hours_pledged.py
|
sleepy/tinyfunds
|
e66c9ee158fa4c3b5ec14b2a5ea20cbdcc1c29e1
|
[
"MIT"
] | 1
|
2020-12-02T19:47:33.000Z
|
2020-12-02T19:47:33.000Z
|
tinyfunds/users/migrations/0005_user_total_hours_pledged.py
|
sleepy/tinyfunds
|
e66c9ee158fa4c3b5ec14b2a5ea20cbdcc1c29e1
|
[
"MIT"
] | 4
|
2021-04-08T20:35:24.000Z
|
2021-09-22T19:40:03.000Z
|
tinyfunds/users/migrations/0005_user_total_hours_pledged.py
|
sleepy/tinyfunds
|
e66c9ee158fa4c3b5ec14b2a5ea20cbdcc1c29e1
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.1 on 2020-11-17 20:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_user_total_donated'),
]
operations = [
migrations.AddField(
model_name='user',
name='total_hours_pledged',
field=models.DecimalField(decimal_places=2, default=0, max_digits=8),
),
]
| 22.315789
| 81
| 0.620283
|
047bdd7e654b881f78be80746f756d0ae997d651
| 6,456
|
py
|
Python
|
phasic_policy_gradient/tree_util.py
|
Leo-xh/Phasic-policy-graident
|
8cc55726d2d3ee20a9bd410384099326d8657f73
|
[
"MIT"
] | null | null | null |
phasic_policy_gradient/tree_util.py
|
Leo-xh/Phasic-policy-graident
|
8cc55726d2d3ee20a9bd410384099326d8657f73
|
[
"MIT"
] | null | null | null |
phasic_policy_gradient/tree_util.py
|
Leo-xh/Phasic-policy-graident
|
8cc55726d2d3ee20a9bd410384099326d8657f73
|
[
"MIT"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# JDS: copied this from jax, made it self-contained
# Currently just used for improved_checkpoint
# pylint: disable=bad-indentation
import functools
import itertools as it
import collections
def unzip2(xys):
xs = []
ys = []
for x, y in xys:
xs.append(x)
ys.append(y)
return tuple(xs), tuple(ys)
def partial(fun, *args, **kwargs):
wrapped = functools.partial(fun, *args, **kwargs)
functools.update_wrapper(wrapped, fun)
wrapped._bound_args = args # pylint: disable=protected-access
return wrapped
def concatenate(xs):
return list(it.chain.from_iterable(xs))
def tree_map(f, tree):
"""Map a function over a pytree to produce a new pytree.
Args:
f: function to be applied at each leaf.
tree: a pytree to be mapped over.
Returns:
A new pytree with the same structure as `tree` but with the value at each
leaf given by `f(x)` where `x` is the value at the corresponding leaf in
`tree`.
"""
node_type = node_types.get(type(tree))
if node_type:
children, node_spec = node_type.to_iterable(tree)
new_children = [tree_map(f, child) for child in children]
return node_type.from_iterable(node_spec, new_children)
else:
return f(tree)
def tree_multimap(f, tree, *rest):
"""Map a multi-input function over pytree args to produce a new pytree.
Args:
f: function that takes `1 + len(rest)` arguments, to be applied at the
corresponding leaves of the pytrees.
tree: a pytree to be mapped over, with each leaf providing the first
positional argument to `f`.
*rest: a tuple of pytrees, each with the same structure as `tree`.
Returns:
A new pytree with the same structure as `tree` but with the value at each
leaf given by `f(x, *xs)` where `x` is the value at the corresponding leaf
in `tree` and `xs` is the tuple of values at corresponding leaves in `rest`.
"""
# equivalent to prefix_multimap(f, tree_structure(tree), tree, *rest)
node_type = node_types.get(type(tree))
if node_type:
children, node_spec = node_type.to_iterable(tree)
all_children = [children]
for other_tree in rest:
# other_node_type = node_types.get(type(other_tree))
# if node_type != other_node_type:
# raise TypeError('Mismatch: {} != {}'.format(other_node_type, node_type))
other_children, other_node_data = node_type.to_iterable(other_tree)
if other_node_data != node_spec:
raise TypeError("Mismatch: {} != {}".format(other_node_data, node_spec))
all_children.append(other_children)
new_children = [tree_multimap(f, *xs) for xs in zip(*all_children)]
return node_type.from_iterable(node_spec, new_children)
else:
return f(tree, *rest)
def tree_reduce(f, tree):
flat, _ = tree_flatten(tree)
return functools.reduce(f, flat)
def tree_all(tree):
flat, _ = tree_flatten(tree)
return all(flat)
def walk_pytree(f_node, f_leaf, tree):
node_type = node_types.get(type(tree))
if node_type:
children, node_spec = node_type.to_iterable(tree)
proc_children, child_specs = unzip2(
[walk_pytree(f_node, f_leaf, child) for child in children]
)
tree_def = PyTreeDef(node_type, node_spec, child_specs)
return f_node(proc_children), tree_def
else:
return f_leaf(tree), PyLeaf()
tree_flatten = partial(walk_pytree, concatenate, lambda x: [x])
class PyTreeDef(object):
def __init__(self, node_type, node_data, children):
self.node_type = node_type
self.node_data = node_data
self.children = children
def __repr__(self):
if self.node_data is None:
data_repr = ""
else:
data_repr = "[{}]".format(self.node_data)
return "PyTree({}{}, [{}])".format(
self.node_type.name, data_repr, ",".join(safe_map(repr, self.children))
)
def __hash__(self):
return hash((self.node_type, self.node_data, tuple(self.children)))
def __eq__(self, other):
if isinstance(other, PyLeaf):
return False
else:
return (
self.node_type == other.node_type
and self.node_data == other.node_data
and self.children == other.children
)
def __ne__(self, other):
return not self == other
class PyLeaf(object):
def __repr__(self):
return "*"
def __eq__(self, other):
return isinstance(other, PyLeaf)
class NodeType(object):
def __init__(self, name, to_iterable, from_iterable):
self.name = name
self.to_iterable = to_iterable
self.from_iterable = from_iterable
node_types = {}
def register_pytree_node(py_type, to_iterable, from_iterable):
assert py_type not in node_types
node_types[py_type] = NodeType(str(py_type), to_iterable, from_iterable)
def tuple_to_iterable(xs):
return xs, None
def tuple_from_iterable(_keys, xs):
return tuple(xs)
def list_to_iterable(xs):
return tuple(xs), None
def list_from_iterable(_keys, xs):
return list(xs)
def dict_to_iterable(xs):
keys = tuple(sorted(xs.keys()))
return tuple(map(xs.get, keys)), keys
def dict_from_iterable(keys, xs):
return dict(zip(keys, xs))
def none_to_iterable(_xs):
return (), None
def none_from_iterable(_keys, _xs):
return None
register_pytree_node(tuple, tuple_to_iterable, tuple_from_iterable)
register_pytree_node(list, list_to_iterable, list_from_iterable)
register_pytree_node(dict, dict_to_iterable, dict_from_iterable)
register_pytree_node(collections.OrderedDict, dict_to_iterable, dict_from_iterable)
register_pytree_node(type(None), none_to_iterable, none_from_iterable)
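# A minimal usage sketch of the helpers above (guarded so that importing the
# module is unaffected): tree_map rebuilds the container structure while
# applying the function to every leaf, and tree_flatten returns the leaves
# together with a PyTreeDef describing that structure.
if __name__ == "__main__":
    example = {"a": 1, "b": (2, 3), "c": [4, None]}
    # Leaves are doubled; the dict/tuple/list/None structure is preserved.
    doubled = tree_map(lambda x: x * 2, example)
    assert doubled == {"a": 2, "b": (4, 6), "c": [8, None]}
    # tree_multimap zips corresponding leaves of structurally identical trees.
    assert tree_multimap(lambda x, y: x + y, example, example) == doubled
    # tree_flatten returns the flat list of leaves plus the tree structure.
    leaves, treedef = tree_flatten(example)
    assert leaves == [1, 2, 3, 4]
    assert isinstance(treedef, PyTreeDef)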
| avg_line_length: 29.345455 | max_line_length: 88 | alphanum_fraction: 0.671623 |

hexsha: bc691134870bdc37555afdb85457f84d41260d34 | size: 14,387 | ext: py | lang: Python
path: tools/targets/NU_M2354.py | head_hexsha: 1443257e40c97141c975f1c5cd7f4cf308a58b5e | licenses: ["Apache-2.0"]
max_stars_repo: adelcrosge1/mbed-os | count 3,897 | 2015-09-04T13:42:23.000Z to 2022-03-30T16:53:07.000Z
max_issues_repo: adelcrosge1/mbed-os | count 13,030 | 2015-09-17T10:30:05.000Z to 2022-03-31T13:36:44.000Z
max_forks_repo: adelcrosge1/mbed-os | count 2,950 | 2015-09-08T19:07:05.000Z to 2022-03-31T13:37:23.000Z
#!/usr/bin/python
# Copyright (c) 2017-2021 Arm Limited
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from os.path import abspath, basename, dirname, splitext, isdir
from os.path import join as path_join
import re
import subprocess
import shutil
from intelhex import IntelHex
from datetime import datetime
SCRIPT_DIR = dirname(abspath(__file__))
MBED_OS_ROOT = abspath(path_join(SCRIPT_DIR, os.pardir, os.pardir))
def m2354_tfm_bin(t_self, non_secure_image, secure_bin):
assert os.path.isfile(secure_bin)
assert os.path.isfile(non_secure_image)
secure_bin = abspath(secure_bin)
non_secure_image = abspath(non_secure_image)
SECURE_ROOT = dirname(secure_bin)
build_dir = dirname(non_secure_image)
tempdir = path_join(build_dir, 'temp')
if not isdir(tempdir):
os.makedirs(tempdir)
flash_layout = path_join(SECURE_ROOT, 'partition', 'flash_layout.h')
bl2_bin = path_join(SECURE_ROOT, 'bl2.bin')
s_bin_basename = splitext(basename(secure_bin))[0]
ns_bin_basename, output_ext = splitext(basename(non_secure_image))
# Convert NS image to BIN format if it is HEX
if output_ext == ".hex":
non_secure_bin = abspath(path_join(tempdir, ns_bin_basename + ".bin"))
ns_ih = IntelHex(non_secure_image)
ns_ih.tobinfile(non_secure_bin)
else:
non_secure_bin = non_secure_image
signing_key = path_join(SCRIPT_DIR, 'nuvoton_m2354-root-rsa-3072.pem')
assert os.path.isfile(signing_key)
# Find Python 3 command name across platforms
python3_cmd = "python3" if shutil.which("python3") is not None else "python"
# Specify image version
#
# MCUboot image version format: Major.Minor.Revision+Build
#
# Requirements for image version:
# 1. Major.Minor.Revision must be non-decremental when used to derive security
# counter (-s 'auto').
# 2. Make Major.Minor.Revision+Build incremental to identify the firmware
# itself uniquely through psa_fwu_query().
# 3. Get around MCUboot failure with:
# [INF] Starting bootloader
# [INF] Swap type: none
# [ERR] Failed to add Image 0 data to shared memory area
# [ERR] Unable to find bootable image
# This is because TF-M underestimates MAX_BOOT_RECORD_SZ for boot record
# where Major.Minor.Revision will pack into during signing. The more digits
# of the Major.Minor.Revision, the larger the needed boot record size. And
# then MCUboot errors in boot_save_boot_status().
#
# To meet all the above requirements, we apply the following policy:
# 1. To not change MAX_BOOT_RECORD_SZ in TF-M, specify Major.Minor.Revision
    #    with TF-M version instead of modified Unix timestamp. This needs fewer digits to
# fit into MAX_BOOT_RECORD_SZ.
# 2. To make Major.Minor.Revision+Build incremental, specify the Build part with
# modified Unix timestamp.
# 3. To make security counter non-decremental, we can derive it from
# Major.Minor.Revision (-s 'auto') or explicitly specify it with modified
# Unix timestamp, depending on security consideration.
#
# NOTE: To get around Y2038 problem, we modify Unix timestamp by setting new base
# point. Using 32-bit unsigned integer to hold the modified Unix timestamp,
# it will break (wrap around) after Y2156 (2106 + 2020 - 1970).
# https://en.wikipedia.org/wiki/Year_2038_problem
#
modified_timestamp = int(datetime.now().timestamp()) - int(datetime(2020, 1, 1).timestamp())
img_ver_major = 1 # Instead of (modified_timestamp >> 24) & 0xFF
img_ver_minor = 4 # Instead of (modified_timestamp >> 16) & 0xFF
img_ver_revision = 0 # Instead of modified_timestamp & 0xFFFF
img_ver_build = modified_timestamp
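    # For example, a build at 2021-06-01T00:00:00 (local time) gives
    # modified_timestamp == 44668800 (517 days after 2020-01-01), so wrapper.py below
    # would be invoked with the version string "1.4.0+44668800" (illustrative numbers).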
# wrapper.py command template
cmd_wrapper = [
python3_cmd,
path_join(MBED_OS_ROOT, "tools", "psa", "tfm", "bin_utils", "wrapper.py"),
"-v",
"{}.{}.{}+{}".format(img_ver_major, img_ver_minor, img_ver_revision, img_ver_build),
"-k",
"SIGNING_KEY_PATH",
"--layout",
"IMAGE_MACRO_PATH",
"--public-key-format",
'full',
"--align",
'1',
# Reasons for removing padding and boot magic option "--pad":
# 1. PSA FWU API psa_fwu_install() will be responsible for writing boot magic to enable upgradeable.
# 2. The image size gets smaller instead of slot size.
#"--pad",
"--pad-header",
"-H",
'0x400',
"--overwrite-only",
"-s",
'auto', # Or modified_timestamp
"-d",
'(IMAGE_ID,MAJOR.MINOR.REVISION+BUILD)',
"RAW_BIN_PATH",
"SIGNED_BIN_PATH",
]
pos_wrapper_signing_key = cmd_wrapper.index("-k") + 1
pos_wrapper_layout = cmd_wrapper.index("--layout") + 1
pos_wrapper_dependency = cmd_wrapper.index("-d") + 1
pos_wrapper_raw_bin = len(cmd_wrapper) - 2
pos_wrapper_signed_bin = len(cmd_wrapper) - 1
# assemble.py command template
cmd_assemble = [
python3_cmd,
path_join(MBED_OS_ROOT, "tools", "psa", "tfm", "bin_utils", "assemble.py"),
"--layout",
"IMAGE_MACRO_PATH",
"-s",
"SECURE_BIN_PATH",
"-n",
"NONSECURE_BIN_PATH",
"-o",
"CONCATENATED_BIN_PATH",
]
pos_assemble_layout = cmd_assemble.index("--layout") + 1
pos_assemble_secure_bin = cmd_assemble.index("-s") + 1
pos_assemble_nonsecure_bin = cmd_assemble.index("-n") + 1
pos_assemble_concat_bin = cmd_assemble.index("-o") + 1
# If second signing key is passed down, go signing separately; otherwise, go signing together.
if os.path.isfile(path_join(SECURE_ROOT, 'partition', 'signing_layout_ns_preprocessed.h')):
signing_key_1 = 'nuvoton_m2354-root-rsa-3072_1.pem'
else:
signing_key_1 = None
if signing_key_1 is not None:
signing_key_1 = path_join(SCRIPT_DIR, signing_key_1)
assert os.path.isfile(signing_key_1)
image_macros_s = path_join(SECURE_ROOT, 'partition', 'signing_layout_s_preprocessed.h')
image_macros_ns = path_join(SECURE_ROOT, 'partition', 'signing_layout_ns_preprocessed.h')
assert os.path.isfile(image_macros_s)
assert os.path.isfile(image_macros_ns)
s_signed_bin = abspath(path_join(tempdir, 'tfm_s_signed' + '.bin'))
ns_signed_bin = abspath(path_join(tempdir, 'tfm_' + ns_bin_basename + '_signed' + '.bin'))
signed_concat_bin = abspath(path_join(tempdir, 'tfm_s_signed_' + ns_bin_basename + '_signed_concat' + '.bin'))
s_update_bin = abspath(path_join(build_dir, s_bin_basename + '_update' + '.bin'))
ns_update_bin = abspath(path_join(build_dir, ns_bin_basename + '_update' + '.bin'))
#1. Run wrapper to sign the secure TF-M binary
cmd_wrapper[pos_wrapper_signing_key] = signing_key
cmd_wrapper[pos_wrapper_layout] = image_macros_s
cmd_wrapper[pos_wrapper_dependency] = '(1,0.0.0+0)' # Minimum version of non-secure image required for upgrading to the secure image
cmd_wrapper[pos_wrapper_raw_bin] = secure_bin
cmd_wrapper[pos_wrapper_signed_bin] = s_signed_bin
retcode = run_cmd(cmd_wrapper, MBED_OS_ROOT)
if retcode:
raise Exception("Unable to sign " + "TF-M Secure" +
" binary, Error code: " + str(retcode))
return
#2. Run wrapper to sign the non-secure mbed binary
cmd_wrapper[pos_wrapper_signing_key] = signing_key_1
cmd_wrapper[pos_wrapper_layout] = image_macros_ns
cmd_wrapper[pos_wrapper_dependency] = '(0,0.0.0+0)' # Minimum version of secure image required for upgrading to the non-secure image
cmd_wrapper[pos_wrapper_raw_bin] = non_secure_bin
cmd_wrapper[pos_wrapper_signed_bin] = ns_signed_bin
retcode = run_cmd(cmd_wrapper, MBED_OS_ROOT)
if retcode:
raise Exception("Unable to sign " + "TF-M Secure" +
" binary, Error code: " + str(retcode))
return
#3. Concatenate signed secure TF-M binary and signed non-secure mbed binary
cmd_assemble[pos_assemble_layout] = image_macros_s
cmd_assemble[pos_assemble_secure_bin] = s_signed_bin
cmd_assemble[pos_assemble_nonsecure_bin] = ns_signed_bin
cmd_assemble[pos_assemble_concat_bin] = signed_concat_bin
retcode = run_cmd(cmd_assemble, MBED_OS_ROOT)
if retcode:
raise Exception("Unable to concatenate " + "Secure TF-M (signed)/Non-secure Mbed (signed)" +
" binaries, Error code: " + str(retcode))
return
#4. Concatenate MCUboot and concatenated signed secure TF-M binary/signed non-secure mbed binary
flash_area_0_offset = find_flash_area_0_offset(flash_layout)
out_ih = IntelHex()
out_ih.loadbin(bl2_bin)
out_ih.loadbin(signed_concat_bin, flash_area_0_offset)
out_ih.tofile(non_secure_image, 'hex' if output_ext == ".hex" else "bin")
# Generate firmware update file for PSA Firmware Update
shutil.copy(s_signed_bin, s_update_bin)
shutil.copy(ns_signed_bin, ns_update_bin)
else:
image_macros_s_ns = path_join(SECURE_ROOT, 'partition', 'signing_layout_preprocessed.h')
assert os.path.isfile(image_macros_s_ns)
concat_bin = abspath(path_join(tempdir, 'tfm_s_' + ns_bin_basename + ".bin"))
concat_signed_bin = abspath(path_join(tempdir, 'tfm_s_' + ns_bin_basename + '_signed' + ".bin"))
update_bin = abspath(path_join(build_dir, ns_bin_basename + '_update' + '.bin'))
#1. Concatenate secure TFM and non-secure mbed binaries
cmd_assemble[pos_assemble_layout] = image_macros_s_ns
cmd_assemble[pos_assemble_secure_bin] = secure_bin
cmd_assemble[pos_assemble_nonsecure_bin] = non_secure_bin
cmd_assemble[pos_assemble_concat_bin] = concat_bin
retcode = run_cmd(cmd_assemble, MBED_OS_ROOT)
if retcode:
raise Exception("Unable to concatenate " + "Secure TF-M/Non-secure Mbed" +
" binaries, Error code: " + str(retcode))
return
#2. Run wrapper to sign the concatenated binary
cmd_wrapper[pos_wrapper_signing_key] = signing_key
cmd_wrapper[pos_wrapper_layout] = image_macros_s_ns
cmd_wrapper[pos_wrapper_dependency] = '(1,0.0.0+0)' # No effect for single image boot
cmd_wrapper[pos_wrapper_raw_bin] = concat_bin
cmd_wrapper[pos_wrapper_signed_bin] = concat_signed_bin
retcode = run_cmd(cmd_wrapper, MBED_OS_ROOT)
if retcode:
raise Exception("Unable to sign " + "concatenated" +
" binary, Error code: " + str(retcode))
return
#3. Concatenate MCUboot and signed binary
flash_area_0_offset = find_flash_area_0_offset(flash_layout)
out_ih = IntelHex()
out_ih.loadbin(bl2_bin)
out_ih.loadbin(concat_signed_bin, flash_area_0_offset)
out_ih.tofile(non_secure_image, 'hex' if output_ext == ".hex" else "bin")
# Generate firmware update file for PSA Firmware Update
shutil.copy(concat_signed_bin, update_bin)
def find_flash_area_0_offset(configFile):
# Compiled regular expressions
flash_area_bl2_offset_re = re.compile(r"^#define\s+FLASH_AREA_BL2_OFFSET\s+\({0,1}(0x[0-9a-fA-F]+)\){0,1}")
flash_area_bl2_size_re = re.compile(r"^#define\s+FLASH_AREA_BL2_SIZE\s+\({0,1}(0x[0-9a-fA-F]+)\){0,1}")
rsvd_stor_size_re = re.compile(r"^#define\s+FLASH_AREA_0_OFFSET\s+\(FLASH_AREA_BL2_OFFSET\s+\+\s+FLASH_AREA_BL2_SIZE\s+\+\s+\({0,1}(0x[0-9a-fA-F]+)\){0,1}\)")
# Match values
flash_area_bl2_offset = None
flash_area_bl2_size = None
rsvd_stor_size = None
flash_area_0_offset = None
with open(configFile, 'r') as configFile_:
for line in configFile_:
# Seek "#define FLASH_AREA_BL2_OFFSET..."
if flash_area_bl2_offset is None:
m = flash_area_bl2_offset_re.match(line)
if m is not None:
flash_area_bl2_offset = int(m.group(1), 0)
continue
# Seek "#define FLASH_AREA_BL2_SIZE..."
if flash_area_bl2_size is None:
m = flash_area_bl2_size_re.match(line)
if m is not None:
flash_area_bl2_size = int(m.group(1), 0)
continue
# Seek "#define FLASH_AREA_0_OFFSET..."
if rsvd_stor_size is None:
m = rsvd_stor_size_re.match(line)
if m is not None:
rsvd_stor_size = int(m.group(1), 0)
continue
# FLASH_AREA_0_OFFSET = FLASH_AREA_BL2_OFFSET + FLASH_AREA_BL2_SIZE + Reserved storage area size
if flash_area_bl2_offset is not None and \
flash_area_bl2_size is not None and \
rsvd_stor_size is not None:
flash_area_0_offset = flash_area_bl2_offset + flash_area_bl2_size + rsvd_stor_size
break
return flash_area_0_offset
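# Illustrative flash_layout.h lines that the regexes above are written to match
# (the addresses are made-up values, not taken from a real layout):
#
#   #define FLASH_AREA_BL2_OFFSET   (0x0)
#   #define FLASH_AREA_BL2_SIZE     (0x10000)
#   #define FLASH_AREA_0_OFFSET     (FLASH_AREA_BL2_OFFSET + FLASH_AREA_BL2_SIZE + (0x2000))
#
# For these lines find_flash_area_0_offset() returns 0x0 + 0x10000 + 0x2000 == 0x12000.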
def run_cmd(cmd, directory):
# Redirect stdout/stderr to pipe, text mode
POPEN_INSTANCE = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=directory,
universal_newlines=True
)
# Command line
print("COMMAND: {}".format(POPEN_INSTANCE.args))
stdout_data, stderr_data = POPEN_INSTANCE.communicate()
# stdout/stderr messages
if (stdout_data):
print(stdout_data)
if (stderr_data):
print(stderr_data)
# Return code
return POPEN_INSTANCE.returncode
| avg_line_length: 42.439528 | max_line_length: 162 | alphanum_fraction: 0.665045 |

hexsha: bf50066ec02341e5098512f3313e3ddef61ee41d | size: 1,789 | ext: py | lang: Python
path: GoogleCodeJam/FairAndSquare.py | head_hexsha: b8842681b96017063a7baeac52ae1318bf59d74d | licenses: ["Apache-2.0"]
max_stars_repo: aajjbb/contest-files | count 1 | 2018-08-28T19:58:40.000Z to 2018-08-28T19:58:40.000Z
max_issues_repo: aajjbb/contest-files | count 2 | 2017-04-16T00:48:05.000Z to 2017-08-03T20:12:26.000Z
max_forks_repo: aajjbb/contest-files | count 4 | 2016-03-04T19:42:00.000Z to 2018-01-08T11:42:00.000Z
import sys
#sys.stdin = open("i.in", "r")
#sys.stdout = open("o.ot", "w")
MAXN = pow(10, 10)
buff = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '101', '202', '303', '404', '505', '606', '707', '808', '909']
values = set()
#LIB
def is_square(apositiveint):
x = apositiveint // 2
seen = set([x])
while x * x != apositiveint:
x = (x + (apositiveint // x)) // 2
if x in seen: return False
seen.add(x)
return True
def func(x):
y = str(int(x))[::-1]
return str(int(x)) == str(int(y))
def generateA(s):
if int(s) * int(s) > MAXN or int(s)* int(s) in values:
return
if func(s) and func(int(s)*int(s)) and int(s) * int(s) <= MAXN:
values.add(int(s)*int(s))
for _ in buff:
generateA(_+s+_)
generateA(s+str(s)[::-1])
def binary_search(a, x, lo=0, hi=None):
mid = 0
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo + hi) // 2
midval = a[mid]
if midval < x:
lo = mid + 1
elif midval > x:
hi = mid
else:
return mid
return mid
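# Illustrative behaviour of binary_search (values here are made up):
#   binary_search([1, 4, 9, 121, 484], 100) returns 3, the position of 121;
#   the values[low] < N / values[high] > M checks below then nudge the indices
#   so that [low, high] brackets only entries inside [N, M].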
#END LIB
# Populate the set "values" by expanding the palindromic seeds with generateA
for s in buff:
    generateA(s)
values = list(sorted(values))
T = int(input())
for _ in range(1, T + 1, 1):
N, M = map(int, input().split(' '))
low = binary_search(values, N, 0, len(values))
high = binary_search(values, M, 0, len(values))
if values[low] < N:
low += 1
if values[high] > M:
high -= 1
ans = 0
if low != high:
ans = high - low + 1
else:
if values[low] >= N and values[high] <= M:
ans = 1
print("Case #%d: %d" % (_, ans))
| avg_line_length: 21.817073 | max_line_length: 121 | alphanum_fraction: 0.464505 |

hexsha: 61c896404f7912d4b47b73bec2285ffd6387e9b2 | size: 6,681 | ext: py | lang: Python
path: _pytest/unittest.py | head_hexsha: 65ee5c11ff0b281adb345b55b062aed94ecaf213 | licenses: ["MIT"]
max_stars_repo: michilu/pytest | count null | dates null
max_issues_repo: michilu/pytest | count null | dates null
max_forks_repo: michilu/pytest | count null | dates null
""" discovery and running of std-library "unittest" style tests. """
import pytest, py
import sys, pdb
# for transfering markers
from _pytest.python import transfer_markers
def pytest_pycollect_makeitem(collector, name, obj):
unittest = sys.modules.get('unittest')
if unittest is None:
return # nobody can have derived unittest.TestCase
try:
isunit = issubclass(obj, unittest.TestCase)
except KeyboardInterrupt:
raise
except Exception:
pass
else:
if isunit:
return UnitTestCase(name, parent=collector)
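# Illustrative module that this hook would collect (hypothetical test file):
#
#   import unittest
#
#   class TestMath(unittest.TestCase):        # becomes a UnitTestCase collector
#       def test_add(self):                   # yielded below as a TestCaseFunction
#           self.assertEqual(1 + 1, 2)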
class UnitTestCase(pytest.Class):
    nofuncargs = True # marker for fixturemanager.getfixtureinfo()
# to declare that our children do not support funcargs
def collect(self):
self.session._fixturemanager.parsefactories(self, unittest=True)
loader = py.std.unittest.TestLoader()
module = self.getparent(pytest.Module).obj
cls = self.obj
foundsomething = False
for name in loader.getTestCaseNames(self.obj):
x = getattr(self.obj, name)
funcobj = getattr(x, 'im_func', x)
transfer_markers(funcobj, cls, module)
if hasattr(funcobj, 'todo'):
pytest.mark.xfail(reason=str(funcobj.todo))(funcobj)
yield TestCaseFunction(name, parent=self)
foundsomething = True
if not foundsomething:
runtest = getattr(self.obj, 'runTest', None)
if runtest is not None:
ut = sys.modules.get("twisted.trial.unittest", None)
if ut is None or runtest != ut.TestCase.runTest:
yield TestCaseFunction('runTest', parent=self)
def setup(self):
if getattr(self.obj, '__unittest_skip__', False):
return
meth = getattr(self.obj, 'setUpClass', None)
if meth is not None:
meth()
super(UnitTestCase, self).setup()
def teardown(self):
if getattr(self.obj, '__unittest_skip__', False):
return
meth = getattr(self.obj, 'tearDownClass', None)
if meth is not None:
meth()
super(UnitTestCase, self).teardown()
class TestCaseFunction(pytest.Function):
_excinfo = None
def setup(self):
self._testcase = self.parent.obj(self.name)
self._obj = getattr(self._testcase, self.name)
if hasattr(self._testcase, 'skip'):
pytest.skip(self._testcase.skip)
if hasattr(self._obj, 'skip'):
pytest.skip(self._obj.skip)
if hasattr(self._testcase, 'setup_method'):
self._testcase.setup_method(self._obj)
if hasattr(self, "_request"):
self._request._fillfixtures()
def teardown(self):
if hasattr(self._testcase, 'teardown_method'):
self._testcase.teardown_method(self._obj)
def startTest(self, testcase):
pass
def _addexcinfo(self, rawexcinfo):
# unwrap potential exception info (see twisted trial support below)
rawexcinfo = getattr(rawexcinfo, '_rawexcinfo', rawexcinfo)
try:
excinfo = py.code.ExceptionInfo(rawexcinfo)
except TypeError:
try:
try:
l = py.std.traceback.format_exception(*rawexcinfo)
l.insert(0, "NOTE: Incompatible Exception Representation, "
"displaying natively:\n\n")
pytest.fail("".join(l), pytrace=False)
except (pytest.fail.Exception, KeyboardInterrupt):
raise
except:
pytest.fail("ERROR: Unknown Incompatible Exception "
"representation:\n%r" %(rawexcinfo,), pytrace=False)
except KeyboardInterrupt:
raise
except pytest.fail.Exception:
excinfo = py.code.ExceptionInfo()
self.__dict__.setdefault('_excinfo', []).append(excinfo)
def addError(self, testcase, rawexcinfo):
self._addexcinfo(rawexcinfo)
def addFailure(self, testcase, rawexcinfo):
self._addexcinfo(rawexcinfo)
def addSkip(self, testcase, reason):
try:
pytest.skip(reason)
except pytest.skip.Exception:
self._addexcinfo(sys.exc_info())
def addExpectedFailure(self, testcase, rawexcinfo, reason=""):
try:
pytest.xfail(str(reason))
except pytest.xfail.Exception:
self._addexcinfo(sys.exc_info())
def addUnexpectedSuccess(self, testcase, reason=""):
self._unexpectedsuccess = reason
def addSuccess(self, testcase):
pass
def stopTest(self, testcase):
pass
def runtest(self):
self._testcase(result=self)
def _prunetraceback(self, excinfo):
pytest.Function._prunetraceback(self, excinfo)
traceback = excinfo.traceback.filter(
lambda x:not x.frame.f_globals.get('__unittest'))
if traceback:
excinfo.traceback = traceback
@pytest.mark.tryfirst
def pytest_runtest_makereport(item, call):
if isinstance(item, TestCaseFunction):
if item._excinfo:
call.excinfo = item._excinfo.pop(0)
del call.result
# twisted trial support
def pytest_runtest_protocol(item, __multicall__):
if isinstance(item, TestCaseFunction):
if 'twisted.trial.unittest' in sys.modules:
ut = sys.modules['twisted.python.failure']
Failure__init__ = ut.Failure.__init__.im_func
check_testcase_implements_trial_reporter()
def excstore(self, exc_value=None, exc_type=None, exc_tb=None,
captureVars=None):
if exc_value is None:
self._rawexcinfo = sys.exc_info()
else:
if exc_type is None:
exc_type = type(exc_value)
self._rawexcinfo = (exc_type, exc_value, exc_tb)
try:
Failure__init__(self, exc_value, exc_type, exc_tb,
captureVars=captureVars)
except TypeError:
Failure__init__(self, exc_value, exc_type, exc_tb)
ut.Failure.__init__ = excstore
try:
return __multicall__.execute()
finally:
ut.Failure.__init__ = Failure__init__
def check_testcase_implements_trial_reporter(done=[]):
if done:
return
from zope.interface import classImplements
from twisted.trial.itrial import IReporter
classImplements(TestCaseFunction, IReporter)
done.append(1)
| avg_line_length: 36.113514 | max_line_length: 79 | alphanum_fraction: 0.605897 |

hexsha: 79705d4fc8cb4cfb03bc299d722c8723fd7c417b | size: 791 | ext: py | lang: Python
path: test/terra/__init__.py | head_hexsha: 72863e804ec3d07c0da2ebeb665a31db7a7e3010 | licenses: ["Apache-2.0"]
max_stars_repo: awcross1/qiskit-aer | count null | dates null
max_issues_repo: awcross1/qiskit-aer | count null | dates null
max_forks_repo: awcross1/qiskit-aer | count null | dates null
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
'''
Terra tests
'''
import os
def load_tests(loader, standard_tests, pattern):
"""
test suite for unittest discovery
"""
this_dir = os.path.dirname(__file__)
if pattern in ['test*.py', '*_test.py']:
package_tests = loader.discover(start_dir=this_dir, pattern=pattern)
standard_tests.addTests(package_tests)
elif pattern in ['profile*.py', '*_profile.py']:
loader.testMethodPrefix = 'profile'
package_tests = loader.discover(start_dir=this_dir, pattern='test*.py')
standard_tests.addTests(package_tests)
return standard_tests
| avg_line_length: 28.25 | max_line_length: 79 | alphanum_fraction: 0.687737 |

hexsha: ca34288f466af309c0af4442f98356605b5b5082 | size: 12,155 | ext: py | lang: Python
path: examples/neq-switching/run_rj_neq.py | head_hexsha: e8a2b7ef7ec500a16cbc83c6ea1ca444c35024d7 | licenses: ["MIT"]
max_stars_repo: mikemhenry/perses | count null | dates null
max_issues_repo: mikemhenry/perses | count null | dates null
max_forks_repo: mikemhenry/perses | count null | dates null
import numpy as np
import tqdm
from openmmtools import integrators, states, mcmc, constants
from perses.rjmc.topology_proposal import TopologyProposal
from perses.rjmc.geometry import FFAllAngleGeometryEngine
from perses.annihilation.ncmc_switching import NCMCEngine
from simtk import openmm, unit
from io import StringIO
from simtk.openmm import app
from perses.dispersed.feptasks import compute_reduced_potential
from dask import distributed
import mdtraj as md
temperature = 300.0*unit.kelvin
beta = 1.0 / (temperature*constants.kB)
def iupac_to_oemol(iupac_name='bosutinib'):
from openeye import oechem, oeiupac, oeomega
# Create molecule.
mol = oechem.OEMol()
oeiupac.OEParseIUPACName(mol, iupac_name)
mol.SetTitle("MOL")
# Assign aromaticity and hydrogens.
oechem.OEAssignAromaticFlags(mol, oechem.OEAroModelOpenEye)
oechem.OEAddExplicitHydrogens(mol)
# Create atom names.
oechem.OETriposAtomNames(mol)
# Create bond types
oechem.OETriposBondTypeNames(mol)
# Assign geometry
omega = oeomega.OEOmega()
omega.SetMaxConfs(1)
omega.SetIncludeInput(False)
omega.SetStrictStereo(True)
omega(mol)
return mol
def createSystemFromIUPAC(iupac_name):
"""
Create an openmm system out of an oemol
Parameters
----------
iupac_name : str
IUPAC name
Returns
-------
molecule : openeye.OEMol
OEMol molecule
system : openmm.System object
OpenMM system
positions : [n,3] np.array of floats
Positions
topology : openmm.app.Topology object
Topology
"""
from perses.utils.data import get_data_filename
from perses.utils.openeye import extractPositionsFromOEMol
# Create OEMol
molecule = iupac_to_oemol(iupac_name)
# Generate a topology.
from openmoltools.forcefield_generators import generateTopologyFromOEMol
topology = generateTopologyFromOEMol(molecule)
# Initialize a forcefield with GAFF.
# TODO: Fix path for `gaff.xml` since it is not yet distributed with OpenMM
from simtk.openmm.app import ForceField
gaff_xml_filename = get_data_filename('data/gaff.xml')
forcefield = ForceField(gaff_xml_filename)
# Generate template and parameters.
from openmoltools.forcefield_generators import generateResidueTemplate
[template, ffxml] = generateResidueTemplate(molecule)
# Register the template.
forcefield.registerResidueTemplate(template)
# Add the parameters.
forcefield.loadFile(StringIO(ffxml))
# Create the system.
system = forcefield.createSystem(topology, removeCMMotion=False)
# Extract positions
positions = extractPositionsFromOEMol(molecule)
return (molecule, system, positions, topology)
def generate_solvated_hybrid_test_topology(current_mol_name="naphthalene", proposed_mol_name="benzene"):
"""
Generate a test solvated topology proposal, current positions, and new positions triplet
from two IUPAC molecule names.
Parameters
----------
current_mol_name : str, optional
name of the first molecule
proposed_mol_name : str, optional
name of the second molecule
Returns
-------
topology_proposal : perses.rjmc.topology_proposal
The topology proposal representing the transformation
current_positions : np.array, unit-bearing
The positions of the initial system
new_positions : np.array, unit-bearing
The positions of the new system
"""
import simtk.openmm.app as app
from openmoltools import forcefield_generators
from openeye import oechem
from perses.rjmc.topology_proposal import SystemGenerator, SmallMoleculeSetProposalEngine
from perses.rjmc import geometry
from perses.utils.data import get_data_filename
current_mol, unsolv_old_system, pos_old, top_old = createSystemFromIUPAC(current_mol_name)
proposed_mol = iupac_to_oemol(proposed_mol_name)
proposed_mol.SetTitle("MOL")
initial_smiles = oechem.OEMolToSmiles(current_mol)
final_smiles = oechem.OEMolToSmiles(proposed_mol)
gaff_xml_filename = get_data_filename("data/gaff.xml")
forcefield = app.ForceField(gaff_xml_filename, 'tip3p.xml')
forcefield.registerTemplateGenerator(forcefield_generators.gaffTemplateGenerator)
modeller = app.Modeller(top_old, pos_old)
modeller.addSolvent(forcefield, model='tip3p', padding=9.0*unit.angstrom)
solvated_topology = modeller.getTopology()
solvated_positions = modeller.getPositions()
solvated_system = forcefield.createSystem(solvated_topology, nonbondedMethod=app.PME, removeCMMotion=False)
barostat = openmm.MonteCarloBarostat(1.0*unit.atmosphere, temperature, 50)
solvated_system.addForce(barostat)
gaff_filename = get_data_filename('data/gaff.xml')
system_generator = SystemGenerator([gaff_filename, 'amber99sbildn.xml', 'tip3p.xml'], barostat=barostat, forcefield_kwargs={'removeCMMotion': False},periodic_forcefield_kwargs={'nonbondedMethod': app.PME})
geometry_engine = geometry.FFAllAngleGeometryEngine()
canonicalized_smiles_list = [SmallMoleculeSetProposalEngine.canonicalize_smiles(smiles) for smiles in [initial_smiles, final_smiles]]
proposal_engine = SmallMoleculeSetProposalEngine(
canonicalized_smiles_list, system_generator, residue_name="MOL")
#generate topology proposal
topology_proposal = proposal_engine.propose(solvated_system, solvated_topology)
#generate new positions with geometry engine
new_positions, _ = geometry_engine.propose(topology_proposal, solvated_positions, beta)
return topology_proposal, solvated_positions, new_positions
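# Minimal usage sketch (assumes OpenEye licensing and the bundled gaff/tip3p data
# files are available; the molecule names are the defaults documented above):
#   top_prop, old_positions, new_positions = generate_solvated_hybrid_test_topology(
#       current_mol_name="naphthalene", proposed_mol_name="benzene")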
def run_equilibrium(system, topology, configuration, n_steps, report_interval, filename):
from mdtraj.reporters import HDF5Reporter
integrator = integrators.LangevinIntegrator()
simulation = app.Simulation(topology, system, integrator)
simulation.context.setPositions(configuration)
#equilibrate a little bit:
simulation.step(10000)
reporter = HDF5Reporter(filename, report_interval)
simulation.reporters.append(reporter)
simulation.step(n_steps)
def generate_solvated_topology_proposals(mol_a, mol_b):
top_prop, cpos, npos = generate_solvated_hybrid_test_topology(current_mol_name=mol_a, proposed_mol_name=mol_b)
reverse_top_prop = TopologyProposal(new_topology=top_prop.old_topology, new_system=top_prop.old_system,
old_topology=top_prop.new_topology, old_system=top_prop.new_system,
logp_proposal=0, new_to_old_atom_map=top_prop.old_to_new_atom_map, old_chemical_state_key=mol_b, new_chemical_state_key=mol_a)
return top_prop, reverse_top_prop, cpos, npos
def traj_frame_to_sampler_state(traj: md.Trajectory, frame_number: int):
xyz = traj.xyz[frame_number, :, :]
box_vectors = traj.openmm_boxes(frame_number)
sampler_state = states.SamplerState(unit.Quantity(xyz, unit=unit.nanometers), box_vectors=box_vectors)
return sampler_state
def run_rj_proposals(top_prop, configuration_traj, use_sterics, ncmc_nsteps, n_replicates, bond_softening_constant=1.0, angle_softening_constant=1.0):
ncmc_engine = NCMCEngine(nsteps=ncmc_nsteps, pressure=1.0*unit.atmosphere, bond_softening_constant=bond_softening_constant, angle_softening_constant=angle_softening_constant)
geometry_engine = FFAllAngleGeometryEngine(use_sterics=use_sterics, bond_softening_constant=bond_softening_constant, angle_softening_constant=angle_softening_constant)
initial_thermodynamic_state = states.ThermodynamicState(top_prop.old_system, temperature=temperature, pressure=1.0*unit.atmosphere)
final_thermodynamic_state = states.ThermodynamicState(top_prop.new_system, temperature=temperature, pressure=1.0*unit.atmosphere)
traj_indices = np.arange(0, configuration_traj.n_frames)
results = np.zeros([n_replicates, 4])
for i in tqdm.trange(n_replicates):
frame_index = np.random.choice(traj_indices)
initial_sampler_state = traj_frame_to_sampler_state(configuration_traj, frame_index)
initial_logP = - compute_reduced_potential(initial_thermodynamic_state, initial_sampler_state)
proposed_geometry, logP_geometry_forward = geometry_engine.propose(top_prop, initial_sampler_state.positions, beta)
proposed_sampler_state = states.SamplerState(proposed_geometry, box_vectors=initial_sampler_state.box_vectors)
final_old_sampler_state, final_sampler_state, logP_work, initial_hybrid_logP, final_hybrid_logP = ncmc_engine.integrate(top_prop, initial_sampler_state, proposed_sampler_state)
final_logP = - compute_reduced_potential(final_thermodynamic_state, final_sampler_state)
logP_reverse = geometry_engine.logp_reverse(top_prop, final_sampler_state.positions, final_old_sampler_state.positions, beta)
results[i, 0] = initial_hybrid_logP - initial_logP
results[i, 1] = logP_reverse - logP_geometry_forward
results[i, 2] = final_logP - final_hybrid_logP
results[i, 3] = logP_work
return results
if __name__=="__main__":
import yaml
import sys
import itertools
import os
input_filename = sys.argv[1]
equilibrium = False if sys.argv[2] == '0' else True
if not equilibrium:
index = int(sys.argv[3]) - 1 # Correct for being off by one
with open(input_filename, 'r') as yamlfile:
options_dict = yaml.load(yamlfile)
equilibrium_filename_a = "{}_{}.h5".format(options_dict['traj_prefix'], options_dict['molecules'][0])
equilibrium_filename_b = "{}_{}.h5".format(options_dict['traj_prefix'], options_dict['molecules'][1])
top_prop_forward_filename = "{}_{}.h5".format(options_dict['traj_prefix'], "top_prop_forward.npy")
top_prop_reverse_filename = "{}_{}.h5".format(options_dict['traj_prefix'], "top_prop_reverse.npy")
#if we need to set up equilibrium, then generate the topology proposals and
if equilibrium:
#now generate the topology proposals:
fwd_top_prop, reverse_top_prop, cpos, npos = generate_solvated_topology_proposals(options_dict['molecules'][0], options_dict['molecules'][1])
#write out the topology proposals:
np.save(top_prop_forward_filename, fwd_top_prop)
np.save(top_prop_reverse_filename, reverse_top_prop)
n_steps = options_dict['eq_time'] * 1000 # timestep is 1fs, but we express time in ps
report_interval = options_dict['eq_write_interval'] * 1000 # convert from ps -> fs again
#run the equilibrium
run_equilibrium(fwd_top_prop.old_system, fwd_top_prop.old_topology, cpos, n_steps, report_interval, equilibrium_filename_a)
run_equilibrium(fwd_top_prop.new_system, fwd_top_prop.new_topology, npos, n_steps, report_interval, equilibrium_filename_b)
# Otherwise, we want to run nonequilibrium from the equilibrium samples
else:
configuration_traj_a = md.load(equilibrium_filename_a)
configuration_traj_b = md.load(equilibrium_filename_b)
fwd_top_prop = np.load(top_prop_forward_filename).item()
reverse_top_prop = np.load(top_prop_reverse_filename).item()
n_replicates_neq = options_dict['n_replicates_neq']
lengths = options_dict['lengths']
bond_softenings = options_dict['bond_softenings']
use_sterics = options_dict['use_sterics']
top_props = [fwd_top_prop, reverse_top_prop]
config_trajs = [configuration_traj_a, configuration_traj_b]
import itertools
parameters = list(itertools.product([top_props[0]], [config_trajs[0]], use_sterics, lengths, [n_replicates_neq]))
parameters.extend(list(itertools.product([top_props[1]], [config_trajs[1]], use_sterics, lengths, [n_replicates_neq])))
parms_to_run = parameters[index]
results = run_rj_proposals(parms_to_run[0], parms_to_run[1], parms_to_run[2], parms_to_run[3], parms_to_run[4])
np.save("ncmc_{}_{}_{}_{}.npy".format(parms_to_run[0].old_chemical_state_key, parms_to_run[0].new_chemical_state_key, parms_to_run[2], parms_to_run[3]), results)
| avg_line_length: 42.95053 | max_line_length: 209 | alphanum_fraction: 0.756397 |

hexsha: 69028bc3c4e5a4ba1c689ed019b1fae9ec41e5f8 | size: 16,422 | ext: py | lang: Python
path: mnist_multiple_classes.py | head_hexsha: 66729510df9fb47b4f836bbbf994325a7c088a58 | licenses: ["MIT"]
max_stars_repo: Pehlevan-Group/NTK_Learning_Curves | count 3 | 2020-09-21T06:46:48.000Z to 2021-03-05T02:47:04.000Z
max_issues_repo: Pehlevan-Group/NTK_Learning_Curves | count 4 | 2021-06-19T19:36:10.000Z to 2021-06-19T22:45:15.000Z
max_forks_repo: Pehlevan-Group/NTK_Learning_Curves | count 2 | 2020-09-15T22:38:23.000Z to 2021-06-19T20:28:13.000Z
import jax
import jax.numpy as np
from jax import random
from jax.experimental import optimizers
from jax.api import jit, grad, vmap
import functools
import neural_tangents as nt
from neural_tangents import stax
import matplotlib.pyplot as plt
import scipy as sp
import scipy.special
import scipy.optimize
import numpy as npo
from mnist import MNIST
import pandas as pd
import learning_curves
from jax.config import config
import tensorflow as tf
import tensorflow.keras
import sys
from mpl_toolkits.mplot3d import Axes3D
# set precision
config.update("jax_enable_x64", True)
(images,labels), (x_test,y_test) = tf.keras.datasets.mnist.load_data()
print(images.shape)
images = npo.reshape(images, (images.shape[0], images.shape[1]**2))
x_test = npo.reshape(x_test, (x_test.shape[0], x_test.shape[1]**2))
print(images.shape)
print(x_test.shape)
print(labels.shape)
print(y_test.shape)
# set as numpy array
images = npo.array(images)
labels = npo.array(labels)
x_test = npo.array(x_test)
y_test = npo.array(y_test)
d = 784
num_classes = 10
y_test_mat = npo.zeros((len(y_test), num_classes))
y_train_mat = npo.zeros( (len(labels), num_classes) )
for i in range(len(labels)):
y_train_mat[i,labels[i]] = 1
for i in range(len(y_test)):
y_test_mat[i,y_test[i]] = 1
ptot = len(labels)
labels = y_train_mat
y_test = y_test_mat
d = 784
M = 800
# create 3 layer NN and NTK model
init_fn, apply_fn, kernel_fn = stax.serial(
stax.Dense(M), stax.Relu(),
stax.Dense(M), stax.Relu(),
stax.Dense(M), stax.Relu(),
stax.Dense(num_classes)
)
key = random.PRNGKey(10)
_, params = init_fn(key, (-1,d))
opt_init, opt_update, get_params = optimizers.adam(2e-3)
nn_loss = jit(lambda params, X, y: np.mean( (apply_fn(params, X) -y)**2 ) )
grad_loss = jit(lambda state, x, y: grad(nn_loss)(get_params(state), x, y))
def train_network(train_set, opt_state, num_iter):
train_loss = []
for i in range(num_iter):
opt_state = opt_update(i, grad_loss(opt_state, *train_set), opt_state )
loss_i = nn_loss(get_params(opt_state), *train_set)
train_loss += [loss_i]
sys.stdout.write('\r loss: %.7f' % loss_i)
if loss_i < 1e-4:
break
sys.stdout.flush()
return opt_state, train_loss
def neural_net_gen_expt(X, y, nvals):
num_repeats = 10
key = random.PRNGKey(10)
all_keys = random.split(key, num_repeats)
all_errs = npo.zeros(len(nvals))
std = npo.zeros(len(nvals))
#opt_init, opt_update, get_params = optimizers.adam(2.5e-2)
for i, n in enumerate(nvals):
print("n = %d" % n)
errors = npo.zeros(num_repeats)
for j in range(num_repeats):
_, params = init_fn(all_keys[j,:], (-1,784))
opt_state = opt_init(params)
inds = npo.random.choice(range(len(y)), size = n, replace = False)
X_j = X[inds,:]
y_j = y[inds]
train_set = (X_j,y_j)
opt_state, train_loss = train_network(train_set, opt_state, 200)
yhat = apply_fn(get_params(opt_state), X)
errors[j] = 1/len(y) * npo.linalg.norm(yhat-y)**2
all_errs[i] = npo.mean(errors)
std[i] = npo.std(errors)
sys.stdout.write(' test loss: %.7f | std: %.7f' % (all_errs[i], std[i]) )
print(" ")
return all_errs, std
def kernel_gen_expt():
n = 100
errors = []
test_errors = []
nvals = [5,10,20,50, 100,200,500,1000]
#nvals = [200]
num_repeats = 5
#num_repeats = 1
all_test_predictions = npo.zeros(len(y_test))
for n in nvals:
error = 0
error_test = 0
for i in range(num_repeats):
inds = npo.random.choice(range(len(labels)), size = n, replace = False)
yhat = nt.predict.gp_inference(kernel_fn, images[inds,:], labels[inds,:], images, get='ntk', diag_reg = 1e-10, compute_cov=False)
yhat_test = nt.predict.gp_inference(kernel_fn, images[inds,:], labels[inds,:], x_test, get='ntk', diag_reg = 1e-10, compute_cov=False)
error += 0.5 * 1/len(labels) * npo.linalg.norm(yhat-labels)**2 / num_repeats
error_test += 0.5 * 1/len(y_test) * npo.linalg.norm( yhat_test[:,0] - y_test )**2 / num_repeats
all_test_predictions = yhat_test
print("largest prediction")
print(np.amax(np.abs(yhat_test)))
errors.append(error)
test_errors.append(error_test)
print(errors)
print(test_errors)
return errors, test_errors
# v is eigenvectors
def kernel_gen_expt2(X, y, nvals, v):
num_repeats = 20
all_errs = npo.zeros( (len(nvals), v.shape[1]) )
std = npo.zeros( (len(nvals), v.shape[1]) )
mode_agg = npo.zeros((len(nvals), 5))
mode_agg_std = npo.zeros((len(nvals), 5))
for i, n in enumerate(nvals):
print("n = %d" % n)
errors = npo.zeros( (num_repeats, v.shape[1]) )
for j in range(num_repeats):
inds = npo.random.choice(range(len(y)), size = n, replace = False)
yhat = nt.predict.gp_inference(kernel_fn, X[inds,:], y[inds,:], X, get='ntk', diag_reg = 1e-11, compute_cov=False)
proj_residual = v.T @ (y-yhat)
errors[j,:] = 1/len(y) * npo.sum( proj_residual**2, axis = 1)
total = 1/len(y) * npo.linalg.norm(yhat - y)**2
diff = npo.sum(errors[j,:]) - total
print("diff: %.8f" % diff)
#errors[j] = 1/len(y) * npo.linalg.norm(yhat-y)**2
all_errs[i,:] = npo.mean(errors, axis = 0)
std[i,:] = npo.std(errors, axis = 0)
mode_agg[i,0] = np.mean( np.sum(errors[:,0:100], axis = 1) , axis=0)
mode_agg[i,1] = np.mean( np.sum(errors[:,100:500], axis = 1), axis=0)
mode_agg[i,2] = np.mean( np.sum(errors[:,500:1000], axis = 1), axis=0)
mode_agg[i,3] = np.mean( np.sum(errors[:,1000:5000], axis = 1), axis=0)
mode_agg[i,4] = np.mean( np.sum(errors[:,1000:5000], axis = 1), axis=0)
mode_agg_std[i,0] = np.std( np.sum(errors[:,0:100], axis = 1) , axis=0)
mode_agg_std[i,1] = np.std( np.sum(errors[:,100:500], axis = 1), axis=0)
mode_agg_std[i,2] = np.std( np.sum(errors[:,500:1000], axis = 1), axis=0)
mode_agg_std[i,3] = np.std( np.sum(errors[:,1000:5000], axis = 1), axis=0)
mode_agg_std[i,4] = np.std( np.sum(errors[:,1000:5000], axis = 1), axis=0)
#mode_agg_std[i,0] = np.std( np.sum(errors[:,0:200], axis = 1) , axis=0)
#mode_agg_std[i,1] = np.std( np.sum(errors[:,200:1000], axis = 1), axis=0)
#mode_agg_std[i,2] = np.std( np.sum(errors[:,1000:5000], axis = 1), axis=0)
return all_errs, std, mode_agg, mode_agg_std
# solve lambda = 0 for convenience
def solve_implicit_negative_moment(pvals, moments, spectrum):
m1 = npo.sum(spectrum)
roots = npo.zeros(len(pvals))
for i in range(len(pvals)):
p = pvals[i]
        # Solve the implicit moment equation for this p with Newton's method
sol = sp.optimize.root_scalar(implicit_equation, fprime = f_prime_imp, x0 = m1, method = 'newton', args = (p,npo.array(moments)))
roots[i] = sol.root
print(sol.root)
return roots
def implicit_equation(t, *args):
p, moments = args
z = (-1)*t/p
z_powers = npo.array( [z**(i) for i in range(len(moments))] )
return 1 - 1/p * npo.dot(moments, z_powers)
def f_prime_imp(t, *args):
p, moments = args
z = (-1)*t/p
z_powers = npo.array( [ (i+1) * z**(i+1)/t for i in range(len(moments)-1)] )
return - 1/p * npo.dot(moments[1:len(moments)], z_powers)
def implicit_fn_true(z,*args):
(p, lamb, spectrum) = args
return z - lamb - z * npo.dot(spectrum, (p*spectrum + z*npo.ones(len(spectrum)) )**(-1))
def f_prime_true(z,*args):
(p, lamb, spectrum) = args
return 1 - npo.dot(spectrum, (p*spectrum + z*np.ones(len(spectrum)) )**(-1)) + z* npo.dot(spectrum, (p*spectrum + z*npo.ones(len(spectrum)) )**(-2))
def solve_implicit_z(spectrum, pvals, lamb):
sols = npo.zeros(len(pvals))
for i, p in enumerate(pvals):
args = (p, lamb, spectrum)
sols[i] = sp.optimize.root_scalar(implicit_fn_true, x0= p * npo.amax(spectrum), args = args, fprime = f_prime_true, method = 'newton').root
return sols
def gamma(spectrum, p,lamb, z):
return z**2 * npo.dot(spectrum**2, (z*np.ones(len(spectrum)) + spectrum*p)**(-2))
def theory_learning_curves(spectrum, eig_vecs, pvals, lamb, y):
coeffs = eig_vecs.T @ y
w = np.diag(spectrum**(-0.5)) @ coeffs
z_vals = solve_implicit_z(spectrum, pvals, lamb)
gamma_vals = npo.array( [gamma(spectrum, pvals[i],lamb, z_vals[i]) for i in range(len(pvals))] )
mode_errs = npo.zeros( (len(pvals),len(spectrum)) )
for i,lambda_rho in enumerate(spectrum):
mode_errs[:,i] = np.sum(w[i,:]**2)/lambda_rho * z_vals**2 /(z_vals**2 - gamma_vals*pvals) * lambda_rho**2 * z_vals**2 / (lambda_rho*pvals + z_vals)**2
return mode_errs
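# In plain text, the per-mode error assigned above is (with z = z(p) from
# solve_implicit_z and gamma = gamma(spectrum, p, lamb, z)):
#
#   E_rho(p) = (sum_c w[rho, c]^2 / lambda_rho)
#              * z^2 / (z^2 - gamma * p)
#              * lambda_rho^2 * z^2 / (lambda_rho * p + z)^2
#
# (a restatement of the expression stored in mode_errs[:, i] above, not an
# independent derivation).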
# kernel generalization experiment
errors, test_errors = kernel_gen_expt()
df = pd.DataFrame(errors)
df.to_csv('mnist_train_errs_expt.csv', header = None)
nvals = [5,10,20,50, 100,200,500,1000]
plt.loglog(nvals,errors, label = 'training data')
plt.loglog(nvals, test_errors, label = 'test set')
#plt.loglog(nvals, test_errors, label = 'test set')
plt.legend()
plt.xlabel(r'$p$')
plt.ylabel(r'$E_g$')
plt.tight_layout()
plt.savefig('expt_kernel_regression_mnist_01.pdf')
plt.show()
npo.random.seed(100)
plt.rcParams.update({'font.size': 12})
# kernel PCA
num_pca = 8000
inds = npo.random.choice(images.shape[0], size = num_pca, replace = False)
X = images[inds,:]
y = labels[inds,:]
K = kernel_fn(X,X, get='ntk')
print("getting eigenspectrum")
spectrum, vecs = npo.linalg.eigh(1/num_pca * K)
sort_inds = npo.argsort(spectrum)[::-1]
#nvals = npo.logspace(1, np.log10(0.5*num_pca), 7).astype('int')
nvals = npo.logspace(1, np.log10(0.3*num_pca), 6).astype('int')
# sort the spectrum and vectors
spectrum = spectrum[sort_inds]
vecs = vecs[:,sort_inds]
plt.loglog(spectrum)
plt.xlabel(r'$k$')
plt.ylabel(r'$\lambda_k$')
plt.savefig('NTK_MNIST_spectrum.pdf')
plt.show()
df_spec = pd.DataFrame(spectrum)
df_spec.to_csv('MNIST_spectrum_depth3.csv')
eig_vecs = vecs[:,sort_inds]
coeffs = eig_vecs.T @ y
w_teach = coeffs
df_teach = pd.DataFrame(w_teach**2)
df_teach.to_csv('MNIST_teacher_spectrum_depth3.csv')
lamb = 1e-10
print("len of spectrum")
print(len(spectrum))
pvals = npo.logspace(np.log10(10), np.log10(num_pca-1), 500)
sols = solve_implicit_z(npo.array(spectrum), pvals, lamb)
mode_errs = theory_learning_curves(spectrum, vecs, pvals, lamb, y)
sort_inds = np.argsort(spectrum)[::-1]
theory0 = npo.sum(mode_errs, axis = 1)
theory_adj = npo.sum(mode_errs, axis = 1) * num_pca / (num_pca - pvals + 1e-3)
plt.loglog(pvals, theory0, label = 'original theory')
plt.loglog(pvals, theory_adj, label = 'rescaled theory')
plt.legend()
plt.ylim([np.amin(theory_adj), np.amax(theory_adj)])
plt.xlabel(r'$p$', fontsize = 20)
plt.ylabel(r'$E_g$', fontsize=20)
plt.tight_layout()
plt.savefig('rescale_risk.pdf')
plt.show()
inds = [10, 100, 1000]
for i, j in enumerate(sort_inds[inds]):
if inds[i]==0:
plt.loglog(pvals, mode_errs[:,j] / mode_errs[0,j], label = r'$k=0$')
else:
plt.loglog(pvals, mode_errs[:,j] / mode_errs[0,j], label = r'$k = 10^{%d}$' % int(np.log10(inds[i])+0.01) )
plt.legend()
plt.xlabel(r'$p$', fontsize= 20)
plt.ylabel(r'$E_{k}(p) / E_{k}(0)$', fontsize =20)
plt.tight_layout()
plt.savefig('theory_mode_errs_mnist_3layer.pdf')
plt.show()
# train NN on least-squares objective
nn_errors, nn_std = neural_net_gen_expt(X,y,nvals)
print("finished NN expt")
# get mode errors from kernel expt
expt_mode_errors, expt_mode_std, mode_agg, mode_agg_std = kernel_gen_expt2(X,y,nvals, vecs)
plt.plot( np.log10(pvals) , np.log10( 1/num_pca * npo.sum(mode_errs[:,0:100], axis=1) ), color='C0', label = 'k=1-100')
plt.plot( np.log10(pvals), np.log10(1/num_pca *npo.sum(mode_errs[:,100:500], axis=1) ), color = 'C1', label = 'k=101-500')
plt.plot( np.log10(pvals), np.log10( 1/num_pca *npo.sum(mode_errs[:,500:1000], axis=1) ) , color = 'C2', label = 'k=501-1000')
plt.plot( np.log10(pvals), np.log10( 1/num_pca *npo.sum(mode_errs[:,1000:5000], axis=1) ) , color = 'C3', label = 'k=1001-5000')
#plt.plot( np.log10(pvals), np.log10( 1/num_pca *npo.sum(mode_errs[:,5000:5000], axis=1) ) , color = 'C4', label = 'k=1001-5000')
plt.errorbar( np.log10(nvals), np.log10(mode_agg[:,0]), mode_agg_std[:,0] / mode_agg[:,0], fmt='^', color='C0')
plt.errorbar( np.log10(nvals), np.log10(mode_agg[:,1]), mode_agg_std[:,1] / mode_agg[:,1], fmt='^', color='C1')
plt.errorbar( np.log10(nvals), np.log10(mode_agg[:,2]), mode_agg_std[:,2] / mode_agg[:,2], fmt='^', color = 'C2')
plt.errorbar( np.log10(nvals), np.log10(mode_agg[:,3]), mode_agg_std[:,3] / mode_agg[:,3], fmt='^', color = 'C3')
#plt.errorbar( np.log10(nvals), np.log10(mode_agg[:,4]), mode_agg_std[:,4] / mode_agg[:,4], fmt='^', color = 'C4')
plt.xticks([1,2,3], [r'$10^{1}$', r'$10^{2}$', r'$10^{3}$'], fontsize=16)
plt.yticks([0,-1,-2,-3], [r'$10^{0}$', r'$10^{-1}$', r'$10^{-2}$', r'$10^{-3}$'], fontsize=16)
plt.xlabel(r'$p$', fontsize=20)
plt.ylabel(r'$E_g$', fontsize=20)
plt.legend()
plt.tight_layout()
plt.savefig('mode_errs_log_scale.pdf')
plt.show()
theory_agg = npo.zeros((len(pvals), 3))
theory_agg[:,0]= np.sum(mode_errs[:,0:10])
theory_agg[:,1]= np.sum(mode_errs[:,10:100])
theory_agg[:,2]= np.sum(mode_errs[:,100:1000])
for i in range(3):
plt.loglog(nvals, mode_agg[:,i])
plt.loglog(pvals, theory_agg[:,i])
plt.show()
pd.DataFrame(expt_mode_errors).to_csv('mnist_mode.csv')
pd.DataFrame(expt_mode_std).to_csv('mnist_std.csv')
inds = [1,10,100, 1000]
rescale = mode_errs[0,sort_inds[1]] / expt_mode_errors[0, sort_inds[1]]
expt_mode_errors = rescale* expt_mode_errors
for i, j in enumerate(sort_inds[inds]):
plt.plot( np.log10(pvals), np.log10(mode_errs[:,j]), label = r'$k = 10^{%d}$' % int(np.log10(inds[i])+0.01) , color = 'C%d' % i)
plt.errorbar( np.log10(nvals), np.log10(expt_mode_errors[:,j]), expt_mode_std[:,j] / expt_mode_errors[:,j] * rescale ,fmt ='o', color= 'C%d' % i)
#plt.plot(pvals, mode_errs[:,j] / mode_errs[0,j])
#plt.plot(nvals, expt_mode_errors[:,j] / expt_mode_errors[0,j], 'o')
plt.legend()
plt.yticks([3,2,1,0,-1,-2], [r'$10^{0}$', r'$10^{-1}$', r'$10^{-2}$', r'$10^{-3}$', r'$10^{-4}$', r'$10^{-5}$'], fontsize=16)
plt.xticks([1,2,3], [r'$10^{1}$', r'$10^{2}$', r'$10^{3}$'], fontsize=16)
plt.xlabel(r'$p$', fontsize= 20)
plt.ylabel(r'$E_{k}(p)$', fontsize =20)
plt.tight_layout()
plt.savefig('theory_expt_mode_errs_mnist_3layer.pdf')
plt.show()
print("finished kernel expt")
plt.loglog(nvals, errors)
plt.loglog(nvals, nn_errors)
plt.show()
#nn_errors = nn_errors/ nn_errors[0] * errors[0]
#nn_std = nn_std / nn_errors[0] * errors[0]
all_expt_data = npo.zeros((5,len(nvals)))
all_expt_data[0,:] = nvals
all_expt_data[1,:] = errors
all_expt_data[2,:] = std
all_expt_data[3,:] = nn_errors / num_pca
all_expt_data[4,:] = nn_std / num_pca
df = pd.DataFrame(all_expt_data)
df.to_csv('mnist_expt_data_M%d.csv' % M)
#spectrum = spectrum[sort_inds]
#spectrum = spectrum[::-1]
#print(spectrum)
#pve = npo.zeros(len(spectrum)-1)
#for i in range(len(spectrum)-1):
# pve[i] = npo.sum(spectrum[0:i+1]) / npo.sum(spectrum)
#plt.loglog(spectrum)
#plt.savefig('spectrum_random_subsample.pdf')
#plt.show()
theory = np.sum(mode_errs, axis = 1) / num_pca
theory_vals = npo.zeros((2,len(theory)))
theory_vals[0,:] = pvals
theory_vals[1,:] = theory
df_theory = pd.DataFrame(theory_vals)
df_theory.to_csv('mnist_theory_M%d.csv' % M)
plt.plot( npo.log10(pvals), npo.log10(theory), label = 'MNIST Theory', color = 'C0')
plt.errorbar( npo.log10(nvals), npo.log10(errors), std/errors, fmt = '^', label = 'kernel', color = 'C0')
plt.errorbar( npo.log10(nvals), npo.log10(nn_errors), nn_std/nn_errors, fmt='o', label = 'NN', color = 'C0')
#plt.errorbar( npo.log10(nvals), npo.log10(errors), std/errors, fmt = 'o', label = 'MNIST Expt', color = 'C0')
plt.yticks([-2,-1,0,1], [r'$10^{-2}$', r'$10^{-1}$', r'$10^{0}$', r'$10^{1}$'], fontsize=16)
plt.xticks([1,2,3], [r'$10^{1}$', r'$10^{2}$', r'$10^{3}$'], fontsize=16)
plt.xlabel(r'$p$', fontsize=20)
plt.ylabel(r'$E_g$', fontsize=20)
plt.legend()
plt.tight_layout()
plt.savefig('MNIST_expt_vs_theory_3_layer_nn.pdf')
plt.show()
| avg_line_length: 34 | max_line_length: 158 | alphanum_fraction: 0.640848 |

hexsha: 00e381ad0cd10285e4dbd19e24c1a630471e484f | size: 188 | ext: py | lang: Python
path: astraviso/test/__init__.py | head_hexsha: 0925f1a170b2e1d7c09902cd8550dc844b7d7764 | licenses: ["MIT"]
max_stars_repo: bradsease/test2 | count null | dates null
max_issues_repo: bradsease/test2 | count 44 | 2017-02-22T23:38:52.000Z to 2017-03-02T03:27:11.000Z
max_forks_repo: bradsease/test | count null | dates null
from .imageutils import *
from .pointingutils import *
from .projectionutils import *
from .starcam import *
from .starmap import *
from .worldobject import *
from .verification import *
| avg_line_length: 20.888889 | max_line_length: 30 | alphanum_fraction: 0.771277 |

hexsha: 735f794a14cfff1211ca83b34ce401c1a505f267 | size: 6,100 | ext: py | lang: Python
path: qa/rpc-tests/proxy_test.py | head_hexsha: f30178931f922d088e5317f95522dfdf02c02116 | licenses: ["MIT"]
max_stars_repo: NaniteCurrency/Nanite | count null | dates null
max_issues_repo: NaniteCurrency/Nanite | count null | dates null
max_forks_repo: NaniteCurrency/Nanite | count 2 | 2018-03-27T00:41:16.000Z to 2018-03-31T03:24:10.000Z
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import socket
import traceback, sys
from binascii import hexlify
import time, os
from socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework import BitcoinTestFramework
from util import *
'''
Test plan:
- Start bitcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
'''
class ProxyTest(BitcoinTestFramework):
def __init__(self):
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', 13000 + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', 14000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', 15000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
def setup_nodes(self):
# Note: proxies are not used to connect to local nodes
# this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
return start_nodes(4, self.options.tmpdir, extra_args=[
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0']
])
def node_test(self, node, proxies, auth):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing onion connection through node
node.addnode("nanitevj7kcklujarx.onion:51472", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "nanitevj7kcklujarx.onion")
assert_equal(cmd.port, 51472)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), 4)
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False)
if __name__ == '__main__':
ProxyTest().main()
| avg_line_length: 41.780822 | max_line_length: 145 | alphanum_fraction: 0.653115 |

hexsha: 95c53f9d470c3bd8509ca3715d178e3cd1cb99be | size: 2,496 | ext: py | lang: Python
path: learning/indexer.py | head_hexsha: 41b2305f96d97beb6752b306ebd8e05e14ffae12 | licenses: ["MIT"]
max_stars_repo: dokaptur/neural-fanfiction-generator | count null | dates null
max_issues_repo: dokaptur/neural-fanfiction-generator | count null | dates null
max_forks_repo: dokaptur/neural-fanfiction-generator | count null | dates null
class Indexer(object):
def __init__(self, first_words=()):
self._index = {}
self._index_to_string = []
self._frozen = False
self._reference_ids = [0]
for w in first_words:
self.string_to_int(w)
def freeze(self, frozen = True):
self._frozen = frozen
def remember_state(self):
if self._frozen:
raise ValueError('Cannot remember the state of an Indexer that is frozen.')
self._reference_ids.append(len(self._index_to_string))
def string_to_int(self, string):
if string in self._index:
return self._index[string]
else:
if not self.frozen:
result = len(self._index_to_string)
self._index[string] = result
self._index_to_string.append(string)
return result
else:
raise KeyError('{} not indexed yet and indexer is frozen'.format(string))
def int_to_string(self, int):
return self._index_to_string[int]
def inv(self, string):
return self.int_to_string(string)
def __call__(self, string):
return self.string_to_int(string)
def __iter__(self):
return self._index.__iter__()
def items(self):
return self._index.items()
def ints(self, *strings):
return [self.string_to_int(string) for string in strings]
def strings(self, *ints):
return [self.int_to_string(i) for i in ints]
def __len__(self):
return len(self._index_to_string)
@property
def index(self):
return self._index
@property
def reference_ids(self):
return self._reference_ids
@property
def frozen(self):
return self._frozen
def __str__(self):
l = len(self._index_to_string)
if l > 20:
a = min(l, 10)
b = max(len(self._index_to_string) - 10, 0)
mid = ', ..., '
else:
a, b = l, l
mid = ''
return "%s(%s%s%s)" % (self.__class__.__name__,
', '.join([str(x) for x in self._index_to_string[:a]]),
mid, ', '.join([str(x) for x in self._index_to_string[b:]]))
def __repr__(self):
s = str(self)
return s + ' with references %s' % str(self._reference_ids)
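# Illustrative usage sketch (not part of the original module; it relies only on
# the public methods defined above):
#
#     idx = Indexer(first_words=("<s>", "</s>"))      # ids 0 and 1
#     hello_id, world_id = idx.ints("hello", "world") # new ids assigned on first sight
#     idx.freeze()                                    # unseen strings now raise KeyError
#     assert idx.strings(hello_id, world_id) == ["hello", "world"]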
| 27.733333
| 91
| 0.564904
|
fe15836394d229055c17534c70fb39cea592e871
| 725
|
py
|
Python
|
src/GraphQueries/MyErrorListener.py
|
lanasheep/graph_DB
|
139aca8d1fa376291a181606b4d6fbc5be82de55
|
[
"MIT"
] | null | null | null |
src/GraphQueries/MyErrorListener.py
|
lanasheep/graph_DB
|
139aca8d1fa376291a181606b4d6fbc5be82de55
|
[
"MIT"
] | 4
|
2020-09-27T10:46:29.000Z
|
2020-10-03T08:34:38.000Z
|
src/GraphQueries/MyErrorListener.py
|
lanasheep/graph_DB
|
139aca8d1fa376291a181606b4d6fbc5be82de55
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from antlr4.error.ErrorListener import ErrorListener
class ParseError(Exception):
pass
class MyErrorListener(ErrorListener):
def __init__(self):
super(MyErrorListener, self).__init__()
def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
raise ParseError()
def reportAmbiguity(self, recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs):
raise ParseError()
def reportAttemptingFullContext(self, recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs):
raise ParseError()
def reportContextSensitivity(self, recognizer, dfa, startIndex, stopIndex, prediction, configs):
raise ParseError()
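# Illustrative attachment sketch (MyLexer/MyParser stand in for this project's
# ANTLR-generated classes and are placeholders here):
#
#     from antlr4 import InputStream, CommonTokenStream
#     lexer = MyLexer(InputStream(query_text))
#     lexer.removeErrorListeners()
#     lexer.addErrorListener(MyErrorListener())
#     parser = MyParser(CommonTokenStream(lexer))
#     parser.removeErrorListeners()
#     parser.addErrorListener(MyErrorListener())
#     # any syntax problem or ambiguity now raises ParseError instead of printing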
| 31.521739
| 108
| 0.732414
|
0c14f161d136e0e5366e6d07b183cf5b2c46abd1
| 5,875
|
py
|
Python
|
src/3x3 Tic Tac Toe/AI_algo.py
|
ketan-lambat/AI_Games
|
a41481c806a4c274562f3bead92329496c5f5d50
|
[
"MIT"
] | 1
|
2020-09-29T02:05:32.000Z
|
2020-09-29T02:05:32.000Z
|
src/3x3 Tic Tac Toe/AI_algo.py
|
ketan-lambat/AI_Games
|
a41481c806a4c274562f3bead92329496c5f5d50
|
[
"MIT"
] | null | null | null |
src/3x3 Tic Tac Toe/AI_algo.py
|
ketan-lambat/AI_Games
|
a41481c806a4c274562f3bead92329496c5f5d50
|
[
"MIT"
] | 1
|
2020-09-30T21:10:25.000Z
|
2020-09-30T21:10:25.000Z
|
import random
from TicTacToe_AI import *
# If it is the AI's turn, pass isMax = True ('o' is the AI, 'x' is the user)
def random_cell(TTT):
rndm_num = random.randint(0, len(empty_cells(TTT))-1)
cells = empty_cells(TTT)
rndm_cell = cells[rndm_num]
return rndm_cell
def minimax(TTT, isMax):
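    # Returns [row, col, score] for the side to move: 'o' (the AI) maximises and
    # 'x' minimises; empty_cells/is_winner/eval come from the TicTacToe_AI import.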
if isMax:
best = [-1, -1, -inf]
else:
best = [-1, -1, inf]
if len(empty_cells(TTT)) == 0 or is_winner(TTT):
score = eval(TTT)
return [-1, -1, score]
for cell in empty_cells(TTT):
x, y = cell[0], cell[1]
if isMax:
TTT[x][y] = 'o'
else:
TTT[x][y] = 'x'
score = minimax(TTT, not isMax)
TTT[x][y] = None
score[0], score[1] = x, y
if isMax:
if score[2] > best[2]:
best = score
else:
if score[2] < best[2]:
best = score
return best
def alpha_beta(TTT, alpha, beta, isMax):
if isMax:
best = [-1, -1, -inf]
else:
best = [-1, -1, inf]
if len(empty_cells(TTT)) == 0 or is_winner(TTT):
score = eval(TTT)
return [-1, -1, score]
for cell in empty_cells(TTT):
x, y = cell[0], cell[1]
if isMax:
TTT[x][y] = 'o'
else:
TTT[x][y] = 'x'
score = alpha_beta(TTT, alpha, beta, not isMax)
TTT[x][y] = None
score[0], score[1] = x, y
if isMax:
if score[2] > best[2]:
best = score
alpha = max(alpha, best[2])
if beta <= alpha:
break
else:
if score[2] < best[2]:
best = score
beta = min(beta, best[2])
if beta <= alpha:
break
return best
def minimax_depth_limit(TTT, depth, isMax):
if isMax:
best = [-1, -1, -inf]
else:
best = [-1, -1, inf]
if len(empty_cells(TTT)) == 0 or is_winner(TTT):
score = eval(TTT)
return [-1, -1, score]
# cutoff at depth of 3 and evaluate TTT state
if depth == 3:
result = eval_heuristic(TTT)
return [-1, -1, result]
for cell in empty_cells(TTT):
x, y = cell[0], cell[1]
if isMax:
TTT[x][y] = 'o'
else:
TTT[x][y] = 'x'
score = minimax_depth_limit(TTT, depth+1, not isMax)
TTT[x][y] = None
score[0], score[1] = x, y
if isMax:
if score[2] > best[2]:
best = score
else:
if score[2] < best[2]:
best = score
return best
def depth_alphabeta(TTT, depth, alpha, beta, isMax):
if isMax:
best = [-1, -1, -inf]
else:
best = [-1, -1, inf]
if len(empty_cells(TTT)) == 0 or is_winner(TTT):
score = eval(TTT)
return [-1, -1, score]
# cutoff at depth of 3 and evaluate TTT state
if depth == 3:
result = eval_heuristic(TTT)
return [-1, -1, result]
for cell in empty_cells(TTT):
x, y = cell[0], cell[1]
if isMax:
TTT[x][y] = 'o'
else:
TTT[x][y] = 'x'
score = depth_alphabeta(TTT, depth+1, alpha, beta, not isMax)
TTT[x][y] = None
score[0], score[1] = x, y
if isMax:
if score[2] > best[2]:
best = score
alpha = max(alpha, best[2])
if beta <= alpha:
break
else:
if score[2] < best[2]:
best = score
beta = min(beta, best[2])
if beta <= alpha:
break
return best
def minimax_exper(TTT, depth, alpha, beta, isMax):
if isMax:
best = [-1, -1, -inf]
else:
best = [-1, -1, inf]
if len(empty_cells(TTT)) == 0 or is_winner(TTT):
score = eval(TTT)
return [-1, -1, score]
    # cutoff at depth of 8 and evaluate TTT state
if depth == 8:
result = eval_heuristic(TTT)
return [-1, -1, result]
for cell in empty_cells(TTT):
x, y = cell[0], cell[1]
if isMax:
TTT[x][y] = 'o'
else:
TTT[x][y] = 'x'
score = minimax_exper(TTT, depth+1, alpha, beta, not isMax)
TTT[x][y] = None
score[0], score[1] = x, y
if isMax:
if score[2] > best[2]:
best = score
alpha = max(alpha, best[2])
if beta <= alpha:
break
else:
if score[2] < best[2]:
best = score
beta = min(beta, best[2])
if beta <= alpha:
break
return best
def eval_heuristic(TTT):
# no of possible wins in next 2 moves of AI
score_AI = 0
for cell_i in empty_cells(TTT):
x_i, y_i = cell_i[0], cell_i[1]
TTT[x_i][y_i] = 'o'
for cell_j in empty_cells(TTT):
x_j, y_j = cell_j[0], cell_j[1]
TTT[x_j][y_j] = 'o'
if is_winner(TTT) == 'o':
score_AI = score_AI + 1
TTT[x_j][y_j] = None
TTT[x_i][y_i] = None
# no of possible wins in next 2 moves of User
score_User = 0
for cell_i in empty_cells(TTT):
x_i, y_i = cell_i[0], cell_i[1]
TTT[x_i][y_i] = 'x'
for cell_j in empty_cells(TTT):
x_j, y_j = cell_j[0], cell_j[1]
TTT[x_j][y_j] = 'x'
if is_winner(TTT) == 'x':
score_User = score_User + 1
TTT[x_j][y_j] = None
TTT[x_i][y_i] = None
if score_AI > score_User:
score = 10
elif score_AI < score_User:
score = -10
else:
score = 0
return score
def get_algo_1():
return 1
def get_algo_2():
return 2
def get_algo_3():
return 3
def get_algo_4():
return 4
def get_algo_5():
return 5
def get_algo_6():
return 6
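# Illustrative usage sketch (assumes a 3x3 board TTT with None for empty cells,
# as produced by TicTacToe_AI, and a non-terminal position):
#
#     row, col, score = alpha_beta(TTT, -inf, inf, True)   # AI ('o') to move
#     TTT[row][col] = 'o'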
| 23.039216
| 69
| 0.470638
|
2a02e1916ab734dda9bc87ea3df4c786e65bc48b
| 3,977
|
py
|
Python
|
ext/opentelemetry-ext-http-requests/tests/test_requests_integration.py
|
Jamim/opentelemetry-python
|
6d1cd1f8f826bd7f36baaee949ff66f3c8007243
|
[
"Apache-2.0"
] | null | null | null |
ext/opentelemetry-ext-http-requests/tests/test_requests_integration.py
|
Jamim/opentelemetry-python
|
6d1cd1f8f826bd7f36baaee949ff66f3c8007243
|
[
"Apache-2.0"
] | null | null | null |
ext/opentelemetry-ext-http-requests/tests/test_requests_integration.py
|
Jamim/opentelemetry-python
|
6d1cd1f8f826bd7f36baaee949ff66f3c8007243
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from unittest import mock
import requests
import urllib3
import opentelemetry.ext.http_requests
from opentelemetry import trace
class TestRequestsIntegration(unittest.TestCase):
# TODO: Copy & paste from test_wsgi_middleware
def setUp(self):
self.span_attrs = {}
self.tracer = trace.tracer()
self.span_context_manager = mock.MagicMock()
self.span = mock.create_autospec(trace.Span, spec_set=True)
self.span_context_manager.__enter__.return_value = self.span
def setspanattr(key, value):
self.assertIsInstance(key, str)
self.span_attrs[key] = value
self.span.set_attribute = setspanattr
self.start_span_patcher = mock.patch.object(
self.tracer,
"start_as_current_span",
autospec=True,
spec_set=True,
return_value=self.span_context_manager,
)
self.start_as_current_span = self.start_span_patcher.start()
mocked_response = requests.models.Response()
mocked_response.status_code = 200
mocked_response.reason = "Roger that!"
self.send_patcher = mock.patch.object(
requests.Session,
"send",
autospec=True,
spec_set=True,
return_value=mocked_response,
)
self.send = self.send_patcher.start()
opentelemetry.ext.http_requests.enable(self.tracer)
def tearDown(self):
opentelemetry.ext.http_requests.disable()
self.send_patcher.stop()
self.start_span_patcher.stop()
def test_basic(self):
url = "https://www.example.org/foo/bar?x=y#top"
requests.get(url=url)
self.assertEqual(1, len(self.send.call_args_list))
self.tracer.start_as_current_span.assert_called_with(
"/foo/bar", kind=trace.SpanKind.CLIENT
)
self.span_context_manager.__enter__.assert_called_with()
self.span_context_manager.__exit__.assert_called_with(None, None, None)
self.assertEqual(
self.span_attrs,
{
"component": "http",
"http.method": "GET",
"http.url": url,
"http.status_code": 200,
"http.status_text": "Roger that!",
},
)
def test_invalid_url(self):
url = "http://[::1/nope"
exception_type = requests.exceptions.InvalidURL
if sys.version_info[:2] < (3, 5) and tuple(
map(int, urllib3.__version__.split(".")[:2])
) < (1, 25):
exception_type = ValueError
with self.assertRaises(exception_type):
requests.post(url=url)
self.assertTrue(
self.tracer.start_as_current_span.call_args[0][0].startswith(
"<Unparsable URL"
),
msg=self.tracer.start_as_current_span.call_args,
)
self.span_context_manager.__enter__.assert_called_with()
exitspan = self.span_context_manager.__exit__
self.assertEqual(1, len(exitspan.call_args_list))
self.assertIs(exception_type, exitspan.call_args[0][0])
self.assertIsInstance(exitspan.call_args[0][1], exception_type)
self.assertEqual(
self.span_attrs,
{"component": "http", "http.method": "POST", "http.url": url},
)
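# Illustrative non-test usage (a sketch; it mirrors the calls exercised above and
# applies only to this early opentelemetry-ext-http-requests API):
#
#     tracer = trace.tracer()
#     opentelemetry.ext.http_requests.enable(tracer)    # instruments requests.Session.send
#     requests.get("https://www.example.org/")          # recorded as a CLIENT span
#     opentelemetry.ext.http_requests.disable()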
| 34.885965
| 79
| 0.637918
|
76aadd980dfb75d99092327e44f3902533362dd0
| 4,770
|
py
|
Python
|
q2_gneiss/regression/_regression.py
|
nbokulich/q2-gneiss
|
b9ee3efb27f8a050546a231a62a47b3a7a490263
|
[
"BSD-3-Clause"
] | null | null | null |
q2_gneiss/regression/_regression.py
|
nbokulich/q2-gneiss
|
b9ee3efb27f8a050546a231a62a47b3a7a490263
|
[
"BSD-3-Clause"
] | null | null | null |
q2_gneiss/regression/_regression.py
|
nbokulich/q2-gneiss
|
b9ee3efb27f8a050546a231a62a47b3a7a490263
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# Copyright (c) 2017-2019, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import pandas as pd
import skbio
from gneiss.regression._ols import ols
from gneiss.regression._mixedlm import mixedlm
from q2_types.feature_table import FeatureTable, Balance
from q2_types.tree import Hierarchy
from qiime2.plugin import Str, Metadata
from q2_gneiss.plugin_setup import plugin
from gneiss.plot._regression_plot import ols_summary, lme_summary
import numpy as np
def ols_regression(output_dir: str,
table: pd.DataFrame, tree: skbio.TreeNode,
metadata: Metadata, formula: str) -> None:
if np.any(table.var(axis=0) == 0):
message = ('Detected zero variance balances - '
'double check your table for unobserved features.')
raise UserWarning(message)
res = ols(table=table, metadata=metadata.to_dataframe(),
formula=formula)
res.fit()
ols_summary(output_dir, res, tree)
plugin.visualizers.register_function(
function=ols_regression,
inputs={'table': FeatureTable[Balance],
'tree': Hierarchy},
parameters={'formula': Str, 'metadata': Metadata},
name='Simplicial Ordinary Least Squares Regression',
input_descriptions={
'table': ('The feature table containing the samples in which '
'simplicial regression will be performed.'),
        'tree': ('A hierarchy of feature identifiers where each tip '
'corresponds to the feature identifiers in the table. '
'This tree can contain tip ids that are not present in '
'the table, but all feature ids in the table must be '
'present in this tree.')
},
parameter_descriptions={
'formula': 'Formula specifying the statistical model. '
'In other words, a list of the metadata categories that '
'will be used in the regression model, '
'typically separated by "+". For more information see '
'https://patsy.readthedocs.io/en/latest/API-reference.html',
'metadata': ('Metadata information that contains the '
'covariates of interest.')
},
description=("Perform linear regression on balances. This will tell you"
"how much variability is explained by metadata "
"categories in your formula.")
)
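# Illustrative direct call (a sketch; variable names and the formula are placeholders):
#
#     ols_regression(output_dir="ols_out",
#                    table=balances_df,             # samples x balances DataFrame
#                    tree=balance_tree,             # skbio.TreeNode matching the balances
#                    metadata=Metadata(sample_md),  # qiime2 Metadata with the covariates
#                    formula="ph + depth")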
def lme_regression(output_dir: str,
table: pd.DataFrame, tree: skbio.TreeNode,
metadata: Metadata, formula: str,
groups: str) -> None:
if np.any(table.var(axis=0) == 0):
message = ('Detected zero variance balances - '
'double check your table for unobserved features.')
raise UserWarning(message)
res = mixedlm(table=table, metadata=metadata.to_dataframe(),
formula=formula, groups=groups)
res.fit()
lme_summary(output_dir, res, tree)
plugin.visualizers.register_function(
function=lme_regression,
inputs={'table': FeatureTable[Balance],
'tree': Hierarchy},
parameters={'metadata': Metadata, 'formula': Str, 'groups': Str},
name='Simplicial Linear mixed effects regression',
input_descriptions={
'table': ('The feature table containing the samples in which '
'simplicial regression with mixed effect '
'will be performed.'),
'tree': ('A hierarchy of feature identifiers where each tip '
'corresponds to the feature identifiers in the table. '
'This tree can contain tip ids that are not present in '
'the table, but all feature ids in the table must be '
'present in this tree.')
},
parameter_descriptions={
'formula': 'Statistical formula specifying the statistical model. '
'In other words, a list of the metadata categories that '
'will be used in the linear mixed effect model, '
'typically separated by "+". For more information see '
'https://patsy.readthedocs.io/en/latest/API-reference.html',
'metadata': ('Metadata information that contains the '
'covariates of interest.')
},
description="Build and run linear mixed effects model on balances. "
"Use LME over OLS when you have repeated measurements "
"such as (timeseries)."
)
| 42.972973
| 79
| 0.607338
|
b7f20a4d599a7662138d7ed7d19a6b3c195dffe3
| 9,739
|
py
|
Python
|
lib/python3/green_environment/giesomat.py
|
l-hahn/green-environment
|
ff76de1c2170ffc4ffae3fd565a42b9e7c419dfe
|
[
"MIT"
] | null | null | null |
lib/python3/green_environment/giesomat.py
|
l-hahn/green-environment
|
ff76de1c2170ffc4ffae3fd565a42b9e7c419dfe
|
[
"MIT"
] | null | null | null |
lib/python3/green_environment/giesomat.py
|
l-hahn/green-environment
|
ff76de1c2170ffc4ffae3fd565a42b9e7c419dfe
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
This module provides a class to interact with the Gies-O-Mat soil moisture
sensor from ramser-elektro.at and is considered mostly to be used on a
Raspberry Pi. With aprorpiate naming and if pigpio is also available,
this class might be used for other SoC like Arduino, Genuino etc.
This module can also be used as a standalone script to retrieve values from
attached sensors.
Classes: GiesOMat
Functions: main
"""
import argparse
import time
import pigpio
class GiesOMat:
"""
This class provides a set of functions and methods that can be used to
interact with the Gies-O-Mat sensor. As values can be set/adjusted during
runtime, there is no need to instantiate a new object for every config
change. Multiple sensors can be handled at once by passing a list of
GPIO pins.
Methods:
default_functor(values, **kwargs)
__init__
set_gpio(gpio)
gpio()
set_pulse(pulse)
pulse()
set_sample_rate(sample_rate)
sample_rate()
set_callback(call_back_id)
callback()
reset()
get(iteration)
run(iteration, functor, **kwargs)
run_endless(iteration, functor, **kwargs)
"""
def default_functor(values: "list of ints" or int, **kwargs):
"""
An example of how functions can be passed to the run function,
to make use of a value handling (e.g. printing values).
Keyword arguments:
values (list of ints) -- the measured values for one run.
Args:
**kwargs -- arguments that can be evaluated in another function
"""
if not isinstance(values, list):
print(values, **kwargs)
else:
print("\t".join(str(value) for value in values), **kwargs)
def __init__(
self, gpio, pulse=20, sample_rate=5,
call_back_id=pigpio.RISING_EDGE):
"""
Constructor to instantiate an object, that is able to handle multiple
sensors at once.
Keyword arguments:
gpio (int, list of ints) -- GPIO pins that are connected to 'OUT'
pulse (int, optional) -- The time for the charging wave. Defaults to 20.
            sample_rate (int, optional) -- Time span over which to count switches. Defaults to 5.
call_back_id (int, optional) -- Callback id. Defaults to pigpio.RISING_EDGE.
"""
self.set_gpio(gpio)
self.set_pulse(pulse)
self.set_sample_rate(sample_rate)
self.set_callback(call_back_id)
self._pin_mask = 0
self.reset()
def set_gpio(self, gpio: "list of ints" or int):
"""
The function allows to set or update the GPIO pin list of a GiesOMat
instance, so that pins can be changed/updated.
Keyword arguments:
gpio (int, list of ints) -- Sensor pins that are connected to "OUT".
"""
self._gpio = gpio if isinstance(gpio, list) else [gpio]
def gpio(self):
"""
The function to get (by return) the current list of used GPIO pins
where data is taken from.
Returns:
(list of ints) -- Returns the list of used GPIO pins.
"""
return [gpio_pin for gpio_pin in self._gpio]
def set_pulse(self, pulse: int):
"""
Sets the pulse value (in µs) to the instance and on runtime.
Keyword arguments:
pulse (int) -- The pulse value in µs.
"""
self._pulse = pulse
def pulse(self):
"""
The function to get (by return) the current pulse value.
Returns:
(int) -- The currently used pulse value.
"""
return self._pulse
def set_sample_rate(self, sample_rate: int or float):
"""
        Sets the sample_rate value (in deciseconds [10^-1 s]) to the instance
and on runtime.
Keyword arguments:
sample_rate (int) -- The sample_rate value in deciseconds.
"""
self._sample_rate = sample_rate
def sample_rate(self):
"""
The function to get (by return) the current sample_rate value.
Returns:
(int) -- The currently used sample_rate value.
"""
return self._sample_rate
def set_callback(self, call_back_id: int):
"""
Sets the used callback trigger (when to count, rising, falling switch
point).
Keyword arguments:
call_back_id (int) -- The callback id, e.g. pigpio.RISING_EDGE.
"""
self._call_back_id = call_back_id
def callback(self):
"""
The function to get (by return) the current callback_id value.
Returns:
(int) -- The callback_id, please relate to e.g. pigpio.RISING_EDGE.
"""
return self._call_back_id
def reset(self):
"""
This functions resets all necessary runtime variables, so that after
a configuration change, everything is correctly loaded.
"""
self._pi = pigpio.pi()
self._pi.wave_clear()
for gpio_pin in self._gpio:
self._pin_mask |= 1 << gpio_pin
self._pi.set_mode(gpio_pin, pigpio.INPUT)
self._pulse_gpio = [
pigpio.pulse(self._pin_mask, 0, self._pulse),
pigpio.pulse(0, self._pin_mask, self._pulse)
]
self._pi.wave_add_generic(self._pulse_gpio)
self._wave_id = self._pi.wave_create()
self._call_backs = [
self._pi.callback(
gpio_pin, self._call_back_id
) for gpio_pin in self._gpio
]
for callback in self._call_backs:
callback.reset_tally()
def get(self, iteration: int = 1):
"""
Function to get a certain amount of measured values; there is no
on-line handling. Values will be measured and returned.
Keyword arguments:
iteration (int, optional) -- Measured values amount. Defaults to 1.
Returns:
(list of list of ints or list of ints) -- The measured values
"""
# initialise/reset once to have values with beginning!
for call in self._call_backs:
call.reset_tally()
time.sleep(0.1 * self._sample_rate)
iteration_values = []
for _ in range(iteration):
values = [call.tally() for call in self._call_backs]
iteration_values.append(values)
for call in self._call_backs:
call.reset_tally()
time.sleep(0.1 * self._sample_rate)
if iteration == 1:
return iteration_values[0]
return iteration_values
def run(self, iteration: int = 1, functor=default_functor, **kwargs):
"""
Function to measure a certain amount of values and evaluate them directly.
Evaluation can be a print function or a self-defined function.
Options can be passed with **kwargs.
Keyword arguments:
iteration (int, optional) -- Number of measurements to be done.
Defaults to 1.
            functor (function_ptr, optional) -- An evaluation function.
Defaults to default_functor.
Args:
**kwargs -- arguments that can be evaluated in another function.
"""
# initialise/reset once to have values with beginning!
for call in self._call_backs:
call.reset_tally()
time.sleep(0.1 * self._sample_rate)
while iteration != 0:
if iteration > 0:
iteration -= 1
values = [call.tally() for call in self._call_backs]
functor(values, **kwargs)
for call in self._call_backs:
call.reset_tally()
time.sleep(0.1 * self._sample_rate)
def run_endless(self, functor=default_functor, **kwargs):
"""
Function to permanently measure values and evaluate them directly.
Evaluation can be a print function or a self-defined function.
Options can be passed with **kwargs.
Keyword arguments:
            functor (function_ptr, optional) -- An evaluation function.
Defaults to default_functor.
Args:
**kwargs -- arguments that can be evaluated in another function.
"""
self.run(iteration=-1, functor=functor, **kwargs)
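# Illustrative library-style usage (a sketch; the GPIO numbers are placeholders
# and a running pigpio daemon is assumed):
#
#     sensor = GiesOMat(gpio=[4, 17], pulse=20, sample_rate=5)
#     counts = sensor.get(iteration=3)   # list of per-iteration count lists
#     sensor.run(iteration=5)            # prints counts via default_functor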
def main():
"""
A main function that is used, when this module is used as a stand-alone script.
Arguments can be passed and it will simply print results to std-out.
"""
parser = argparse.ArgumentParser(
description="A short programm to print values from Gies-O-Mat sensor."
)
parser.add_argument(
"-g", metavar="G", nargs="+", type=int, required=True,
help="GPIO pin number(s), where the OUT sensor(s) pin is/are attached to."
)
parser.add_argument(
"-p", metavar="P", default=20, type=int, required=False,
help="Set Pulse to P µs, default p = 20µs."
)
parser.add_argument(
"-s", metavar="S", default=5, type=int, required=False,
help="Set sample rate to S deciseconds [10^-1 s]; default s = 5."
)
parser.add_argument(
"-i", metavar="I", default=10, type=int, required=False,
help="Number of iterations to get a value; use -1 for infinity."
)
args = parser.parse_args()
gpio_pins = args.g
iterations = -1 if args.i < 0 else args.i
pulse = args.p
sample_rate = args.s
connector = GiesOMat(
gpio=gpio_pins,
pulse=pulse,
sample_rate=sample_rate,
)
connector.run(iterations)
if __name__ == "__main__":
# execute only if run as a script
main()
| 33.582759
| 95
| 0.605093
|
e38cd2eae955dabe6bd83436329b4821cc00bfc0
| 16,374
|
py
|
Python
|
test_autoarray/unit/dataset/test_interferometer.py
|
Sketos/PyAutoArray
|
72dc7e8d1c38786915f82a7e7284239e5ce87624
|
[
"MIT"
] | null | null | null |
test_autoarray/unit/dataset/test_interferometer.py
|
Sketos/PyAutoArray
|
72dc7e8d1c38786915f82a7e7284239e5ce87624
|
[
"MIT"
] | null | null | null |
test_autoarray/unit/dataset/test_interferometer.py
|
Sketos/PyAutoArray
|
72dc7e8d1c38786915f82a7e7284239e5ce87624
|
[
"MIT"
] | null | null | null |
import os
import shutil
import numpy as np
import pytest
import autoarray as aa
from autoarray.dataset import interferometer
from autoarray import exc
test_data_dir = "{}/../test_files/array/".format(
os.path.dirname(os.path.realpath(__file__))
)
class TestInterferometerMethods:
def test__data_with_resized_primary_beam(self):
interferometer = aa.interferometer.manual(
visibilities=aa.visibilities.manual_1d(visibilities=[[1, 1]]),
primary_beam=aa.kernel.zeros(shape_2d=(5, 5), pixel_scales=1.0),
noise_map=aa.visibilities.manual_1d(visibilities=[[2, 2]]),
exposure_time_map=1,
uv_wavelengths=1,
)
interferometer = interferometer.resized_primary_beam_from_new_shape_2d(
new_shape_2d=(1, 1)
)
assert (interferometer.primary_beam.in_2d == np.zeros((1, 1))).all()
def test__data_with_modified_visibilities(self):
interferometer = aa.interferometer.manual(
visibilities=np.array([[1, 1]]),
primary_beam=aa.kernel.zeros(shape_2d=(5, 5), pixel_scales=1.0),
noise_map=1,
exposure_time_map=2,
uv_wavelengths=3,
)
interferometer = interferometer.modified_visibilities_from_visibilities(
visibilities=np.array([[2, 2]])
)
assert (interferometer.visibilities == np.array([[2, 2]])).all()
assert (interferometer.primary_beam.in_2d == np.zeros((1, 1))).all()
assert interferometer.noise_map == 1
assert interferometer.exposure_time_map == 2
assert interferometer.uv_wavelengths == 3
class TestSimulateInterferometer:
def test__setup_with_all_features_off(self, transformer_7x7_7):
image = aa.array.manual_2d([[2.0, 0.0, 0.0], [0.0, 1.0, 0.0], [3.0, 0.0, 0.0]])
exposure_time_map = aa.array.full(
fill_value=1.0, pixel_scales=0.1, shape_2d=image.shape_2d
)
interferometer_simulated = aa.interferometer.simulate(
real_space_image=image,
exposure_time=1.0,
exposure_time_map=exposure_time_map,
real_space_pixel_scales=0.1,
transformer=transformer_7x7_7,
noise_sigma=None,
)
simulated_visibilities = transformer_7x7_7.visibilities_from_image(image=image)
assert interferometer_simulated.visibilities == pytest.approx(
simulated_visibilities, 1.0e-4
)
assert interferometer_simulated.real_space_pixel_scales == (0.1, 0.1)
def test__setup_with_background_sky_on__noise_off__no_noise_in_image__noise_map_is_noise_value(
self, transformer_7x7_7
):
image = aa.array.manual_2d([[2.0, 0.0, 0.0], [0.0, 1.0, 0.0], [3.0, 0.0, 0.0]])
exposure_time_map = aa.array.full(
fill_value=1.0, pixel_scales=0.1, shape_2d=image.shape_2d
)
background_sky_map = aa.array.full(
fill_value=2.0, pixel_scales=0.1, shape_2d=image.shape_2d
)
interferometer_simulated = aa.interferometer.simulate(
real_space_image=image,
real_space_pixel_scales=0.1,
exposure_time=1.0,
exposure_time_map=exposure_time_map,
background_sky_map=background_sky_map,
transformer=transformer_7x7_7,
noise_sigma=None,
noise_if_add_noise_false=0.2,
noise_seed=1,
)
simulated_visibilities = transformer_7x7_7.visibilities_from_image(
image=image + background_sky_map
)
assert interferometer_simulated.visibilities == pytest.approx(
simulated_visibilities, 1.0e-4
)
assert (
interferometer_simulated.exposure_time_map.in_2d == 1.0 * np.ones((3, 3))
).all()
assert (interferometer_simulated.noise_map == 0.2 * np.ones((7, 2))).all()
assert interferometer_simulated.real_space_pixel_scales == (0.1, 0.1)
def test__setup_with_noise(self, transformer_7x7_7):
image = aa.array.manual_2d([[2.0, 0.0, 0.0], [0.0, 1.0, 0.0], [3.0, 0.0, 0.0]])
exposure_time_map = aa.array.full(
fill_value=20.0, pixel_scales=0.1, shape_2d=image.shape_2d
)
interferometer_simulated = aa.interferometer.simulate(
real_space_image=image,
real_space_pixel_scales=0.1,
exposure_time=20.0,
exposure_time_map=exposure_time_map,
transformer=transformer_7x7_7,
noise_sigma=0.1,
noise_seed=1,
)
simulated_visibilities = transformer_7x7_7.visibilities_from_image(image=image)
assert (
interferometer_simulated.exposure_time_map.in_2d == 20.0 * np.ones((3, 3))
).all()
assert interferometer_simulated.real_space_pixel_scales == (0.1, 0.1)
assert interferometer_simulated.visibilities[0, :] == pytest.approx(
[-0.005364, -2.36682], 1.0e-4
)
noise_map_realization = (
interferometer_simulated.visibilities - simulated_visibilities
)
assert noise_map_realization == pytest.approx(
interferometer_simulated.noise_map_realization, 1.0e-4
)
assert (interferometer_simulated.noise_map == 0.1 * np.ones((7, 2))).all()
class TestCreateGaussianNoiseMap:
def test__gaussian_noise_sigma_0__gaussian_noise_map_all_0__image_is_identical_to_input(
self
):
simulate_gaussian_noise = interferometer.gaussian_noise_map_from_shape_and_sigma(
shape=(9,), sigma=0.0, noise_seed=1
)
assert (simulate_gaussian_noise == np.zeros((9,))).all()
def test__gaussian_noise_sigma_1__gaussian_noise_map_all_non_0__image_has_noise_added(
self
):
simulate_gaussian_noise = interferometer.gaussian_noise_map_from_shape_and_sigma(
shape=(9,), sigma=1.0, noise_seed=1
)
        # Use seed to give us a known Gaussian noise map we'll test for
assert simulate_gaussian_noise == pytest.approx(
np.array([1.62, -0.61, -0.53, -1.07, 0.87, -2.30, 1.74, -0.76, 0.32]),
1e-2,
)
class TestInterferometerFromFits:
def test__no_settings_just_pass_fits(self):
interferometer = aa.interferometer.from_fits(
visibilities_path=test_data_dir + "3x2_ones_twos.fits",
noise_map_path=test_data_dir + "3x2_threes_fours.fits",
uv_wavelengths_path=test_data_dir + "3x2_fives_sixes.fits",
primary_beam_path=test_data_dir + "3x3_fives.fits",
renormalize_primary_beam=False,
)
assert (interferometer.visibilities.real == np.ones(3)).all()
assert (interferometer.visibilities.imag == 2.0 * np.ones(3)).all()
assert (interferometer.noise_map.real == 3.0 * np.ones(3)).all()
assert (interferometer.noise_map.imag == 4.0 * np.ones(3)).all()
assert (interferometer.uv_wavelengths[:, 0] == 5.0 * np.ones(3)).all()
assert (interferometer.uv_wavelengths[:, 1] == 6.0 * np.ones(3)).all()
assert interferometer.amplitudes == pytest.approx(
np.sqrt(5) * np.ones(3), 1.0e-4
)
assert interferometer.phases == pytest.approx(1.10714 * np.ones(3), 1.0e-4)
assert interferometer.uv_distances == pytest.approx(
np.sqrt(61) * np.ones(3), 1.0e-4
)
assert (interferometer.primary_beam.in_2d == 5.0 * np.ones((3, 3))).all()
def test__optional_array_paths_included__loads_optional_array(self):
interferometer = aa.interferometer.from_fits(
visibilities_path=test_data_dir + "3x2_ones_twos.fits",
noise_map_path=test_data_dir + "3x2_threes_fours.fits",
uv_wavelengths_path=test_data_dir + "3x2_fives_sixes.fits",
primary_beam_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3_sixes.fits",
renormalize_primary_beam=False,
)
assert (interferometer.primary_beam.in_2d == 5.0 * np.ones((3, 3))).all()
assert (interferometer.exposure_time_map == 6.0 * np.ones((3,))).all()
def test__all_files_in_one_fits__load_using_different_hdus(self):
interferometer = aa.interferometer.from_fits(
visibilities_path=test_data_dir + "3x2_multiple_hdu.fits",
visibilities_hdu=0,
noise_map_path=test_data_dir + "3x2_multiple_hdu.fits",
noise_map_hdu=1,
uv_wavelengths_path=test_data_dir + "3x2_multiple_hdu.fits",
uv_wavelengths_hdu=2,
primary_beam_path=test_data_dir + "3x3_multiple_hdu.fits",
primary_beam_hdu=3,
exposure_time_map_path=test_data_dir + "3x3_multiple_hdu.fits",
exposure_time_map_hdu=4,
renormalize_primary_beam=False,
)
assert (interferometer.visibilities.real == np.ones(3)).all()
assert (interferometer.visibilities.imag == np.ones(3)).all()
assert (interferometer.noise_map.real == 2.0 * np.ones(3)).all()
assert (interferometer.noise_map.imag == 2.0 * np.ones(3)).all()
assert (interferometer.uv_wavelengths[:, 0] == 3.0 * np.ones(3)).all()
assert (interferometer.uv_wavelengths[:, 1] == 3.0 * np.ones(3)).all()
assert (interferometer.primary_beam.in_2d == 4.0 * np.ones((3, 3))).all()
assert (interferometer.exposure_time_map == 5.0 * np.ones((3, 3))).all()
def test__exposure_time_included__creates_exposure_time_map_using_exposure_time(
self
):
interferometer = aa.interferometer.from_fits(
visibilities_path=test_data_dir + "3x2_ones_twos.fits",
noise_map_path=test_data_dir + "3x2_threes_fours.fits",
uv_wavelengths_path=test_data_dir + "3x2_fives_sixes.fits",
primary_beam_path=test_data_dir + "3x3_ones.fits",
exposure_time_map_from_single_value=3.0,
)
assert (interferometer.exposure_time_map == 3.0 * np.ones((3,))).all()
def test__pad_shape_of_primary_beam(self):
interferometer = aa.interferometer.from_fits(
visibilities_path=test_data_dir + "3x2_ones_twos.fits",
noise_map_path=test_data_dir + "3x2_threes_fours.fits",
uv_wavelengths_path=test_data_dir + "3x2_fives_sixes.fits",
primary_beam_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3_sixes.fits",
resized_primary_beam_shape_2d=(9, 9),
renormalize_primary_beam=False,
)
primary_beam_padded_array = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 5.0, 5.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 5.0, 5.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 5.0, 5.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
]
)
assert (interferometer.primary_beam.in_2d == primary_beam_padded_array).all()
def test__trim_shape_of_primary_beam(self):
interferometer = aa.interferometer.from_fits(
visibilities_path=test_data_dir + "3x2_ones_twos.fits",
noise_map_path=test_data_dir + "3x2_threes_fours.fits",
uv_wavelengths_path=test_data_dir + "3x2_fives_sixes.fits",
primary_beam_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3_sixes.fits",
resized_primary_beam_shape_2d=(1, 1),
renormalize_primary_beam=False,
)
trimmed_array = np.array([[1.0]])
assert (interferometer.primary_beam.in_2d == 5.0 * trimmed_array).all()
def test__primary_beam_renormalized_false__does_not_renormalize_primary_beam(self):
interferometer = aa.interferometer.from_fits(
visibilities_path=test_data_dir + "3x2_ones_twos.fits",
noise_map_path=test_data_dir + "3x2_threes_fours.fits",
uv_wavelengths_path=test_data_dir + "3x2_fives_sixes.fits",
primary_beam_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3_sixes.fits",
renormalize_primary_beam=False,
)
assert (interferometer.primary_beam.in_2d == 5.0 * np.ones((3, 3))).all()
assert (interferometer.exposure_time_map == 6.0 * np.ones((3,))).all()
def test__primary_beam_renormalized_true__renormalized_primary_beam(self):
interferometer = aa.interferometer.from_fits(
visibilities_path=test_data_dir + "3x2_ones_twos.fits",
noise_map_path=test_data_dir + "3x2_threes_fours.fits",
uv_wavelengths_path=test_data_dir + "3x2_fives_sixes.fits",
primary_beam_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3_sixes.fits",
renormalize_primary_beam=True,
)
assert interferometer.primary_beam.in_2d == pytest.approx(
(1.0 / 9.0) * np.ones((3, 3)), 1e-2
)
assert (interferometer.exposure_time_map == 6.0 * np.ones((3,))).all()
    def test__exposure_time_and_exposure_time_map_included__raises_imaging_error(self):
with pytest.raises(exc.DataException):
aa.interferometer.from_fits(
visibilities_path=test_data_dir + "3x2_ones_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
uv_wavelengths_path=test_data_dir + "3x2_fives_sixes.fits",
exposure_time_map_path=test_data_dir + "3x3_ones.fits",
exposure_time_map_from_single_value=1.0,
)
def test__output_all_arrays(self):
interferometer = aa.interferometer.from_fits(
visibilities_path=test_data_dir + "3x2_ones_twos.fits",
noise_map_path=test_data_dir + "3x2_threes_fours.fits",
uv_wavelengths_path=test_data_dir + "3x2_fives_sixes.fits",
primary_beam_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3_sixes.fits",
renormalize_primary_beam=False,
)
output_data_dir = "{}/../test_files/array/output_test/".format(
os.path.dirname(os.path.realpath(__file__))
)
if os.path.exists(output_data_dir):
shutil.rmtree(output_data_dir)
os.makedirs(output_data_dir)
interferometer.output_to_fits(
visibilities_path=output_data_dir + "visibilities.fits",
noise_map_path=output_data_dir + "noise_map.fits",
primary_beam_path=output_data_dir + "primary_beam.fits",
exposure_time_map_path=output_data_dir + "exposure_time_map.fits",
uv_wavelengths_path=output_data_dir + "uv_wavelengths.fits",
overwrite=True,
)
interferometer = aa.interferometer.from_fits(
visibilities_path=output_data_dir + "visibilities.fits",
noise_map_path=output_data_dir + "noise_map.fits",
primary_beam_path=output_data_dir + "primary_beam.fits",
exposure_time_map_path=output_data_dir + "exposure_time_map.fits",
uv_wavelengths_path=output_data_dir + "uv_wavelengths.fits",
renormalize_primary_beam=False,
)
assert (interferometer.visibilities.real == np.ones(3)).all()
assert (interferometer.visibilities.imag == 2.0 * np.ones(3)).all()
assert (interferometer.noise_map.real == 3.0 * np.ones((3,))).all()
assert (interferometer.noise_map.imag == 4.0 * np.ones((3,))).all()
assert (interferometer.uv_wavelengths[:, 0] == 5.0 * np.ones(3)).all()
assert (interferometer.uv_wavelengths[:, 1] == 6.0 * np.ones(3)).all()
assert (interferometer.primary_beam.in_2d == 5.0 * np.ones((3, 3))).all()
assert (interferometer.exposure_time_map == 6.0 * np.ones((3,))).all()
| 42.092545
| 99
| 0.637535
|
08aadd81d8b3305dc6e575b67b4a471362270802
| 18,228
|
py
|
Python
|
examples/oandav20test/oandav20test.py
|
josipbudzaki/btoandav20
|
f6b7db07e37ed0d97df8a9c2337ce4b7e5953f56
|
[
"Apache-2.0"
] | 60
|
2018-03-01T20:55:40.000Z
|
2020-08-24T13:13:42.000Z
|
examples/oandav20test/oandav20test.py
|
josipbudzaki/btoandav20
|
f6b7db07e37ed0d97df8a9c2337ce4b7e5953f56
|
[
"Apache-2.0"
] | 54
|
2018-03-02T14:14:23.000Z
|
2020-08-18T12:09:35.000Z
|
examples/oandav20test/oandav20test.py
|
josipbudzaki/btoandav20
|
f6b7db07e37ed0d97df8a9c2337ce4b7e5953f56
|
[
"Apache-2.0"
] | 33
|
2018-07-10T00:56:21.000Z
|
2020-07-28T12:56:44.000Z
|
#!/usr/bin/env python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import datetime
import backtrader as bt
from backtrader.utils import flushfile # win32 quick stdout flushing
import btoandav20
StoreCls = btoandav20.stores.OandaV20Store
DataCls = btoandav20.feeds.OandaV20Data
BrokerCls = btoandav20.brokers.OandaV20Broker
# available timeframes for oanda
TIMEFRAMES = [bt.TimeFrame.Names[bt.TimeFrame.Seconds],
bt.TimeFrame.Names[bt.TimeFrame.Minutes],
bt.TimeFrame.Names[bt.TimeFrame.Days],
bt.TimeFrame.Names[bt.TimeFrame.Weeks],
bt.TimeFrame.Names[bt.TimeFrame.Months]]
class TestStrategy(bt.Strategy):
params = dict(
smaperiod=5,
trade=False,
stake=10,
exectype=bt.Order.Market,
stopafter=0,
valid=None,
cancel=0,
donotcounter=False,
sell=False,
usebracket=False,
)
def __init__(self):
# To control operation entries
self.orderid = list()
self.order = None
self.counttostop = 0
self.datastatus = 0
# Create SMA on 2nd data
self.sma = bt.indicators.MovAv.SMA(self.data, period=self.p.smaperiod)
print('--------------------------------------------------')
print('Strategy Created')
print('--------------------------------------------------')
def notify_data(self, data, status, *args, **kwargs):
print('*' * 5, 'DATA NOTIF:', data._getstatusname(status), *args)
if status == data.LIVE:
self.counttostop = self.p.stopafter
self.datastatus = 1
def notify_store(self, msg, *args, **kwargs):
print('*' * 5, 'STORE NOTIF:', msg)
def notify_order(self, order):
if order.status in [order.Completed, order.Cancelled, order.Rejected]:
self.order = None
print('-' * 50, 'ORDER BEGIN', datetime.datetime.now())
print(order)
print('-' * 50, 'ORDER END')
def notify_trade(self, trade):
print('-' * 50, 'TRADE BEGIN', datetime.datetime.now())
print(trade)
print('-' * 50, 'TRADE END')
def prenext(self):
self.next(frompre=True)
def next(self, frompre=False):
txt = list()
txt.append('Data0')
txt.append('%04d' % len(self.data0))
dtfmt = '%Y-%m-%dT%H:%M:%S.%f'
txt.append('{:f}'.format(self.data.datetime[0]))
txt.append('%s' % self.data.datetime.datetime(0).strftime(dtfmt))
txt.append('{:f}'.format(self.data.open[0]))
txt.append('{:f}'.format(self.data.high[0]))
txt.append('{:f}'.format(self.data.low[0]))
txt.append('{:f}'.format(self.data.close[0]))
txt.append('{:6d}'.format(int(self.data.volume[0])))
txt.append('{:d}'.format(int(self.data.openinterest[0])))
txt.append('{:f}'.format(self.sma[0]))
print(', '.join(txt))
if len(self.datas) > 1 and len(self.data1):
txt = list()
txt.append('Data1')
txt.append('%04d' % len(self.data1))
dtfmt = '%Y-%m-%dT%H:%M:%S.%f'
txt.append('{}'.format(self.data1.datetime[0]))
txt.append('%s' % self.data1.datetime.datetime(0).strftime(dtfmt))
txt.append('{}'.format(self.data1.open[0]))
txt.append('{}'.format(self.data1.high[0]))
txt.append('{}'.format(self.data1.low[0]))
txt.append('{}'.format(self.data1.close[0]))
txt.append('{}'.format(self.data1.volume[0]))
txt.append('{}'.format(self.data1.openinterest[0]))
txt.append('{}'.format(float('NaN')))
print(', '.join(txt))
if self.counttostop: # stop after x live lines
self.counttostop -= 1
if not self.counttostop:
self.env.runstop()
return
if not self.p.trade:
return
if self.datastatus and not self.position and len(self.orderid) < 1:
if not self.p.usebracket:
if not self.p.sell:
# price = round(self.data0.close[0] * 0.90, 2)
price = self.data0.close[0] - 0.005
self.order = self.buy(size=self.p.stake,
exectype=self.p.exectype,
price=price,
valid=self.p.valid)
else:
# price = round(self.data0.close[0] * 1.10, 4)
price = self.data0.close[0] - 0.05
self.order = self.sell(size=self.p.stake,
exectype=self.p.exectype,
price=price,
valid=self.p.valid)
else:
print('USING BRACKET')
price = self.data0.close[0] - 0.05
self.order, _, _ = self.buy_bracket(size=self.p.stake,
exectype=bt.Order.Market,
price=price,
stopprice=price - 0.10,
limitprice=price + 0.10,
valid=self.p.valid)
self.orderid.append(self.order)
elif self.position and not self.p.donotcounter:
if self.order is None:
if not self.p.sell:
self.order = self.sell(size=self.p.stake // 2,
exectype=bt.Order.Market,
price=self.data0.close[0])
else:
self.order = self.buy(size=self.p.stake // 2,
exectype=bt.Order.Market,
price=self.data0.close[0])
self.orderid.append(self.order)
elif self.order is not None and self.p.cancel:
if self.datastatus > self.p.cancel:
self.cancel(self.order)
if self.datastatus:
self.datastatus += 1
def start(self):
if self.data0.contractdetails is not None:
print('-- Contract Details:')
print(self.data0.contractdetails)
header = ['Datetime', 'Open', 'High', 'Low', 'Close', 'Volume',
'OpenInterest', 'SMA']
print(', '.join(header))
self.done = False
def runstrategy():
args = parse_args()
# Create a cerebro
cerebro = bt.Cerebro()
storekwargs = dict(
token=args.token,
account=args.account,
practice=not args.live
)
if not args.no_store:
store = StoreCls(**storekwargs)
if args.broker:
if args.no_store:
broker = BrokerCls(**storekwargs)
else:
broker = store.getbroker()
cerebro.setbroker(broker)
timeframe = bt.TimeFrame.TFrame(args.timeframe)
# Manage data1 parameters
tf1 = args.timeframe1
tf1 = bt.TimeFrame.TFrame(tf1) if tf1 is not None else timeframe
cp1 = args.compression1
cp1 = cp1 if cp1 is not None else args.compression
if args.resample or args.replay:
datatf = datatf1 = bt.TimeFrame.Ticks
datacomp = datacomp1 = 1
else:
datatf = timeframe
datacomp = args.compression
datatf1 = tf1
datacomp1 = cp1
fromdate = None
if args.fromdate:
dtformat = '%Y-%m-%d' + ('T%H:%M:%S' * ('T' in args.fromdate))
fromdate = datetime.datetime.strptime(args.fromdate, dtformat)
DataFactory = DataCls if args.no_store else store.getdata
datakwargs = dict(
timeframe=datatf, compression=datacomp,
qcheck=args.qcheck,
historical=args.historical,
fromdate=fromdate,
bidask=args.bidask,
useask=args.useask,
backfill_start=not args.no_backfill_start,
backfill=not args.no_backfill,
tz=args.timezone
)
if args.no_store and not args.broker: # neither store nor broker
datakwargs.update(storekwargs) # pass the store args over the data
data0 = DataFactory(dataname=args.data0, **datakwargs)
data1 = None
if args.data1 is not None:
if args.data1 != args.data0:
datakwargs['timeframe'] = datatf1
datakwargs['compression'] = datacomp1
data1 = DataFactory(dataname=args.data1, **datakwargs)
else:
data1 = data0
rekwargs = dict(
timeframe=timeframe, compression=args.compression,
bar2edge=not args.no_bar2edge,
adjbartime=not args.no_adjbartime,
rightedge=not args.no_rightedge,
takelate=not args.no_takelate,
)
if args.replay:
cerebro.replaydata(data0, **rekwargs)
if data1 is not None:
rekwargs['timeframe'] = tf1
rekwargs['compression'] = cp1
cerebro.replaydata(data1, **rekwargs)
elif args.resample:
cerebro.resampledata(data0, **rekwargs)
if data1 is not None:
rekwargs['timeframe'] = tf1
rekwargs['compression'] = cp1
cerebro.resampledata(data1, **rekwargs)
else:
cerebro.adddata(data0)
if data1 is not None:
cerebro.adddata(data1)
if args.valid is None:
valid = None
else:
valid = datetime.timedelta(seconds=args.valid)
# Add the strategy
cerebro.addstrategy(TestStrategy,
smaperiod=args.smaperiod,
trade=args.trade,
exectype=bt.Order.ExecType(args.exectype),
stake=args.stake,
stopafter=args.stopafter,
valid=valid,
cancel=args.cancel,
donotcounter=args.donotcounter,
sell=args.sell,
usebracket=args.usebracket)
# Live data ... avoid long data accumulation by switching to "exactbars"
cerebro.run(exactbars=args.exactbars)
if args.exactbars < 1: # plotting is possible
if args.plot:
pkwargs = dict(style='line')
if args.plot is not True: # evals to True but is not True
npkwargs = eval('dict(' + args.plot + ')') # args were passed
pkwargs.update(npkwargs)
cerebro.plot(**pkwargs)
def parse_args(pargs=None):
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Test Oanda v20 integration')
parser.add_argument('--exactbars', default=1, type=int,
required=False, action='store',
help='exactbars level, use 0/-1/-2 to enable plotting')
parser.add_argument('--stopafter', default=0, type=int,
required=False, action='store',
help='Stop after x lines of LIVE data')
parser.add_argument('--no-store',
required=False, action='store_true',
help='Do not use the store pattern')
parser.add_argument('--debug',
required=False, action='store_true',
help='Display all info received from source')
parser.add_argument('--token', default=None,
required=True, action='store',
help='Access token to use')
parser.add_argument('--account', default=None,
required=True, action='store',
help='Account identifier to use')
parser.add_argument('--live', default=None,
required=False, action='store',
help='Go to live server rather than practice')
parser.add_argument('--qcheck', default=0.5, type=float,
required=False, action='store',
help=('Timeout for periodic '
'notification/resampling/replaying check'))
parser.add_argument('--data0', default=None,
required=True, action='store',
help='data 0 into the system')
parser.add_argument('--data1', default=None,
required=False, action='store',
help='data 1 into the system')
parser.add_argument('--timezone', default=None,
required=False, action='store',
help='timezone to get time output into (pytz names)')
parser.add_argument('--bidask', default=None,
required=False, action='store_true',
help='Use bidask ... if False use midpoint')
parser.add_argument('--useask', default=None,
required=False, action='store_true',
help='Use the "ask" of bidask prices/streaming')
parser.add_argument('--no-backfill_start',
required=False, action='store_true',
help='Disable backfilling at the start')
parser.add_argument('--no-backfill',
required=False, action='store_true',
help='Disable backfilling after a disconnection')
parser.add_argument('--historical',
required=False, action='store_true',
help='do only historical download')
parser.add_argument('--fromdate',
required=False, action='store',
help=('Starting date for historical download '
'with format: YYYY-MM-DD[THH:MM:SS]'))
parser.add_argument('--smaperiod', default=5, type=int,
required=False, action='store',
help='Period to apply to the Simple Moving Average')
pgroup = parser.add_mutually_exclusive_group(required=False)
pgroup.add_argument('--replay',
required=False, action='store_true',
help='replay to chosen timeframe')
pgroup.add_argument('--resample',
required=False, action='store_true',
help='resample to chosen timeframe')
parser.add_argument('--timeframe', default=TIMEFRAMES[0],
choices=TIMEFRAMES,
required=False, action='store',
help='TimeFrame for Resample/Replay')
parser.add_argument('--compression', default=5, type=int,
required=False, action='store',
help='Compression for Resample/Replay')
parser.add_argument('--timeframe1', default=None,
choices=TIMEFRAMES,
required=False, action='store',
help='TimeFrame for Resample/Replay - Data1')
parser.add_argument('--compression1', default=None, type=int,
required=False, action='store',
help='Compression for Resample/Replay - Data1')
parser.add_argument('--no-takelate',
required=False, action='store_true',
help=('resample/replay, do not accept late samples'))
parser.add_argument('--no-bar2edge',
required=False, action='store_true',
help='no bar2edge for resample/replay')
parser.add_argument('--no-adjbartime',
required=False, action='store_true',
help='no adjbartime for resample/replay')
parser.add_argument('--no-rightedge',
required=False, action='store_true',
help='no rightedge for resample/replay')
parser.add_argument('--broker',
required=False, action='store_true',
help='Use Oanda as broker')
parser.add_argument('--trade',
required=False, action='store_true',
help='Do Sample Buy/Sell operations')
parser.add_argument('--sell',
required=False, action='store_true',
help='Start by selling')
parser.add_argument('--usebracket',
required=False, action='store_true',
help='Test buy_bracket')
parser.add_argument('--donotcounter',
required=False, action='store_true',
help='Do not counter the 1st operation')
parser.add_argument('--exectype', default=bt.Order.ExecTypes[0],
choices=bt.Order.ExecTypes,
required=False, action='store',
help='Execution to Use when opening position')
parser.add_argument('--stake', default=10, type=int,
required=False, action='store',
help='Stake to use in buy operations')
parser.add_argument('--valid', default=None, type=float,
required=False, action='store',
help='Seconds to keep the order alive (0 means DAY)')
parser.add_argument('--cancel', default=0, type=int,
required=False, action='store',
help=('Cancel a buy order after n bars in operation,'
' to be combined with orders like Limit'))
# Plot options
parser.add_argument('--plot', '-p', nargs='?', required=False,
metavar='kwargs', const=True,
help=('Plot the read data applying any kwargs passed\n'
'\n'
'For example (escape the quotes if needed):\n'
'\n'
' --plot style="candle" (to plot candles)\n'))
if pargs is not None:
return parser.parse_args(pargs)
return parser.parse_args()
if __name__ == '__main__':
runstrategy()
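# Illustrative invocation (token, account id and instrument are placeholders):
#
#     python oandav20test.py --token YOUR_TOKEN --account YOUR_ACCOUNT_ID \
#         --data0 EUR_USD --timeframe Minutes --compression 1 --resample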
| 37.352459
| 79
| 0.528363
|
c78bb45f17c19848fb7f47b72329e488c1ef35f0
| 1,189
|
py
|
Python
|
ex256/legendre_pi.py
|
joe-antognini/praxis
|
11e7fbd1805d9d77402f83f8a4101a90c75dc0a7
|
[
"MIT"
] | null | null | null |
ex256/legendre_pi.py
|
joe-antognini/praxis
|
11e7fbd1805d9d77402f83f8a4101a90c75dc0a7
|
[
"MIT"
] | null | null | null |
ex256/legendre_pi.py
|
joe-antognini/praxis
|
11e7fbd1805d9d77402f83f8a4101a90c75dc0a7
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
from math import log, sqrt
from sieve_erato import sieve_erato
def memoize(f):
class memodict(dict):
def __init__(self, f):
self.f = f
def __call__(self, *args):
return self[args]
def __missing__(self, key):
ret = self[key] = self.f(*key)
return ret
return memodict(f)
def prime_n(n):
'''Return the n'th prime.'''
if n == 0:
return 1
j = 0
count = 1
while count != n:
primes = sieve_erato(int(2**j * log(n) * n))
count = 1
for i, elem in enumerate(primes):
if elem:
count += 1
if count == n:
break
j += 1
return 2*i + 3
@memoize
def legendre_phi(x, a, acc=0):
'''Calculate Legendre's phi function.'''
if x == 0:
return 0
while a > 1:
p_a = prime_n(a)
        (x, a, acc) = (x, a - 1, legendre_phi(x // p_a, a - 1) + acc)
    return (x + 1) // 2 - acc
def legendre_pi(n):
'''Calculate the number of primes less than or equal to n using Legendre's
algorithm.'''
if n == 2:
return 1
elif n == 3:
return 2
else:
a = legendre_pi(int(sqrt(n)))
return legendre_phi(n, a) + a - 1
if __name__ == '__main__':
    print(legendre_pi(int(1e6)))
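# Background note on the identities implemented above:
#     pi(n) = phi(n, a) + a - 1,  with a = pi(floor(sqrt(n)))
# where phi(x, a) counts integers in [1, x] divisible by none of the first a
# primes, via phi(x, a) = phi(x, a - 1) - phi(x // p_a, a - 1) and the base
# case phi(x, 1) = (x + 1) // 2 (the odd numbers up to x).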
| 19.816667
| 76
| 0.576114
|
0930e4482c40398b0a2a5b4b99bc2a1b14e5bfa2
| 6,515
|
py
|
Python
|
calculation/gmhazard_calc/gmhazard_calc/directivity/scripts/mc_nhyp_runs.py
|
ucgmsim/gmhazard
|
d3d90b4c94b3d9605597a3efeccc8523a1e50c0e
|
[
"MIT"
] | null | null | null |
calculation/gmhazard_calc/gmhazard_calc/directivity/scripts/mc_nhyp_runs.py
|
ucgmsim/gmhazard
|
d3d90b4c94b3d9605597a3efeccc8523a1e50c0e
|
[
"MIT"
] | 8
|
2021-10-13T02:33:23.000Z
|
2022-03-29T21:01:08.000Z
|
calculation/gmhazard_calc/gmhazard_calc/directivity/scripts/mc_nhyp_runs.py
|
ucgmsim/gmhazard
|
d3d90b4c94b3d9605597a3efeccc8523a1e50c0e
|
[
"MIT"
] | null | null | null |
"""
Compute directivity values multiple times, as specified by a repeat count,
to understand the standard deviation in results for different numbers of hypocentres.
"""
import time
import argparse
import multiprocessing as mp
from pathlib import Path
import numpy as np
from qcore import nhm
import gmhazard_calc
from gmhazard_calc.im import IM, IMType
from gmhazard_calc import directivity
import common
def perform_mp_directivity(
fault_name,
hypo_along_strike,
hypo_down_dip,
nhypo,
method,
repeats,
period,
grid_space,
nhm_dict,
output_dir,
repeat_n_procs: int,
hypo_n_procs: int,
):
assert repeat_n_procs == 1 or hypo_n_procs == 1
if nhypo is None:
nhypo = hypo_along_strike * hypo_down_dip
print(f"Computing for {fault_name} {nhypo}")
fault, site_coords, planes, lon_lat_depth, x, y = directivity.utils.load_fault_info(
fault_name, nhm_dict, grid_space
)
n_hypo_data = directivity.NHypoData(
gmhazard_calc.HypoMethod(method), nhypo, hypo_along_strike, hypo_down_dip
)
if n_hypo_data.method in [
gmhazard_calc.HypoMethod.MONTE_CARLO,
gmhazard_calc.HypoMethod.LATIN_HYPERCUBE,
]:
total_fd = np.zeros((repeats, len(site_coords), 1))
total_fd_array = np.zeros((repeats, nhypo, len(site_coords), 1))
if repeat_n_procs == 1:
for i in range(repeats):
fdi, fdi_array, _ = directivity.compute_fault_directivity(
lon_lat_depth,
planes,
site_coords,
n_hypo_data,
fault.mw,
fault.rake,
periods=[period],
n_procs=hypo_n_procs,
)
total_fd[i] = fdi
total_fd_array[i] = fdi_array
else:
with mp.Pool(repeat_n_procs) as pool:
results = pool.starmap(
directivity.compute_fault_directivity,
[
(
lon_lat_depth,
planes,
site_coords,
n_hypo_data,
fault.mw,
fault.rake,
[period],
1,
)
for ix in range(repeats)
],
)
for ix, cur_result in enumerate(results):
total_fd[ix] = cur_result[0]
total_fd_array[ix] = cur_result[1]
fdi_average = np.mean(total_fd, axis=0)
fdi_average = fdi_average.reshape((grid_space, grid_space))
else:
fdi, fdi_array, _ = directivity.compute_fault_directivity(
lon_lat_depth,
planes,
site_coords,
n_hypo_data,
fault.mw,
fault.rake,
periods=[period],
n_procs=hypo_n_procs,
)
total_fd = fdi
fdi_average = fdi.reshape((100, 100))
total_fd_array = fdi_array
title = f"{fault_name} Length={fault.length} Dip={fault.dip} Rake={fault.rake}"
directivity.validation.plots.plot_fdi(
x,
y,
fdi_average,
lon_lat_depth,
Path(f"{output_dir}/{fault_name}_{nhypo}.png"),
title,
)
np.save(
f"{output_dir}/{fault_name}_{nhypo}_fd_mc_hypo_array.npy",
np.exp(total_fd_array),
)
np.save(
f"{output_dir}/{fault_name}_{nhypo}_fd_mc.npy",
np.exp(total_fd),
)
np.save(
f"{output_dir}/{fault_name}_{nhypo}_fd_average.npy",
np.exp(fdi_average),
)
def parse_args():
nhm_dict, faults, im, grid_space, nhyps = common.default_variables()
parser = argparse.ArgumentParser()
parser.add_argument("output_dir")
parser.add_argument(
"--faults",
default=faults,
nargs="+",
help="List of faults to calculate",
)
parser.add_argument(
"--nstrikes",
default=None,
nargs="+",
type=int,
help="List of hypocentres along strike",
)
parser.add_argument(
"--ndips",
default=None,
nargs="+",
type=int,
help="List of hypocentres down dip",
)
parser.add_argument(
"--nhypos",
default=None,
nargs="+",
type=int,
help="List of hypocentre totals",
)
parser.add_argument(
"--method",
default="LATIN_HYPERCUBE",
help="Method to place hypocentres",
)
parser.add_argument(
"--repeats",
default=100,
type=int,
help="Times to repeat directivity calculation",
)
parser.add_argument(
"--period",
default=im.period,
type=float,
help="Period to calculate directivity for",
)
parser.add_argument(
"--grid_space",
default=grid_space,
type=int,
help="Number of sites to do along each axis",
)
parser.add_argument(
"--repeat_n_procs",
default=1,
type=int,
help="Number of processes to use to process the number of repeats."
"Note: Only one of repeat_n_procs and hypo_n_procs can be greater than one",
)
parser.add_argument(
"--hypo_n_procs",
default=1,
type=int,
help="Number of processes to use for hypocentre computation. "
"Note: Only one of repeat_n_procs and hypo_n_procs can be greater than one",
)
return parser.parse_args(), nhm_dict
if __name__ == "__main__":
args, nhm_dict = parse_args()
n_hypo_comb = len(args.nhypos) if args.nhypos is not None else len(args.nstrikes)
start_time = time.time()
for fault in args.faults:
for ix in range(n_hypo_comb):
perform_mp_directivity(
fault,
None if args.nstrikes is None else args.nstrikes[ix],
None if args.ndips is None else args.ndips[ix],
None if args.nhypos is None else args.nhypos[ix],
args.method,
args.repeats,
args.period,
args.grid_space,
nhm_dict,
args.output_dir,
args.repeat_n_procs,
args.hypo_n_procs,
)
print(f"FINISHED and took {time.time() - start_time}")
| 28.574561
| 88
| 0.552878
|
7fece85a0a17805ada21f70bd9db4431facfe854
| 1,793
|
py
|
Python
|
stacked_bar_kmers.py
|
borisz264/toeprint_seq
|
370bf91b3487b84286c42f2f7e41ab6cc41ba958
|
[
"MIT"
] | null | null | null |
stacked_bar_kmers.py
|
borisz264/toeprint_seq
|
370bf91b3487b84286c42f2f7e41ab6cc41ba958
|
[
"MIT"
] | null | null | null |
stacked_bar_kmers.py
|
borisz264/toeprint_seq
|
370bf91b3487b84286c42f2f7e41ab6cc41ba958
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import collections
import numpy
import math
import aColors
import tps_utils
def plot_stack(sp, bin_edges, sequence_mappings_passing_cutoff, protein, scale='linear', thresh='2sig'):
assert scale in ['linear', 'log']
sorted_mappings = sorted(sequence_mappings_passing_cutoff)
enrichments = [sequence_mapping.enrichment for sequence_mapping in sorted_mappings]
if thresh == '2sig':
thresh = numpy.mean(enrichments) + 2 * numpy.std(enrichments)
bottoms = []
for bin_left, bin_right in tps_utils.pairwise(bin_edges):
color2height = collections.Counter()
width = bin_right - bin_left
for sequence_mapping, enrichment in zip(sorted_mappings, enrichments):
if enrichment > bin_right or enrichment < bin_left:
continue
color = aColors.protein_colors(sequence_mapping, protein, enrichment >= thresh)
color2height[color] += 1
if scale == 'linear':
bottom = 0.
for color in aColors.ordered_colors:
if not color2height[color]:
continue
sp.bar(bin_left, color2height[color], width=width, facecolor=color, edgecolor='none', bottom=bottom)
bottom += color2height[color]
elif scale == 'log':
bottom = 0.
for color in aColors.ordered_colors:
if not color2height[color]:
continue
sp.bar(bin_left, math.log(color2height[color] + bottom + 1, 2) - math.log(bottom + 1, 2),
width=width, facecolor=color,
edgecolor='none', bottom=math.log(bottom + 1, 2))
bottom += color2height[color]
bottoms.append(bottom)
return max(bottoms)
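# A minimal usage sketch (protein name and bin range are hypothetical; assumes each
# element of sequence_mappings_passing_cutoff is sortable and carries an `enrichment`
# attribute, as the function above expects):
#   fig, sp = plt.subplots()
#   bin_edges = numpy.linspace(0., 10., 51)
#   ymax = plot_stack(sp, bin_edges, mappings, 'hnRNPA1', scale='log', thresh='2sig')
#   sp.set_ylim(0, ymax)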
| 43.731707
| 116
| 0.625209
|
3e4186cbd91850c04567f1a99c6a5d761bee7f17
| 6,433
|
py
|
Python
|
zerver/tests/test_muting.py
|
ankita-2798/zulip
|
4e11e7ee5b3406f17120a05a4702c69a7e12d1e0
|
[
"Apache-2.0"
] | null | null | null |
zerver/tests/test_muting.py
|
ankita-2798/zulip
|
4e11e7ee5b3406f17120a05a4702c69a7e12d1e0
|
[
"Apache-2.0"
] | null | null | null |
zerver/tests/test_muting.py
|
ankita-2798/zulip
|
4e11e7ee5b3406f17120a05a4702c69a7e12d1e0
|
[
"Apache-2.0"
] | null | null | null |
from django.utils.timezone import now as timezone_now
from datetime import timedelta
from typing import Any, Dict
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.stream_topic import StreamTopicTarget
from zerver.models import (
get_stream,
UserProfile,
MutedTopic
)
from zerver.lib.topic_mutes import (
add_topic_mute,
get_topic_mutes,
remove_topic_mute,
topic_is_muted,
)
class MutedTopicsTests(ZulipTestCase):
def test_user_ids_muting_topic(self) -> None:
hamlet = self.example_user('hamlet')
cordelia = self.example_user('cordelia')
realm = hamlet.realm
stream = get_stream('Verona', realm)
recipient = stream.recipient
topic_name = 'teST topic'
stream_topic_target = StreamTopicTarget(
stream_id=stream.id,
topic_name=topic_name,
)
user_ids = stream_topic_target.user_ids_muting_topic()
self.assertEqual(user_ids, set())
def mute_user(user: UserProfile) -> None:
add_topic_mute(
user_profile=user,
stream_id=stream.id,
recipient_id=recipient.id,
topic_name='test TOPIC',
date_muted=timezone_now(),
)
mute_user(hamlet)
user_ids = stream_topic_target.user_ids_muting_topic()
self.assertEqual(user_ids, {hamlet.id})
hamlet_date_muted = MutedTopic.objects.filter(user_profile=hamlet)[0].date_muted
self.assertTrue(timezone_now() - hamlet_date_muted <= timedelta(seconds=100))
mute_user(cordelia)
user_ids = stream_topic_target.user_ids_muting_topic()
self.assertEqual(user_ids, {hamlet.id, cordelia.id})
cordelia_date_muted = MutedTopic.objects.filter(user_profile=cordelia)[0].date_muted
self.assertTrue(timezone_now() - cordelia_date_muted <= timedelta(seconds=100))
def test_add_muted_topic(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
stream = get_stream('Verona', user.realm)
url = '/api/v1/users/me/subscriptions/muted_topics'
payloads = [
{'stream': stream.name, 'topic': 'Verona3', 'op': 'add'},
{'stream_id': stream.id, 'topic': 'Verona3', 'op': 'add'},
]
for data in payloads:
result = self.api_patch(user, url, data)
self.assert_json_success(result)
self.assertIn([stream.name, 'Verona3'], get_topic_mutes(user))
self.assertTrue(topic_is_muted(user, stream.id, 'Verona3'))
self.assertTrue(topic_is_muted(user, stream.id, 'verona3'))
remove_topic_mute(
user_profile=user,
stream_id=stream.id,
topic_name='Verona3',
)
def test_remove_muted_topic(self) -> None:
user = self.example_user('hamlet')
realm = user.realm
self.login_user(user)
stream = get_stream('Verona', realm)
recipient = stream.recipient
url = '/api/v1/users/me/subscriptions/muted_topics'
payloads = [
{'stream': stream.name, 'topic': 'vERONA3', 'op': 'remove'},
{'stream_id': stream.id, 'topic': 'vEroNA3', 'op': 'remove'},
]
for data in payloads:
add_topic_mute(
user_profile=user,
stream_id=stream.id,
recipient_id=recipient.id,
topic_name='Verona3',
date_muted=timezone_now(),
)
self.assertIn([stream.name, 'Verona3'], get_topic_mutes(user))
result = self.api_patch(user, url, data)
self.assert_json_success(result)
self.assertNotIn([stream.name, 'Verona3'], get_topic_mutes(user))
self.assertFalse(topic_is_muted(user, stream.id, 'verona3'))
def test_muted_topic_add_invalid(self) -> None:
user = self.example_user('hamlet')
realm = user.realm
self.login_user(user)
stream = get_stream('Verona', realm)
recipient = stream.recipient
add_topic_mute(
user_profile=user,
stream_id=stream.id,
recipient_id=recipient.id,
topic_name='Verona3',
date_muted=timezone_now(),
)
url = '/api/v1/users/me/subscriptions/muted_topics'
data = {'stream': stream.name, 'topic': 'Verona3', 'op': 'add'} # type: Dict[str, Any]
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Topic already muted")
data = {'stream_id': 999999999, 'topic': 'Verona3', 'op': 'add'}
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Invalid stream id")
data = {'topic': 'Verona3', 'op': 'add'}
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Please supply 'stream'.")
data = {'stream': stream.name, 'stream_id': stream.id, 'topic': 'Verona3', 'op': 'add'}
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Please choose one: 'stream' or 'stream_id'.")
def test_muted_topic_remove_invalid(self) -> None:
user = self.example_user('hamlet')
realm = user.realm
self.login_user(user)
stream = get_stream('Verona', realm)
url = '/api/v1/users/me/subscriptions/muted_topics'
data = {'stream': 'BOGUS', 'topic': 'Verona3', 'op': 'remove'} # type: Dict[str, Any]
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Topic is not muted")
data = {'stream': stream.name, 'topic': 'BOGUS', 'op': 'remove'}
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Topic is not muted")
data = {'stream_id': 999999999, 'topic': 'BOGUS', 'op': 'remove'}
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Topic is not muted")
data = {'topic': 'Verona3', 'op': 'remove'}
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Please supply 'stream'.")
data = {'stream': stream.name, 'stream_id': stream.id, 'topic': 'Verona3', 'op': 'remove'}
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Please choose one: 'stream' or 'stream_id'.")
| 36.551136
| 98
| 0.611534
|
db04488c201a346e65fa0f38105931eb66b2bac0
| 2,269
|
py
|
Python
|
bin/entities2questions.py
|
mitchelljeff/hack1
|
990d873cbcd40d2978f44560016d18a76800908e
|
[
"MIT"
] | 1
|
2018-10-23T12:07:31.000Z
|
2018-10-23T12:07:31.000Z
|
bin/entities2questions.py
|
mitchelljeff/hack1
|
990d873cbcd40d2978f44560016d18a76800908e
|
[
"MIT"
] | null | null | null |
bin/entities2questions.py
|
mitchelljeff/hack1
|
990d873cbcd40d2978f44560016d18a76800908e
|
[
"MIT"
] | null | null | null |
from flask import Flask, jsonify, abort, request
import tensorflow as tf
from jack.core.data_structures import jtr_to_qasetting
from jack.readers import readers, reader_from_file
app = Flask(__name__)
et2rels={}
rel2qf={}
with open("en-r2q.format") as f:
for line in f:
fields=line.rstrip("\n").split("\t")
rel=fields[0]
e1type=rel[0:3]
qformat=fields[1]
if e1type not in et2rels:
et2rels[e1type]=[]
et2rels[e1type].append(rel)
rel2qf[rel]=qformat
reader = reader_from_file("remqa_reader")
@app.route('/api/qa', methods=['POST'])
def get_relations():
if not request.json:
abort(400)
support="NoAnswerFound "+request.json["text"]
entities=request.json["nel"]["entities"]
qas=[]
entdict={}
mentions={}
for e in entities:
entity=e["entity"]
ementions=e["mentions"]
eid=entity["id"]
entdict[eid]=entity
mentions[eid]=ementions
e1type=entity["type"].lower()
name=entity["currlangForm"]
rels=et2rels[e1type]
for rel in rels:
qid=eid+"\t"+rel
question=rel2qf[rel].format(name)
qas.append({"question":{"text":question,"id":qid},"answers":[{"text":"","span":[0,0]}]})
instances=[{"questions":qas,"support":[{"text":support}]}]
data ={'meta':'SUMMA','instances':instances}
qa = jtr_to_qasetting(data)
answers = reader.process_dataset(qa, 1)
results=[]
for i, a in enumerate(answers):
if a.text != "NoAnswerFound" and a.score > 0.0:
eid, rel = qa[i][0].id.split("\t")
ent = entdict[eid]
source = mentions[eid][0]["souceDocument"]["id"]
name = mentions[eid][0]["text"]
arg0 = [{"entities":{eid:name},"name":name}]
arg1 = [{"text":a.text}]
roles = {"ARG0":arg0, "ARG1":arg1}
fact = {"entities":{eid:ent}, "source":source, "name":rel, "roles":roles, "score":str(a.score)}
results.append({"question":qa[i][0].question, "answer":a.text, "span":[int(a.span[0]),int(a.span[1])], "score":str(a.score)})
return jsonify(results)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000)
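# A minimal request sketch (all values are hypothetical; the payload shape is inferred
# from the field accesses above -- "text" plus a "nel" block of linked entities, each
# carrying an "entity" record and its "mentions"):
#   curl -X POST http://localhost:5000/api/qa -H 'Content-Type: application/json' -d '{
#       "text": "Angela Merkel visited Paris.",
#       "nel": {"entities": [{
#           "entity": {"id": "Q567", "type": "PER", "currlangForm": "Angela Merkel"},
#           "mentions": [{"text": "Angela Merkel", "souceDocument": {"id": "doc-1"}}]}]}}'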
| 29.855263
| 137
| 0.575143
|
a8d10facbeb498bc8e94c30b1172f9fd8448e954
| 9,172
|
py
|
Python
|
aioredis/commands/string.py
|
emorozov/aioredis
|
bad0bd2d1435e56688d189cf3209beec3d239557
|
[
"MIT"
] | null | null | null |
aioredis/commands/string.py
|
emorozov/aioredis
|
bad0bd2d1435e56688d189cf3209beec3d239557
|
[
"MIT"
] | 75
|
2020-12-09T06:53:47.000Z
|
2022-03-25T01:05:24.000Z
|
aioredis/commands/string.py
|
emorozov/aioredis
|
bad0bd2d1435e56688d189cf3209beec3d239557
|
[
"MIT"
] | null | null | null |
from itertools import chain
from aioredis.util import wait_convert, wait_ok, _NOTSET
class StringCommandsMixin:
"""String commands mixin.
For commands details see: http://redis.io/commands/#string
"""
SET_IF_NOT_EXIST = "SET_IF_NOT_EXIST" # NX
SET_IF_EXIST = "SET_IF_EXIST" # XX
def append(self, key, value):
"""Append a value to key."""
return self.execute(b"APPEND", key, value)
def bitcount(self, key, start=None, end=None):
"""Count set bits in a string.
:raises TypeError: if only start or end specified.
"""
if start is None and end is not None:
raise TypeError("both start and stop must be specified")
elif start is not None and end is None:
raise TypeError("both start and stop must be specified")
elif start is not None and end is not None:
args = (start, end)
else:
args = ()
return self.execute(b"BITCOUNT", key, *args)
def bitfield(self):
raise NotImplementedError()
def bitop_and(self, dest, key, *keys):
"""Perform bitwise AND operations between strings."""
return self.execute(b"BITOP", b"AND", dest, key, *keys)
def bitop_or(self, dest, key, *keys):
"""Perform bitwise OR operations between strings."""
return self.execute(b"BITOP", b"OR", dest, key, *keys)
def bitop_xor(self, dest, key, *keys):
"""Perform bitwise XOR operations between strings."""
return self.execute(b"BITOP", b"XOR", dest, key, *keys)
def bitop_not(self, dest, key):
"""Perform bitwise NOT operations between strings."""
return self.execute(b"BITOP", b"NOT", dest, key)
def bitpos(self, key, bit, start=None, end=None):
"""Find first bit set or clear in a string.
:raises ValueError: if bit is not 0 or 1
"""
if bit not in (1, 0):
raise ValueError("bit argument must be either 1 or 0")
bytes_range = []
if start is not None:
bytes_range.append(start)
if end is not None:
if start is None:
bytes_range = [0, end]
else:
bytes_range.append(end)
return self.execute(b"BITPOS", key, bit, *bytes_range)
def decr(self, key):
"""Decrement the integer value of a key by one."""
return self.execute(b"DECR", key)
def decrby(self, key, decrement):
"""Decrement the integer value of a key by the given number.
:raises TypeError: if decrement is not int
"""
if not isinstance(decrement, int):
raise TypeError("decrement must be of type int")
return self.execute(b"DECRBY", key, decrement)
def get(self, key, *, encoding=_NOTSET):
"""Get the value of a key."""
return self.execute(b"GET", key, encoding=encoding)
def getbit(self, key, offset):
"""Returns the bit value at offset in the string value stored at key.
:raises TypeError: if offset is not int
:raises ValueError: if offset is less than 0
"""
if not isinstance(offset, int):
raise TypeError("offset argument must be int")
if offset < 0:
raise ValueError("offset must be greater equal 0")
return self.execute(b"GETBIT", key, offset)
def getrange(self, key, start, end, *, encoding=_NOTSET):
"""Get a substring of the string stored at a key.
:raises TypeError: if start or end is not int
"""
if not isinstance(start, int):
raise TypeError("start argument must be int")
if not isinstance(end, int):
raise TypeError("end argument must be int")
return self.execute(b"GETRANGE", key, start, end, encoding=encoding)
def getset(self, key, value, *, encoding=_NOTSET):
"""Set the string value of a key and return its old value."""
return self.execute(b"GETSET", key, value, encoding=encoding)
def incr(self, key):
"""Increment the integer value of a key by one."""
return self.execute(b"INCR", key)
def incrby(self, key, increment):
"""Increment the integer value of a key by the given amount.
:raises TypeError: if increment is not int
"""
if not isinstance(increment, int):
raise TypeError("increment must be of type int")
return self.execute(b"INCRBY", key, increment)
def incrbyfloat(self, key, increment):
"""Increment the float value of a key by the given amount.
        :raises TypeError: if increment is not float
"""
if not isinstance(increment, float):
raise TypeError("increment must be of type int")
fut = self.execute(b"INCRBYFLOAT", key, increment)
return wait_convert(fut, float)
def mget(self, key, *keys, encoding=_NOTSET):
"""Get the values of all the given keys."""
return self.execute(b"MGET", key, *keys, encoding=encoding)
def mset(self, *args):
"""Set multiple keys to multiple values or unpack dict to keys & values.
        :raises TypeError: if len of args is not even number
:raises TypeError: if len of args equals 1 and it is not a dict
"""
data = args
if len(args) == 1:
if not isinstance(args[0], dict):
raise TypeError("if one arg it should be a dict")
data = chain.from_iterable(args[0].items())
elif len(args) % 2 != 0:
raise TypeError("length of pairs must be even number")
fut = self.execute(b"MSET", *data)
return wait_ok(fut)
def msetnx(self, key, value, *pairs):
"""Set multiple keys to multiple values,
only if none of the keys exist.
        :raises TypeError: if len of pairs is not even number
"""
if len(pairs) % 2 != 0:
raise TypeError("length of pairs must be even number")
return self.execute(b"MSETNX", key, value, *pairs)
def psetex(self, key, milliseconds, value):
"""Set the value and expiration in milliseconds of a key.
:raises TypeError: if milliseconds is not int
"""
if not isinstance(milliseconds, int):
raise TypeError("milliseconds argument must be int")
fut = self.execute(b"PSETEX", key, milliseconds, value)
return wait_ok(fut)
def set(self, key, value, *, expire=0, pexpire=0, exist=None):
"""Set the string value of a key.
:raises TypeError: if expire or pexpire is not int
"""
if expire and not isinstance(expire, int):
raise TypeError("expire argument must be int")
if pexpire and not isinstance(pexpire, int):
raise TypeError("pexpire argument must be int")
args = []
if expire:
args[:] = [b"EX", expire]
if pexpire:
args[:] = [b"PX", pexpire]
if exist is self.SET_IF_EXIST:
args.append(b"XX")
elif exist is self.SET_IF_NOT_EXIST:
args.append(b"NX")
fut = self.execute(b"SET", key, value, *args)
return wait_ok(fut)
def setbit(self, key, offset, value):
"""Sets or clears the bit at offset in the string value stored at key.
:raises TypeError: if offset is not int
:raises ValueError: if offset is less than 0 or value is not 0 or 1
"""
if not isinstance(offset, int):
raise TypeError("offset argument must be int")
if offset < 0:
raise ValueError("offset must be greater equal 0")
if value not in (0, 1):
raise ValueError("value argument must be either 1 or 0")
return self.execute(b"SETBIT", key, offset, value)
def setex(self, key, seconds, value):
"""Set the value and expiration of a key.
If seconds is float it will be multiplied by 1000
coerced to int and passed to `psetex` method.
:raises TypeError: if seconds is neither int nor float
"""
if isinstance(seconds, float):
return self.psetex(key, int(seconds * 1000), value)
if not isinstance(seconds, int):
raise TypeError("milliseconds argument must be int")
fut = self.execute(b"SETEX", key, seconds, value)
return wait_ok(fut)
def setnx(self, key, value):
"""Set the value of a key, only if the key does not exist."""
fut = self.execute(b"SETNX", key, value)
return wait_convert(fut, bool)
def setrange(self, key, offset, value):
"""Overwrite part of a string at key starting at the specified offset.
:raises TypeError: if offset is not int
:raises ValueError: if offset less than 0
"""
if not isinstance(offset, int):
raise TypeError("offset argument must be int")
if offset < 0:
raise ValueError("offset must be greater equal 0")
return self.execute(b"SETRANGE", key, offset, value)
def strlen(self, key):
"""Get the length of the value stored in a key."""
return self.execute(b"STRLEN", key)
| 36.835341
| 80
| 0.602159
|
ef2847afc4c9ac868a63c5ba9a93f8b811317087
| 22,131
|
py
|
Python
|
plugins/module_utils/oci_compute_custom_helpers.py
|
slmjy/oci-ansible-collection
|
349c91e2868bf4706a6e3d6fb3b47fc622bfe11b
|
[
"Apache-2.0"
] | 108
|
2020-05-19T20:46:10.000Z
|
2022-03-25T14:10:01.000Z
|
plugins/module_utils/oci_compute_custom_helpers.py
|
slmjy/oci-ansible-collection
|
349c91e2868bf4706a6e3d6fb3b47fc622bfe11b
|
[
"Apache-2.0"
] | 90
|
2020-06-14T22:07:11.000Z
|
2022-03-07T05:40:29.000Z
|
plugins/module_utils/oci_compute_custom_helpers.py
|
slmjy/oci-ansible-collection
|
349c91e2868bf4706a6e3d6fb3b47fc622bfe11b
|
[
"Apache-2.0"
] | 42
|
2020-08-30T23:09:12.000Z
|
2022-03-25T16:58:01.000Z
|
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_config_utils,
)
from ansible.module_utils import six
try:
from oci.core import VirtualNetworkClient
from oci.util import to_dict
from oci.exceptions import ServiceError
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
logger = oci_common_utils.get_logger("oci_compute_custom_helpers")
def _debug(s):
get_logger().debug(s)
def get_logger():
return logger
class AppCatalogSubscriptionHelperCustom:
def __init__(self, module, resource_type, service_client_class, namespace):
module.params["resource_version"] = module.params["listing_resource_version"]
super(AppCatalogSubscriptionHelperCustom, self).__init__(
module, resource_type, service_client_class, namespace
)
# app_catalog_subscription does not have a resource id. It only has a create and delete operation.
# Both can be distinguished based on the `state` attribute.
def is_create(self):
if not self.module.params.get("state") == "present":
return False
return True
def is_delete(self):
if not self.module.params.get("state") == "absent":
return False
return True
def get_resource(self):
app_catalog_subscriptions = oci_common_utils.list_all_resources(
self.client.list_app_catalog_subscriptions,
compartment_id=self.module.params["compartment_id"],
listing_id=self.module.params["listing_id"],
)
for app_catalog_subscription in app_catalog_subscriptions:
if (
app_catalog_subscription.listing_resource_version
== self.module.params["resource_version"]
):
return oci_common_utils.get_default_response_from_resource(
resource=app_catalog_subscription
)
oci_common_utils.raise_does_not_exist_service_error(
message="The app catalog subscription does not exist."
)
def get_primary_ips(compute_client, network_client, instance):
if not instance:
return None, None
primary_public_ip = None
primary_private_ip = None
vnic_attachments = oci_common_utils.list_all_resources(
compute_client.list_vnic_attachments,
compartment_id=instance["compartment_id"],
instance_id=instance["id"],
)
if vnic_attachments:
for vnic_attachment in vnic_attachments:
if vnic_attachment.lifecycle_state == "ATTACHED":
try:
vnic = network_client.get_vnic(vnic_attachment.vnic_id).data
if vnic.is_primary:
if vnic.public_ip:
primary_public_ip = vnic.public_ip
if vnic.private_ip:
primary_private_ip = vnic.private_ip
except ServiceError as ex:
if ex.status == 404:
_debug(
"Either VNIC with ID {0} does not exist or you are not authorized to access it.".format(
vnic_attachment.vnic_id
)
)
return primary_public_ip, primary_private_ip
def add_primary_ip_info(module, compute_client, network_client, instance):
try:
primary_public_ip, primary_private_ip = get_primary_ips(
compute_client, network_client, instance
)
instance["primary_public_ip"] = primary_public_ip
instance["primary_private_ip"] = primary_private_ip
except ServiceError as ex:
instance["primary_public_ip"] = None
instance["primary_private_ip"] = None
module.fail_json(msg=ex.message)
class InstanceHelperCustom:
def __init__(self, *args, **kwargs):
super(InstanceHelperCustom, self).__init__(*args, **kwargs)
self.network_client = oci_config_utils.create_service_client(
self.module, VirtualNetworkClient
)
def get_create_model_dict_for_idempotence_check(self, create_model):
create_model_dict = super(
InstanceHelperCustom, self
).get_create_model_dict_for_idempotence_check(create_model)
# is_pv_encryption_in_transit_enabled is a top level param on LaunchInstanceDetails but it gets returned
# inside Instance.LaunchOptions so we need to propagate the value so that the existing resource matching
# logic works properly
if create_model_dict.get("is_pv_encryption_in_transit_enabled") is not None:
if create_model_dict.get("launch_options"):
if (
create_model_dict["launch_options"].get(
"is_pv_encryption_in_transit_enabled"
)
is None
):
create_model_dict["launch_options"][
"is_pv_encryption_in_transit_enabled"
] = create_model_dict.pop("is_pv_encryption_in_transit_enabled")
else:
# is_pv_encryption_in_transit_enabled is set both as a top level parameter and also under
# launch_options. If the values match ignore the top level parameter. Else throw an error.
if (
create_model_dict["launch_options"][
"is_pv_encryption_in_transit_enabled"
]
!= create_model_dict["is_pv_encryption_in_transit_enabled"]
):
                        self.module.fail_json(
                            msg="Conflicting values specified for is_pv_encryption_in_transit_enabled as a top level parameter and under launch_options parameter."
                        )
create_model_dict.pop("is_pv_encryption_in_transit_enabled")
else:
create_model_dict["launch_options"] = dict(
is_pv_encryption_in_transit_enabled=create_model_dict.pop(
"is_pv_encryption_in_transit_enabled"
)
)
# kms_key_id comes as null from get_instance even when instance has it. So ignore for idempotence.
if create_model_dict.get("source_details"):
create_model_dict["source_details"].pop("kms_key_id", None)
return create_model_dict
def prepare_result(self, *args, **kwargs):
result = super(InstanceHelperCustom, self).prepare_result(*args, **kwargs)
if result.get("instance"):
add_primary_ip_info(
self.module, self.client, self.network_client, result["instance"]
)
return result
class InstanceFactsHelperCustom:
def __init__(self, *args, **kwargs):
super(InstanceFactsHelperCustom, self).__init__(*args, **kwargs)
self.network_client = oci_config_utils.create_service_client(
self.module, VirtualNetworkClient
)
def get(self, *args, **kwargs):
instance = super(InstanceFactsHelperCustom, self).get(*args, **kwargs)
add_primary_ip_info(self.module, self.client, self.network_client, instance)
return instance
def list(self, *args, **kwargs):
instances = super(InstanceFactsHelperCustom, self).list(*args, **kwargs)
for instance in instances:
add_primary_ip_info(self.module, self.client, self.network_client, instance)
return instances
class BootVolumeAttachmentHelperCustom:
# An instance can only be attached to one boot volume and the name given to the attachment does not affect the
    # resource. Also a display_name update to the attachment resource does not seem to take effect.
# So exclude display_name for idempotency.
# Irrespective of the value we pass for display_name we get
# "Remote boot attachment for instance" as the name in response model
def get_exclude_attributes(self):
return super(
BootVolumeAttachmentHelperCustom, self
).get_exclude_attributes() + ["display_name"]
class ImageShapeCompatibilityEntryHelperCustom:
def is_update(self):
if not self.module.params.get("state") == "present":
return False
return True
def get_existing_resource_dict_for_update(self):
try:
get_response = self.get_resource()
except ServiceError as se:
if se.status != 404:
raise
return dict()
else:
return to_dict(get_response.data)
def is_update_necessary(self, existing_resource_dict):
if not existing_resource_dict:
return True
return super(
ImageShapeCompatibilityEntryHelperCustom, self
).is_update_necessary(existing_resource_dict)
class VnicAttachmentHelperCustom:
def get_create_model_dict_for_idempotence_check(self, create_model):
create_model_dict = super(
VnicAttachmentHelperCustom, self
).get_create_model_dict_for_idempotence_check(create_model)
# The VNIC details specified in create_vnic_details are not available in the vnic_attachment directly. It has
# vnic_id which can be used to fetch the required information. So update the key name for create_vnic_details
# in the create model. The vnic information is added to the existing resource with the same key in
# get_existing_resource_dict_for_idempotence_check so that the idempotence logic compares the vnic details.
if create_model_dict.get("create_vnic_details") is not None:
create_model_dict["vnic"] = create_model_dict.pop("create_vnic_details")
return create_model_dict
def get_existing_resource_dict_for_idempotence_check(self, existing_resource):
existing_resource_dict = super(
VnicAttachmentHelperCustom, self
).get_existing_resource_dict_for_idempotence_check(existing_resource)
if existing_resource_dict.get("vnic_id"):
# The information provided in create_vnic_details attr of create model does not exist directly in the
# get model but have to be fetched from the vnic details. Fetch and add the information to the existing
# resource so that the idempotence logic can compare the vnic details.
virtual_network_client = oci_config_utils.create_service_client(
self.module, VirtualNetworkClient
)
existing_vnic = to_dict(
virtual_network_client.get_vnic(
vnic_id=existing_resource_dict.get("vnic_id")
).data
)
existing_resource_dict["vnic"] = existing_vnic
return existing_resource_dict
# currently using string comparison method
def is_windows_instance(instance_os):
if instance_os is None:
return False
return "windows" in instance_os.lower()
def get_windows_iscsi_attach_commands(iqn, ipv4, chap_username, chap_secret):
connection_command = "Connect-IscsiTarget -NodeAddress {0} -TargetPortalAddress {1}".format(
iqn, ipv4
)
if chap_username:
connection_command = (
connection_command
+ " -AuthenticationType ONEWAYCHAP -ChapUsername {0} -ChapSecret {1}".format(
chap_username, chap_secret
)
)
connection_command = connection_command + " -IsPersistent $True"
iscsi_attach_commands = [
"Set-Service -Name msiscsi -StartupType Automatic",
"Start-Service msiscsi",
"New-IscsiTargetPortal -TargetPortalAddress {0}".format(ipv4),
connection_command,
]
return iscsi_attach_commands
def get_iscsi_attach_commands(volume_attachment, instance_os):
if not volume_attachment.get("attachment_type") == "iscsi":
return []
iqn = volume_attachment.get("iqn")
ipv4 = volume_attachment.get("ipv4")
port = volume_attachment.get("port")
chap_username = volume_attachment.get("chap_username")
chap_secret = volume_attachment.get("chap_secret")
# os specific commands
if is_windows_instance(instance_os):
iscsi_attach_commands = get_windows_iscsi_attach_commands(
iqn, ipv4, chap_username, chap_secret
)
else:
iscsi_attach_commands = [
"sudo iscsiadm -m node -o new -T {0} -p {1}:{2}".format(iqn, ipv4, port),
"sudo iscsiadm -m node -o update -T {0} -n node.startup -v automatic".format(
iqn
),
]
if chap_username:
iscsi_attach_commands.extend(
[
"sudo iscsiadm -m node -T {0} -p {1}:{2} -o update -n node.session.auth.authmethod -v CHAP".format(
iqn, ipv4, port
),
"sudo iscsiadm -m node -T {0} -p {1}:{2} -o update -n node.session.auth.username -v {3}".format(
iqn, ipv4, port, chap_username
),
"sudo iscsiadm -m node -T {0} -p {1}:{2} -o update -n node.session.auth.password -v {3}".format(
iqn, ipv4, port, chap_secret
),
]
)
iscsi_attach_commands.append(
"sudo iscsiadm -m node -T {0} -p {1}:{2} -l".format(iqn, ipv4, port)
)
return iscsi_attach_commands
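# Illustrative result (hypothetical target values) for a Linux instance with no CHAP
# credentials: get_iscsi_attach_commands({"attachment_type": "iscsi",
# "iqn": "iqn.2015-12.example:target", "ipv4": "169.254.2.2", "port": 3260}, "Oracle Linux")
# would return, in order:
#   sudo iscsiadm -m node -o new -T iqn.2015-12.example:target -p 169.254.2.2:3260
#   sudo iscsiadm -m node -o update -T iqn.2015-12.example:target -n node.startup -v automatic
#   sudo iscsiadm -m node -T iqn.2015-12.example:target -p 169.254.2.2:3260 -l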
def get_iscsi_detach_commands(volume_attachment, instance_os):
if not volume_attachment.get("attachment_type") == "iscsi":
return []
if is_windows_instance(instance_os):
return []
return [
"sudo iscsiadm -m node -T {0} -p {1}:{2} -u".format(
volume_attachment.get("iqn"),
volume_attachment.get("ipv4"),
volume_attachment.get("port"),
),
"sudo iscsiadm -m node -o delete -T {0}".format(volume_attachment.get("iqn")),
]
def with_iscsi_commands(volume_attachment, instance_os):
if not volume_attachment:
return volume_attachment
attach_commands = get_iscsi_attach_commands(volume_attachment, instance_os)
detach_commands = get_iscsi_detach_commands(volume_attachment, instance_os)
volume_attachment["iscsi_attach_commands"] = attach_commands
volume_attachment["iscsi_detach_commands"] = detach_commands
return volume_attachment
def get_instance(compute_client, instance_id):
return oci_common_utils.call_with_backoff(
compute_client.get_instance, instance_id=instance_id,
)
def get_image(compute_client, image_id):
return oci_common_utils.call_with_backoff(
compute_client.get_image, image_id=image_id,
)
def get_instance_os(compute_client, instance_id):
instance = get_instance(compute_client, instance_id)
image = get_image(compute_client, getattr(instance.data, "image_id", None))
operating_system = getattr(image.data, "operating_system", None)
return operating_system
def with_os_iscsi_commands(compute_client, volume_attachment):
instance_os = get_instance_os(compute_client, volume_attachment.get("instance_id"))
return with_iscsi_commands(volume_attachment, instance_os)
class VolumeAttachmentHelperCustom:
def prepare_result(self, *args, **kwargs):
result = super(VolumeAttachmentHelperCustom, self).prepare_result(
*args, **kwargs
)
if not result.get("volume_attachment"):
return result
result["volume_attachment"] = with_os_iscsi_commands(
self.client, result["volume_attachment"]
)
return result
def get_create_model_dict_for_idempotence_check(self, create_model):
create_model_dict = super(
VolumeAttachmentHelperCustom, self
).get_create_model_dict_for_idempotence_check(create_model)
        # The get model exposes the attachment type under "attachment_type", whereas the
        # create model calls it "type". So rename the key to avoid a mismatch during the
        # idempotence comparison.
create_model_dict["attachment_type"] = create_model_dict.pop("type", None)
return create_model_dict
class VolumeAttachmentFactsHelperCustom:
def get(self, *args, **kwargs):
volume_attachment = super(VolumeAttachmentFactsHelperCustom, self).get(
*args, **kwargs
)
return with_os_iscsi_commands(self.client, volume_attachment)
def list(self, *args, **kwargs):
return [
with_iscsi_commands(volume_attachment, "Linux")
for volume_attachment in super(
VolumeAttachmentFactsHelperCustom, self
).list(*args, **kwargs)
]
# The subtypes of ImageCapabilitySchemaDescriptor model has parameters with the same name but different types.
# Below is the list of subtypes and parameters (with different types):
# EnumStringImageCapabilitySchemaDescriptor -> values (list[str]), default_value (str)
# EnumIntegerImageCapabilityDescriptor -> values (list[int]), default_value (int)
# BooleanImageCapabilitySchemaDescriptor -> default_value (bool)
# We cannot use the original parameter names in the module since we need to specify the type of each parameter in the
# module. So the parameter names are changed and we have a parameter for each sub type. You can see the parameter name
# mapping in the function `get_updated_image_capabilities_schema_data_parameter_name`. The parameter names are changed
# using the renamingConfig (poms/core/renamingConfig.yaml).
# All the below customisations (related to image capabilities) are either to update the models before sending to sdk
# or to update the return properties after getting the results from sdk.
def get_updated_image_capabilities_schema_data_parameter_name(descriptor_type, param):
param_descriptor_type_map = {
("enumstring", "values"): "enum_string_values",
("enumstring", "default_value"): "enum_string_default_value",
("enuminteger", "values"): "enum_integer_values",
("enuminteger", "default_value"): "enum_integer_default_value",
("boolean", "default_value"): "boolean_default_value",
}
return param_descriptor_type_map.get((descriptor_type, param), param)
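# A quick sanity sketch of the renaming above (argument values are illustrative):
#   get_updated_image_capabilities_schema_data_parameter_name("enumstring", "values")
#       -> "enum_string_values"
#   get_updated_image_capabilities_schema_data_parameter_name("boolean", "default_value")
#       -> "boolean_default_value"
#   get_updated_image_capabilities_schema_data_parameter_name("enuminteger", "descriptor_type")
#       -> "descriptor_type"   (keys outside the map pass through unchanged)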
def get_resource_with_updated_schema_data_param_names(resource):
if resource and resource.get("schema_data"):
resource["schema_data"] = dict(
(
schema_data_key,
dict(
(
get_updated_image_capabilities_schema_data_parameter_name(
schema_data.get("descriptor_type"), k
),
v,
)
for k, v in six.iteritems(schema_data)
),
)
for schema_data_key, schema_data in six.iteritems(resource["schema_data"])
)
return resource
class ComputeImageCapabilitySchemaHelperCustom:
def __init__(self, *args, **kwargs):
super(ComputeImageCapabilitySchemaHelperCustom, self).__init__(*args, **kwargs)
if self.module.params.get("schema_data"):
self.module.params["schema_data"] = dict(
(
schema_data_key,
dict(
(self.get_original_sdk_parameter_name(k), v)
for k, v in six.iteritems(schema_data)
),
)
for schema_data_key, schema_data in six.iteritems(
self.module.params.get("schema_data")
)
)
def get_original_sdk_parameter_name(self, param):
if param in ["enum_string_values", "enum_integer_values"]:
return "values"
if param in [
"enum_string_default_value",
"enum_integer_default_value",
"boolean_default_value",
]:
return "default_value"
return param
def prepare_result(self, changed, resource_type, resource=None, msg=None):
result = super(ComputeImageCapabilitySchemaHelperCustom, self).prepare_result(
changed, resource_type, resource, msg
)
result[resource_type] = get_resource_with_updated_schema_data_param_names(
result[resource_type]
)
return result
class ComputeImageCapabilitySchemaFactsHelperCustom:
def get(self):
resource = super(ComputeImageCapabilitySchemaFactsHelperCustom, self).get()
return get_resource_with_updated_schema_data_param_names(resource)
class ComputeGlobalImageCapabilitySchemaVersionFactsHelperCustom:
def get(self):
resource = super(
ComputeGlobalImageCapabilitySchemaVersionFactsHelperCustom, self
).get()
return get_resource_with_updated_schema_data_param_names(resource)
def get_compute_instane_action_fn_attr(action):
if action == "change_compartment":
return "change_compartment"
return "instance_action"
class InstanceActionsHelperCustom:
def get_action_fn(self, action):
action_fn_name = get_compute_instane_action_fn_attr(action)
return super(InstanceActionsHelperCustom, self).get_action_fn(action_fn_name)
class InstanceConsoleConnectionHelperCustom:
def get_exclude_attributes(self):
        exclude_attributes = super(
            InstanceConsoleConnectionHelperCustom, self
        ).get_exclude_attributes()
        remove_exclude_attributes = ["public_key"]
        exclude_attributes = [
            x for x in exclude_attributes if x not in remove_exclude_attributes
        ]
        return exclude_attributes
| 39.66129
| 159
| 0.659979
|
b2963bf74ef5e38cfa925df823639577ceba941f
| 37,398
|
py
|
Python
|
larch/wxlib/columnframe.py
|
kbuc/xraylarch
|
3abb0d6bdc65cf2747a03dd114d98df317c0ac9f
|
[
"BSD-2-Clause"
] | 90
|
2015-01-10T21:57:25.000Z
|
2022-03-29T15:21:52.000Z
|
larch/wxlib/columnframe.py
|
kbuc/xraylarch
|
3abb0d6bdc65cf2747a03dd114d98df317c0ac9f
|
[
"BSD-2-Clause"
] | 225
|
2015-01-09T19:08:47.000Z
|
2022-03-31T15:55:54.000Z
|
larch/wxlib/columnframe.py
|
kbuc/xraylarch
|
3abb0d6bdc65cf2747a03dd114d98df317c0ac9f
|
[
"BSD-2-Clause"
] | 51
|
2015-03-13T10:03:28.000Z
|
2022-03-17T07:54:38.000Z
|
#!/usr/bin/env python
"""
"""
import os
import re
import numpy as np
np.seterr(all='ignore')
from functools import partial
import wx
import wx.lib.scrolledpanel as scrolled
import wx.lib.agw.flatnotebook as fnb
from wxmplot import PlotPanel
from wxutils import (SimpleText, FloatCtrl, GUIColors, Button, Choice,
pack, Popup, Check, MenuItem, CEN, RIGHT, LEFT,
FRAMESTYLE, HLine, Font)
import larch
from larch import Group
from larch.xafs.xafsutils import guess_energy_units
from larch.utils.strutils import fix_varname, file2groupname
from larch.io import look_for_nans
from larch.utils.physical_constants import PLANCK_HC, DEG2RAD
CEN |= wx.ALL
FNB_STYLE = fnb.FNB_NO_X_BUTTON|fnb.FNB_SMART_TABS
FNB_STYLE |= fnb.FNB_NO_NAV_BUTTONS|fnb.FNB_NODRAG
XPRE_OPS = ('', 'log(', '-log(')
YPRE_OPS = ('', 'log(', '-log(')
ARR_OPS = ('+', '-', '*', '/')
YERR_OPS = ('Constant', 'Sqrt(Y)', 'Array')
CONV_OPS = ('Lorenztian', 'Gaussian')
DATATYPES = ('raw', 'xas')
ENUNITS_TYPES = ('eV', 'keV', 'degrees', 'not energy')
class AddColumnsFrame(wx.Frame):
"""Add Column Labels for a larch grouop"""
def __init__(self, parent, group, on_ok=None):
self.parent = parent
self.group = group
self.on_ok = on_ok
wx.Frame.__init__(self, None, -1, 'Add Selected Columns',
style=wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL)
self.SetFont(Font(10))
sizer = wx.GridBagSizer(2, 2)
panel = scrolled.ScrolledPanel(self)
self.SetMinSize((550, 550))
self.wids = {}
lab_aname = SimpleText(panel, label=' Save Array Name:')
lab_range = SimpleText(panel, label=' Use column index:')
lab_regex = SimpleText(panel, label=' Use column label:')
wids = self.wids = {}
wids['arrayname'] = wx.TextCtrl(panel, value='sum', size=(175, -1))
wids['tc_nums'] = wx.TextCtrl(panel, value='1,3-10', size=(175, -1))
wids['tc_regex'] = wx.TextCtrl(panel, value='*fe*', size=(175, -1))
savebtn = Button(panel, 'Save', action=self.onOK)
plotbtn = Button(panel, 'Plot Sum', action=self.onPlot)
sel_nums = Button(panel, 'Select by Index',
action=self.onSelColumns)
sel_re = Button(panel, 'Select by Pattern',
action=self.onSelRegex)
sizer.Add(lab_aname, (0, 0), (1, 2), LEFT, 3)
sizer.Add(wids['arrayname'], (0, 2), (1, 1), LEFT, 3)
sizer.Add(plotbtn, (0, 3), (1, 1), LEFT, 3)
sizer.Add(savebtn, (0, 4), (1, 1), LEFT, 3)
sizer.Add(lab_range, (1, 0), (1, 2), LEFT, 3)
sizer.Add(wids['tc_nums'], (1, 2), (1, 1), LEFT, 3)
sizer.Add(sel_nums, (1, 3), (1, 2), LEFT, 3)
sizer.Add(lab_regex, (2, 0), (1, 2), LEFT, 3)
sizer.Add(wids['tc_regex'], (2, 2), (1, 1), LEFT, 3)
sizer.Add(sel_re, (2, 3), (1, 2), LEFT, 3)
sizer.Add(HLine(panel, size=(550, 2)), (3, 0), (1, 5), LEFT, 3)
ir = 4
cind = SimpleText(panel, label=' Index ')
csel = SimpleText(panel, label=' Select ')
cname = SimpleText(panel, label=' Array Name ')
sizer.Add(cind, (ir, 0), (1, 1), LEFT, 3)
sizer.Add(csel, (ir, 1), (1, 1), LEFT, 3)
sizer.Add(cname, (ir, 2), (1, 3), LEFT, 3)
for i, name in enumerate(group.array_labels):
ir += 1
cind = SimpleText(panel, label=' %i ' % (i+1))
cname = SimpleText(panel, label=' %s ' % name)
csel = Check(panel, label='', default=False)
self.wids["col_%d" % i] = csel
sizer.Add(cind, (ir, 0), (1, 1), LEFT, 3)
sizer.Add(csel, (ir, 1), (1, 1), LEFT, 3)
sizer.Add(cname, (ir, 2), (1, 3), LEFT, 3)
pack(panel, sizer)
panel.SetupScrolling()
mainsizer = wx.BoxSizer(wx.VERTICAL)
mainsizer.Add(panel, 1, wx.GROW|wx.ALL, 1)
pack(self, mainsizer)
self.Show()
self.Raise()
def make_sum(self):
        sel = []
for name, wid in self.wids.items():
if name.startswith('col_') and wid.IsChecked():
sel.append(int(name[4:]))
self.selected_columns = np.array(sel)
narr, npts = self.group.raw.data.shape
        ydat = np.zeros(npts, dtype=float)
for i in sel:
ydat += self.group.raw.data[i, :]
return ydat
def get_label(self):
label_in = self.wids["arrayname"].GetValue()
        base = fix_varname(label_in)
        label = base
        if label in self.group.array_labels:
            count = 1
            while label in self.group.array_labels and count < 1000:
                label = "%s_%d" % (base, count)
                count += 1
if label != label_in:
self.wids["arrayname"].SetValue(label)
return label
def onOK(self, event=None):
ydat = self.make_sum()
npts = len(ydat)
label = self.get_label()
self.group.array_labels.append(label)
new = np.append(self.group.raw.data, ydat.reshape(1, npts), axis=0)
self.group.raw.data = new
self.on_ok(label, self.selected_columns)
def onPlot(self, event=None):
ydat = self.make_sum()
xdat = self.group.xdat
label = self.get_label()
label = "%s (not saved)" % label
popts = dict(marker='o', markersize=4, linewidth=1.5, ylabel=label,
label=label, xlabel=self.group.plot_xlabel)
self.parent.plotpanel.plot(xdat, ydat, **popts)
def onSelColumns(self, event=None):
pattern = self.wids['tc_nums'].GetValue().split(',')
sel = []
for part in pattern:
if '-' in part:
start, stop = part.split('-')
try:
istart = int(start)
except ValueError:
istart = 1
try:
istop = int(stop)
except ValueError:
istop = len(self.group.array_labels) + 1
sel.extend(range(istart-1, istop))
else:
try:
sel.append(int(part)-1)
except:
pass
for name, wid in self.wids.items():
if name.startswith('col_'):
wid.SetValue(int(name[4:]) in sel)
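    # For example (hypothetical pattern): entering '1,3-10' in the index box selects the
    # 1-based columns 1 and 3 through 10, i.e. 0-based indices {0, 2, ..., 9}; an
    # open-ended piece such as '5-' runs from column 5 to the last column.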
def onSelRegex(self, event=None):
pattern = self.wids['tc_regex'].GetValue().replace('*', '.*')
pattern = pattern.replace('..*', '.*')
        for i, name in enumerate(self.group.array_labels):
            matched = re.search(pattern, name, flags=re.IGNORECASE) is not None
            self.wids["col_%d" % i].SetValue(matched)
class EditColumnFrame(wx.Frame) :
"""Edit Column Labels for a larch grouop"""
def __init__(self, parent, group, on_ok=None):
self.parent = parent
self.group = group
self.on_ok = on_ok
wx.Frame.__init__(self, None, -1, 'Edit Array Names',
style=wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL)
self.SetFont(Font(10))
sizer = wx.GridBagSizer(2, 2)
panel = scrolled.ScrolledPanel(self)
self.SetMinSize((675, 450))
self.wids = {}
ir = 0
sizer.Add(Button(panel, 'Apply Changes', size=(200, -1),
action=self.onOK),
(0, 1), (1, 2), LEFT, 3)
sizer.Add(Button(panel, 'Use Column Number', size=(200, -1),
action=self.onColNumber),
(0, 3), (1, 2), LEFT, 3)
sizer.Add(HLine(panel, size=(550, 2)),
(1, 1), (1, 5), LEFT, 3)
cind = SimpleText(panel, label='Column')
cold = SimpleText(panel, label='Current Name')
cnew = SimpleText(panel, label='Enter New Name')
cret = SimpleText(panel, label=' Result ', size=(150, -1))
cinfo = SimpleText(panel, label=' Data Range')
cplot = SimpleText(panel, label=' Plot')
ir = 2
sizer.Add(cind, (ir, 0), (1, 1), LEFT, 3)
sizer.Add(cold, (ir, 1), (1, 1), LEFT, 3)
sizer.Add(cnew, (ir, 2), (1, 1), LEFT, 3)
sizer.Add(cret, (ir, 3), (1, 1), LEFT, 3)
sizer.Add(cinfo, (ir, 4), (1, 1), LEFT, 3)
sizer.Add(cplot, (ir, 5), (1, 1), LEFT, 3)
for i, name in enumerate(group.array_labels):
ir += 1
cind = SimpleText(panel, label=' %i ' % (i+1))
cold = SimpleText(panel, label=' %s ' % name)
cret = SimpleText(panel, label=fix_varname(name), size=(150, -1))
cnew = wx.TextCtrl(panel, value=name, size=(150, -1),
style=wx.TE_PROCESS_ENTER)
cnew.Bind(wx.EVT_TEXT_ENTER, partial(self.update, index=i))
cnew.Bind(wx.EVT_KILL_FOCUS, partial(self.update, index=i))
arr = group.data[i,:]
info_str = " [ %8g : %8g ] " % (arr.min(), arr.max())
cinfo = SimpleText(panel, label=info_str)
cplot = Button(panel, 'Plot', action=partial(self.onPlot, index=i))
self.wids["%d" % i] = cnew
self.wids["ret_%d" % i] = cret
sizer.Add(cind, (ir, 0), (1, 1), LEFT, 3)
sizer.Add(cold, (ir, 1), (1, 1), LEFT, 3)
sizer.Add(cnew, (ir, 2), (1, 1), LEFT, 3)
sizer.Add(cret, (ir, 3), (1, 1), LEFT, 3)
sizer.Add(cinfo, (ir, 4), (1, 1), LEFT, 3)
sizer.Add(cplot, (ir, 5), (1, 1), LEFT, 3)
pack(panel, sizer)
panel.SetupScrolling()
mainsizer = wx.BoxSizer(wx.VERTICAL)
mainsizer.Add(panel, 1, wx.GROW|wx.ALL, 1)
pack(self, mainsizer)
self.Show()
self.Raise()
def onPlot(self, event=None, index=None):
if index is not None:
x = self.parent.workgroup.index
y = self.parent.workgroup.data[index, :]
label = self.wids["ret_%i" % index].GetLabel()
popts = dict(marker='o', markersize=4, linewidth=1.5,
ylabel=label, xlabel='data point', label=label)
self.parent.plotpanel.plot(x, y, **popts)
def onColNumber(self, evt=None, index=-1):
for name, wid in self.wids.items():
val = name
if name.startswith('ret_'):
val = name[4:]
setter = wid.SetLabel
else:
setter = wid.SetValue
setter("col_%d" % (int(val) +1))
def update(self, evt=None, index=-1):
newval = fix_varname(self.wids["%d" % index].GetValue())
self.wids["ret_%i" % index].SetLabel(newval)
def update_char(self, evt=None, index=-1):
if evt.GetKeyCode() == wx.WXK_RETURN:
self.update(evt=evt, index=index)
# evt.Skip()
def onOK(self, evt=None):
group = self.group
array_labels = []
for i in range(len(self.group.array_labels)):
newname = self.wids["ret_%i" % i].GetLabel()
array_labels.append(newname)
if callable(self.on_ok):
self.on_ok(array_labels)
self.Destroy()
class ColumnDataFileFrame(wx.Frame) :
"""Column Data File, select columns"""
def __init__(self, parent, filename=None, groupname=None,
last_array_sel=None, read_ok_cb=None,
edit_groupname=True, _larch=None):
self.parent = parent
self._larch = _larch
self.path = filename
self.extra_sums = {}
group = self.initgroup = self.read_column_file(self.path)
self.subframes = {}
self.workgroup = Group(raw=group)
for attr in ('path', 'filename', 'groupname', 'datatype',
'array_labels', 'data'):
setattr(self.workgroup, attr, getattr(group, attr, None))
arr_labels = [l.lower() for l in self.initgroup.array_labels]
self.orig_labels = arr_labels[:]
if self.workgroup.datatype is None:
self.workgroup.datatype = 'raw'
en_units = 'not energy'
for arrlab in arr_labels[:4]:
if 'ener' in arrlab.lower():
en_units = 'eV'
self.workgroup.datatype = 'xas'
self.read_ok_cb = read_ok_cb
self.array_sel = dict(xarr=None, yarr1=None, yarr2=None, yop='/',
ypop='', monod=3.1355316, en_units=en_units,
yerror='constant', yerr_val=1, yerr_arr=None)
if last_array_sel is not None:
self.array_sel.update(last_array_sel)
if self.array_sel['yarr2'] is None and 'i0' in arr_labels:
self.array_sel['yarr2'] = 'i0'
if self.array_sel['yarr1'] is None:
if 'itrans' in arr_labels:
self.array_sel['yarr1'] = 'itrans'
elif 'i1' in arr_labels:
self.array_sel['yarr1'] = 'i1'
message = "Data Columns for %s" % group.filename
wx.Frame.__init__(self, None, -1,
'Build Arrays from Data Columns for %s' % group.filename,
style=FRAMESTYLE)
x0, y0 = parent.GetPosition()
self.SetPosition((x0+60, y0+60))
self.SetFont(Font(10))
panel = wx.Panel(self)
self.SetMinSize((600, 600))
self.colors = GUIColors()
# title row
title = SimpleText(panel, message, font=Font(12),
colour=self.colors.title, style=LEFT)
yarr_labels = self.yarr_labels = arr_labels + ['1.0', '0.0', '']
xarr_labels = self.xarr_labels = arr_labels + ['_index']
self.xarr = Choice(panel, choices=xarr_labels, action=self.onXSelect, size=(150, -1))
self.yarr1 = Choice(panel, choices= arr_labels, action=self.onUpdate, size=(150, -1))
self.yarr2 = Choice(panel, choices=yarr_labels, action=self.onUpdate, size=(150, -1))
self.yerr_arr = Choice(panel, choices=yarr_labels, action=self.onUpdate, size=(150, -1))
self.yerr_arr.Disable()
self.datatype = Choice(panel, choices=DATATYPES, action=self.onUpdate, size=(150, -1))
self.datatype.SetStringSelection(self.workgroup.datatype)
self.en_units = Choice(panel, choices=ENUNITS_TYPES,
action=self.onEnUnitsSelect, size=(150, -1))
self.ypop = Choice(panel, choices=YPRE_OPS, action=self.onUpdate, size=(150, -1))
self.yop = Choice(panel, choices=ARR_OPS, action=self.onUpdate, size=(50, -1))
self.yerr_op = Choice(panel, choices=YERR_OPS, action=self.onYerrChoice, size=(150, -1))
self.yerr_op.SetSelection(0)
self.yerr_val = FloatCtrl(panel, value=1, precision=4, size=(90, -1))
self.monod_val = FloatCtrl(panel, value=3.1355316, precision=7, size=(90, -1))
xlab = SimpleText(panel, ' X array: ')
ylab = SimpleText(panel, ' Y array: ')
units_lab = SimpleText(panel, ' Units: ')
yerr_lab = SimpleText(panel, ' Yerror: ')
dtype_lab = SimpleText(panel, ' Data Type: ')
monod_lab = SimpleText(panel, ' Mono D spacing (Ang): ')
yerrval_lab = SimpleText(panel, ' Value:')
self.ysuf = SimpleText(panel, '')
self.message = SimpleText(panel, '', font=Font(11),
colour=self.colors.title, style=LEFT)
self.ypop.SetStringSelection(self.array_sel['ypop'])
self.yop.SetStringSelection(self.array_sel['yop'])
self.monod_val.SetValue(self.array_sel['monod'])
self.monod_val.SetAction(self.onUpdate)
self.monod_val.Enable(self.array_sel['en_units'].startswith('deg'))
self.en_units.SetStringSelection(self.array_sel['en_units'])
self.yerr_op.SetStringSelection(self.array_sel['yerror'])
self.yerr_val.SetValue(self.array_sel['yerr_val'])
if '(' in self.array_sel['ypop']:
self.ysuf.SetLabel(')')
ixsel, iysel, iy2sel, iyesel = 0, 1, len(yarr_labels)-1, len(yarr_labels)-1
if self.array_sel['xarr'] in xarr_labels:
ixsel = xarr_labels.index(self.array_sel['xarr'])
if self.array_sel['yarr1'] in arr_labels:
iysel = arr_labels.index(self.array_sel['yarr1'])
if self.array_sel['yarr2'] in yarr_labels:
iy2sel = yarr_labels.index(self.array_sel['yarr2'])
if self.array_sel['yerr_arr'] in yarr_labels:
iyesel = yarr_labels.index(self.array_sel['yerr_arr'])
self.xarr.SetSelection(ixsel)
self.yarr1.SetSelection(iysel)
self.yarr2.SetSelection(iy2sel)
self.yerr_arr.SetSelection(iyesel)
bpanel = wx.Panel(panel)
bsizer = wx.BoxSizer(wx.HORIZONTAL)
_ok = Button(bpanel, 'OK', action=self.onOK)
_cancel = Button(bpanel, 'Cancel', action=self.onCancel)
_edit = Button(bpanel, 'Edit Array Names', action=self.onEditNames)
_add = Button(bpanel, 'Select Columns to Sum', action=self.onAddColumns)
bsizer.Add(_ok)
bsizer.Add(_cancel)
bsizer.Add(_edit)
bsizer.Add(_add)
_ok.SetDefault()
pack(bpanel, bsizer)
sizer = wx.GridBagSizer(2, 2)
sizer.Add(title, (0, 0), (1, 7), LEFT, 5)
ir = 1
sizer.Add(xlab, (ir, 0), (1, 1), LEFT, 0)
sizer.Add(self.xarr, (ir, 1), (1, 1), LEFT, 0)
sizer.Add(units_lab, (ir, 2), (1, 2), RIGHT, 0)
sizer.Add(self.en_units, (ir, 4), (1, 2), LEFT, 0)
ir += 1
sizer.Add(dtype_lab, (ir, 0), (1, 1), LEFT, 0)
sizer.Add(self.datatype, (ir, 1), (1, 1), LEFT, 0)
sizer.Add(monod_lab, (ir, 2), (1, 2), RIGHT, 0)
sizer.Add(self.monod_val, (ir, 4), (1, 1), LEFT, 0)
ir += 1
sizer.Add(ylab, (ir, 0), (1, 1), LEFT, 0)
sizer.Add(self.ypop, (ir, 1), (1, 1), LEFT, 0)
sizer.Add(self.yarr1, (ir, 2), (1, 1), LEFT, 0)
sizer.Add(self.yop, (ir, 3), (1, 1), RIGHT, 0)
sizer.Add(self.yarr2, (ir, 4), (1, 1), LEFT, 0)
sizer.Add(self.ysuf, (ir, 5), (1, 1), LEFT, 0)
ir += 1
sizer.Add(yerr_lab, (ir, 0), (1, 1), LEFT, 0)
sizer.Add(self.yerr_op, (ir, 1), (1, 1), LEFT, 0)
sizer.Add(self.yerr_arr, (ir, 2), (1, 1), LEFT, 0)
sizer.Add(yerrval_lab, (ir, 3), (1, 1), RIGHT, 0)
sizer.Add(self.yerr_val, (ir, 4), (1, 2), LEFT, 0)
self.wid_filename = wx.TextCtrl(panel, value=group.filename,
size=(250, -1))
self.wid_groupname = wx.TextCtrl(panel, value=group.groupname,
size=(150, -1))
if not edit_groupname:
self.wid_groupname.Disable()
ir += 1
sizer.Add(SimpleText(panel, 'Display Name:'), (ir, 0), (1, 1), LEFT, 0)
sizer.Add(self.wid_filename, (ir, 1), (1, 2), LEFT, 0)
sizer.Add(SimpleText(panel, 'Group Name:'), (ir, 3), (1, 1), RIGHT, 0)
sizer.Add(self.wid_groupname, (ir, 4), (1, 2), LEFT, 0)
ir +=1
sizer.Add(self.message, (ir, 1), (1, 4), LEFT, 0)
ir += 1
sizer.Add(bpanel, (ir, 0), (1, 5), LEFT, 3)
pack(panel, sizer)
self.nb = fnb.FlatNotebook(self, -1, agwStyle=FNB_STYLE)
self.nb.SetTabAreaColour(wx.Colour(248,248,240))
self.nb.SetActiveTabColour(wx.Colour(254,254,195))
self.nb.SetNonActiveTabTextColour(wx.Colour(40,40,180))
self.nb.SetActiveTabTextColour(wx.Colour(80,0,0))
self.plotpanel = PlotPanel(self, messenger=self.plot_messages)
self.plotpanel.SetMinSize((200, 200))
textpanel = wx.Panel(self)
ftext = wx.TextCtrl(textpanel, style=wx.TE_MULTILINE|wx.TE_READONLY,
size=(400, 250))
ftext.SetValue(group.text)
ftext.SetFont(Font(10))
textsizer = wx.BoxSizer(wx.VERTICAL)
textsizer.Add(ftext, 1, LEFT|wx.GROW, 1)
pack(textpanel, textsizer)
self.nb.AddPage(textpanel, ' Text of Data File ', True)
self.nb.AddPage(self.plotpanel, ' Plot of Selected Arrays ', True)
mainsizer = wx.BoxSizer(wx.VERTICAL)
mainsizer.Add(panel, 0, wx.GROW|wx.ALL, 2)
mainsizer.Add(self.nb, 1, LEFT|wx.GROW, 2)
pack(self, mainsizer)
self.statusbar = self.CreateStatusBar(2, 0)
self.statusbar.SetStatusWidths([-1, -1])
statusbar_fields = [group.filename, ""]
for i in range(len(statusbar_fields)):
self.statusbar.SetStatusText(statusbar_fields[i], i)
self.set_energy_units()
self.Show()
self.Raise()
self.onUpdate(self)
def read_column_file(self, path):
"""read column file, generally as initial read"""
parent, filename = os.path.split(path)
with open(path, 'r') as fh:
lines = fh.readlines()
text = ''.join(lines)
line1 = lines[0].lower()
reader = 'read_ascii'
if 'epics scan' in line1:
reader = 'read_gsescan'
if 'xdi' in line1:
reader = 'read_xdi'
if 'epics stepscan file' in line1 :
reader = 'read_gsexdi'
elif ("#s" in line1) or ("#f" in line1):
reader = 'read_specfile'
if reader in ('read_xdi', 'read_gsexdi'):
# first check for Nans and Infs
nan_result = look_for_nans(path)
if 'read error' in nan_result.message:
title = "Cannot read %s" % path
message = "Error reading %s\n%s" %(path, nan_result.message)
r = Popup(self.parent, message, title)
return None
if 'no data' in nan_result.message:
title = "No data in %s" % path
message = "No data found in file %s" % path
r = Popup(self.parent, message, title)
return None
if ('has nans' in nan_result.message or
'has infs' in nan_result.message):
reader = 'read_ascii'
tmpname = '_tmp_file_'
read_cmd = "%s = %s('%s')" % (tmpname, reader, path)
self.reader = reader
_larch = self._larch
if (not isinstance(_larch, larch.Interpreter) and
hasattr(_larch, '_larch')):
_larch = _larch._larch
try:
_larch.eval(read_cmd, add_history=True)
except:
pass
if _larch.error:
msg = ["Error trying to read '%s':" % path, ""]
for err in _larch.error:
exc_name, errmsg = err.get_error()
msg.append(errmsg)
title = "Cannot read %s" % path
r = Popup(self.parent, "\n".join(msg), title)
return None
group = _larch.symtable.get_symbol(tmpname)
# _larch.symtable.del_symbol(tmpname)
group.text = text
group.path = path
group.filename = filename
group.groupname = file2groupname(filename,
symtable=self._larch.symtable)
return group
def show_subframe(self, name, frameclass, **opts):
shown = False
if name in self.subframes:
try:
self.subframes[name].Raise()
shown = True
except:
pass
if not shown:
self.subframes[name] = frameclass(self, **opts)
self.subframes[name].Show()
self.subframes[name].Raise()
def onAddColumns(self, event=None):
self.show_subframe('addcol', AddColumnsFrame,
group=self.workgroup,
on_ok=self.add_columns)
def add_columns(self, label, selection):
new_labels = self.workgroup.array_labels
self.set_array_labels(new_labels)
self.yarr1.SetStringSelection(new_labels[-1])
self.extra_sums[label] = selection
self.onUpdate()
def onEditNames(self, evt=None):
self.show_subframe('editcol', EditColumnFrame,
group=self.workgroup,
on_ok=self.set_array_labels)
def set_array_labels(self, arr_labels):
self.workgroup.array_labels = arr_labels
yarr_labels = self.yarr_labels = arr_labels + ['1.0', '0.0', '']
xarr_labels = self.xarr_labels = arr_labels + ['_index']
def update(wid, choices):
curstr = wid.GetStringSelection()
curind = wid.GetSelection()
wid.SetChoices(choices)
if curstr in choices:
wid.SetStringSelection(curstr)
else:
wid.SetSelection(curind)
update(self.xarr, xarr_labels)
update(self.yarr1, yarr_labels)
update(self.yarr2, yarr_labels)
update(self.yerr_arr, yarr_labels)
self.onUpdate()
def onOK(self, event=None):
""" build arrays according to selection """
user_filename = self.wid_filename.GetValue()
if self.wid_groupname is not None:
groupname = fix_varname(self.wid_groupname.GetValue())
en_units = self.en_units.GetStringSelection()
dspace = float(self.monod_val.GetValue())
xarr = self.xarr.GetStringSelection()
yarr1 = self.yarr1.GetStringSelection()
yarr2 = self.yarr2.GetStringSelection()
ypop = self.ypop.GetStringSelection()
yop = self.yop.GetStringSelection()
yerr_op = self.yerr_op.GetStringSelection()
yerr_arr = self.yerr_arr.GetStringSelection()
yerr_idx = self.yerr_arr.GetSelection()
yerr_val = self.yerr_val.GetValue()
yerr_expr = '1'
if yerr_op.startswith('const'):
yerr_expr = "%f" % self.yerr_val.GetValue()
elif yerr_op.startswith('array'):
yerr_expr = '%%s.data[%i, :]' % self.yerr_arr.GetSelection()
elif yerr_op.startswith('sqrt'):
yerr_expr = 'sqrt(%s.ydat)'
self.expressions['yerr'] = yerr_expr
# generate script to pass back to calling program:
read_cmd = "%s('{path}', labels='%s')" % (self.reader,
', '.join(self.orig_labels))
buff = ["{group} = %s" % read_cmd,
"{group}.path = '{path}'",
"{group}.is_frozen = False"]
for label, selection in self.extra_sums.items():
buff.append("{group}.array_labels.append('%s')" % label)
buff.append("_tmparr = {group}.data[%s, :].sum(axis=0)" % repr(selection))
buff.append("_tmpn = len(_tmparr)")
buff.append("{group}.data = append({group}.data, _tmparr.reshape(1, _tmpn), axis=0)")
buff.append("del _tmparr, _tmpn")
for attr in ('datatype', 'plot_xlabel', 'plot_ylabel'):
val = getattr(self.workgroup, attr)
buff.append("{group}.%s = '%s'" % (attr, val))
expr = self.expressions['xdat'].replace('%s', '{group:s}')
if en_units.startswith('deg'):
buff.append(f"mono_dspace = {dspace:.9f}")
buff.append(f"{{group}}.xdat = PLANCK_HC/(2*mono_dspace*sin(DEG2RAD*({expr:s})))")
elif en_units.startswith('keV'):
buff.append(f"{{group}}.xdat = 1000.0*{expr:s}")
else:
buff.append(f"{{group}}.xdat = {expr:s}")
for aname in ('ydat', 'yerr'):
expr = self.expressions[aname].replace('%s', '{group:s}')
buff.append("{group}.%s = %s" % (aname, expr))
if getattr(self.workgroup, 'datatype', 'raw') == 'xas':
if self.reader == 'read_gsescan':
buff.append("{group}.energy = {group}.x")
else:
buff.append("{group}.energy = {group}.xdat")
buff.append("{group}.mu = {group}.ydat")
buff.append("sort_xafs({group}, overwrite=True, fix_repeats=True)")
else:
buff.append("{group}.scale = 1./({group}.ydat.ptp()+1.e-16)")
script = "\n".join(buff)
self.array_sel['xarr'] = xarr
self.array_sel['yarr1'] = yarr1
self.array_sel['yarr2'] = yarr2
self.array_sel['yop'] = yop
self.array_sel['ypop'] = ypop
self.array_sel['yerror'] = yerr_op
self.array_sel['yerr_val'] = yerr_val
self.array_sel['yerr_arr'] = yerr_arr
self.array_sel['monod'] = dspace
self.array_sel['en_units'] = en_units
if self.read_ok_cb is not None:
self.read_ok_cb(script, self.path, groupname=groupname,
filename=user_filename,
array_sel=self.array_sel)
for f in self.subframes.values():
try:
f.Destroy()
except:
pass
self.Destroy()
def onCancel(self, event=None):
self.workgroup.import_ok = False
for f in self.subframes.values():
try:
f.Destroy()
except:
pass
self.Destroy()
def onYerrChoice(self, evt=None):
yerr_choice = evt.GetString()
self.yerr_arr.Disable()
self.yerr_val.Disable()
if 'const' in yerr_choice.lower():
self.yerr_val.Enable()
elif 'array' in yerr_choice.lower():
self.yerr_arr.Enable()
self.onUpdate()
def onXSelect(self, evt=None):
ix = self.xarr.GetSelection()
xname = self.xarr.GetStringSelection()
workgroup = self.workgroup
rdata = self.initgroup.data
ncol, npts = rdata.shape
if xname.startswith('_index') or ix >= ncol:
workgroup.xdat = 1.0*np.arange(npts)
else:
workgroup.xdat = 1.0*rdata[ix, :]
self.monod_val.Disable()
if self.datatype.GetStringSelection().strip().lower() == 'raw':
self.en_units.SetSelection(4)
else:
eguess = guess_energy_units(workgroup.xdat)
if eguess.startswith('keV'):
self.en_units.SetSelection(1)
elif eguess.startswith('deg'):
self.en_units.SetSelection(2)
self.monod_val.Enable()
else:
self.en_units.SetSelection(0)
def onEnUnitsSelect(self, evt=None):
self.monod_val.Enable(self.en_units.GetStringSelection().startswith('deg'))
self.onUpdate()
def set_energy_units(self):
ix = self.xarr.GetSelection()
xname = self.xarr.GetStringSelection()
rdata = self.initgroup.data
ncol, npts = rdata.shape
workgroup = self.workgroup
if xname.startswith('_index') or ix >= ncol:
workgroup.xdat = 1.0*np.arange(npts)
else:
workgroup.xdat = 1.0*rdata[ix, :]
if self.datatype.GetStringSelection().strip().lower() != 'raw':
eguess = guess_energy_units(workgroup.xdat)
if eguess.startswith('eV'):
self.en_units.SetStringSelection('eV')
elif eguess.startswith('keV'):
self.en_units.SetStringSelection('keV')
def onUpdate(self, value=None, evt=None):
"""column selections changed calc xdat and ydat"""
# dtcorr = self.dtcorr.IsChecked()
dtcorr = False
rawgroup = self.initgroup
workgroup = self.workgroup
rdata = self.initgroup.data
ix = self.xarr.GetSelection()
xname = self.xarr.GetStringSelection()
exprs = dict(xdat=None, ydat=None, yerr=None)
ncol, npts = rdata.shape
workgroup.index = 1.0*np.arange(npts)
if xname.startswith('_index') or ix >= ncol:
workgroup.xdat = 1.0*np.arange(npts)
xname = '_index'
exprs['xdat'] = 'arange(%i)' % npts
else:
workgroup.xdat = 1.0*rdata[ix, :]
exprs['xdat'] = '%%s.data[%i, : ]' % ix
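        # Note on the '%%s' templates used here (illustrative): '%%s.data[%i, : ]' % 3
        # evaluates to '%s.data[3, : ]'; the remaining '%s' placeholder is later
        # swapped for '{group:s}' via .replace('%s', '{group:s}'), so the final
        # script line reads '{group:s}.data[3, : ]' once a group name is filled in.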
workgroup.datatype = self.datatype.GetStringSelection().strip().lower()
if workgroup.datatype == 'raw':
self.en_units.SetStringSelection('not energy')
xlabel = xname
en_units = self.en_units.GetStringSelection()
if en_units.startswith('deg'):
dspace = float(self.monod_val.GetValue())
workgroup.xdat = PLANCK_HC/(2*dspace*np.sin(DEG2RAD*workgroup.xdat))
xlabel = xname + ' (eV)'
elif en_units.startswith('keV'):
workgroup.xdat *= 1000.0
xlabel = xname + ' (eV)'
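        # Worked example (assuming PLANCK_HC is the usual hc constant,
        # ~12398.4 eV*Angstrom, and a Si(111) d-spacing of ~3.1355 Angstrom):
        # a mono angle of 12 deg gives E = 12398.4/(2*3.1355*sin(12 deg)) ~ 9.5 keV.
        # onOK() emits the same Bragg-law conversion into the generated script.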
def pre_op(opwid, arr):
opstr = opwid.GetStringSelection().strip()
suf = ''
if opstr in ('-log(', 'log('):
suf = ')'
if opstr == 'log(':
arr = np.log(arr)
elif opstr == '-log(':
arr = -np.log(arr)
arr[np.where(np.isnan(arr))] = 0
return suf, opstr, arr
yname1 = self.yarr1.GetStringSelection().strip()
yname2 = self.yarr2.GetStringSelection().strip()
iy1 = self.yarr1.GetSelection()
iy2 = self.yarr2.GetSelection()
yop = self.yop.GetStringSelection().strip()
ylabel = yname1
if len(yname2) == 0:
yname2 = '1.0'
else:
ylabel = "%s%s%s" % (ylabel, yop, yname2)
if yname1 == '0.0':
yarr1 = np.zeros(npts)*1.0
yexpr1 = 'zeros(%i)' % npts
elif len(yname1) == 0 or yname1 == '1.0' or iy1 >= ncol:
yarr1 = np.ones(npts)*1.0
yexpr1 = 'ones(%i)' % npts
else:
yarr1 = rdata[iy1, :]
yexpr1 = '%%s.data[%i, : ]' % iy1
if yname2 == '0.0':
yarr2 = np.zeros(npts)*1.0
yexpr2 = '0.0'
elif len(yname2) == 0 or yname2 == '1.0' or iy2 >= ncol:
yarr2 = np.ones(npts)*1.0
yexpr2 = '1.0'
else:
yarr2 = rdata[iy2, :]
yexpr2 = '%%s.data[%i, : ]' % iy2
workgroup.ydat = yarr1
exprs['ydat'] = yexpr1
if yop in ('+', '-', '*', '/'):
exprs['ydat'] = "%s %s %s" % (yexpr1, yop, yexpr2)
if yop == '+':
workgroup.ydat = yarr1.__add__(yarr2)
elif yop == '-':
workgroup.ydat = yarr1.__sub__(yarr2)
elif yop == '*':
workgroup.ydat = yarr1.__mul__(yarr2)
elif yop == '/':
workgroup.ydat = yarr1.__truediv__(yarr2)
ysuf, ypop, workgroup.ydat = pre_op(self.ypop, workgroup.ydat)
self.ysuf.SetLabel(ysuf)
exprs['ydat'] = '%s%s%s' % (ypop, exprs['ydat'], ysuf)
yerr_op = self.yerr_op.GetStringSelection().lower()
exprs['yerr'] = '1'
if yerr_op.startswith('const'):
yerr = self.yerr_val.GetValue()
exprs['yerr'] = '%f' % yerr
elif yerr_op.startswith('array'):
iyerr = self.yerr_arr.GetSelection()
yerr = rdata[iyerr, :]
exprs['yerr'] = '%%s.data[%i, :]' % iyerr
elif yerr_op.startswith('sqrt'):
yerr = np.sqrt(workgroup.ydat)
exprs['yerr'] = 'sqrt(%s.ydat)'
self.expressions = exprs
self.array_sel = {'xarr': xname,
'ypop': ypop, 'yop': yop,
'yarr1': yname1, 'yarr2': yname2}
try:
npts = min(len(workgroup.xdat), len(workgroup.ydat))
except AttributeError:
return
except ValueError:
return
en = workgroup.xdat
if ((workgroup.datatype == 'xas') and
((len(en) > 1000 or any(np.diff(en) < 0) or
((max(en)-min(en)) > 350 and
(np.diff(en[:100]).mean() < 1.0))))):
self.message.SetLabel("Warning: XAS data may need to be rebinned!")
else:
self.message.SetLabel("")
workgroup.filename = rawgroup.filename
workgroup.npts = npts
workgroup.plot_xlabel = xlabel
workgroup.plot_ylabel = ylabel
workgroup.xdat = np.array(workgroup.xdat[:npts])
workgroup.ydat = np.array(workgroup.ydat[:npts])
workgroup.y = workgroup.ydat
workgroup.yerr = yerr
if isinstance(yerr, np.ndarray):
workgroup.yerr = np.array(yerr[:npts])
if workgroup.datatype == 'xas':
workgroup.energy = workgroup.xdat
workgroup.mu = workgroup.ydat
path, fname = os.path.split(workgroup.filename)
popts = dict(marker='o', markersize=4, linewidth=1.5,
title=fname, ylabel=ylabel, xlabel=xlabel,
label="%s: %s" % (fname, workgroup.plot_ylabel))
self.plotpanel.plot(workgroup.xdat, workgroup.ydat, **popts)
for i in range(self.nb.GetPageCount()):
if 'plot' in self.nb.GetPageText(i).lower():
self.nb.SetSelection(i)
def plot_messages(self, msg, panel=1):
self.statusbar.SetStatusText(msg, panel)
| 37.890578
| 97
| 0.542542
|
c2ca2b3a38087cb4d32b0008fe29ed6587ef3ee6
| 320
|
py
|
Python
|
Python/lc_70_climbing_stairs.py
|
cmattey/leetcode_problems
|
fe57e668db23f7c480835c0a10f363d718fbaefd
|
[
"MIT"
] | 6
|
2019-07-01T22:03:25.000Z
|
2020-04-06T15:17:46.000Z
|
Python/lc_70_climbing_stairs.py
|
cmattey/leetcode_problems
|
fe57e668db23f7c480835c0a10f363d718fbaefd
|
[
"MIT"
] | null | null | null |
Python/lc_70_climbing_stairs.py
|
cmattey/leetcode_problems
|
fe57e668db23f7c480835c0a10f363d718fbaefd
|
[
"MIT"
] | 1
|
2020-04-01T22:31:41.000Z
|
2020-04-01T22:31:41.000Z
|
# 70. Climbing Stairs
# Time: O(n)
# Space: O(1)
class Solution:
def climbStairs(self, n: int) -> int:
if n<=2:
return n
first = 1
second = 2
for i in range(3,n+1):
ans = first+second
first = second
second = ans
return ans
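# Quick sanity check of the recurrence (illustrative): Solution().climbStairs(5) == 8,
# since the counts follow the Fibonacci-style sequence 1, 2, 3, 5, 8.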
| 16
| 41
| 0.4625
|
19bf7cd146f0ca4589831af303b5a7b0a3db47dd
| 898
|
py
|
Python
|
contiguous/types.py
|
bmcollier/division
|
45f4bef826855da3c18adb5e8cab74adc07db1c2
|
[
"BSD-3-Clause"
] | null | null | null |
contiguous/types.py
|
bmcollier/division
|
45f4bef826855da3c18adb5e8cab74adc07db1c2
|
[
"BSD-3-Clause"
] | null | null | null |
contiguous/types.py
|
bmcollier/division
|
45f4bef826855da3c18adb5e8cab74adc07db1c2
|
[
"BSD-3-Clause"
] | null | null | null |
class ContiguousType:
def __init__(self, name: str, length: int):
self.name = name
self.length = int(length)
class String(ContiguousType):
def __init__(self, name: str, length: int):
super().__init__(name, length)
class Number(ContiguousType):
"""
The number class inherits a great deal from COBOL. We use the same
terminology as COBOL's data divisions to define numbers, and they are
intended to behave in the same way. For more information on the syntax for
initialising a number, please see the documentation in the README.md, in
addition to the tests and examples below.
>>> balance = Number("balance", "S9(4)")
>>> balance.set(-41)
    >>> balance.get()
    '-0041'
"""
def __init__(self, name: str, format: str):
self.name = name
self.length = len(format)
self.format = format
| 29.933333
| 78
| 0.646993
|
ff437051595f7183e63111185a19f4b4ce1e4258
| 544
|
py
|
Python
|
wazimap_ng/datasets/migrations/0093_auto_20200520_2031.py
|
arghyaiitb/wazimap-ng
|
2a77860526d865b8fd0c22a2204f121fdb3b28a0
|
[
"Apache-2.0"
] | 11
|
2019-12-31T20:27:22.000Z
|
2022-03-10T03:55:38.000Z
|
wazimap_ng/datasets/migrations/0093_auto_20200520_2031.py
|
arghyaiitb/wazimap-ng
|
2a77860526d865b8fd0c22a2204f121fdb3b28a0
|
[
"Apache-2.0"
] | 164
|
2020-02-06T15:02:22.000Z
|
2022-03-30T22:42:00.000Z
|
wazimap_ng/datasets/migrations/0093_auto_20200520_2031.py
|
arghyaiitb/wazimap-ng
|
2a77860526d865b8fd0c22a2204f121fdb3b28a0
|
[
"Apache-2.0"
] | 16
|
2020-01-03T20:30:24.000Z
|
2022-01-11T11:05:15.000Z
|
# Generated by Django 2.2.10 on 2020-05-20 20:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('profile', '0036_auto_20200508_0751'),
('datasets', '0092_auto_20200520_1048'),
]
operations = [
migrations.AddField(
model_name='dataset',
name='profile',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='profile.Profile'),
)
]
| 25.904762
| 126
| 0.645221
|
313b2d63b884f99a56feeb307abf8aded5ba1c1c
| 5,031
|
py
|
Python
|
etc/dbus-serialbattery/daly.py
|
mikejager/dbus-serialbattery
|
6c705db2167b65a8c1a60d2575f91d4edbb84091
|
[
"MIT"
] | null | null | null |
etc/dbus-serialbattery/daly.py
|
mikejager/dbus-serialbattery
|
6c705db2167b65a8c1a60d2575f91d4edbb84091
|
[
"MIT"
] | null | null | null |
etc/dbus-serialbattery/daly.py
|
mikejager/dbus-serialbattery
|
6c705db2167b65a8c1a60d2575f91d4edbb84091
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from battery import Protection, Battery, Cell
from utils import *
from struct import *
class Daly(Battery):
def __init__(self, port,baud,address):
super(Daly, self).__init__(port,baud)
self.charger_connected = None
self.load_connected = None
self.command_address = address
self.cell_min_voltage = None
self.cell_max_voltage = None
self.cell_min_no = None
self.cell_max_no = None
self.poll_interval = 2000
self.type = self.BATTERYTYPE
# command bytes [StartFlag=A5][Address=40][Command=94][DataLength=8][8x zero bytes][checksum]
command_base = b"\xA5\x40\x94\x08\x00\x00\x00\x00\x00\x00\x00\x00\x81"
command_soc = b"\x90"
command_minmax_cell_volts = b"\x91"
command_minmax_temp = b"\x92"
command_fet = b"\x93"
command_status = b"\x94"
command_cell_volts = b"\x95"
command_temp = b"\x96"
command_cell_balance = b"\x97"
command_alarm = b"\x98"
BATTERYTYPE = "Daly"
LENGTH_CHECK = 4
LENGTH_POS = 3
CURRENT_ZERO_CONSTANT = 30000
TEMP_ZERO_CONSTANT = 40
def test_connection(self):
return self.read_status_data()
def get_settings(self):
self.max_battery_current = MAX_BATTERY_CURRENT
self.max_battery_discharge_current = MAX_BATTERY_DISCHARGE_CURRENT
return True
def refresh_data(self):
result = self.read_soc_data()
result = result and self.read_cell_voltage_range_data()
result = result and self.read_temperature_range_data()
result = result and self.read_fed_data()
return result
def read_status_data(self):
status_data = self.read_serial_data_daly(self.command_status)
# check if connection success
if status_data is False:
return False
self.cell_count, self.temp_sensors, self.charger_connected, self.load_connected, \
state, self.cycles = unpack_from('>bb??bhx', status_data)
self.max_battery_voltage = MAX_CELL_VOLTAGE * self.cell_count
self.min_battery_voltage = MIN_CELL_VOLTAGE * self.cell_count
self.hardware_version = "DalyBMS " + str(self.cell_count) + " cells"
logger.info(self.hardware_version)
return True
def read_soc_data(self):
soc_data = self.read_serial_data_daly(self.command_soc)
# check if connection success
if soc_data is False:
return False
voltage, tmp, current, soc = unpack_from('>hhhh', soc_data)
self.voltage = voltage / 10
self.current = (current - self.CURRENT_ZERO_CONSTANT) / -10 * INVERT_CURRENT_MEASUREMENT
self.soc = soc / 10
return True
def read_cell_voltage_range_data(self):
minmax_data = self.read_serial_data_daly(self.command_minmax_cell_volts)
# check if connection success
if minmax_data is False:
return False
cell_max_voltage,self.cell_max_no,cell_min_voltage, self.cell_min_no = unpack_from('>hbhb', minmax_data)
# Daly cells numbers are 1 based and not 0 based
self.cell_min_no -= 1
self.cell_max_no -= 1
# Voltage is returned in mV
self.cell_max_voltage = cell_max_voltage / 1000
self.cell_min_voltage = cell_min_voltage / 1000
return True
def read_temperature_range_data(self):
minmax_data = self.read_serial_data_daly(self.command_minmax_temp)
# check if connection success
if minmax_data is False:
return False
max_temp,max_no,min_temp, min_no = unpack_from('>bbbb', minmax_data)
self.temp1 = min_temp - self.TEMP_ZERO_CONSTANT
self.temp2 = max_temp - self.TEMP_ZERO_CONSTANT
return True
def read_fed_data(self):
fed_data = self.read_serial_data_daly(self.command_fet)
# check if connection success
if fed_data is False:
return False
status, self.charge_fet, self.discharge_fet, bms_cycles, capacity_remain = unpack_from('>b??BL', fed_data)
self.capacity_remain = capacity_remain / 1000
return True
def generate_command(self, command):
buffer = bytearray(self.command_base)
buffer[1] = self.command_address[0] # Always serial 40 or 80
buffer[2] = command[0]
buffer[12] = sum(buffer[:12]) & 0xFF #checksum calc
return buffer
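    # Worked example (illustrative): with address 0x40 and command_soc (0x90),
    # generate_command() produces A5 40 90 08 followed by eight zero bytes and
    # the checksum (0xA5 + 0x40 + 0x90 + 0x08) & 0xFF = 0x7D, matching the
    # frame layout noted above command_base.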
def read_serial_data_daly(self, command):
data = read_serial_data(self.generate_command(command), self.port, self.baud_rate, self.LENGTH_POS, self.LENGTH_CHECK)
if data is False:
return False
start, flag, command_ret, length = unpack_from('BBBB', data)
checksum = sum(data[:-1]) & 0xFF
if start == 165 and length == 8 and checksum == data[12]:
return data[4:length+4]
else:
logger.error(">>> ERROR: Incorrect Reply")
return False
| 36.722628
| 126
| 0.665077
|
fdfdabff354a07cf82fe4ecd1a043416321cc1bf
| 269
|
py
|
Python
|
heat/utils/tests/test_vision_transforms.py
|
shssf/heat
|
9db0a936c92491fa5aa862f558cb385c9916216b
|
[
"MIT"
] | 105
|
2018-05-18T11:34:03.000Z
|
2022-03-29T06:37:23.000Z
|
heat/utils/tests/test_vision_transforms.py
|
shssf/heat
|
9db0a936c92491fa5aa862f558cb385c9916216b
|
[
"MIT"
] | 909
|
2018-05-18T07:50:26.000Z
|
2022-03-31T20:16:30.000Z
|
heat/utils/tests/test_vision_transforms.py
|
shssf/heat
|
9db0a936c92491fa5aa862f558cb385c9916216b
|
[
"MIT"
] | 28
|
2018-05-24T14:39:18.000Z
|
2022-03-31T19:18:47.000Z
|
import heat as ht
import unittest
class TestVisionTransforms(unittest.TestCase):
def test_vision_transforms_getattr(self):
ht.utils.vision_transforms.ToTensor()
with self.assertRaises(AttributeError):
ht.utils.vision_transforms.asdf()
| 26.9
| 47
| 0.743494
|
50d0552a01d8597dda3d716524b61b1cacc7d61d
| 1,762
|
py
|
Python
|
sort/merge_sort.py
|
marcusljx/algorithms
|
9b388c6a9bf456f937fd09985d13a550d074d466
|
[
"MIT"
] | null | null | null |
sort/merge_sort.py
|
marcusljx/algorithms
|
9b388c6a9bf456f937fd09985d13a550d074d466
|
[
"MIT"
] | null | null | null |
sort/merge_sort.py
|
marcusljx/algorithms
|
9b388c6a9bf456f937fd09985d13a550d074d466
|
[
"MIT"
] | null | null | null |
import argparse
import math
from random import shuffle
def mergesort(unsorted_list, compare_func):
"""
MergeSort returns a sorted list from the elements of unsorted_list.
compare_func is a function that takes in two arguments a and b(where
a and b are of the same type in unsorted_list) and returns:
True if a should be sorted to the left of b
False if a should be sorted to the right of b
The compare_func should be able to handle situations where a==b.
:param unsorted_list: list
:param compare_func: func
:return: list
"""
if len(unsorted_list) < 2:
return unsorted_list
# split list and sort recursively
split_pos = math.ceil(len(unsorted_list) / 2)
A = mergesort(unsorted_list[:split_pos], compare_func)
B = mergesort(unsorted_list[split_pos:], compare_func)
# join the sorted halves
result = []
while len(A) > 0 and len(B) > 0:
if compare_func(A[0], B[0]):
result += [A[0]]
A = A[1:]
else:
result += [B[0]]
B = B[1:]
if len(A) > 0:
return result + A
if len(B) > 0:
return result + B
return result
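# Minimal usage sketch (doctest-style, assuming ascending order is wanted):
#   >>> mergesort([3, 1, 2], lambda a, b: a <= b)
#   [1, 2, 3]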
def main(args):
print("max={}".format(args.max))
def c_func(a, b):
return a <= b
eg_list = [i for i in range(0, args.max)]
shuffle(eg_list)
print("list = {}".format(eg_list))
result = mergesort(eg_list, c_func)
print("sorted : {}".format(result))
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description='example of insertion sort')
argparser.add_argument('--max', type=int, default=1000, help='maximum value in the list to run the example')
args = argparser.parse_args()
main(args)
| 26.298507
| 112
| 0.625426
|
131b0a2a6c3e2cc9be4b5d6fd0170358dbbc8636
| 3,177
|
py
|
Python
|
label_maker/preview.py
|
PallawiSinghal/label-maker
|
9b6759f002c92818b11348ffff17e2d94d320ca6
|
[
"MIT"
] | null | null | null |
label_maker/preview.py
|
PallawiSinghal/label-maker
|
9b6759f002c92818b11348ffff17e2d94d320ca6
|
[
"MIT"
] | null | null | null |
label_maker/preview.py
|
PallawiSinghal/label-maker
|
9b6759f002c92818b11348ffff17e2d94d320ca6
|
[
"MIT"
] | null | null | null |
# pylint: disable=unused-argument
"""Produce imagery examples for specified classes"""
from os import path as op
from os import makedirs
import numpy as np
from PIL import Image, ImageDraw
from label_maker.utils import class_match, get_image_function
def preview(dest_folder, number, classes, imagery, ml_type, imagery_offset=False, **kwargs):
"""Produce imagery examples for specified classes
Parameters
------------
dest_folder: str
Folder to save labels and example tiles into
number: int
Number of preview images to download per class
classes: list
A list of classes for machine learning training. Each class is defined as a dict
with two required properties:
- name: class name
- filter: A Mapbox GL Filter.
See the README for more details
imagery: str
Imagery template to download satellite images from.
Ex: http://a.tiles.mapbox.com/v4/mapbox.satellite/{z}/{x}/{y}.jpg?access_token=ACCESS_TOKEN
ml_type: str
Defines the type of machine learning. One of "classification", "object-detection", or "segmentation"
imagery_offset: list
An optional list of integers representing the number of pixels to offset imagery. Ex. [15, -5] will
move the images 15 pixels right and 5 pixels up relative to the requested tile bounds
**kwargs: dict
Other properties from CLI config passed as keywords to other utility functions
"""
# open labels file
labels_file = op.join(dest_folder, 'labels.npz')
tiles = np.load(labels_file)
# create example tiles directory
examples_dir = op.join(dest_folder, 'examples')
if not op.isdir(examples_dir):
makedirs(examples_dir)
# find examples tiles for each class and download
print('Writing example images to {}'.format(examples_dir))
# get image acquisition function based on imagery string
image_function = get_image_function(imagery)
for i, cl in enumerate(classes):
# create class directory
class_dir = op.join(dest_folder, 'examples', cl.get('name'))
if not op.isdir(class_dir):
makedirs(class_dir)
class_tiles = (t for t in tiles.files
if class_match(ml_type, tiles[t], i + 1))
print('Downloading at most {} tiles for class {}'.format(number, cl.get('name')))
for n, tile in enumerate(class_tiles):
if n >= number:
break
tile_img = image_function(tile, imagery, class_dir, imagery_offset,
kwargs)
if ml_type == 'object-detection':
img = Image.open(tile_img)
draw = ImageDraw.Draw(img)
for box in tiles[tile]:
draw.rectangle(((box[0], box[1]), (box[2], box[3])), outline='red')
img.save(tile_img)
elif ml_type == 'segmentation':
final = Image.new('RGB', (256, 256))
img = Image.open(tile_img)
mask = Image.fromarray(tiles[tile] * 255)
final.paste(img, mask)
final.save(tile_img)
| 38.743902
| 108
| 0.629839
|
0e6f176b2c1cf2abd299d48f4d96b846c82176c0
| 9,756
|
py
|
Python
|
src/tables.py
|
XiaZeng0223/alps
|
72c5f9b02424bfef6b19c8ec9675774ae827242a
|
[
"MIT"
] | null | null | null |
src/tables.py
|
XiaZeng0223/alps
|
72c5f9b02424bfef6b19c8ec9675774ae827242a
|
[
"MIT"
] | null | null | null |
src/tables.py
|
XiaZeng0223/alps
|
72c5f9b02424bfef6b19c8ec9675774ae827242a
|
[
"MIT"
] | null | null | null |
import numpy as np
from numpy import dot, mean, absolute
from numpy.linalg import norm
import matplotlib.pyplot as plt
import pandas as pd
import argparse
import glob, os
import json
parser = argparse.ArgumentParser()
parser.add_argument('--results_dir', type=str, default='/home/xia/work/results/alps/models')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--output_dir', type=str, default='analysis')
parser.add_argument('--task', type=str, default='cfever')
parser.add_argument('--method', type=str, default='ft')
parser.add_argument('--commitee6_hs', action='store_true')
parser.add_argument('--rand', action='store_true')
parser.add_argument('--pattern', action='store_true')
parser.add_argument('--pooling', action='store_true')
args = parser.parse_args()
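# Directory layout assumed by the glob patterns below (inferred, illustrative):
# roughly {results_dir}/<model>/<seed>/<task>/.../<strategy>_<instance>/eval_results.txt
# for --method ft, with results.json in place of eval_results.txt for --method pet;
# the exact depth depends on the branch taken (--rand, --commitee6_hs, --pooling).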
def get_rand():
model=[]
instance = []
strategy = []
acc = []
f1=[]
if args.method == 'ft':
for filename in glob.glob('{}/*/{}/{}/*/*/eval_results.txt'.format(args.results_dir, args.seed, args.task)):
# print('filename', filename)
instance.append(int(filename.split('/')[-2].split('_')[-1]))
strategy.append(filename.split('/')[-2].split('_')[0])
with open(os.path.join(os.getcwd(), filename), 'r') as f: # open in readonly mode
text = f.read().splitlines()
# print(text)
acc.append(float(text[0].replace("acc = ", "")))
f1.append(float(text[1].replace("f1 = ", "")))
model.append(filename.split('/')[-6])
elif args.method == 'pet':
for filename in glob.glob('{}/*/{}/{}/*/*/results.json'.format(args.results_dir, args.seed, args.task)):
# print(filename)
instance.append(int(filename.split('/')[-2].split('_')[-1]))
strategy.append(filename.split('/')[-2].split('_')[0])
with open(os.path.join(os.getcwd(), filename), 'r') as f: # open in readonly mode
text = json.load(f)['test_set_after_training']
# print(text)
acc.append(text['acc'])
f1.append(text['f1-macro'])
model.append(filename.split('/')[-6])
df = pd.DataFrame(
{'Model':model,
'Strategy':strategy,
'Instance':instance,
'Acc':acc,
'F1':f1,
}).sort_values('Instance')
df =df.reset_index().drop(columns=['index'])
for m in ['albert', 'bert', 'roberta', 'xlnet', 'bart', 'deberta']:
df ['Model'] = df['Model'].replace(f'{m}nli_10', m)
print(df)
# df=df.groupby(['Model', 'Instance', 'Strategy']).mean().reset_index()
# print(df)
os.makedirs(args.output_dir, exist_ok=True)
df.to_csv("{}/{}.csv".format(args.output_dir, args.seed),
sep='\t', encoding='utf-8', float_format='%.3f')
def get():
# global set
model=[]
instance = []
strategy = []
acc = []
f1=[]
pattern=[]
pooling=[]
    if args.commitee6_hs:
if args.method == 'ft':
for filename in glob.glob('{}/{}/{}/*/*/eval_results.txt'.format(args.results_dir, args.seed, args.task)):
# print('filename', filename)
instance.append(int(filename.split('/')[-3].split('_')[-1]))
strategy.append("_".join(filename.split('/')[-3].split('_')[:-1]))
with open(os.path.join(os.getcwd(), filename), 'r') as f: # open in readonly mode
text = f.read().splitlines()
# print(text)
acc.append(float(text[0].replace("acc = ", "")))
f1.append(float(text[1].replace("f1 = ", "")))
model.append(filename.split('/')[-2])
elif args.method == 'pet':
if args.pattern:
all_ = glob.glob('{}/{}/{}/*/*/results.json'.format(args.results_dir, args.seed, args.task))
logits_ = glob.glob('{}/{}/{}/*/logits_*/results.json'.format(args.results_dir, args.seed, args.task))
files = set(all_) - set(logits_)
for filename in files:
# print(filename)
instance.append(int(filename.split('/')[-3].split('_')[-1]))
strategy.append("_".join(filename.split('/')[-3].split('_')[:-1]))
with open(os.path.join(os.getcwd(), filename), 'r') as f: # open in readonly mode
text = json.load(f)['test_set_after_training']
# print(text)
acc.append(text['acc'])
f1.append(text['f1-macro'])
model.append(filename.split('/')[-2].split('_')[0])
pattern.append(int(filename.split('/')[-2].split('_')[1]))
else:
all_ = glob.glob('{}/{}/{}/*/*/results.json'.format(args.results_dir, args.seed, args.task))
logits_ = glob.glob('{}/{}/{}/*/logits_*/results.json'.format(args.results_dir, args.seed, args.task))
files = set(all_) - set(logits_)
for filename in files:
# print(filename)
instance.append(int(filename.split('/')[-3].split('_')[-1]))
strategy.append("_".join(filename.split('/')[-3].split('_')[:-1]))
with open(os.path.join(os.getcwd(), filename), 'r') as f: # open in readonly mode
text = json.load(f)['test_set_after_training']
# print(text)
acc.append(text['acc'])
f1.append(text['f1-macro'])
model.append(filename.split('/')[-2])
else:
for dir in glob.glob('{}/*'.format(args.results_dir)):
# print(dir)
if os.path.isdir(dir) and dir.split('/')[-1] != 'scripts':
if args.method =='ft':
for filename in glob.glob('{}/{}/{}/*/eval_results.txt'.format(dir, args.seed, args.task)):
# print('filename',filename)
instance.append(int(filename.split('/')[-2].split('_')[-1]))
strategy.append(filename.split('/')[-2].split('_')[0])
with open(os.path.join(os.getcwd(), filename), 'r') as f: # open in readonly mode
text = f.read().splitlines()
# print(text)
acc.append(float(text[0].replace("acc = ", "")))
f1.append(float(text[1].replace("f1 = ", "")))
model.append(dir.split('/')[-1])
elif args.method =='pet':
if args.pooling:
for filename in glob.glob('{}/{}/{}/*/*/results.json'.format(dir, args.seed, args.task)):
# print(filename)
instance.append(int(filename.split('/')[-2].split('_')[-1]))
strategy.append('_'.join(filename.split('/')[-2].split('_')[:-1]))
pooling.append(filename.split('/')[-3])
with open(os.path.join(os.getcwd(), filename), 'r') as f: # open in readonly mode
text = json.load(f)['test_set_after_training']
# print(text)
acc.append(text['acc'])
f1.append(text['f1-macro'])
model.append(dir.split('/')[-1])
else:
for filename in glob.glob('{}/{}/{}/*/results.json'.format(dir, args.seed, args.task)):
# print(filename)
instance.append(int(filename.split('/')[-2].split('_')[-1]))
strategy.append("_".join(filename.split('/')[-2].split('_')[:-1]))
with open(os.path.join(os.getcwd(), filename), 'r') as f: # open in readonly mode
text = json.load(f)['test_set_after_training']
# print(text)
acc.append(text['acc'])
f1.append(text['f1-macro'])
model.append(dir.split('/')[-1])
if args.pattern:
df = pd.DataFrame(
{'Model': model,
'Pattern':pattern,
'Strategy': strategy,
'Instance': instance,
'Acc': acc,
'F1': f1,
}).sort_values('Instance')
elif args.pooling:
df = pd.DataFrame(
{'Model': model,
'Pooling':pooling,
'Strategy': strategy,
'Instance': instance,
'Acc': acc,
'F1': f1,
}).sort_values('Instance')
else:
df = pd.DataFrame(
{'Model':model,
'Strategy':strategy,
'Instance':instance,
'Acc':acc,
'F1':f1,
}).sort_values('Instance')
df =df.reset_index().drop(columns=['index'])
    if args.commitee6_hs:
mapping = {0:'bert-large', 1:'roberta-large', 2:'deberta-large', 3:'bert-base', 4:'roberta-base', 5:'deberta-base'}
for k in mapping.keys():
df['Model'] = df['Model'].replace(f'model_{k}', mapping[k])
else:
for m in ['albert', 'bert', 'roberta', 'xlnet', 'bart', 'deberta']:
df ['Model'] = df['Model'].replace(f'{m}nli_10', m)
print(df)
os.makedirs(args.output_dir, exist_ok=True)
df.to_csv("{}/{}.csv".format(args.output_dir, args.seed),
sep='\t', encoding='utf-8', float_format='%.3f')
if __name__ == '__main__':
if args.rand:
get_rand()
else:
get()
| 46.018868
| 123
| 0.492927
|
76c8d10972bf7016db7c6afebc7bfdbea0b7f1e5
| 2,026
|
py
|
Python
|
zdiscord/util/logging/LogClear.py
|
xxdunedainxx/zdiscord
|
e79039621969fd7a2987ccac4e8d6fcff11ee754
|
[
"MIT"
] | null | null | null |
zdiscord/util/logging/LogClear.py
|
xxdunedainxx/zdiscord
|
e79039621969fd7a2987ccac4e8d6fcff11ee754
|
[
"MIT"
] | 57
|
2020-06-05T18:33:17.000Z
|
2020-08-17T18:28:37.000Z
|
zdiscord/util/logging/LogClear.py
|
xxdunedainxx/zdiscord
|
e79039621969fd7a2987ccac4e8d6fcff11ee754
|
[
"MIT"
] | null | null | null |
from zdiscord.util.logging.LogFactory import LogFactory
import os
import datetime
import random
import shutil
# TODO code could be enhanced & cleaned up
class LogClear:
Logger = None
LogDir = None
LogZipDestination = None
LogMegabyteTop = None
def __init__(self):
pass
@staticmethod
def init(logName='LogClear', log_dir=LogFactory.log_dir, LogZipDestination='LOCAL', LogMegabyteTop=5):
        LogClear.Logger = LogFactory.get_logger(logName=logName)
        LogClear.LogDir = log_dir
        LogClear.LogZipDestination: str = LogZipDestination
        LogClear.LogMegabyteTop: int = LogMegabyteTop
@staticmethod
def clean_up_logs():
LogClear.Logger.info("Running log clean up!")
logs_to_zip:[str] = []
for file in os.listdir(LogClear.LogDir):
if file.endswith(".log"):
# print(os.path.join(directory, filename))
LogClear.Logger.info(f"Checking log {file}")
file_info = os.stat(f"{LogClear.LogDir}{os.sep}{file}")
if(datetime.datetime.now() - datetime.datetime.fromtimestamp(file_info.st_birthtime)).days > 1:
logs_to_zip.append(file)
LogClear.Logger.info(f"File {file}, is old!")
elif file_info.st_size * .000001 > LogClear.LogMegabyteTop:
LogClear.Logger.info(f"File {file}, is too large!")
logs_to_zip.append(file)
else:
continue
if len(logs_to_zip) > 2:
zip_folder = f"{LogClear.LogDir}{os.sep}tmp{os.sep}{datetime.datetime.now().toordinal()}-files"
os.makedirs(f"{zip_folder}", exist_ok=True)
for file in logs_to_zip:
shutil.move(f"{LogClear.LogDir}{os.sep}{file}",f"{zip_folder}{os.sep}{file}")
LogFactory.touch_file(path=f"{LogClear.LogDir}{os.sep}{file}")
shutil.make_archive(f"{zip_folder}z", 'zip', zip_folder)
shutil.rmtree(zip_folder)
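# Usage sketch (illustrative; assumes LogFactory is configured elsewhere):
#   LogClear.init()
#   LogClear.clean_up_logs()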
| 35.54386
| 111
| 0.611056
|
acb05056b10b08a8bbbac745ce20f83821e406b5
| 64
|
py
|
Python
|
pyqtetreemodel/__init__.py
|
summoningdark/pyqtetreemodel
|
ed83eaf7494c27ccffe501975bbca471f7ad797e
|
[
"MIT"
] | null | null | null |
pyqtetreemodel/__init__.py
|
summoningdark/pyqtetreemodel
|
ed83eaf7494c27ccffe501975bbca471f7ad797e
|
[
"MIT"
] | null | null | null |
pyqtetreemodel/__init__.py
|
summoningdark/pyqtetreemodel
|
ed83eaf7494c27ccffe501975bbca471f7ad797e
|
[
"MIT"
] | null | null | null |
from .Models import EtreeModel
from .Widgets import XmlTreeView
| 21.333333
| 32
| 0.84375
|
bd01247ec70fd8b4e7e0891e2214305875037759
| 11,529
|
py
|
Python
|
venv/Lib/site-packages/skimage/filters/_gaussian.py
|
amelliaaas/tugastkc4
|
f442382c72379e911f3780543b95345a3b1c9407
|
[
"Apache-2.0"
] | 4
|
2021-10-20T12:39:09.000Z
|
2022-02-26T15:02:08.000Z
|
venv/Lib/site-packages/skimage/filters/_gaussian.py
|
amelliaaas/tugastkc4
|
f442382c72379e911f3780543b95345a3b1c9407
|
[
"Apache-2.0"
] | 7
|
2021-06-08T21:46:24.000Z
|
2022-03-12T00:35:31.000Z
|
venv/Lib/site-packages/skimage/filters/_gaussian.py
|
amelliaaas/tugastkc4
|
f442382c72379e911f3780543b95345a3b1c9407
|
[
"Apache-2.0"
] | 20
|
2021-11-07T13:55:56.000Z
|
2021-12-02T10:54:01.000Z
|
from collections.abc import Iterable
import numpy as np
from scipy import ndimage as ndi
from ..util import img_as_float
from .._shared.utils import warn, convert_to_float
__all__ = ['gaussian', 'difference_of_gaussians']
def gaussian(image, sigma=1, output=None, mode='nearest', cval=0,
multichannel=None, preserve_range=False, truncate=4.0):
"""Multi-dimensional Gaussian filter.
Parameters
----------
image : array-like
Input image (grayscale or color) to filter.
sigma : scalar or sequence of scalars, optional
Standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'nearest'.
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
multichannel : bool, optional (default: None)
Whether the last axis of the image is to be interpreted as multiple
channels. If True, each channel is filtered separately (channels are
not mixed together). Only 3 channels are supported. If ``None``,
the function will attempt to guess this, and raise a warning if
ambiguous, when the array has shape (M, N, 3).
preserve_range : bool, optional
Whether to keep the original range of values. Otherwise, the input
image is converted according to the conventions of ``img_as_float``.
Also see
https://scikit-image.org/docs/dev/user_guide/data_types.html
truncate : float, optional
Truncate the filter at this many standard deviations.
Returns
-------
filtered_image : ndarray
the filtered array
Notes
-----
    This function is a wrapper around :func:`scipy.ndimage.gaussian_filter`.
Integer arrays are converted to float.
The ``output`` should be floating point data type since gaussian converts
to float provided ``image``. If ``output`` is not provided, another array
will be allocated and returned as the result.
The multi-dimensional filter is implemented as a sequence of
one-dimensional convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
types with a limited precision, the results may be imprecise
because intermediate results may be stored with insufficient
precision.
Examples
--------
>>> a = np.zeros((3, 3))
>>> a[1, 1] = 1
>>> a
array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]])
>>> gaussian(a, sigma=0.4) # mild smoothing
array([[0.00163116, 0.03712502, 0.00163116],
[0.03712502, 0.84496158, 0.03712502],
[0.00163116, 0.03712502, 0.00163116]])
>>> gaussian(a, sigma=1) # more smoothing
array([[0.05855018, 0.09653293, 0.05855018],
[0.09653293, 0.15915589, 0.09653293],
[0.05855018, 0.09653293, 0.05855018]])
>>> # Several modes are possible for handling boundaries
>>> gaussian(a, sigma=1, mode='reflect')
array([[0.08767308, 0.12075024, 0.08767308],
[0.12075024, 0.16630671, 0.12075024],
[0.08767308, 0.12075024, 0.08767308]])
>>> # For RGB images, each is filtered separately
>>> from skimage.data import astronaut
>>> image = astronaut()
>>> filtered_img = gaussian(image, sigma=1, multichannel=True)
"""
spatial_dims = None
try:
spatial_dims = _guess_spatial_dimensions(image)
except ValueError:
spatial_dims = image.ndim
if spatial_dims is None and multichannel is None:
msg = ("Images with dimensions (M, N, 3) are interpreted as 2D+RGB "
"by default. Use `multichannel=False` to interpret as "
"3D image with last dimension of length 3.")
warn(RuntimeWarning(msg))
multichannel = True
if np.any(np.asarray(sigma) < 0.0):
raise ValueError("Sigma values less than zero are not valid")
if multichannel:
# do not filter across channels
if not isinstance(sigma, Iterable):
sigma = [sigma] * (image.ndim - 1)
if len(sigma) != image.ndim:
sigma = np.concatenate((np.asarray(sigma), [0]))
image = convert_to_float(image, preserve_range)
if output is None:
output = np.empty_like(image)
elif not np.issubdtype(output.dtype, np.floating):
raise ValueError("Provided output data type is not float")
ndi.gaussian_filter(image, sigma, output=output, mode=mode, cval=cval,
truncate=truncate)
return output
def _guess_spatial_dimensions(image):
"""Make an educated guess about whether an image has a channels dimension.
Parameters
----------
image : ndarray
The input image.
Returns
-------
spatial_dims : int or None
The number of spatial dimensions of ``image``. If ambiguous, the value
is ``None``.
Raises
------
ValueError
If the image array has less than two or more than four dimensions.
"""
if image.ndim == 2:
return 2
if image.ndim == 3 and image.shape[-1] != 3:
return 3
if image.ndim == 3 and image.shape[-1] == 3:
return None
if image.ndim == 4 and image.shape[-1] == 3:
return 3
else:
raise ValueError("Expected 2D, 3D, or 4D array, got %iD." % image.ndim)
def difference_of_gaussians(image, low_sigma, high_sigma=None, *,
mode='nearest', cval=0, multichannel=False,
truncate=4.0):
"""Find features between ``low_sigma`` and ``high_sigma`` in size.
This function uses the Difference of Gaussians method for applying
band-pass filters to multi-dimensional arrays. The input array is
blurred with two Gaussian kernels of differing sigmas to produce two
intermediate, filtered images. The more-blurred image is then subtracted
from the less-blurred image. The final output image will therefore have
had high-frequency components attenuated by the smaller-sigma Gaussian, and
low frequency components will have been removed due to their presence in
the more-blurred intermediate.
Parameters
----------
image : ndarray
Input array to filter.
low_sigma : scalar or sequence of scalars
Standard deviation(s) for the Gaussian kernel with the smaller sigmas
across all axes. The standard deviations are given for each axis as a
sequence, or as a single number, in which case the single number is
used as the standard deviation value for all axes.
high_sigma : scalar or sequence of scalars, optional (default is None)
Standard deviation(s) for the Gaussian kernel with the larger sigmas
across all axes. The standard deviations are given for each axis as a
sequence, or as a single number, in which case the single number is
used as the standard deviation value for all axes. If None is given
(default), sigmas for all axes are calculated as 1.6 * low_sigma.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'nearest'.
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
multichannel : bool, optional (default: False)
Whether the last axis of the image is to be interpreted as multiple
channels. If True, each channel is filtered separately (channels are
not mixed together).
truncate : float, optional (default is 4.0)
Truncate the filter at this many standard deviations.
Returns
-------
filtered_image : ndarray
the filtered array.
See also
--------
    skimage.feature.blob_dog
Notes
-----
This function will subtract an array filtered with a Gaussian kernel
with sigmas given by ``high_sigma`` from an array filtered with a
Gaussian kernel with sigmas provided by ``low_sigma``. The values for
``high_sigma`` must always be greater than or equal to the corresponding
values in ``low_sigma``, or a ``ValueError`` will be raised.
When ``high_sigma`` is none, the values for ``high_sigma`` will be
calculated as 1.6x the corresponding values in ``low_sigma``. This ratio
was originally proposed by Marr and Hildreth (1980) [1]_ and is commonly
used when approximating the inverted Laplacian of Gaussian, which is used
in edge and blob detection.
Input image is converted according to the conventions of ``img_as_float``.
Except for sigma values, all parameters are used for both filters.
Examples
--------
Apply a simple Difference of Gaussians filter to a color image:
>>> from skimage.data import astronaut
>>> from skimage.filters import difference_of_gaussians
>>> filtered_image = difference_of_gaussians(astronaut(), 2, 10,
... multichannel=True)
Apply a Laplacian of Gaussian filter as approximated by the Difference
of Gaussians filter:
>>> filtered_image = difference_of_gaussians(astronaut(), 2,
... multichannel=True)
Apply a Difference of Gaussians filter to a grayscale image using different
sigma values for each axis:
>>> from skimage.data import camera
>>> filtered_image = difference_of_gaussians(camera(), (2,5), (3,20))
References
----------
.. [1] Marr, D. and Hildreth, E. Theory of Edge Detection. Proc. R. Soc.
Lond. Series B 207, 187-217 (1980).
https://doi.org/10.1098/rspb.1980.0020
"""
image = img_as_float(image)
low_sigma = np.array(low_sigma, dtype='float', ndmin=1)
if high_sigma is None:
high_sigma = low_sigma * 1.6
else:
high_sigma = np.array(high_sigma, dtype='float', ndmin=1)
if multichannel is True:
spatial_dims = image.ndim - 1
else:
spatial_dims = image.ndim
if len(low_sigma) != 1 and len(low_sigma) != spatial_dims:
raise ValueError('low_sigma must have length equal to number of'
' spatial dimensions of input')
if len(high_sigma) != 1 and len(high_sigma) != spatial_dims:
raise ValueError('high_sigma must have length equal to number of'
' spatial dimensions of input')
low_sigma = low_sigma * np.ones(spatial_dims)
high_sigma = high_sigma * np.ones(spatial_dims)
if any(high_sigma < low_sigma):
        raise ValueError('high_sigma must be equal to or larger than '
                         'low_sigma for all axes')
im1 = gaussian(image, low_sigma, mode=mode, cval=cval,
multichannel=multichannel, truncate=truncate)
im2 = gaussian(image, high_sigma, mode=mode, cval=cval,
multichannel=multichannel, truncate=truncate)
return im1 - im2
| 39.618557
| 79
| 0.64984
|
517bd517ad2a0b7b0c730759abd5bff6f5efae95
| 422
|
py
|
Python
|
tutorials/alice_bob_lab/{{cookiecutter.repo_name}}/{{cookiecutter.model_name}}/extracts/csv_extract.py
|
modelyst/dbgen-model-template
|
39c3b84527bc4f01ea3f810d7873e6edf8f056c3
|
[
"Apache-2.0"
] | null | null | null |
tutorials/alice_bob_lab/{{cookiecutter.repo_name}}/{{cookiecutter.model_name}}/extracts/csv_extract.py
|
modelyst/dbgen-model-template
|
39c3b84527bc4f01ea3f810d7873e6edf8f056c3
|
[
"Apache-2.0"
] | null | null | null |
tutorials/alice_bob_lab/{{cookiecutter.repo_name}}/{{cookiecutter.model_name}}/extracts/csv_extract.py
|
modelyst/dbgen-model-template
|
39c3b84527bc4f01ea3f810d7873e6edf8f056c3
|
[
"Apache-2.0"
] | null | null | null |
import csv
from typing import List
from dbgen import Extract
from pydantic import PrivateAttr
class CSVExtract(Extract):
data_dir: str
outputs: List[str] = ["row"]
_reader: PrivateAttr
def setup(self, **_):
csv_file = open(self.data_dir)
reader = csv.reader(csv_file)
self._reader = reader
def extract(self):
for row in self._reader:
yield {"row": row}
| 20.095238
| 38
| 0.637441
|
77d960c62d52f028013634ffd3612b9020359e51
| 3,165
|
py
|
Python
|
var/spack/repos/builtin/packages/netcdf-fortran/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2019-09-15T23:55:48.000Z
|
2019-09-15T23:55:48.000Z
|
var/spack/repos/builtin/packages/netcdf-fortran/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/netcdf-fortran/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2017-01-21T17:19:32.000Z
|
2017-01-21T17:19:32.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class NetcdfFortran(AutotoolsPackage):
"""NetCDF (network Common Data Form) is a set of software libraries and
machine-independent data formats that support the creation, access, and
sharing of array-oriented scientific data. This is the Fortran
distribution."""
homepage = "https://www.unidata.ucar.edu/software/netcdf"
url = "https://www.unidata.ucar.edu/downloads/netcdf/ftp/netcdf-fortran-4.5.2.tar.gz"
version('4.5.2', sha256='b959937d7d9045184e9d2040a915d94a7f4d0185f4a9dceb8f08c94b0c3304aa')
version('4.4.5', sha256='2467536ce29daea348c736476aa8e684c075d2f6cab12f3361885cb6905717b8')
version('4.4.4', sha256='b2d395175f8d283e68c8be516e231a96b191ade67ad0caafaf7fa01b1e6b5d75')
version('4.4.3', sha256='330373aa163d5931e475b5e83da5c1ad041e855185f24e6a8b85d73b48d6cda9')
variant('mpi', default=True,
description='Enable parallel I/O for netcdf-4')
variant('pic', default=True,
description='Produce position-independent code (for shared libs)')
# We need to build with MPI wrappers if parallel I/O features is enabled:
# https://www.unidata.ucar.edu/software/netcdf/docs/building_netcdf_fortran.html
depends_on('mpi', when='+mpi')
depends_on('netcdf-c~mpi', when='~mpi')
depends_on('netcdf-c+mpi', when='+mpi')
# The default libtool.m4 is too old to handle NAG compiler properly:
# https://github.com/Unidata/netcdf-fortran/issues/94
patch('nag.patch', when='@:4.4.4%nag')
def flag_handler(self, name, flags):
if name in ['cflags', 'fflags'] and '+pic' in self.spec:
flags.append(self.compiler.pic_flag)
elif name == 'cppflags':
flags.append(self.spec['netcdf-c'].headers.cpp_flags)
elif name == 'ldflags':
# We need to specify LDFLAGS to get correct dependency_libs
# in libnetcdff.la, so packages that use libtool for linking
# could correctly link to all the dependencies even when the
# building takes place outside of Spack environment, i.e.
# without Spack's compiler wrappers.
flags.append(self.spec['netcdf-c'].libs.search_flags)
return None, None, flags
@property
def libs(self):
libraries = ['libnetcdff']
# This package installs both shared and static libraries. Permit
# clients to query which one they want.
query_parameters = self.spec.last_query.extra_parameters
shared = 'shared' in query_parameters
return find_libraries(
libraries, root=self.prefix, shared=shared, recursive=True
)
def configure_args(self):
config_args = []
if '+mpi' in self.spec:
config_args.append('CC=%s' % self.spec['mpi'].mpicc)
config_args.append('FC=%s' % self.spec['mpi'].mpifc)
config_args.append('F77=%s' % self.spec['mpi'].mpif77)
return config_args
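    # Example spec (illustrative): `spack install netcdf-fortran+mpi ^openmpi`
    # builds against MPI wrappers per configure_args() above; use `~mpi` for a
    # serial build.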
| 41.644737
| 95
| 0.679937
|
bd1f72bc207dea55dc6e78af11e8a24fecf7998d
| 29,345
|
py
|
Python
|
release/stubs.min/System/Windows/Forms/__init___parts/ToolStripControlHost.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Windows/Forms/__init___parts/ToolStripControlHost.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Windows/Forms/__init___parts/ToolStripControlHost.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
class ToolStripControlHost(
ToolStripItem,
IComponent,
IDisposable,
IDropTarget,
ISupportOleDropSource,
IArrangedElement,
):
"""
Hosts custom controls or Windows Forms controls.
ToolStripControlHost(c: Control)
ToolStripControlHost(c: Control,name: str)
"""
def CreateAccessibilityInstance(self, *args):
""" CreateAccessibilityInstance(self: ToolStripControlHost) -> AccessibleObject """
pass
def Dispose(self):
"""
Dispose(self: ToolStripControlHost,disposing: bool)
Releases the unmanaged resources used by the System.Windows.Forms.ToolStripControlHost and
optionally releases the managed resources.
disposing: true to release both managed and unmanaged resources; false to release only unmanaged resources.
"""
pass
def Focus(self):
"""
Focus(self: ToolStripControlHost)
Gives the focus to a control.
"""
pass
def GetPreferredSize(self, constrainingSize):
"""
GetPreferredSize(self: ToolStripControlHost,constrainingSize: Size) -> Size
Retrieves the size of a rectangular area into which a control can be fitted.
constrainingSize: The custom-sized area for a control.
Returns: An ordered pair of type System.Drawing.Size representing the width and height of a rectangle.
"""
pass
def GetService(self, *args):
"""
GetService(self: Component,service: Type) -> object
Returns an object that represents a service provided by the System.ComponentModel.Component or
by its System.ComponentModel.Container.
service: A service provided by the System.ComponentModel.Component.
Returns: An System.Object that represents a service provided by the System.ComponentModel.Component,or
null if the System.ComponentModel.Component does not provide the specified service.
"""
pass
def IsInputChar(self, *args):
"""
IsInputChar(self: ToolStripItem,charCode: Char) -> bool
Determines whether a character is an input character that the item recognizes.
charCode: The character to test.
Returns: true if the character should be sent directly to the item and not preprocessed; otherwise,false.
"""
pass
def IsInputKey(self, *args):
"""
IsInputKey(self: ToolStripItem,keyData: Keys) -> bool
Determines whether the specified key is a regular input key or a special key that requires
preprocessing.
keyData: One of the System.Windows.Forms.Keys values.
Returns: true if the specified key is a regular input key; otherwise,false.
"""
pass
def MemberwiseClone(self, *args):
"""
MemberwiseClone(self: MarshalByRefObject,cloneIdentity: bool) -> MarshalByRefObject
Creates a shallow copy of the current System.MarshalByRefObject object.
cloneIdentity: false to delete the current System.MarshalByRefObject object's identity,which will cause the
object to be assigned a new identity when it is marshaled across a remoting boundary. A value of
false is usually appropriate. true to copy the current System.MarshalByRefObject object's
identity to its clone,which will cause remoting client calls to be routed to the remote server
object.
Returns: A shallow copy of the current System.MarshalByRefObject object.
MemberwiseClone(self: object) -> object
Creates a shallow copy of the current System.Object.
Returns: A shallow copy of the current System.Object.
"""
pass
def OnAvailableChanged(self, *args):
"""
OnAvailableChanged(self: ToolStripItem,e: EventArgs)
Raises the AvailableChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBackColorChanged(self, *args):
"""
OnBackColorChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.BackColorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBoundsChanged(self, *args):
"""
OnBoundsChanged(self: ToolStripControlHost)
Occurs when the System.Windows.Forms.ToolStripItem.Bounds property changes.
"""
pass
def OnClick(self, *args):
"""
OnClick(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.Click event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDisplayStyleChanged(self, *args):
"""
OnDisplayStyleChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.DisplayStyleChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDoubleClick(self, *args):
"""
OnDoubleClick(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.DoubleClick event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDragDrop(self, *args):
"""
OnDragDrop(self: ToolStripItem,dragEvent: DragEventArgs)
Raises the System.Windows.Forms.ToolStripItem.DragDrop event.
dragEvent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def OnDragEnter(self, *args):
"""
OnDragEnter(self: ToolStripItem,dragEvent: DragEventArgs)
Raises the System.Windows.Forms.ToolStripItem.DragEnter event.
dragEvent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def OnDragLeave(self, *args):
"""
OnDragLeave(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.DragLeave event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDragOver(self, *args):
"""
OnDragOver(self: ToolStripItem,dragEvent: DragEventArgs)
Raises the System.Windows.Forms.ToolStripItem.DragOver event.
dragEvent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def OnEnabledChanged(self, *args):
"""
OnEnabledChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.EnabledChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnEnter(self, *args):
"""
OnEnter(self: ToolStripControlHost,e: EventArgs)
Raises the System.Windows.Forms.ToolStripControlHost.Enter event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnFontChanged(self, *args):
"""
OnFontChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.Control.FontChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnForeColorChanged(self, *args):
"""
OnForeColorChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.ForeColorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnGiveFeedback(self, *args):
"""
OnGiveFeedback(self: ToolStripItem,giveFeedbackEvent: GiveFeedbackEventArgs)
Raises the System.Windows.Forms.ToolStripItem.GiveFeedback event.
giveFeedbackEvent: A System.Windows.Forms.GiveFeedbackEventArgs that contains the event data.
"""
pass
def OnGotFocus(self, *args):
"""
OnGotFocus(self: ToolStripControlHost,e: EventArgs)
Raises the System.Windows.Forms.ToolStripControlHost.GotFocus event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnHostedControlResize(self, *args):
"""
OnHostedControlResize(self: ToolStripControlHost,e: EventArgs)
Synchronizes the resizing of the control host with the resizing of the hosted control.
e: An System.EventArgs that contains the event data.
"""
pass
def OnKeyDown(self, *args):
"""
OnKeyDown(self: ToolStripControlHost,e: KeyEventArgs)
Raises the System.Windows.Forms.ToolStripControlHost.KeyDown event.
e: A System.Windows.Forms.KeyEventArgs that contains the event data.
"""
pass
def OnKeyPress(self, *args):
"""
OnKeyPress(self: ToolStripControlHost,e: KeyPressEventArgs)
Raises the System.Windows.Forms.ToolStripControlHost.KeyPress event.
e: A System.Windows.Forms.KeyPressEventArgs that contains the event data.
"""
pass
def OnKeyUp(self, *args):
"""
OnKeyUp(self: ToolStripControlHost,e: KeyEventArgs)
Raises the System.Windows.Forms.ToolStripControlHost.KeyUp event.
e: A System.Windows.Forms.KeyEventArgs that contains the event data.
"""
pass
def OnLayout(self, *args):
"""
OnLayout(self: ToolStripControlHost,e: LayoutEventArgs)
e: A System.Windows.Forms.LayoutEventArgs that contains the event data.
"""
pass
def OnLeave(self, *args):
"""
OnLeave(self: ToolStripControlHost,e: EventArgs)
Raises the System.Windows.Forms.ToolStripControlHost.Leave event.
e: A System.EventArgs that contains the event data.
"""
pass
def OnLocationChanged(self, *args):
"""
OnLocationChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.LocationChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnLostFocus(self, *args):
"""
OnLostFocus(self: ToolStripControlHost,e: EventArgs)
Raises the System.Windows.Forms.ToolStripControlHost.LostFocus event.
e: A System.EventArgs that contains the event data.
"""
pass
def OnMouseDown(self, *args):
"""
OnMouseDown(self: ToolStripItem,e: MouseEventArgs)
Raises the System.Windows.Forms.ToolStripItem.MouseDown event.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseEnter(self, *args):
"""
OnMouseEnter(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.MouseEnter event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseHover(self, *args):
"""
OnMouseHover(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.MouseHover event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseLeave(self, *args):
"""
OnMouseLeave(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.MouseLeave event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseMove(self, *args):
"""
OnMouseMove(self: ToolStripItem,mea: MouseEventArgs)
Raises the System.Windows.Forms.ToolStripItem.MouseMove event.
mea: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseUp(self, *args):
"""
OnMouseUp(self: ToolStripItem,e: MouseEventArgs)
Raises the System.Windows.Forms.ToolStripItem.MouseUp event.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnOwnerChanged(self, *args):
"""
OnOwnerChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.OwnerChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnOwnerFontChanged(self, *args):
"""
OnOwnerFontChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.Control.FontChanged event when the
System.Windows.Forms.ToolStripItem.Font property has changed on the parent of the
System.Windows.Forms.ToolStripItem.
e: A System.EventArgs that contains the event data.
"""
pass
def OnPaint(self, *args):
"""
OnPaint(self: ToolStripControlHost,e: PaintEventArgs)
e: A System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def OnParentBackColorChanged(self, *args):
"""
OnParentBackColorChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.BackColorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentChanged(self, *args):
"""
OnParentChanged(self: ToolStripControlHost,oldParent: ToolStrip,newParent: ToolStrip)
oldParent: The original parent of the item.
newParent: The new parent of the item.
"""
pass
def OnParentEnabledChanged(self, *args):
"""
OnParentEnabledChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.EnabledChanged event when the
System.Windows.Forms.ToolStripItem.Enabled property value of the item's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentForeColorChanged(self, *args):
"""
OnParentForeColorChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.ForeColorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentRightToLeftChanged(self, *args):
"""
OnParentRightToLeftChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.RightToLeftChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnQueryContinueDrag(self, *args):
"""
OnQueryContinueDrag(self: ToolStripItem,queryContinueDragEvent: QueryContinueDragEventArgs)
Raises the System.Windows.Forms.ToolStripItem.QueryContinueDrag event.
queryContinueDragEvent: A System.Windows.Forms.QueryContinueDragEventArgs that contains the event data.
"""
pass
def OnRightToLeftChanged(self, *args):
"""
OnRightToLeftChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.RightToLeftChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnSubscribeControlEvents(self, *args):
"""
OnSubscribeControlEvents(self: ToolStripControlHost,control: Control)
Subscribes events from the hosted control.
control: The control from which to subscribe events.
"""
pass
def OnTextChanged(self, *args):
"""
OnTextChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.TextChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnUnsubscribeControlEvents(self, *args):
"""
OnUnsubscribeControlEvents(self: ToolStripControlHost,control: Control)
Unsubscribes events from the hosted control.
control: The control from which to unsubscribe events.
"""
pass
def OnValidated(self, *args):
"""
OnValidated(self: ToolStripControlHost,e: EventArgs)
Raises the System.Windows.Forms.ToolStripControlHost.Validated event.
e: A System.EventArgs that contains the event data.
"""
pass
def OnValidating(self, *args):
"""
OnValidating(self: ToolStripControlHost,e: CancelEventArgs)
Raises the System.Windows.Forms.ToolStripControlHost.Validating event.
e: A System.ComponentModel.CancelEventArgs that contains the event data.
"""
pass
def OnVisibleChanged(self, *args):
"""
OnVisibleChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.VisibleChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def ProcessCmdKey(self, *args):
"""
ProcessCmdKey(self: ToolStripControlHost,m: Message,keyData: Keys) -> (bool,Message)
Processes a command key.
m: A System.Windows.Forms.Message,passed by reference,that represents the window message to
process.
keyData: One of the System.Windows.Forms.Keys values that represents the key to process.
Returns: false in all cases.
"""
pass
def ProcessDialogKey(self, *args):
"""
ProcessDialogKey(self: ToolStripControlHost,keyData: Keys) -> bool
Processes a dialog key.
keyData: One of the System.Windows.Forms.Keys values that represents the key to process.
Returns: true if the key was processed by the item; otherwise,false.
"""
pass
def ProcessMnemonic(self, *args):
"""
ProcessMnemonic(self: ToolStripControlHost,charCode: Char) -> bool
Processes a mnemonic character.
charCode: The character to process.
Returns: true if the character was processed as a mnemonic by the control; otherwise,false.
"""
pass
def ResetBackColor(self):
"""
ResetBackColor(self: ToolStripControlHost)
This method is not relevant to this class.
"""
pass
def ResetForeColor(self):
"""
ResetForeColor(self: ToolStripControlHost)
This method is not relevant to this class.
"""
pass
def SetBounds(self, *args):
"""
SetBounds(self: ToolStripItem,bounds: Rectangle)
Sets the size and location of the item.
bounds: A System.Drawing.Rectangle that represents the size and location of the
System.Windows.Forms.ToolStripItem
"""
pass
def SetVisibleCore(self, *args):
"""
SetVisibleCore(self: ToolStripControlHost,visible: bool)
visible: true to make the System.Windows.Forms.ToolStripItem visible; otherwise,false.
"""
pass
def __enter__(self, *args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self, *args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self, c, name=None):
"""
__new__(cls: type,c: Control)
__new__(cls: type,c: Control,name: str)
"""
pass
def __str__(self, *args):
pass
BackColor = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Get: BackColor(self: ToolStripControlHost) -> Color
Set: BackColor(self: ToolStripControlHost)=value
"""
BackgroundImage = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the background image displayed in the control.
Get: BackgroundImage(self: ToolStripControlHost) -> Image
Set: BackgroundImage(self: ToolStripControlHost)=value
"""
BackgroundImageLayout = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the background image layout as defined in the ImageLayout enumeration.
Get: BackgroundImageLayout(self: ToolStripControlHost) -> ImageLayout
Set: BackgroundImageLayout(self: ToolStripControlHost)=value
"""
CanRaiseEvents = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value indicating whether the component can raise an event.
"""
CanSelect = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets a value indicating whether the control can be selected.
Get: CanSelect(self: ToolStripControlHost) -> bool
"""
CausesValidation = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets a value indicating whether the hosted control causes and raises validation events on other controls when the hosted control receives focus.
Get: CausesValidation(self: ToolStripControlHost) -> bool
Set: CausesValidation(self: ToolStripControlHost)=value
"""
Control = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the System.Windows.Forms.Control that this System.Windows.Forms.ToolStripControlHost is hosting.
Get: Control(self: ToolStripControlHost) -> Control
"""
ControlAlign = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the alignment of the control on the form.
Get: ControlAlign(self: ToolStripControlHost) -> ContentAlignment
Set: ControlAlign(self: ToolStripControlHost)=value
"""
DefaultAutoToolTip = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value indicating whether to display the System.Windows.Forms.ToolTip that is defined as the default.
"""
DefaultDisplayStyle = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value indicating what is displayed on the System.Windows.Forms.ToolStripItem.
"""
DefaultMargin = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the default margin of an item.
"""
DefaultPadding = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the internal spacing characteristics of the item.
"""
DefaultSize = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the default size of the control.
"""
DesignMode = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value that indicates whether the System.ComponentModel.Component is currently in design mode.
"""
DismissWhenClicked = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value indicating whether items on a System.Windows.Forms.ToolStripDropDown are hidden after they are clicked.
"""
DisplayStyle = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""This property is not relevant to this class.
Get: DisplayStyle(self: ToolStripControlHost) -> ToolStripItemDisplayStyle
Set: DisplayStyle(self: ToolStripControlHost)=value
"""
DoubleClickEnabled = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""This property is not relevant to this class.
Get: DoubleClickEnabled(self: ToolStripControlHost) -> bool
Set: DoubleClickEnabled(self: ToolStripControlHost)=value
"""
Enabled = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a value indicating whether the parent control of the System.Windows.Forms.ToolStripItem is enabled.
Get: Enabled(self: ToolStripControlHost) -> bool
Set: Enabled(self: ToolStripControlHost)=value
"""
Events = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the list of event handlers that are attached to this System.ComponentModel.Component.
"""
Focused = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets a value indicating whether the control has input focus.
Get: Focused(self: ToolStripControlHost) -> bool
"""
Font = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the font to be used on the hosted control.
Get: Font(self: ToolStripControlHost) -> Font
Set: Font(self: ToolStripControlHost)=value
"""
ForeColor = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the foreground color of the hosted control.
Get: ForeColor(self: ToolStripControlHost) -> Color
Set: ForeColor(self: ToolStripControlHost)=value
"""
Image = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""This property is not relevant to this class.
Get: Image(self: ToolStripControlHost) -> Image
Set: Image(self: ToolStripControlHost)=value
"""
ImageAlign = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""This property is not relevant to this class.
Get: ImageAlign(self: ToolStripControlHost) -> ContentAlignment
Set: ImageAlign(self: ToolStripControlHost)=value
"""
ImageScaling = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""This property is not relevant to this class.
Get: ImageScaling(self: ToolStripControlHost) -> ToolStripItemImageScaling
Set: ImageScaling(self: ToolStripControlHost)=value
"""
ImageTransparentColor = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""This property is not relevant to this class.
Get: ImageTransparentColor(self: ToolStripControlHost) -> Color
Set: ImageTransparentColor(self: ToolStripControlHost)=value
"""
Parent = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the parent container of the System.Windows.Forms.ToolStripItem.
"""
RightToLeft = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Get: RightToLeft(self: ToolStripControlHost) -> RightToLeft
Set: RightToLeft(self: ToolStripControlHost)=value
"""
RightToLeftAutoMirrorImage = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""This property is not relevant to this class.
Get: RightToLeftAutoMirrorImage(self: ToolStripControlHost) -> bool
Set: RightToLeftAutoMirrorImage(self: ToolStripControlHost)=value
"""
Selected = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets a value indicating whether the item is selected.
Get: Selected(self: ToolStripControlHost) -> bool
"""
ShowKeyboardCues = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value indicating whether to show or hide shortcut keys.
"""
Site = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the site of the hosted control.
Get: Site(self: ToolStripControlHost) -> ISite
Set: Site(self: ToolStripControlHost)=value
"""
Size = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the size of the System.Windows.Forms.ToolStripItem.
Get: Size(self: ToolStripControlHost) -> Size
Set: Size(self: ToolStripControlHost)=value
"""
Text = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the text to be displayed on the hosted control.
Get: Text(self: ToolStripControlHost) -> str
Set: Text(self: ToolStripControlHost)=value
"""
TextAlign = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""This property is not relevant to this class.
Get: TextAlign(self: ToolStripControlHost) -> ContentAlignment
Set: TextAlign(self: ToolStripControlHost)=value
"""
TextDirection = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""This property is not relevant to this class.
Get: TextDirection(self: ToolStripControlHost) -> ToolStripTextDirection
Set: TextDirection(self: ToolStripControlHost)=value
"""
TextImageRelation = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""This property is not relevant to this class.
Get: TextImageRelation(self: ToolStripControlHost) -> TextImageRelation
Set: TextImageRelation(self: ToolStripControlHost)=value
"""
DisplayStyleChanged = None
Enter = None
GotFocus = None
KeyDown = None
KeyPress = None
KeyUp = None
Leave = None
LostFocus = None
Validated = None
Validating = None
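# --- Editor's illustrative sketch (not part of the generated stub file). ---
# A minimal example of hosting a WinForms control in a ToolStripControlHost, assuming
# an IronPython / .NET runtime where System.Windows.Forms can be loaded.  The form and
# text-box names are arbitrary.
def _toolstripcontrolhost_sketch():
    import clr
    clr.AddReference("System.Windows.Forms")
    from System.Windows.Forms import (Application, Form, TextBox, ToolStrip,
                                      ToolStripControlHost)
    # Wrap an ordinary control so it can live inside a ToolStrip.
    host = ToolStripControlHost(TextBox(), "searchBox")
    strip = ToolStrip()
    strip.Items.Add(host)            # the host behaves like any other ToolStripItem
    # The wrapped control stays reachable through the Control property documented above.
    host.Control.Text = "type here"
    form = Form()
    form.Controls.Add(strip)
    Application.Run(form)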
| 22.890016 | 221 | 0.659499 |
73a92732dd8df0a5bee5745112147947b7fc474f | 842 | py | Python | tests/programs/relative_import/RelativeImportMain.py | sthagen/Nuitka-Nuitka | 023dc76eeafd9c53ee2a51931474ddd98a3ba083 | ["Apache-2.0"] | null | null | null | tests/programs/relative_import/RelativeImportMain.py | sthagen/Nuitka-Nuitka | 023dc76eeafd9c53ee2a51931474ddd98a3ba083 | ["Apache-2.0"] | null | null | null | tests/programs/relative_import/RelativeImportMain.py | sthagen/Nuitka-Nuitka | 023dc76eeafd9c53ee2a51931474ddd98a3ba083 | ["Apache-2.0"] | null | null | null |
# Copyright 2022, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import dircache
print(dircache)
| 36.608696 | 79 | 0.728029 |
5b96a3de729abdcbea916a9e9035501fbe49bc10 | 20,877 | py | Python | code/bib/base.py | ribes96/TFG | b38ac01da641e40551c1b3fefc1dc3ebd1b8b0a9 | ["MIT"] | null | null | null | code/bib/base.py | ribes96/TFG | b38ac01da641e40551c1b3fefc1dc3ebd1b8b0a9 | ["MIT"] | null | null | null | code/bib/base.py | ribes96/TFG | b38ac01da641e40551c1b3fefc1dc3ebd1b8b0a9 | ["MIT"] | null | null | null |
"""Base classes for all estimators."""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import copy
import warnings
from collections import defaultdict
import numpy as np
from scipy import sparse
from .externals import six
from .utils.fixes import signature
from . import __version__
##############################################################################
def _first_and_last_element(arr):
"""Returns first and last element of numpy array or sparse matrix."""
if isinstance(arr, np.ndarray) or hasattr(arr, 'data'):
# numpy array or sparse matrix with .data attribute
data = arr.data if sparse.issparse(arr) else arr
return data.flat[0], data.flat[-1]
else:
# Sparse matrices without .data attribute. Only dok_matrix at
# the time of writing, in this case indexing is fast
return arr[0, 0], arr[-1, -1]
def clone(estimator, safe=True):
"""Constructs a new estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It yields a new estimator
with the same parameters that has not been fit on any data.
Parameters
----------
estimator : estimator object, or list, tuple or set of objects
The estimator or group of estimators to be cloned
safe : boolean, optional
If safe is false, clone will fall back to a deep copy on objects
that are not estimators.
"""
estimator_type = type(estimator)
# XXX: not handling dictionaries
if estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, 'get_params'):
if not safe:
return copy.deepcopy(estimator)
else:
raise TypeError("Cannot clone object '%s' (type %s): "
"it does not seem to be a scikit-learn estimator "
"as it does not implement a 'get_params' methods."
% (repr(estimator), type(estimator)))
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in six.iteritems(new_object_params):
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if param1 is param2:
# this should always happen
continue
if isinstance(param1, np.ndarray):
# For most ndarrays, we do not test for complete equality
if not isinstance(param2, type(param1)):
equality_test = False
elif (param1.ndim > 0
and param1.shape[0] > 0
and isinstance(param2, np.ndarray)
and param2.ndim > 0
and param2.shape[0] > 0):
equality_test = (
param1.shape == param2.shape
and param1.dtype == param2.dtype
and (_first_and_last_element(param1) ==
_first_and_last_element(param2))
)
else:
equality_test = np.all(param1 == param2)
elif sparse.issparse(param1):
# For sparse matrices equality doesn't work
if not sparse.issparse(param2):
equality_test = False
elif param1.size == 0 or param2.size == 0:
equality_test = (
param1.__class__ == param2.__class__
and param1.size == 0
and param2.size == 0
)
else:
equality_test = (
param1.__class__ == param2.__class__
and (_first_and_last_element(param1) ==
_first_and_last_element(param2))
and param1.nnz == param2.nnz
and param1.shape == param2.shape
)
else:
# fall back on standard equality
equality_test = param1 == param2
if equality_test:
warnings.warn("Estimator %s modifies parameters in __init__."
" This behavior is deprecated as of 0.18 and "
"support for this behavior will be removed in 0.20."
% type(estimator).__name__, DeprecationWarning)
else:
raise RuntimeError('Cannot clone object %s, as the constructor '
'does not seem to set parameter %s' %
(estimator, name))
return new_object
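# --- Editor's illustrative sketch (not part of the original scikit-learn module). ---
# A minimal, hypothetical estimator showing what clone() relies on: get_params()
# returning exactly the keyword arguments accepted by __init__.
def _clone_usage_sketch():
    class _Toy(object):
        def __init__(self, alpha=1.0):
            self.alpha = alpha

        def get_params(self, deep=True):
            return {'alpha': self.alpha}

    original = _Toy(alpha=0.5)
    fresh = clone(original)
    # A new, unfitted object carrying the same constructor parameters.
    assert fresh is not original and fresh.alpha == 0.5
    # Non-estimators are only cloned (deep-copied) when safe=False.
    assert clone([1, 2, 3], safe=False) == [1, 2, 3]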
###############################################################################
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params : dict
The dictionary to pretty print
offset : int
The offset in characters to add at the begin of each line.
printer : callable
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(six.iteritems(params))):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
###############################################################################
class BaseEstimator(object):
"""Base class for all estimators in scikit-learn
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
value = getattr(self, key, None)
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The latter have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimization to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
nested_params = defaultdict(dict) # grouped by prefix
for key, value in params.items():
key, delim, sub_key = key.partition('__')
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self))
if delim:
nested_params[key][sub_key] = value
else:
setattr(self, key, value)
valid_params[key] = value
for key, sub_params in nested_params.items():
valid_params[key].set_params(**sub_params)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
def __getstate__(self):
try:
state = super(BaseEstimator, self).__getstate__()
except AttributeError:
state = self.__dict__.copy()
if type(self).__module__.startswith('sklearn.'):
return dict(state.items(), _sklearn_version=__version__)
else:
return state
def __setstate__(self, state):
if type(self).__module__.startswith('sklearn.'):
pickle_version = state.pop("_sklearn_version", "pre-0.18")
if pickle_version != __version__:
warnings.warn(
"Trying to unpickle estimator {0} from version {1} when "
"using version {2}. This might lead to breaking code or "
"invalid results. Use at your own risk.".format(
self.__class__.__name__, pickle_version, __version__),
UserWarning)
try:
super(BaseEstimator, self).__setstate__(state)
except AttributeError:
self.__dict__.update(state)
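# --- Editor's illustrative sketch (not part of the original scikit-learn module). ---
# Two hypothetical estimators demonstrating the deep parameter interface described
# above: nested parameters are addressed with ``<component>__<parameter>`` keys.
def _params_interface_sketch():
    class _Leaf(BaseEstimator):
        def __init__(self, depth=3):
            self.depth = depth

    class _Wrapper(BaseEstimator):
        def __init__(self, base=None, scale=1.0):
            self.base = base
            self.scale = scale

    model = _Wrapper(base=_Leaf(depth=3), scale=2.0)
    params = model.get_params(deep=True)
    assert params['scale'] == 2.0 and params['base__depth'] == 3
    model.set_params(base__depth=5)      # routed to the nested estimator
    assert model.base.depth == 5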
###############################################################################
class ClassifierMixin(object):
"""Mixin class for all classifiers in scikit-learn."""
_estimator_type = "classifier"
def score(self, X, y, sample_weight=None):
"""Returns the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
from .metrics import accuracy_score
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
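# --- Editor's illustrative sketch (not part of the original scikit-learn module). ---
# Numeric illustration of the "subset accuracy" note above: in a multi-label setting
# a sample only counts as correct when every one of its labels is predicted correctly.
def _subset_accuracy_sketch():
    y_true = np.array([[1, 0, 1], [0, 1, 0]])
    y_pred = np.array([[1, 0, 1], [0, 1, 1]])
    exact_match = np.all(y_true == y_pred, axis=1)
    assert exact_match.mean() == 0.5      # only the first sample matches exactly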
###############################################################################
class RegressorMixin(object):
"""Mixin class for all regression estimators in scikit-learn."""
_estimator_type = "regressor"
def score(self, X, y, sample_weight=None):
"""Returns the coefficient of determination R^2 of the prediction.
The coefficient R^2 is defined as (1 - u/v), where u is the residual
sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
sum of squares ((y_true - y_true.mean()) ** 2).sum().
The best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
        would get an R^2 score of 0.0.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples. For some estimators this may be a
            precomputed kernel matrix instead, shape = (n_samples,
            n_samples_fitted), where n_samples_fitted is the number of
samples used in the fitting for the estimator.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
R^2 of self.predict(X) wrt. y.
"""
from .metrics import r2_score
return r2_score(y, self.predict(X), sample_weight=sample_weight,
multioutput='variance_weighted')
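# --- Editor's illustrative sketch (not part of the original scikit-learn module). ---
# The R^2 definition quoted above, computed by hand with numpy for a tiny example.
def _r2_by_hand_sketch():
    y_true = np.array([3.0, -0.5, 2.0, 7.0])
    y_pred = np.array([2.5, 0.0, 2.0, 8.0])
    u = ((y_true - y_pred) ** 2).sum()            # residual sum of squares
    v = ((y_true - y_true.mean()) ** 2).sum()     # total sum of squares
    r2 = 1.0 - u / v
    assert abs(r2 - 0.9486) < 1e-3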
###############################################################################
class ClusterMixin(object):
"""Mixin class for all cluster estimators in scikit-learn."""
_estimator_type = "clusterer"
def fit_predict(self, X, y=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X)
return self.labels_
class BiclusterMixin(object):
"""Mixin class for all bicluster estimators in scikit-learn"""
@property
def biclusters_(self):
"""Convenient way to get row and column indicators together.
Returns the ``rows_`` and ``columns_`` members.
"""
return self.rows_, self.columns_
def get_indices(self, i):
"""Row and column indices of the i'th bicluster.
Only works if ``rows_`` and ``columns_`` attributes exist.
Parameters
----------
i : int
The index of the cluster.
Returns
-------
row_ind : np.array, dtype=np.intp
Indices of rows in the dataset that belong to the bicluster.
col_ind : np.array, dtype=np.intp
Indices of columns in the dataset that belong to the bicluster.
"""
rows = self.rows_[i]
columns = self.columns_[i]
return np.nonzero(rows)[0], np.nonzero(columns)[0]
def get_shape(self, i):
"""Shape of the i'th bicluster.
Parameters
----------
i : int
The index of the cluster.
Returns
-------
shape : (int, int)
Number of rows and columns (resp.) in the bicluster.
"""
indices = self.get_indices(i)
return tuple(len(i) for i in indices)
def get_submatrix(self, i, data):
"""Returns the submatrix corresponding to bicluster `i`.
Parameters
----------
i : int
The index of the cluster.
data : array
The data.
Returns
-------
submatrix : array
The submatrix corresponding to bicluster i.
Notes
-----
Works with sparse matrices. Only works if ``rows_`` and
``columns_`` attributes exist.
"""
from .utils.validation import check_array
data = check_array(data, accept_sparse='csr')
row_ind, col_ind = self.get_indices(i)
return data[row_ind[:, np.newaxis], col_ind]
###############################################################################
class TransformerMixin(object):
"""Mixin class for all transformers in scikit-learn."""
def fit_transform(self, X, y=None, **fit_params):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training set.
y : numpy array of shape [n_samples]
Target values.
Returns
-------
X_new : numpy array of shape [n_samples, n_features_new]
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
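# --- Editor's illustrative sketch (not part of the original scikit-learn module). ---
# A hypothetical transformer relying on the default fit_transform above (the
# unsupervised, arity-1 branch: fit(X) followed by transform(X)).
def _fit_transform_sketch():
    class _Center(BaseEstimator, TransformerMixin):
        def fit(self, X, y=None):
            self.mean_ = X.mean(axis=0)
            return self

        def transform(self, X):
            return X - self.mean_

    X = np.arange(6.0).reshape(3, 2)
    Xt = _Center().fit_transform(X)
    assert np.allclose(Xt.mean(axis=0), 0.0)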
class DensityMixin(object):
"""Mixin class for all density estimators in scikit-learn."""
_estimator_type = "DensityEstimator"
def score(self, X, y=None):
"""Returns the score of the model on the data X
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
score : float
"""
pass
class OutlierMixin(object):
"""Mixin class for all outlier detection estimators in scikit-learn."""
_estimator_type = "outlier_detector"
def fit_predict(self, X, y=None):
"""Performs outlier detection on X.
Returns -1 for outliers and 1 for inliers.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data.
Returns
-------
y : ndarray, shape (n_samples,)
1 for inliers, -1 for outliers.
"""
# override for transductive outlier detectors like LocalOulierFactor
return self.fit(X).predict(X)
###############################################################################
class MetaEstimatorMixin(object):
"""Mixin class for all meta estimators in scikit-learn."""
# this is just a tag for the moment
###############################################################################
def is_classifier(estimator):
"""Returns True if the given estimator is (probably) a classifier.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a classifier and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Returns True if the given estimator is (probably) a regressor.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a regressor and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "regressor"
def is_outlier_detector(estimator):
"""Returns True if the given estimator is (probably) an outlier detector.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is an outlier detector and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "outlier_detector"
| 34.39374 | 79 | 0.560138 |
3cb17f0e08ba1450d108248c2cfcbe8d4756ab38 | 3,699 | py | Python | tools/c7n_azure/c7n_azure/resources/storage_container.py | chris-angeli-rft/cloud-custodian | 5ff331b114a591dbaf6d672e30ceefb7ae64a5dd | ["Apache-2.0"] | 8 | 2021-05-18T02:22:03.000Z | 2021-09-11T02:49:04.000Z | tools/c7n_azure/c7n_azure/resources/storage_container.py | chris-angeli-rft/cloud-custodian | 5ff331b114a591dbaf6d672e30ceefb7ae64a5dd | ["Apache-2.0"] | 79 | 2019-03-20T12:27:06.000Z | 2019-08-14T14:07:04.000Z | tools/c7n_azure/c7n_azure/resources/storage_container.py | chris-angeli-rft/cloud-custodian | 5ff331b114a591dbaf6d672e30ceefb7ae64a5dd | ["Apache-2.0"] | 3 | 2017-09-21T13:36:46.000Z | 2021-09-20T16:38:29.000Z |
# Copyright 2019 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from c7n_azure.provider import resources
from c7n_azure.query import ChildTypeInfo, ChildResourceManager
from c7n_azure.actions.base import AzureBaseAction
from c7n.filters.core import type_schema
from c7n_azure.utils import ResourceIdParser
@resources.register('storage-container')
class StorageContainer(ChildResourceManager):
"""Storage Container Resource
:example:
Finds all containers with public access enabled
.. code-block:: yaml
policies:
- name: storage-container-public
description: |
Find all containers with public access enabled
resource: azure.storage-container
filters:
- type: value
key: properties.publicAccess
op: not-equal
value: None # Possible values: Blob, Container, None
"""
class resource_type(ChildTypeInfo):
doc_groups = ['Storage']
service = 'azure.mgmt.storage'
client = 'StorageManagementClient'
enum_spec = ('blob_containers', 'list', None)
parent_manager_name = 'storage'
diagnostic_settings_enabled = False
resource_type = 'Microsoft.Storage/storageAccounts/blobServices/containers'
enable_tag_operations = False
raise_on_exception = False
default_report_fields = (
'name',
'properties.publicAccess',
'"c7n:parent-id"'
)
@classmethod
def extra_args(cls, parent_resource):
return {'resource_group_name': parent_resource['resourceGroup'],
'account_name': parent_resource['name']}
@StorageContainer.action_registry.register('set-public-access')
class StorageContainerSetPublicAccessAction(AzureBaseAction):
"""Action that updates the access level setting on Storage Containers.
Programmatically, this will be seen by updating the Public Access setting
:example:
Finds all Blob Storage Containers that are not private and sets them to private
.. code-block:: yaml
policies:
- name: set-non-production-accounts-private
resource: azure.storage-container
filters:
- type: value
key: properties.publicAccess
op: not-equal
value: None
actions:
- type: set-public-access
value: None
"""
schema = type_schema(
'set-public-access',
required=['value'],
**{
'value': {'enum': ['Container', 'Blob', 'None']}
}
)
schema_alias = True
def _prepare_processing(self):
self.client = self.manager.get_client()
def _process_resource(self, resource):
resource_group = ResourceIdParser.get_resource_group(resource['id'])
account_name = ResourceIdParser.get_resource_name(resource['c7n:parent-id'])
self.client.blob_containers.update(
resource_group,
account_name,
resource['name'],
public_access=self.data['value']
)
| 33.026786 | 86 | 0.646391 |
f085717ccd74ebad733fd6018d3fa3c88c20f2c3 | 26,487 | py | Python | IPython/core/magic.py | ptone/ipython | b91d6a658d4526746dcbfb62e653d71c5d84eee9 | ["BSD-3-Clause-Clear"] | 8 | 2021-12-14T21:30:01.000Z | 2022-02-14T11:30:03.000Z | IPython/core/magic.py | lelegan/ipython | 7d0bedb0698af89be54802f32a1947678074945a | ["BSD-3-Clause-Clear"] | 7 | 2021-02-08T20:22:15.000Z | 2022-03-11T23:19:41.000Z | IPython/core/magic.py | lelegan/ipython | 7d0bedb0698af89be54802f32a1947678074945a | ["BSD-3-Clause-Clear"] | 2 | 2016-12-19T02:27:46.000Z | 2019-07-29T02:53:54.000Z |
# encoding: utf-8
"""Magic functions for InteractiveShell.
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de> and
# Copyright (C) 2001 Fernando Perez <fperez@colorado.edu>
# Copyright (C) 2008 The IPython Development Team
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import os
import re
import sys
import types
from getopt import getopt, GetoptError
# Our own
from IPython.config.configurable import Configurable
from IPython.core import oinspect
from IPython.core.error import UsageError
from IPython.core.inputsplitter import ESC_MAGIC, ESC_MAGIC2
from IPython.external.decorator import decorator
from IPython.utils.ipstruct import Struct
from IPython.utils.process import arg_split
from IPython.utils.py3compat import string_types, iteritems
from IPython.utils.text import dedent
from IPython.utils.traitlets import Bool, Dict, Instance, MetaHasTraits
from IPython.utils.warn import error
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# A dict we'll use for each class that has magics, used as temporary storage to
# pass information between the @line/cell_magic method decorators and the
# @magics_class class decorator, because the method decorators have no
# access to the class when they run. See for more details:
# http://stackoverflow.com/questions/2366713/can-a-python-decorator-of-an-instance-method-access-the-class
magics = dict(line={}, cell={})
magic_kinds = ('line', 'cell')
magic_spec = ('line', 'cell', 'line_cell')
magic_escapes = dict(line=ESC_MAGIC, cell=ESC_MAGIC2)
#-----------------------------------------------------------------------------
# Utility classes and functions
#-----------------------------------------------------------------------------
class Bunch: pass
def on_off(tag):
"""Return an ON/OFF string for a 1/0 input. Simple utility function."""
return ['OFF','ON'][tag]
def compress_dhist(dh):
"""Compress a directory history into a new one with at most 20 entries.
Return a new list made from the first and last 10 elements of dhist after
removal of duplicates.
"""
head, tail = dh[:-10], dh[-10:]
newhead = []
done = set()
for h in head:
if h in done:
continue
newhead.append(h)
done.add(h)
return newhead + tail
def needs_local_scope(func):
"""Decorator to mark magic functions which need to local scope to run."""
func.needs_local_scope = True
return func
#-----------------------------------------------------------------------------
# Class and method decorators for registering magics
#-----------------------------------------------------------------------------
def magics_class(cls):
"""Class decorator for all subclasses of the main Magics class.
Any class that subclasses Magics *must* also apply this decorator, to
ensure that all the methods that have been decorated as line/cell magics
get correctly registered in the class instance. This is necessary because
when method decorators run, the class does not exist yet, so they
temporarily store their information into a module global. Application of
this class decorator copies that global data to the class instance and
clears the global.
Obviously, this mechanism is not thread-safe, which means that the
*creation* of subclasses of Magic should only be done in a single-thread
context. Instantiation of the classes has no restrictions. Given that
these classes are typically created at IPython startup time and before user
application code becomes active, in practice this should not pose any
problems.
"""
cls.registered = True
cls.magics = dict(line = magics['line'],
cell = magics['cell'])
magics['line'] = {}
magics['cell'] = {}
return cls
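# --- Editor's illustrative sketch (not part of the original IPython module). ---
# The decorator pattern described above; ``shell`` is assumed to be a live
# InteractiveShell instance and ``MyMagics`` is a hypothetical name.
def _magics_class_sketch(shell):
    @magics_class
    class MyMagics(Magics):
        @line_magic
        def hello(self, line):
            """Usable as ``%hello world`` once registered."""
            return 'hello ' + line

        @cell_magic
        def upper(self, line, cell):
            """Usable as ``%%upper`` once registered."""
            return cell.upper()

    # Registration copies the line/cell tables recorded by @magics_class.
    shell.magics_manager.register(MyMagics)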
def record_magic(dct, magic_kind, magic_name, func):
"""Utility function to store a function as a magic of a specific kind.
Parameters
----------
dct : dict
A dictionary with 'line' and 'cell' subdicts.
magic_kind : str
Kind of magic to be stored.
magic_name : str
Key to store the magic as.
func : function
Callable object to store.
"""
if magic_kind == 'line_cell':
dct['line'][magic_name] = dct['cell'][magic_name] = func
else:
dct[magic_kind][magic_name] = func
def validate_type(magic_kind):
"""Ensure that the given magic_kind is valid.
Check that the given magic_kind is one of the accepted spec types (stored
in the global `magic_spec`), raise ValueError otherwise.
"""
if magic_kind not in magic_spec:
raise ValueError('magic_kind must be one of %s, %s given' %
                         (magic_spec, magic_kind))
# The docstrings for the decorator below will be fairly similar for the two
# types (method and function), so we generate them here once and reuse the
# templates below.
_docstring_template = \
"""Decorate the given {0} as {1} magic.
The decorator can be used with or without arguments, as follows.
i) without arguments: it will create a {1} magic named as the {0} being
decorated::
@deco
def foo(...)
will create a {1} magic named `foo`.
ii) with one string argument: which will be used as the actual name of the
resulting magic::
@deco('bar')
def foo(...)
will create a {1} magic named `bar`.
"""
# These two are decorator factories. While they are conceptually very similar,
# there are enough differences in the details that it's simpler to have them
# written as completely standalone functions rather than trying to share code
# and make a single one with convoluted logic.
def _method_magic_marker(magic_kind):
"""Decorator factory for methods in Magics subclasses.
"""
validate_type(magic_kind)
# This is a closure to capture the magic_kind. We could also use a class,
# but it's overkill for just that one bit of state.
def magic_deco(arg):
call = lambda f, *a, **k: f(*a, **k)
if callable(arg):
# "Naked" decorator call (just @foo, no args)
func = arg
name = func.__name__
retval = decorator(call, func)
record_magic(magics, magic_kind, name, name)
elif isinstance(arg, string_types):
# Decorator called with arguments (@foo('bar'))
name = arg
def mark(func, *a, **kw):
record_magic(magics, magic_kind, name, func.__name__)
return decorator(call, func)
retval = mark
else:
raise TypeError("Decorator can only be called with "
"string or function")
return retval
# Ensure the resulting decorator has a usable docstring
magic_deco.__doc__ = _docstring_template.format('method', magic_kind)
return magic_deco
def _function_magic_marker(magic_kind):
"""Decorator factory for standalone functions.
"""
validate_type(magic_kind)
# This is a closure to capture the magic_kind. We could also use a class,
# but it's overkill for just that one bit of state.
def magic_deco(arg):
call = lambda f, *a, **k: f(*a, **k)
# Find get_ipython() in the caller's namespace
caller = sys._getframe(1)
for ns in ['f_locals', 'f_globals', 'f_builtins']:
get_ipython = getattr(caller, ns).get('get_ipython')
if get_ipython is not None:
break
else:
raise NameError('Decorator can only run in context where '
'`get_ipython` exists')
ip = get_ipython()
if callable(arg):
# "Naked" decorator call (just @foo, no args)
func = arg
name = func.__name__
ip.register_magic_function(func, magic_kind, name)
retval = decorator(call, func)
elif isinstance(arg, string_types):
# Decorator called with arguments (@foo('bar'))
name = arg
def mark(func, *a, **kw):
ip.register_magic_function(func, magic_kind, name)
return decorator(call, func)
retval = mark
else:
raise TypeError("Decorator can only be called with "
"string or function")
return retval
# Ensure the resulting decorator has a usable docstring
ds = _docstring_template.format('function', magic_kind)
ds += dedent("""
Note: this decorator can only be used in a context where IPython is already
active, so that the `get_ipython()` call succeeds. You can therefore use
it in your startup files loaded after IPython initializes, but *not* in the
IPython configuration file itself, which is executed before IPython is
fully up and running. Any file located in the `startup` subdirectory of
your configuration profile will be OK in this sense.
""")
magic_deco.__doc__ = ds
return magic_deco
# Create the actual decorators for public use
# These three are used to decorate methods in class definitions
line_magic = _method_magic_marker('line')
cell_magic = _method_magic_marker('cell')
line_cell_magic = _method_magic_marker('line_cell')
# These three decorate standalone functions and perform the decoration
# immediately. They can only run where get_ipython() works
register_line_magic = _function_magic_marker('line')
register_cell_magic = _function_magic_marker('cell')
register_line_cell_magic = _function_magic_marker('line_cell')
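# --- Editor's illustrative sketch (not part of the original IPython module). ---
# The standalone form described above only works inside a running IPython session,
# so it is shown here as a comment rather than executable module-level code:
#
#     @register_line_magic
#     def now(line):
#         import datetime
#         return datetime.datetime.utcnow().isoformat()
#
#     # %now is available immediately; @register_line_magic("utcnow") would instead
#     # register the same function under the name "utcnow".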
#-----------------------------------------------------------------------------
# Core Magic classes
#-----------------------------------------------------------------------------
class MagicsManager(Configurable):
"""Object that handles all magic-related functionality for IPython.
"""
# Non-configurable class attributes
# A two-level dict, first keyed by magic type, then by magic function, and
# holding the actual callable object as value. This is the dict used for
# magic function dispatch
magics = Dict
# A registry of the original objects that we've been given holding magics.
registry = Dict
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
auto_magic = Bool(True, config=True, help=
"Automatically call line magics without requiring explicit % prefix")
def _auto_magic_changed(self, name, value):
self.shell.automagic = value
_auto_status = [
'Automagic is OFF, % prefix IS needed for line magics.',
'Automagic is ON, % prefix IS NOT needed for line magics.']
user_magics = Instance('IPython.core.magics.UserMagics')
def __init__(self, shell=None, config=None, user_magics=None, **traits):
super(MagicsManager, self).__init__(shell=shell, config=config,
user_magics=user_magics, **traits)
self.magics = dict(line={}, cell={})
# Let's add the user_magics to the registry for uniformity, so *all*
# registered magic containers can be found there.
self.registry[user_magics.__class__.__name__] = user_magics
def auto_status(self):
"""Return descriptive string with automagic status."""
return self._auto_status[self.auto_magic]
def lsmagic(self):
"""Return a dict of currently available magic functions.
The return dict has the keys 'line' and 'cell', corresponding to the
two types of magics we support. Each value is a list of names.
"""
return self.magics
def lsmagic_docs(self, brief=False, missing=''):
"""Return dict of documentation of magic functions.
The return dict has the keys 'line' and 'cell', corresponding to the
two types of magics we support. Each value is a dict keyed by magic
name whose value is the function docstring. If a docstring is
unavailable, the value of `missing` is used instead.
If brief is True, only the first line of each docstring will be returned.
"""
docs = {}
for m_type in self.magics:
m_docs = {}
for m_name, m_func in iteritems(self.magics[m_type]):
if m_func.__doc__:
if brief:
m_docs[m_name] = m_func.__doc__.split('\n', 1)[0]
else:
m_docs[m_name] = m_func.__doc__.rstrip()
else:
m_docs[m_name] = missing
docs[m_type] = m_docs
return docs
def register(self, *magic_objects):
"""Register one or more instances of Magics.
Take one or more classes or instances of classes that subclass the main
`core.Magic` class, and register them with IPython to use the magic
functions they provide. The registration process will then ensure that
        any methods that have been decorated to provide line and/or cell magics will
be recognized with the `%x`/`%%x` syntax as a line/cell magic
respectively.
If classes are given, they will be instantiated with the default
constructor. If your classes need a custom constructor, you should
        instantiate them first and pass the instance.
The provided arguments can be an arbitrary mix of classes and instances.
Parameters
----------
magic_objects : one or more classes or instances
"""
# Start by validating them to ensure they have all had their magic
# methods registered at the instance level
for m in magic_objects:
if not m.registered:
raise ValueError("Class of magics %r was constructed without "
"the @register_magics class decorator")
if type(m) in (type, MetaHasTraits):
# If we're given an uninstantiated class
m = m(shell=self.shell)
# Now that we have an instance, we can register it and update the
# table of callables
self.registry[m.__class__.__name__] = m
for mtype in magic_kinds:
self.magics[mtype].update(m.magics[mtype])
def register_function(self, func, magic_kind='line', magic_name=None):
"""Expose a standalone function as magic function for IPython.
This will create an IPython magic (line, cell or both) from a
standalone function. The functions should have the following
signatures:
* For line magics: `def f(line)`
* For cell magics: `def f(line, cell)`
* For a function that does both: `def f(line, cell=None)`
In the latter case, the function will be called with `cell==None` when
invoked as `%f`, and with cell as a string when invoked as `%%f`.
Parameters
----------
func : callable
Function to be registered as a magic.
magic_kind : str
Kind of magic, one of 'line', 'cell' or 'line_cell'
magic_name : optional str
If given, the name the magic will have in the IPython namespace. By
default, the name of the function itself is used.
"""
# Create the new method in the user_magics and register it in the
# global table
validate_type(magic_kind)
magic_name = func.__name__ if magic_name is None else magic_name
setattr(self.user_magics, magic_name, func)
record_magic(self.magics, magic_kind, magic_name, func)
def define_magic(self, name, func):
"""[Deprecated] Expose own function as magic function for IPython.
Example::
def foo_impl(self, parameter_s=''):
'My very own magic!. (Use docstrings, IPython reads them).'
print 'Magic function. Passed parameter is between < >:'
print '<%s>' % parameter_s
print 'The self object is:', self
ip.define_magic('foo',foo_impl)
"""
meth = types.MethodType(func, self.user_magics)
setattr(self.user_magics, name, meth)
record_magic(self.magics, 'line', name, meth)
def register_alias(self, alias_name, magic_name, magic_kind='line'):
"""Register an alias to a magic function.
The alias is an instance of :class:`MagicAlias`, which holds the
name and kind of the magic it should call. Binding is done at
call time, so if the underlying magic function is changed the alias
will call the new function.
Parameters
----------
alias_name : str
The name of the magic to be registered.
magic_name : str
The name of an existing magic.
magic_kind : str
Kind of magic, one of 'line' or 'cell'
"""
# `validate_type` is too permissive, as it allows 'line_cell'
# which we do not handle.
if magic_kind not in magic_kinds:
raise ValueError('magic_kind must be one of %s, %s given' %
                             (magic_kinds, magic_kind))
alias = MagicAlias(self.shell, magic_name, magic_kind)
setattr(self.user_magics, alias_name, alias)
record_magic(self.magics, magic_kind, alias_name, alias)
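# --- Editor's illustrative sketch (not part of the original IPython module). ---
# Exposing a plain function through MagicsManager.register_function, following the
# signature convention documented above; ``manager`` is assumed to be the active
# shell's magics_manager.
def _register_function_sketch(manager):
    def shout(line, cell=None):
        # Called as %shout with cell=None, or as %%shout with the cell body.
        text = line if cell is None else cell
        return text.upper()

    manager.register_function(shout, magic_kind='line_cell', magic_name='shout')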
# Key base class that provides the central functionality for magics.
class Magics(Configurable):
"""Base class for implementing magic functions.
Shell functions which can be reached as %function_name. All magic
functions should accept a string, which they can parse for their own
needs. This can make some functions easier to type, eg `%cd ../`
vs. `%cd("../")`
Classes providing magic functions need to subclass this class, and they
MUST:
- Use the method decorators `@line_magic` and `@cell_magic` to decorate
individual methods as magic functions, AND
- Use the class decorator `@magics_class` to ensure that the magic
methods are properly registered at the instance level upon instance
initialization.
See :mod:`magic_functions` for examples of actual implementation classes.
"""
# Dict holding all command-line options for each magic.
options_table = None
# Dict for the mapping of magic names to methods, set by class decorator
magics = None
# Flag to check that the class decorator was properly applied
registered = False
# Instance of IPython shell
shell = None
def __init__(self, shell=None, **kwargs):
if not(self.__class__.registered):
raise ValueError('Magics subclass without registration - '
'did you forget to apply @magics_class?')
if shell is not None:
if hasattr(shell, 'configurables'):
shell.configurables.append(self)
if hasattr(shell, 'config'):
kwargs.setdefault('parent', shell)
kwargs['shell'] = shell
self.shell = shell
self.options_table = {}
# The method decorators are run when the instance doesn't exist yet, so
# they can only record the names of the methods they are supposed to
# grab. Only now, that the instance exists, can we create the proper
# mapping to bound methods. So we read the info off the original names
# table and replace each method name by the actual bound method.
# But we mustn't clobber the *class* mapping, in case of multiple instances.
class_magics = self.magics
self.magics = {}
for mtype in magic_kinds:
tab = self.magics[mtype] = {}
cls_tab = class_magics[mtype]
for magic_name, meth_name in iteritems(cls_tab):
if isinstance(meth_name, string_types):
# it's a method name, grab it
tab[magic_name] = getattr(self, meth_name)
else:
# it's the real thing
tab[magic_name] = meth_name
# Configurable **needs** to be initiated at the end or the config
# magics get screwed up.
super(Magics, self).__init__(**kwargs)
def arg_err(self,func):
"""Print docstring if incorrect arguments were passed"""
print('Error in arguments:')
print(oinspect.getdoc(func))
def format_latex(self, strng):
"""Format a string for latex inclusion."""
# Characters that need to be escaped for latex:
escape_re = re.compile(r'(%|_|\$|#|&)',re.MULTILINE)
# Magic command names as headers:
cmd_name_re = re.compile(r'^(%s.*?):' % ESC_MAGIC,
re.MULTILINE)
# Magic commands
cmd_re = re.compile(r'(?P<cmd>%s.+?\b)(?!\}\}:)' % ESC_MAGIC,
re.MULTILINE)
# Paragraph continue
par_re = re.compile(r'\\$',re.MULTILINE)
# The "\n" symbol
newline_re = re.compile(r'\\n')
# Now build the string for output:
#strng = cmd_name_re.sub(r'\n\\texttt{\\textsl{\\large \1}}:',strng)
strng = cmd_name_re.sub(r'\n\\bigskip\n\\texttt{\\textbf{ \1}}:',
strng)
strng = cmd_re.sub(r'\\texttt{\g<cmd>}',strng)
strng = par_re.sub(r'\\\\',strng)
strng = escape_re.sub(r'\\\1',strng)
strng = newline_re.sub(r'\\textbackslash{}n',strng)
return strng
def parse_options(self, arg_str, opt_str, *long_opts, **kw):
"""Parse options passed to an argument string.
The interface is similar to that of :func:`getopt.getopt`, but it
returns a :class:`~IPython.utils.struct.Struct` with the options as keys
and the stripped argument string still as a string.
arg_str is quoted as a true sys.argv vector by using shlex.split.
This allows us to easily expand variables, glob files, quote
arguments, etc.
Parameters
----------
arg_str : str
The arguments to parse.
opt_str : str
The options specification.
mode : str, default 'string'
If given as 'list', the argument string is returned as a list (split
on whitespace) instead of a string.
list_all : bool, default False
Put all option values in lists. Normally only options
appearing more than once are put in a list.
posix : bool, default True
Whether to split the input line in POSIX mode or not, as per the
conventions outlined in the :mod:`shlex` module from the standard
library.
"""
# inject default options at the beginning of the input line
caller = sys._getframe(1).f_code.co_name
arg_str = '%s %s' % (self.options_table.get(caller,''),arg_str)
mode = kw.get('mode','string')
if mode not in ['string','list']:
raise ValueError('incorrect mode given: %s' % mode)
# Get options
list_all = kw.get('list_all',0)
posix = kw.get('posix', os.name == 'posix')
strict = kw.get('strict', True)
# Check if we have more than one argument to warrant extra processing:
odict = {} # Dictionary with options
args = arg_str.split()
if len(args) >= 1:
# If the list of inputs only has 0 or 1 thing in it, there's no
# need to look for options
argv = arg_split(arg_str, posix, strict)
# Do regular option processing
try:
opts,args = getopt(argv, opt_str, long_opts)
except GetoptError as e:
raise UsageError('%s ( allowed: "%s" %s)' % (e.msg,opt_str,
" ".join(long_opts)))
for o,a in opts:
if o.startswith('--'):
o = o[2:]
else:
o = o[1:]
try:
odict[o].append(a)
except AttributeError:
odict[o] = [odict[o],a]
except KeyError:
if list_all:
odict[o] = [a]
else:
odict[o] = a
# Prepare opts,args for return
opts = Struct(odict)
if mode == 'string':
args = ' '.join(args)
return opts,args
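# Editorial example (invented arguments, not from the original source): inside a
# magic method, a call such as
#     opts, args = self.parse_options('-l 3 foo.txt', 'l:')
# yields opts['l'] == '3' and args == 'foo.txt'; passing mode='list' instead
# returns the leftover arguments as a list rather than a re-joined string.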
def default_option(self, fn, optstr):
"""Make an entry in the options_table for fn, with value optstr"""
if fn not in self.lsmagic():
error("%s is not a magic function" % fn)
self.options_table[fn] = optstr
class MagicAlias(object):
"""An alias to another magic function.
An alias is determined by its magic name and magic kind. Lookup
is done at call time, so if the underlying magic changes the alias
will call the new function.
Use the :meth:`MagicsManager.register_alias` method or the
`%alias_magic` magic function to create and register a new alias.
"""
def __init__(self, shell, magic_name, magic_kind):
self.shell = shell
self.magic_name = magic_name
self.magic_kind = magic_kind
self.pretty_target = '%s%s' % (magic_escapes[self.magic_kind], self.magic_name)
self.__doc__ = "Alias for `%s`." % self.pretty_target
self._in_call = False
def __call__(self, *args, **kwargs):
"""Call the magic alias."""
fn = self.shell.find_magic(self.magic_name, self.magic_kind)
if fn is None:
raise UsageError("Magic `%s` not found." % self.pretty_target)
# Protect against infinite recursion.
if self._in_call:
raise UsageError("Infinite recursion detected; "
"magic aliases cannot call themselves.")
self._in_call = True
try:
return fn(*args, **kwargs)
finally:
self._in_call = False
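# Editorial note: without the _in_call flag, an alias registered under the same
# name it targets (e.g. register_alias('foo', 'foo')) would make find_magic
# return this MagicAlias again and recurse until the interpreter limit; the flag
# turns that loop into the readable UsageError above.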
| 37.677098
| 106
| 0.60675
|
cfda4c505095723081e407944d02b71104decde5
| 3,832
|
py
|
Python
|
dnacentersdk/models/validators/v2_1_1/jsd_208579ea4ed98f4f.py
|
wastorga/dnacentersdk
|
1a25aaef2eaa016fe54ebebbd7448919e0effa3f
|
[
"MIT"
] | null | null | null |
dnacentersdk/models/validators/v2_1_1/jsd_208579ea4ed98f4f.py
|
wastorga/dnacentersdk
|
1a25aaef2eaa016fe54ebebbd7448919e0effa3f
|
[
"MIT"
] | null | null | null |
dnacentersdk/models/validators/v2_1_1/jsd_208579ea4ed98f4f.py
|
wastorga/dnacentersdk
|
1a25aaef2eaa016fe54ebebbd7448919e0effa3f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""DNA Center Add IP Pool in SDA Virtual Network data model.
Copyright (c) 2019 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidator208579Ea4Ed98F4F(object):
"""Add IP Pool in SDA Virtual Network request schema definition."""
def __init__(self):
super(JSONSchemaValidator208579Ea4Ed98F4F, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"items": {
"properties": {
"authenticationPolicyName": {
"description":
"Authentication Policy Name",
"type": [
"string",
"null"
]
},
"ipPoolName": {
"description":
"Ip Pool Name",
"type": [
"string",
"null"
]
},
"isL2FloodingEnabled": {
"type": [
"boolean",
"null"
]
},
"isThisCriticalPool": {
"type": [
"boolean",
"null"
]
},
"poolType": {
"description":
"Pool Type",
"type": [
"string",
"null"
]
},
"scalableGroupName": {
"description":
"Scalable Group Name",
"type": [
"string",
"null"
]
},
"trafficType": {
"description":
"Traffic Type",
"type": [
"string",
"null"
]
},
"virtualNetworkName": {
"description":
"Virtual Network Name",
"type": [
"string",
"null"
]
}
},
"type": [
"object",
"null"
]
},
"type": "array"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
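# Editorial usage sketch (the payload below is illustrative, not taken from the
# DNA Center documentation):
#     JSONSchemaValidator208579Ea4Ed98F4F().validate(
#         [{"virtualNetworkName": "VN1", "ipPoolName": "pool-1"}])
# passes because every property is optional, while a non-list payload such as
# {"a": 1} raises MalformedRequest since the top-level schema type is "array".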
| 30.656
| 78
| 0.493737
|
2d2d2ba224de2485880a3e941bfe22331cf76037
| 2,345
|
pyw
|
Python
|
source/todolist.pyw
|
eclipse999/ToDoList
|
708eb31e112e6592a406e3f3f15d654c9f6fe7c2
|
[
"MIT"
] | null | null | null |
source/todolist.pyw
|
eclipse999/ToDoList
|
708eb31e112e6592a406e3f3f15d654c9f6fe7c2
|
[
"MIT"
] | null | null | null |
source/todolist.pyw
|
eclipse999/ToDoList
|
708eb31e112e6592a406e3f3f15d654c9f6fe7c2
|
[
"MIT"
] | null | null | null |
import sys
import os
from PyQt5 import QtWidgets, QtCore
from todo2 import Ui_MainWindow
class Todo(QtWidgets.QMainWindow, Ui_MainWindow):
i = 0
waitforadd = True
cwd = os.getcwd()
def __init__(self, parent=None):
super().__init__()
self.setupUi(self)
self.addbtn.clicked.connect(self.addtodo)
self.clearallbtn.clicked.connect(self.clearall)
self.deletebtn.clicked.connect(self.deleteitem)
self.savebtn.clicked.connect(self.savefile)
self.loadbtn.clicked.connect(self.loadfile)
def addtodo(self):
text = self.addlist.text()
if text != "":
self.todolist.addItem(text)
self.addlist.clear()
def clearall(self):
self.todolist.clear()
self.waitforadd = True
def deleteitem(self):
self.todolist.takeItem(self.todolist.currentRow())
def savefile(self):
with open("todo.txt", 'w', encoding='UTF-8') as thefile:
for i in range(self.todolist.count()):
thefile.write(str(self.todolist.item(i).text())+"\n")
msg = QtWidgets.QMessageBox()
msg.setWindowTitle(u"提示")
msg.setInformativeText(u"存檔成功!")
msg.exec_()
def loadfile(self):
self.todolist.clear()
try:
myfile = QtWidgets.QFileDialog.getOpenFileName(
self, "Open file", self.cwd, "Text Files (*.txt)")
with open(myfile[0], "r", encoding='UTF-8') as thefile:
for line in thefile.readlines():
line = line.strip()
self.todolist.addItem(line)
except FileNotFoundError:
pass
def keyPressEvent(self, qKeyEvent):
print(qKeyEvent.key())
if qKeyEvent.key() == QtCore.Qt.Key_F:
if (QtWidgets.QApplication.keyboardModifiers() ==
QtCore.Qt.ControlModifier):
self.loadfile()
if qKeyEvent.key() == QtCore.Qt.Key_S:
if (QtWidgets.QApplication.keyboardModifiers() ==
QtCore.Qt.ControlModifier):
self.savefile()
print("存檔成功")
else:
super().keyPressEvent(qKeyEvent)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
w = Todo()
w.show()
sys.exit(app.exec_())
| 29.3125
| 69
| 0.58209
|
7bf2a5722d2e28a6ec6c1131de60ee383dd78f0f
| 2,722
|
py
|
Python
|
01_06_histograms_and_scatter_plots.py
|
shevc07/Machine_Learning_for_Trading
|
11581254168ece5ecebff71e7cee6c9efa10929d
|
[
"MIT"
] | null | null | null |
01_06_histograms_and_scatter_plots.py
|
shevc07/Machine_Learning_for_Trading
|
11581254168ece5ecebff71e7cee6c9efa10929d
|
[
"MIT"
] | null | null | null |
01_06_histograms_and_scatter_plots.py
|
shevc07/Machine_Learning_for_Trading
|
11581254168ece5ecebff71e7cee6c9efa10929d
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
import os
from utils import get_data
from utils import plot_data
from utils import symbol_to_path
def compute_daily_returns(df):
dr = df.copy()
dr[1:] = df / df.shift(1) - 1
dr.iloc[0, :] = 0  # first row has no previous close, so its return is set to 0 (.iloc replaces the removed .ix accessor)
return dr
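# Editorial worked example: for closing prices [100, 102, 99.96] the ratio
# df / df.shift(1) - 1 gives [NaN, 0.02, -0.02]; the first row is then forced to
# zero because the first day has no previous close to compare against.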
def p05_how_to_plot_a_histogram():
dates = pd.date_range('2010-01-01', '2018-03-01')
symbols = ['SPY']
df = get_data(symbols, dates)
plot_data(df)
dr = compute_daily_returns(df)
plot_data(dr, title='Daily Returns of {}'.format(symbols), xlabel='Date', ylabel='ratio')
dr.hist(bins=200)
plt.show()
def p06_computing_histogram_statistics():
dates = pd.date_range('2010-01-01', '2018-03-01')
symbols = ['SPY']
df = get_data(symbols, dates)
# plot_data(df)
dr = compute_daily_returns(df)
# plot_data(dr, title='Daily Returns of {}'.format(symbols), xlabel='Date', ylabel='ratio')
dr.hist(bins=20)
# plt.show()
mean = dr['SPY'].mean()
std = dr['SPY'].std()
kurtosis = dr.kurtosis()
print("mean: {}, std: {}, kurtosis: {}".format(mean, std, kurtosis))
plt.axvline(mean, color='w', linestyle='dashed', linewidth=2)
plt.axvline(std, color='r', linestyle='dashed', linewidth=2)
plt.axvline(-std, color='r', linestyle='dashed', linewidth=2)
plt.show()
def p08_plot_tow_histograms_together():
dates = pd.date_range('2010-01-01', '2018-03-01')
symbols = ['SPY','XOM']
df = get_data(symbols, dates)
# plot_data(df)
dr = compute_daily_returns(df)
# plot_data(dr, title='Daily Returns of {}'.format(symbols), xlabel='Date', ylabel='ratio')
dr['SPY'].hist(bins=50, label='SPY')
dr['XOM'].hist(bins=50, label='XOM')
plt.show()
def p13_scatterplots_in_python():
dates = pd.date_range('2010-01-01', '2018-03-01')
symbols = ['SPY','XOM', 'GLD']
df = get_data(symbols, dates)
# plot_data(df)
dr = compute_daily_returns(df)
# plot_data(dr, title='Daily Returns of {}'.format(symbols), xlabel='Date', ylabel='ratio')
dr.plot(kind='scatter', x='SPY', y='XOM')
beta, alpha = np.polyfit(dr['SPY'], dr['XOM'], 1)
print('beta: {}, alpha: {}'.format(beta, alpha))
plt.plot(dr['SPY'], beta*dr['SPY'] + alpha, linestyle='-', color='r')
plt.show()
dr.plot(kind='scatter', x='SPY', y='GLD')
beta, alpha = np.polyfit(dr['SPY'], dr['GLD'], 1)
print('beta: {}, alpha: {}'.format(beta, alpha))
plt.plot(dr['SPY'], beta*dr['SPY'] + alpha, linestyle='-', color='r')
plt.show()
# calculate correlation coefficient
print(dr.corr(method='pearson'))
def test_run():
p13_scatterplots_in_python()
if __name__ == '__main__':
test_run()
| 28.652632
| 95
| 0.627847
|
dfb5adae133a9c762707dca9bf91816a73b2ddd0
| 1,323
|
py
|
Python
|
tests/test_less_than_or_greater_than.py
|
artisanofcode/python-conjecture
|
5a7d57e407a4fb3e09a05d41ffda773136003289
|
[
"MIT"
] | null | null | null |
tests/test_less_than_or_greater_than.py
|
artisanofcode/python-conjecture
|
5a7d57e407a4fb3e09a05d41ffda773136003289
|
[
"MIT"
] | null | null | null |
tests/test_less_than_or_greater_than.py
|
artisanofcode/python-conjecture
|
5a7d57e407a4fb3e09a05d41ffda773136003289
|
[
"MIT"
] | null | null | null |
"""test conjecture.less_than_or_equal_to."""
from __future__ import annotations
import hypothesis
import hypothesis.strategies as st
import pytest
import conjecture
@pytest.mark.describe("less_than_or_equal_to")
@pytest.mark.it("should match smaller value")
@hypothesis.given(
value=st.shared(base=st.integers(), key="value"),
other=st.shared(base=st.integers(), key="value").flatmap(
lambda value: st.integers(max_value=value - 1)
),
)
def test_should_match_smaller_value(value: int, other: int) -> None:
print(value, other)
assert conjecture.less_than_or_equal_to(value).resolve(other)
@pytest.mark.describe("less_than_or_equal_to")
@pytest.mark.it("should not match bigger value")
@hypothesis.given(
value=st.shared(base=st.integers(), key="value"),
other=st.shared(base=st.integers(), key="value").flatmap(
lambda value: st.integers(min_value=value + 1)
),
)
def test_should_not_match_bigger_value(value: int, other: int) -> None:
assert not conjecture.less_than_or_equal_to(value).resolve(other)
@pytest.mark.describe("less_than_or_equal_to")
@pytest.mark.it("should match equal value")
@hypothesis.given(
value=st.integers(),
)
def test_should_match_equal_value(value: int) -> None:
assert conjecture.less_than_or_equal_to(value).resolve(value)
| 30.767442
| 71
| 0.746032
|
a2e3c5e39cdac55f9140001a2055c1b738d17735
| 949
|
py
|
Python
|
manager/test.py
|
epicframework/EPIC-Core
|
c7649f42a3c06c0922281e53de12346ab19ae456
|
[
"MIT"
] | null | null | null |
manager/test.py
|
epicframework/EPIC-Core
|
c7649f42a3c06c0922281e53de12346ab19ae456
|
[
"MIT"
] | 7
|
2019-05-19T23:56:15.000Z
|
2019-05-20T15:11:11.000Z
|
manager/test.py
|
epicframework/EPIC-Core
|
c7649f42a3c06c0922281e53de12346ab19ae456
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import datetime
from time import sleep
import sys
import json
# Import HA/Connective bindings
from bindings import new_ll, connect
arg_id = sys.argv[1]
arg_re = sys.argv[2]
print(arg_re)
ll = new_ll("../connective/connective/sharedlib/elconn.so")
ll.elconn_init(0)
connective = connect(ll, arg_re.encode())
def main():
for i in range(1,5):
#does thing
# timestamp = datetime.datetime.now()
# receipt = json.dumps({
# "Source": "Test",
# "Event": "Literally nothing",
# "Command": "It was a comment",
# "Time Stamp": str(timestamp),
# "Result": "Literally nothing, this is just a test"
# })
# sys.stdout.write(receipt+"\n")
# sys.stdout.flush()
connective.runs(f"heartbeats {arg_id} beat")  # f-string so the instance id is interpolated
sleep(5)
return
if __name__ == "__main__":
main()
| 24.333333
| 68
| 0.562698
|
d7bf88f5e4739cce370e83e3949559d0709a2cf2
| 4,316
|
py
|
Python
|
apps/profiles/models.py
|
SatishTammineni/django-lms
|
3dcf11355994089eb5506c441a8b1374c0df1f86
|
[
"BSD-3-Clause"
] | null | null | null |
apps/profiles/models.py
|
SatishTammineni/django-lms
|
3dcf11355994089eb5506c441a8b1374c0df1f86
|
[
"BSD-3-Clause"
] | null | null | null |
apps/profiles/models.py
|
SatishTammineni/django-lms
|
3dcf11355994089eb5506c441a8b1374c0df1f86
|
[
"BSD-3-Clause"
] | 2
|
2020-11-03T03:32:12.000Z
|
2022-03-02T18:14:52.000Z
|
import re, datetime
from dateutil import relativedelta
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import Group, User
from tinymce import models as tinymce_models
from django.db.models import permalink
from django.contrib.auth.models import User
from django.contrib.localflavor.us.models import PhoneNumberField
from libs.fields import JSONField
from courses.models import Semester
class Degree(models.Model):
name = models.CharField(max_length=100)
abbreviation = models.CharField(max_length=100)
class Profile(models.Model):
GENDER_CHOICES = (
(1, 'Male'),
(2, 'Female'),
)
user = models.OneToOneField(User, related_name = 'profile')
mugshot = models.ImageField(_('mugshot'), upload_to='mugshots/', blank=True)
resume = models.FileField(_('resume'), upload_to='resumes/', blank=True)
data = JSONField(null = True, blank = True)
preferences = JSONField(null = True, blank = True)
class Meta:
verbose_name = _('user profile')
verbose_name_plural = _('user profiles')
db_table = 'user_profiles'
def __unicode__(self):
return u"%s" % self.user.get_full_name()
@permalink
def get_absolute_url(self):
return ('profile_detail', None, { 'username': self.user.username })
@property
def sms_address(self):
if (self.mobile and self.mobile_provider):
return u"%s@%s" % (re.sub('-', '', self.mobile), self.mobile_provider.domain)
@property
def is_alum(self):
degrees = UserDegree.objects.filter(user = self.user)
if len(degrees) > 0:
for degree in degrees:
if degree.graduation.end < datetime.datetime.now().date():
return True
return False
class UserDegree(models.Model):
graduation = models.ForeignKey(Semester)
degree = models.ForeignKey(Degree)
user = models.ForeignKey(User)
@property
def is_expected(self):
return self.graduation.end > datetime.date.today()
def __unicode__(self):
if not self.is_expected :
return "{} {}".format(self.degree.name, self.graduation)
else:
return "{} {} (expected)".format(self.degree.name, self.graduation)
# We may use this later
# class MobileProvider(models.Model):
# """MobileProvider model"""
# title = models.CharField(_('title'), max_length=25)
# domain = models.CharField(_('domain'), max_length=50, unique=True)
# class Meta:
# verbose_name = _('mobile provider')
# verbose_name_plural = _('mobile providers')
# db_table = 'user_mobile_providers'
# def __unicode__(self):
# return u"%s" % self.title
class ServiceType(models.Model):
"""Service type model"""
title = models.CharField(_('title'), blank=True, max_length=100)
url = models.URLField(_('url'), blank=True, help_text='URL with a single \'{user}\' placeholder to turn a username into a service URL.', verify_exists=False)
class Meta:
verbose_name = _('service type')
verbose_name_plural = _('service types')
db_table = 'user_service_types'
def __unicode__(self):
return u"%s" % self.title
class Service(models.Model):
"""Service model"""
service = models.ForeignKey(ServiceType)
profile = models.ForeignKey(Profile)
username = models.CharField(_('Name or ID'), max_length=100, help_text="Username or id to be inserted into the service url.")
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = _('service')
verbose_name_plural = _('services')
db_table = 'user_services'
def __unicode__(self):
return u"%s" % self.username
@property
def service_url(self):
return re.sub('{user}', self.username, self.service.url)
@property
def title(self):
return u"%s" % self.service.title
def user_post_save(sender, instance, **kwargs):
profile, new = Profile.objects.get_or_create(user=instance)
if new:
profile.data = {}
profile.preferences = {}
profile.save()
models.signals.post_save.connect(user_post_save, sender=User)
| 31.50365
| 161
| 0.660797
|
514b45fbc5dcbbecfe56de9bd82b3254ed48386a
| 8,319
|
py
|
Python
|
model.py
|
zz803abc/quizXue
|
dc8d9f41f5c277d2310733c9a4186490dd3d5dc7
|
[
"MIT"
] | 3
|
2020-11-11T00:47:40.000Z
|
2021-04-13T01:34:04.000Z
|
model.py
|
zz803abc/quizXue
|
dc8d9f41f5c277d2310733c9a4186490dd3d5dc7
|
[
"MIT"
] | null | null | null |
model.py
|
zz803abc/quizXue
|
dc8d9f41f5c277d2310733c9a4186490dd3d5dc7
|
[
"MIT"
] | 2
|
2020-11-11T00:47:45.000Z
|
2021-03-30T11:45:37.000Z
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
@file: model.py
@author: kessil
@contact: https://github.com/kessil/
@time: 2019年06月02日 15:57:45
@desc: Life is short, you need Python
'''
from sqlalchemy import Column,Integer, String, Text, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from config import Config
import re
from lxml import etree
# Base class for declarative ORM models:
Base = declarative_base()
# Define the Bank model:
class Bank(Base):
# Table name:
__tablename__ = 'Bank'
# Table columns:
id = Column(Integer,primary_key=True)
content = Column(Text, unique=True)
item1 = Column(String(128))
item2 = Column(String(128))
item3 = Column(String(128))
item4 = Column(String(128))
answer = Column(String(8))
bounds = Column(String(64))
def __init__(self, content, options, answer='', bounds=''):
for i in range(len(options), 4):
options.append('')
# print(options)
self.content = content
self.item1, self.item2, self.item3, self.item4 = [str(x) for x in options]
self.answer = answer
self.bounds = bounds
@classmethod
def from_xml(cls, filename):
xml = etree.parse(filename)
root = xml.getroot()
xml_question = root.xpath(Config.XPATH_QUESTION)[0]
content = xml_question.xpath(Config.XPATH_CONTENT)[0]
xml_options = xml_question.xpath(Config.XPATH_OPTIONS)
options = [x.xpath(Config.XPATH_OPTOIN_DESC)[0] for x in xml_options]
bounds = []
for x in xml_options:
''' The stored bounds here assume a Huawei P20 screen (2244x1080 resolution) '''
x0, y0, x1, y1 = [int(x) for x in re.findall(r'\d+', x.xpath(Config.XPATH_OPTION_BOUNDES)[0])]
pos = complex((x0+x1)/2, (y0+y1)/2)
bounds.append(pos)
bounds = " ".join([str(x) for x in bounds])
# print(bounds)
return cls(content=content, options=options, bounds=bounds)
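# Editorial note with an invented bounds string: for an option whose bounds
# attribute is "[100,200][300,400]", re.findall extracts (100, 200, 300, 400) and
# the stored midpoint is complex((100+300)/2, (200+400)/2) == (200+300j), i.e.
# the screen coordinate a tap should target.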
def __eq__(self, other):
# if self.content != other.content:
# return False
# if self.item1 != other.item1:
# return False
# if self.item2 != other.item2:
# return False
# if self.item3 != other.item3:
# return False
# if self.item4 != other.item4:
# return False
# return True
return self.content == other.content
def __repr__(self):
return f'{self.content}\n'
def __str__(self):
# Normalize the blank placeholder in the question text to underscores; raw examples:
# 江南自古富庶地,风流才子美名扬,江南四大才子是__、__、__、__。
# 油锅起火时使用以下方法中__方法扑灭是不正确的。
content = re.sub(r'[\((]出题单位.*', "", self.content)
content = re.sub(r'(\s{2,})|((\s*))|(【\s*】)', '____', content)
items = [x for x in (self.item1, self.item2, self.item3, self.item4) if x]
index = ord(self.answer)-65
if index < len(items):
items[index] = f'**{items[index]}**'
options = '\n'.join([f'+ {x}' for x in items])
return f'{self.id}. {content} **{self.answer.upper()}**\n{options}\n'
# Initialize the database connection:
engine = create_engine(Config.DATABASE_URI)
# Create the DBSession factory:
Session = sessionmaker(bind=engine)
def db_add(session, item):
'''Add a record to the database'''
result = session.query(Bank).filter_by(content=item.content).first()
if result:
print('数据库已存在此纪录')
else:
session.add(item)
session.commit()
print('数据库添加记录成功!')
def db_update(session, id, answer):
to_update = db_qeury(session, id=id)
to_update.answer = answer
session.commit()
print(f'更新题目[{id}]的答案为“{answer}”')
def db_qeury(session, id=None, content=None):
'''Query database records by id or content'''
if id and isinstance(id, int):
return session.query(Bank).filter_by(id=id).first()
if content and isinstance(content, str):
return session.query(Bank).filter_by(content=content).first()
return session.query(Bank).all()
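# Editorial usage sketch (the question text is invented):
#     session = Session()
#     db_add(session, Bank(content='sample question ____', options=['A1', 'B1'], answer='A'))
#     bank = db_qeury(session, content='sample question ____')
# Calling db_qeury with neither id nor content returns every row, which is what
# db_print iterates over.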
def db_delete(session, item):
'''Delete a database record'''
to_del = db_qeury(session, content=item.content)
session.delete(to_del)
session.commit()
def db_print(session):
data = db_qeury(session)
print(f'学习强国题库: {len(data)} 题\n')
for d in data:
print(d)
def db_to_xls(session, filename):
import xlwt
data = session.query(Bank).all()
wb = xlwt.Workbook(encoding='utf-8')
ws = wb.add_sheet('题库')
if not data:
raise ValueError('database is empty')
ws.write(0, 0, '序号')
ws.write(0, 1, '题目')
ws.write(0, 2, '选项A')
ws.write(0, 3, '选项B')
ws.write(0, 4, '选项C')
ws.write(0, 5, '选项D')
ws.write(0, 6, '答案')
for d in data:
ws.write(d.id, 0, label=d.id)
ws.write(d.id, 1, label=re.sub(r'\s+', '', d.content))
ws.write(d.id, 2, label=d.item1)
ws.write(d.id, 3, label=d.item2)
ws.write(d.id, 4, label=d.item3)
ws.write(d.id, 5, label=d.item4)
ws.write(d.id, 6, label=d.answer)
wb.save(filename)
print('题库已导出到%s'%filename)
def db_to_mtb(session, filename):
'''Export to the 磨题帮 (Moti Bang) question-bank template'''
import xlwt
data = session.query(Bank).all()
wb = xlwt.Workbook(encoding='utf-8')
ws = wb.add_sheet('题库')
if not data:
raise ValueError('database is empty')
ws.write(0, 0, label='标题')
ws.write_merge(0,0,1,7,"学习强国一战到底")
ws.write(1, 0, label='描述')
ws.write_merge(1,1,1,7,"一份学习强国挑战答题试卷,全为单选题")
ws.write(2, 0, label='用时')
ws.write(2, 1, 100)
ws.write(3, 0, label='题干')
ws.write(3, 1, label='题型')
ws.write(3, 2, label='选择项1')
ws.write(3, 3, label='选择项2')
ws.write(3, 4, label='选择项3')
ws.write(3, 5, label='选择项4')
ws.write(3, 6, label='解析')
ws.write(3, 7, label='答案')
ws.write(3, 8, label='得分')
for d in data:
row = d.id + 3
ws.write(row, 0, label=d.content)
ws.write(row, 1, label='顺序选择题')
ws.write(row, 2, label=d.item1)
ws.write(row, 3, label=d.item2)
ws.write(row, 4, label=d.item3)
ws.write(row, 5, label=d.item4)
ws.write(row, 6, label='')
ws.write(row, 7, label=d.answer)
ws.write(row, 8, 1)
wb.save(filename)
print('题库(磨题帮)已导出到%s'%filename)
def db_from_xls(session, filename):
import xlrd
wb = xlrd.open_workbook(filename)
ws = wb.sheet_by_index(0)
nrows = ws.nrows  # number of data rows in this sheet
if nrows < 2:
raise ValueError('Excel has no records')
for i in range(1, nrows):
bank = Bank(content=ws.cell_value(i, 1),
options=ws.row_values(i, start_colx=2, end_colx=6),
answer=ws.cell_value(i, 6))
db_add(session, bank)
print('更新数据库成功!来源:%s %d' % (filename, nrows - 1))  # nrows - 1 data rows imported (header row excluded)
def db_to_md(session, filename):
data = session.query(Bank).all()
if not data:
raise 'database is empty'
with open(filename, 'w', encoding='utf-8') as fp:
fp.write(f'# 学习强国题库: {len(data)} 题\n')
for d in data:
fp.write(str(d))
print('题库已导出到%s'%filename)
def main():
# Create the database tables
Base.metadata.create_all(engine)
session = Session()
# db_update(session, 823, 'B')
while True:
print('%s\n%s\n%s'%('-*-'*28, '\tp-打印题库\tu-更新记录\tx-导出xls\tm-导出md\te-退出', '-*-'*28))
ch = input('''请输入:''').upper()
if 'E' == ch:
break
elif 'P' == ch:
db_print(session)
elif 'U' == ch:
print('暂不支持此功能')
# # 暂不支持此功能
# s = input('请在一行输入题目序号和修正的答案,空格隔开。请输入:')
# idx, new_ans = s.split(" ")
# print(db_qeury(session, id=int(idx)))
# ok = input(f'修改答案为: {new_ans} 确认?(输入 N 撤销)').upper()
# if 'N' != ok: db_update(session, int(idx), new_ans.upper())
elif 'X' == ch:
db_to_xls(session, './data/data-dev.xls')
db_to_mtb(session, './data/data-mtb.xls')
elif 'M' == ch:
db_to_md(session, './data/data-dev.md')
else:
print('输入错误,请重新输入!')
if __name__ == "__main__":
main()
# Example operations (kept commented out)
# bank = Bank(content='近期,我国学者研究“多节点网络”取得基础性突破。(出题单位:科技部引智司)',
# options=['电子', '原子', '质子', '量子'], answer='D')
# db_add(session, bank)
# db_print(session)
# Import from xls
# filename = "C:/Users/vince/repositories/quizXue/data/data-dev-old.xls"
# db_from_xls(session, filename)
# bank = Bank.from_xml('./ui.xml')
| 30.472527
| 106
| 0.574107
|
ee23530c74aae480933a6754a6b196dd1fe8c969
| 5,865
|
py
|
Python
|
tests/test_nagbot.py
|
srosenthal/nagbot
|
07e3414ac5b76df6bfc7547e5c01406654962a5d
|
[
"MIT"
] | 4
|
2020-02-06T21:23:27.000Z
|
2021-05-17T19:54:06.000Z
|
tests/test_nagbot.py
|
srosenthal/nagbot
|
07e3414ac5b76df6bfc7547e5c01406654962a5d
|
[
"MIT"
] | null | null | null |
tests/test_nagbot.py
|
srosenthal/nagbot
|
07e3414ac5b76df6bfc7547e5c01406654962a5d
|
[
"MIT"
] | 1
|
2020-06-18T21:31:36.000Z
|
2020-06-18T21:31:36.000Z
|
import sys
import unittest
import app
from app import nagbot
from app import parsing
from app.sqaws import Instance
class TestNagbot(unittest.TestCase):
def setup_instance(self, state: str, stop_after: str = '', terminate_after: str = ''):
return Instance(region_name='us-east-1',
instance_id='abc123',
state=state,
reason='',
instance_type='m4.xlarge',
name='Stephen',
operating_system='linux',
monthly_price=1,
monthly_server_price=2,
monthly_storage_price=3,
stop_after=stop_after,
terminate_after=terminate_after,
contact='stephen',
nagbot_state='')
def test_stoppable(self):
past_date = self.setup_instance(state='running', stop_after='2019-01-01')
today_date = self.setup_instance(state='running', stop_after=nagbot.TODAY_YYYY_MM_DD)
warning_str = ' (Nagbot: Warned on ' + nagbot.TODAY_YYYY_MM_DD + ')'
past_date_warned = self.setup_instance(state='running', stop_after='2019-01-01' + warning_str)
today_date_warned = self.setup_instance(state='running', stop_after=nagbot.TODAY_YYYY_MM_DD + warning_str)
anything_warned = self.setup_instance(state='running', stop_after='Yummy Udon Noodles' + warning_str)
wrong_state = self.setup_instance(state='stopped', stop_after='2019-01-01')
future_date = self.setup_instance(state='running', stop_after='2050-01-01')
unknown_date = self.setup_instance(state='running', stop_after='TBD')
# These instances should get a stop warning
assert nagbot.is_stoppable(past_date) == True
assert nagbot.is_stoppable(today_date) == True
assert nagbot.is_stoppable(unknown_date) == True
assert nagbot.is_stoppable(past_date_warned) == True
assert nagbot.is_stoppable(today_date_warned) == True
assert nagbot.is_stoppable(anything_warned) == True
# These instances should NOT get a stop warning
assert nagbot.is_stoppable(wrong_state) == False
assert nagbot.is_stoppable(future_date) == False
# These instances don't have a warning, so they shouldn't be stopped yet
assert nagbot.is_safe_to_stop(past_date) == False
assert nagbot.is_safe_to_stop(today_date) == False
assert nagbot.is_safe_to_stop(unknown_date) == False
assert nagbot.is_safe_to_stop(wrong_state) == False
assert nagbot.is_safe_to_stop(future_date) == False
# These instances can be stopped right away
assert nagbot.is_safe_to_stop(past_date_warned) == True
assert nagbot.is_safe_to_stop(today_date_warned) == True
assert nagbot.is_safe_to_stop(anything_warned) == True
def test_terminatable(self):
past_date = self.setup_instance(state='stopped', terminate_after='2019-01-01')
today_date = self.setup_instance(state='stopped', terminate_after=nagbot.TODAY_YYYY_MM_DD)
today_warning_str = ' (Nagbot: Warned on ' + nagbot.TODAY_YYYY_MM_DD + ')'
past_date_warned = self.setup_instance(state='stopped', terminate_after='2019-01-01' + today_warning_str)
today_date_warned = self.setup_instance(state='stopped', terminate_after=nagbot.TODAY_YYYY_MM_DD + today_warning_str)
anything_warned = self.setup_instance(state='stopped', terminate_after='Yummy Udon Noodles' + today_warning_str)
old_warning_str = ' (Nagbot: Warned on ' + nagbot.MIN_TERMINATION_WARNING_YYYY_MM_DD + ')'
past_date_warned_days_ago = self.setup_instance(state='stopped', terminate_after='2019-01-01' + old_warning_str)
anything_warned_days_ago = self.setup_instance(state='stopped', terminate_after='Yummy Udon Noodles' + old_warning_str)
wrong_state = self.setup_instance(state='running', terminate_after='2019-01-01')
future_date = self.setup_instance(state='stopped', terminate_after='2050-01-01')
unknown_date = self.setup_instance(state='stopped', terminate_after='TBD')
# These instances should get a termination warning
assert nagbot.is_terminatable(past_date) == True
assert nagbot.is_terminatable(today_date) == True
assert nagbot.is_terminatable(past_date_warned) == True
assert nagbot.is_terminatable(today_date_warned) == True
# These instances should NOT get a termination warning
assert nagbot.is_terminatable(wrong_state) == False
assert nagbot.is_terminatable(future_date) == False
assert nagbot.is_terminatable(unknown_date) == False
assert nagbot.is_terminatable(anything_warned) == False
# These instances don't have a warning, so they shouldn't be terminated yet
assert nagbot.is_safe_to_terminate(past_date) == False
assert nagbot.is_safe_to_terminate(today_date) == False
assert nagbot.is_safe_to_terminate(unknown_date) == False
assert nagbot.is_safe_to_terminate(wrong_state) == False
assert nagbot.is_safe_to_terminate(future_date) == False
assert nagbot.is_safe_to_terminate(anything_warned) == False
# These instances can be terminated, but not yet
assert nagbot.is_safe_to_terminate(past_date_warned) == False
assert nagbot.is_safe_to_terminate(today_date_warned) == False
# These instances have a warning, but are not eligible to add a warning, so we don't terminate
assert nagbot.is_safe_to_terminate(anything_warned_days_ago) == False
# These instances can be terminated now
assert nagbot.is_safe_to_terminate(past_date_warned_days_ago) == True
if __name__ == '__main__':
unittest.main()
| 50.128205
| 127
| 0.688491
|
22931180ca6a6fad31c6159080c44fdf117a2ce9
| 643
|
py
|
Python
|
yt/frontends/flash/api.py
|
kastalpes/yt
|
b1e197ca84433fbd61eaf44b28ff5cdb37981d4c
|
[
"BSD-3-Clause-Clear"
] | 2
|
2021-03-02T18:59:49.000Z
|
2021-03-02T18:59:50.000Z
|
yt/frontends/flash/api.py
|
kastalpes/yt
|
b1e197ca84433fbd61eaf44b28ff5cdb37981d4c
|
[
"BSD-3-Clause-Clear"
] | 4
|
2018-04-13T23:03:42.000Z
|
2018-05-08T17:50:43.000Z
|
yt/frontends/flash/api.py
|
kastalpes/yt
|
b1e197ca84433fbd61eaf44b28ff5cdb37981d4c
|
[
"BSD-3-Clause-Clear"
] | 2
|
2020-05-16T15:29:37.000Z
|
2020-06-22T10:17:08.000Z
|
"""
API for yt.frontends.flash
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from .data_structures import \
FLASHGrid, \
FLASHHierarchy, \
FLASHDataset, \
FLASHParticleDataset
from .fields import \
FLASHFieldInfo
from .io import \
IOHandlerFLASH, \
IOHandlerFLASHParticle
from . import tests
| 21.433333
| 78
| 0.520995
|
c11c94c24682dcdff48d90f2fbbb39404e3f60f4
| 51
|
py
|
Python
|
Demo/Demo_gym/envs/atari/__init__.py
|
Remosy/iceHocekeyIRL
|
1ffeaf8a9bd9585038629be41a2da552e0a4473b
|
[
"MIT"
] | null | null | null |
Demo/Demo_gym/envs/atari/__init__.py
|
Remosy/iceHocekeyIRL
|
1ffeaf8a9bd9585038629be41a2da552e0a4473b
|
[
"MIT"
] | 3
|
2019-03-09T02:35:24.000Z
|
2019-09-27T11:05:01.000Z
|
Demo/Demo_gym/envs/atari/__init__.py
|
Remosy/iceHocekeyIRL
|
1ffeaf8a9bd9585038629be41a2da552e0a4473b
|
[
"MIT"
] | null | null | null |
from Demo_gym.envs.atari.atari_env import AtariEnv
| 25.5
| 50
| 0.862745
|
708c258180b22e5708761aa3bd2f16061a7fdbbf
| 265
|
py
|
Python
|
lib/rucio/vcsversion.py
|
gabrielefronze/rucio
|
51fd277f3cf896583b1ad73048d5c57178063129
|
[
"Apache-2.0"
] | null | null | null |
lib/rucio/vcsversion.py
|
gabrielefronze/rucio
|
51fd277f3cf896583b1ad73048d5c57178063129
|
[
"Apache-2.0"
] | null | null | null |
lib/rucio/vcsversion.py
|
gabrielefronze/rucio
|
51fd277f3cf896583b1ad73048d5c57178063129
|
[
"Apache-2.0"
] | 1
|
2021-06-17T14:15:15.000Z
|
2021-06-17T14:15:15.000Z
|
'''
This file is automatically generated; Do not edit it. :)
'''
VERSION_INFO = {
'final': True,
'version': '1.18.7',
'branch_nick': 'patch-0-Rucio_1_18_7_preparation',
'revision_id': '9ffbf2f72d6610ff7c2dea57bef9444fc256f4ae',
'revno': 7366
}
| 22.083333
| 62
| 0.664151
|
be8029a629b72ee794549f7387d444fd199d9c8a
| 1,093
|
py
|
Python
|
setup.py
|
panxuemiao111/THRED
|
58db5e6f3cf316952df3b1a02cfba14dff899e9c
|
[
"MIT"
] | 102
|
2018-11-10T17:18:27.000Z
|
2022-03-22T07:04:08.000Z
|
setup.py
|
panxuemiao111/THRED
|
58db5e6f3cf316952df3b1a02cfba14dff899e9c
|
[
"MIT"
] | 31
|
2019-01-10T08:50:06.000Z
|
2021-11-10T19:39:39.000Z
|
setup.py
|
panxuemiao111/THRED
|
58db5e6f3cf316952df3b1a02cfba14dff899e9c
|
[
"MIT"
] | 27
|
2018-11-10T12:44:31.000Z
|
2022-03-03T09:14:07.000Z
|
from setuptools import find_packages, setup
setup(
name="thred",
version="0.1.2",
author="Nouha Dziri, Ehsan Kamalloo, Kory Mathewson",
author_email="dziri@cs.ualberta.ca",
description="Neural Response Generation Framework",
long_description=open("README.md", "r", encoding='utf-8').read(),
long_description_content_type="text/markdown",
keywords='dialogue-generation sequence-to-sequence tensorflow',
url="https://github.com/nouhadziri/THRED",
packages=find_packages(exclude=["*.tests", "*.tests.*",
"tests.*", "tests"]),
install_requires=['tensorflow_gpu==1.12.2',
'tensorflow-hub==0.2.0',
'spacy>=2.1.0,<2.2.0',
'scipy>=1.0.0,<2.0.0',
'pymagnitude',
'redis',
'PyYAML',
'gensim>=3.4.0',
'mistune>=0.8.0',
'emot==1.0',
'tqdm'],
python_requires='>=3.5.0',
tests_require=['pytest'],
)
| 37.689655
| 69
| 0.509607
|
a4b7e28b787bde396b58a6f8503fd6d9a60b6f49
| 5,569
|
py
|
Python
|
run_scripts_bm/svg_run.py
|
iclavera/meta-mb
|
a1204e573c1415161129403cfb287bf120488fd0
|
[
"MIT"
] | 4
|
2021-01-07T08:22:51.000Z
|
2021-12-27T10:53:14.000Z
|
run_scripts_bm/svg_run.py
|
iclavera/meta-mb
|
a1204e573c1415161129403cfb287bf120488fd0
|
[
"MIT"
] | null | null | null |
run_scripts_bm/svg_run.py
|
iclavera/meta-mb
|
a1204e573c1415161129403cfb287bf120488fd0
|
[
"MIT"
] | null | null | null |
import os
import json
import numpy as np
from experiment_utils.run_sweep import run_sweep
from meta_mb.utils.utils import set_seed, ClassEncoder
from meta_mb.envs.normalized_env import normalize
from meta_mb.algos.svg_1 import SVG1
from meta_mb.trainers.svg_trainer import Trainer
from meta_mb.samplers.sampler import Sampler
from meta_mb.samplers.mbmpo_samplers.mb_sample_processor import ModelSampleProcessor
from meta_mb.policies.gaussian_mlp_policy import GaussianMLPPolicy
from meta_mb.dynamics.probabilistic_mlp_dynamics import ProbMLPDynamics
from meta_mb.baselines.nn_basline import NNValueFun
from meta_mb.logger import logger
from meta_mb.envs.mb_envs import *
INSTANCE_TYPE = 'c4.2xlarge'
EXP_NAME = 'svg'
def run_experiment(**kwargs):
exp_dir = os.getcwd() + '/data/' + EXP_NAME + '/' + kwargs.get('exp_name', '')
logger.configure(dir=exp_dir, format_strs=['stdout', 'log', 'csv'], snapshot_mode='last')
json.dump(kwargs, open(exp_dir + '/params.json', 'w'), indent=2, sort_keys=True, cls=ClassEncoder)
# Instantiate classes
set_seed(kwargs['seed'])
env = normalize(kwargs['env']()) # Wrappers?
baseline = NNValueFun('value-function',
env,
hidden_nonlinearity=kwargs['vfun_hidden_nonlinearity'],
hidden_sizes=kwargs['vfun_hidden_sizes'],
output_nonlinearity=kwargs['vfun_output_nonlinearity'],
learning_rate=kwargs['vfun_learning_rate'],
batch_size=kwargs['vfun_batch_size'],
buffer_size=kwargs['vfun_buffer_size'],
)
policy = GaussianMLPPolicy(
name="policy",
obs_dim=np.prod(env.observation_space.shape),
action_dim=np.prod(env.action_space.shape),
hidden_sizes=kwargs['policy_hidden_sizes'],
learn_std=kwargs['policy_learn_std'],
output_nonlinearity=kwargs['policy_output_nonlinearity'],
)
dynamics_model = ProbMLPDynamics('prob-dynamics',
env=env,
hidden_nonlinearity=kwargs['dyanmics_hidden_nonlinearity'],
hidden_sizes=kwargs['dynamics_hidden_sizes'],
output_nonlinearity=kwargs['dyanmics_output_nonlinearity'],
learning_rate=kwargs['dynamics_learning_rate'],
batch_size=kwargs['dynamics_batch_size'],
buffer_size=kwargs['dynamics_buffer_size'],
)
assert kwargs['num_rollouts'] % kwargs['n_parallel'] == 0
sampler = Sampler(
env=env,
policy=policy,
num_rollouts=kwargs['num_rollouts'],
max_path_length=kwargs['max_path_length'],
n_parallel=kwargs['n_parallel'],
)
sample_processor = ModelSampleProcessor(
baseline=baseline,
discount=kwargs['discount'],
gae_lambda=kwargs['gae_lambda'],
normalize_adv=kwargs['normalize_adv'],
positive_adv=kwargs['positive_adv'],
)
algo = SVG1(
policy=policy,
dynamics_model=dynamics_model,
value_function=baseline,
tf_reward=env.tf_reward,
learning_rate=kwargs['svg_learning_rate'],
num_grad_steps=kwargs['num_rollouts'] * kwargs['max_path_length']//kwargs['svg_batch_size'],
batch_size=kwargs['svg_batch_size'],
discount=kwargs['discount'],
kl_penalty=kwargs['kl_penalty'],
)
trainer = Trainer(
algo=algo,
policy=policy,
env=env,
sampler=sampler,
sample_processor=sample_processor,
dynamics_model=dynamics_model,
value_function=baseline,
n_itr=kwargs['n_itr'],
dynamics_model_max_epochs=kwargs['dynamics_max_epochs'],
vfun_max_epochs=kwargs['vfun_max_epochs'],
)
trainer.train()
if __name__ == '__main__':
params = {
'seed': 1,
'algo': 'svg',
'env': SwimmerEnv,
# Problem Conf
'n_itr': 100,
'max_path_length': 1000,
'discount': 0.99,
'gae_lambda': 1.,
'normalize_adv': True,
'positive_adv': False,
# Env Sampling
'num_rollouts': 4,
'n_parallel': 2, # Parallelized across 2 cores
# Dynamics Model
'dynamics_hidden_sizes': (500, 500),
'dyanmics_hidden_nonlinearity': 'relu',
'dyanmics_output_nonlinearity': None,
'dynamics_max_epochs': 50,
'dynamics_learning_rate': 1e-3,
'dynamics_batch_size': 128,
'dynamics_buffer_size': 25000,
# Value Function
'vfun_hidden_sizes': (400, 200),
'vfun_hidden_nonlinearity': 'relu',
'vfun_output_nonlinearity': None,
'vfun_max_epochs': 50,
'vfun_learning_rate': 5e-4,
'vfun_batch_size': 32,
'vfun_buffer_size': 10000,
# Policy
'policy_hidden_sizes': (100, 100),
'policy_learn_std': True,
'policy_output_nonlinearity': None,
# Algo
'svg_learning_rate': 1e-4, # play with this
'svg_batch_size': 64,
'svg_max_buffer_size': 25000, # play with this
'kl_penalty': 1e-3, # play with this
# Misc
'scope': None,
'exp_tag': '', # For changes besides hyperparams
'exp_name': '', # Add time-stamp here to not overwrite the logging
}
run_experiment(params)
| 33.751515
| 102
| 0.614293
|
62564013f7b7a58d36221bb2cf467e71b99db3d3
| 1,480
|
py
|
Python
|
configs/configs.py
|
kaylode/caption-transformer
|
1572c7f71f2ad5a2fae5b4e2ef26d6858429164d
|
[
"MIT"
] | 8
|
2021-09-02T12:56:26.000Z
|
2022-03-28T08:13:19.000Z
|
configs/configs.py
|
kaylode/caption-transformer
|
1572c7f71f2ad5a2fae5b4e2ef26d6858429164d
|
[
"MIT"
] | null | null | null |
configs/configs.py
|
kaylode/caption-transformer
|
1572c7f71f2ad5a2fae5b4e2ef26d6858429164d
|
[
"MIT"
] | null | null | null |
import yaml
class Config():
"""
Config class for storing all configuration
"""
def __init__(self, yaml_path):
yaml_file = open(yaml_path)
_attr = yaml.load(yaml_file, Loader=yaml.FullLoader)['settings']
for key, value in _attr.items():
self.__dict__[key] = value
def __setattr__(self, name, value):
self.__dict__[name] = value
def __getattr__(self, attr):
try:
return self.__dict__[attr]
except KeyError:
return None
def __str__(self):
print("########## CONFIGURATION INFO ##########")
pretty(self.__dict__)
return '\n'
def to_dict(self):
"""
Export config to dict
"""
out_dict = {}
for k,v in self.__dict__.items():
if v is not None:
out_dict[k] = v
return out_dict
def config_from_dict(_dict, ignore_keys=[]):
"""
Load config from dict
"""
config = Config('./configs/config.yaml')
for k,v in _dict.items():
if k not in ignore_keys:
config.__setattr__(k,v)
return config
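# Editorial usage sketch: config_from_dict always loads './configs/config.yaml'
# as its base, so assuming that file exists,
#     cfg = config_from_dict({'batch_size': 8, 'lr': 1e-4}, ignore_keys=['lr'])
# leaves cfg.lr as whatever the yaml provided (or None) while cfg.batch_size
# becomes 8; attributes never set read back as None through __getattr__.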
def pretty(d, indent=0):
"""
Pretty print the configuration
"""
for key, value in d.items():
print(' ' * indent + str(key) + ':', end='')
if isinstance(value, dict):
print()
pretty(value, indent+1)
else:
print('\t' * (indent+1) + str(value))
| 25.517241
| 72
| 0.525676
|
b9cc7e213106f676328f6c88f575d9b003fe4591
| 55,018
|
py
|
Python
|
env/lib/python3.6/site-packages/pygments/lexers/_mapping.py
|
724686158/NosqlEXP3
|
e29f2807f075831377456b47cf8c9ce0c8d65c30
|
[
"BSD-3-Clause"
] | null | null | null |
env/lib/python3.6/site-packages/pygments/lexers/_mapping.py
|
724686158/NosqlEXP3
|
e29f2807f075831377456b47cf8c9ce0c8d65c30
|
[
"BSD-3-Clause"
] | null | null | null |
env/lib/python3.6/site-packages/pygments/lexers/_mapping.py
|
724686158/NosqlEXP3
|
e29f2807f075831377456b47cf8c9ce0c8d65c30
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
pygments.lexers._mapping
~~~~~~~~~~~~~~~~~~~~~~~~
Lexer mapping definitions. This file is generated by itself. Everytime
you change something on a builtin lexer definition, run this script from
the lexers folder to update it.
Do not alter the LEXERS dictionary by hand.
:copyright: Copyright 2006-2014, 2016 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
LEXERS = {
'ABAPLexer': ('pygments.lexers.business', 'ABAP', ('abap',), ('*.abap', '*.ABAP'), ('text/x-abap',)),
'APLLexer': ('pygments.lexers.apl', 'APL', ('apl',), ('*.apl',), ()),
'AbnfLexer': ('pygments.lexers.grammar_notation', 'ABNF', ('abnf',), ('*.abnf',), ('text/x-abnf',)),
'ActionScript3Lexer': ('pygments.lexers.actionscript', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
'ActionScriptLexer': ('pygments.lexers.actionscript', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
'AdaLexer': ('pygments.lexers.pascal', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
'AdlLexer': ('pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()),
'AgdaLexer': ('pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
'AheuiLexer': ('pygments.lexers.esoteric', 'Aheui', ('aheui',), ('*.aheui',), ()),
'AlloyLexer': ('pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)),
'AmbientTalkLexer': ('pygments.lexers.ambient', 'AmbientTalk', ('at', 'ambienttalk', 'ambienttalk/2'), ('*.at',), ('text/x-ambienttalk',)),
'AmplLexer': ('pygments.lexers.ampl', 'Ampl', ('ampl',), ('*.run',), ()),
'Angular2HtmlLexer': ('pygments.lexers.templates', 'HTML + Angular2', ('html+ng2',), ('*.ng2',), ()),
'Angular2Lexer': ('pygments.lexers.templates', 'Angular2', ('ng2',), (), ()),
'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()),
'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
'ApacheConfLexer': ('pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
'AppleScriptLexer': ('pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()),
'ArduinoLexer': ('pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)),
'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
'AsymptoteLexer': ('pygments.lexers.graphics', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
'AutoItLexer': ('pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
'AutohotkeyLexer': ('pygments.lexers.automation', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
'AwkLexer': ('pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
'BBCodeLexer': ('pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
'BCLexer': ('pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()),
'BSTLexer': ('pygments.lexers.bibtex', 'BST', ('bst', 'bst-pybtex'), ('*.bst',), ()),
'BaseMakefileLexer': ('pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()),
'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'zsh', 'shell'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '*.exheres-0', '*.exlib', '*.zsh', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript')),
'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console', 'shell-session'), ('*.sh-session', '*.shell-session'), ('application/x-shell-session', 'application/x-sh-session')),
'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat', 'batch', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
'BefungeLexer': ('pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
'BibTeXLexer': ('pygments.lexers.bibtex', 'BibTeX', ('bib', 'bibtex'), ('*.bib',), ('text/x-bibtex',)),
'BlitzBasicLexer': ('pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
'BlitzMaxLexer': ('pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
'BnfLexer': ('pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)),
'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
'BoogieLexer': ('pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()),
'BrainfuckLexer': ('pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
'BroLexer': ('pygments.lexers.dsls', 'Bro', ('bro',), ('*.bro',), ()),
'BugsLexer': ('pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
'CAmkESLexer': ('pygments.lexers.esoteric', 'CAmkES', ('camkes', 'idl4'), ('*.camkes', '*.idl4'), ()),
'CLexer': ('pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc'), ('text/x-chdr', 'text/x-csrc')),
'CMakeLexer': ('pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
'CPSALexer': ('pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()),
'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#'), ('*.cs',), ('text/x-csharp',)),
'Ca65Lexer': ('pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()),
'CadlLexer': ('pygments.lexers.archetype', 'cADL', ('cadl',), ('*.cadl',), ()),
'CapDLLexer': ('pygments.lexers.esoteric', 'CapDL', ('capdl',), ('*.cdl',), ()),
'CapnProtoLexer': ('pygments.lexers.capnproto', "Cap'n Proto", ('capnp',), ('*.capnp',), ()),
'CbmBasicV2Lexer': ('pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
'Cfengine3Lexer': ('pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
'ChaiscriptLexer': ('pygments.lexers.scripting', 'ChaiScript', ('chai', 'chaiscript'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')),
'ChapelLexer': ('pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()),
'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
'CirruLexer': ('pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
'ClayLexer': ('pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
'CleanLexer': ('pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()),
'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
'ClojureScriptLexer': ('pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
'CobolFreeformatLexer': ('pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
'CobolLexer': ('pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
'CoffeeScriptLexer': ('pygments.lexers.javascript', 'CoffeeScript', ('coffee-script', 'coffeescript', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
'ColdfusionCFCLexer': ('pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
'CommonLispLexer': ('pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)),
'ComponentPascalLexer': ('pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)),
'CoqLexer': ('pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
'CppLexer': ('pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
'CrmshLexer': ('pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()),
'CrocLexer': ('pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
'CryptolLexer': ('pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)),
'CrystalLexer': ('pygments.lexers.crystal', 'Crystal', ('cr', 'crystal'), ('*.cr',), ('text/x-crystal',)),
'CsoundDocumentLexer': ('pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()),
'CsoundOrchestraLexer': ('pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc',), ()),
'CsoundScoreLexer': ('pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()),
'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')),
'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+erb', 'css+ruby'), (), ('text/css+ruby',)),
'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
'CssLexer': ('pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)),
'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
'CudaLexer': ('pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
'CypherLexer': ('pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()),
'CythonLexer': ('pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
'DLexer': ('pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
'DarcsPatchLexer': ('pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
'DartLexer': ('pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
'DebianControlLexer': ('pygments.lexers.installers', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()),
'DelphiLexer': ('pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)),
'DgLexer': ('pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
'DiffLexer': ('pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
'DockerLexer': ('pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)),
'DtdLexer': ('pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
'DuelLexer': ('pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
'DylanConsoleLexer': ('pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
'DylanLexer': ('pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
'DylanLidLexer': ('pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
'ECLLexer': ('pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
'ECLexer': ('pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
'EarlGreyLexer': ('pygments.lexers.javascript', 'Earl Grey', ('earl-grey', 'earlgrey', 'eg'), ('*.eg',), ('text/x-earl-grey',)),
'EasytrieveLexer': ('pygments.lexers.scripting', 'Easytrieve', ('easytrieve',), ('*.ezt', '*.mac'), ('text/x-easytrieve',)),
'EbnfLexer': ('pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
'EiffelLexer': ('pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)),
'ElixirConsoleLexer': ('pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
'ElixirLexer': ('pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.exs'), ('text/x-elixir',)),
'ElmLexer': ('pygments.lexers.elm', 'Elm', ('elm',), ('*.elm',), ('text/x-elm',)),
'EmacsLispLexer': ('pygments.lexers.lisp', 'EmacsLisp', ('emacs', 'elisp', 'emacs-lisp'), ('*.el',), ('text/x-elisp', 'application/x-elisp')),
'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
'ErlangLexer': ('pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
'ErlangShellLexer': ('pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
'EzhilLexer': ('pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)),
'FSharpLexer': ('pygments.lexers.dotnet', 'FSharp', ('fsharp',), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
'FactorLexer': ('pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
'FancyLexer': ('pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
'FantomLexer': ('pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
'FelixLexer': ('pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
'FennelLexer': ('pygments.lexers.lisp', 'Fennel', ('fennel', 'fnl'), ('*.fnl',), ()),
'FishShellLexer': ('pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)),
'FlatlineLexer': ('pygments.lexers.dsls', 'Flatline', ('flatline',), (), ('text/x-flatline',)),
'ForthLexer': ('pygments.lexers.forth', 'Forth', ('forth',), ('*.frt', '*.fs'), ('application/x-forth',)),
'FortranFixedLexer': ('pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()),
'FortranLexer': ('pygments.lexers.fortran', 'Fortran', ('fortran',), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)),
'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
'GAPLexer': ('pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
'GLShaderLexer': ('pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
'GettextLexer': ('pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
'GherkinLexer': ('pygments.lexers.testing', 'Gherkin', ('cucumber', 'gherkin'), ('*.feature',), ('text/x-gherkin',)),
'GnuplotLexer': ('pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
'GoLexer': ('pygments.lexers.go', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)),
'GoloLexer': ('pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()),
'GoodDataCLLexer': ('pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
'GroffLexer': ('pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')),
'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)),
'HLSLShaderLexer': ('pygments.lexers.graphics', 'HLSL', ('hlsl',), ('*.hlsl', '*.hlsli'), ('text/x-hlsl',)),
'HamlLexer': ('pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
'HandlebarsHtmlLexer': ('pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')),
'HandlebarsLexer': ('pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()),
'HaskellLexer': ('pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
'HaxeLexer': ('pygments.lexers.haxe', 'Haxe', ('hx', 'haxe', 'hxsl'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
'HexdumpLexer': ('pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()),
'HsailLexer': ('pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)),
'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
'HttpLexer': ('pygments.lexers.textfmts', 'HTTP', ('http',), (), ()),
'HxmlLexer': ('pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
'HyLexer': ('pygments.lexers.lisp', 'Hy', ('hylang',), ('*.hy',), ('text/x-hy', 'application/x-hy')),
'HybrisLexer': ('pygments.lexers.scripting', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
'IDLLexer': ('pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
'IdrisLexer': ('pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)),
'IgorLexer': ('pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
'Inform6Lexer': ('pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()),
'Inform6TemplateLexer': ('pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()),
'Inform7Lexer': ('pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()),
'IniLexer': ('pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg', '*.inf'), ('text/x-ini', 'text/inf')),
'IoLexer': ('pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
'IrcLogsLexer': ('pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
'IsabelleLexer': ('pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)),
'JLexer': ('pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)),
'JagsLexer': ('pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
'JasminLexer': ('pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()),
'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('js+django', 'javascript+django', 'js+jinja', 'javascript+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
'JavascriptLexer': ('pygments.lexers.javascript', 'JavaScript', ('js', 'javascript'), ('*.js', '*.jsm'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('js+php', 'javascript+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('js+smarty', 'javascript+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
'JclLexer': ('pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)),
'JsgfLexer': ('pygments.lexers.grammar_notation', 'JSGF', ('jsgf',), ('*.jsgf',), ('application/jsgf', 'application/x-jsgf', 'text/jsgf')),
'JsonBareObjectLexer': ('pygments.lexers.data', 'JSONBareObject', ('json-object',), (), ('application/json-object',)),
'JsonLdLexer': ('pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)),
'JsonLexer': ('pygments.lexers.data', 'JSON', ('json',), ('*.json',), ('application/json',)),
'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
'JuliaConsoleLexer': ('pygments.lexers.julia', 'Julia console', ('jlcon',), (), ()),
'JuliaLexer': ('pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
'JuttleLexer': ('pygments.lexers.javascript', 'Juttle', ('juttle', 'juttle'), ('*.juttle',), ('application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle')),
'KalLexer': ('pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
'KconfigLexer': ('pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
'KokaLexer': ('pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt',), ('text/x-kotlin',)),
'LSLLexer': ('pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)),
'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('js+lasso', 'javascript+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
'LassoLexer': ('pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
'LeanLexer': ('pygments.lexers.theorem', 'Lean', ('lean',), ('*.lean',), ('text/x-lean',)),
'LessCssLexer': ('pygments.lexers.css', 'LessCss', ('less',), ('*.less',), ('text/x-less-css',)),
'LighttpdConfLexer': ('pygments.lexers.configs', 'Lighttpd configuration file', ('lighty', 'lighttpd'), (), ('text/x-lighttpd-conf',)),
'LimboLexer': ('pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)),
'LiquidLexer': ('pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()),
'LiterateAgdaLexer': ('pygments.lexers.haskell', 'Literate Agda', ('lagda', 'literate-agda'), ('*.lagda',), ('text/x-literate-agda',)),
'LiterateCryptolLexer': ('pygments.lexers.haskell', 'Literate Cryptol', ('lcry', 'literate-cryptol', 'lcryptol'), ('*.lcry',), ('text/x-literate-cryptol',)),
'LiterateHaskellLexer': ('pygments.lexers.haskell', 'Literate Haskell', ('lhs', 'literate-haskell', 'lhaskell'), ('*.lhs',), ('text/x-literate-haskell',)),
'LiterateIdrisLexer': ('pygments.lexers.haskell', 'Literate Idris', ('lidr', 'literate-idris', 'lidris'), ('*.lidr',), ('text/x-literate-idris',)),
'LiveScriptLexer': ('pygments.lexers.javascript', 'LiveScript', ('live-script', 'livescript'), ('*.ls',), ('text/livescript',)),
'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
'LogosLexer': ('pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
'LogtalkLexer': ('pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
'LuaLexer': ('pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
'MOOCodeLexer': ('pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
'MSDOSSessionLexer': ('pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()),
'MakefileLexer': ('pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('js+mako', 'javascript+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
'MaqlLexer': ('pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
'MarkdownLexer': ('pygments.lexers.markup', 'markdown', ('md',), ('*.md',), ('text/x-markdown',)),
'MaskLexer': ('pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)),
'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
'MathematicaLexer': ('pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')),
'MatlabLexer': ('pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
'MatlabSessionLexer': ('pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()),
'MiniDLexer': ('pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)),
'ModelicaLexer': ('pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
'Modula2Lexer': ('pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
'MoinWikiLexer': ('pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
'MonkeyLexer': ('pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
'MonteLexer': ('pygments.lexers.monte', 'Monte', ('monte',), ('*.mt',), ()),
'MoonScriptLexer': ('pygments.lexers.scripting', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
'MozPreprocCssLexer': ('pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()),
'MozPreprocHashLexer': ('pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()),
'MozPreprocJavascriptLexer': ('pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()),
'MozPreprocPercentLexer': ('pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()),
'MozPreprocXulLexer': ('pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()),
'MqlLexer': ('pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)),
'MscgenLexer': ('pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
'MuPADLexer': ('pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()),
'MxmlLexer': ('pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()),
'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('js+myghty', 'javascript+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
'NCLLexer': ('pygments.lexers.ncl', 'NCL', ('ncl',), ('*.ncl',), ('text/ncl',)),
'NSISLexer': ('pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
'NasmObjdumpLexer': ('pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
'NesCLexer': ('pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
'NewLispLexer': ('pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl', '*.kif'), ('text/x-newlisp', 'application/x-newlisp')),
'NewspeakLexer': ('pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
'NginxConfLexer': ('pygments.lexers.configs', 'Nginx configuration file', ('nginx',), ('nginx.conf',), ('text/x-nginx-conf',)),
'NimrodLexer': ('pygments.lexers.nimrod', 'Nimrod', ('nim', 'nimrod'), ('*.nim', '*.nimrod'), ('text/x-nim',)),
'NitLexer': ('pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()),
'NixLexer': ('pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)),
'NuSMVLexer': ('pygments.lexers.smv', 'NuSMV', ('nusmv',), ('*.smv',), ()),
'NumPyLexer': ('pygments.lexers.python', 'NumPy', ('numpy',), (), ()),
'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
'ObjectiveCLexer': ('pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
'ObjectiveCppLexer': ('pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
'ObjectiveJLexer': ('pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
'OcamlLexer': ('pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
'OctaveLexer': ('pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
'OdinLexer': ('pygments.lexers.archetype', 'ODIN', ('odin',), ('*.odin',), ('text/odin',)),
'OocLexer': ('pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
'OpaLexer': ('pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
'OpenEdgeLexer': ('pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
'PacmanConfLexer': ('pygments.lexers.configs', 'PacmanConf', ('pacmanconf',), ('pacman.conf',), ()),
'PanLexer': ('pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()),
'ParaSailLexer': ('pygments.lexers.parasail', 'ParaSail', ('parasail',), ('*.psi', '*.psl'), ('text/x-parasail',)),
'PawnLexer': ('pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)),
'Perl6Lexer': ('pygments.lexers.perl', 'Perl6', ('perl6', 'pl6'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t'), ('text/x-perl6', 'application/x-perl6')),
'PerlLexer': ('pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t'), ('text/x-perl', 'application/x-perl')),
'PhpLexer': ('pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
'PigLexer': ('pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
'PikeLexer': ('pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
'PkgConfigLexer': ('pygments.lexers.configs', 'PkgConfig', ('pkgconfig',), ('*.pc',), ()),
'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
'PostScriptLexer': ('pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
'PovrayLexer': ('pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
'PowerShellSessionLexer': ('pygments.lexers.shell', 'PowerShell Session', ('ps1con',), (), ()),
'PraatLexer': ('pygments.lexers.praat', 'Praat', ('praat',), ('*.praat', '*.proc', '*.psc'), ()),
'PrologLexer': ('pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
'PropertiesLexer': ('pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
'ProtoBufLexer': ('pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
'PugLexer': ('pygments.lexers.html', 'Pug', ('pug', 'jade'), ('*.pug', '*.jade'), ('text/x-pug', 'text/x-jade')),
'PuppetLexer': ('pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()),
'PyPyLogLexer': ('pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
'Python3Lexer': ('pygments.lexers.python', 'Python 3', ('python3', 'py3'), (), ('text/x-python3', 'application/x-python3')),
'Python3TracebackLexer': ('pygments.lexers.python', 'Python 3.0 Traceback', ('py3tb',), ('*.py3tb',), ('text/x-python3-traceback',)),
'PythonConsoleLexer': ('pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
'PythonLexer': ('pygments.lexers.python', 'Python', ('python', 'py', 'sage'), ('*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage'), ('text/x-python', 'application/x-python')),
'PythonTracebackLexer': ('pygments.lexers.python', 'Python Traceback', ('pytb',), ('*.pytb',), ('text/x-python-traceback',)),
'QBasicLexer': ('pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),
'QVToLexer': ('pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()),
'QmlLexer': ('pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')),
'RConsoleLexer': ('pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
'RNCCompactLexer': ('pygments.lexers.rnc', 'Relax-NG Compact', ('rnc', 'rng-compact'), ('*.rnc',), ()),
'RPMSpecLexer': ('pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
'RacketLexer': ('pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')),
'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', ('raw',), (), ('application/x-pygments-tokens',)),
'RdLexer': ('pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
'RebolLexer': ('pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)),
'RedLexer': ('pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')),
'RedcodeLexer': ('pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()),
'RegeditLexer': ('pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
'ResourceLexer': ('pygments.lexers.resource', 'ResourceBundle', ('resource', 'resourcebundle'), ('*.txt',), ()),
'RexxLexer': ('pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
'RoboconfGraphLexer': ('pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()),
'RoboconfInstancesLexer': ('pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()),
'RobotFrameworkLexer': ('pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.txt', '*.robot'), ('text/x-robotframework',)),
'RqlLexer': ('pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)),
'RslLexer': ('pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)),
'RstLexer': ('pygments.lexers.markup', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
'RtsLexer': ('pygments.lexers.trafficscript', 'TrafficScript', ('rts', 'trafficscript'), ('*.rts',), ()),
'RubyConsoleLexer': ('pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
'RubyLexer': ('pygments.lexers.ruby', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile'), ('text/x-ruby', 'application/x-ruby')),
'RustLexer': ('pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust',)),
'SASLexer': ('pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')),
'SLexer': ('pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
'SassLexer': ('pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
'ScamlLexer': ('pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)),
'SchemeLexer': ('pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
'ScilabLexer': ('pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
'ScssLexer': ('pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
'ShenLexer': ('pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')),
'SilverLexer': ('pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()),
'SlimLexer': ('pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
'SmalltalkLexer': ('pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
'SnobolLexer': ('pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
'SnowballLexer': ('pygments.lexers.dsls', 'Snowball', ('snowball',), ('*.sbl',), ()),
'SourcePawnLexer': ('pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
'SourcesListLexer': ('pygments.lexers.installers', 'Debian Sourcelist', ('sourceslist', 'sources.list', 'debsources'), ('sources.list',), ()),
'SparqlLexer': ('pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)),
'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
'SquidConfLexer': ('pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
'StanLexer': ('pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()),
'StataLexer': ('pygments.lexers.stata', 'Stata', ('stata', 'do'), ('*.do', '*.ado'), ('text/x-stata', 'text/stata', 'application/x-stata')),
'SuperColliderLexer': ('pygments.lexers.supercollider', 'SuperCollider', ('sc', 'supercollider'), ('*.sc', '*.scd'), ('application/supercollider', 'text/supercollider')),
'SwiftLexer': ('pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)),
'SwigLexer': ('pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
'TAPLexer': ('pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()),
'Tads3Lexer': ('pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
'TasmLexer': ('pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)),
'TclLexer': ('pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
'TcshSessionLexer': ('pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()),
'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
'TermcapLexer': ('pygments.lexers.configs', 'Termcap', ('termcap',), ('termcap', 'termcap.src'), ()),
'TerminfoLexer': ('pygments.lexers.configs', 'Terminfo', ('terminfo',), ('terminfo', 'terminfo.src'), ()),
'TerraformLexer': ('pygments.lexers.configs', 'Terraform', ('terraform', 'tf'), ('*.tf',), ('application/x-tf', 'application/x-terraform')),
'TexLexer': ('pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
'ThriftLexer': ('pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)),
'TodotxtLexer': ('pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
'TransactSqlLexer': ('pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)),
'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
'TurtleLexer': ('pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')),
'TwigHtmlLexer': ('pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)),
'TwigLexer': ('pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)),
'TypeScriptLexer': ('pygments.lexers.javascript', 'TypeScript', ('ts', 'typescript'), ('*.ts', '*.tsx'), ('text/x-typescript',)),
'TypoScriptCssDataLexer': ('pygments.lexers.typoscript', 'TypoScriptCssData', ('typoscriptcssdata',), (), ()),
'TypoScriptHtmlDataLexer': ('pygments.lexers.typoscript', 'TypoScriptHtmlData', ('typoscripthtmldata',), (), ()),
'TypoScriptLexer': ('pygments.lexers.typoscript', 'TypoScript', ('typoscript',), ('*.ts', '*.txt'), ('text/x-typoscript',)),
'UrbiscriptLexer': ('pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
'VCLLexer': ('pygments.lexers.varnish', 'VCL', ('vcl',), ('*.vcl',), ('text/x-vclsrc',)),
'VCLSnippetLexer': ('pygments.lexers.varnish', 'VCLSnippets', ('vclsnippets', 'vclsnippet'), (), ('text/x-vclsnippet',)),
'VCTreeStatusLexer': ('pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()),
'VGLLexer': ('pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()),
'ValaLexer': ('pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
'VimLexer': ('pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
'WDiffLexer': ('pygments.lexers.diff', 'WDiff', ('wdiff',), ('*.wdiff',), ()),
'WhileyLexer': ('pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)),
'X10Lexer': ('pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)),
'XQueryLexer': ('pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')),
'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+erb', 'xml+ruby'), (), ('application/xml+ruby',)),
'XmlLexer': ('pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
'XorgLexer': ('pygments.lexers.xorg', 'Xorg', ('xorg.conf',), ('xorg.conf',), ()),
'XsltLexer': ('pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
'XtlangLexer': ('pygments.lexers.lisp', 'xtlang', ('extempore',), ('*.xtm',), ()),
'YamlJinjaLexer': ('pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls',), ('text/x-yaml+jinja', 'text/x-sls')),
'YamlLexer': ('pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
'ZephirLexer': ('pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()),
}
if __name__ == '__main__': # pragma: no cover
import sys
import os
# lookup lexers
found_lexers = []
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
for root, dirs, files in os.walk('.'):
for filename in files:
if filename.endswith('.py') and not filename.startswith('_'):
module_name = 'pygments.lexers%s.%s' % (
root[1:].replace('/', '.'), filename[:-3])
print(module_name)
module = __import__(module_name, None, None, [''])
for lexer_name in module.__all__:
lexer = getattr(module, lexer_name)
found_lexers.append(
'%r: %r' % (lexer_name,
(module_name,
lexer.name,
tuple(lexer.aliases),
tuple(lexer.filenames),
tuple(lexer.mimetypes))))
# sort them to make the diff minimal
found_lexers.sort()
# extract useful sourcecode from this file
with open(__file__) as fp:
content = fp.read()
    # replace \r\n with \n for Windows.
    #
    # Note that contributors should keep the \n line endings of the master
    # repository, e.g. for projects using some kind of automatic EOL
    # management, like `EolExtension
    # <https://www.mercurial-scm.org/wiki/EolExtension>`.
content = content.replace("\r\n", "\n")
header = content[:content.find('LEXERS = {')]
footer = content[content.find("if __name__ == '__main__':"):]
# write new file
with open(__file__, 'w') as fp:
fp.write(header)
fp.write('LEXERS = {\n %s,\n}\n\n' % ',\n '.join(found_lexers))
fp.write(footer)
    print('=== %d lexers processed.' % len(found_lexers))
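# Illustrative only, not part of the original _mapping.py: the LEXERS table
# above is what the public pygments helpers consult when resolving a lexer
# by alias or by filename, for example:
#
#     from pygments.lexers import get_lexer_by_name, get_lexer_for_filename
#
#     lexer = get_lexer_by_name('python3')          # -> Python3Lexer
#     lexer = get_lexer_for_filename('Makefile')    # -> MakefileLexer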
| 109.162698
| 351
| 0.580083
|
df8ffea3179f6be18d8288647e231673a9768393
| 1,951
|
py
|
Python
|
workouts/integration_examples/keister.py
|
fjhickernell/QMCSoftware
|
1e8f080173f4cdc3adfe071b5c53d49592f61956
|
[
"Apache-2.0"
] | 40
|
2019-09-15T03:31:17.000Z
|
2022-02-19T19:52:10.000Z
|
workouts/integration_examples/keister.py
|
fjhickernell/QMCSoftware
|
1e8f080173f4cdc3adfe071b5c53d49592f61956
|
[
"Apache-2.0"
] | 152
|
2019-10-06T17:26:02.000Z
|
2022-03-01T04:17:04.000Z
|
workouts/integration_examples/keister.py
|
fjhickernell/QMCSoftware
|
1e8f080173f4cdc3adfe071b5c53d49592f61956
|
[
"Apache-2.0"
] | 16
|
2019-09-17T23:33:48.000Z
|
2021-07-19T22:38:45.000Z
|
"""
Keister example
python workouts/integration_examples/keister.py > outputs/integration_examples/keister.log
"""
from qmcpy import *
from copy import deepcopy
bar = '\n'+'~'*100+'\n'
def keister(dimension=3, abs_tol=.5):
print(bar)
# CubMCCLT
discrete_distrib = IIDStdUniform(dimension, seed=7)
integrand = Keister(discrete_distrib)
stopping_criterion = CubMCCLT(integrand,abs_tol=abs_tol)
solution,data = stopping_criterion.integrate()
print('%s%s'%(data,bar))
# CubMCG
discrete_distrib = IIDStdUniform(dimension, seed=7)
integrand = Keister(discrete_distrib)
solution,data = CubMCG(integrand,abs_tol=abs_tol).integrate()
print('%s%s'%(data,bar))
# CubQMCCLT
discrete_distrib = Lattice(dimension, randomize=True, seed=7)
integrand = Keister(discrete_distrib)
solution,data = CubQMCCLT(integrand,abs_tol=abs_tol).integrate()
print('%s%s'%(data,bar))
# CubQMCLatticeG
discrete_distrib = Lattice(dimension=dimension, randomize=True, seed=7)
integrand = Keister(discrete_distrib)
solution,data = CubQMCLatticeG(integrand,abs_tol=abs_tol).integrate()
print('%s%s'%(data,bar))
# CubQMCSobolG
discrete_distrib = Sobol(dimension=dimension, randomize=True, seed=7)
integrand = Keister(discrete_distrib)
solution,data = CubQMCSobolG(integrand,abs_tol=abs_tol).integrate()
print('%s%s'%(data,bar))
# CubBayesLatticeG
discrete_distrib = Lattice(dimension=dimension, order='linear', randomize=True)
integrand = Keister(discrete_distrib)
solution,data = CubBayesLatticeG(integrand,abs_tol=abs_tol).integrate()
print('%s%s'%(data,bar))
# CubBayesNetG
discrete_distrib = Sobol(dimension=dimension, graycode=False)
integrand = Keister(discrete_distrib)
solution, data = CubBayesNetG(integrand, abs_tol=abs_tol).integrate()
print('%s%s' % (data, bar))
if __name__ == "__main__":
keister(abs_tol=.0025)
| 33.067797
| 91
| 0.717581
|
27c4860c5efe5cc9878765d162d4e2fd9f52750f
| 251
|
py
|
Python
|
manage.py
|
dmkent/cattrack
|
2387f72d7a384638731a70b377826562f4f22a31
|
[
"MIT"
] | null | null | null |
manage.py
|
dmkent/cattrack
|
2387f72d7a384638731a70b377826562f4f22a31
|
[
"MIT"
] | 11
|
2019-10-06T10:48:19.000Z
|
2022-02-10T08:25:05.000Z
|
manage.py
|
dmkent/cattrack
|
2387f72d7a384638731a70b377826562f4f22a31
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cattrack.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 22.818182
| 72
| 0.772908
|
71ffd7c745532a9bc0935f80072964eed7f9c49a
| 1,098
|
py
|
Python
|
src/enocean_packet_factory.py
|
rosenloecher-it/enocean-mqtt-bridge
|
d56e41a1a67e70bdeb1aa46d10f48ed5a12ca59c
|
[
"MIT"
] | 1
|
2020-12-01T17:10:14.000Z
|
2020-12-01T17:10:14.000Z
|
src/enocean_packet_factory.py
|
rosenloecher-it/enocean-mqtt-bridge
|
d56e41a1a67e70bdeb1aa46d10f48ed5a12ca59c
|
[
"MIT"
] | 1
|
2021-09-19T13:38:02.000Z
|
2021-09-19T13:38:02.000Z
|
src/enocean_packet_factory.py
|
rosenloecher-it/enocean-mqtt-bridge
|
d56e41a1a67e70bdeb1aa46d10f48ed5a12ca59c
|
[
"MIT"
] | null | null | null |
import copy
from enocean.protocol.packet import RadioPacket
from src.common.eep import Eep
from src.tools.enocean_tools import EnoceanTools
class EnoceanPacketFactory:
_sender_id = None
@classmethod
def set_sender_id(cls, sender_id):
if type(sender_id) == int:
cls._sender_id = EnoceanTools.int_to_byte_list(sender_id)
else:
cls._sender_id = copy.deepcopy(sender_id)
@classmethod
def create_packet(cls, eep: Eep, destination=None, sender=None, learn=False, **kwargs):
destination_id = destination or 0xffffffff
if type(destination_id) == int:
destination_id = EnoceanTools.int_to_byte_list(destination_id)
sender_id = sender or cls._sender_id
if type(sender_id) == int:
sender_id = EnoceanTools.int_to_byte_list(sender_id)
return RadioPacket.create(
eep.rorg, eep.func, eep.type, direction=eep.direction, command=eep.command,
destination=destination_id,
sender=sender_id,
learn=learn,
**kwargs
)
| 28.894737
| 91
| 0.663934
|
ca21f29b752aa5a4cc0b73e535ae818a563a22e6
| 194
|
py
|
Python
|
awtg/api/wrapper.py
|
kvxmmu/aw
|
134603864f624075bc7b06876b2df3386bbeef2d
|
[
"BSD-3-Clause"
] | 2
|
2020-05-21T11:45:16.000Z
|
2020-07-28T16:35:38.000Z
|
awtg/api/wrapper.py
|
kvxmmu/aw
|
134603864f624075bc7b06876b2df3386bbeef2d
|
[
"BSD-3-Clause"
] | 1
|
2020-05-20T09:35:19.000Z
|
2020-05-20T09:35:19.000Z
|
awtg/api/wrapper.py
|
kvxmmu/aw
|
134603864f624075bc7b06876b2df3386bbeef2d
|
[
"BSD-3-Clause"
] | 2
|
2020-05-19T17:39:25.000Z
|
2020-07-01T15:05:06.000Z
|
from ..abstract.api import AbstractApi
class ApiWrapper:
""" Telegram api methods for humans """
api: AbstractApi
def __init__(self, api: AbstractApi):
self.api = api
| 13.857143
| 43
| 0.654639
|
cb8d808a28bc03e4576721f9f5dad90a064bdebd
| 1,364
|
py
|
Python
|
Python/PyEx/hashtable/linearEx.py
|
zionhan/TIL
|
2b74bf3f977ead3432bde64e9826f505af58de26
|
[
"MIT"
] | 1
|
2019-12-28T11:03:27.000Z
|
2019-12-28T11:03:27.000Z
|
Python/PyEx/hashtable/linearEx.py
|
zionhan/TIL
|
2b74bf3f977ead3432bde64e9826f505af58de26
|
[
"MIT"
] | null | null | null |
Python/PyEx/hashtable/linearEx.py
|
zionhan/TIL
|
2b74bf3f977ead3432bde64e9826f505af58de26
|
[
"MIT"
] | null | null | null |
# Linear probing technique
hash_table = list( [ 0 for i in range( 8 ) ] )
def get_key( data ):
return hash( data )
def hash_func( key ):
return key % 8
def save_data( data, value ):
index_key = get_key( data )
hash_address = hash_func( index_key )
if ( hash_table[hash_address] != 0 ) :
for index in range( hash_address, len( hash_table ) ) :
if hash_table[index] == 0 :
hash_table[index] = [index_key, value]
return
elif hash_table[index][0] == index_key :
hash_table[index][1] = value
return
else :
hash_table[hash_address] = [index_key, value]
def read_data( data ):
index_key = get_key( data )
hash_address = hash_func( index_key )
if hash_table[hash_address] != 0 :
for index in range( hash_address, len( hash_table ) ) :
if hash_table[index] == 0 :
return None
elif ( hash_table[index][0] == index_key ):
return hash_table[index][1]
return None
else :
return None
save_data( "Dave", "1220123123" )
save_data( "Dd", "1231234123" )
save_data( "Data", "999999" )
print( hash_table )
print( read_data( "Dave" ) )
print( read_data( "Dd" ) )
print( read_data( "Data" ) )
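# Note: Python 3 randomizes str hashes per process (PYTHONHASHSEED), so the
# slot layout printed above differs between runs; lookups within one run
# still succeed. Also, the probe loop only scans from hash_address to the end
# of the table and never wraps around to index 0, so save_data silently drops
# an item once every slot after its home position is occupied.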
| 25.259259
| 63
| 0.545455
|
79e1d84a8cbc52c4ae3edc2577970d3f3607fccb
| 2,806
|
py
|
Python
|
train.py
|
kk2487/3dresnet
|
d7161a70ed6c2f8dcbe89f9b6bad2ef6cc5b5d94
|
[
"MIT"
] | null | null | null |
train.py
|
kk2487/3dresnet
|
d7161a70ed6c2f8dcbe89f9b6bad2ef6cc5b5d94
|
[
"MIT"
] | null | null | null |
train.py
|
kk2487/3dresnet
|
d7161a70ed6c2f8dcbe89f9b6bad2ef6cc5b5d94
|
[
"MIT"
] | null | null | null |
import torch
from torch.autograd import Variable
import time
import os
import sys
from apex import amp
from utils import AverageMeter, calculate_accuracy
import numpy as np
def train_epoch(epoch, data_loader, model, criterion, optimizer, opt,
epoch_logger, batch_logger):
print('train at epoch {}'.format(epoch))
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
accuracies = AverageMeter()
end_time = time.time()
for i, (inputs, targets) in enumerate(data_loader):
data_time.update(time.time() - end_time)
if not opt.no_cuda:
#targets = targets.cuda(async=True)
targets = targets.cuda()
inputs = Variable(inputs).cuda()
targets = Variable(targets).cuda()
        # original code: forward pass, loss and accuracy
outputs = model(inputs)
loss = criterion(outputs, targets)
acc = calculate_accuracy(outputs, targets)
#losses.update(loss.data[0], inputs.size(0))
losses.update(loss.item(), inputs.size(0))
accuracies.update(acc, inputs.size(0))
optimizer.zero_grad()
if opt.use_mix:
print('oh')
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
#loss.backward()
optimizer.step()
batch_time.update(time.time() - end_time)
end_time = time.time()
batch_logger.log({
'epoch': epoch,
'batch': i + 1,
'iter': (epoch - 1) * len(data_loader) + (i + 1),
'loss': losses.val,
'acc': accuracies.val,
'lr': optimizer.param_groups[0]['lr']
})
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc {acc.val:.3f} ({acc.avg:.3f})'.format(
epoch,
i + 1,
len(data_loader),
batch_time=batch_time,
data_time=data_time,
loss=losses,
acc=accuracies))
epoch_logger.log({
'epoch': epoch,
'loss': losses.avg,
'acc': accuracies.avg,
'lr': optimizer.param_groups[0]['lr']
})
if epoch % opt.checkpoint == 0:
save_file_path = os.path.join(opt.result_path,
'save_{}.pth'.format(epoch))
states = {
'epoch': epoch,
'arch': opt.arch,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
torch.save(states, save_file_path)
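# Minimal sketch (an assumption, not code from this repository): amp.scale_loss
# above only works if the caller has first wrapped the model and optimizer with
# apex amp before invoking train_epoch, roughly along these lines (opt_level is
# a placeholder choice):
#
#     from apex import amp
#     model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
#     train_epoch(epoch, train_loader, model, criterion, optimizer, opt,
#                 epoch_logger, batch_logger)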
| 30.172043
| 69
| 0.535638
|
d114365d58606a80b2f5da53d1514a2f378b7330
| 769
|
py
|
Python
|
bucket/bucket.py
|
gcp825/public
|
a4c102b4a52611f59500f9dfbe61376a76a23014
|
[
"Apache-2.0"
] | null | null | null |
bucket/bucket.py
|
gcp825/public
|
a4c102b4a52611f59500f9dfbe61376a76a23014
|
[
"Apache-2.0"
] | null | null | null |
bucket/bucket.py
|
gcp825/public
|
a4c102b4a52611f59500f9dfbe61376a76a23014
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
from gcp_tools import io_tools as io
def run():
# find and replace: your-bucket-name
# if target file name not supplied, defaults to the same as source file name
# target path will be created if it does not exist
storage_client = io.create_storage_client()
bucket = io.get_bucket_path(storage_client, 'your-bucket-name')
# io.file_download(bucket, 'source_path', 'source_file.ext', 'target_path', 'target_file.ext')
io.file_download(bucket, '/download', 'example.txt', '~/files', 'example.new')
# io.file_upload(bucket, 'source_path', 'source_file.ext', 'target_path', 'target_file.ext')
io.file_upload(bucket, '~/files', 'example.new', '/upload')
if __name__ == '__main__': run()
| 33.434783
| 97
| 0.676203
|
a88d35c48ad565393ed8b8f283a63f57063a39bc
| 7,005
|
py
|
Python
|
.pycharm_helpers/python_stubs/-583653458/_csv.py
|
pyy1988/management-system
|
3dee81b4abd54b1c869a509bdd30e09c8b91d05f
|
[
"Apache-2.0"
] | null | null | null |
.pycharm_helpers/python_stubs/-583653458/_csv.py
|
pyy1988/management-system
|
3dee81b4abd54b1c869a509bdd30e09c8b91d05f
|
[
"Apache-2.0"
] | null | null | null |
.pycharm_helpers/python_stubs/-583653458/_csv.py
|
pyy1988/management-system
|
3dee81b4abd54b1c869a509bdd30e09c8b91d05f
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
# module _csv
# from /home/pyy2/.virtualenvs/pyy3.5/lib/python3.5/lib-dynload/_csv.cpython-35m-x86_64-linux-gnu.so
# by generator 1.145
"""
CSV parsing and writing.
This module provides classes that assist in the reading and writing
of Comma Separated Value (CSV) files, and implements the interface
described by PEP 305. Although many CSV files are simple to parse,
the format is not formally defined by a stable specification and
is subtle enough that parsing lines of a CSV file with something
like line.split(",") is bound to fail. The module supports three
basic APIs: reading, writing, and registration of dialects.
DIALECT REGISTRATION:
Readers and writers support a dialect argument, which is a convenient
handle on a group of settings. When the dialect argument is a string,
it identifies one of the dialects previously registered with the module.
If it is a class or instance, the attributes of the argument are used as
the settings for the reader or writer:
class excel:
delimiter = ','
quotechar = '"'
escapechar = None
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
SETTINGS:
* quotechar - specifies a one-character string to use as the
quoting character. It defaults to '"'.
* delimiter - specifies a one-character string to use as the
field separator. It defaults to ','.
* skipinitialspace - specifies how to interpret whitespace which
immediately follows a delimiter. It defaults to False, which
means that whitespace immediately following a delimiter is part
of the following field.
* lineterminator - specifies the character sequence which should
terminate rows.
* quoting - controls when quotes should be generated by the writer.
It can take on any of the following module constants:
csv.QUOTE_MINIMAL means only when required, for example, when a
field contains either the quotechar or the delimiter
csv.QUOTE_ALL means that quotes are always placed around fields.
csv.QUOTE_NONNUMERIC means that quotes are always placed around
fields which do not parse as integers or floating point
numbers.
csv.QUOTE_NONE means that quotes are never placed around fields.
* escapechar - specifies a one-character string used to escape
the delimiter when quoting is set to QUOTE_NONE.
* doublequote - controls the handling of quotes inside fields. When
True, two consecutive quotes are interpreted as one during read,
and when writing, each quote character embedded in the data is
written as two quotes
"""
# no imports
# Variables with simple values
QUOTE_ALL = 1
QUOTE_MINIMAL = 0
QUOTE_NONE = 3
QUOTE_NONNUMERIC = 2
__version__ = '1.0'
# functions
def field_size_limit(limit=None): # real signature unknown; restored from __doc__
"""
Sets an upper limit on parsed fields.
csv.field_size_limit([limit])
Returns old limit. If limit is not given, no new limit is set and
the old limit is returned
"""
pass
def get_dialect(name): # real signature unknown; restored from __doc__
"""
Return the dialect instance associated with name.
dialect = csv.get_dialect(name)
"""
pass
def list_dialects(): # real signature unknown; restored from __doc__
"""
    Return a list of all known dialect names.
names = csv.list_dialects()
"""
pass
def reader(iterable, dialect='excel', *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
"""
csv_reader = reader(iterable [, dialect='excel']
[optional keyword args])
for row in csv_reader:
process(row)
The "iterable" argument can be any object that returns a line
of input for each iteration, such as a file object or a list. The
optional "dialect" parameter is discussed below. The function
also accepts optional keyword arguments which override settings
provided by the dialect.
The returned object is an iterator. Each iteration returns a row
of the CSV file (which can span multiple input lines).
"""
pass
def register_dialect(name, dialect=None, **fmtparams): # real signature unknown; restored from __doc__
"""
Create a mapping from a string name to a dialect class.
dialect = csv.register_dialect(name[, dialect[, **fmtparams]])
"""
pass
def unregister_dialect(name): # real signature unknown; restored from __doc__
"""
Delete the name/dialect mapping associated with a string name.
csv.unregister_dialect(name)
"""
pass
def writer(fileobj, dialect='excel', *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
"""
csv_writer = csv.writer(fileobj [, dialect='excel']
[optional keyword args])
for row in sequence:
csv_writer.writerow(row)
[or]
csv_writer = csv.writer(fileobj [, dialect='excel']
[optional keyword args])
csv_writer.writerows(rows)
The "fileobj" argument can be any object that supports the file API.
"""
pass
# classes
class Dialect(object):
"""
CSV dialect
The Dialect type records CSV parsing and generation options.
"""
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
delimiter = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
doublequote = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
escapechar = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
lineterminator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
quotechar = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
quoting = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
skipinitialspace = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
strict = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class Error(Exception):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
# variables with complex values
_dialects = {}
__loader__ = None # (!) real value is ''
__spec__ = None # (!) real value is ''
| 34.850746
| 121
| 0.6798
|
4b16b89b1bf756478e60e40b9ca4a9f958ce37a6
| 1,546
|
py
|
Python
|
Part-reID/opt.py
|
wangminjie920705/Part-reid
|
34a1e968a2eab692ba810332f309e82b441793f6
|
[
"MIT"
] | null | null | null |
Part-reID/opt.py
|
wangminjie920705/Part-reid
|
34a1e968a2eab692ba810332f309e82b441793f6
|
[
"MIT"
] | null | null | null |
Part-reID/opt.py
|
wangminjie920705/Part-reid
|
34a1e968a2eab692ba810332f309e82b441793f6
|
[
"MIT"
] | null | null | null |
import argparse
parser = argparse.ArgumentParser(description='reid')
parser.add_argument('--data_path',
default="/home/wangliu/zymount/dataset/Market-1501-v15.09.15",
help='path of Market-1501-v15.09.15')
parser.add_argument('--mode',
default='train', choices=['train', 'evaluate', 'vis'],
help='train or evaluate ')
parser.add_argument('--query_image',
default='0001_c1s1_001051_00.jpg',
help='path to the image you want to query')
parser.add_argument('--freeze',
default=False,
help='freeze backbone or not ')
parser.add_argument('--weight',
default='weights/model.pt',
help='load weights ')
parser.add_argument('--epoch',
default=1000,
help='number of epoch to train')
parser.add_argument('--lr',
default=2e-4,
help='initial learning_rate')
parser.add_argument('--lr_scheduler',
default=[320, 380],
help='MultiStepLR,decay the learning rate')
parser.add_argument("--batchid",
default=4,
help='the batch for id')
parser.add_argument("--batchimage",
default=6,
help='the batch of per id')
parser.add_argument("--batchtest",
default=8,
help='the batch size for test')
opt = parser.parse_args()
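# Illustrative sketch of how the parsed options are typically consumed
# downstream (attribute names match the arguments defined above; the
# training snippet itself is hypothetical):
#
#   from opt import opt
#   if opt.mode == 'train':
#       print('training for {} epochs, lr={}, batchid={}'.format(
#           opt.epoch, opt.lr, opt.batchid))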
| 31.55102
| 82
| 0.528461
|
fc157ee61806fd81e5d72850b227f25ad59a7afe
| 24,718
|
py
|
Python
|
myenv/lib/python2.7/site-packages/premailer/premailer.py
|
dkumarlinux/saleor
|
e3a852fed7da38e4141b0755bd282012f508c7b9
|
[
"BSD-3-Clause"
] | null | null | null |
myenv/lib/python2.7/site-packages/premailer/premailer.py
|
dkumarlinux/saleor
|
e3a852fed7da38e4141b0755bd282012f508c7b9
|
[
"BSD-3-Clause"
] | 2
|
2022-02-10T16:51:56.000Z
|
2022-02-10T18:23:52.000Z
|
myenv/lib/python2.7/site-packages/premailer/premailer.py
|
dkumarlinux/saleor
|
e3a852fed7da38e4141b0755bd282012f508c7b9
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import, unicode_literals, print_function
import codecs
import operator
import os
import re
import warnings
try:
from collections import OrderedDict
except ImportError: # pragma: no cover
# some old python 2.6 thing then, eh?
from ordereddict import OrderedDict
import sys
if sys.version_info >= (3,): # pragma: no cover
# As in, Python 3
from io import StringIO
from urllib.parse import urljoin, urlparse
STR_TYPE = str
else: # Python 2
try:
from cStringIO import StringIO
except ImportError: # pragma: no cover
from StringIO import StringIO
StringIO = StringIO # shut up pyflakes
from urlparse import urljoin, urlparse
STR_TYPE = basestring # NOQA
import cssutils
import requests
from lxml import etree
from lxml.cssselect import CSSSelector
from premailer.merge_style import merge_styles, csstext_to_pairs
from premailer.cache import function_cache
__all__ = ['PremailerError', 'Premailer', 'transform']
class PremailerError(Exception):
pass
class ExternalNotFoundError(ValueError):
pass
def make_important(bulk):
"""makes every property in a string !important.
"""
return ';'.join('%s !important' % p if not p.endswith('!important') else p
for p in bulk.split(';'))
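# A minimal sketch of what make_important produces (the declarations are
# illustrative):
#
#   make_important('color:red; font-size:12px')
#   # -> 'color:red !important; font-size:12px !important'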
def get_or_create_head(root):
"""Ensures that `root` contains a <head> element and returns it.
"""
head = CSSSelector('head')(root)
if not head:
head = etree.Element('head')
body = CSSSelector('body')(root)[0]
body.getparent().insert(0, head)
return head
else:
return head[0]
@function_cache()
def _cache_parse_css_string(css_body, validate=True):
"""
This function caches the result from cssutils.
It is a big gain when the number of rules is large.
Maximum cache entries are 1000. This is mainly to
protect against memory leaks in case something goes wild.
Be aware that you can turn the cache off in Premailer
Args:
css_body(str): css rules in string format
validate(bool): if cssutils should validate
Returns:
cssutils.css.cssstylesheet.CSSStyleSheet
"""
return cssutils.parseString(css_body, validate=validate)
def capitalize_float_margin(css_body):
"""Capitalize float and margin CSS property names
"""
def _capitalize_property(match):
return '{0}:{1}{2}'.format(
match.group('property').capitalize(),
match.group('value'),
match.group('terminator'))
return _lowercase_margin_float_rule.sub(_capitalize_property, css_body)
_element_selector_regex = re.compile(r'(^|\s)\w')
_cdata_regex = re.compile(r'\<\!\[CDATA\[(.*?)\]\]\>', re.DOTALL)
_lowercase_margin_float_rule = re.compile(
r'''(?P<property>margin(-(top|bottom|left|right))?|float)
:
(?P<value>.*?)
(?P<terminator>$|;)''',
re.IGNORECASE | re.VERBOSE)
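# Minimal sketch of the capitalisation helper defined above (input and output
# are illustrative, checked against the regex by inspection only):
#
#   capitalize_float_margin('float:left;margin-top:1px;')
#   # -> 'Float:left;Margin-top:1px;'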
_importants = re.compile(r'\s*!important')
#: The short (3-digit) color codes that cause issues for IBM Notes
_short_color_codes = re.compile(r'^#([0-9a-f])([0-9a-f])([0-9a-f])$', re.I)
# These selectors don't apply to all elements. Rather, they specify
# which elements to apply to.
FILTER_PSEUDOSELECTORS = [':last-child', ':first-child', 'nth-child']
class Premailer(object):
attribute_name = 'data-premailer'
def __init__(self, html, base_url=None,
preserve_internal_links=False,
preserve_inline_attachments=True,
exclude_pseudoclasses=True,
keep_style_tags=False,
include_star_selectors=False,
remove_classes=False,
capitalize_float_margin=False,
strip_important=True,
external_styles=None,
css_text=None,
method="html",
base_path=None,
disable_basic_attributes=None,
disable_validation=False,
cache_css_parsing=True,
cssutils_logging_handler=None,
cssutils_logging_level=None,
disable_leftover_css=False,
align_floating_images=True,
remove_unset_properties=True):
self.html = html
self.base_url = base_url
self.preserve_internal_links = preserve_internal_links
self.preserve_inline_attachments = preserve_inline_attachments
self.exclude_pseudoclasses = exclude_pseudoclasses
# whether to delete the <style> tag once it's been processed
# this will always preserve the original css
self.keep_style_tags = keep_style_tags
self.remove_classes = remove_classes
self.capitalize_float_margin = capitalize_float_margin
# whether to process or ignore selectors like '* { foo:bar; }'
self.include_star_selectors = include_star_selectors
if isinstance(external_styles, STR_TYPE):
external_styles = [external_styles]
self.external_styles = external_styles
if isinstance(css_text, STR_TYPE):
css_text = [css_text]
self.css_text = css_text
self.strip_important = strip_important
self.method = method
self.base_path = base_path
if disable_basic_attributes is None:
disable_basic_attributes = []
self.disable_basic_attributes = disable_basic_attributes
self.disable_validation = disable_validation
self.cache_css_parsing = cache_css_parsing
self.disable_leftover_css = disable_leftover_css
self.align_floating_images = align_floating_images
self.remove_unset_properties = remove_unset_properties
if cssutils_logging_handler:
cssutils.log.addHandler(cssutils_logging_handler)
if cssutils_logging_level:
cssutils.log.setLevel(cssutils_logging_level)
def _parse_css_string(self, css_body, validate=True):
if self.cache_css_parsing:
return _cache_parse_css_string(css_body, validate=validate)
return cssutils.parseString(css_body, validate=validate)
def _parse_style_rules(self, css_body, ruleset_index):
"""Returns a list of rules to apply to this doc and a list of rules
that won't be used because e.g. they are pseudoclasses. Rules
look like: (specificity, selector, bulk)
for example: ((0, 1, 0, 0, 0), u'.makeblue', u'color:blue').
The bulk of the rule should not end in a semicolon.
"""
def format_css_property(prop):
if self.strip_important or prop.priority != 'important':
return '{0}:{1}'.format(prop.name, prop.value)
else:
return '{0}:{1} !important'.format(prop.name, prop.value)
def join_css_properties(properties):
""" Accepts a list of cssutils Property objects and returns
a semicolon-delimited string like 'color: red; font-size: 12px'
"""
return ';'.join(
format_css_property(prop)
for prop in properties
)
leftover = []
rules = []
# empty string
if not css_body:
return rules, leftover
sheet = self._parse_css_string(
css_body,
validate=not self.disable_validation
)
for rule in sheet:
# handle media rule
if rule.type == rule.MEDIA_RULE:
leftover.append(rule)
continue
# only proceed for things we recognize
if rule.type != rule.STYLE_RULE:
continue
# normal means it doesn't have "!important"
normal_properties = [
prop for prop in rule.style.getProperties()
if prop.priority != 'important'
]
important_properties = [
prop for prop in rule.style.getProperties()
if prop.priority == 'important'
]
# Create three strings that we can use to add to the `rules`
# list later as ready blocks of css.
bulk_normal = join_css_properties(normal_properties)
bulk_important = join_css_properties(important_properties)
bulk_all = join_css_properties(
normal_properties + important_properties
)
selectors = (
x.strip()
for x in rule.selectorText.split(',')
if x.strip() and not x.strip().startswith('@')
)
for selector in selectors:
if (':' in selector and self.exclude_pseudoclasses and
':' + selector.split(':', 1)[1]
not in FILTER_PSEUDOSELECTORS):
# a pseudoclass
leftover.append((selector, bulk_all))
continue
elif '*' in selector and not self.include_star_selectors:
continue
# Crudely calculate specificity
id_count = selector.count('#')
class_count = selector.count('.')
element_count = len(_element_selector_regex.findall(selector))
# Within one rule individual properties have different
# priority depending on !important.
# So we split each rule into two: one that includes all
# the !important declarations and another that doesn't.
for is_important, bulk in (
(1, bulk_important), (0, bulk_normal)
):
if not bulk:
# don't bother adding empty css rules
continue
specificity = (
is_important,
id_count,
class_count,
element_count,
ruleset_index,
len(rules) # this is the rule's index number
)
rules.append((specificity, selector, bulk))
return rules, leftover
def transform(self, pretty_print=True, **kwargs):
"""change the self.html and return it with CSS turned into style
attributes.
"""
if hasattr(self.html, "getroottree"):
# skip the next bit
root = self.html.getroottree()
page = root
tree = root
else:
if self.method == 'xml':
parser = etree.XMLParser(
ns_clean=False,
resolve_entities=False
)
else:
parser = etree.HTMLParser()
stripped = self.html.strip()
tree = etree.fromstring(stripped, parser).getroottree()
page = tree.getroot()
# lxml inserts a doctype if none exists, so only include it in
# the root if it was in the original html.
root = tree if stripped.startswith(tree.docinfo.doctype) else page
assert page is not None
if self.disable_leftover_css:
head = None
else:
head = get_or_create_head(tree)
#
# style selectors
#
rules = []
index = 0
for element in CSSSelector('style,link[rel~=stylesheet]')(page):
# If we have a media attribute whose value is anything other than
# 'all' or 'screen', ignore the ruleset.
media = element.attrib.get('media')
if media and media not in ('all', 'screen'):
continue
data_attribute = element.attrib.get(self.attribute_name)
if data_attribute:
if data_attribute == 'ignore':
del element.attrib[self.attribute_name]
continue
else:
warnings.warn(
'Unrecognized %s attribute (%r)' % (
self.attribute_name,
data_attribute,
)
)
is_style = element.tag == 'style'
if is_style:
css_body = element.text
else:
href = element.attrib.get('href')
css_body = self._load_external(href)
these_rules, these_leftover = self._parse_style_rules(
css_body, index
)
index += 1
rules.extend(these_rules)
parent_of_element = element.getparent()
if these_leftover or self.keep_style_tags:
if is_style:
style = element
else:
style = etree.Element('style')
style.attrib['type'] = 'text/css'
if self.keep_style_tags:
style.text = css_body
else:
style.text = self._css_rules_to_string(these_leftover)
if self.method == 'xml':
style.text = etree.CDATA(style.text)
if not is_style:
element.addprevious(style)
parent_of_element.remove(element)
elif not self.keep_style_tags or not is_style:
parent_of_element.remove(element)
# external style files
if self.external_styles:
for stylefile in self.external_styles:
css_body = self._load_external(stylefile)
self._process_css_text(css_body, index, rules, head)
index += 1
# css text
if self.css_text:
for css_body in self.css_text:
self._process_css_text(css_body, index, rules, head)
index += 1
# rules is a tuple of (specificity, selector, styles), where
# specificity is a tuple ordered such that more specific
# rules sort larger.
rules.sort(key=operator.itemgetter(0))
# collecting all elements that we need to apply rules on
# id is unique for the lifetime of the object
# and lxml should give us the same id every time during this run
# item id -> {item: item, classes: [], style: []}
elements = {}
for _, selector, style in rules:
new_selector = selector
class_ = ''
if ':' in selector:
new_selector, class_ = re.split(':', selector, 1)
class_ = ':%s' % class_
# Keep filter-type selectors untouched.
if class_ in FILTER_PSEUDOSELECTORS:
class_ = ''
else:
selector = new_selector
sel = CSSSelector(selector)
items = sel(page)
if len(items):
# same so process it first
processed_style = csstext_to_pairs(style)
for item in items:
item_id = id(item)
if item_id not in elements:
elements[item_id] = {
'item': item,
'classes': [],
'style': [],
}
elements[item_id]['style'].append(processed_style)
elements[item_id]['classes'].append(class_)
# Now apply inline style
# merge style only once for each element
# crucial when you have a lot of pseudo/classes
# and a long list of elements
for _, element in elements.items():
final_style = merge_styles(
element['item'].attrib.get('style', ''),
element['style'],
element['classes'],
remove_unset_properties=self.remove_unset_properties,
)
if final_style:
# final style could be empty string because of
# remove_unset_properties
element['item'].attrib['style'] = final_style
self._style_to_basic_html_attributes(
element['item'],
final_style,
force=True
)
if self.remove_classes:
# now we can delete all 'class' attributes
for item in page.xpath('//@class'):
parent = item.getparent()
del parent.attrib['class']
# Capitalize Margin properties
# To fix weird outlook bug
# https://www.emailonacid.com/blog/article/email-development/outlook.com-does-support-margins
if self.capitalize_float_margin:
for item in page.xpath('//@style'):
mangled = capitalize_float_margin(item)
item.getparent().attrib['style'] = mangled
# Add align attributes to images if they have a CSS float value of
# right or left. Outlook (both on desktop and on the web) are bad at
# understanding floats, but they do understand the HTML align attrib.
if self.align_floating_images:
for item in page.xpath('//img[@style]'):
image_css = cssutils.parseStyle(item.attrib['style'])
if image_css.float == 'right':
item.attrib['align'] = 'right'
elif image_css.float == 'left':
item.attrib['align'] = 'left'
#
# URLs
#
if self.base_url:
if not urlparse(self.base_url).scheme:
raise ValueError('Base URL must have a scheme')
for attr in ('href', 'src'):
for item in page.xpath("//@%s" % attr):
parent = item.getparent()
url = parent.attrib[attr]
if (
attr == 'href' and self.preserve_internal_links and
url.startswith('#')
):
continue
if (
attr == 'src' and self.preserve_inline_attachments and
url.startswith('cid:')
):
continue
parent.attrib[attr] = urljoin(self.base_url, url)
if hasattr(self.html, "getroottree"):
return root
else:
kwargs.setdefault('method', self.method)
kwargs.setdefault('pretty_print', pretty_print)
kwargs.setdefault('encoding', 'utf-8') # As Ken Thompson intended
out = etree.tostring(root, **kwargs).decode(kwargs['encoding'])
if self.method == 'xml':
out = _cdata_regex.sub(
lambda m: '/*<![CDATA[*/%s/*]]>*/' % m.group(1),
out
)
if self.strip_important:
out = _importants.sub('', out)
return out
def _load_external_url(self, url):
return requests.get(url).text
def _load_external(self, url):
"""loads an external stylesheet from a remote url or local path
"""
if url.startswith('//'):
# then we have to rely on the base_url
if self.base_url and 'https://' in self.base_url:
url = 'https:' + url
else:
url = 'http:' + url
if url.startswith('http://') or url.startswith('https://'):
css_body = self._load_external_url(url)
else:
stylefile = url
if not os.path.isabs(stylefile):
stylefile = os.path.abspath(
os.path.join(self.base_path or '', stylefile)
)
if os.path.exists(stylefile):
with codecs.open(stylefile, encoding='utf-8') as f:
css_body = f.read()
elif self.base_url:
url = urljoin(self.base_url, url)
return self._load_external(url)
else:
raise ExternalNotFoundError(stylefile)
return css_body
@staticmethod
def six_color(color_value):
"""Fix background colors for Lotus Notes
Notes which fails to handle three character ``bgcolor`` codes well.
see <https://github.com/peterbe/premailer/issues/114>"""
# Turn the color code from three to six digits
retval = _short_color_codes.sub(r'#\1\1\2\2\3\3', color_value)
return retval
def _style_to_basic_html_attributes(self, element, style_content,
force=False):
"""given an element and styles like
'background-color:red; font-family:Arial' turn some of that into HTML
attributes. like 'bgcolor', etc.
Note, the style_content can contain pseudoclasses like:
'{color:red; border:1px solid green} :visited{border:1px solid green}'
"""
if (
style_content.count('}') and
style_content.count('{') == style_content.count('}')
):
style_content = style_content.split('}')[0][1:]
attributes = OrderedDict()
for key, value in [x.split(':') for x in style_content.split(';')
if len(x.split(':')) == 2]:
key = key.strip()
if key == 'text-align':
attributes['align'] = value.strip()
elif key == 'vertical-align':
attributes['valign'] = value.strip()
elif (
key == 'background-color' and
'transparent' not in value.lower()
):
# Only add the 'bgcolor' attribute if the value does not
# contain the word "transparent"; before we add it possibly
# correct the 3-digit color code to its 6-digit equivalent
# ("abc" to "aabbcc") so IBM Notes copes.
attributes['bgcolor'] = self.six_color(value.strip())
elif key == 'width' or key == 'height':
value = value.strip()
if value.endswith('px'):
value = value[:-2]
attributes[key] = value
for key, value in attributes.items():
if (
key in element.attrib and not force or
key in self.disable_basic_attributes
):
# already set, don't dare to overwrite
continue
element.attrib[key] = value
def _css_rules_to_string(self, rules):
"""given a list of css rules returns a css string
"""
lines = []
for item in rules:
if isinstance(item, tuple):
k, v = item
lines.append('%s {%s}' % (k, make_important(v)))
# media rule
else:
for rule in item.cssRules:
if isinstance(rule, cssutils.css.csscomment.CSSComment):
continue
for key in rule.style.keys():
rule.style[key] = (
rule.style.getPropertyValue(key, False),
'!important'
)
lines.append(item.cssText)
return '\n'.join(lines)
def _process_css_text(self, css_text, index, rules, head):
"""processes the given css_text by adding rules that can be
in-lined to the given rules list and adding any that cannot
be in-lined to the given `<head>` element.
"""
these_rules, these_leftover = self._parse_style_rules(css_text, index)
rules.extend(these_rules)
if head is not None and (these_leftover or self.keep_style_tags):
style = etree.Element('style')
style.attrib['type'] = 'text/css'
if self.keep_style_tags:
style.text = css_text
else:
style.text = self._css_rules_to_string(these_leftover)
head.append(style)
def transform(html, base_url=None):
return Premailer(html, base_url=base_url).transform()
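# Hedged usage sketch for the one-shot helper above (the HTML snippet is
# illustrative):
#
#   inlined = transform('<style>p {color: red}</style><p>Hi</p>',
#                       base_url='https://example.com')
#   # -> HTML where the <p> carries style="color:red"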
if __name__ == '__main__': # pragma: no cover
html = """<html>
<head>
<title>Test</title>
<style>
h1, h2 { color:red; }
strong {
text-decoration:none
}
p { font-size:2px }
p.footer { font-size: 1px}
</style>
</head>
<body>
<h1>Hi!</h1>
<p><strong>Yes!</strong></p>
<p class="footer" style="color:red">Feetnuts</p>
</body>
</html>"""
p = Premailer(html)
print(p.transform())
| 37.451515
| 101
| 0.54879
|
bd6709ec765511797a1abb443203134b889bc169
| 329
|
py
|
Python
|
pacote-download/Mundo2/ex040.py
|
ariadne-pereira/cev-python
|
b2c6bbebb5106bb0152c9127c04c83f23e9d7757
|
[
"MIT"
] | null | null | null |
pacote-download/Mundo2/ex040.py
|
ariadne-pereira/cev-python
|
b2c6bbebb5106bb0152c9127c04c83f23e9d7757
|
[
"MIT"
] | null | null | null |
pacote-download/Mundo2/ex040.py
|
ariadne-pereira/cev-python
|
b2c6bbebb5106bb0152c9127c04c83f23e9d7757
|
[
"MIT"
] | null | null | null |
nota1 = float(input('Digite a primeira nota: '))
nota2 = float(input('Digite a segunda nota: '))
media = (nota1+nota2)/2
print('Nota 1: {}. Nota 2: {}. Média {}'.format(nota1, nota2, media))
if media < 5:
print('Situação: Reprovado')
elif media < 6.9:
print('Situação: Recuperação')
else:
print('Situação: Aprovado')
| 29.909091
| 69
| 0.650456
|
d978f95d989ee0bb65229194ed73cc5cc88211f4
| 17,589
|
py
|
Python
|
pytests/cbas/cbas_system_event_logs.py
|
AnithaKuberan/TAF
|
9824c6a4f1680c320ab065e23c720ffa92d530d9
|
[
"Apache-2.0"
] | null | null | null |
pytests/cbas/cbas_system_event_logs.py
|
AnithaKuberan/TAF
|
9824c6a4f1680c320ab065e23c720ffa92d530d9
|
[
"Apache-2.0"
] | null | null | null |
pytests/cbas/cbas_system_event_logs.py
|
AnithaKuberan/TAF
|
9824c6a4f1680c320ab065e23c720ffa92d530d9
|
[
"Apache-2.0"
] | 1
|
2019-05-22T09:10:44.000Z
|
2019-05-22T09:10:44.000Z
|
'''
Created on 7-December-2021
@author: umang.agrawal
'''
import random
from cbas.cbas_base import CBASBaseTest
from cbas_utils.cbas_utils import CBASRebalanceUtil, FlushToDiskTask
import copy
from remote.remote_util import RemoteMachineShellConnection, RemoteUtilHelper
from SystemEventLogLib.analytics_events import AnalyticsEvents
from CbasLib.CBASOperations import CBASHelper
class CBASSystemEventLogs(CBASBaseTest):
def setUp(self):
super(CBASSystemEventLogs, self).setUp()
# Since all the test cases are being run on 1 cluster only
self.cluster = self.cb_clusters.values()[0]
self.rebalance_util = CBASRebalanceUtil(
self.cluster_util, self.bucket_util, self.task,
vbucket_check=True, cbas_util=self.cbas_util)
self.log_setup_status(self.__class__.__name__, "Finished",
stage=self.setUp.__name__)
def tearDown(self):
self.log_setup_status(self.__class__.__name__, "Started",
stage=self.tearDown.__name__)
super(CBASSystemEventLogs, self).tearDown()
self.log_setup_status(self.__class__.__name__, "Finished",
stage=self.tearDown.__name__)
def test_process_events(self):
self.log.info("Adding event for process_started event")
self.system_events.add_event(AnalyticsEvents.process_started(
self.cluster.cbas_cc_node.ip, "cbas"))
self.log.info("Killing Java process on cbas node to trigger process "
"crash event")
cbas_shell = RemoteMachineShellConnection(self.cluster.cbas_cc_node)
output, error = cbas_shell.kill_process("java", "java", signum=9)
cbas_shell.disconnect()
if error:
self.fail("Failed to kill Java process on CBAS node")
self.log.info("Adding event for process_crashed event")
self.system_events.add_event(AnalyticsEvents.process_crashed(
self.cluster.cbas_cc_node.ip, "java"))
if not self.cbas_util.wait_for_cbas_to_recover(self.cluster, 300):
self.fail("Analytics service failed to start after Java process "
"was killed")
self.log.info("Restarting analytics cluster to trigger process exited event")
status, _, _ = self.cbas_util.restart_analytics_cluster_uri(
self.cluster, username=None, password=None)
if not status:
self.fail("Failed to restart analytics cluster")
self.log.info("Adding event for process_exited event")
self.system_events.add_event(AnalyticsEvents.process_exited(
self.cluster.cbas_cc_node.ip, "java"))
if not self.cbas_util.wait_for_cbas_to_recover(self.cluster, 300):
self.fail("Analytics service failed to start after Java process "
"was killed")
def test_topology_change_events(self):
available_server_before_rebalance = copy.deepcopy(self.available_servers)
try:
self.log.info("Enabling firewall between Incoming node and CBAS CC "
"node to trigger topology_change_failed event")
for node in available_server_before_rebalance:
RemoteUtilHelper.enable_firewall(
node, bidirectional=False, xdcr=False,
action_on_packet="REJECT", block_ips=[self.cluster.cbas_cc_node.ip],
all_interface=True)
self.log.info("Rebalancing IN CBAS node to trigger "
"topology_change_started event")
rebalance_task, self.available_servers = self.rebalance_util.rebalance(
self.cluster, kv_nodes_in=0, kv_nodes_out=0,
cbas_nodes_in=1, cbas_nodes_out=0,
available_servers=self.available_servers, exclude_nodes=[])
if self.rebalance_util.wait_for_rebalance_task_to_complete(
rebalance_task, self.cluster, check_cbas_running=False):
raise Exception("Rebalance passed when it should have failed.")
self.log.info("Disabling firewall between Incoming node and CBAS CC "
"node and retriggering rebalance to trigger "
"topology_change_completed event")
for node in available_server_before_rebalance:
remote_client = RemoteMachineShellConnection(node)
remote_client.disable_firewall()
remote_client.disconnect()
rebalance_task, self.available_servers = self.rebalance_util.rebalance(
self.cluster, kv_nodes_in=0, kv_nodes_out=0,
cbas_nodes_in=0, cbas_nodes_out=0,
available_servers=self.available_servers, exclude_nodes=[])
if not self.rebalance_util.wait_for_rebalance_task_to_complete(
rebalance_task, self.cluster, check_cbas_running=False):
raise Exception("Rebalance failed even after disabling "
"firewall")
self.log.info("Adding event for topology_change_started event")
self.system_events.add_event(AnalyticsEvents.topology_change_started(
self.cluster.cbas_cc_node.ip, 2, 0))
self.log.info("Adding event for topology_change_failed event")
self.system_events.add_event(AnalyticsEvents.topology_change_failed(
self.cluster.cbas_cc_node.ip, 2, 0))
self.log.info("Adding event for topology_change_completed event")
self.system_events.add_event(AnalyticsEvents.topology_change_completed(
self.cluster.cbas_cc_node.ip, 2, 0))
except Exception as err:
self.log.info("Disabling Firewall")
for node in available_server_before_rebalance:
remote_client = RemoteMachineShellConnection(node)
remote_client.disable_firewall()
remote_client.disconnect()
self.fail(str(err))
def test_analytics_scope_events(self):
dataverse_name = CBASHelper.format_name(
self.cbas_util.generate_name(name_cardinality=2))
if not self.cbas_util.create_dataverse(
self.cluster, dataverse_name,
analytics_scope=random.choice(["True", "False"])):
self.fail("Error while creating dataverse")
self.log.info(
"Adding event for scope_created event")
self.system_events.add_event(AnalyticsEvents.scope_created(
self.cluster.cbas_cc_node.ip, CBASHelper.metadata_format(
dataverse_name)))
if not self.cbas_util.drop_dataverse(
self.cluster, dataverse_name,
analytics_scope=random.choice(["True", "False"])):
self.fail("Error while dropping dataverse")
self.log.info("Adding event for scope_dropped event")
self.system_events.add_event(AnalyticsEvents.scope_dropped(
self.cluster.cbas_cc_node.ip, CBASHelper.metadata_format(
dataverse_name)))
def test_analytics_collection_events(self):
dataset_objs = self.cbas_util.create_dataset_obj(
self.cluster, self.bucket_util, dataset_cardinality=3,
bucket_cardinality=3, enabled_from_KV=False,
no_of_objs=1)
dataset_objs += self.cbas_util.create_dataset_obj(
self.cluster, self.bucket_util, dataset_cardinality=3,
bucket_cardinality=3, enabled_from_KV=True,
no_of_objs=1)
for dataset in dataset_objs:
if dataset.enabled_from_KV:
if not self.cbas_util.enable_analytics_from_KV(
self.cluster, dataset.full_kv_entity_name):
self.fail("Error while mapping KV collection to analytics")
self.system_events.add_event(AnalyticsEvents.collection_mapped(
self.cluster.cbas_cc_node.ip, dataset.kv_bucket.name,
dataset.kv_scope.name, dataset.kv_collection.name))
if not self.cbas_util.disable_analytics_from_KV(
self.cluster, dataset.full_kv_entity_name):
self.fail("Error while unmapping KV collection from "
"analytics")
else:
if not self.cbas_util.create_dataset(
self.cluster, dataset.name, dataset.full_kv_entity_name,
dataverse_name=dataset.dataverse_name,
analytics_collection=random.choice(["True", "False"])):
self.fail("Error while creating analytics collection")
self.system_events.add_event(AnalyticsEvents.collection_created(
self.cluster.cbas_cc_node.ip,
CBASHelper.metadata_format(dataset.dataverse_name),
CBASHelper.metadata_format(dataset.name),
CBASHelper.metadata_format(dataset.dataverse_name),
"Local", dataset.kv_bucket.name, dataset.kv_scope.name,
dataset.kv_collection.name))
if not self.cbas_util.drop_dataset(
self.cluster, dataset.full_name,
analytics_collection=random.choice(["True", "False"])):
self.fail("Error while dropping datasets")
self.system_events.add_event(AnalyticsEvents.collection_dropped(
self.cluster.cbas_cc_node.ip,
CBASHelper.metadata_format(dataset.dataverse_name),
CBASHelper.metadata_format(dataset.name)))
def test_analytics_index_events(self):
dataset_obj = self.cbas_util.create_dataset_obj(
self.cluster, self.bucket_util, dataset_cardinality=3,
bucket_cardinality=3, enabled_from_KV=False,
no_of_objs=1)[0]
if not self.cbas_util.create_dataset(
self.cluster, dataset_obj.name, dataset_obj.full_kv_entity_name,
dataverse_name=dataset_obj.dataverse_name,
analytics_collection=random.choice(["True", "False"])):
self.fail("Error while creating analytics collection")
index_name = CBASHelper.format_name(
self.cbas_util.generate_name(name_cardinality=1))
if not self.cbas_util.create_cbas_index(
self.cluster, index_name, ["age:bigint"], dataset_obj.full_name,
analytics_index=random.choice(["True", "False"])):
self.fail("Error while creating analytics index")
self.log.info("Adding event for index_created events")
self.system_events.add_event(AnalyticsEvents.index_created(
self.cluster.cbas_cc_node.ip,
CBASHelper.metadata_format(dataset_obj.dataverse_name),
CBASHelper.metadata_format(index_name),
CBASHelper.metadata_format(dataset_obj.name)))
if not self.cbas_util.drop_cbas_index(
self.cluster, index_name, dataset_obj.full_name,
analytics_index=random.choice(["True", "False"])):
self.fail("Error while dropping analytics index")
self.log.info("Adding event for index_dropped events")
self.system_events.add_event(AnalyticsEvents.index_dropped(
self.cluster.cbas_cc_node.ip,
CBASHelper.metadata_format(dataset_obj.dataverse_name),
CBASHelper.metadata_format(index_name),
CBASHelper.metadata_format(dataset_obj.name)))
def test_analytics_synonym_events(self):
dataset_obj = self.cbas_util.create_dataset_obj(
self.cluster, self.bucket_util, dataset_cardinality=3,
bucket_cardinality=3, enabled_from_KV=False,
no_of_objs=1)[0]
if not self.cbas_util.create_dataset(
self.cluster, dataset_obj.name,
dataset_obj.full_kv_entity_name,
dataverse_name=dataset_obj.dataverse_name,
analytics_collection=random.choice(["True", "False"])):
self.fail("Error while creating analytics collection")
syn_name_1 = CBASHelper.format_name(
self.cbas_util.generate_name(name_cardinality=1))
if not self.cbas_util.create_analytics_synonym(
self.cluster, CBASHelper.format_name(
dataset_obj.dataverse_name, syn_name_1), dataset_obj.full_name):
self.fail("Error while creating Synonym")
self.log.info("Adding event for synonym_created event")
self.system_events.add_event(AnalyticsEvents.synonym_created(
self.cluster.cbas_cc_node.ip,
CBASHelper.metadata_format(dataset_obj.dataverse_name),
CBASHelper.metadata_format(syn_name_1),
CBASHelper.metadata_format(dataset_obj.dataverse_name),
CBASHelper.metadata_format(dataset_obj.name)))
syn_name_2 = CBASHelper.format_name(
self.cbas_util.generate_name(name_cardinality=1))
self.log.info("Creating dangling Synonym")
if not self.cbas_util.create_analytics_synonym(
self.cluster, CBASHelper.format_name(
dataset_obj.dataverse_name, syn_name_2), "dangling"):
self.fail("Error while creating Synonym")
self.log.info("Adding event for synonym_created event for dangling "
"synonym")
self.system_events.add_event(AnalyticsEvents.synonym_created(
self.cluster.cbas_cc_node.ip,
CBASHelper.metadata_format(dataset_obj.dataverse_name),
CBASHelper.metadata_format(syn_name_2),
CBASHelper.metadata_format(dataset_obj.dataverse_name),
CBASHelper.metadata_format("dangling")))
for syn_name in [syn_name_1, syn_name_2]:
if not self.cbas_util.drop_analytics_synonym(
self.cluster, CBASHelper.format_name(
dataset_obj.dataverse_name, syn_name)):
self.fail("Error while dropping synonym")
self.log.info("Adding event for synonym_dropped events")
self.system_events.add_event(AnalyticsEvents.synonym_dropped(
self.cluster.cbas_cc_node.ip,
CBASHelper.metadata_format(dataset_obj.dataverse_name),
CBASHelper.metadata_format(syn_name)))
def test_analytics_collection_attach_dettach_events(self):
dataset_obj = self.cbas_util.create_dataset_obj(
self.cluster, self.bucket_util, dataset_cardinality=3,
bucket_cardinality=3, enabled_from_KV=False,
no_of_objs=1, exclude_collection=["_default"])[0]
if not self.cbas_util.create_dataset(
self.cluster, dataset_obj.name,
dataset_obj.full_kv_entity_name,
dataverse_name=dataset_obj.dataverse_name,
analytics_collection=random.choice(["True", "False"])):
self.fail("Error while creating analytics collection")
self.log.info("Dropping collection {0}".format(
dataset_obj.full_kv_entity_name))
self.bucket_util.drop_collection(
self.cluster.master, dataset_obj.kv_bucket,
scope_name=dataset_obj.kv_scope.name,
collection_name=dataset_obj.kv_collection.name, session=None)
if not self.cbas_util.wait_for_ingestion_complete(
self.cluster, dataset_obj.full_name, 0, timeout=300):
self.fail("Data is present in the dataset when it should not")
self.log.info("Creating collection {0}".format(
dataset_obj.full_kv_entity_name))
self.bucket_util.create_collection(
self.cluster.master, dataset_obj.kv_bucket,
scope_name=dataset_obj.kv_scope.name,
collection_spec=dataset_obj.kv_collection.get_dict_object(),
session=None)
if not self.cbas_util.wait_for_ingestion_complete(
self.cluster, dataset_obj.full_name, 0, timeout=300):
self.fail("Data ingestion failed.")
self.log.info("Adding event for collection_detach events")
self.system_events.add_event(AnalyticsEvents.collection_detached(
self.cluster.cbas_cc_node.ip,
CBASHelper.metadata_format(dataset_obj.dataverse_name),
CBASHelper.unformat_name(dataset_obj.name)))
self.log.info("Adding event for collection_attach events")
self.system_events.add_event(AnalyticsEvents.collection_attached(
self.cluster.cbas_cc_node.ip,
CBASHelper.metadata_format(dataset_obj.dataverse_name),
CBASHelper.unformat_name(dataset_obj.name)))
def test_analytics_settings_change_events(self):
status, content, response = \
self.cbas_util.fetch_service_parameter_configuration_on_cbas(
self.cluster)
if not status:
self.fail("Error while fetching the analytics service config")
old_value = content["jobHistorySize"]
new_value = 10
status, content, response = \
self.cbas_util.update_service_parameter_configuration_on_cbas(
self.cluster, config_map={"jobHistorySize": 10})
if not status:
self.fail("Error while setting the analytics service config")
self.log.info("Adding event for settings_change events")
self.system_events.add_event(AnalyticsEvents.setting_changed(
self.cluster.cbas_cc_node.ip, "jobHistorySize", old_value, new_value))
| 50.398281
| 88
| 0.654386
|
b4030615e3250496033c8bc1c77c95fde20db487
| 58
|
py
|
Python
|
labtoys/CTS/__init__.py
|
ppudo/labtoys_python
|
c8ca27637602b8aac0574e92da370a4a97e9fcad
|
[
"MIT"
] | null | null | null |
labtoys/CTS/__init__.py
|
ppudo/labtoys_python
|
c8ca27637602b8aac0574e92da370a4a97e9fcad
|
[
"MIT"
] | null | null | null |
labtoys/CTS/__init__.py
|
ppudo/labtoys_python
|
c8ca27637602b8aac0574e92da370a4a97e9fcad
|
[
"MIT"
] | null | null | null |
#__init__.py
from .ASCII_Proto_ETH import ASCII_Proto_ETH
| 19.333333
| 44
| 0.862069
|
9d1fa0584c20f651c9817dad5d067f7fbd2d2f5b
| 5,716
|
py
|
Python
|
tfx/types/standard_artifacts.py
|
lyschoening/tfx
|
ff87a97db07642e57e2c84cf50682dc5996f99a4
|
[
"Apache-2.0"
] | null | null | null |
tfx/types/standard_artifacts.py
|
lyschoening/tfx
|
ff87a97db07642e57e2c84cf50682dc5996f99a4
|
[
"Apache-2.0"
] | null | null | null |
tfx/types/standard_artifacts.py
|
lyschoening/tfx
|
ff87a97db07642e57e2c84cf50682dc5996f99a4
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A set of standard TFX Artifact types.
Note: the artifact definitions here are expected to change.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import decimal
import math
from typing import Text
import absl
from tfx.types.artifact import Artifact
from tfx.types.artifact import Property
from tfx.types.artifact import PropertyType
from tfx.types.artifact import ValueArtifact
# Span for an artifact.
SPAN_PROPERTY = Property(type=PropertyType.INT)
# Version for an artifact.
VERSION_PROPERTY = Property(type=PropertyType.INT)
# Comma-separated list of splits for an artifact. Empty string means the artifact
# has no split.
SPLIT_NAMES_PROPERTY = Property(type=PropertyType.STRING)
# Value for a string-typed artifact.
STRING_VALUE_PROPERTY = Property(type=PropertyType.STRING)
class Examples(Artifact):
TYPE_NAME = 'Examples'
PROPERTIES = {
'span': SPAN_PROPERTY,
'version': VERSION_PROPERTY,
'split_names': SPLIT_NAMES_PROPERTY,
}
class ExampleAnomalies(Artifact):
TYPE_NAME = 'ExampleAnomalies'
PROPERTIES = {
'span': SPAN_PROPERTY,
'split_names': SPLIT_NAMES_PROPERTY,
}
class ExampleStatistics(Artifact):
TYPE_NAME = 'ExampleStatistics'
PROPERTIES = {
'span': SPAN_PROPERTY,
'split_names': SPLIT_NAMES_PROPERTY,
}
# TODO(b/158334890): deprecate ExternalArtifact.
class ExternalArtifact(Artifact):
TYPE_NAME = 'ExternalArtifact'
class InferenceResult(Artifact):
TYPE_NAME = 'InferenceResult'
class InfraBlessing(Artifact):
TYPE_NAME = 'InfraBlessing'
class Model(Artifact):
TYPE_NAME = 'Model'
class ModelRun(Artifact):
TYPE_NAME = 'ModelRun'
class ModelBlessing(Artifact):
TYPE_NAME = 'ModelBlessing'
class ModelEvaluation(Artifact):
TYPE_NAME = 'ModelEvaluation'
class PushedModel(Artifact):
TYPE_NAME = 'PushedModel'
class Schema(Artifact):
TYPE_NAME = 'Schema'
class Bytes(ValueArtifact):
"""Artifacts representing raw bytes."""
TYPE_NAME = 'Bytes'
def encode(self, value: bytes):
if not isinstance(value, bytes):
raise TypeError('Expecting bytes but got value %s of type %s' %
(str(value), type(value)))
return value
def decode(self, serialized_value: bytes):
return serialized_value
class String(ValueArtifact):
"""String-typed artifact."""
TYPE_NAME = 'String'
# Note: currently we enforce a unicode-encoded string.
def encode(self, value: Text) -> bytes:
if not isinstance(value, Text):
raise TypeError('Expecting Text but got value %s of type %s' %
(str(value), type(value)))
return value.encode('utf-8')
def decode(self, serialized_value: bytes) -> Text:
return serialized_value.decode('utf-8')
class Integer(ValueArtifact):
"""Integer-typed artifact."""
TYPE_NAME = 'Integer'
def encode(self, value: int) -> bytes:
if not isinstance(value, int):
raise TypeError('Expecting int but got value %s of type %s' %
(str(value), type(value)))
return str(value).encode('utf-8')
def decode(self, serialized_value: bytes) -> int:
return int(serialized_value)
class Float(ValueArtifact):
"""Float-typed artifact."""
TYPE_NAME = 'Float'
_POSITIVE_INFINITY = float('Inf')
_NEGATIVE_INFINITY = float('-Inf')
_ENCODED_POSITIVE_INFINITY = 'Infinity'
_ENCODED_NEGATIVE_INFINITY = '-Infinity'
_ENCODED_NAN = 'NaN'
def encode(self, value: float) -> bytes:
if not isinstance(value, float):
raise TypeError('Expecting float but got value %s of type %s' %
(str(value), type(value)))
if math.isinf(value) or math.isnan(value):
absl.logging.warning(
'! The number "%s" may be unsupported by non-python components.' %
value)
str_value = str(value)
# Special encoding for infinities and NaN to increase compatibility with
# other languages.
# Decoding works automatically.
if math.isinf(value):
if value >= 0:
str_value = Float._ENCODED_POSITIVE_INFINITY
else:
str_value = Float._ENCODED_NEGATIVE_INFINITY
if math.isnan(value):
str_value = Float._ENCODED_NAN
return str_value.encode('utf-8')
def decode(self, serialized_value: bytes) -> float:
result = float(serialized_value)
# Check that the decoded value exactly matches the encoded string.
# Note that float() can handle bytes, but Decimal() cannot.
serialized_string = serialized_value.decode('utf-8')
reserialized_string = str(result)
is_exact = (decimal.Decimal(serialized_string) ==
decimal.Decimal(reserialized_string))
if not is_exact:
absl.logging.warning(
'The number "%s" has lost precision when converted to float "%s"' %
(serialized_value, reserialized_string))
return result
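# Hedged round-trip sketch for the special float encodings above (assumes a
# Float artifact can be instantiated directly, which may not hold in every
# TFX version):
#
#   f = Float()
#   f.encode(float('inf'))     # -> b'Infinity'
#   f.decode(b'-Infinity')     # -> float('-inf')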
class TransformGraph(Artifact):
TYPE_NAME = 'TransformGraph'
class HyperParameters(Artifact):
TYPE_NAME = 'HyperParameters'
# WIP and subject to change.
class DataView(Artifact):
TYPE_NAME = 'DataView'
| 26.962264
| 77
| 0.709237
|
eef8001a6f90b07684c7ea61f561b495eb3eaf41
| 2,224
|
py
|
Python
|
thumt/utils/summary.py
|
Yuran-Zhao/THUMT
|
10f0433c1f2fe3f992d26ccb6f4f8dec457ce695
|
[
"BSD-3-Clause"
] | 422
|
2018-12-03T19:47:06.000Z
|
2022-03-29T13:18:09.000Z
|
thumt/utils/summary.py
|
Yuran-Zhao/THUMT
|
10f0433c1f2fe3f992d26ccb6f4f8dec457ce695
|
[
"BSD-3-Clause"
] | 60
|
2019-02-11T02:43:52.000Z
|
2022-02-20T07:24:40.000Z
|
thumt/utils/summary.py
|
Yuran-Zhao/THUMT
|
10f0433c1f2fe3f992d26ccb6f4f8dec457ce695
|
[
"BSD-3-Clause"
] | 121
|
2018-12-29T03:40:40.000Z
|
2022-03-03T11:33:23.000Z
|
# coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import queue
import threading
import torch
import torch.distributed as dist
import torch.utils.tensorboard as tensorboard
_SUMMARY_WRITER = None
_QUEUE = None
_THREAD = None
class SummaryWorker(threading.Thread):
def run(self):
global _QUEUE
while True:
item = _QUEUE.get()
name, kwargs = item
if name == "stop":
break
self.write_summary(name, **kwargs)
def write_summary(self, name, **kwargs):
if name == "scalar":
_SUMMARY_WRITER.add_scalar(**kwargs)
elif name == "histogram":
_SUMMARY_WRITER.add_histogram(**kwargs)
def stop(self):
global _QUEUE
_QUEUE.put(("stop", None))
self.join()
def init(log_dir, enable=True):
global _SUMMARY_WRITER
global _QUEUE
global _THREAD
if enable and dist.get_rank() == 0:
_SUMMARY_WRITER = tensorboard.SummaryWriter(log_dir)
_QUEUE = queue.Queue()
thread = SummaryWorker(daemon=True)
thread.start()
_THREAD = thread
def scalar(tag, scalar_value, global_step=None, walltime=None,
write_every_n_steps=100):
if _SUMMARY_WRITER is not None:
if global_step % write_every_n_steps == 0:
scalar_value = float(scalar_value)
kwargs = dict(tag=tag, scalar_value=scalar_value,
global_step=global_step, walltime=walltime)
_QUEUE.put(("scalar", kwargs))
def histogram(tag, values, global_step=None, bins="tensorflow", walltime=None,
max_bins=None, write_every_n_steps=100):
if _SUMMARY_WRITER is not None:
if global_step % write_every_n_steps == 0:
values = values.detach().cpu()
kwargs = dict(tag=tag, values=values, global_step=global_step,
bins=bins, walltime=walltime, max_bins=max_bins)
_QUEUE.put(("histogram", kwargs))
def close():
if _SUMMARY_WRITER is not None:
_THREAD.stop()
_SUMMARY_WRITER.close()
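# Hedged usage sketch (assumes torch.distributed has been initialised, since
# init() checks dist.get_rank(); names of the log directory and tag are
# illustrative):
#
#   init("runs/exp1", enable=True)
#   scalar("train/loss", 0.42, global_step=100)
#   close()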
| 26.164706
| 78
| 0.635342
|
9c548b8ede93e6538642caa7993404547b3b445e
| 3,664
|
py
|
Python
|
src/ape/_cli.py
|
defidipshit/ape
|
5e003ee5003ca2491aeba9d0d262a8e9e138e084
|
[
"Apache-2.0"
] | 1
|
2022-01-25T21:26:01.000Z
|
2022-01-25T21:26:01.000Z
|
src/ape/_cli.py
|
violetsummerzine/ape
|
8b39dc3d068dea98ec68e17c5b5446dfa8f9a7e3
|
[
"Apache-2.0"
] | 1
|
2022-03-05T07:08:07.000Z
|
2022-03-11T17:05:13.000Z
|
src/ape/_cli.py
|
violetsummerzine/ape
|
8b39dc3d068dea98ec68e17c5b5446dfa8f9a7e3
|
[
"Apache-2.0"
] | null | null | null |
import difflib
import re
import traceback
from typing import Any, Dict
import click
import yaml
from ape.cli import Abort, ape_cli_context
from ape.exceptions import ApeException
from ape.logging import LogLevel, logger
from ape.plugins import clean_plugin_name
try:
from importlib import metadata # type: ignore
except ImportError:
import importlib_metadata as metadata # type: ignore
_DIFFLIB_CUT_OFF = 0.6
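# Minimal sketch of the fuzzy matching used for command suggestions below
# (command names are illustrative):
#
#   import difflib
#   difflib.get_close_matches("compilee", ["compile", "console", "accounts"],
#                             cutoff=_DIFFLIB_CUT_OFF)
#   # -> ['compile']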
def display_config(ctx, param, value):
# NOTE: This is necessary not to interrupt how version or help is intercepted
if not value or ctx.resilient_parsing:
return
from ape import project
click.echo("# Current configuration")
click.echo(yaml.dump(project.config_manager.dict()))
ctx.exit() # NOTE: Must exit to bypass running ApeCLI
class ApeCLI(click.MultiCommand):
_commands = None
def invoke(self, ctx) -> Any:
try:
return super().invoke(ctx)
except click.UsageError as err:
self._suggest_cmd(err)
except ApeException as err:
if logger.level == LogLevel.DEBUG.value:
tb = traceback.format_exc()
err_message = tb or str(err)
else:
err_message = str(err)
raise Abort(f"({type(err).__name__}) {err_message}") from err
@staticmethod
def _suggest_cmd(usage_error):
if usage_error.message is None:
raise usage_error
match = re.match("No such command '(.*)'.", usage_error.message)
if not match:
raise usage_error
bad_arg = match.groups()[0]
suggested_commands = difflib.get_close_matches(
bad_arg, list(usage_error.ctx.command.commands.keys()), cutoff=_DIFFLIB_CUT_OFF
)
if suggested_commands:
if bad_arg not in suggested_commands:
usage_error.message = (
f"No such command '{bad_arg}'. Did you mean {' or '.join(suggested_commands)}?"
)
raise usage_error
@property
def commands(self) -> Dict:
group_name = "ape_cli_subcommands"
if not self._commands:
try:
entry_points = metadata.entry_points(group=group_name) # type: ignore
except TypeError:
entry_points = metadata.entry_points()
entry_points = (
entry_points[group_name] if group_name in entry_points else [] # type: ignore
)
if not entry_points:
raise Abort("Missing registered cli subcommands")
self._commands = {
clean_plugin_name(entry_point.name): entry_point.load # type: ignore
for entry_point in entry_points
}
return self._commands
def list_commands(self, ctx):
return list(sorted(self.commands))
def get_command(self, ctx, name):
if name in self.commands:
try:
return self.commands[name]()
except Exception as err:
logger.warn_from_exception(
err, f"Unable to load CLI endpoint for plugin 'ape_{name}'"
)
# NOTE: don't return anything so Click displays proper error
@click.command(cls=ApeCLI, context_settings=dict(help_option_names=["-h", "--help"]))
@ape_cli_context()
@click.version_option(message="%(version)s", package_name="eth-ape")
@click.option(
"--config",
is_flag=True,
is_eager=True,
expose_value=False,
callback=display_config,
help="Show configuration options (using `ape-config.yaml`)",
)
def cli(context):
_ = context
| 29.788618
| 99
| 0.620087
|
a78cc0747930a163b8b1a1ee0e11429c0cc59d03
| 4,706
|
py
|
Python
|
gamestonk_terminal/technical_analysis/ta_menu.py
|
ooaj/GamestonkTerminal
|
6b5b494458b0e01a2db7a06890610454220d8a8f
|
[
"MIT"
] | 2
|
2021-04-26T14:13:49.000Z
|
2021-05-24T04:19:44.000Z
|
gamestonk_terminal/technical_analysis/ta_menu.py
|
ooaj/GamestonkTerminal
|
6b5b494458b0e01a2db7a06890610454220d8a8f
|
[
"MIT"
] | null | null | null |
gamestonk_terminal/technical_analysis/ta_menu.py
|
ooaj/GamestonkTerminal
|
6b5b494458b0e01a2db7a06890610454220d8a8f
|
[
"MIT"
] | null | null | null |
import argparse
from gamestonk_terminal.technical_analysis import overlap as ta_overlap
from gamestonk_terminal.technical_analysis import momentum as ta_momentum
from gamestonk_terminal.technical_analysis import trend as ta_trend
from gamestonk_terminal.technical_analysis import volatility as ta_volatility
from gamestonk_terminal.technical_analysis import volume as ta_volume
import matplotlib.pyplot as plt
def print_technical_analysis(s_ticker, s_start, s_interval):
""" Print help """
s_intraday = (f"Intraday {s_interval}", "Daily")[s_interval == "1440min"]
if s_start:
print(f"\n{s_intraday} Stock: {s_ticker} (from {s_start.strftime('%Y-%m-%d')})")
else:
print(f"\n{s_intraday} Stock: {s_ticker}")
print("\nTechnical Analysis:") # https://github.com/twopirllc/pandas-ta
print(" help show this technical analysis menu again")
print(" q quit this menu, and shows back to main menu")
print(" quit quit to abandon program")
print("overlap:")
print(" ema exponential moving average")
print(" sma simple moving average")
print(" vwap volume weighted average price")
print("momentum:")
print(" cci commodity channel index")
print(" macd moving average convergence/divergence")
print(" rsi relative strength index")
print(" stoch stochastic oscillator")
print("trend:")
print(" adx average directional movement index")
print(" aroon aroon indicator")
print("volatility:")
print(" bbands bollinger bands")
print("volume:")
print(" ad chaikin accumulation/distribution line values")
print(" obv on balance volume")
print("")
def ta_menu(df_stock, s_ticker, s_start, s_interval):
# Add list of arguments that the technical analysis parser accepts
ta_parser = argparse.ArgumentParser(prog="ta", add_help=False)
ta_parser.add_argument(
"cmd",
choices=[
"help",
"q",
"quit",
"ema",
"sma",
"vwap",
"cci",
"macd",
"rsi",
"stoch",
"adx",
"aroon",
"bbands",
"ad",
"obv",
],
)
print_technical_analysis(s_ticker, s_start, s_interval)
# Loop forever and ever
while True:
# Get input command from user
as_input = input("> ")
# Images are non-blocking - this allows closing them if we type another command
plt.close()
# Parse technical analysis command from the list of possible commands
try:
(ns_known_args, l_args) = ta_parser.parse_known_args(as_input.split())
except SystemExit:
print("The command selected doesn't exist\n")
continue
if ns_known_args.cmd == "help":
print_technical_analysis(s_ticker, s_start, s_interval)
elif ns_known_args.cmd == "q":
# Just leave the TA menu
return False
elif ns_known_args.cmd == "quit":
# Abandon the program
return True
# OVERLAP
elif ns_known_args.cmd == "ema":
ta_overlap.ema(l_args, s_ticker, s_interval, df_stock)
elif ns_known_args.cmd == "sma":
ta_overlap.sma(l_args, s_ticker, s_interval, df_stock)
elif ns_known_args.cmd == "vwap":
ta_overlap.vwap(l_args, s_ticker, s_interval, df_stock)
# MOMENTUM
elif ns_known_args.cmd == "cci":
ta_momentum.cci(l_args, s_ticker, s_interval, df_stock)
elif ns_known_args.cmd == "macd":
ta_momentum.macd(l_args, s_ticker, s_interval, df_stock)
elif ns_known_args.cmd == "rsi":
ta_momentum.rsi(l_args, s_ticker, s_interval, df_stock)
elif ns_known_args.cmd == "stoch":
ta_momentum.stoch(l_args, s_ticker, s_interval, df_stock)
# TREND
elif ns_known_args.cmd == "adx":
ta_trend.adx(l_args, s_ticker, s_interval, df_stock)
elif ns_known_args.cmd == "aroon":
ta_trend.aroon(l_args, s_ticker, s_interval, df_stock)
# VOLATILITY
elif ns_known_args.cmd == "bbands":
ta_volatility.bbands(l_args, s_ticker, s_interval, df_stock)
# VOLUME
elif ns_known_args.cmd == "ad":
ta_volume.ad(l_args, s_ticker, s_interval, df_stock)
elif ns_known_args.cmd == "obv":
ta_volume.obv(l_args, s_ticker, s_interval, df_stock)
else:
print("Command not recognized!")
| 33.140845
| 88
| 0.604972
|
ad147cabc0b3f5ff71da21e5df540c3c07a3d676
| 857
|
py
|
Python
|
python/src/main/python/pyalink/alink/tests/examples/from_docs/test_textnearestneighborpredictbatchop.py
|
wenwei8268/Alink
|
c00702538c95a32403985ebd344eb6aeb81749a7
|
[
"Apache-2.0"
] | null | null | null |
python/src/main/python/pyalink/alink/tests/examples/from_docs/test_textnearestneighborpredictbatchop.py
|
wenwei8268/Alink
|
c00702538c95a32403985ebd344eb6aeb81749a7
|
[
"Apache-2.0"
] | null | null | null |
python/src/main/python/pyalink/alink/tests/examples/from_docs/test_textnearestneighborpredictbatchop.py
|
wenwei8268/Alink
|
c00702538c95a32403985ebd344eb6aeb81749a7
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from pyalink.alink import *
import numpy as np
import pandas as pd
class TestTextNearestNeighborPredictBatchOp(unittest.TestCase):
def test_textnearestneighborpredictbatchop(self):
df = pd.DataFrame([
[0, "a b c d e", "a a b c e"],
[1, "a a c e d w", "a a b b e d"],
[2, "c d e f a", "b b c e f a"],
[3, "b d e f h", "d d e a c"],
[4, "a c e d m", "a e e f b c"]
])
inOp = BatchOperator.fromDataframe(df, schemaStr='id long, text1 string, text2 string')
train = TextNearestNeighborTrainBatchOp().setIdCol("id").setSelectedCol("text1").setMetric("LEVENSHTEIN_SIM").linkFrom(inOp)
predict = TextNearestNeighborPredictBatchOp().setSelectedCol("text2").setTopN(3).linkFrom(train, inOp)
predict.print()
pass
| 40.809524
| 132
| 0.597433
|
48cf5feeaf58af48d9790ace6650b0379a8ea085
| 312
|
py
|
Python
|
other/dingding/dingtalk/api/rest/OapiDepartmentListIdsRequest.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
other/dingding/dingtalk/api/rest/OapiDepartmentListIdsRequest.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
other/dingding/dingtalk/api/rest/OapiDepartmentListIdsRequest.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
'''
Created by auto_sdk on 2018.07.25
'''
from dingtalk.api.base import RestApi
class OapiDepartmentListIdsRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.id = None
def getHttpMethod(self):
return 'GET'
def getapiname(self):
return 'dingtalk.oapi.department.list_ids'
| 20.8
| 44
| 0.753205
|
84e97bf1561801142f8f5fa4e4b1f24eae4be2be
| 22,282
|
py
|
Python
|
python/ccxt/async_support/gateio.py
|
victor95pc/ccxt
|
5c3e606296a1b15852a35f1330b645f451fa08d6
|
[
"MIT"
] | 1
|
2019-03-17T22:44:30.000Z
|
2019-03-17T22:44:30.000Z
|
python/ccxt/async_support/gateio.py
|
Lara-Bell/ccxt
|
e09230b4b60d5c33e3f6ebc044002bab6f733553
|
[
"MIT"
] | null | null | null |
python/ccxt/async_support/gateio.py
|
Lara-Bell/ccxt
|
e09230b4b60d5c33e3f6ebc044002bab6f733553
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import hashlib
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
class gateio (Exchange):
def describe(self):
return self.deep_extend(super(gateio, self).describe(), {
'id': 'gateio',
'name': 'Gate.io',
'countries': ['CN'],
'version': '2',
'rateLimit': 1000,
'has': {
'CORS': False,
'createMarketOrder': False,
'fetchTickers': True,
'withdraw': True,
'createDepositAddress': True,
'fetchDepositAddress': True,
'fetchClosedOrders': True,
'fetchOpenOrders': True,
'fetchOrderTrades': True,
'fetchOrders': True,
'fetchOrder': True,
'fetchMyTrades': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/31784029-0313c702-b509-11e7-9ccc-bc0da6a0e435.jpg',
'api': {
'public': 'https://data.gate.io/api',
'private': 'https://data.gate.io/api',
},
'www': 'https://gate.io/',
'doc': 'https://gate.io/api2',
'fees': [
'https://gate.io/fee',
'https://support.gate.io/hc/en-us/articles/115003577673',
],
},
'api': {
'public': {
'get': [
'pairs',
'marketinfo',
'marketlist',
'tickers',
'ticker/{id}',
'orderBook/{id}',
'trade/{id}',
'tradeHistory/{id}',
'tradeHistory/{id}/{tid}',
],
},
'private': {
'post': [
'balances',
'depositAddress',
'newAddress',
'depositsWithdrawals',
'buy',
'sell',
'cancelOrder',
'cancelAllOrders',
'getOrder',
'openOrders',
'tradeHistory',
'withdraw',
],
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'maker': 0.002,
'taker': 0.002,
},
},
'exceptions': {
'4': DDoSProtection,
'7': NotSupported,
'8': NotSupported,
'9': NotSupported,
'15': DDoSProtection,
'16': OrderNotFound,
'17': OrderNotFound,
'21': InsufficientFunds,
},
# https://gate.io/api2#errCode
'errorCodeNames': {
'1': 'Invalid request',
'2': 'Invalid version',
'3': 'Invalid request',
'4': 'Too many attempts',
'5': 'Invalid sign',
'6': 'Invalid sign',
'7': 'Currency is not supported',
'8': 'Currency is not supported',
'9': 'Currency is not supported',
'10': 'Verified failed',
'11': 'Obtaining address failed',
'12': 'Empty params',
'13': 'Internal error, please report to administrator',
'14': 'Invalid user',
'15': 'Cancel order too fast, please wait 1 min and try again',
'16': 'Invalid order id or order is already closed',
'17': 'Invalid orderid',
'18': 'Invalid amount',
'19': 'Not permitted or trade is disabled',
'20': 'Your order size is too small',
'21': 'You don\'t have enough fund',
},
'options': {
'limits': {
'cost': {
'min': {
'BTC': 0.0001,
'ETH': 0.001,
'USDT': 1,
},
},
},
},
})
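# fetch_markets below converts every entry of the public marketinfo listing into a ccxt market
# structure: price precision comes from 'decimal_places', the minimum cost from the per-quote
# defaults in options['limits'], and maker/taker fees from details['fee'] / 100.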
async def fetch_markets(self, params={}):
response = await self.publicGetMarketinfo()
markets = self.safe_value(response, 'pairs')
if not markets:
raise ExchangeError(self.id + ' fetchMarkets got an unrecognized response')
result = []
for i in range(0, len(markets)):
market = markets[i]
keys = list(market.keys())
id = keys[0]
details = market[id]
baseId, quoteId = id.split('_')
base = baseId.upper()
quote = quoteId.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
precision = {
'amount': 8,
'price': details['decimal_places'],
}
amountLimits = {
'min': details['min_amount'],
'max': None,
}
priceLimits = {
'min': math.pow(10, -details['decimal_places']),
'max': None,
}
defaultCost = amountLimits['min'] * priceLimits['min']
minCost = self.safe_float(self.options['limits']['cost']['min'], quote, defaultCost)
costLimits = {
'min': minCost,
'max': None,
}
limits = {
'amount': amountLimits,
'price': priceLimits,
'cost': costLimits,
}
active = True
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'active': active,
'maker': details['fee'] / 100,
'taker': details['fee'] / 100,
'precision': precision,
'limits': limits,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
balance = await self.privatePostBalances()
result = {'info': balance}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
code = self.common_currency_code(currency)
account = self.account()
if 'available' in balance:
if currency in balance['available']:
account['free'] = float(balance['available'][currency])
if 'locked' in balance:
if currency in balance['locked']:
account['used'] = float(balance['locked'][currency])
account['total'] = self.sum(account['free'], account['used'])
result[code] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
orderbook = await self.publicGetOrderBookId(self.extend({
'id': self.market_id(symbol),
}, params))
return self.parse_order_book(orderbook)
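# parse_ticker derives open, change and average from 'last' and 'percentChange', and cross-maps
# the exchange's baseVolume/quoteVolume fields into the opposite ccxt keys.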
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
percentage = self.safe_float(ticker, 'percentChange')
open = None
change = None
average = None
if (last is not None) and(percentage is not None):
relativeChange = percentage / 100
open = last / self.sum(1, relativeChange)
change = last - open
average = self.sum(last, open) / 2
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high24hr'),
'low': self.safe_float(ticker, 'low24hr'),
'bid': self.safe_float(ticker, 'highestBid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'lowestAsk'),
'askVolume': None,
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': self.safe_float(ticker, 'quoteVolume'),
'quoteVolume': self.safe_float(ticker, 'baseVolume'),
'info': ticker,
}
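# handle_errors inspects every raw JSON response: when 'result' is the string 'false' it looks up
# the numeric 'code' in self.exceptions and raises the mapped ccxt error, preferring the text in
# errorCodeNames over the body's own 'message'; codes without a mapping fall through silently.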
def handle_errors(self, code, reason, url, method, headers, body, response):
if len(body) <= 0:
return
if body[0] != '{':
return
jsonbodyParsed = json.loads(body)
resultString = self.safe_string(jsonbodyParsed, 'result', '')
if resultString != 'false':
return
errorCode = self.safe_string(jsonbodyParsed, 'code')
if errorCode is not None:
exceptions = self.exceptions
errorCodeNames = self.errorCodeNames
if errorCode in exceptions:
message = ''
if errorCode in errorCodeNames:
message = errorCodeNames[errorCode]
else:
message = self.safe_string(jsonbodyParsed, 'message', '(unknown)')
raise exceptions[errorCode](message)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
tickers = await self.publicGetTickers(params)
result = {}
ids = list(tickers.keys())
for i in range(0, len(ids)):
id = ids[i]
baseId, quoteId = id.split('_')
base = baseId.upper()
quote = quoteId.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
ticker = tickers[id]
market = None
if symbol in self.markets:
market = self.markets[symbol]
if id in self.markets_by_id:
market = self.markets_by_id[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
ticker = await self.publicGetTickerId(self.extend({
'id': market['id'],
}, params))
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market):
# public fetchTrades
timestamp = self.safe_integer(trade, 'timestamp')
# private fetchMyTrades
timestamp = self.safe_integer(trade, 'time_unix', timestamp)
if timestamp is not None:
timestamp *= 1000
id = self.safe_string(trade, 'tradeID')
id = self.safe_string(trade, 'id', id)
# take either of orderid or orderId
orderId = self.safe_string(trade, 'orderid')
orderId = self.safe_string(trade, 'orderNumber', orderId)
price = self.safe_float(trade, 'rate')
amount = self.safe_float(trade, 'amount')
cost = None
if price is not None:
if amount is not None:
cost = price * amount
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'order': orderId,
'type': None,
'side': trade['type'],
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetTradeHistoryId(self.extend({
'id': market['id'],
}, params))
return self.parse_trades(response['data'], market, since, limit)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
response = await self.privatePostOpenOrders(params)
return self.parse_orders(response['orders'], None, since, limit)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
response = await self.privatePostGetOrder(self.extend({
'orderNumber': id,
'currencyPair': self.market_id(symbol),
}, params))
return self.parse_order(response['order'])
def parse_order_status(self, status):
statuses = {
'cancelled': 'canceled',
# 'closed': 'closed', # these two statuses aren't actually needed
# 'open': 'open', # as they are mapped one-to-one
}
if status in statuses:
return statuses[status]
return status
def parse_order(self, order, market=None):
#
# {'amount': '0.00000000',
# 'currencyPair': 'xlm_usdt',
# 'fee': '0.0113766632239302 USDT',
# 'feeCurrency': 'USDT',
# 'feePercentage': 0.18,
# 'feeValue': '0.0113766632239302',
# 'filledAmount': '30.14004987',
# 'filledRate': 0.2097,
# 'initialAmount': '30.14004987',
# 'initialRate': '0.2097',
# 'left': 0,
# 'orderNumber': '998307286',
# 'rate': '0.2097',
# 'status': 'closed',
# 'timestamp': 1531158583,
# 'type': 'sell'},
#
id = self.safe_string(order, 'orderNumber')
symbol = None
marketId = self.safe_string(order, 'currencyPair')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
timestamp = self.safe_integer(order, 'timestamp')
if timestamp is not None:
timestamp *= 1000
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'type')
price = self.safe_float(order, 'filledRate')
amount = self.safe_float(order, 'initialAmount')
filled = self.safe_float(order, 'filledAmount')
remaining = self.safe_float(order, 'leftAmount')
if remaining is None:
# In the order status response, this field has a different name.
remaining = self.safe_float(order, 'left')
feeCost = self.safe_float(order, 'feeValue')
feeCurrency = self.safe_string(order, 'feeCurrency')
feeRate = self.safe_float(order, 'feePercentage')
if feeRate is not None:
feeRate = feeRate / 100
if feeCurrency is not None:
if feeCurrency in self.currencies_by_id:
feeCurrency = self.currencies_by_id[feeCurrency]['code']
return {
'id': id,
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'status': status,
'symbol': symbol,
'type': 'limit',
'side': side,
'price': price,
'cost': None,
'amount': amount,
'filled': filled,
'remaining': remaining,
'trades': None,
'fee': {
'cost': feeCost,
'currency': feeCurrency,
'rate': feeRate,
},
'info': order,
}
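# create_order rejects market orders up front, picks the REST call dynamically from the side
# ('privatePostBuy' or 'privatePostSell'), and parses the response merged with a default
# 'open' status.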
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type == 'market':
raise ExchangeError(self.id + ' allows limit orders only')
await self.load_markets()
method = 'privatePost' + self.capitalize(side)
market = self.market(symbol)
order = {
'currencyPair': market['id'],
'rate': price,
'amount': amount,
}
response = await getattr(self, method)(self.extend(order, params))
return self.parse_order(self.extend({
'status': 'open',
'type': side,
'initialAmount': amount,
}, response), market)
async def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder requires symbol argument')
await self.load_markets()
return await self.privatePostCancelOrder({
'orderNumber': id,
'currencyPair': self.market_id(symbol),
})
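# query_deposit_address is the shared helper behind createDepositAddress ('New') and
# fetchDepositAddress ('Deposit'); it raises InvalidAddress on placeholder responses that still
# contain the word 'address' and splits XRP responses into a base address plus tag.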
async def query_deposit_address(self, method, code, params={}):
await self.load_markets()
currency = self.currency(code)
method = 'privatePost' + method + 'Address'
response = await getattr(self, method)(self.extend({
'currency': currency['id'],
}, params))
address = self.safe_string(response, 'addr')
tag = None
if (address is not None) and(address.find('address') >= 0):
raise InvalidAddress(self.id + ' queryDepositAddress ' + address)
if code == 'XRP':
parts = address.split(' ')
address = parts[0]
tag = parts[1]
return {
'currency': currency,
'address': address,
'tag': tag,
'info': response,
}
async def create_deposit_address(self, code, params={}):
return await self.query_deposit_address('New', code, params)
async def fetch_deposit_address(self, code, params={}):
return await self.query_deposit_address('Deposit', code, params)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
response = await self.privatePostOpenOrders()
return self.parse_orders(response['orders'], market, since, limit)
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
response = await self.privatePostTradeHistory(self.extend({
'currencyPair': market['id'],
'orderNumber': id,
}, params))
return self.parse_trades(response['trades'], market, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ExchangeError(self.id + ' fetchMyTrades requires symbol param')
await self.load_markets()
market = self.market(symbol)
id = market['id']
response = await self.privatePostTradeHistory(self.extend({'currencyPair': id}, params))
return self.parse_trades(response['trades'], market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
response = await self.privatePostWithdraw(self.extend({
'currency': currency['id'],
'amount': amount,
'address': address,  # Address must exist in your AddressBook in security settings
}, params))
return {
'info': response,
'id': None,
}
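# sign builds the outgoing request: public endpoints use a plain GET with an urlencoded query
# string, while private endpoints POST an urlencoded body containing a nonce and send Key/Sign
# headers, Sign being the HMAC-SHA512 of that body keyed with the account secret.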
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
prefix = (api + '/') if (api == 'private') else ''
url = self.urls['api'][api] + self.version + '/1/' + prefix + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = self.nonce()
request = {'nonce': nonce}
body = self.urlencode(self.extend(request, query))
signature = self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512)
headers = {
'Key': self.apiKey,
'Sign': signature,
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
if 'result' in response:
result = response['result']
message = self.id + ' ' + self.json(response)
if result is None:
raise ExchangeError(message)
if isinstance(result, basestring):
if result != 'true':
raise ExchangeError(message)
elif not result:
raise ExchangeError(message)
return response
| 38.351119 | 126 | 0.509829 |