Dataset schema (column: type, range; ⌀ marks nullable columns):
hexsha: string (length 40) | size: int64 (4 – 1.02M) | ext: string (8 classes) | lang: string (1 class)
max_stars_repo_path: string (length 4–209) | max_stars_repo_name: string (length 5–121) | max_stars_repo_head_hexsha: string (length 40) | max_stars_repo_licenses: list (length 1–10) | max_stars_count: int64 (1 – 191k) ⌀ | max_stars_repo_stars_event_min_datetime: string (length 24) ⌀ | max_stars_repo_stars_event_max_datetime: string (length 24) ⌀
max_issues_repo_path: string (length 4–209) | max_issues_repo_name: string (length 5–121) | max_issues_repo_head_hexsha: string (length 40) | max_issues_repo_licenses: list (length 1–10) | max_issues_count: int64 (1 – 67k) ⌀ | max_issues_repo_issues_event_min_datetime: string (length 24) ⌀ | max_issues_repo_issues_event_max_datetime: string (length 24) ⌀
max_forks_repo_path: string (length 4–209) | max_forks_repo_name: string (length 5–121) | max_forks_repo_head_hexsha: string (length 40) | max_forks_repo_licenses: list (length 1–10) | max_forks_count: int64 (1 – 105k) ⌀ | max_forks_repo_forks_event_min_datetime: string (length 24) ⌀ | max_forks_repo_forks_event_max_datetime: string (length 24) ⌀
content: string (length 4 – 1.02M) | avg_line_length: float64 (1.07 – 66.1k) | max_line_length: int64 (4 – 266k) | alphanum_fraction: float64 (0.01 – 1)
---
hexsha: 958d94cf080ae3091570afce88a5e68658678c8a | size: 5,297 | ext: py | lang: Python
max_stars: path=data/p3BR/R1/benchmark/startQiskit_QC145.py | repo=UCLA-SEAL/QDiff | head=d968cbc47fe926b7f88b4adf10490f1edd6f8819 | licenses=["BSD-3-Clause"] | count=null | min_datetime=null | max_datetime=null
max_issues: path=data/p3BR/R1/benchmark/startQiskit_QC145.py | repo=UCLA-SEAL/QDiff | head=d968cbc47fe926b7f88b4adf10490f1edd6f8819 | licenses=["BSD-3-Clause"] | count=null | min_datetime=null | max_datetime=null
max_forks: path=data/p3BR/R1/benchmark/startQiskit_QC145.py | repo=UCLA-SEAL/QDiff | head=d968cbc47fe926b7f88b4adf10490f1edd6f8819 | licenses=["BSD-3-Clause"] | count=null | min_datetime=null | max_datetime=null
content:
# qubit number=3
# total number=28
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
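# Illustration (added, not part of the original benchmark): build_oracle marks
# exactly the basis states where f returns "1", conjugating the zero bits with
# X gates around one multi-controlled Toffoli per marked input, e.g.
#   oracle = build_oracle(2, lambda rep: "1" if rep == "11" else "0")
#   print(oracle)  # a single CCX onto the target, since "11" needs no X gates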
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.rx(-0.09738937226128368,input_qubit[2]) # number=2
prog.h(input_qubit[1]) # number=3
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
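# Quick hand-check of bernstein_test_1 (added illustration, not in the original
# file): with a = "011" and b = "1",
#   bernstein_test_1("100") == "1"   # 011 . 100 = 0, then 0 xor 1 = 1
#   bernstein_test_1("010") == "0"   # 011 . 010 = 1, then 1 xor 1 = 0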
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot = 4000
writefile = open("../data/startQiskit_QC145.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_belem")
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
avg_line_length: 28.788043 | max_line_length: 140 | alphanum_fraction: 0.628469
hexsha: 21a99c53a1fff9b635b517b2588e50411d486e40 | size: 7,263 | ext: py | lang: Python
max_stars: path=scripts/spikeglx_spikeinterface_pipeline.py | repo=catalystneuro/brody-lab-to-nwb | head=bb792591eae988b2dec1a3a608979832da8f884d | licenses=["MIT"] | count=null | min_datetime=null | max_datetime=null
max_issues: path=scripts/spikeglx_spikeinterface_pipeline.py | repo=catalystneuro/brody-lab-to-nwb | head=bb792591eae988b2dec1a3a608979832da8f884d | licenses=["MIT"] | count=10 | min_datetime=2021-05-24T22:17:27.000Z | max_datetime=2022-03-30T05:42:02.000Z
max_forks: path=scripts/spikeglx_spikeinterface_pipeline.py | repo=catalystneuro/brody-lab-to-nwb | head=bb792591eae988b2dec1a3a608979832da8f884d | licenses=["MIT"] | count=null | min_datetime=null | max_datetime=null
content:
# SpikeInterface pipeline for Brody Lab
from pathlib import Path
from pprint import pprint
import spikeextractors as se
import spiketoolkit as st
import spikesorters as ss
n_jobs = 4
chunk_mb = 2000
export_raw_to_phy = False
export_curated_to_phy = True
# Define sorter and params
sorter = "ironclust"
sorter_params = {}
# on the cluster it's better to point to the sorter inside the script
ss.IronClustSorter.set_ironclust_path("/Users/abuccino/Documents/Codes/spike_sorting/sorters/ironclust")
# ss.Kilosort2Sorter.set_kilosort2_path("$HOME/Documents/Codes/spike_sorting/sorters/kilsort2")
# sorter_params = dict(car=False, n_jobs_bin=n_jobs, chunk_mb=chunk_mb)
# Auto curation params
# (Use None to skip one of the curation steps)
isi_violation_threshold = 0.5
snr_threshold = 5
firing_rate_threshold = 0.1
# 1a) Load AP recordings, LF recordings and TTL signals
base_path = Path("/Users/abuccino/Documents/Data/catalyst/brody")
raw_data_path = base_path
session_name = "test_session"
ap_bin_path = Path("/Users/abuccino/Documents/Data/catalyst/brody/test_npix/LRKV_210217_g2_t0.imec0.ap.bin")
lf_bin_path = ap_bin_path.parent / ap_bin_path.name.replace("ap", "lf")
# ap_bin_path = raw_data_path / session_name / f"{session_name}_imec0" / f"{session_name}_g0_t0.imec0.ap.bin"
# lf_bin_path = ap_bin_path.parent / ap_bin_path.name.replace("ap", "lf")
# Make spikeinterface folders
recording_folder = raw_data_path / session_name
spikeinterface_folder = recording_folder / "spikeinterface"
spikeinterface_folder.mkdir(parents=True, exist_ok=True)
# (optional) stub recording for fast testing; set to False for running processing pipeline on entire data
stub_test = True
nsec_stub = 5
recording_ap = se.SpikeGLXRecordingExtractor(ap_bin_path)
recording_lf = se.SpikeGLXRecordingExtractor(lf_bin_path)
if stub_test:
print("Stub test! Clipping recordings!")
recording_ap = se.SubRecordingExtractor(recording_ap,
end_frame=int(nsec_stub * recording_ap.get_sampling_frequency()))
recording_lf = se.SubRecordingExtractor(recording_lf,
end_frame=int(nsec_stub * recording_lf.get_sampling_frequency()))
print(f"Sampling frequency AP: {recording_ap.get_sampling_frequency()}")
print(f"Sampling frequency LF: {recording_lf.get_sampling_frequency()}")
# 2) Pre-processing
apply_cmr = True
if apply_cmr:
recording_processed = st.preprocessing.common_reference(recording_ap)
else:
recording_processed = recording_ap
num_frames = recording_processed.get_num_frames()
# rates, amps = st.postprocessing.compute_channel_spiking_activity(
# recording_processed,
# n_jobs=16,
# chunk_mb=4000,
# start_frame=10 * 30000,
# end_frame=20 * 30000,
# detect_threshold=8,
# recompute_info=True,
# verbose=True
# )
#
#
# fig, axs = plt.subplots(ncols=2)
# sw.plot_activity_map(recording_processed, activity="rate", colorbar=True, ax=axs[0])
# sw.plot_activity_map(recording_processed, activity="amplitude", colorbar=True, ax=axs[1])
# 3) Run spike sorter
print(f"Running {sorter}")
sorting = ss.run_sorter(sorter, recording_processed, output_folder=spikeinterface_folder / sorter / "output",
verbose=True, **sorter_params)
# 4) Post-processing: extract waveforms, templates, quality metrics, extracellular features
# Set postprocessing parameters
# Post-processing params
postprocessing_params = st.postprocessing.get_postprocessing_params()
pprint(postprocessing_params)
# (optional) change parameters
postprocessing_params['max_spikes_per_unit'] = 1000 # with None, all waveforms are extracted
postprocessing_params['n_jobs'] = n_jobs # n jobs
postprocessing_params['chunk_mb'] = chunk_mb # max RAM usage in Mb
postprocessing_params['verbose'] = True # print progress while extracting
# Set quality metric list
# Quality metrics
# qc_list = st.validation.get_quality_metrics_list()
# print(f"Available quality metrics: {qc_list}")
# (optional) define subset of qc
qc_list = ['snr', 'isi_violation', 'firing_rate']
# Set extracellular features
# Extracellular features
ec_list = st.postprocessing.get_template_features_list()
print(f"Available EC features: {ec_list}")
# (optional) define subset of ec
ec_list = None #['peak_to_valley', 'halfwidth']
# Postprocess all sorting outputs
tmp_folder = spikeinterface_folder / sorter / "tmp"
tmp_folder.mkdir(parents=True, exist_ok=True)
# set local tmp folder
sorting.set_tmp_folder(tmp_folder)
# compute waveforms
waveforms = st.postprocessing.get_unit_waveforms(recording_processed, sorting, **postprocessing_params)
# compute templates
templates = st.postprocessing.get_unit_templates(recording_processed, sorting, **postprocessing_params)
# compute extracellular (EC) template features
ec = st.postprocessing.compute_unit_template_features(
recording_processed,
sorting,
feature_names=ec_list,
as_dataframe=True
)
# compute QCs
qc = st.validation.compute_quality_metrics(
sorting,
recording=recording_processed,
metric_names=qc_list,
as_dataframe=True
)
# export raw to phy
if export_raw_to_phy:
phy_folder = spikeinterface_folder / sorter / "phy_raw"
phy_folder.mkdir(parents=True, exist_ok=True)
st.postprocessing.export_to_phy(recording_processed, sorting, phy_folder,
recompute_info=True)
# 5) Automatic curation
# firing rate threshold
if firing_rate_threshold is not None:
sorting_curated = st.curation.threshold_firing_rates(
sorting,
duration_in_frames=num_frames,
threshold=firing_rate_threshold,
threshold_sign='less'
)
else:
sorting_curated = sorting
# isi violation threshold
if isi_violation_threshold is not None:
sorting_curated = st.curation.threshold_isi_violations(
sorting_curated,
duration_in_frames=num_frames,
threshold=isi_violation_threshold,
threshold_sign='greater'
)
# SNR threshold
if snr_threshold is not None:
sorting_curated = st.curation.threshold_snrs(
sorting_curated,
recording=recording_processed,
threshold=snr_threshold,
threshold_sign='less'
)
print(f"{sorter} found {len(sorting_curated.get_unit_ids())} units after auto curation")
# export curated to phy
if export_curated_to_phy:
phy_folder = spikeinterface_folder / sorter / "phy_curated"
phy_folder.mkdir(parents=True, exist_ok=True)
# avoid recomputing waveforms twice
if export_raw_to_phy:
recompute_info = False
else:
recompute_info = True
st.postprocessing.export_to_phy(recording_processed, sorting_curated, phy_folder,
recompute_info=recompute_info)
# 6) Save to NWB; writes only the spikes
# The name of the NWBFile containing behavioral or full recording data
nwbfile_path = raw_data_path / session_name / f"{session_name}.nwb"
# Choose the sorting extractor from the notebook environment you would like to write to NWB
chosen_sorting_extractor = sorting_curated
se.NwbSortingExtractor.write_sorting(
sorting=chosen_sorting_extractor,
save_path=nwbfile_path,
overwrite=False # this appends the file. True would write a new file
)
avg_line_length: 30.775424 | max_line_length: 109 | alphanum_fraction: 0.751755
hexsha: 95b740680b31f20497ef0d2967797db745319771 | size: 381 | ext: py | lang: Python
max_stars: path=SegregateEvenOdd.py | repo=jissdeodates/Data-Structures-using-Python | head=4c143976b7d38d62af57e0d2fadb96121f7658e6 | licenses=["Apache-2.0"] | count=null | min_datetime=null | max_datetime=null
max_issues: path=SegregateEvenOdd.py | repo=jissdeodates/Data-Structures-using-Python | head=4c143976b7d38d62af57e0d2fadb96121f7658e6 | licenses=["Apache-2.0"] | count=7 | min_datetime=2021-10-05T17:31:16.000Z | max_datetime=2021-10-05T18:12:28.000Z
max_forks: path=SegregateEvenOdd.py | repo=jissdeodates/Data-Structures-using-Python | head=4c143976b7d38d62af57e0d2fadb96121f7658e6 | licenses=["Apache-2.0"] | count=7 | min_datetime=2021-10-04T05:33:50.000Z | max_datetime=2021-10-05T18:09:30.000Z
content:
# fn to segregate even and odd numbers in array
# Time Complexity = O(n) & Space Complexity = O(1)
def segregate_even_odd(arr,n):
i = -1
j = 0
while j != n:
if arr[j] % 2 == 0:
i += 1
arr[i],arr[j] = arr[j],arr[i]
j += 1
return arr
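# Hand-trace of the two-pointer pass above (added illustration, not from the
# original file): for arr = [7, 5, 8, 4], i trails the last even slot while j scans:
#   j=2 finds 8 -> i=0, swap arr[0] and arr[2] -> [8, 5, 7, 4]
#   j=3 finds 4 -> i=1, swap arr[1] and arr[3] -> [8, 4, 7, 5]
# so all evens end up before all odds, in O(n) time with O(1) extra space.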
# Driver's Code
arr = [7,5,8,4,3,6,2,11,9,2]
n = len(arr)
print(segregate_even_odd(arr,n))
avg_line_length: 21.166667 | max_line_length: 50 | alphanum_fraction: 0.530184
hexsha: 9c6ed126069f51b8e0d3b76e2935cf2b3a9d84d8 | size: 1,754 | ext: py | lang: Python
max_stars: path=Lab1-2/tests.py | repo=AriosJentu/BigDataCource | head=2f46296d9637148d67326fdbfa791b313124d479 | licenses=["MIT"] | count=null | min_datetime=null | max_datetime=null
max_issues: path=Lab1-2/tests.py | repo=AriosJentu/BigDataCource | head=2f46296d9637148d67326fdbfa791b313124d479 | licenses=["MIT"] | count=null | min_datetime=null | max_datetime=null
max_forks: path=Lab1-2/tests.py | repo=AriosJentu/BigDataCource | head=2f46296d9637148d67326fdbfa791b313124d479 | licenses=["MIT"] | count=null | min_datetime=null | max_datetime=null
content:
import pytest
from model import PictureFile
def test_file_lab1_p1():
file = PictureFile("tests/lab1f1.json")
file.open()
file.read_meta()
file.close()
assert file.width == file.height == 3
def test_file_lab1_p2_default():
file = PictureFile("tests/lab1f2.json")
file.open()
file.read_meta()
data = "["
data += ",".join(
[
str(file.read_next_frame()).replace(" ", "")
for i in range(file.frames)
])
data += "]"
file.close()
#---------------
file = open("tests/lab1f2.json")
defdata = file.read()
file.close()
#---------------
assert data == defdata
def test_file_lab1_p2_iterative():
file = PictureFile("tests/lab1f2.json")
file.open()
file.read_meta()
data = "["
data += ",".join(
[
str([
j for j in file.iter_next_frame()
]).replace(" ", "")
for _ in range(file.frames)
])
data += "]"
file.close()
#---------------
file = open("tests/lab1f2.json")
defdata = file.read()
file.close()
#---------------
assert data == defdata
def test_file_lab2():
file = PictureFile("tests/lab2f1.json")
file.open()
file.read_meta()
file.close()
assert file.width == file.height == 3
def test_files_arr_of_ites_equal_default():
file = PictureFile("tests/lab2f1.json")
file.open()
file.read_meta()
defdata = ",".join([
str(file.read_next_frame())
for i in range(file.frames)
])
iterdata = ",".join([
str([
j for j in file.iter_next_frame()
])
for _ in range(file.frames)
])
file.close()
assert defdata == iterdata
def test_check_frames():
file = PictureFile("tests/lab2f1.json")
file.open()
file.read_meta()
x = file.read_current_frame()
y = file.read_current_frame()
z = file.read_next_frame()
assert x == y
assert x != z
assert y != z
avg_line_length: 15.522124 | max_line_length: 47 | alphanum_fraction: 0.620867
hexsha: 092a497bb66aa323912955ef276311603ecf4b0a | size: 902 | ext: py | lang: Python
max_stars: path=apps/shasta/matrixMultiplication3.py | repo=praneethnamburi/blender-ScriptViz | head=95554873ecebc0aa6b151d90d2ecf952be4b8880 | licenses=["MIT"] | count=10 | min_datetime=2020-06-12T06:39:11.000Z | max_datetime=2022-02-03T00:24:28.000Z
max_issues: path=apps/shasta/matrixMultiplication3.py | repo=praneethnamburi/blender-ScriptViz | head=95554873ecebc0aa6b151d90d2ecf952be4b8880 | licenses=["MIT"] | count=null | min_datetime=null | max_datetime=null
max_forks: path=apps/shasta/matrixMultiplication3.py | repo=praneethnamburi/blender-ScriptViz | head=95554873ecebc0aa6b151d90d2ecf952be4b8880 | licenses=["MIT"] | count=1 | min_datetime=2021-04-13T01:55:16.000Z | max_datetime=2021-04-13T01:55:16.000Z
content:
"""Demonstrate matrix multiplication on points forming 3d objects using blender."""
from bpn_init import * #pylint: disable=wildcard-import, unused-wildcard-import
bpy.data.scenes['Scene'].cursor.location[0] = -10
msh = get('Suzy')
if not msh:
msh = bpn.new.monkey('Suzy')
coords = msh.v.T
# Exercises:
# 1. Make the monkey look away
# 2. Make the monkey's face thin
# 3. make the monkey look around (chaining transforms)
m1 = np.array([\
[1, 0.95, 0],\
[0.95, 1, 0],\
[0, 0, 1]\
])
newCoords = m1@coords
# make sure you're in object mode
msh.v = newCoords.T
# coords = msh.v
# for i, co in enumerate(coords):
# coords[i] = coords[i] + 0.01*np.random.randn(3)
# msh.v = coords
# λ = 0
# δλ = np.pi/6
# λ = λ + δλ
# m1 = np.array([\
# [np.cos(λ), -np.sin(λ), 0],\
# [np.sin(λ), np.cos(λ), 0],\
# [0, 0, 1]\
# ])
avg_line_length: 21.47619 | max_line_length: 84 | alphanum_fraction: 0.569845
hexsha: 39fb68983b91158f6c945ebf447373176e336059 | size: 20,292 | ext: py | lang: Python
max_stars: path=AutoDiff/forwardNode.py | repo=chelsilarious/AutoDiff | head=b4ff703f85288bafd85148edb093d7cd47cbed50 | licenses=["MIT"] | count=null | min_datetime=null | max_datetime=null
max_issues: path=AutoDiff/forwardNode.py | repo=chelsilarious/AutoDiff | head=b4ff703f85288bafd85148edb093d7cd47cbed50 | licenses=["MIT"] | count=null | min_datetime=null | max_datetime=null
max_forks: path=AutoDiff/forwardNode.py | repo=chelsilarious/AutoDiff | head=b4ff703f85288bafd85148edb093d7cd47cbed50 | licenses=["MIT"] | count=null | min_datetime=null | max_datetime=null
content:
import numpy as np
class ForwardNode():
def __init__(self, value, trace=1.0, var='x1'):
'''
Constructor
===========
Input:
self - a ForwardNode variable
value - int/float, specifying the value of the current variable
trace - int/float/np.array, derivative(s) of the current variable with respect to the input variable(s), defaults to 1
var - str, initialize the name of the ForwardNode variable, defaults to "x1"
Output:
a ForwardNode object, containing the value and trace of this variable
Example:
>>> x = ForwardNode(5, [0, 1], "x1, x2")
ForwardNode Variable: ['x1, x2'], Value: 5, Trace: [0 1]
'''
if isinstance(value, (int, float)):
self.value = value
else:
raise TypeError("Invalid Input!")
if isinstance(trace, (int, float)):
self.trace = np.array([trace])
elif isinstance(trace, list) and all([isinstance(num, (int, float)) for num in trace]):
self.trace = np.array(trace)
elif isinstance(trace, np.ndarray) and all([isinstance(num, (np.int64, np.float64)) for num in trace]):
self.trace = trace
else:
raise TypeError("Invalid Input!")
if isinstance(var, str):
self.var = [var]
elif isinstance(var, list) and all([isinstance(varname, str) for varname in var]):
self.var = var
else:
raise TypeError("Invalid Input!")
def __add__(self, other):
'''
Dunder method to add another ForwardNode variable, scalar and vector
Input:
self - a ForwardNode variable
other - a constant of integers or decimals / a ForwardNode object representing a variable
Output:
a ForwardNode object, containing new value and trace after addition
Examples:
>>> x = ForwardNode(3, trace=1, var=['x'])
>>> y = x + 3
ForwardNode(6, 1, 'x')
>>> x1 = ForwardNode(3, trace=np.array([1,0]), var=['x1','x2'])
>>> x2 = ForwardNode(4, trace=np.array([0,1]), var=['x1','x2'])
>>> z = x1 + x2
ForwardNode(7, [1,1], ['x1','x2'])
'''
if isinstance(other, (int, float)):
# v = y + c; dv/dx1 = dy/dx1, dv/dx2 = dy/dx2, ...
return ForwardNode(self.value + other, self.trace, self.var)
elif isinstance(other, ForwardNode):
# v = y + z; dv/dx1 = dy/dx1 + dz/dx1, dv/dx2 = dy/dx2 + dz/dx2, ...
return ForwardNode(self.value + other.value, self.trace + other.trace, self.var)
else:
raise AttributeError("Invalid Input!")
def __radd__(self, other):
'''
Dunder method to add another ForwardNode variable, scalar and vector from the left
Input:
self - a ForwardNode variable
other - a constant of integers or decimals / a ForwardNode object representing a variable
Output:
a ForwardNode object, containing new value and trace after addition
Examples:
>>> x = ForwardNode(3, trace=1, var=['x'])
>>> y = 3 + x
ForwardNode(6, 1, 'x')
>>> x1 = ForwardNode(3, trace=np.array([1,0]), var=['x1','x2'])
>>> x2 = ForwardNode(4, trace=np.array(([0,1])), var=['x1','x2'])
>>> z = x2 + x1
ForwardNode(7, [1, 1], ['x1', 'x2'])
'''
return self.__add__(other)
def __sub__(self, other):
'''
Dunder method to subtract another ForwardNode variable, scalar and vector
Input:
self - a ForwardNode variable
other - a constant of integers or decimals / a ForwardNode object representing a variable
Output:
a ForwardNode object, containing new value and trace after subtraction
Examples:
>>> x = ForwardNode(3, trace=1, var=['x'])
>>> y = x - 2
ForwardNode(1, 1, 'x')
>>> x1 = ForwardNode(3, trace=np.array([1,0]), var=['x1','x2'])
>>> x2 = ForwardNode(4, trace=np.array(([0,1])), var=['x1','x2'])
>>> z = x1 - x2
ForwardNode(-1, [1, -1], ['x1', 'x2'])
'''
if isinstance(other, (int, float)):
# v = y - c; dv/dx1 = dy/dx1, dv/dx2 = dy/dx2, ...
return ForwardNode(self.value - other, self.trace, self.var)
elif isinstance(other, ForwardNode):
# v = y - z; dv/dx1 = dy/dx1 - dz/dx1, dv/dx2 = dy/dx2 - dz/dx2, ...
return ForwardNode(self.value - other.value, self.trace - other.trace, self.var)
else:
raise AttributeError("Invalid Input!")
def __rsub__(self, other):
'''
Dunder method to subtract another ForwardNode variable, scalar and vector from the left
Input:
self - a ForwardNode variable
other - a constant of integers or decimals / a ForwardNode object representing a variable
Output:
a ForwardNode object, containing new value and trace after subtraction
Examples:
>>> x = ForwardNode(3, trace=1, var=['x'])
>>> y = 4 - x
ForwardNode(1, -1, 'x')
>>> x1 = ForwardNode(3, trace=np.array([1,0]), var=['x1','x2'])
>>> x2 = ForwardNode(4, trace=np.array(([0,1])), var=['x1','x2'])
>>> z = x2 - x1
ForwardNode(1, [-1, 1], ['x1', 'x2'])
'''
return (-1 * self).__add__(other)
def __mul__(self, other):
'''
Dunder method to multiply another ForwardNode variable, scalar and vector
Input:
self - a ForwardNode variable
other - a constant of integers or decimals / a ForwardNode object representing a variable
Output:
a ForwardNode object, containing new value and trace after multiplication
Examples:
>>> x = ForwardNode(3, trace=1, var=['x'])
>>> y = x * 2
ForwardNode(6, 2, 'x')
>>> x1 = ForwardNode(3, trace=np.array([1,0]), var=['x1','x2'])
>>> x2 = ForwardNode(4, trace=np.array(([0,1])), var=['x1','x2'])
>>> z = x1 * x2
ForwardNode(12, [4, 3], ['x1', 'x2'])
'''
if isinstance(other, (int, float)):
# v = y * c; dv/dx1 = dy/dx1 * c, dv/dx2 = dy/dx2 * c, ...
return ForwardNode(self.value * other, self.trace * other, self.var)
elif isinstance(other, ForwardNode):
# v = y * z; dv/dx1 = dy/dx1 * z + y * dz/dx1, dv/dx2 = dy/dx2 * z + y * dz/dx2, ...
return ForwardNode(self.value * other.value, self.trace * other.value + self.value * other.trace, self.var)
else:
raise AttributeError("Invalid Input!")
def __rmul__(self, other):
'''
Dunder method to multiply another ForwardNode variable, scalar and vector from the left
Input:
self - a ForwardNode variable
other - a constant of integers or decimals / a ForwardNode object representing a variable
Output:
a ForwardNode object, containing new value and trace after multiplication
Examples:
>>> x = ForwardNode(3, trace=1, var=['x'])
>>> y = 2 * x
ForwardNode(6, 2, 'x')
>>> x1 = ForwardNode(3, trace=np.array([1,0]), var=['x1','x2'])
>>> x2 = ForwardNode(4, trace=np.array(([0,1])), var=['x1','x2'])
>>> z = x2 * x1
ForwardNode(12, [4, 3], ['x1', 'x2'])
'''
return self.__mul__(other)
def __truediv__(self, other):
'''
Dunder method to divide another ForwardNode variable, scalar and vector
Input:
self - a ForwardNode variable
other - a constant of integers or decimals / a ForwardNode object representing a variable
Output:
a ForwardNode object, containing new value and trace after division
Examples:
>>> x = ForwardNode(4, trace=1, var=['x'])
>>> y = x / 2
ForwardNode(2, 0.5, 'x')
>>> x1 = ForwardNode(12, trace=np.array([1,0]), var=['x1','x2'])
>>> x2 = ForwardNode(4, trace=np.array(([0,1])), var=['x1','x2'])
>>> z = x1 / x2
ForwardNode(3, [0.25, -0.75], ['x1', 'x2'])
'''
if isinstance(other, (int, float)):
# v = y / c; dv/dx1 = dy/dx1 / c, dv/dx2 = dy/dx2 / c, ...
return ForwardNode(self.value / other, self.trace / other, self.var)
elif isinstance(other, ForwardNode):
# v = y / z; dv/dx1 = (z * dy/dx1 - y * dz/dx1) / (z**2), dv/dx2 = (z * dy/dx2 - y * dz/dx2) / (z**2), ...
return ForwardNode(self.value / other.value,
(other.value * self.trace - self.value * other.trace) / (other.value ** 2), self.var)
else:
raise AttributeError("Invalid Input!")
def __rtruediv__(self, other):
'''
Dunder method to divide another ForwardNode variable, scalar and vector from the left
Input:
self - a ForwardNode variable
other - a constant of integers or decimals / a ForwardNode object representing a variable
Output:
a ForwardNode object, containing new value and trace after division
Examples:
>>> x = ForwardNode(4, trace=1, var=['x'])
>>> y = 8 / x
ForwardNode(2, -0.5, 'x')
>>> x1 = ForwardNode(2, trace=np.array([1,0]), var=['x1','x2'])
>>> x2 = ForwardNode(4, trace=np.array(([0,1])), var=['x1','x2'])
>>> z = x2 / x1
ForwardNode(2, [-1, 0.5], ['x1', 'x2'])
'''
if isinstance(self, ForwardNode):
if not isinstance(other, (int,float)):
raise AttributeError("Invalid Input!")
return ForwardNode(other / self.value, self.trace * (-1 * other) / (self.value ** 2), self.var)
else:
raise AttributeError("Invalid Input!")
def __pow__(self, other):
'''
Dunder method to compute the power of a ForwardNode variable subject to another ForwardNode variable, scalar or vector
Input:
self - a ForwardNode variable
other - a constant of integers or decimals / a ForwardNode object representing a variable
Output:
a ForwardNode object, containing new value and trace after taking the power
Examples:
>>> x = ForwardNode(4, trace=1, var=['x'])
>>> y = x ** 2
ForwardNode(16, 8, 'x')
>>> x1 = ForwardNode(4, trace=np.array([1,0]), var=['x1','x2'])
>>> x2 = ForwardNode(2, trace=np.array(([0,1])), var=['x1','x2'])
>>> z = x1 ** x2
ForwardNode(16, [8, 22.18070978], ['x1', 'x2'])
'''
if isinstance(other, (int, float)):
if (self.value < 0) and abs(other) < 1:
raise ValueError("Derivatives of variables with negative values to a power between -1 and 1 are not supported!")
# v = y ** c; dv/dx1 = c * (y ** (c-1)) * dy/dx1, dv/dx2 = c * (y ** (c-1)) * dy/dx2, ...
new_trace = other * (self.value ** (other - 1)) * self.trace
return ForwardNode(self.value ** other, new_trace, self.var)
elif isinstance(other, ForwardNode):
# v = y ** z; dv/dx1 = z * (y ** (z-1)) * dy/dx1 + (y ** z) * log(y) * dz/dx1, ...
new_trace = other.value * (self.value ** (other.value - 1)) * self.trace + (
self.value ** other.value) * np.log(self.value) * other.trace
return ForwardNode(self.value ** other.value, new_trace, self.var)
else:
raise AttributeError("Invalid Input!")
def __rpow__(self, other):
'''
Dunder method to compute the power of a ForwardNode variable subject to another ForwardNode variable, scalar or vector from the left
Input:
self - a ForwardNode variable
other - a constant of integers or decimals / a ForwardNode object representing a variable
Output:
a ForwardNode object, containing new value and trace after taking the power
Examples:
>>> x = ForwardNode(3, trace=1, var=['x'])
>>> y = 2 ** x
ForwardNode(8, 5.54517744, 'x')
>>> x1 = ForwardNode(4, trace=np.array([1,0]), var=['x1','x2'])
>>> x2 = ForwardNode(2, trace=np.array(([0,1])), var=['x1','x2'])
>>> z = x2 ** x1
ForwardNode(16, [11.09035489, 32], ['x1', 'x2'])
'''
if isinstance(self, ForwardNode):
if not isinstance(other, (int,float)):
raise AttributeError("Invalid Input!")
if (self.value < 0) and abs(other) < 1:
raise ValueError("Derivatives of negative values to a power variable between -1 and 1 are not supported!")
new_trace = (other ** self.value) * np.log(other) * self.trace
return ForwardNode(other ** self.value, new_trace, self.var)
else:
raise AttributeError("Invalid Input!")
def __neg__(self):
'''
Dunder method to take the negation of a ForwardNode variable
Input:
self - a ForwardNode variable
Output:
The negation of the input ForwardNode variable
Examples:
>>> x = ForwardNode(3, trace=1, var=['x'])
>>> -x
ForwardNode(-3, trace=-1, var=['x'])
'''
return ForwardNode(-1 * self.value, -1 * self.trace, self.var)
def __lt__(self, other):
'''
Dunder method to compare if the value of a ForwardNode variable is less than another ForwardNode variable, scalar or vector
Input:
self - a ForwardNode variable
other - a constant of integers or decimals / a ForwardNode object representing a variable
Output:
True if self value < other value, False otherwise
Examples:
>>> x = ForwardNode(3, trace=1, var=['x'])
>>> x < 2
False
>>> x1 = ForwardNode(4, trace=np.array([1,0]), var=['x1','x2'])
>>> x2 = ForwardNode(8, trace=np.array(([0,1])), var=['x1','x2'])
>>> x1 < x2
True
'''
if isinstance(other, (int, float)):
return self.value < other
elif isinstance(other, ForwardNode):
return self.value < other.value
else:
raise AttributeError("Invalid Input!")
def __gt__(self, other):
'''
Dunder method to compare if the value of a ForwardNode variable is greater than another ForwardNode variable, scalar or vector
Input:
self - a ForwardNode variable
other - a constant of integers or decimals / a ForwardNode object representing a variable
Output:
True if self value > other value, False otherwise
Examples:
>>> x = ForwardNode(3, trace=1, var=['x'])
>>> x > 2
True
>>> x1 = ForwardNode(4, trace=np.array([1,0]), var=['x1','x2'])
>>> x2 = ForwardNode(8, trace=np.array(([0,1])), var=['x1','x2'])
>>> x1 > x2
False
'''
if isinstance(other, (int, float)):
return self.value > other
elif isinstance(other, ForwardNode):
return other.__lt__(self)
else:
raise AttributeError("Invalid Input!")
def __le__(self, other):
'''
Dunder method to compare if the value of a ForwardNode variable is less than or equal to another ForwardNode variable, scalar or vector
Input:
self - a ForwardNode variable
other - a constant of integers or decimals / a ForwardNode object representing a variable
Output:
True if self value <= other value, False otherwise
Examples:
>>> x = ForwardNode(3, trace=1, var=['x'])
>>> x <= 3
True
>>> x1 = ForwardNode(4, trace=np.array([1,0]), var=['x1','x2'])
>>> x2 = ForwardNode(8, trace=np.array(([0,1])), var=['x1','x2'])
>>> x1 <= x2
False
'''
# if isinstance(self, (int,float)):
# if isinstance(other, (int,float)):
# return self <= other
# elif isinstance(other, ForwardNode):
# return self <= other.value
if isinstance(self, ForwardNode):
if isinstance(other, (int, float)):
return self.value <= other
elif isinstance(other, ForwardNode):
return self.value <= other.value
elif isinstance(other, ForwardNode):
if isinstance(self, (int, float)):
return self <= other.value
raise AttributeError("Invalid Input!")
def __ge__(self, other):
'''
Dunder method to compare if the value of a ForwardNode variable is greater than or equal to another ForwardNode variable, scalar or vector
Input:
self - a ForwardNode variable
other - a constant of integers or decimals / a ForwardNode object representing a variable
Output:
True if self value >= other value, False otherwise
Examples:
>>> x = ForwardNode(3, trace=1, var=['x'])
>>> x >= 3
True
>>> x1 = ForwardNode(4, trace=np.array([1,0]), var=['x1','x2'])
>>> x2 = ForwardNode(8, trace=np.array(([0,1])), var=['x1','x2'])
>>> x1 >= x2
False
'''
if isinstance(other, (int, float)):
    return self.value >= other
elif isinstance(other, ForwardNode):
    return self.value >= other.value
else:
    raise AttributeError("Invalid Input!")
def __eq__(self, other):
'''
Dunder method to compare if the value of a ForwardNode variable is equal to another ForwardNode variable, scalar or vector
Input:
self - a ForwardNode variable
other - a constant of integers or decimals / a ForwardNode object representing a variable
Output:
True if self value == other value, False otherwise
Examples:
>>> x = ForwardNode(3, trace=1, var=['x'])
>>> x == 2
False
>>> x1 = ForwardNode(4, trace=np.array([1,0]), var=['x1','x2'])
>>> x2 = ForwardNode(4, trace=np.array(([0,1])), var=['x1','x2'])
>>> x1 == x2
True
'''
if isinstance(self, (int, float)):
if isinstance(other, (int, float)):
return self == other
elif isinstance(other, ForwardNode):
return self == other.value
elif isinstance(self, ForwardNode):
if isinstance(other, (int, float)):
return self.value == other
elif isinstance(other, ForwardNode):
return self.value == other.value
raise AttributeError("Invalid Input!")
def __neq__(self, other):
'''
Dunder method to compare if the value of a ForwardNode variable is not equal to another ForwardNode variable, scalar or vector
Input:
self - a ForwardNode variable
other - a constant of integers or decimals / a ForwardNode object representing a variable
Output:
True if self value != other value, False otherwise
Examples:
>>> x = ForwardNode(3, trace=1, var=['x'])
>>> x != 2
True
>>> x1 = ForwardNode(4, trace=np.array([1,0]), var=['x1','x2'])
>>> x2 = ForwardNode(4, trace=np.array(([0,1])), var=['x1','x2'])
>>> x1 != x2
False
'''
return not self.__eq__(other)
def __repr__(self):
'''
Dunder method to represent a ForwardNode objects as a string
Input:
self - a ForwardNode variable
Output:
The value and trace of the ForwardNode object represented as a string
Examples:
>>> x = ForwardNode(3, trace=1, var=['x'])
>>> repr(x)
ForwardNode Variable: ['x'], Value: 3, Trace: [1]
'''
return f'ForwardNode Variable: {self.var}, Value: {self.value}, Trace: {self.trace}'
def __str__(self):
'''
Dunder method to represent a ForwardNode objects as a string
Input:
self - a ForwardNode variable
Output:
The value and trace of the ForwardNode object represented as a string
Examples:
>>> x = ForwardNode(3, trace=1, var=['x'])
>>> print(x)
ForwardNode Variable: ['x'], Value: 3, Trace: [1]
'''
return f'ForwardNode Variable: {self.var}, Value: {self.value}, Trace: {self.trace}'
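# A short usage sketch (added for illustration; not part of the original module):
# forward-mode AD on f(x1, x2) = x1 * x2 + x1 ** 2, seeding each input with a
# unit trace so that f.trace carries the whole gradient.
#   x1 = ForwardNode(3.0, trace=[1, 0], var=['x1', 'x2'])
#   x2 = ForwardNode(4.0, trace=[0, 1], var=['x1', 'x2'])
#   f = x1 * x2 + x1 ** 2
#   f.value   # 21.0
#   f.trace   # array([10., 3.]) == [df/dx1, df/dx2] = [x2 + 2*x1, x1]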
avg_line_length: 35.78836 | max_line_length: 146 | alphanum_fraction: 0.550956
hexsha: 510e888a09f37f77ff4bf7e4a39879286468cb55 | size: 915 | ext: py | lang: Python
max_stars: path=mp_server/src/api_requests.py | repo=daryu519/2021-2-OSSProj-OTS-7 | head=136e0e78164b5acc7c631dd7629b775ba62fc823 | licenses=["MIT"] | count=null | min_datetime=null | max_datetime=null
max_issues: path=mp_server/src/api_requests.py | repo=daryu519/2021-2-OSSProj-OTS-7 | head=136e0e78164b5acc7c631dd7629b775ba62fc823 | licenses=["MIT"] | count=null | min_datetime=null | max_datetime=null
max_forks: path=mp_server/src/api_requests.py | repo=daryu519/2021-2-OSSProj-OTS-7 | head=136e0e78164b5acc7c631dd7629b775ba62fc823 | licenses=["MIT"] | count=null | min_datetime=null | max_datetime=null
content:
import requests
from .config import DB_SERVER_URL
try:
from .secret_key import SECRET_KEY
async def db_post_winner(user_id: str):
try:
requests.post(url=DB_SERVER_URL + '/winner', data={'name': user_id, 'key': SECRET_KEY}, timeout=2)
except requests.exceptions.Timeout:
print('timeout')
async def db_post_loser(user_id: str):
try:
requests.post(url=DB_SERVER_URL + '/loser', data={'name': user_id, 'key': SECRET_KEY}, timeout=2)
except requests.exceptions.Timeout:
print('timeout')
except ModuleNotFoundError:
async def db_post_winner(user_id: str):
print(f'module not found err \n winner {user_id=}')
pass
async def db_post_loser(user_id: str):
print(f'module not found err \n loser {user_id=}')
pass
# async def auth_jwt_validate(user_id: str, jwt: str) -> bool:
# pass
#
avg_line_length: 30.5 | max_line_length: 110 | alphanum_fraction: 0.642623
hexsha: 50f0fbcb31f76e9ec7913ab5b64bc79614ce7913 | size: 28,861 | ext: py | lang: Python
max_stars: path=tests/integration/roster/test_nhl_roster.py | repo=MArtinherz/sportsipy | head=24f4c1d5e3bb8ecc56e21568961588491e9cfd2a | licenses=["MIT"] | count=221 | min_datetime=2018-05-15T19:48:03.000Z | max_datetime=2021-01-05T15:36:21.000Z
max_issues: path=tests/integration/roster/test_nhl_roster.py | repo=MArtinherz/sportsipy | head=24f4c1d5e3bb8ecc56e21568961588491e9cfd2a | licenses=["MIT"] | count=502 | min_datetime=2018-07-25T03:09:26.000Z | max_datetime=2021-01-06T16:07:02.000Z
max_forks: path=tests/integration/roster/test_nhl_roster.py | repo=MArtinherz/sportsipy | head=24f4c1d5e3bb8ecc56e21568961588491e9cfd2a | licenses=["MIT"] | count=72 | min_datetime=2021-01-21T13:17:00.000Z | max_datetime=2022-03-31T21:43:25.000Z
content:
import mock
import os
import pandas as pd
import pytest
from flexmock import flexmock
from sportsipy import utils
from sportsipy.nhl.roster import Player, Roster
from sportsipy.nhl.teams import Team
YEAR = 2018
def read_file(filename):
filepath = os.path.join(os.path.dirname(__file__), 'nhl', filename)
return open('%s.html' % filepath, 'r', encoding='utf8').read()
def mock_pyquery(url):
class MockPQ:
def __init__(self, html_contents, status=200):
self.url = url
self.reason = 'Bad URL' # Used when throwing HTTPErrors
self.headers = {} # Used when throwing HTTPErrors
self.status_code = status
self.html_contents = html_contents
self.text = html_contents
if 'BAD' in url or 'bad' in url:
return MockPQ(None, 404)
if 'zettehe01' in url:
return MockPQ(read_file('zettehe01'))
if '2018' in url:
return MockPQ(read_file('2018'))
return MockPQ(read_file('howarja02'))
def mock_request(url):
class MockRequest:
def __init__(self, html_contents, status_code=200):
self.status_code = status_code
self.html_contents = html_contents
self.text = html_contents
if str(YEAR) in url:
return MockRequest('good')
else:
return MockRequest('bad', status_code=404)
class TestNHLPlayer:
def setup_method(self):
self.skater_results_career = {
'adjusted_assists': 692,
'adjusted_goals': 377,
'adjusted_goals_against_average': None,
'adjusted_goals_created': 394,
'adjusted_points': 1069,
'age': None,
'assists': 623,
'average_time_on_ice': '19:35',
'blocks_at_even_strength': 267,
'corsi_against': 10322.0,
'corsi_for': 12688,
'corsi_for_percentage': 55.1,
'defensive_point_shares': 29.4,
'defensive_zone_start_percentage': 45.5,
'even_strength_assists': 379,
'even_strength_goals': 228,
'even_strength_goals_allowed': None,
'even_strength_save_percentage': None,
'even_strength_shots_faced': None,
'faceoff_losses': 5602,
'faceoff_percentage': 51.1,
'faceoff_wins': 5863,
'fenwick_against': 8123,
'fenwick_for': 9757,
'fenwick_for_percentage': 54.6,
'game_winning_goals': 64,
'games_played': 1082,
'giveaways': 482,
'goal_against_percentage_relative': None,
'goalie_point_shares': None,
'goals': 337,
'goals_against': None,
'goals_against_average': None,
'goals_against_on_ice': 530,
'goals_created': 348,
'goals_for_on_ice': 633,
'goals_saved_above_average': None,
'height': '6-0',
'hits_at_even_strength': 471,
'league': 'NHL',
'losses': None,
'minutes': None,
'name': 'Henrik Zetterberg',
'offensive_point_shares': 79.9,
'offensive_zone_start_percentage': 54.5,
'pdo': 100.0,
'penalties_in_minutes': 401,
'player_id': 'zettehe01',
'plus_minus': 160,
'point_shares': 109.3,
'points': 960,
'power_play_assists': 235,
'power_play_goals': 100,
'power_play_goals_against_on_ice': 140,
'power_play_goals_allowed': None,
'power_play_goals_for_on_ice': 490,
'power_play_save_percentage': None,
'power_play_shots_faced': None,
'quality_start_percentage': None,
'quality_starts': None,
'really_bad_starts': None,
'relative_corsi_for_percentage': 3.3,
'relative_fenwick_for_percentage': 3.1,
'save_percentage': None,
'save_percentage_on_ice': None,
'saves': None,
'season': 'Career',
'shooting_percentage': 9.8,
'shooting_percentage_on_ice': 8.8,
'shootout_attempts': 47,
'shootout_goals': 10,
'shootout_misses': 37,
'shootout_percentage': 21.3,
'short_handed_assists': 9,
'short_handed_goals': 9,
'short_handed_goals_allowed': None,
'short_handed_save_percentage': None,
'short_handed_shots_faced': None,
'shots_against': None,
'shots_on_goal': 3455,
'shutouts': None,
'takeaways': 454,
'team_abbreviation': None,
'ties_plus_overtime_loss': None,
'time_on_ice': 21186,
'time_on_ice_even_strength': 12658.7,
'total_goals_against_on_ice': 851,
'total_goals_for_on_ice': 1362,
'total_shots': 5408,
'weight': 197,
'wins': None
}
self.skater_results_2017 = {
'adjusted_assists': 46,
'adjusted_goals': 11,
'adjusted_goals_against_average': None,
'adjusted_goals_created': 19,
'adjusted_points': 57,
'age': 37,
'assists': 45,
'average_time_on_ice': '19:30',
'blocks_at_even_strength': 34,
'corsi_against': 1243.0,
'corsi_for': 1274,
'corsi_for_percentage': 50.6,
'defensive_point_shares': 2.0,
'defensive_zone_start_percentage': 45.2,
'even_strength_assists': 28,
'even_strength_goals': 10,
'even_strength_goals_allowed': None,
'even_strength_save_percentage': None,
'even_strength_shots_faced': None,
'faceoff_losses': 709,
'faceoff_percentage': 48.4,
'faceoff_wins': 666,
'fenwick_against': 948,
'fenwick_for': 975,
'fenwick_for_percentage': 50.7,
'game_winning_goals': 2,
'games_played': 82,
'giveaways': 57,
'goal_against_percentage_relative': None,
'goalie_point_shares': None,
'goals': 11,
'goals_against': None,
'goals_against_average': None,
'goals_against_on_ice': 52,
'goals_created': 18,
'goals_for_on_ice': 54,
'goals_saved_above_average': None,
'height': '6-0',
'hits_at_even_strength': 49,
'league': 'NHL',
'losses': None,
'minutes': None,
'name': 'Henrik Zetterberg',
'offensive_point_shares': 2.4,
'offensive_zone_start_percentage': 54.8,
'pdo': 99.9,
'penalties_in_minutes': 14,
'player_id': 'zettehe01',
'plus_minus': 1,
'point_shares': 4.4,
'points': 56,
'power_play_assists': 17,
'power_play_goals': 1,
'power_play_goals_against_on_ice': 0,
'power_play_goals_allowed': None,
'power_play_goals_for_on_ice': 25,
'power_play_save_percentage': None,
'power_play_shots_faced': None,
'quality_start_percentage': None,
'quality_starts': None,
'really_bad_starts': None,
'relative_corsi_for_percentage': 2.7,
'relative_fenwick_for_percentage': 2.0,
'save_percentage': None,
'save_percentage_on_ice': None,
'saves': None,
'season': '2017-18',
'shooting_percentage': 6.1,
'shooting_percentage_on_ice': 7.6,
'shootout_attempts': 3,
'shootout_goals': 0,
'shootout_misses': 3,
'shootout_percentage': 0.0,
'short_handed_assists': 0,
'short_handed_goals': 0,
'short_handed_goals_allowed': None,
'short_handed_save_percentage': None,
'short_handed_shots_faced': None,
'shots_against': None,
'shots_on_goal': 180,
'shutouts': None,
'takeaways': 51,
'team_abbreviation': 'DET',
'ties_plus_overtime_loss': None,
'time_on_ice': 1599,
'time_on_ice_even_strength': 1382.2,
'total_goals_against_on_ice': 53,
'total_goals_for_on_ice': 79,
'total_shots': 332,
'weight': 197,
'wins': None
}
self.goalie_results_career = {
'adjusted_assists': None,
'adjusted_goals': None,
'adjusted_goals_against_average': None,
'adjusted_goals_created': None,
'adjusted_points': None,
'age': None,
'assists': 8,
'average_time_on_ice': None,
'blocks_at_even_strength': None,
'corsi_against': None,
'corsi_for': None,
'corsi_for_percentage': None,
'defensive_point_shares': None,
'defensive_zone_start_percentage': None,
'even_strength_assists': None,
'even_strength_goals': None,
'even_strength_goals_allowed': 800,
'even_strength_save_percentage': 0.922,
'even_strength_shots_faced': 10295,
'faceoff_losses': None,
'faceoff_percentage': None,
'faceoff_wins': None,
'fenwick_against': None,
'fenwick_for': None,
'fenwick_for_percentage': None,
'game_winning_goals': None,
'games_played': None,
'giveaways': None,
'goal_against_percentage_relative': 97,
'goalie_point_shares': 78.8,
'goals': 0,
'goals_against': 1091,
'goals_against_average': 2.49,
'goals_against_on_ice': None,
'goals_created': None,
'goals_for_on_ice': None,
'goals_saved_above_average': None,
'height': '6-1',
'hits_at_even_strength': None,
'league': 'NHL',
'losses': 151,
'minutes': 26332,
'name': 'Jimmy Howard',
'offensive_point_shares': None,
'offensive_zone_start_percentage': None,
'pdo': None,
'penalties_in_minutes': 34,
'player_id': 'howarja02',
'plus_minus': None,
'point_shares': None,
'points': 8,
'power_play_assists': None,
'power_play_goals': None,
'power_play_goals_against_on_ice': None,
'power_play_goals_allowed': 26,
'power_play_goals_for_on_ice': None,
'power_play_save_percentage': 0.92,
'power_play_shots_faced': 327,
'quality_start_percentage': 0.544,
'quality_starts': 239,
'really_bad_starts': 61,
'relative_corsi_for_percentage': None,
'relative_fenwick_for_percentage': None,
'save_percentage': 0.915,
'save_percentage_on_ice': None,
'saves': 11696,
'season': 'Career',
'shooting_percentage': None,
'shooting_percentage_on_ice': None,
'shootout_attempts': None,
'shootout_goals': None,
'shootout_misses': None,
'shootout_percentage': None,
'short_handed_assists': None,
'short_handed_goals': None,
'short_handed_goals_allowed': 249,
'short_handed_save_percentage': 0.877,
'short_handed_shots_faced': 2027,
'shots_against': 12787,
'shots_on_goal': None,
'shutouts': 24,
'takeaways': None,
'team_abbreviation': None,
'ties_plus_overtime_loss': 63,
'time_on_ice': None,
'time_on_ice_even_strength': None,
'total_goals_against_on_ice': None,
'total_goals_for_on_ice': None,
'total_shots': None,
'weight': 218,
'wins': 221
}
self.goalie_results_2017 = {
'adjusted_assists': None,
'adjusted_goals': None,
'adjusted_goals_against_average': None,
'adjusted_goals_created': None,
'adjusted_points': None,
'age': 33,
'assists': 1,
'average_time_on_ice': None,
'blocks_at_even_strength': None,
'corsi_against': None,
'corsi_for': None,
'corsi_for_percentage': None,
'defensive_point_shares': None,
'defensive_zone_start_percentage': None,
'even_strength_assists': None,
'even_strength_goals': None,
'even_strength_goals_allowed': 122,
'even_strength_save_percentage': 0.916,
'even_strength_shots_faced': 1455,
'faceoff_losses': None,
'faceoff_percentage': None,
'faceoff_wins': None,
'fenwick_against': None,
'fenwick_for': None,
'fenwick_for_percentage': None,
'game_winning_goals': None,
'games_played': None,
'giveaways': None,
'goal_against_percentage_relative': 103,
'goalie_point_shares': 9.4,
'goals': 0,
'goals_against': 160,
'goals_against_average': 2.85,
'goals_against_on_ice': None,
'goals_created': None,
'goals_for_on_ice': None,
'goals_saved_above_average': -4.65,
'height': '6-1',
'hits_at_even_strength': None,
'league': 'NHL',
'losses': 27,
'minutes': 3368,
'name': 'Jimmy Howard',
'offensive_point_shares': None,
'offensive_zone_start_percentage': None,
'pdo': None,
'penalties_in_minutes': 10,
'player_id': 'howarja02',
'plus_minus': None,
'point_shares': None,
'points': 1,
'power_play_assists': None,
'power_play_goals': None,
'power_play_goals_against_on_ice': None,
'power_play_goals_allowed': 2,
'power_play_goals_for_on_ice': None,
'power_play_save_percentage': 0.949,
'power_play_shots_faced': 39,
'quality_start_percentage': 0.491,
'quality_starts': 28,
'really_bad_starts': 6,
'relative_corsi_for_percentage': None,
'relative_fenwick_for_percentage': None,
'save_percentage': 0.91,
'save_percentage_on_ice': None,
'saves': 1610,
'season': '2017-18',
'shooting_percentage': None,
'shooting_percentage_on_ice': None,
'shootout_attempts': None,
'shootout_goals': None,
'shootout_misses': None,
'shootout_percentage': None,
'short_handed_assists': None,
'short_handed_goals': None,
'short_handed_goals_allowed': 36,
'short_handed_save_percentage': 0.869,
'short_handed_shots_faced': 275,
'shots_against': 1770,
'shots_on_goal': None,
'shutouts': 1,
'takeaways': None,
'team_abbreviation': 'DET',
'ties_plus_overtime_loss': 9,
'time_on_ice': None,
'time_on_ice_even_strength': None,
'total_goals_against_on_ice': None,
'total_goals_for_on_ice': None,
'total_shots': None,
'weight': 218,
'wins': 22
}
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_nhl_skater_returns_requested_career_stats(self, *args, **kwargs):
# Request the career stats
player = Player('zettehe01')
player = player('')
for attribute, value in self.skater_results_career.items():
assert getattr(player, attribute) == value
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_nhl_skater_returns_player_season_stats(self, *args, **kwargs):
# Request the 2017 stats
player = Player('zettehe01')
player = player('2017-18')
for attribute, value in self.skater_results_2017.items():
assert getattr(player, attribute) == value
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_nhl_goalie_returns_requested_career_stats(self, *args, **kwargs):
# Request the career stats
player = Player('howarja02')
player = player('')
for attribute, value in self.goalie_results_career.items():
assert getattr(player, attribute) == value
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_nhl_goalie_returns_player_season_stats(self, *args, **kwargs):
# Request the 2017 stats
player = Player('howarja02')
player = player('2017-18')
for attribute, value in self.goalie_results_2017.items():
assert getattr(player, attribute) == value
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_dataframe_returns_dataframe(self, *args, **kwargs):
dataframe = [
{'adjusted_assists': 46,
'adjusted_goals': 11,
'adjusted_goals_against_average': None,
'adjusted_goals_created': 19,
'adjusted_points': 57,
'age': 37,
'assists': 45,
'average_time_on_ice': '19:30',
'blocks_at_even_strength': 34,
'corsi_against': 1243.0,
'corsi_for': None,
'corsi_for_percentage': 50.6,
'defensive_point_shares': 2.0,
'defensive_zone_start_percentage': 45.2,
'even_strength_assists': 28,
'even_strength_goals': 10,
'even_strength_goals_allowed': None,
'even_strength_save_percentage': None,
'even_strength_shots_faced': None,
'faceoff_losses': 709,
'faceoff_percentage': 48.4,
'faceoff_wins': 666,
'fenwick_against': 948,
'fenwick_for': 975,
'fenwick_for_percentage': 50.7,
'game_winning_goals': 2,
'games_played': 82,
'giveaways': 57,
'goal_against_percentage_relative': None,
'goalie_point_shares': None,
'goals': 11,
'goals_against': None,
'goals_against_average': None,
'goals_against_on_ice': 52,
'goals_created': 18,
'goals_for_on_ice': 54,
'goals_saved_above_average': None,
'height': '6-0',
'hits_at_even_strength': 49,
'league': 'NHL',
'losses': None,
'minutes': None,
'name': 'Henrik Zetterberg',
'offensive_point_shares': 2.4,
'offensive_zone_start_percentage': 54.8,
'pdo': 99.9,
'penalties_in_minutes': 14,
'player_id': 'zettehe01',
'plus_minus': 1,
'point_shares': 4.4,
'points': 56,
'power_play_assists': 17,
'power_play_goals': 1,
'power_play_goals_against_on_ice': 0,
'power_play_goals_allowed': None,
'power_play_goals_for_on_ice': 25,
'power_play_save_percentage': None,
'power_play_shots_faced': None,
'quality_start_percentage': None,
'quality_starts': None,
'really_bad_starts': None,
'relative_corsi_for_percentage': 2.7,
'relative_fenwick_for_percentage': 2.0,
'save_percentage': None,
'save_percentage_on_ice': None,
'saves': None,
'season': '2017-18',
'shooting_percentage': 6.1,
'shooting_percentage_on_ice': 7.6,
'shootout_attempts': 3,
'shootout_goals': 0,
'shootout_misses': 3,
'shootout_percentage': 0.0,
'short_handed_assists': 0,
'short_handed_goals': 0,
'short_handed_goals_allowed': None,
'short_handed_save_percentage': None,
'short_handed_shots_faced': None,
'shots_against': None,
'shots_on_goal': 180,
'shutouts': None,
'takeaways': 51,
'team_abbreviation': 'DET',
'ties_plus_overtime_loss': None,
'time_on_ice': 1599,
'time_on_ice_even_strength': 1382.2,
'total_goals_against_on_ice': 53,
'total_goals_for_on_ice': 79,
'total_shots': 332,
'weight': 197,
'wins': None},
{'adjusted_assists': 692,
'adjusted_goals': 377,
'adjusted_goals_against_average': None,
'adjusted_goals_created': 394,
'adjusted_points': 1069,
'age': None,
'assists': 623,
'average_time_on_ice': '19:35',
'blocks_at_even_strength': 267,
'corsi_against': 10322.0,
'corsi_for': None,
'corsi_for_percentage': 55.1,
'defensive_point_shares': 29.4,
'defensive_zone_start_percentage': 45.5,
'even_strength_assists': 379,
'even_strength_goals': 228,
'even_strength_goals_allowed': None,
'even_strength_save_percentage': None,
'even_strength_shots_faced': None,
'faceoff_losses': 5602,
'faceoff_percentage': 51.1,
'faceoff_wins': 5863,
'fenwick_against': 8123,
'fenwick_for': 9757,
'fenwick_for_percentage': 54.6,
'game_winning_goals': 64,
'games_played': 1082,
'giveaways': 482,
'goal_against_percentage_relative': None,
'goalie_point_shares': None,
'goals': 337,
'goals_against': None,
'goals_against_average': None,
'goals_against_on_ice': 530,
'goals_created': 348,
'goals_for_on_ice': 633,
'goals_saved_above_average': None,
'height': '6-0',
'hits_at_even_strength': 471,
'league': 'NHL',
'losses': None,
'minutes': None,
'name': 'Henrik Zetterberg',
'offensive_point_shares': 79.9,
'offensive_zone_start_percentage': 54.5,
'pdo': 100.0,
'penalties_in_minutes': 401,
'player_id': 'zettehe01',
'plus_minus': 160,
'point_shares': 109.3,
'points': 960,
'power_play_assists': 235,
'power_play_goals': 100,
'power_play_goals_against_on_ice': 140,
'power_play_goals_allowed': None,
'power_play_goals_for_on_ice': 490,
'power_play_save_percentage': None,
'power_play_shots_faced': None,
'quality_start_percentage': None,
'quality_starts': None,
'really_bad_starts': None,
'relative_corsi_for_percentage': 3.3,
'relative_fenwick_for_percentage': 3.1,
'save_percentage': None,
'save_percentage_on_ice': None,
'saves': None,
'season': 'Career',
'shooting_percentage': 9.8,
'shooting_percentage_on_ice': 8.8,
'shootout_attempts': 47,
'shootout_goals': 10,
'shootout_misses': 37,
'shootout_percentage': 21.3,
'short_handed_assists': 9,
'short_handed_goals': 9,
'short_handed_goals_allowed': None,
'short_handed_save_percentage': None,
'short_handed_shots_faced': None,
'shots_against': None,
'shots_on_goal': 3455,
'shutouts': None,
'takeaways': 454,
'team_abbreviation': None,
'ties_plus_overtime_loss': None,
'time_on_ice': 21186,
'time_on_ice_even_strength': 12658.7,
'total_goals_against_on_ice': 851,
'total_goals_for_on_ice': 1362,
'total_shots': 5408,
'weight': 197,
'wins': None}
]
indices = ['2017', 'Career']
df = pd.DataFrame(dataframe, index=indices)
player = Player('zettehe01')
# Pandas doesn't natively allow comparisons of DataFrames.
# Concatenating the two DataFrames (the one generated during the test
# and the expected on above) and dropping duplicate rows leaves only
# the rows that are unique between the two frames. This allows a quick
# check of the DataFrame to see if it is empty - if so, all rows are
# duplicates, and they are equal.
frames = [df, player.dataframe]
df1 = pd.concat(frames).drop_duplicates(keep=False)
assert df1.empty
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_nhl_404_returns_none_with_no_errors(self, *args, **kwargs):
player = Player('bad')
assert player.name is None
assert player.dataframe is None
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_nhl_404_returns_none_for_different_season(self, *args, **kwargs):
player = Player('bad')
assert player.name is None
assert player.dataframe is None
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_nhl_player_string_representation(self, *args, **kwargs):
player = Player('zettehe01')
assert player.__repr__() == 'Henrik Zetterberg (zettehe01)'
class TestNHLRoster:
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_roster_class_pulls_all_player_stats(self, *args, **kwargs):
flexmock(utils) \
.should_receive('_find_year_for_season') \
.and_return('2018')
roster = Roster('DET')
assert len(roster.players) == 2
for player in roster.players:
assert player.name in ['Jimmy Howard', 'Henrik Zetterberg']
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_bad_url_raises_value_error(self, *args, **kwargs):
with pytest.raises(ValueError):
roster = Roster('bad')
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_roster_from_team_class(self, *args, **kwargs):
flexmock(Team) \
.should_receive('_parse_team_data') \
.and_return(None)
team = Team(team_data=None, rank=1, year='2018')
mock_abbreviation = mock.PropertyMock(return_value='DET')
type(team)._abbreviation = mock_abbreviation
assert len(team.roster.players) == 2
for player in team.roster.players:
assert player.name in ['Jimmy Howard', 'Henrik Zetterberg']
type(team)._abbreviation = None
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_roster_class_with_slim_parameter(self, *args, **kwargs):
flexmock(utils) \
.should_receive('_find_year_for_season') \
.and_return('2018')
roster = Roster('DET', slim=True)
assert len(roster.players) == 2
assert roster.players == {
'howarja02': 'Jimmy Howard',
'zettehe01': 'Henrik Zetterberg'
}
@mock.patch('requests.get', side_effect=mock_pyquery)
@mock.patch('requests.head', side_effect=mock_request)
def test_invalid_default_year_reverts_to_previous_year(self,
*args,
**kwargs):
flexmock(utils) \
.should_receive('_find_year_for_season') \
.and_return(2019)
roster = Roster('DET')
assert len(roster.players) == 2
for player in roster.players:
assert player.name in ['Jimmy Howard', 'Henrik Zetterberg']
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_roster_class_string_representation(self, *args, **kwargs):
expected = """Jimmy Howard (howarja02)
Henrik Zetterberg (zettehe01)"""
flexmock(utils) \
.should_receive('_find_year_for_season') \
.and_return('2018')
roster = Roster('DET')
assert roster.__repr__() == expected
def test_coach(self):
assert "Jeff Blashill" == Roster('DET', year=YEAR).coach
| 37.875328
| 78
| 0.554659
|
5f1f898ed62872f28d1fbf504aa39da8df67d212
| 9,103
|
py
|
Python
|
test/t_compliance/t_check/test_base_check.py
|
tsehrer/auditree-framework
|
aa76b5450f7a77c1078048c226b1601a560d9779
|
[
"Apache-2.0"
] | null | null | null |
test/t_compliance/t_check/test_base_check.py
|
tsehrer/auditree-framework
|
aa76b5450f7a77c1078048c226b1601a560d9779
|
[
"Apache-2.0"
] | 15
|
2020-11-10T23:01:35.000Z
|
2021-08-19T23:30:27.000Z
|
test/t_compliance/t_check/test_base_check.py
|
dlminvestments/auditree-framework
|
19858c17797a7626fe20f0489d1aab163c6d69ec
|
[
"Apache-2.0"
] | null | null | null |
# -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compliance automation check tests module."""
import unittest
from datetime import datetime
from unittest.mock import MagicMock, call, create_autospec
from compliance.check import ComplianceCheck
from compliance.config import ComplianceConfig
from compliance.locker import Locker
from git import Commit
class ComplianceCheckTest(unittest.TestCase):
"""ComplianceCheck test class."""
def setUp(self):
"""Initialize each test."""
# Since unittest.TestCase needs a method for running the test
# (runTest, by default) and ComplianceCheck is a child of
# unittest.TestCase, we must pass a method in the
# constructor (otherwise, we will get a ValueError). Since we
# don't need this method, passing ``__doc__`` is enough for
# building a ComplianceCheck object successfully.
self.check = ComplianceCheck('__doc__')
# Ensures that the check object has a (mocked) locker attribute/object
# on it as expected.
self.check.locker = create_autospec(Locker)
def test_title(self):
"""Check title raises an exception in the base class."""
with self.assertRaises(NotImplementedError) as cm:
self.check.title
self.assertEqual(
str(cm.exception),
'Property title not implemented on ComplianceCheck'
)
def test_config(self):
"""Check that the config property returns a ComplianceConfig object."""
self.assertIsInstance(self.check.config, ComplianceConfig)
def test_reports(self):
"""Check reports property."""
self.assertEqual(self.check.reports, [])
self.check.reports.append('dummy')
self.assertEqual(self.check.reports, ['dummy'])
def test_disabled_runbook_url(self):
"""Check runbook URL is none - disabled."""
self.check.config._config.update(
{
'runbooks': {
'enabled': False, 'base_url': 'http://configuredrunbooks'
}
}
)
self.assertEqual(self.check.runbook_url, None)
def test_unconfigured_runbook_url(self):
"""Check runbook URL is none - not configured."""
self.check.config._config.update(
{'runbooks': {
'enabled': True, 'base_url': ''
}}
)
self.assertEqual(self.check.runbook_url, None)
def test_configured_runbook_url(self):
"""Check runbook URL is set."""
self.check.config._config.update(
{
'runbooks': {
'enabled': True, 'base_url': 'http://configuredrunbooks'
}
}
)
self.assertEqual(
self.check.runbook_url,
'http://configuredrunbooks/compliance_check.html'
)
def test_evidence_metadata(self):
"""Check evidence_metadata property."""
self.assertEqual(self.check.evidence_metadata, {})
def test_fixed_failure_count(self):
"""Check fixed_failure_count property."""
self.assertEqual(self.check.fixed_failure_count, 0)
self.check.fixed_failure_count = 100
self.assertEqual(self.check.fixed_failure_count, 100)
def test_failures(self):
"""Test failures property, and the length of dict and of type."""
self.assertEqual(self.check.failures, {})
self.check.add_failures('fail_type', 'fail_for')
self.check.add_failures('fail_type_2', 'fail_for_2')
expected_failure = {
'fail_type': ['fail_for'], 'fail_type_2': ['fail_for_2']
}
self.assertEqual(expected_failure, self.check.failures)
self.assertEqual(self.check.failures_count(), 2)
def test_warnings(self):
"""Test warning property and if key does not exist, throws KeyError."""
self.check._failures = {}
self.assertEqual(self.check.warnings, {})
self.check.add_warnings('warn_type', 'warn_for')
expected_warning = {'warn_type': ['warn_for']}
self.assertEqual(expected_warning, self.check.warnings)
def test_add_issue_if_diff_failure(self):
"""Test add_issue_if_diff adds a failure as expected."""
# Throw a fail and make sure it did not warn
self.check.add_issue_if_diff(
{1, 2, 3, 5}, {1, 2, 3, 4}, 'Extra users found'
)
self.assertEqual(self.check.failures_count(), 1)
self.assertEqual(self.check.warnings_count(), 0)
self.assertEqual(self.check._failures, {'Extra users found': [5]})
def test_add_issue_if_diff_warning(self):
"""Test add_issue_if_diff adds a warning as expected."""
# Throw a fail and make sure it did not warn
self.check.add_issue_if_diff(
{1, 2, 3, 4}, {1, 2, 3, 5}, 'Users not found', True
)
self.assertEqual(self.check.failures_count(), 0)
self.assertEqual(self.check.warnings_count(), 1)
self.assertEqual(self.check._warnings, {'Users not found': [4]})
def test_add_issue_if_diff_no_diff(self):
"""Test add_issue_if_diff does not add a fail/warning when no diff."""
# Ensure no issues are raised when there is no diff
self.check.add_issue_if_diff([], [], 'FAILED')
self.assertEqual(self.check.failures_count(), 0)
self.assertEqual(self.check.warnings_count(), 0)
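    # Illustrative note (added, not part of the original suite): as exercised
    # above, add_issue_if_diff(found, expected, msg) records the elements of
    # found that are missing from expected under the key msg as a failure, or
    # as a warning when the optional fourth argument is True; no issue is
    # recorded when the two collections match.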
def test_add_evidence_metadata(self):
"""Test evidence_metadata is populated correctly."""
commit_mock = create_autospec(Commit)
commit_mock.hexsha = 'mycommitsha'
self.check.locker.get_latest_commit = MagicMock()
self.check.locker.get_latest_commit.return_value = commit_mock
self.check.locker.get_evidence_metadata = MagicMock()
self.check.locker.get_evidence_metadata.return_value = {
'foo': 'bar', 'last_update': '2019-11-15'
}
ev_date = datetime(2019, 11, 15)
self.check.add_evidence_metadata('raw/foo/foo.json', ev_date)
self.check.locker.get_latest_commit.assert_called_once_with(
'raw/foo/foo.json', ev_date
)
self.check.locker.get_evidence_metadata.assert_called_once_with(
'raw/foo/foo.json', ev_date
)
self.assertEqual(
self.check.evidence_metadata,
{
('raw/foo/foo.json', '2019-11-15'): {
'path': 'raw/foo/foo.json',
'commit_sha': 'mycommitsha',
'foo': 'bar',
'last_update': '2019-11-15'
}
}
)
def test_add_partitioned_evidence_metadata(self):
"""Test evidence_metadata is populated correctly for partitions."""
commit_mock = create_autospec(Commit)
commit_mock.hexsha = 'mycommitsha'
self.check.locker.get_latest_commit = MagicMock()
self.check.locker.get_latest_commit.return_value = commit_mock
self.check.locker.get_evidence_metadata = MagicMock()
self.check.locker.get_evidence_metadata.return_value = {
'foo': 'bar',
'last_update': '2019-11-15',
'partitions': {
'123': ['foo'], '456': ['bar']
},
'tombstones': 'zombie'
}
ev_date = datetime(2019, 11, 15)
self.check.add_evidence_metadata('raw/foo/foo.json', ev_date)
self.assertEqual(self.check.locker.get_latest_commit.call_count, 2)
self.check.locker.get_latest_commit.assert_has_calls(
[
call('raw/foo/123_foo.json', ev_date),
call('raw/foo/456_foo.json', ev_date)
],
any_order=True
)
self.check.locker.get_evidence_metadata.assert_called_once_with(
'raw/foo/foo.json', ev_date
)
self.assertEqual(
self.check.evidence_metadata,
{
('raw/foo/foo.json', '2019-11-15'): {
'path': 'raw/foo/foo.json',
'partitions': {
'123': {
'key': ['foo'], 'commit_sha': 'mycommitsha'
},
'456': {
'key': ['bar'], 'commit_sha': 'mycommitsha'
}
},
'foo': 'bar',
'last_update': '2019-11-15'
}
}
)
| 38.901709
| 79
| 0.604196
|
882027f070477a8df4032f7079aa2d1653bb0a7f
| 5,604
|
py
|
Python
|
ascend/data/tensor.py
|
bazige/ascendfly
|
cb176fd35b7f71e2e529f00583edc110f9afd364
|
[
"Apache-2.0"
] | 2
|
2021-09-17T02:47:50.000Z
|
2022-02-12T03:21:52.000Z
|
ascend/data/tensor.py
|
bazige/ascendfly
|
cb176fd35b7f71e2e529f00583edc110f9afd364
|
[
"Apache-2.0"
] | null | null | null |
ascend/data/tensor.py
|
bazige/ascendfly
|
cb176fd35b7f71e2e529f00583edc110f9afd364
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright 2020 Huawei Technologies Co., Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from ..common.const import *
from ..resource.mem import memcpy_d2d
from ..data.ascendarray import AscendArray
from ..ops.op import Permute
def _imdenormalize(img, mean, std, to_bgr=True):
assert img.dtype != np.uint8
mean = mean.reshape(1, -1).astype(np.float64)
std = std.reshape(1, -1).astype(np.float64)
# make a copy
img = np.multiply(img, std)
# inplace
img = np.add(img, mean)
if to_bgr:
img = img[:, :, ::-1]
return img
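# Note (added comment): _imdenormalize simply reverses the usual
# (img - mean) / std normalisation, e.g. a normalised value of 0.0 with
# mean=127.5 and std=58.4 maps back to 127.5, and the channel order is
# flipped back to BGR when to_bgr is True.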
def imgs2tensor(imgs, tensor_fmt='NCHW', tensor_ptr=None):
"""Convert 3-channel images to tensor
Args:
imgs (list[AscendArray]): A list that contains multiple images,
shape (h, w, c), support RGB/BGR, YUV444
tensor_fmt (str, optional): Data format of output tensor. Defaults to 'NCHW'.
tensor_ptr (int, optional): Data pointer of output tensor. If it is None,
we will create an AscendArray and bind the array's data pointer to it.
Defaults to None.
Returns:
AscendArray: Tensor that contains multiple images, shape (N, C, H, W)
or shape (N, H, W, C)
Typical usage example:
```python
imgs = [ascend_array1, ascend_array2]
data = ascend.imgs2tensor(imgs, tensor_fmt='NHWC')
```
"""
if not isinstance(imgs, list):
raise TypeError(f"Input imgs expects a list, but got {type(imgs)}.")
if len(imgs) <= 0:
raise ValueError(f"Input imgs is a null list.")
# get first image's shape and format
format = imgs[0].format
_shape = imgs[0].shape
if format in yuv420:
shape = _shape + (1,)
else:
shape = _shape
# generate output tensor shape
if tensor_fmt == 'NCHW':
tensor_shape = (len(imgs),) + shape[-1:] + shape[:-1]
elif tensor_fmt == 'NHWC':
tensor_shape = (len(imgs),) + shape
else:
raise ValueError(
f"Tensor format only accept 'NCHW' or 'NHWC', but got {tensor_fmt}.")
if not tensor_ptr:
tensor = AscendArray(
tensor_shape, dtype=imgs[0].dtype, format=tensor_fmt)
_ptr = tensor.ascend_data
else:
assert isinstance(tensor_ptr, int), \
f"Input tensor_ptr expects an int, but got {type(tensor_ptr)}."
_ptr = tensor_ptr
nbytes = 0
for i, img in enumerate(imgs):
        assert _shape == img.shape, f"imgs[{i}]'s shape {img.shape} does not match the others."
        assert format == img.format, f"imgs[{i}]'s format {img.format} does not match the others."
if tensor_fmt == 'NCHW':
# swap channel using transform operator
'''
to do transformer
'''
pass
        # copy this image at the current offset, then advance the offset
        memcpy_d2d(_ptr + nbytes, img.ascend_data, img.nbytes)
        nbytes = nbytes + img.nbytes
return tensor if not tensor_ptr else None
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
"""Convert tensor to a 3-channel images
Args:
tensor (AscendArray): Tensor that contains multiple images, shape (N, C, H, W) or shape (N, H, W, C)
mean (tuple[float], optional): The mean value of images. Defaults to (0, 0, 0).
std (tuple[float], optional): The standard deviation of images. Defaults to (1, 1, 1).
to_rgb (bool, optional): Whether the tensor was converted to RGB format in the first place.
If so, convert it back to BGR. Defaults to True.
Returns:
list[np.ndarray]: A list that contains multiple images.
Typical usage example:
```python
imgs = ascend.tensor2imgs(tensors)
```
"""
if not isinstance(tensor, AscendArray):
raise TypeError(
f"Input tensor expects an AscendArray, but got {type(tensor)}.")
if tensor.ndim != 4:
raise ValueError(
f"Input tensor expects a 4-dim, but got {tensor.ndim}.")
if tensor.format not in ["NCHW", "NHWC"]:
raise ValueError(
f"Input tensor's format only support 'NCHW' or 'NHWC', but given {tensor.format}.")
assert len(mean) == 3, \
f"Input mean of images expects a 3-elements tuple, but got {len(mean)}."
assert len(std) == 3, \
f"Input std of images expects a 3-elements tuple, but got {len(std)}."
batch_size = tensor.shape[0]
mean = np.array(mean, dtype=np.float32)
std = np.array(std, dtype=np.float32)
if tensor.format == "NCHW":
try:
tensor = Permute(tensor, axes=(0, 2, 3, 1))
except:
tensor = tensor.to_np.transpose(0, 2, 3, 1)
else:
tensor = tensor.to_np
imgs = []
for img_id in range(batch_size):
img = tensor[img_id, ...]
img = _imdenormalize(img, mean, std, to_bgr=to_rgb).astype(np.uint8)
imgs.append(np.ascontiguousarray(img))
return imgs
| 34.592593
| 109
| 0.607066
|
e85e69f4d1f7a200ce36362f5f98dcc0d92bc4d4
| 2,306
|
py
|
Python
|
Bounce.py
|
Jashu1602/Bounce
|
dba7cc8544401a4417db76ea0090ef5070e8db5d
|
[
"Apache-2.0"
] | null | null | null |
Bounce.py
|
Jashu1602/Bounce
|
dba7cc8544401a4417db76ea0090ef5070e8db5d
|
[
"Apache-2.0"
] | null | null | null |
Bounce.py
|
Jashu1602/Bounce
|
dba7cc8544401a4417db76ea0090ef5070e8db5d
|
[
"Apache-2.0"
] | null | null | null |
from tkinter import*
import random
import time
tk=Tk()
tk.title("Bounce!")
tk.resizable(0,0)
tk.wm_attributes("-topmost",1)
canvas=Canvas(tk,width=500,height=500,bd=0,highlightthickness=0)
canvas.pack()
tk.update()
class Ball:
def __init__(self,canvas,paddle,color):
self.canvas=canvas
self.paddle=paddle
self.id=canvas.create_oval(10,10,25,25,fill=color)
self.canvas.move(self.id,245,100)
start=[-3,-3,-1,0,1,2,3]
random.shuffle(start)
self.x=start[0]
self.y=-3
self.canvas_height=self.canvas.winfo_height()
self.hit_bottom=False
def hit_paddle(self,pos):
paddle_pos=self.canvas.coords(self.paddle.id)
if pos[2]>=paddle_pos[0] and pos[0]<=paddle_pos[2]:
if pos[3]>=paddle_pos[1] and pos[3]<=paddle_pos[3]:
return True
return False
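    # Note (added comment): canvas.coords returns [x1, y1, x2, y2], so the
    # checks above test horizontal overlap with the paddle and whether the
    # bottom edge of the ball lies within the paddle's vertical extent.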
def draw(self):
self.canvas.move(self.id,self.x,self.y)
pos=self.canvas.coords(self.id)
if pos[1]<=0:
self.y=1
if pos[3]>=self.canvas.winfo_height():
self.hit_bottom=True
canvas.create_text(245,100,text="Game Over")
if pos[0]<=0:
self.x=3
if pos[2]>=self.canvas.winfo_width():
self.x=-3
if self.hit_paddle(pos)==True:
self.y=-3
class Paddle:
def __init__(self,canvas,color):
self.canvas=canvas
self.id=canvas.create_rectangle(0,0,100,10,fill=color)
self.canvas.move(self.id,200,300)
self.x=0
self.canvas_width=self.canvas.winfo_width()
self.canvas.bind_all('<KeyPress-Left>',self.turn_left)
self.canvas.bind_all('<KeyPress-Right>',self.turn_right)
def draw(self):
self.canvas.move(self.id,self.x,0)
pos=self.canvas.coords(self.id)
if pos[0]<=0:
self.x=0
if pos[2]>=self.canvas.winfo_width():
self.x=0
def turn_left(self,evt):
self.x=-2
def turn_right(self,evt):
self.x=2
paddle=Paddle(canvas,'blue')
ball=Ball(canvas,paddle,'red')
while 1:
if ball.hit_bottom==False:
ball.draw()
paddle.draw()
tk.update_idletasks()
tk.update()
time.sleep(0.000001)
| 27.783133
| 64
| 0.579358
|
82a8faac8e564ce45118ebfa8c39b88d3434987a
| 6,221
|
py
|
Python
|
aiokubernetes/watch/watch.py
|
tantioch/aiokubernetes
|
2f332498598ece14d22f8e59ecb02665db6db68d
|
[
"Apache-2.0"
] | 1
|
2018-07-11T01:35:31.000Z
|
2018-07-11T01:35:31.000Z
|
aiokubernetes/watch/watch.py
|
revoteon/aiokubernetes
|
730eae03e4779563740f07ad3ecef180b511ac18
|
[
"Apache-2.0"
] | null | null | null |
aiokubernetes/watch/watch.py
|
revoteon/aiokubernetes
|
730eae03e4779563740f07ad3ecef180b511ac18
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import json
import pydoc
from collections import namedtuple
import aiokubernetes as k8s
# All API responses will be wrapped into this tuple.
# The `name` will be 'ADDED', MODIFIED, etc, `raw` will the unprocessed but
# Json decoded response from K8s and `obj` will be the Swagger object created
# from `raw` (may be None if there was an error).
WatchResponse = namedtuple('WatchResponse', 'name raw obj')
def _find_return_type(func):
"""Return the K8s response type as a string, eg `V1Namespace`.
Return None if the return type was not in the doc string of `func`.
Raise `AssertionError` if the doc string was ambiguous.
    NOTE: this function _assumes_ the doc strings follow a certain format.
"""
# Find all the lines that mention the return type.
lines = [_ for _ in pydoc.getdoc(func).splitlines() if _.startswith(":return:")]
# Return None if the doc string does not mention a return type (user
# probably specified an invalid function; would be good to catch at some
# point).
if len(lines) == 0:
return None
# Raise an exception if we could not unambiguously determine the return type.
assert len(lines) == 1, 'Unable to determine return type for {}'.format(func)
# Strip the leading ':return:' and trailing 'List' string to extract the
# correct type name.
line = lines[0]
rtype = line.partition(":return:")[2].strip()
rtype = rtype.rpartition("List")[0].strip()
return rtype
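# Illustrative example (hypothetical doc string line, added for clarity): a
# line such as ":return: V1NamespaceList" would be reduced to "V1Namespace"
# by stripping the ":return:" prefix and the trailing "List" above.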
class Watch(object):
def __init__(self, api_func, *args, **kwargs):
"""Watch an API resource and stream the result back via a generator.
:param api_func: The API function pointer, for instance,
            `CoreV1Api().list_namespace`. Any parameter to the function
can be passed after this parameter.
        :return: Iterating over this object yields `WatchResponse` named
            tuples with these fields:
            'name': the type of event, such as "ADDED", "DELETED", etc.
            'raw': the raw, Json decoded response from K8s.
            'obj': a Swagger model built from 'raw'. The model type is
                inferred from the api_func's doc string; 'obj' is None if
                the type could not be determined or the response could not
                be parsed.
        Example:
            v1 = CoreV1Api()  # client instance for the target cluster
            watch = Watch(v1.list_namespace, timeout_seconds=10)
            async for event in watch:
                name = event.name  # eg "ADDED"
                obj = event.obj    # deserialised model (may be None)
                raw = event.raw    # raw response from K8s
                ...
                if should_stop:
                    watch.stop()
        """
self._api_client = api_func.__self__.api_client
self._stop = False
# Make this more explicit and cover with a test.
self.return_type = _find_return_type(api_func)
kwargs['watch'] = True
kwargs['_preload_content'] = False
self.api_func = functools.partial(api_func, *args, **kwargs)
self.connection = None
def __aiter__(self):
return self
async def __anext__(self):
        # On the first iteration, call the user supplied function (eg
        # `list_namespaced_pods`) and keep its streaming connection.
if self.connection is None:
tmp = await self.api_func()
self.connection = tmp.http.content
del tmp
# Abort at the current iteration if the user has called `stop` on this
# stream instance.
if self._stop:
raise StopAsyncIteration
# Fetch the next K8s response. This is where the callee's async
# iterator will yield until K8s sends another Http chunk through the
# connection.
line = await self.connection.readline()
# Stop the iterator if K8s sends an empty response. This happens when
# eg the supplied timeout has expired.
if len(line) == 0:
raise StopAsyncIteration
return self.unmarshal_event(line, self.return_type)
def stop(self):
self._stop = True
@staticmethod
def unmarshal_event(data: bytes, response_type):
"""Return the K8s response `data` in a `WatchResponse` tuple.
"""
try:
line = data.decode('utf8')
js = json.loads(line)
# Unpack the watched event and extract the event name (ADDED, MODIFIED,
# etc) and the raw event content.
name, k8s_obj = js['type'], js['object']
except UnicodeDecodeError:
# fixup: log message
return WatchResponse(name=None, raw=data, obj=None)
except json.decoder.JSONDecodeError:
# fixup: log message
return WatchResponse(name=None, raw=data, obj=None)
except KeyError:
# fixup: log message
return WatchResponse(name=None, raw=data, obj=None)
# Something went wrong. A typical example would be that the user
# supplied a resource version that was too old. In that case K8s would
# not send a conventional ADDED/DELETED/... event but an error.
if name.lower() == 'error' or response_type is None:
return WatchResponse(name=name, raw=data, obj=None)
# De-serialise the K8s response and return everything.
obj = k8s.swagger.deserialize(data=k8s_obj, klass=response_type)
return WatchResponse(name=name, raw=data, obj=obj)
| 38.639752
| 84
| 0.63575
|
c588648582e28c3af59fd7fa5d5414d97d92b219
| 1,158
|
py
|
Python
|
Basic Data Structures/array/leet_039_CombinationSum.py
|
rush2catch/algorithms-leetcode
|
38a5e6aa33d48fa14fe09c50c28a2eaabd736e55
|
[
"MIT"
] | null | null | null |
Basic Data Structures/array/leet_039_CombinationSum.py
|
rush2catch/algorithms-leetcode
|
38a5e6aa33d48fa14fe09c50c28a2eaabd736e55
|
[
"MIT"
] | null | null | null |
Basic Data Structures/array/leet_039_CombinationSum.py
|
rush2catch/algorithms-leetcode
|
38a5e6aa33d48fa14fe09c50c28a2eaabd736e55
|
[
"MIT"
] | null | null | null |
# Problem: Combination Sum
# Difficulty: Medium
# Category: Array
# Leetcode 39: https://leetcode.com/problems/combination-sum/description/
# Description:
"""
Given a set of candidate numbers (C) (without duplicates) and a target number (T), find all unique combinations in C where the candidate numbers sum to T.
The same repeated number may be chosen from C an unlimited number of times.
Note:
All numbers (including target) will be positive integers.
The solution set must not contain duplicate combinations.
For example, given candidate set [2, 3, 6, 7] and target 7,
A solution set is:
[
[7],
[2, 2, 3]
]
"""
class Solution(object):
def combination(self, nums, target):
if not nums:
return []
nums.sort()
ans = []
self.backtrack(ans, [], nums, target, 0)
return ans
def backtrack(self, ans, temp, nums, remain, start):
if remain < 0:
return
elif remain == 0:
ans.append([] + temp)
return
else:
for i in range(start, len(nums)):
temp.append(nums[i])
self.backtrack(ans, temp, nums, remain - nums[i], i)
temp.pop()
obj = Solution()
nums = [2, 3, 6, 7]
target = 6
print(obj.combination(nums, target))
| 23.16
| 155
| 0.683074
|
007d063395c478eb9c26a7c4d4383d2f8c53e8c1
| 874
|
py
|
Python
|
osd/components/boolean.py
|
bmeyers/optimal-signal-demixing
|
87b65a9d3c02ee6b8e5156e6fc457aed041852b1
|
[
"BSD-3-Clause"
] | 1
|
2021-12-17T02:58:25.000Z
|
2021-12-17T02:58:25.000Z
|
osd/components/boolean.py
|
bmeyers/optimal-signal-demixing
|
87b65a9d3c02ee6b8e5156e6fc457aed041852b1
|
[
"BSD-3-Clause"
] | null | null | null |
osd/components/boolean.py
|
bmeyers/optimal-signal-demixing
|
87b65a9d3c02ee6b8e5156e6fc457aed041852b1
|
[
"BSD-3-Clause"
] | null | null | null |
''' Boolean Signal
This module contains the class for Boolean signal
Author: Bennet Meyers
'''
import numpy as np
from osd.components.component import Component
class Boolean(Component):
def __init__(self, scale=1, shift=0, **kwargs):
super().__init__(**kwargs)
self.scale = scale
self.shift = shift
return
@property
def is_convex(self):
return False
def _get_cost(self):
return lambda x: 0
def prox_op(self, v, weight, rho, use_set=None):
low_val = self.shift
high_val = self.scale + self.shift
r_0 = np.abs(v - low_val)
r_1 = np.abs(v - high_val)
x = np.ones_like(v) * low_val
x[r_1 <= r_0] = high_val
# deterministic behavior when there are missing values
if use_set is not None:
x[~use_set] = low_val
return x
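# Minimal usage sketch (added for illustration only; assumes the Component
# base class needs no extra constructor arguments):
#
#   b = Boolean(scale=1, shift=0)
#   b.prox_op(np.array([0.2, 0.8, 0.5]), weight=None, rho=None)
#   # -> array([0., 1., 1.]); each entry snaps to the nearer of `shift`
#   #    and `scale + shift`, with ties resolved to the high value.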
| 23.621622
| 62
| 0.608696
|
40806074398bfac17206e3af857c8690beb8f834
| 1,793
|
py
|
Python
|
python/forgetpwd.py
|
fanhuajun/notes
|
bd3b76de6dd7b11e2eb5b78f07eb575420adb459
|
[
"Apache-2.0"
] | 2
|
2021-01-24T20:07:03.000Z
|
2021-12-09T06:23:28.000Z
|
python/forgetpwd.py
|
fanhuajun/notes
|
bd3b76de6dd7b11e2eb5b78f07eb575420adb459
|
[
"Apache-2.0"
] | null | null | null |
python/forgetpwd.py
|
fanhuajun/notes
|
bd3b76de6dd7b11e2eb5b78f07eb575420adb459
|
[
"Apache-2.0"
] | 1
|
2021-02-25T09:18:03.000Z
|
2021-02-25T09:18:03.000Z
|
import requests
import json
def forgetPwd(codeIn):
url = "http://localhost:8681/ssoserver/ModifyNextServlet"
querystring = {"code":codeIn,"phone":"18729968867","spm": "0.1798988093245859","username":"fanhuajun"}
headers = {
'User-Agent': "PostmanRuntime/7.20.1",
'Accept': "*/*",
'Cache-Control': "no-cache",
'Postman-Token': "27fc6c54-c6a0-48f5-a90a-def5a48fe202,b528eaec-0d9e-47a9-aa8a-ac36eca2c7a1",
'Host': "szwb.sz.gov.cn:8007",
'Accept-Encoding': "gzip, deflate",
'Cookie': "JSESSIONID=B46AD113600F7C45B8C2E5BA4A2074C6",
'Connection': "keep-alive",
'cache-control': "no-cache"
}
response = requests.request("GET", url, headers=headers, params=querystring)
print(response.text)
def getCode(username, phone):
url = "http://localhost:8681/ssoserver/SendCode"
querystring = {"phone": phone, "spm": "0.3767032002035844", "username": username}
headers = {
'User-Agent': "PostmanRuntime/7.20.1",
'Accept': "*/*",
'Cache-Control': "no-cache",
'Host': "szwb.sz.gov.cn:8007",
'Cookie': "JSESSIONID=B46AD113600F7C45B8C2E5BA4A2074C6",
}
response = requests.request("GET", url, headers=headers, params=querystring)
print(response, response.text)
json_obj = json.loads(response.text)
return json_obj["error"]
def sendCodeTip():
message1 = getCode("fanhuajun","")
message2 = getCode("", "18729968867")
message3 = getCode("fanhuajun不存在", "18729968867")
if "手机号码不对" not in message1:
raise RuntimeError("手机号码不对--提示有问题")
if "用户不存在" not in message2:
raise RuntimeError("手机号码不对--message="+message2)
if "用户不存在" not in message3:
raise RuntimeError("用户不存在--message=" + message3)
| 32.6
| 106
| 0.639152
|
978facc5c617fd975bcb593a38d537fe4215677e
| 21,315
|
py
|
Python
|
pytorch_segmentation/main.py
|
lutbook/pytorch-segmentation-pipeline
|
eb29d1bf240c158c64d81177e9be93cd958c0026
|
[
"MIT"
] | null | null | null |
pytorch_segmentation/main.py
|
lutbook/pytorch-segmentation-pipeline
|
eb29d1bf240c158c64d81177e9be93cd958c0026
|
[
"MIT"
] | null | null | null |
pytorch_segmentation/main.py
|
lutbook/pytorch-segmentation-pipeline
|
eb29d1bf240c158c64d81177e9be93cd958c0026
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from torch.nn.modules import loss
from torchvision import transforms
from torchvision.transforms.functional import crop
from torchsummary import summary
from torch.optim import SGD, lr_scheduler
from torch.utils.tensorboard import SummaryWriter
import os, time, cv2, argparse, functools
import numpy as np
import pandas as pd
from PIL import Image
from models import DeepLabv3_plus, build_unet
from functions import CustomImageDataset, imshow, AverageMeter, inter_and_union
torch.cuda.empty_cache()
torch.backends.cudnn.benchmark = True
print = functools.partial(print, flush=True)
parser = argparse.ArgumentParser()
parser.add_argument("-exp", "--exp_name", type=str, help='expirement name', default='exp0')
parser.add_argument("-ds", "--dataset_dir", type=str, help='dataset directory', default='data')
parser.add_argument("-m", "--model_name", type=str, help='model name', default='unet')
parser.add_argument("-ne", "--num_epochs", type=int, help='number of training epochs', default=10)
parser.add_argument("-bs", "--batch_size", type=int, help='batch_size', default=2)
parser.add_argument("-img_h", "--image_height", type=int, help='model input image height (and width)', default=64)
parser.add_argument("-pd", "--pred_dir", type=str, help='prediction directory in dataset directory', default=None)
parser.add_argument("-sr", "--sample_resize", type=int, help='sample resize, default=None', default=None)
parser.add_argument("-ic", "--inf_class", type=str, help='inference class name', default=None)
parser.add_argument("-nw", "--num_workers", type=int, help='num_workers for dataloader', default=0)
args = parser.parse_args()
ROOT_DIR = os.path.dirname(os.getcwd())
os.chdir(ROOT_DIR)
EXP_NAME = args.exp_name # default='exp0'
DATASET_DIR = args.dataset_dir # default='data'
MODEL_DIR = 'pytorch_segmentation'
MODEL = args.model_name
CSV_FILE = os.path.join(DATASET_DIR, 'dataset_labels.csv')
IMG_HEIGHT = args.image_height # default=64
IMG_WIDTH = IMG_HEIGHT
N_CLASSES = len(pd.read_csv(CSV_FILE).name)
BATCH_SIZE = args.batch_size # default=2
N_EPOCHS = args.num_epochs # default=10
SAMPLE_RESIZE = args.sample_resize # default=None
NUM_WORKERS = args.num_workers # default=0
RESUME = False
SAVING_STEP = 10 #10 if N_EPOCHS >= 1000 else 10 # int( N_EPOCHS / 10 )
PRED_DIR = os.path.join(args.pred_dir) if not args.pred_dir==None else None
CLASS_LABELS = [str(x) for x in pd.read_csv(CSV_FILE).name]
INF_CLASS_IDX = CLASS_LABELS.index(args.inf_class) if not args.inf_class== None else None
print('\n', '* ----- ' * 7, '*\n')
# Experiment directory check
if not os.path.isdir(EXP_NAME):
os.mkdir(EXP_NAME)
os.mkdir(os.path.join(EXP_NAME, 'weights'))
os.mkdir(os.path.join(EXP_NAME, 'tb_log'))
print("Experiment : '{}' has begin.".format(EXP_NAME))
else:
if not PRED_DIR:
RESUME = True
# Tensorboard log
writer = SummaryWriter(os.path.join(EXP_NAME, 'tb_log'))
def main():
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_name = os.path.join(EXP_NAME, 'weights', '{}_epoch_%d.pth'.format(MODEL))
print('{}:{}\n'.format( device.type, device.index))
# model preparation
if MODEL == 'unet':
model = build_unet(num_classes=N_CLASSES)
elif MODEL == 'deeplabv3p':
model = DeepLabv3_plus(n_classes=N_CLASSES)
else:
print('No {} model defined'.format(MODEL))
exit(0)
model.to(device)
# training
if not PRED_DIR:
# dataset preparation
image_transform = transforms.Compose([
# transforms.RandomCrop(IMG_HEIGHT),
transforms.ColorJitter(),
transforms.ToTensor()
])
# target_transform = transforms.Compose([
# transforms.RandomCrop(IMG_HEIGHT)
# ])
train_dataset = CustomImageDataset('train',
DATASET_DIR,
CSV_FILE,
image_transform=image_transform)#,
# target_transform=target_transform)
train_data_loader = torch.utils.data.DataLoader(train_dataset,
BATCH_SIZE,
pin_memory=True,
shuffle=True,
num_workers=NUM_WORKERS)
val_dataset = CustomImageDataset('val',
DATASET_DIR,
CSV_FILE,
image_transform=image_transform)#,
# target_transform=target_transform)
val_data_loader = torch.utils.data.DataLoader(val_dataset,
BATCH_SIZE,
pin_memory=True,
shuffle=True,
num_workers=NUM_WORKERS)
optimizer = SGD(model.parameters(), lr=0.001, momentum=0.9)
criterion = nn.CrossEntropyLoss()
scheduler = lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
start_epoch = 0
train_loss = []
val_loss = []
best_miou = 0
iou = 0
# check if is it resume training
if RESUME:
print("Continue the training of experiment: '{}'".format(EXP_NAME))
try:
os.remove(os.path.join(EXP_NAME, 'weights', '.DS_Store'))
except:
pass
chkpts_list = os.listdir(os.path.join(EXP_NAME, 'weights'))
if len(chkpts_list) != 0:
latest_epoch_saved = np.amax(np.array([int( x.split('.')[0].split('_')[-1] ) for x in chkpts_list]))
checkpoint = torch.load(model_name % latest_epoch_saved)
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
best_miou = checkpoint['mIoU']
print('\tresuming from:', os.path.join(EXP_NAME, 'weights', '{}_epoch_%d.pth'.format(MODEL) % latest_epoch_saved),'\n')
if start_epoch >= N_EPOCHS:
print('')
print("Training epoch is {}, but loaded epoch is {}.".format(N_EPOCHS, start_epoch))
print("Try again with higher epoch number.\n")
exit(0)
print('Training..')
for epoch in range(start_epoch, N_EPOCHS):
#train
model.train()
epoch_start = time.time()
train_epoch_loss = 0
for i, (inputs, target) in enumerate(train_data_loader):
inputs = inputs.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
cntr = 0
batch_window_loss = 0
                # inputs from the DataLoader is NCHW, so height is dim 2 and width is dim 3
                for h in range(0, inputs.shape[2], IMG_HEIGHT):
                    for w in range(0, inputs.shape[3], IMG_HEIGHT):
cntr += 1
# pil_inputs = transforms.ToPILImage()(inputs)
# input_window = transforms.ToTensor()(crop(inputs, h, w, IMG_HEIGHT, IMG_HEIGHT))
input_window = crop(inputs, h, w, IMG_HEIGHT, IMG_HEIGHT)
# target_window = transforms.ToTensor()(crop(target, h, w, IMG_HEIGHT, IMG_HEIGHT))
target_window = crop(target, h, w, IMG_HEIGHT, IMG_HEIGHT)
output_winow = model(input_window)
loss_window = criterion(output_winow, target_window)
batch_window_loss += loss_window.item()
train_epoch_loss += batch_window_loss/cntr
# for i, (inputs, target) in enumerate(train_data_loader):
# inputs = inputs.to(device, non_blocking=True)
# target = target.to(device, non_blocking=True)
# outputs = model(inputs)
# loss = criterion(outputs, target)
# train_epoch_loss += loss.item()
if device.type == 'cpu':
optimizer.zero_grad()
else:
optimizer.zero_grad(set_to_none=True)
# loss.backward()
loss_window.backward()
optimizer.step()
train_loss.append( train_epoch_loss/ len(train_data_loader) )
# validation
model.eval()
inter_meter = AverageMeter()
union_meter = AverageMeter()
val_epoch_loss = 0
with torch.no_grad():
for i, (inputs, target) in enumerate(val_data_loader):
inputs = inputs.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
cntr = 0
batch_window_loss = 0
                    # same NCHW convention as in the training loop above
                    for h in range(0, inputs.shape[2], IMG_HEIGHT):
                        for w in range(0, inputs.shape[3], IMG_HEIGHT):
cntr += 1
# pil_inputs = transforms.ToPILImage()(inputs)
# input_window = transforms.ToTensor()(crop(inputs, h, w, IMG_HEIGHT, IMG_HEIGHT))
# target_window = transforms.ToTensor()(crop(target, h, w, IMG_HEIGHT, IMG_HEIGHT))
input_window = crop(inputs, h, w, IMG_HEIGHT, IMG_HEIGHT)
target_window = crop(target, h, w, IMG_HEIGHT, IMG_HEIGHT)
output_winow = model(input_window)
loss_window = criterion(output_winow, target_window)
batch_window_loss += loss_window.item()
pred_window = torch.argmax(output_winow, dim=1).data.cpu().numpy().squeeze().astype(np.uint8)
inter, union = inter_and_union(pred_window, target_window.cpu(), N_CLASSES)
inter_meter.update(inter)
union_meter.update(union)
val_epoch_loss += batch_window_loss/cntr
# outputs = model(inputs)
# loss = criterion(outputs, target)
# val_epoch_loss += loss.item()
# pred = torch.argmax(outputs, dim=1).data.cpu().numpy().squeeze().astype(np.uint8)
# inter, union = inter_and_union(pred, target.cpu(), N_CLASSES)
# inter_meter.update(inter)
# union_meter.update(union)
iou = inter_meter.sum / (union_meter.sum + 1e-10)
val_loss.append( val_epoch_loss/len(val_data_loader) )
scheduler.step()
epoch_end = time.time()
print('-- Epoch {} -- train_loss: {:.4f}, val_loss: {:.4f} -- miou: {:.4f} ({:.4f} mins)'.format(epoch,
train_loss[epoch - start_epoch],
val_loss[epoch - start_epoch],
iou.mean(),
(epoch_end - epoch_start) / 60))
writer.add_scalars('Loss', {'train loss':train_loss[epoch - start_epoch],
'val loss': val_loss[epoch - start_epoch],
'mIoU': iou.mean()}, epoch)
if epoch % SAVING_STEP == (SAVING_STEP - 1):
torch.save({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'mIoU': iou.mean()
}, model_name % (epoch + 1))
if best_miou <= iou.mean(): # and best_val_loss >= val_loss[epoch - start_epoch]:
best_miou = iou.mean()
print("\t\t\t\t\t\t\tBest mIoU model: {}: {:.4f} mIoU".format(model_name % 0, best_miou))
torch.save({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'mIoU': best_miou
}, model_name % 0)
writer.close()
# inference on data
elif PRED_DIR:
print('Inference..')
if SAMPLE_RESIZE is not None:
s = 'inference_{}'.format(SAMPLE_RESIZE)
else:
s = 'inference'
if not os.path.isdir(os.path.join(EXP_NAME, s)):
os.mkdir(os.path.join(EXP_NAME, s ))
print("Prediction result will be saved in '{}'\n".format(os.path.join(EXP_NAME, s)))
checkpoint = torch.load(model_name % 0)
f = open(os.path.join(EXP_NAME, s, 'inference result.txt'), 'w+')
print("\t(mIoU: {:.4f} model loaded: '{}')\n\n".format(checkpoint['mIoU'], model_name % 0))
f.writelines("\nmIoU: {:.4f} model loaded: '{}'\n\n".format(checkpoint['mIoU'], model_name % 0))
model.load_state_dict(checkpoint['state_dict'])
model.eval()
# Color dictionary
df = pd.read_csv(CSV_FILE)
rgb_df = df[['r', 'g', 'b']]
color_dict = [tuple(x) for x in rgb_df.values.astype(np.int) ]
f.writelines('\nInference file name, size, fps\n')
try:
os.remove(os.path.join(PRED_DIR, '.DS_Store'))
except:
pass
# inference on all '*.png', '*.jpg' and '*.mp4' files
for file_name in sorted(os.listdir(PRED_DIR)):
# except directory
if not os.path.isfile(os.path.join(PRED_DIR, file_name)):
continue
if file_name[0] == '.':
continue
# inference on '*.mp4' video files
elif file_name.split('.')[1] == 'mp4':
file_path = os.path.join(PRED_DIR, file_name)
out_file_path = os.path.join(EXP_NAME, s, file_name)
cap = cv2.VideoCapture(file_path)
fps = int(cap.get(cv2.CAP_PROP_FPS))
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
out_video = cv2.VideoWriter(out_file_path.split('.')[0]+'_masked.mp4',
cv2.VideoWriter_fourcc(*'mp4v'),
fps,
(frame_width, frame_height) )
mask_video = cv2.VideoWriter(out_file_path.split('.')[0]+'_mask_only.mp4',
cv2.VideoWriter_fourcc(*'mp4v'),
fps,
(frame_width, frame_height) )
duration = 0
frm_cntr = 0
frames = []
while True:
ret, frame = cap.read()
if not ret:
break
frames.append(frame)
for frame in frames:
frm_cntr += 1
img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
output = img
if SAMPLE_RESIZE:
img = img.resize((SAMPLE_RESIZE, SAMPLE_RESIZE))
start_time = time.time()
# image_tensor = transforms.ToTensor()(output)
image_tensor = transforms.ToTensor()(img)
# mask = Image.new("RGB", output.size)
mask = Image.new("RGB", img.size)
for h in range(0, image_tensor.shape[1], IMG_HEIGHT):
for w in range(0, image_tensor.shape[2], IMG_HEIGHT):
window = transforms.ToTensor()(crop(img, h, w, IMG_HEIGHT, IMG_HEIGHT))
window_pred = model(window.unsqueeze(0).to(device, non_blocking=True))
window_pred = torch.argmax(window_pred, dim=1).cpu().squeeze()
window_pred = imshow(window_pred, num_classes=N_CLASSES, colors=color_dict, inf_class_idx=INF_CLASS_IDX, mode='pred')
# window_pred = window_pred.resize( output.size , Image.NEAREST)
Image.Image.paste(mask, window_pred, (w,h))
mask = mask.resize(output.size, Image.NEAREST)
output = Image.composite(mask, output , mask.convert('L'))
out_video.write(np.array(output)[:, :, :: -1] )
mask_video.write(np.array(mask)[:, :, :: -1] )
end_time = time.time()
duration += end_time - start_time
print("\t\tvideo frame segmentation: {}/{}".format(frm_cntr, n_frames))
cap.release()
out_video.release()
mask_video.release()
str = '{} : size= {} (model input size: {}), original fps: {:.4f}, model fps: {:.4f}'.format(file_name,
(frame_width, frame_height),
IMG_HEIGHT,
fps,
n_frames / duration * 1.0 )
print(str)
f.writelines('\n\t' + str)
# inference on '*.png' '*.jpg' image files
elif file_name.split('.')[1] == 'png' or file_name.split('.')[1] == 'jpg':
file_path = os.path.join(PRED_DIR, file_name)
out_file_path = os.path.join(EXP_NAME, s, file_name)
img = Image.open(file_path).convert('RGB')
start_time = time.time()
blend_output = img
masked_output = img
if SAMPLE_RESIZE:
img = img.resize((SAMPLE_RESIZE, SAMPLE_RESIZE))
image_tensor = transforms.ToTensor()(img)
mask = Image.new("RGB", img.size)
# sliding window
for h in range(0, image_tensor.shape[1], IMG_HEIGHT):
for w in range(0, image_tensor.shape[2], IMG_HEIGHT):
window = transforms.ToTensor()(crop(img, h, w, IMG_HEIGHT, IMG_HEIGHT))
window_pred = model(window.unsqueeze(0).to(device, non_blocking=True))
window_pred = torch.argmax(window_pred, dim=1).cpu().squeeze()
window_pred = imshow(window_pred, num_classes=N_CLASSES, colors=color_dict, inf_class_idx=INF_CLASS_IDX, mode='pred')
# window_pred = window_pred.resize( mask.size , Image.NEAREST)
Image.Image.paste(mask, window_pred, (w,h))
mask = mask.resize(blend_output.size, Image.NEAREST)
blend_output = Image.composite(mask, blend_output , mask.convert('L'))
masked_output = mask
end_time = time.time()
blend_output.save(out_file_path.split('.')[0]+'_blend_slidingWindow.png')
masked_output.save(out_file_path.split('.')[0]+'_mask_slidingWindow.png')
str = '{}: size={} (model input size: {}), fps:{:.4f}'.format(file_name,
img.size,
IMG_HEIGHT,
1/(end_time-start_time))
print(str)
f.writelines('\n\t' + str)
# other files are not compatible
else:
print("Your file: ", file_name ,"\n\tChoose '.png','.jpg' image file or '.mp4' video file. \n")
continue
f.close()
if __name__ == "__main__":
main()
print('')
| 49.685315
| 145
| 0.496223
|
4a522bfc558a66ad6ad906c44b86cfebbfe0ebbe
| 2,895
|
py
|
Python
|
userbot/modules/create.py
|
newkanekibot/CilikUserbot
|
472b1215f0dedc33957737f6f57f8c1c93f115f0
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 4
|
2022-01-31T14:35:01.000Z
|
2022-03-31T06:42:39.000Z
|
userbot/modules/create.py
|
newkanekibot/CilikUserbot
|
472b1215f0dedc33957737f6f57f8c1c93f115f0
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2022-03-19T15:54:46.000Z
|
2022-03-19T15:54:46.000Z
|
userbot/modules/create.py
|
newkanekibot/CilikUserbot
|
472b1215f0dedc33957737f6f57f8c1c93f115f0
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 22
|
2022-01-29T20:29:35.000Z
|
2022-03-31T06:42:41.000Z
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module for filter commands """
from telethon.tl import functions
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP
from userbot.utils import cilik_cmd
@cilik_cmd(pattern="buat (gb|g|c)(?: |$)(.*)")
async def _(grop):
"""For .create command, Creating New Group & Channel"""
if grop.text[0].isalpha() or grop.text[0] in ("/", "#", "@", "!"):
return
if grop.fwd_from:
return
type_of_group = grop.pattern_match.group(1)
group_name = grop.pattern_match.group(2)
if type_of_group == "gb":
try:
result = await grop.client(
functions.messages.CreateChatRequest(
users=["@MissRose_bot"],
                    # Telegram no longer allows creating a chat with only
                    # ourselves, so at least one other user (here a bot)
                    # is needed to create the group.
title=group_name,
)
)
created_chat_id = result.chats[0].id
result = await grop.client(
functions.messages.ExportChatInviteRequest(
peer=created_chat_id,
)
)
await grop.edit(
"Grup/Channel {} Berhasil Dibuat. Tekan [{}]({}) Untuk Melihatnya".format(
group_name, group_name, result.link
)
)
except Exception as e:
await grop.edit(str(e))
elif type_of_group in ["g", "c"]:
try:
r = await grop.client(
functions.channels.CreateChannelRequest(
title=group_name,
about="**Selamat Datang Di Channel Ini!**",
megagroup=type_of_group != "c",
)
)
created_chat_id = r.chats[0].id
result = await grop.client(
functions.messages.ExportChatInviteRequest(
peer=created_chat_id,
)
)
await grop.edit(
"Grup/Channel {} Berhasil Dibuat. Tekan [{}]({}) Untuk Melihatnya".format(
group_name, group_name, result.link
)
)
except Exception as e:
await grop.edit(str(e))
CMD_HELP.update(
{
"membuat": f"➢ **Plugin : **`membuat`\
\n\n ┌✪ **Syntax :** `{cmd}buat g` <nama grup>\
\n └✪ **Function : **Membuat grup telegram.\
\n\n ┌✪ **Syntax :** `{cmd}buat gb` <nama grup>\
\n └✪ **Function : **Membuat Grup bersama bot.\
\n\n ┌✪ **Syntax :** `{cmd}buat c` <nama channel>\
\n └✪ **Function : **Membuat sebuah Channel.\
"
}
)
| 34.058824
| 90
| 0.520553
|
bede742a047f3b312881e571c28c842d81f9ae54
| 15,337
|
py
|
Python
|
coco-caption/pycocotools/coco.py
|
SimonK91/im2txt_char
|
c90c9e7de21f9391b8b5e8d87c87d15bd4aa788c
|
[
"Apache-2.0"
] | null | null | null |
coco-caption/pycocotools/coco.py
|
SimonK91/im2txt_char
|
c90c9e7de21f9391b8b5e8d87c87d15bd4aa788c
|
[
"Apache-2.0"
] | null | null | null |
coco-caption/pycocotools/coco.py
|
SimonK91/im2txt_char
|
c90c9e7de21f9391b8b5e8d87c87d15bd4aa788c
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'tylin'
__version__ = '1.0.1'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# segToMask - Convert polygon segmentation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load result file and create result api object.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>segToMask, COCO>showAnns
# Microsoft COCO Toolbox. Version 1.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import datetime
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
from skimage.draw import polygon
import copy
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset = {}
self.anns = []
self.imgToAnns = {}
self.catToImgs = {}
self.imgs = []
self.cats = []
if not annotation_file == None:
print 'loading annotations into memory...'
time_t = datetime.datetime.utcnow()
dataset = json.load(open(annotation_file, 'r'))
print datetime.datetime.utcnow() - time_t
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print 'creating index...'
imgToAnns = {ann['image_id']: [] for ann in self.dataset['annotations']}
anns = {ann['id']: [] for ann in self.dataset['annotations']}
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']] += [ann]
anns[ann['id']] = ann
imgs = {im['id']: {} for im in self.dataset['images']}
for img in self.dataset['images']:
imgs[img['id']] = img
cats = []
catToImgs = []
if 'type' in self.dataset and self.dataset['type'] == 'instances':
cats = {cat['id']: [] for cat in self.dataset['categories']}
for cat in self.dataset['categories']:
cats[cat['id']] = cat
catToImgs = {cat['id']: [] for cat in self.dataset['categories']}
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']] += [ann['image_id']]
print 'index created!'
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
        for key, value in self.dataset['info'].items():
print '%s: %s'%(key, value)
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
anns = sum([self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns],[])
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if 'type' in self.dataset and self.dataset['type'] == 'instances':
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for catId in catIds:
if len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'type' in self.dataset and self.dataset['type'] == 'instances':
ax = plt.gca()
polygons = []
color = []
for ann in anns:
c = np.random.random((1, 3)).tolist()[0]
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((len(seg)/2, 2))
polygons.append(Polygon(poly, True,alpha=0.4))
color.append(c)
else:
# mask
mask = COCO.decodeMask(ann['segmentation'])
img = np.ones( (mask.shape[0], mask.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, mask*0.5) ))
p = PatchCollection(polygons, facecolors=color, edgecolors=(0,0,0,1), linewidths=3, alpha=0.4)
ax.add_collection(p)
if 'type' in self.dataset and self.dataset['type'] == 'captions':
for ann in anns:
print ann['caption']
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
res.dataset['info'] = copy.deepcopy(self.dataset['info'])
if 'type' in self.dataset:
res.dataset['type'] = copy.deepcopy(self.dataset['type'])
res.dataset['licenses'] = copy.deepcopy(self.dataset['licenses'])
print 'Loading and preparing results... '
time_t = datetime.datetime.utcnow()
anns = json.load(open(resFile))
        assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
ann['area']=sum(ann['segmentation']['counts'][2:-1:2])
ann['bbox'] = []
ann['id'] = id
ann['iscrowd'] = 0
print 'DONE (t=%0.2fs)'%((datetime.datetime.utcnow() - time_t).total_seconds())
res.dataset['annotations'] = anns
res.createIndex()
return res
@staticmethod
def decodeMask(R):
"""
Decode binary mask M encoded via run-length encoding.
:param R (object RLE) : run-length encoding of binary mask
:return: M (bool 2D array) : decoded binary mask
"""
N = len(R['counts'])
M = np.zeros( (R['size'][0]*R['size'][1], ))
n = 0
val = 1
for pos in range(N):
val = not val
for c in range(R['counts'][pos]):
M[n] = val
n += 1
return M.reshape((R['size']), order='F')
@staticmethod
def encodeMask(M):
"""
Encode binary mask M using run-length encoding.
:param M (bool 2D array) : binary mask to encode
:return: R (object RLE) : run-length encoding of binary mask
"""
[h, w] = M.shape
M = M.flatten(order='F')
N = len(M)
counts_list = []
pos = 0
# counts
counts_list.append(1)
diffs = np.logical_xor(M[0:N-1], M[1:N])
for diff in diffs:
if diff:
pos +=1
counts_list.append(1)
else:
counts_list[pos] += 1
# if array starts from 1. start with 0 counts for 0
if M[0] == 1:
counts_list = [0] + counts_list
return {'size': [h, w],
'counts': counts_list ,
}
@staticmethod
def segToMask( S, h, w ):
"""
Convert polygon segmentation to binary mask.
:param S (float array) : polygon segmentation mask
:param h (int) : target mask height
:param w (int) : target mask width
:return: M (bool 2D array) : binary mask
"""
M = np.zeros((h,w), dtype=np.bool)
for s in S:
N = len(s)
rr, cc = polygon(np.array(s[1:N:2]), np.array(s[0:N:2])) # (y, x)
M[rr, cc] = 1
return M
| 41.451351
| 128
| 0.554085
|
43fa7fe30409a694aa90ff9842ab450898102b1e
| 113
|
py
|
Python
|
control de repeticion/punto 1.py
|
Vargas13sebas/Algoritmos_programacion
|
84889c377952c8c8fe4f709eb111abe708410e1b
|
[
"MIT"
] | null | null | null |
control de repeticion/punto 1.py
|
Vargas13sebas/Algoritmos_programacion
|
84889c377952c8c8fe4f709eb111abe708410e1b
|
[
"MIT"
] | null | null | null |
control de repeticion/punto 1.py
|
Vargas13sebas/Algoritmos_programacion
|
84889c377952c8c8fe4f709eb111abe708410e1b
|
[
"MIT"
] | null | null | null |
import math
a = 10
b = 1
a = int
b = int(input("enter a number: "))
suma = b + 1
print("the sum is: ", suma)
| 10.272727
| 34
| 0.557522
|
4b3145ee24abe144796478cd86e8c7cccc6fa3b0
| 2,697
|
py
|
Python
|
chrome/test/functional/tracing/tab_tracker.py
|
Crystalnix/BitPop
|
1fae4ecfb965e163f6ce154b3988b3181678742a
|
[
"BSD-3-Clause"
] | 7
|
2015-05-20T22:41:35.000Z
|
2021-11-18T19:07:59.000Z
|
chrome/test/functional/tracing/tab_tracker.py
|
Crystalnix/BitPop
|
1fae4ecfb965e163f6ce154b3988b3181678742a
|
[
"BSD-3-Clause"
] | 1
|
2015-02-02T06:55:08.000Z
|
2016-01-20T06:11:59.000Z
|
chrome/test/functional/tracing/tab_tracker.py
|
Crystalnix/BitPop
|
1fae4ecfb965e163f6ce154b3988b3181678742a
|
[
"BSD-3-Clause"
] | 2
|
2015-12-08T00:37:41.000Z
|
2017-04-06T05:34:05.000Z
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import uuid
class TabTracker(object):
"""Uniquely track tabs within a window.
This allows the creation of tabs whose indices can be
determined even after lower indexed tabs have been closed, therefore changing
that tab's index.
This is accomplished via a containing window which is created and tracked via
the window's index. As a result of this, all calls to open and close tabs in
this TabTracker's window must go through the appropriate instance of the
TabTracker. Also note that if a lower indexed window is closed after this
TabTracker is instantiated, this TabTracker will lose track of its window
"""
def __init__(self, browser, visible=False):
"""
Args:
browser: an instance of PyUITest
visible: whether or not this TabTracker's window will be visible
"""
# A binary search tree would be faster, but this is easier to write.
# If this needs to become faster, understand that the important operations
# here are append, arbitrary deletion and searching.
self._uuids = [None]
self._window_idx = browser.GetBrowserWindowCount()
self._browser = browser
browser.OpenNewBrowserWindow(visible)
# We leave the 0'th tab empty to have something to close on __del__
def __del__(self):
self._browser.CloseBrowserWindow(self._window_idx)
def CreateTab(self, url='about:blank'):
"""Create a tracked tab and return its uuid.
Args:
url: a URL to navigate to
Returns:
a uuid uniquely identifying that tab within this TabTracker
"""
self._browser.AppendTab(url, self._window_idx)
# We use uuids here rather than a monotonic integer to prevent confusion
# with the tab index.
tab_uuid = uuid.uuid4()
self._uuids.append(tab_uuid)
return tab_uuid
def ReleaseTab(self, tab_uuid):
"""Release and close a tab tracked by this TabTracker.
Args:
tab_uuid: the uuid of the tab to close
"""
idx = self.GetTabIndex(tab_uuid)
self._browser.GetBrowserWindow(self._window_idx).GetTab(idx).Close()
del self._uuids[idx]
def GetTabIndex(self, tab_uuid):
"""Get the index of a tracked tab within this TabTracker's window.
Args:
tab_uuid: the uuid of the tab to close
Returns:
the index of the tab within this TabTracker's window
"""
return self._uuids.index(tab_uuid)
def GetWindowIndex(self):
"""Get the index of this TabTracker's window.
Returns:
the index of this TabTracker's window
"""
return self._window_idx
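# Illustrative usage sketch (assumes a live PyUITest instance named `test`,
# which is not defined in this module): tabs are addressed by uuid instead of
# by index, so releasing one tab never invalidates the handle of another.
#
# tracker = TabTracker(test, visible=False)
# first = tracker.CreateTab('about:blank')
# second = tracker.CreateTab('http://example.com')
# tracker.ReleaseTab(first)           # `second` stays valid...
# idx = tracker.GetTabIndex(second)   # ...even though its index has shifted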
| 32.107143
| 79
| 0.713756
|
ef7bd578c7bea83fe6be9a085b81dd8ee148236f
| 532
|
py
|
Python
|
superadmin/adminauth/models.py
|
nkmrohit/python
|
bd644d51909cda548684b5da98eab998564f3568
|
[
"Apache-2.0"
] | null | null | null |
superadmin/adminauth/models.py
|
nkmrohit/python
|
bd644d51909cda548684b5da98eab998564f3568
|
[
"Apache-2.0"
] | null | null | null |
superadmin/adminauth/models.py
|
nkmrohit/python
|
bd644d51909cda548684b5da98eab998564f3568
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Person(models.Model):
""" an actual singular human being """
name = models.CharField(blank=True, max_length=100)
email = models.EmailField()
created_at = models.DateTimeField(auto_now=True)
#created_by = models.ForeignKey(User, blank=True, null=True)
created_by = models.ForeignKey(User,on_delete=models.CASCADE)
def __str__(self):
return self.name
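# Illustrative shell sketch (names and values are made up for the example):
#
# from django.contrib.auth.models import User
# from adminauth.models import Person
# admin = User.objects.first()
# Person.objects.create(name="Ada", email="ada@example.com", created_by=admin)
#
# Note that auto_now=True refreshes created_at on every save(); auto_now_add=True
# would record the creation time only.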
| 38
| 69
| 0.682331
|
99d2c4613091adf01cb982ccd52d36c8ee40179a
| 1,053
|
py
|
Python
|
pipeline_files/rename_abyss_contigs.py
|
juliadouglasf/snakemake-partial-genome-pipeline
|
896e46046103573b5bac1896b9fad122c34ed94b
|
[
"MIT"
] | 2
|
2021-05-28T20:55:37.000Z
|
2021-06-02T16:47:28.000Z
|
pipeline_files/rename_abyss_contigs.py
|
juliadouglasf/snakemake-partial-genome-pipeline
|
896e46046103573b5bac1896b9fad122c34ed94b
|
[
"MIT"
] | null | null | null |
pipeline_files/rename_abyss_contigs.py
|
juliadouglasf/snakemake-partial-genome-pipeline
|
896e46046103573b5bac1896b9fad122c34ed94b
|
[
"MIT"
] | 1
|
2021-06-24T14:27:07.000Z
|
2021-06-24T14:27:07.000Z
|
"""
Author: Jackson Eyres
Copyright: Government of Canada
License: MIT
"""
from Bio import SeqIO
import os
import glob
import argparse
def main():
parser = argparse.ArgumentParser(description='Renames Abyss contigs to more closely match SPAdes')
parser.add_argument("input", type=str,
help='Input File')
parser.add_argument('output', type=str,
help='Output File')
args = parser.parse_args()
print("Renaming Contigs in {}".format(args.input))
rename_contigs(args.input, args.output)
def rename_contigs(input, output):
seqs = []
with open(input, "r") as f:
for seq in SeqIO.parse(f, 'fasta'):
seq.name = ""
split = seq.description.split(" ")
header = "NODE_{}_length_{}_cov_{}".format(split[0],split[1],split[2])
seq.id = header
seq.description = ""
seqs.append(seq)
with open(output, "w") as g:
SeqIO.write(seqs, handle=g, format="fasta")
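# Illustrative example of the transformation (the record is made up): an ABySS
# FASTA entry whose description reads "1 500 25.3" (contig id, length, k-mer
# coverage) is rewritten with the SPAdes-style id "NODE_1_length_500_cov_25.3"
# and an empty description, so tools that expect SPAdes headers can parse it.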
if __name__ == "__main__":
main()
| 27.710526
| 102
| 0.602089
|
189257632e8195e7e9ac12deaf9c333e2508d1c5
| 2,235
|
py
|
Python
|
users/middleware.py
|
shubhankar5/Mitron-Achatting-app-in-django
|
524086254794a713110e496b70588865116c322f
|
[
"Apache-2.0"
] | 7
|
2021-03-10T13:28:30.000Z
|
2021-12-22T15:40:16.000Z
|
users/middleware.py
|
shubhankar5/Mitron-Achatting-app-in-django
|
524086254794a713110e496b70588865116c322f
|
[
"Apache-2.0"
] | 1
|
2022-03-11T04:29:39.000Z
|
2022-03-12T17:57:23.000Z
|
users/middleware.py
|
shubhankar5/Mitron-Achatting-app-in-django
|
524086254794a713110e496b70588865116c322f
|
[
"Apache-2.0"
] | 4
|
2021-07-10T16:49:28.000Z
|
2022-03-11T04:54:21.000Z
|
from django.conf import settings
from django.shortcuts import redirect
from django.urls import reverse
from . import views as user_views
from django.core.cache import cache
from datetime import datetime
from django.contrib import auth
import time
from django.contrib import messages
from django.contrib.auth import logout
from django.contrib.auth.views import redirect_to_login
EXEMPT_URLS = [reverse(settings.LOGIN_URL)]
if hasattr(settings, 'EXEMPT_URLS'):
EXEMPT_URLS += [reverse(url) for url in settings.EXEMPT_URLS]
class LoginRequiredMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
return response
def process_view(self, request, view_func, view_args, view_kwargs):
assert hasattr(request,'user')
path = request.path_info
url_is_exempt = any(url == path for url in EXEMPT_URLS)
if request.user.is_authenticated and url_is_exempt:
return redirect('users-home')
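# Illustrative configuration sketch (the URL names are made up, not taken from
# the project): EXEMPT_URLS above is assembled from settings, so pages such as
# registration or password reset can stay reachable without a login via, e.g.,
#
# EXEMPT_URLS = ['users-register', 'password-reset']
#
# in settings.py; every entry is passed through reverse(), so it must be a
# resolvable URL name.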
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError:
MiddlewareMixin = object
SESSION_TIMEOUT_KEY = "_session_init_timestamp_"
class SessionTimeoutMiddleware(MiddlewareMixin):
def process_request(self, request):
if not hasattr(request, "session") or request.session.is_empty():
return
init_time = request.session.setdefault(SESSION_TIMEOUT_KEY, time.time())
expire_seconds = getattr(
settings, "SESSION_EXPIRE_SECONDS", settings.SESSION_COOKIE_AGE
)
session_is_expired = time.time() - init_time > expire_seconds
if session_is_expired:
logout(request)
request.session.flush()
messages.info(request, "You have been logged out due to inactivity")
return redirect_to_login(next=request.path)
expire_since_last_activity = getattr(
settings, "SESSION_EXPIRE_AFTER_LAST_ACTIVITY", True
)
grace_period = getattr(
settings, "SESSION_EXPIRE_AFTER_LAST_ACTIVITY_GRACE_PERIOD", 1
)
if expire_since_last_activity and time.time() - init_time > grace_period:
request.session[SESSION_TIMEOUT_KEY] = time.time()
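# Illustrative configuration sketch (the values are made up): the middleware
# reads its thresholds from Django settings, so idle sessions could be expired
# after 30 minutes with, e.g.,
#
# SESSION_EXPIRE_SECONDS = 1800
# SESSION_EXPIRE_AFTER_LAST_ACTIVITY = True
# SESSION_EXPIRE_AFTER_LAST_ACTIVITY_GRACE_PERIOD = 60
#
# in settings.py; if SESSION_EXPIRE_SECONDS is absent, SESSION_COOKIE_AGE is
# used as the fallback, exactly as in the getattr() call above.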
| 30.616438
| 81
| 0.733781
|
028200a9cb9fdc7f5251bb533a762409fd336958
| 50,927
|
py
|
Python
|
src/sage/combinat/sf/witt.py
|
drvinceknight/sage
|
00199fb220aa173d8585b9e90654dafd3247d82d
|
[
"BSL-1.0"
] | 2
|
2015-08-11T05:05:47.000Z
|
2019-05-15T17:27:25.000Z
|
src/sage/combinat/sf/witt.py
|
kaushik94/sage
|
00199fb220aa173d8585b9e90654dafd3247d82d
|
[
"BSL-1.0"
] | null | null | null |
src/sage/combinat/sf/witt.py
|
kaushik94/sage
|
00199fb220aa173d8585b9e90654dafd3247d82d
|
[
"BSL-1.0"
] | 1
|
2020-07-24T12:04:03.000Z
|
2020-07-24T12:04:03.000Z
|
"""
Witt symmetric functions
"""
#*****************************************************************************
# Copyright (C) 2007 Mike Hansen <mhansen@gmail.com>
# 2012 Mike Zabrocki <mike.zabrocki@gmail.com>
# 2013 Darij Grinberg <darijgrinberg@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
import multiplicative
from sage.matrix.all import matrix
class SymmetricFunctionAlgebra_witt(multiplicative.SymmetricFunctionAlgebra_multiplicative):
r"""
The Witt symmetric function basis (or Witt basis, to be short).
The Witt basis of the ring of symmetric functions is
denoted by `(x_{\lambda})` in [HazWitt1]_, section 9.63, and by
`(q_{\lambda})` in [DoranIV1996]_. We will denote this basis by
`(w_{\lambda})`. It is a multiplicative basis (meaning that
`w_{\emptyset} = 1` and that every partition `\lambda` satisfies
`w_{\lambda} = w_{\lambda_1} w_{\lambda_2} w_{\lambda_3} \cdots`,
where `w_i` means `w_{(i)}` for every nonnegative integer `i`).
This basis can be defined in various ways. Probably the most
well-known one is using the equation
.. MATH::
\prod_{d=1}^{\infty} (1 - w_d t^d)^{-1} = \sum_{n=0}^{\infty} h_n t^n
where `t` is a formal variable and `h_n` are the complete
homogeneous symmetric functions, extended to `0` by `h_0 = 1`.
This equation allows one to uniquely determine the functions
`w_1, w_2, w_3, \ldots` by recursion; one consequently extends the
definition to all `w_{\lambda}` by requiring multiplicativity.
A way to rewrite the above equation without power series is:
.. MATH::
h_n = \sum_{\lambda \vdash n} w_{\lambda}
for all nonnegative integers `n`, where `\lambda \vdash n` means
that `\lambda` is a partition of `n`.
A similar equation (which is easily seen to be equivalent to the
former) is
.. MATH::
e_n = \sum_{\lambda} (-1)^{n - \ell(\lambda)} w_{\lambda},
with the sum running only over *strict* partitions `\lambda` of
`n` this time. This equation can also be used to recursively
define the `w_n`. Furthermore, every positive integer `n`
satisfies
.. MATH::
p_n = \sum_{d\mid n} d w_d^{n/d},
and this can be used to define the `w_n` recursively over any
ring which is torsion-free as a `\ZZ`-module. While these
equations all yield easy formulas for classical bases of the
ring of symmetric functions in terms of the Witt symmetric
functions, it seems difficult to obtain explicit formulas in
the other direction.
The Witt symmetric functions owe their name to the fact that
the ring of symmetric functions can be viewed as the coordinate
ring of the group scheme of Witt vectors, and the Witt
symmetric functions are the functions that send a Witt vector
to its components (whereas the powersum symmetric functions
send a Witt vector to its ghost components). Details can be
found in [HazWitt1]_ or section 3.2 of [BorWi2004]_.
INPUT:
- ``Sym`` -- an instance of the ring of the symmetric functions.
- ``coerce_h`` - (default: ``True``) a boolean that determines
whether the transition maps between the Witt basis and the
complete homogeneous basis will be cached and registered as
coercions.
- ``coerce_e`` - (default: ``False``) a boolean that determines
whether the transition maps between the Witt basis and the
elementary symmetric basis will be cached and registered as
coercions.
- ``coerce_p`` - (default: ``False``) a boolean that determines
whether the transition maps between the Witt basis and the
powersum basis will be cached and registered as coercions (or
conversions, if the base ring is not a `\QQ`-algebra).
REFERENCES:
.. [HazWitt1] Michiel Hazewinkel. *Witt vectors. Part 1*.
:arXiv:`0804.3888v1`
.. [DoranIV1996] William F. Doran IV.
*A Proof of Reutenauer's `-q_{(n)}` Conjecture*.
Journal of combinatorial theory, Series A 74, pp. 342-344 (1996),
article no. 0056. :doi:`10.1006/jcta.1996.0056`
.. [BorWi2004] James Borger, Ben Wieland.
*Plethystic algebra*.
:arXiv:`math/0407227v1`
EXAMPLES:
Here are the first few Witt symmetric functions, in various bases::
sage: Sym = SymmetricFunctions(QQ)
sage: w = Sym.w()
sage: e = Sym.e()
sage: h = Sym.h()
sage: p = Sym.p()
sage: s = Sym.s()
sage: m = Sym.m()
sage: p(w([1]))
p[1]
sage: m(w([1]))
m[1]
sage: e(w([1]))
e[1]
sage: h(w([1]))
h[1]
sage: s(w([1]))
s[1]
sage: p(w([2]))
-1/2*p[1, 1] + 1/2*p[2]
sage: m(w([2]))
-m[1, 1]
sage: e(w([2]))
-e[2]
sage: h(w([2]))
-h[1, 1] + h[2]
sage: s(w([2]))
-s[1, 1]
sage: p(w([3]))
-1/3*p[1, 1, 1] + 1/3*p[3]
sage: m(w([3]))
-2*m[1, 1, 1] - m[2, 1]
sage: e(w([3]))
-e[2, 1] + e[3]
sage: h(w([3]))
-h[2, 1] + h[3]
sage: s(w([3]))
-s[2, 1]
sage: Sym = SymmetricFunctions(ZZ)
sage: w = Sym.w()
sage: e = Sym.e()
sage: h = Sym.h()
sage: s = Sym.s()
sage: m = Sym.m()
sage: p = Sym.p()
sage: m(w([4]))
-9*m[1, 1, 1, 1] - 4*m[2, 1, 1] - 2*m[2, 2] - m[3, 1]
sage: e(w([4]))
-e[2, 1, 1] + e[3, 1] - e[4]
sage: h(w([4]))
-h[1, 1, 1, 1] + 2*h[2, 1, 1] - h[2, 2] - h[3, 1] + h[4]
sage: s(w([4]))
-s[1, 1, 1, 1] - s[2, 1, 1] - s[2, 2] - s[3, 1]
Some examples of conversions the other way::
sage: w(h[3])
w[1, 1, 1] + w[2, 1] + w[3]
sage: w(e[3])
-w[2, 1] + w[3]
sage: w(m[2,1])
2*w[2, 1] - 3*w[3]
sage: w(p[3])
w[1, 1, 1] + 3*w[3]
Antipodes::
sage: w([1]).antipode()
-w[1]
sage: w([2]).antipode()
-w[1, 1] - w[2]
This holds for all odd `i` and is easily proven by induction::
sage: all( w([i]).antipode() == -w([i]) for i in range(1, 10, 2) )
True
The Witt basis does not allow for simple expressions for
comultiplication and antipode in general (this is related to the
fact that the sum of two Witt vectors isn't easily described in
terms of the components). Therefore, most computations with Witt
symmetric functions, as well as conversions and coercions, pass
through the complete homogeneous symmetric functions by default.
However, one can also use the elementary symmetric functions
instead, or (if the base ring is a `\QQ`-algebra) the powersum
symmetric functions. This is what the optional keyword variables
``coerce_e``, ``coerce_h`` and ``coerce_p`` are for. These
variables do not affect the results of the (non-underscored)
methods of ``self``, but they affect the speed of the computations
(the more of these variables are set to ``True``, the
faster these are) and on the size of the cache (the more of
these variables are set to ``True``, the bigger the cache). Let us
check that the results are the same no matter to what the
variables are set::
sage: Sym = SymmetricFunctions(QQ)
sage: p = Sym.p()
sage: wh = Sym.w()
sage: we = Sym.w(coerce_h=False, coerce_e=True)
sage: wp = Sym.w(coerce_h=False, coerce_p=True)
sage: all( p(wh(lam)) == p(we(lam)) == p(wp(lam)) for lam in Partitions(4) )
True
sage: all ( wh(p(lam)).monomial_coefficients()
....: == we(p(lam)).monomial_coefficients()
....: == wp(p(lam)).monomial_coefficients() for lam in Partitions(4) )
True
TESTS:
Let us check that all the above computations work with a
non-default setting as well::
sage: Sym = SymmetricFunctions(QQ)
sage: w = Sym.w(coerce_h=False, coerce_p=True)
sage: e = Sym.e()
sage: h = Sym.h()
sage: p = Sym.p()
sage: s = Sym.s()
sage: m = Sym.m()
sage: p(w([1]))
p[1]
sage: m(w([1]))
m[1]
sage: e(w([1]))
e[1]
sage: h(w([1]))
h[1]
sage: s(w([1]))
s[1]
sage: p(w([2]))
-1/2*p[1, 1] + 1/2*p[2]
sage: m(w([2]))
-m[1, 1]
sage: e(w([2]))
-e[2]
sage: h(w([2]))
-h[1, 1] + h[2]
sage: s(w([2]))
-s[1, 1]
sage: p(w([3]))
-1/3*p[1, 1, 1] + 1/3*p[3]
sage: m(w([3]))
-2*m[1, 1, 1] - m[2, 1]
sage: e(w([3]))
-e[2, 1] + e[3]
sage: h(w([3]))
-h[2, 1] + h[3]
sage: s(w([3]))
-s[2, 1]
sage: Sym = SymmetricFunctions(ZZ)
sage: w = Sym.w()
sage: e = Sym.e()
sage: h = Sym.h()
sage: s = Sym.s()
sage: m = Sym.m()
sage: p = Sym.p()
sage: m(w([4]))
-9*m[1, 1, 1, 1] - 4*m[2, 1, 1] - 2*m[2, 2] - m[3, 1]
sage: e(w([4]))
-e[2, 1, 1] + e[3, 1] - e[4]
sage: h(w([4]))
-h[1, 1, 1, 1] + 2*h[2, 1, 1] - h[2, 2] - h[3, 1] + h[4]
sage: s(w([4]))
-s[1, 1, 1, 1] - s[2, 1, 1] - s[2, 2] - s[3, 1]
sage: w(h[3])
w[1, 1, 1] + w[2, 1] + w[3]
sage: w(e[3])
-w[2, 1] + w[3]
sage: w(m[2,1])
2*w[2, 1] - 3*w[3]
sage: w(p[3])
w[1, 1, 1] + 3*w[3]
sage: w([1]).antipode()
-w[1]
sage: w([2]).antipode()
-w[1, 1] - w[2]
sage: all( w([i]).antipode() == -w([i]) for i in range(1, 10, 2) )
True
Another non-default setting::
sage: Sym = SymmetricFunctions(QQ)
sage: w = Sym.w(coerce_h=False, coerce_e=True)
sage: e = Sym.e()
sage: h = Sym.h()
sage: p = Sym.p()
sage: s = Sym.s()
sage: m = Sym.m()
sage: p(w([1]))
p[1]
sage: m(w([1]))
m[1]
sage: e(w([1]))
e[1]
sage: h(w([1]))
h[1]
sage: s(w([1]))
s[1]
sage: p(w([2]))
-1/2*p[1, 1] + 1/2*p[2]
sage: m(w([2]))
-m[1, 1]
sage: e(w([2]))
-e[2]
sage: h(w([2]))
-h[1, 1] + h[2]
sage: s(w([2]))
-s[1, 1]
sage: p(w([3]))
-1/3*p[1, 1, 1] + 1/3*p[3]
sage: m(w([3]))
-2*m[1, 1, 1] - m[2, 1]
sage: e(w([3]))
-e[2, 1] + e[3]
sage: h(w([3]))
-h[2, 1] + h[3]
sage: s(w([3]))
-s[2, 1]
sage: Sym = SymmetricFunctions(ZZ)
sage: w = Sym.w()
sage: e = Sym.e()
sage: h = Sym.h()
sage: s = Sym.s()
sage: m = Sym.m()
sage: p = Sym.p()
sage: m(w([4]))
-9*m[1, 1, 1, 1] - 4*m[2, 1, 1] - 2*m[2, 2] - m[3, 1]
sage: e(w([4]))
-e[2, 1, 1] + e[3, 1] - e[4]
sage: h(w([4]))
-h[1, 1, 1, 1] + 2*h[2, 1, 1] - h[2, 2] - h[3, 1] + h[4]
sage: s(w([4]))
-s[1, 1, 1, 1] - s[2, 1, 1] - s[2, 2] - s[3, 1]
sage: w(h[3])
w[1, 1, 1] + w[2, 1] + w[3]
sage: w(e[3])
-w[2, 1] + w[3]
sage: w(m[2,1])
2*w[2, 1] - 3*w[3]
sage: w(p[3])
w[1, 1, 1] + 3*w[3]
sage: w([1]).antipode()
-w[1]
sage: w([2]).antipode()
-w[1, 1] - w[2]
sage: all( w([i]).antipode() == -w([i]) for i in range(1, 10, 2) )
....: #this holds for all odd i and is easily proven by induction
True
"""
def __init__(self, Sym, coerce_h=True, coerce_e=False, coerce_p=False):
"""
Initialize ``self``.
TESTS::
sage: w = SymmetricFunctions(QQ).w()
sage: TestSuite(w).run(skip=['_test_associativity', '_test_distributivity', '_test_prod'])
sage: TestSuite(w).run(elements = [w[1,1]+w[2], w[1]+2*w[1,1]])
"""
self._coerce_h = coerce_h
self._coerce_e = coerce_e
self._coerce_p = coerce_p
multiplicative.SymmetricFunctionAlgebra_multiplicative.__init__(self, Sym, "Witt", 'w')
def _precompute_cache(self, n, to_self_cache, from_self_cache, transition_matrices, inverse_transition_matrices, to_self_gen_function):
"""
Compute the transition matrices between ``self`` and another
multiplicative homogeneous basis in the homogeneous components of
degree `n`.
The results are not returned, but rather stored in the caches.
This assumes that the transition matrices in all degrees smaller
than `n` have already been computed and cached!
INPUT:
- ``n`` -- nonnegative integer
- ``to_self_cache`` -- a cache which stores the coordinates of
the elements of the other basis with respect to the
basis ``self``
- ``from_self_cache`` -- a cache which stores the coordinates
of the elements of ``self`` with respect to the other
basis
- ``transition_matrices`` -- a cache for transition matrices
which contain the coordinates of the elements of the other
basis with respect to ``self``
- ``inverse_transition_matrices`` -- a cache for transition
matrices which contain the coordinates of the elements of
``self`` with respect to the other basis
- ``to_self_gen_function`` -- a function which takes a
positive integer `n` and returns the element of the other
basis corresponding to the partition `[n]` expanded with
respect to the Witt basis ``self`` (as an element of
``self``, not as a dictionary)
Examples for usage of this function are the ``_precompute_h``,
``_precompute_e`` and ``_precompute_p`` methods of this class.
EXAMPLES:
The examples below demonstrate how the caches are built
step by step using the ``_precompute_cache`` method. In order
not to influence the outcome of other doctests, we make sure
not to use the caches internally used by this class, but
rather to create new caches::
sage: Sym = SymmetricFunctions(QQ)
sage: w = Sym.w()
sage: toy_to_self_cache = {}
sage: toy_from_self_cache = {}
sage: toy_transition_matrices = {}
sage: toy_inverse_transition_matrices = {}
sage: l = lambda c: [ (i[0],[j for j in sorted(i[1].items())]) for i in sorted(c.items())]
sage: l(toy_to_self_cache)
[]
sage: def toy_gen_function(n):
....: if n > 1:
....: return w(Partition([n])) + n * w(Partition([n-1,1]))
....: return w(Partition([n]))
sage: w._precompute_cache(0, toy_to_self_cache,
....: toy_from_self_cache,
....: toy_transition_matrices,
....: toy_inverse_transition_matrices,
....: toy_gen_function)
sage: l(toy_to_self_cache)
[([], [([], 1)])]
sage: w._precompute_cache(1, toy_to_self_cache,
....: toy_from_self_cache,
....: toy_transition_matrices,
....: toy_inverse_transition_matrices,
....: toy_gen_function)
sage: l(toy_to_self_cache)
[([], [([], 1)]), ([1], [([1], 1)])]
sage: w._precompute_cache(2, toy_to_self_cache,
....: toy_from_self_cache,
....: toy_transition_matrices,
....: toy_inverse_transition_matrices,
....: toy_gen_function)
sage: l(toy_to_self_cache)
[([], [([], 1)]),
([1], [([1], 1)]),
([1, 1], [([1, 1], 1)]),
([2], [([1, 1], 2), ([2], 1)])]
sage: toy_transition_matrices[2]
[1 2]
[0 1]
sage: toy_inverse_transition_matrices[2]
[ 1 -2]
[ 0 1]
sage: toy_transition_matrices.keys()
[0, 1, 2]
"""
# Much of this code is adapted from dual.py
base_ring = self.base_ring()
zero = base_ring.zero()
from sage.combinat.partition import Partition, Partitions_n
# Handle the n == 0 case separately
if n == 0:
part = Partition([])
to_self_cache[ part ] = { part: base_ring.one() }
from_self_cache[ part ] = { part: base_ring.one() }
transition_matrices[n] = matrix(base_ring, [[1]])
inverse_transition_matrices[n] = matrix(base_ring, [[1]])
return
partitions_n = Partitions_n(n).list()
# The other basis will be called B from now on.
# This contains the data for the transition matrix from the
# basis B to the Witt basis self.
transition_matrix_n = matrix(base_ring, len(partitions_n), len(partitions_n))
# This first section calculates how the basis elements of the
# basis B are expressed in terms of the Witt basis ``self``.
# For every partition p of size n, expand B[p] in terms of
# the Witt basis self using multiplicativity and
# to_self_gen_function.
i = 0
for s_part in partitions_n:
# s_mcs will be self(B[s_part])._monomial_coefficients
s_mcs = {}
# We need to compute the coordinates of B[s_part] in the Witt basis.
hsp_in_w_basis = self.one()
for p in s_part:
hsp_in_w_basis *= to_self_gen_function(p)
# Now, hsp_in_w_basis is B[s_part] expanded in the Witt
# basis self (this is the same as the coercion self(B[s_part]).
j = 0
for p_part in partitions_n:
if p_part in hsp_in_w_basis._monomial_coefficients:
sp = hsp_in_w_basis._monomial_coefficients[p_part]
s_mcs[p_part] = sp
transition_matrix_n[i,j] = sp
j += 1
to_self_cache[ s_part ] = s_mcs
i += 1
# Save the transition matrix
transition_matrices[n] = transition_matrix_n
# This second section calculates how the basis elements of
# self expand in terms of the basis B. We do this by
# computing the inverse of the matrix transition_matrix_n
# obtained above.
# TODO: Possibly this can be sped up by using properties
# of this matrix (e.g., it being triangular in most standard cases).
# Are there significantly faster ways to invert a triangular
# matrix (compared to the usual matrix inversion algorithms)?
inverse_transition = ~transition_matrix_n
for i in range(len(partitions_n)):
d_mcs = {}
for j in range(len(partitions_n)):
if inverse_transition[i,j] != zero:
d_mcs[ partitions_n[j] ] = inverse_transition[i,j]
from_self_cache[ partitions_n[i] ] = d_mcs
inverse_transition_matrices[n] = inverse_transition
def _precompute_h(self, n):
"""
Compute the transition matrices between ``self`` and the complete
homogeneous basis in the homogeneous components of degree `n`
(and in those of smaller degree, if not already computed).
The result is not returned, but rather stored in the cache.
This assumes that the ``coerce_h`` keyword has been set to
``True`` in the initialization of ``self`` (otherwise the cache
does not exist).
INPUT:
- ``n`` -- nonnegative integer
EXAMPLES:
The examples below demonstrate how the caches of ``w`` are built
step by step using the ``_precompute_h`` method. Thus they rely on
an untouched Witt symmetric basis that hasn't already seen some
of its cache filled by other computations. We obtain such a basis
by choosing a ground ring unlikely to appear elsewhere::
sage: Sym = SymmetricFunctions(ZZ['hell', 'yeah'])
sage: w = Sym.Witt()
sage: l = lambda c: [ (i[0],[j for j in sorted(i[1].items())]) for i in sorted(c.items())]
sage: l(w._h_to_self_cache)
[]
sage: w._precompute_h(0)
sage: l(w._h_to_self_cache)
[([], [([], 1)])]
sage: w._precompute_h(1)
sage: l(w._h_to_self_cache)
[([], [([], 1)]), ([1], [([1], 1)])]
sage: w._precompute_h(2)
sage: l(w._h_to_self_cache)
[([], [([], 1)]),
([1], [([1], 1)]),
([1, 1], [([1, 1], 1)]),
([2], [([1, 1], 1), ([2], 1)])]
sage: w._h_transition_matrices[2]
[1 1]
[0 1]
sage: w._h_inverse_transition_matrices[2]
[ 1 -1]
[ 0 1]
sage: w._h_transition_matrices.keys()
[0, 1, 2]
"""
l = len(self._h_transition_matrices)
if l <= n:
from sage.combinat.partition import Partitions_n
from sage.misc.cachefunc import cached_function
@cached_function
def wsum(m): # expansion of h_m in w-basis, for m > 0
return self._from_dict({lam: 1 for lam in Partitions_n(m)})
for i in range(l, n + 1):
self._precompute_cache(i, self._h_to_self_cache,
self._h_from_self_cache,
self._h_transition_matrices,
self._h_inverse_transition_matrices,
wsum)
def _precompute_e(self, n):
"""
Compute the transition matrices between ``self`` and the elementary
symmetric basis in the homogeneous components of degree `n`
(and in those of smaller degree, if not already computed).
The result is not returned, but rather stored in the cache.
This assumes that the ``coerce_e`` keyword has been set to
``True`` in the initialization of ``self`` (otherwise the cache
does not exist).
INPUT:
- ``n`` -- nonnegative integer
EXAMPLES:
The examples below demonstrate how the caches of ``w`` are built
step by step using the ``_precompute_e`` method. Thus they rely on
an untouched Witt symmetric basis that hasn't already seen some
of its cache filled by other computations. We obtain such a basis
by choosing a ground ring unlikely to appear elsewhere::
sage: Sym = SymmetricFunctions(ZZ['hell', 'yeah'])
sage: w = Sym.Witt(coerce_e=True)
sage: l = lambda c: [ (i[0],[j for j in sorted(i[1].items())]) for i in sorted(c.items())]
sage: l(w._e_to_self_cache)
[]
sage: w._precompute_e(0)
sage: l(w._e_to_self_cache)
[([], [([], 1)])]
sage: w._precompute_e(1)
sage: l(w._e_to_self_cache)
[([], [([], 1)]), ([1], [([1], 1)])]
sage: w._precompute_e(2)
sage: l(w._e_to_self_cache)
[([], [([], 1)]),
([1], [([1], 1)]),
([1, 1], [([1, 1], 1)]),
([2], [([2], -1)])]
sage: w._e_transition_matrices[2]
[-1 0]
[ 0 1]
sage: w._e_inverse_transition_matrices[2]
[-1 0]
[ 0 1]
"""
l = len(self._e_transition_matrices)
if l <= n:
from sage.combinat.partition import Partitions
from sage.misc.cachefunc import cached_function
@cached_function
def wsum_e(m): # expansion of e_m in w-basis, for m > 0
return self._from_dict({lam: (-1 if (m + len(lam)) % 2 == 1 else 1)
for lam in Partitions(m, max_slope=-1)})
for i in range(l, n + 1):
self._precompute_cache(i, self._e_to_self_cache,
self._e_from_self_cache,
self._e_transition_matrices,
self._e_inverse_transition_matrices,
wsum_e)
def _precompute_p(self, n):
"""
Compute the transition matrices between ``self`` and the powersum
basis in the homogeneous components of degree `n`
(and in those of smaller degree, if not already computed).
The result is not returned, but rather stored in the cache.
This assumes that the ``coerce_p`` keyword has been set to
``True`` in the initialization of ``self`` (otherwise the cache
does not exist).
INPUT:
- ``n`` -- nonnegative integer
EXAMPLES:
The examples below demonstrate how the caches of ``w`` are built
step by step using the ``_precompute_p`` method. Thus they rely on
an untouched Witt symmetric basis that hasn't already seen some
of its cache filled by other computations. We obtain such a basis
by choosing a ground ring unlikely to appear elsewhere::
sage: Sym = SymmetricFunctions(QQ['hell', 'yeah'])
sage: w = Sym.Witt(coerce_h=False, coerce_e=True, coerce_p=True)
sage: l = lambda c: [ (i[0],[j for j in sorted(i[1].items())]) for i in sorted(c.items())]
sage: l(w._p_to_self_cache)
[]
sage: w._precompute_p(0)
sage: l(w._p_to_self_cache)
[([], [([], 1)])]
sage: w._precompute_p(1)
sage: l(w._p_to_self_cache)
[([], [([], 1)]), ([1], [([1], 1)])]
sage: w._precompute_p(2)
sage: l(w._p_to_self_cache)
[([], [([], 1)]), ([1], [([1], 1)]), ([1, 1], [([1, 1], 1)]), ([2], [([1, 1], 1), ([2], 2)])]
sage: w._p_transition_matrices[2]
[2 1]
[0 1]
sage: w._p_inverse_transition_matrices[2]
[ 1/2 -1/2]
[ 0 1]
"""
l = len(self._p_transition_matrices)
if l <= n:
from sage.rings.arith import divisors
from sage.combinat.partition import Partition
from sage.misc.cachefunc import cached_function
@cached_function
def wsum_p(m): # expansion of p_m in w-basis, for m > 0
return self._from_dict({Partition([d] * (m // d)): d
for d in divisors(m)})
for i in range(l, n + 1):
self._precompute_cache(i, self._p_to_self_cache,
self._p_from_self_cache,
self._p_transition_matrices,
self._p_inverse_transition_matrices,
wsum_p)
def _h_to_w_on_basis(self, lam):
r"""
Return the complete homogeneous symmetric function ``h[lam]``
expanded in the Witt basis, where ``lam`` is a partition.
This assumes that the ``coerce_h`` keyword has been set to ``True`` in
the initialization of ``self`` (otherwise the cache does not exist).
INPUT:
- ``lam`` -- a partition
OUTPUT:
- the expansion of ``h[lam]`` in the Witt basis ``self``
EXAMPLES::
sage: Sym = SymmetricFunctions(QQ)
sage: h = Sym.homogeneous()
sage: w = Sym.w()
sage: w._h_to_w_on_basis(Partition([]))
w[]
sage: w._h_to_w_on_basis(Partition([4,2,1]))
w[1, 1, 1, 1, 1, 1, 1] + 2*w[2, 1, 1, 1, 1, 1] + 2*w[2, 2, 1, 1, 1] + w[2, 2, 2, 1] + w[3, 1, 1, 1, 1] + w[3, 2, 1, 1] + w[4, 1, 1, 1] + w[4, 2, 1]
sage: h(w._h_to_w_on_basis(Partition([3,1]))) == h[3,1]
True
"""
n = sum(lam)
self._precompute_h(n)
return self._from_dict(self._h_to_self_cache[lam])
def _w_to_h_on_basis(self, lam):
r"""
Return the Witt symmetric function ``w[lam]`` expanded in the
complete homogeneous basis, where ``lam`` is a partition.
This assumes that the ``coerce_h`` keyword has been set to ``True`` in
the initialization of ``self`` (otherwise the cache does not exist).
INPUT:
- ``lam`` -- a partition
OUTPUT:
- the expansion of ``w[lam]`` in the complete
homogeneous basis of ``self.realization_of()``
EXAMPLES::
sage: Sym = SymmetricFunctions(QQ)
sage: h = Sym.homogeneous()
sage: w = Sym.w()
sage: w._w_to_h_on_basis(Partition([]))
h[]
sage: w._w_to_h_on_basis(Partition([4,2,1]))
h[1, 1, 1, 1, 1, 1, 1] - 3*h[2, 1, 1, 1, 1, 1] + 3*h[2, 2, 1, 1, 1] - h[2, 2, 2, 1] + h[3, 1, 1, 1, 1] - h[3, 2, 1, 1] - h[4, 1, 1, 1] + h[4, 2, 1]
sage: w(w._w_to_h_on_basis(Partition([3,1]))) == w[3,1]
True
"""
n = sum(lam)
self._precompute_h(n)
return self._h._from_dict(self._h_from_self_cache[lam])
def _e_to_w_on_basis(self, lam):
r"""
Return the elementary symmetric function ``e[lam]`` expanded in
the Witt basis, where ``lam`` is a partition.
This assumes that the ``coerce_e`` keyword has been set to ``True`` in
the initialization of ``self`` (otherwise the cache does not exist).
INPUT:
- ``lam`` -- a partition
OUTPUT:
- the expansion of ``e[lam]`` in the Witt basis ``self``
EXAMPLES::
sage: Sym = SymmetricFunctions(QQ)
sage: e = Sym.elementary()
sage: w = Sym.w(coerce_e=True)
sage: w._e_to_w_on_basis(Partition([]))
w[]
sage: w._e_to_w_on_basis(Partition([4,2,1]))
-w[3, 2, 1, 1] + w[4, 2, 1]
sage: e(w._e_to_w_on_basis(Partition([3,1]))) == e[3,1]
True
"""
n = sum(lam)
self._precompute_e(n)
return self._from_dict(self._e_to_self_cache[lam])
def _w_to_e_on_basis(self, lam):
r"""
Return the Witt symmetric function ``w[lam]``
expanded in the elementary symmetric basis, where
``lam`` is a partition.
This assumes that the ``coerce_e`` keyword has been set to ``True`` in
the initialization of ``self`` (otherwise the cache does not exist).
INPUT:
- ``lam`` -- a partition
OUTPUT:
- the expansion of ``w[lam]`` in the elementary
symmetric basis of ``self.realization_of()``
EXAMPLES::
sage: Sym = SymmetricFunctions(QQ)
sage: e = Sym.elementary()
sage: w = Sym.w(coerce_e=True)
sage: w._w_to_e_on_basis(Partition([]))
e[]
sage: w._w_to_e_on_basis(Partition([4,2,1]))
e[2, 2, 1, 1, 1] - e[3, 2, 1, 1] + e[4, 2, 1]
sage: w(w._w_to_e_on_basis(Partition([3,1]))) == w[3,1]
True
"""
n = sum(lam)
self._precompute_e(n)
return self._e._from_dict(self._e_from_self_cache[lam])
def _p_to_w_on_basis(self, lam):
r"""
Return the powersum symmetric function ``p[lam]`` expanded in
the Witt basis, where ``lam`` is a partition.
This assumes that the ``coerce_p`` keyword has been set to ``True`` in
the initialization of ``self`` (otherwise the cache does not exist).
INPUT:
- ``lam`` -- a partition
OUTPUT:
- the expansion of ``p[lam]`` in the Witt basis ``self``
EXAMPLES::
sage: Sym = SymmetricFunctions(QQ)
sage: p = Sym.power()
sage: w = Sym.w(coerce_p=True)
sage: w._p_to_w_on_basis(Partition([]))
w[]
sage: w._p_to_w_on_basis(Partition([4,2,1]))
w[1, 1, 1, 1, 1, 1, 1] + 2*w[2, 1, 1, 1, 1, 1] + 2*w[2, 2, 1, 1, 1] + 4*w[2, 2, 2, 1] + 4*w[4, 1, 1, 1] + 8*w[4, 2, 1]
sage: p(w._p_to_w_on_basis(Partition([3,1]))) == p[3,1]
True
"""
n = sum(lam)
self._precompute_p(n)
return self._from_dict(self._p_to_self_cache[lam])
def _w_to_p_on_basis(self, lam):
r"""
Return the Witt symmetric function ``w[lam]`` expanded in the
powersum basis, where ``lam`` is a partition.
This assumes that the ``coerce_p`` keyword has been set to ``True`` in
the initialization of ``self`` (otherwise the cache does not exist).
INPUT:
- ``lam`` -- a partition
OUTPUT:
- the expansion of ``w[lam]`` in the powersum
basis of ``self.realization_of()``
EXAMPLES::
sage: Sym = SymmetricFunctions(QQ)
sage: p = Sym.power()
sage: w = Sym.w(coerce_p=True)
sage: w._w_to_p_on_basis(Partition([]))
p[]
sage: w._w_to_p_on_basis(Partition([4,2,1]))
3/16*p[1, 1, 1, 1, 1, 1, 1] - 5/16*p[2, 1, 1, 1, 1, 1] + 3/16*p[2, 2, 1, 1, 1] - 1/16*p[2, 2, 2, 1] - 1/8*p[4, 1, 1, 1] + 1/8*p[4, 2, 1]
sage: w(w._w_to_p_on_basis(Partition([3,1]))) == w[3,1]
True
"""
n = sum(lam)
self._precompute_p(n)
return self._p._from_dict(self._p_from_self_cache[lam])
def __init_extra__(self):
"""
Sets up caches for the transition maps to other bases, and registers
them as coercions.
EXAMPLES::
sage: Sym = SymmetricFunctions(QQ) # indirect doctest
sage: h = Sym.h(); w = Sym.w()
sage: phi = h.coerce_map_from(w); phi
Generic morphism:
From: Symmetric Functions over Rational Field in the Witt basis
To: Symmetric Functions over Rational Field in the homogeneous basis
sage: phi(w.an_element()) == h(w.an_element())
True
sage: e = Sym.e(); w2 = Sym.w(coerce_e=True)
sage: psi = e.coerce_map_from(w2); psi
Generic morphism:
From: Symmetric Functions over Rational Field in the Witt basis
To: Symmetric Functions over Rational Field in the elementary basis
sage: psi(w2.an_element()) == e(w2.an_element())
True
"""
#category = sage.categories.all.ModulesWithBasis(self.base_ring())
# Set up coercions and conversions with appropriate other bases.
# self._p, self._e and self._h will be the powersum basis, the elementary
# symmetric basis and the complete homogeneous basis (over the same base
# ring as self), respectively (but they are only set if the respective
# arguments ``coerce_p``, ``coerce_e`` and ``coerce_h`` are True).
# self._friendly will be the one available basis which makes computations
# the easiest.
self._friendly = None
if self._coerce_p:
self._p = self.realization_of().p()
# Set up the cache for conversion from the Witt basis
# to the powersum basis.
# cache for the coordinates of the elements
# of the powersum basis with respect to the Witt basis
self._p_to_self_cache = {}
# cache for the coordinates of the elements
# of the Witt basis with respect to the powersum basis
self._p_from_self_cache = {}
# cache for transition matrices which contain the coordinates of
# the elements of the powersum basis with respect to the Witt basis
self._p_transition_matrices = {}
# cache for transition matrices which contain the coordinates of
# the elements of the Witt basis with respect to the powersum basis
self._p_inverse_transition_matrices = {}
self.register_coercion(self._p._module_morphism(self._p_to_w_on_basis, codomain = self))
from sage.rings.rational_field import RationalField
if self.base_ring().has_coerce_map_from(RationalField()):
self._p.register_coercion(self._module_morphism(self._w_to_p_on_basis, codomain = self._p))
self._friendly = self._p
else:
# self._w_to_p_on_basis is a partial map at best
self._p.register_conversion(self._module_morphism(self._w_to_p_on_basis, codomain = self._p))
if (not self._coerce_e) and (not self._coerce_h):
# ensure that self has coercion at least to one other basis,
# or else coercion-based computations will fail
self._coerce_h = True
elif (not self._coerce_e) and (not self._coerce_h):
self._coerce_h = True # at least one coercion is needed!
if self._coerce_h:
self._h = self.realization_of().h()
# Set up the cache for conversion from the Witt basis to the complete
# homogeneous basis. (This is the conversion that is used by default.)
# cache for the coordinates of the elements
# of the homogeneous basis with respect to the Witt basis
self._h_to_self_cache = {}
# cache for the coordinates of the elements
# of the Witt basis with respect to the homogeneous basis
self._h_from_self_cache = {}
# cache for transition matrices which contain the coordinates of
# the elements of the homogeneous basis with respect to the Witt basis
self._h_transition_matrices = {}
# cache for transition matrices which contain the coordinates of
# the elements of the Witt basis with respect to the homogeneous basis
self._h_inverse_transition_matrices = {}
self.register_coercion(self._h._module_morphism(self._h_to_w_on_basis, codomain = self))
self._h.register_coercion(self._module_morphism(self._w_to_h_on_basis, codomain = self._h))
if self._friendly is None:
self._friendly = self._h
if self._coerce_e:
self._e = self.realization_of().e()
# Set up the cache for conversion from the Witt basis to the elementary
# symmetric basis.
# cache for the coordinates of the elements
# of the elementary basis with respect to the Witt basis
self._e_to_self_cache = {}
# cache for the coordinates of the elements
# of the Witt basis with respect to the elementary basis
self._e_from_self_cache = {}
# cache for transition matrices which contain the coordinates of
# the elements of the elementary basis with respect to the Witt basis
self._e_transition_matrices = {}
# cache for transition matrices which contain the coordinates of
# the elements of the Witt basis with respect to the elementary basis
self._e_inverse_transition_matrices = {}
self.register_coercion(self._e._module_morphism(self._e_to_w_on_basis, codomain = self))
self._e.register_coercion(self._module_morphism(self._w_to_e_on_basis, codomain = self._e))
if self._friendly is None:
self._friendly = self._e
def from_other_uncached(self, u):
r"""
Return an element ``u`` of another basis of the ring of
symmetric functions, expanded in the Witt basis ``self``.
The result is the same as ``self(u)``, but the
``from_other_uncached`` method does not precompute a
cache with transition matrices. Thus,
``from_other_uncached`` is faster when ``u`` is sparse.
INPUT:
- ``u`` -- an element of ``self.realization_of()``
OUTPUT:
- the expansion of ``u`` in the Witt basis ``self``
EXAMPLES::
sage: Sym = SymmetricFunctions(QQ)
sage: p = Sym.p()
sage: w = Sym.w()
sage: a = p([3,2]) - p([4,1]) + 27 * p([3])
sage: w.from_other_uncached(a) == w(a)
True
Here's a verification of an obvious fact that would take
long with regular coercion::
sage: fouc = w.from_other_uncached
sage: fouc(p([15]))
w[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + 3*w[3, 3, 3, 3, 3] + 5*w[5, 5, 5] + 15*w[15]
sage: fouc(p([15])) * fouc(p([14])) == fouc(p([15, 14]))
True
Other bases::
sage: e = Sym.e()
sage: h = Sym.h()
sage: s = Sym.s()
sage: all( fouc(e(lam)) == w(e(lam)) for lam in Partitions(5) )
True
sage: all( fouc(h(lam)) == w(h(lam)) for lam in Partitions(5) )
True
sage: all( fouc(p(lam)) == w(p(lam)) for lam in Partitions(5) )
True
sage: all( fouc(s(lam)) == w(s(lam)) for lam in Partitions(5) )
True
"""
parent_name = u.parent().basis_name()
from sage.misc.cachefunc import cached_function
if parent_name == "homogeneous":
from sage.combinat.partition import Partitions_n
@cached_function
def wsum(m): # expansion of h_m in w-basis, for m > 0
return self._from_dict({lam: 1 for lam in Partitions_n(m)})
result = self.zero()
for lam, a in u.monomial_coefficients().items():
product = self.one()
for i in lam:
product *= wsum(i)
result += a * product
return result
if parent_name == "powersum":
from sage.rings.arith import divisors
from sage.combinat.partition import Partition
@cached_function
def wsum_p(m): # expansion of p_m in w-basis, for m > 0
return self._from_dict({Partition([d] * (m // d)): d
for d in divisors(m)})
result = self.zero()
for lam, a in u.monomial_coefficients().items():
product = self.one()
for i in lam:
product *= wsum_p(i)
result += a * product
return result
# Coerce u into elementary symmetric basis.
if parent_name != "elementary":
u = u.parent().realization_of().elementary()(u)
from sage.combinat.partition import Partitions
@cached_function
def wsum_e(m): # expansion of e_m in w-basis, for m > 0
return self._from_dict({lam: (-1 if (m + len(lam)) % 2 == 1 else 1)
for lam in Partitions(m, max_slope=-1)})
result = self.zero()
for lam, a in u.monomial_coefficients().items():
product = self.one()
for i in lam:
product *= wsum_e(i)
result += a * product
return result
def coproduct(self, elt):
r"""
Return the coproduct of the element ``elt``.
INPUT:
- ``elt`` -- a symmetric function written in this basis
OUTPUT:
- The coproduct acting on ``elt``; the result is an element of the
tensor squared of the basis ``self``
EXAMPLES::
sage: w = SymmetricFunctions(QQ).w()
sage: w[2].coproduct()
w[] # w[2] - w[1] # w[1] + w[2] # w[]
sage: w.coproduct(w[2])
w[] # w[2] - w[1] # w[1] + w[2] # w[]
sage: w[2,1].coproduct()
w[] # w[2, 1] - w[1] # w[1, 1] + w[1] # w[2] - w[1, 1] # w[1] + w[2] # w[1] + w[2, 1] # w[]
sage: w.coproduct(w[2,1])
w[] # w[2, 1] - w[1] # w[1, 1] + w[1] # w[2] - w[1, 1] # w[1] + w[2] # w[1] + w[2, 1] # w[]
TESTS:
The same, but with other settings::
sage: w = SymmetricFunctions(QQ).w(coerce_h=False, coerce_e=True)
sage: w[2].coproduct()
w[] # w[2] - w[1] # w[1] + w[2] # w[]
sage: w.coproduct(w[2])
w[] # w[2] - w[1] # w[1] + w[2] # w[]
sage: w[2,1].coproduct()
w[] # w[2, 1] - w[1] # w[1, 1] + w[1] # w[2] - w[1, 1] # w[1] + w[2] # w[1] + w[2, 1] # w[]
sage: w.coproduct(w[2,1])
w[] # w[2, 1] - w[1] # w[1, 1] + w[1] # w[2] - w[1, 1] # w[1] + w[2] # w[1] + w[2, 1] # w[]
sage: w = SymmetricFunctions(QQ).w(coerce_h=False, coerce_p=True)
sage: w[2].coproduct()
w[] # w[2] - w[1] # w[1] + w[2] # w[]
sage: w.coproduct(w[2])
w[] # w[2] - w[1] # w[1] + w[2] # w[]
sage: w[2,1].coproduct()
w[] # w[2, 1] - w[1] # w[1, 1] + w[1] # w[2] - w[1, 1] # w[1] + w[2] # w[1] + w[2, 1] # w[]
sage: w.coproduct(w[2,1])
w[] # w[2, 1] - w[1] # w[1, 1] + w[1] # w[2] - w[1, 1] # w[1] + w[2] # w[1] + w[2, 1] # w[]
"""
from sage.categories.tensor import tensor
friendly = self._friendly
return self.tensor_square().sum(coeff * tensor([self(friendly[x]), self(friendly[y])])
for ((x,y), coeff) in friendly(elt).coproduct())
def verschiebung(self, n):
r"""
Return the image of the symmetric function ``self`` under the
`n`-th Verschiebung operator.
The `n`-th Verschiebung operator `\mathbf{V}_n` is defined to be
the unique algebra endomorphism `V` of the ring of symmetric
functions that satisfies `V(h_r) = h_{r/n}` for every positive
integer `r` divisible by `n`, and satisfies `V(h_r) = 0` for
every positive integer `r` not divisible by `n`. This operator
`\mathbf{V}_n` is a Hopf algebra endomorphism. For every
nonnegative integer `r` with `n \mid r`, it satisfies
.. MATH::
\mathbf{V}_n(h_r) = h_{r/n},
\quad \mathbf{V}_n(p_r) = n p_{r/n},
\quad \mathbf{V}_n(e_r) = (-1)^{r - r/n} e_{r/n},
\quad \mathbf{V}_n(w_r) = w_{r/n},
(where `h` is the complete homogeneous basis, `p` is the
powersum basis, `e` is the elementary basis, and `w` is the
Witt basis). For every nonnegative integer `r` with `n \nmid r`,
it satisfies
.. MATH::
\mathbf{V}_n(h_r) = \mathbf{V}_n(p_r) = \mathbf{V}_n(e_r)
= \mathbf{V}_n(w_r) = 0.
The `n`-th Verschiebung operator is also called the `n`-th
Verschiebung endomorphism. Its name derives from the Verschiebung
(German for "shift") endomorphism of the Witt vectors.
The `n`-th Verschiebung operator is adjoint to the `n`-th
Frobenius operator (see :meth:`frobenius` for its definition)
with respect to the Hall scalar product (:meth:`scalar`).
The action of the `n`-th Verschiebung operator on the Schur basis
can also be computed explicitly. The following (probably clumsier
than necessary) description can be obtained by solving exercise
7.61 in Stanley's [STA]_.
Let `\lambda` be a partition. Let `n` be a positive integer. If
the `n`-core of `\lambda` is nonempty, then
`\mathbf{V}_n(s_\lambda) = 0`. Otherwise, the following method
computes `\mathbf{V}_n(s_\lambda)`: Write the partition `\lambda`
in the form `(\lambda_1, \lambda_2, \ldots, \lambda_{ns})` for some
nonnegative integer `s`. (If `n` does not divide the length of
`\lambda`, then this is achieved by adding trailing zeroes to
`\lambda`.) Set `\beta_i = \lambda_i + ns - i` for every
`i \in \{ 1, 2, \ldots, ns \}`. Then,
`(\beta_1, \beta_2, \ldots, \beta_{ns})` is a strictly decreasing
sequence of nonnegative integers. Stably sort the list
`(1, 2, \ldots, ns)` in order of (weakly) increasing remainder of
`-1 - \beta_i` modulo `n`. Let `\xi` be the sign of the
permutation that is used for this sorting. Let `\psi` be the sign
of the permutation that is used to stably sort the list
`(1, 2, \ldots, ns)` in order of (weakly) increasing remainder of
`i - 1` modulo `n`. (Notice that `\psi = (-1)^{n(n-1)s(s-1)/4}`.)
Then, `\mathbf{V}_n(s_\lambda) = \xi \psi \prod_{i = 0}^{n - 1}
s_{\lambda^{(i)}}`, where
`(\lambda^{(0)}, \lambda^{(1)}, \ldots, \lambda^{(n - 1)})`
is the `n`-quotient of `\lambda`.
INPUT:
- ``n`` -- a positive integer
OUTPUT:
The result of applying the `n`-th Verschiebung operator (on the ring of
symmetric functions) to ``self``.
EXAMPLES::
sage: Sym = SymmetricFunctions(ZZ)
sage: w = Sym.w()
sage: w[3].verschiebung(2)
0
sage: w[4].verschiebung(4)
w[1]
TESTS:
Let us check that this method on the Witt basis gives the
same result as the implementation in sfa.py on the complete
homogeneous basis::
sage: Sym = SymmetricFunctions(QQ)
sage: w = Sym.w(); h = Sym.h()
sage: all( w(h(lam)).verschiebung(3) == w(h(lam).verschiebung(3))
....: for lam in Partitions(6) )
True
sage: all( h(w(lam)).verschiebung(2) == h(w(lam).verschiebung(2))
....: for lam in Partitions(4) )
True
"""
parent = self.parent()
w_coords_of_self = self.monomial_coefficients().items()
from sage.combinat.partition import Partition
dct = {Partition(map(lambda i: i // n, lam)): coeff
for (lam, coeff) in w_coords_of_self
if all( i % n == 0 for i in lam )}
result_in_w_basis = parent._from_dict(dct)
return parent(result_in_w_basis)
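# Illustrative standalone sketch (plain Python, independent of Sage; the helper
# name is made up): the dictionary built by wsum_p above encodes the identity
# p_n = sum_{d | n} d * w_d^(n/d). For n = 3 it reproduces the doctest result
# w(p[3]) = w[1, 1, 1] + 3*w[3].
def _illustrate_wsum_p(n):
    # map each partition (as a tuple of equal parts) to its coefficient
    divisors = [d for d in range(1, n + 1) if n % d == 0]
    return {(d,) * (n // d): d for d in divisors}
# _illustrate_wsum_p(3) == {(1, 1, 1): 1, (3,): 3}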
| 38.610311
| 159
| 0.53235
|
f487360ed97adf8e2a216a3c8a2bd775384d8be0
| 4,716
|
py
|
Python
|
backend/config/settings.py
|
itechub/Jane
|
3f4bbc75c5eab8fa1789c985367bdf3cc334adfb
|
[
"MIT"
] | 4
|
2019-12-22T10:33:01.000Z
|
2020-04-19T02:46:44.000Z
|
backend/config/settings.py
|
itechub/Jane
|
3f4bbc75c5eab8fa1789c985367bdf3cc334adfb
|
[
"MIT"
] | 37
|
2019-10-14T10:07:19.000Z
|
2020-09-24T15:35:30.000Z
|
backend/config/settings.py
|
itechub/Jane
|
3f4bbc75c5eab8fa1789c985367bdf3cc334adfb
|
[
"MIT"
] | null | null | null |
"""
Django settings for jane project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import datetime
import os
from config import config
from config.config import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
# Third-party apps
"corsheaders",
"rest_framework",
"rest_framework_swagger",
# Self-defined apps
"accounts",
"articles",
"resources",
"tags",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "config.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "config.wsgi.application"
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"
},
]
# django reset framework JWT settings
REST_FRAMEWORK = {
"DEFAULT_PERMISSION_CLASSES": [
"rest_framework.permissions.IsAuthenticated"
],
"DEFAULT_AUTHENTICATION_CLASSES": [
"rest_framework_jwt.authentication.JSONWebTokenAuthentication"
],
}
JWT_AUTH = {
"JWT_ENCODE_HANDLER": "rest_framework_jwt.utils.jwt_encode_handler",
"JWT_DECODE_HANDLER": "rest_framework_jwt.utils.jwt_decode_handler",
"JWT_PAYLOAD_HANDLER": "rest_framework_jwt.utils.jwt_payload_handler",
"JWT_PAYLOAD_GET_USER_ID_HANDLER": "rest_framework_jwt.utils.jwt_get_user_id_from_payload_handler",
# Overwrite the default JWT response
"JWT_SECRET_KEY": config.SECRET_KEY,
"JWT_GET_USER_SECRET_KEY": None,
"JWT_PUBLIC_KEY": None,
"JWT_PRIVATE_KEY": None,
"JWT_ALGORITHM": "HS256",
"JWT_VERIFY": True,
"JWT_VERIFY_EXPIRATION": True,
"JWT_LEEWAY": 0,
"JWT_EXPIRATION_DELTA": datetime.timedelta(days=1),
"JWT_AUDIENCE": None,
"JWT_ISSUER": None,
"JWT_ALLOW_REFRESH": False,
"JWT_REFRESH_EXPIRATION_DELTA": datetime.timedelta(days=7),
"JWT_AUTH_HEADER_PREFIX": "JWT",
"JWT_AUTH_COOKIE": None,
}
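# Illustrative usage note (the token value is a placeholder): with
# JWT_AUTH_HEADER_PREFIX = "JWT", API clients authenticate by sending the
# issued token as
#
# Authorization: JWT <access-token>
#
# and tokens stop validating one day after issue (JWT_EXPIRATION_DELTA), with
# refresh disabled (JWT_ALLOW_REFRESH = False).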
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = "zh-Hans"
TIME_ZONE = "Asia/Shanghai"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# CORS setting
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_HEADERS = (
"accept",
"accept-encoding",
"authorization",
"content-type",
"dnt",
"origin",
"user-agent",
"x-csrftoken",
"x-requested-with",
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = "/statics/"
STATIC_ROOT = os.path.join(BASE_DIR, "collectstatic", "statics")
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_FILE_PREFIX = "media"
# Customizing authentication and user
AUTH_USER_MODEL = "accounts.User"
AUTHENTICATION_BACKENDS = ["accounts.backends.EmailOrUsernameModelBackend"]
LOGIN_URL = "rest_framework:login"
LOGOUT_URL = "rest_framework:logout"
LOGIN_REDIRECT_URL = "swagger"
| 27.260116
| 103
| 0.712044
|
8eca268e1eac0f03e5e4ab62e2deabdbd5d78694
| 10,578
|
py
|
Python
|
demos/text_to_speech_demo/python/models/mel2wave_ie.py
|
xcmyz/open_model_zoo
|
f09cd03628759e0de8d09996fb43dc8f5ba2b724
|
[
"Apache-2.0"
] | null | null | null |
demos/text_to_speech_demo/python/models/mel2wave_ie.py
|
xcmyz/open_model_zoo
|
f09cd03628759e0de8d09996fb43dc8f5ba2b724
|
[
"Apache-2.0"
] | null | null | null |
demos/text_to_speech_demo/python/models/mel2wave_ie.py
|
xcmyz/open_model_zoo
|
f09cd03628759e0de8d09996fb43dc8f5ba2b724
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os.path as osp
import numpy as np
from utils.wav_processing import (
fold_with_overlap, infer_from_discretized_mix_logistic, pad_tensor, xfade_and_unfold,
)
class WaveRNNIE:
def __init__(self, model_upsample, model_rnn, ie, target=11000, overlap=550, hop_length=275, bits=9, device='CPU',
verbose=False, upsampler_width=-1):
"""
Create a helper class that runs WaveRNN inference.
:param model_upsample: path to xml with upsample model of WaveRNN
:param model_rnn: path to xml with rnn parameters of WaveRNN model
:param target: length of the processed fragments
:param overlap: overlap of the processed frames
:param hop_length: The number of samples between successive frames, e.g., the columns of a spectrogram.
:return:
"""
self.verbose = verbose
self.device = device
self.target = target
self.overlap = overlap
self.dynamic_overlap = overlap
self.hop_length = hop_length
self.bits = bits
self.indent = 550
self.pad = 2
self.batch_sizes = [1, 2, 4, 8, 16, 32, 64, 128, 256]
self.ie = ie
self.upsample_net = self.load_network(model_upsample)
if upsampler_width > 0:
orig_shape = self.upsample_net.input_info['mels'].input_data.shape
self.upsample_net.reshape({"mels": (orig_shape[0], upsampler_width, orig_shape[2])})
self.upsample_exec = self.create_exec_network(self.upsample_net)
self.rnn_net = self.load_network(model_rnn)
self.rnn_exec = self.create_exec_network(self.rnn_net, batch_sizes=self.batch_sizes)
# fixed number of the mels in mel-spectrogramm
self.mel_len = self.upsample_net.input_info['mels'].input_data.shape[1] - 2 * self.pad
self.rnn_width = self.rnn_net.input_info['x'].input_data.shape[1]
def load_network(self, model_xml):
model_bin_name = ".".join(osp.basename(model_xml).split('.')[:-1]) + ".bin"
model_bin = osp.join(osp.dirname(model_xml), model_bin_name)
print("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
net = self.ie.read_network(model=model_xml, weights=model_bin)
return net
def create_exec_network(self, net, batch_sizes=None):
if batch_sizes is not None:
exec_net = []
for b_s in batch_sizes:
net.batch_size = b_s
exec_net.append(self.ie.load_network(network=net, device_name=self.device))
else:
exec_net = self.ie.load_network(network=net, device_name=self.device)
return exec_net
@staticmethod
def get_rnn_init_states(b_size=1, rnn_dims=328):
h1 = np.zeros((b_size, rnn_dims), dtype=float)
h2 = np.zeros((b_size, rnn_dims), dtype=float)
x = np.zeros((b_size, 1), dtype=float)
return h1, h2, x
def forward(self, mels):
mels = (mels + 4) / 8
np.clip(mels, 0, 1, out=mels)
mels = np.transpose(mels)
mels = np.expand_dims(mels, axis=0)
n_parts = mels.shape[1] // self.mel_len + 1 if mels.shape[1] % self.mel_len > 0 else mels.shape[
1] // self.mel_len
upsampled_mels = []
aux = []
last_padding = 0
for i in range(n_parts):
i_start = i * self.mel_len
i_end = i_start + self.mel_len
if i_end > mels.shape[1]:
last_padding = i_end - mels.shape[1]
mel = np.pad(mels[:, i_start:mels.shape[1], :], ((0, 0), (0, last_padding), (0, 0)), 'constant',
constant_values=0)
else:
mel = mels[:, i_start:i_end, :]
upsampled_mels_b, aux_b = self.forward_upsample(mel)
upsampled_mels.append(upsampled_mels_b)
aux.append(aux_b)
if len(aux) > 1:
upsampled_mels = np.concatenate(upsampled_mels, axis=1)
aux = np.concatenate(aux, axis=1)
else:
upsampled_mels = upsampled_mels[0]
aux = aux[0]
if last_padding > 0:
upsampled_mels = upsampled_mels[:, :-last_padding * self.hop_length, :]
aux = aux[:, :-last_padding * self.hop_length, :]
upsampled_mels, (_, self.dynamic_overlap) = fold_with_overlap(upsampled_mels, self.target, self.overlap)
aux, _ = fold_with_overlap(aux, self.target, self.overlap)
audio = self.forward_rnn(mels, upsampled_mels, aux)
audio = (audio * (2 ** 15 - 1)).astype("<h")
return audio
def forward_upsample(self, mels):
mels = pad_tensor(mels, pad=self.pad)
out = self.upsample_exec.infer(inputs={"mels": mels})
upsample_mels, aux = out["upsample_mels"][:, self.indent:-self.indent, :], out["aux"]
return upsample_mels, aux
def forward_rnn(self, mels, upsampled_mels, aux):
wave_len = (mels.shape[1] - 1) * self.hop_length
d = aux.shape[2] // 4
aux_split = [aux[:, :, d * i:d * (i + 1)] for i in range(4)]
b_size, seq_len, _ = upsampled_mels.shape
seq_len = min(seq_len, aux_split[0].shape[1])
if b_size not in self.batch_sizes:
            raise Exception('Incorrect batch size {0}. Supported batch sizes: {1}'.format(b_size, self.batch_sizes))
active_network = self.batch_sizes.index(b_size)
h1, h2, x = self.get_rnn_init_states(b_size, self.rnn_width)
output = []
for i in range(seq_len):
m_t = upsampled_mels[:, i, :]
a1_t, a2_t, a3_t, a4_t = \
(a[:, i, :] for a in aux_split)
out = self.rnn_exec[active_network].infer(inputs={"m_t": m_t, "a1_t": a1_t, "a2_t": a2_t, "a3_t": a3_t,
"a4_t": a4_t, "h1.1": h1, "h2.1": h2, "x": x})
logits = out["logits"]
h1 = out["h1"]
h2 = out["h2"]
sample = infer_from_discretized_mix_logistic(logits)
x = sample[:]
x = np.expand_dims(x, axis=1)
output.append(sample)
output = np.stack(output).transpose(1, 0)
output = output.astype(np.float64)
if b_size > 1:
output = xfade_and_unfold(output, self.dynamic_overlap)
else:
output = output[0]
fade_out = np.linspace(1, 0, 20 * self.hop_length)
output = output[:wave_len]
output[-20 * self.hop_length:] *= fade_out
return output
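# A minimal usage sketch for WaveRNNIE, kept as comments (hypothetical model paths;
# assumes `ie` is an IECore instance and `mel` is the 2-D mel-spectrogram produced by
# the companion acoustic model):
#   vocoder = WaveRNNIE('wavernn_upsampler.xml', 'wavernn_rnn.xml', ie, device='CPU')
#   audio = vocoder.forward(mel)  # int16 PCM samples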
class MelGANIE:
def __init__(self, model, ie, device='CPU', default_width=800):
"""
        Wrapper class that provides MelGAN inference.
        See the usage sketch after this class definition.
        :param model: path to the .xml file with the MelGAN model
:param ie: instance of the IECore
:param device: target device
:return:
"""
self.device = device
self.ie = ie
self.scales = 4
self.hop_length = 256
self.net = self.load_network(model)
if self.net.input_info['mel'].input_data.shape[2] != default_width:
orig_shape = self.net.input_info['mel'].input_data.shape
new_shape = (orig_shape[0], orig_shape[1], default_width)
self.net.reshape({"mel": new_shape})
self.exec_net = self.create_exec_network(self.net, self.scales)
# @xcmyz: attention! the length of mel-spectrogram is fixed
# fixed number of columns in mel-spectrogramm
self.mel_len = self.net.input_info['mel'].input_data.shape[2]
self.widths = [self.mel_len * (i + 1) for i in range(self.scales)]
def load_network(self, model_xml):
model_bin_name = ".".join(osp.basename(model_xml).split('.')[:-1]) + ".bin"
model_bin = osp.join(osp.dirname(model_xml), model_bin_name)
print("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
net = self.ie.read_network(model=model_xml, weights=model_bin)
return net
def create_exec_network(self, net, scales=None):
if scales is not None:
orig_shape = net.input_info['mel'].input_data.shape
exec_net = []
for i in range(scales):
new_shape = (orig_shape[0], orig_shape[1], orig_shape[2] * (i + 1))
net.reshape({"mel": new_shape})
exec_net.append(self.ie.load_network(network=net, device_name=self.device))
net.reshape({"mel": orig_shape})
else:
exec_net = self.ie.load_network(network=net, device_name=self.device)
return exec_net
def forward(self, mel):
mel = np.expand_dims(mel, axis=0)
res_audio = []
last_padding = 0
if mel.shape[2] % self.mel_len:
last_padding = self.mel_len - mel.shape[2] % self.mel_len
mel = np.pad(mel, ((0, 0), (0, 0), (0, last_padding)), 'constant', constant_values=-11.5129)
active_net = -1
cur_w = -1
cols = mel.shape[2]
for i, w in enumerate(self.widths):
if cols <= w:
cur_w = w
active_net = i
break
if active_net == -1:
cur_w = self.widths[-1]
c_begin = 0
c_end = cur_w
while c_begin < cols:
audio = self.exec_net[active_net].infer(inputs={"mel": mel[:, :, c_begin:c_end]})["audio"]
res_audio.extend(audio)
c_begin = c_end
if c_end + cur_w >= cols:
for i, w in enumerate(self.widths):
if w >= cols - c_end:
cur_w = w
active_net = i
break
c_end += cur_w
if last_padding:
audio = res_audio[:-self.hop_length * last_padding]
else:
audio = res_audio
audio = np.array(audio).astype(dtype=np.int16)
return audio
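# A minimal usage sketch for MelGANIE, kept as comments (hypothetical model path;
# assumes `ie` is an IECore instance and `mel` is the 2-D mel-spectrogram produced by
# the companion acoustic model):
#   vocoder = MelGANIE('melgan.xml', ie, device='CPU')
#   audio = vocoder.forward(mel)  # int16 PCM samples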
| 37.378092
| 118
| 0.585839
|
2845d2a7ecd18dc234dcccdbc15e6a17ab853832
| 739
|
py
|
Python
|
sols/1108.py
|
Paul11100/LeetCode
|
9896c579dff1812c0c76964db8d60603ee715e35
|
[
"MIT"
] | null | null | null |
sols/1108.py
|
Paul11100/LeetCode
|
9896c579dff1812c0c76964db8d60603ee715e35
|
[
"MIT"
] | null | null | null |
sols/1108.py
|
Paul11100/LeetCode
|
9896c579dff1812c0c76964db8d60603ee715e35
|
[
"MIT"
] | null | null | null |
class Solution(object):
# Replace (Accepted + Top Voted), O(1) time, O(1) space (Question specifically for IP address)
def defangIPaddr(self, address):
"""
:type address: str
:rtype: str
"""
return address.replace('.', '[.]')
# # Join and Split (Top Voted), O(1) time and space
# def defangIPaddr(self, address):
# return '[.]'.join(address.split('.'))
# # Regex Substitute (Top Voted), O(1) time and space
# def defangIPaddr(self, address):
# return re.sub('\.', '[.]', address)
# # No library join and replace (Top Voted), O(1) time and space
# def defangIPaddr(self, address):
# return ''.join('[.]' if c == '.' else c for c in address)
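    # Quick sanity check for any of the approaches above:
    #   Solution().defangIPaddr("1.1.1.1") == "1[.]1[.]1[.]1"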
| 35.190476
| 98
| 0.568336
|
0657e4b48d78a94c42bc6056ec6410a5174907bd
| 2,897
|
py
|
Python
|
recommender_app/movies_app.py
|
fra-mari/Two_Movie_Recommenders
|
da046e06e3ee27699f51b8870c4433f984680c69
|
[
"MIT"
] | null | null | null |
recommender_app/movies_app.py
|
fra-mari/Two_Movie_Recommenders
|
da046e06e3ee27699f51b8870c4433f984680c69
|
[
"MIT"
] | null | null | null |
recommender_app/movies_app.py
|
fra-mari/Two_Movie_Recommenders
|
da046e06e3ee27699f51b8870c4433f984680c69
|
[
"MIT"
] | null | null | null |
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
This module produces the web application for the Movie Recommender.
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
import random
import logging
import pandas as pd
from flask import Flask
from flask import render_template
from flask import request
from nmf_recommending_engine import get_recommendations, dataframe_updater
from knn_recommending_engine import get_recommendations_knn
logging.basicConfig(filename='RecommenderLog.log',
level=logging.WARNING,
format='%(asctime)s: %(message)s')
MOVIES = pd.read_csv('data_and_models/data/MovieLensDataset/movies.csv')
df_final = pd.read_csv('data_and_models/data/preprocessed_for_nmf/ready_dataset.csv')
MOVIE_IDS_LST = df_final.columns.tolist()
app = Flask(__name__)
@app.route('/')
def main_page():
five_ids = random.sample(MOVIE_IDS_LST,5)
five_titles = []
for id in five_ids:
five_titles.append(MOVIES[MOVIES['movieId']==int(id)]['title'].iloc[0])
return render_template('main.html',
title='🎬 The Statistically Significant Movie Recommender 🎬',
subtitle="Courtesy of Laura Bartolini, Behzad Azarhoushang & Francesco Mari",
subsubtitle="who won't get offended if you don't take their advice...even if you should!",
movie1=five_titles[0],
movie2=five_titles[1],
movie3=five_titles[2],
movie4=five_titles[3],
movie5=five_titles[4])
@app.route('/recommender')
def rec_page():
html_form_data = dict(request.args) # to collect the data from the user (to build the recommendation)
names = list(html_form_data.keys())
logging.warning(f'A new user inserted new ratings for the NMF: {html_form_data}.')
counter = 1
for name in names:
new_key = f'movie_{counter}'
html_form_data[new_key] = html_form_data.pop(name)
counter = counter + 1
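    # Illustration with a hypothetical query string ?Toy%20Story=5&Heat=4:
    # names == ['Toy Story', 'Heat'], and after the loop
    # html_form_data == {'movie_1': '5', 'movie_2': '4'}, i.e. the original title keys
    # are replaced by positional movie_<n> keys before calling get_recommendations.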
recs, new_user = get_recommendations(html_form_data,names)
logging.warning("New NMF recommendations generated based on the user's input.")
dataframe_updater(new_user)
logging.warning("The movie rating generated by the user's input have been added to 'data_and_models/data/preprocessed_for_nmf/ready_dataset.csv'.")
return render_template('recommender.html', movies = recs)
@app.route('/knn_recommender')
def knn_page():
knn_html_form_data = request.args['rating1'] # to collect the data from the user (to build the recommendation)
knn_recs, orig_movie = get_recommendations_knn(knn_html_form_data)
logging.warning("New KNN recommendations generated based on the user's input. No update possible to 'data_and_models/data/preprocessed_for_nmf/ready_dataset.csv'.")
return render_template('knn_recommender.html', movies = knn_recs, input=orig_movie)
if __name__=="__main__":
app.run(port=5000, debug=True)
| 38.626667
| 168
| 0.699344
|
7d691c6a6fab8b1992f04ef78c3b71d8b4afb62a
| 3,804
|
py
|
Python
|
tests/unit_tests/test_tethys_quotas/test_enforce_quota.py
|
msouff/tethys
|
45795d1e6561d5db8fddd838f4d1ae1d91dbb837
|
[
"BSD-2-Clause"
] | 79
|
2015-10-05T13:13:28.000Z
|
2022-02-01T12:30:33.000Z
|
tests/unit_tests/test_tethys_quotas/test_enforce_quota.py
|
msouff/tethys
|
45795d1e6561d5db8fddd838f4d1ae1d91dbb837
|
[
"BSD-2-Clause"
] | 542
|
2015-08-12T22:11:32.000Z
|
2022-03-29T22:18:08.000Z
|
tests/unit_tests/test_tethys_quotas/test_enforce_quota.py
|
msouff/tethys
|
45795d1e6561d5db8fddd838f4d1ae1d91dbb837
|
[
"BSD-2-Clause"
] | 71
|
2016-01-16T01:03:41.000Z
|
2022-03-31T17:55:54.000Z
|
import unittest
from unittest import mock
from tethys_quotas.decorators import enforce_quota
from tethys_quotas.models import ResourceQuota
from django.http import HttpRequest
from tethys_apps.models import TethysApp
from django.core.exceptions import PermissionDenied
@enforce_quota(codename='foo')
def a_controller(request):
return 'Success'
class DecoratorsTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@mock.patch('tethys_quotas.decorators.passes_quota')
@mock.patch('tethys_quotas.decorators.get_active_app')
@mock.patch('tethys_quotas.decorators.ResourceQuota')
def test_enforce_quota_applies_to_app(self, mock_RQ, mock_active_app, mock_passes_quota):
mock_RQ.objects.get.return_value = mock.MagicMock(codename='foo', applies_to='tethys_apps.models.TethysApp')
mock_request = mock.MagicMock(spec=HttpRequest)
mock_active_app.return_value = mock.MagicMock(TethysApp(name='Test App'))
ret = a_controller(mock_request)
mock_passes_quota.assert_called()
self.assertEqual('Success', ret)
@mock.patch('tethys_quotas.decorators.passes_quota')
@mock.patch('tethys_quotas.decorators.ResourceQuota')
def test_enforce_quota_applies_to_user(self, mock_RQ, mock_passes_quota):
mock_RQ.objects.get.return_value = mock.MagicMock(codename='foo', applies_to='django.contrib.auth.models.User')
mock_request = mock.MagicMock(spec=HttpRequest, user=mock.MagicMock())
ret = a_controller(mock_request)
mock_passes_quota.assert_called()
self.assertEqual('Success', ret)
@mock.patch('tethys_quotas.decorators.log')
@mock.patch('tethys_quotas.decorators.ResourceQuota')
def test_enforce_quota_rq_does_not_exist(self, mock_RQ, mock_log):
mock_RQ.objects.get.side_effect = ResourceQuota.DoesNotExist
mock_RQ.DoesNotExist = ResourceQuota.DoesNotExist
mock_request = mock.MagicMock(spec=HttpRequest)
ret = a_controller(mock_request)
mock_log.warning.assert_called_with('ResourceQuota with codename foo does not exist.')
self.assertEqual('Success', ret)
@mock.patch('tethys_quotas.decorators.log')
def test_enforce_quota_no_HttpRequest(self, mock_log):
mock_request = mock.MagicMock()
ret = a_controller(mock_request)
mock_log.warning.assert_called_with('Invalid request')
self.assertEqual('Success', ret)
@mock.patch('tethys_quotas.decorators.log')
@mock.patch('tethys_quotas.decorators.ResourceQuota')
def test_enforce_quota_bad_applies_to(self, mock_RQ, mock_log):
mock_RQ.objects.get.return_value = mock.MagicMock(codename='foo', applies_to='not.valid.rq')
mock_request = mock.MagicMock(spec=HttpRequest)
ret = a_controller(mock_request)
mock_log.warning.assert_called_with('ResourceQuota that applies_to not.valid.rq is not supported')
self.assertEqual('Success', ret)
@mock.patch('tethys_quotas.decorators.passes_quota')
@mock.patch('tethys_quotas.decorators.ResourceQuota')
def test_enforce_quota_passes_quota_false(self, mock_RQ, mock_passes_quota):
mock_RQ.DoesNotExist = ResourceQuota.DoesNotExist
mock_RQ.objects.get.return_value = mock.MagicMock(codename='foo',
help='helpful message',
applies_to='django.contrib.auth.models.User')
mock_request = mock.MagicMock(spec=HttpRequest, user=mock.MagicMock())
mock_passes_quota.return_value = False
with self.assertRaises(PermissionDenied) as context:
a_controller(mock_request)
self.assertTrue("helpful message" in str(context.exception))
| 41.347826
| 119
| 0.721083
|
a53787be0101d42d3392cf45de1e12fefcd38929
| 562
|
py
|
Python
|
mediaplatform/migrations/0006_link_media_items_to_channel.py
|
jbrownrs/issue-376-GDS-link
|
e8cce1b79f46b98a7d24b2da5eca48430fd904a3
|
[
"MIT"
] | 5
|
2019-01-07T17:22:34.000Z
|
2020-10-08T15:03:12.000Z
|
mediaplatform/migrations/0006_link_media_items_to_channel.py
|
jbrownrs/issue-376-GDS-link
|
e8cce1b79f46b98a7d24b2da5eca48430fd904a3
|
[
"MIT"
] | 203
|
2017-12-14T09:51:56.000Z
|
2018-08-28T14:04:08.000Z
|
mediaplatform/migrations/0006_link_media_items_to_channel.py
|
jbrownrs/issue-376-GDS-link
|
e8cce1b79f46b98a7d24b2da5eca48430fd904a3
|
[
"MIT"
] | 5
|
2018-10-22T11:36:01.000Z
|
2020-07-20T05:47:49.000Z
|
# Generated by Django 2.0.7 on 2018-07-31 09:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mediaplatform', '0005_add_channel_model'),
]
operations = [
migrations.AddField(
model_name='mediaitem',
name='channel',
field=models.ForeignKey(null=True, help_text='Channel containing media item', on_delete=django.db.models.deletion.SET_NULL, related_name='items', to='mediaplatform.Channel'),
),
]
| 28.1
| 186
| 0.669039
|
f00846aca7a272abddc7e77b69ea6646404ba432
| 1,037
|
py
|
Python
|
running_dashboard/admin.py
|
meir412/running_website
|
66d810f6fcfd68351e7372bfa315ddeee2ea4cf7
|
[
"MIT"
] | 1
|
2020-04-14T10:32:40.000Z
|
2020-04-14T10:32:40.000Z
|
running_dashboard/admin.py
|
meir412/running_website
|
66d810f6fcfd68351e7372bfa315ddeee2ea4cf7
|
[
"MIT"
] | 18
|
2020-04-09T15:37:10.000Z
|
2021-06-10T18:52:43.000Z
|
running_dashboard/admin.py
|
meir412/running_website
|
66d810f6fcfd68351e7372bfa315ddeee2ea4cf7
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.contrib.sessions.models import Session
from django.contrib.gis import admin as gis_admin
from running_dashboard.models import Run, Neighborhood
# Register your models here.
class SessionAdmin(admin.ModelAdmin):
def _session_data(self, obj):
return obj.get_decoded()
fields = ['session_key', '_session_data', 'expire_date']
list_display = ['session_key', '_session_data', 'expire_date']
readonly_fields = ['_session_data']
class RunAdmin(gis_admin.OSMGeoAdmin):
list_display = ('id', 'runner', 'time_sec', 'start_time', 'length') # full -> list_display = ('id', 'time_sec', 'start_time', 'route', 'neighborhood')
fields = ['runner', ('start_time', 'time_sec'), 'length', 'route']
readonly_fields = ['length']
ordering = ['id']
class NeighborhoodAdmin(gis_admin.OSMGeoAdmin):
list_display = ('id', 'name')
admin.site.register(Session, SessionAdmin)
admin.site.register(Run, RunAdmin)
admin.site.register(Neighborhood, NeighborhoodAdmin)
| 30.5
| 155
| 0.720347
|
3a102afc8ba5423def5dcdf3e1a5d70e8a29d713
| 570
|
py
|
Python
|
tensorflow_tutorial/simple_linear_model/simple_linear_model.py
|
adrianB3/cv_practice
|
615e3f94f985e882bf9c21ab087d056c869571ee
|
[
"MIT"
] | null | null | null |
tensorflow_tutorial/simple_linear_model/simple_linear_model.py
|
adrianB3/cv_practice
|
615e3f94f985e882bf9c21ab087d056c869571ee
|
[
"MIT"
] | null | null | null |
tensorflow_tutorial/simple_linear_model/simple_linear_model.py
|
adrianB3/cv_practice
|
615e3f94f985e882bf9c21ab087d056c869571ee
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from sklearn.metrics import confusion_matrix
print (tf.__version__)
from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets('data\\MNIST\\')
print("Size of: ")
print(" - Training-set:\t\t{}".format(data.train))
print(" - Validation-set:\t{}".format(data.num_val))
print(" - Test-set:\t\t{}".format(data.num_test))
img_size_flat = data.img_size_flat
img_shape = data.img_shape
num_classes = data.num_classes
# TODO data import not working
| 30
| 59
| 0.742105
|
c3dd93a5200a5469305f7d1297c2c67766cf2b17
| 181
|
py
|
Python
|
1.py
|
PSedigh/Python_Class
|
638c73a1b237ef950ebc65994cdc7d7f1330f6ea
|
[
"MIT"
] | null | null | null |
1.py
|
PSedigh/Python_Class
|
638c73a1b237ef950ebc65994cdc7d7f1330f6ea
|
[
"MIT"
] | null | null | null |
1.py
|
PSedigh/Python_Class
|
638c73a1b237ef950ebc65994cdc7d7f1330f6ea
|
[
"MIT"
] | null | null | null |
# Saved transcript of a Python 3.6.4 interactive session, kept as comments so the
# file is valid Python; only the print call below is executable.
# Python 3.6.4 (v3.6.4:d48eceb, Dec 19 2017, 06:54:40) [MSC v.1900 64 bit (AMD64)] on win32
# Type "copyright", "credits" or "license()" for more information.
print("Rasoul")
| 36.2
| 90
| 0.651934
|
e5cb3c4519bc92f396d36c953f0dd110b210b01a
| 1,190
|
py
|
Python
|
app/core/models.py
|
badari412/recipe-app-api
|
551442e4bfce2aa51cf040334131bb079f39668d
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
badari412/recipe-app-api
|
551442e4bfce2aa51cf040334131bb079f39668d
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
badari412/recipe-app-api
|
551442e4bfce2aa51cf040334131bb079f39668d
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new User"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self.db)
return user
def create_superuser(self, email, password):
"""Creates and saves a new superuser"""
user = self.create_user(email=email, password=password)
user.is_staff = True
user.is_superuser = True
user.save(using=self.db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
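# A minimal usage sketch, kept as comments (assumes AUTH_USER_MODEL points at this
# model and a database is configured; the e-mail addresses are placeholders):
#   user = User.objects.create_user(email='user@example.com', password='secret')
#   admin = User.objects.create_superuser(email='admin@example.com', password='secret')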
| 32.162162
| 76
| 0.690756
|
98b15eadd52a1ab7a6b283aa9d19567aab708e2d
| 15,933
|
py
|
Python
|
evaluation.py
|
MEHAMMEDAMINE/ABSA-BERT-pair
|
a5f978574de2e0514b2a09143a3122d2db6df561
|
[
"MIT"
] | null | null | null |
evaluation.py
|
MEHAMMEDAMINE/ABSA-BERT-pair
|
a5f978574de2e0514b2a09143a3122d2db6df561
|
[
"MIT"
] | null | null | null |
evaluation.py
|
MEHAMMEDAMINE/ABSA-BERT-pair
|
a5f978574de2e0514b2a09143a3122d2db6df561
|
[
"MIT"
] | null | null | null |
import argparse
import collections
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.preprocessing import label_binarize
def get_y_true(task_name):
"""
Read file to obtain y_true.
    All five tasks of Sentihood use the test set of task-BERT-pair-NLI-M to get true labels.
    All five tasks of SemEval-2014 use the test set of task-BERT-pair-NLI-M to get true labels.
"""
if task_name in ["sentihood_single", "sentihood_NLI_M", "sentihood_QA_M", "sentihood_NLI_B", "sentihood_QA_B"]:
true_data_file = "data/sentihood/bert-pair/test_NLI_M.tsv"
df = pd.read_csv(true_data_file,sep='\t')
y_true = []
for i in range(len(df)):
label = df['label'][i]
assert label in ['None', 'Positive', 'Negative'], "error!"
if label == 'None':
n = 0
elif label == 'Positive':
n = 1
else:
n = 2
y_true.append(n)
else:
true_data_file = "data/HAAD./bert-pair/test_NLI_M.csv"
df = pd.read_csv(true_data_file,sep='\t',header=None).values
y_true=[]
for i in range(len(df)):
label = df[i][1]
assert label in ['positive', 'neutral', 'negative', 'conflict', 'none'], "error!"
if label == 'positive':
n = 0
elif label == 'neutral':
n = 1
elif label == 'negative':
n = 2
elif label == 'conflict':
n = 3
elif label == 'none':
n = 4
y_true.append(n)
return y_true
def get_y_pred(task_name, pred_data_dir):
"""
Read file to obtain y_pred and scores.
"""
pred=[]
score=[]
if task_name in ["sentihood_NLI_M", "sentihood_QA_M"]:
with open(pred_data_dir, "r", encoding="utf-8") as f:
s=f.readline().strip().split()
while s:
pred.append(int(s[0]))
score.append([float(s[1]),float(s[2]),float(s[3])])
s = f.readline().strip().split()
elif task_name in ["sentihood_NLI_B", "sentihood_QA_B"]:
count = 0
tmp = []
with open(pred_data_dir, "r", encoding="utf-8") as f:
s = f.readline().strip().split()
while s:
tmp.append([float(s[2])])
count += 1
if count % 3 == 0:
tmp_sum = np.sum(tmp)
t = []
for i in range(3):
t.append(tmp[i] / tmp_sum)
score.append(t)
if t[0] >= t[1] and t[0] >= t[2]:
pred.append(0)
elif t[1] >= t[0] and t[1] >= t[2]:
pred.append(1)
else:
pred.append(2)
tmp = []
s = f.readline().strip().split()
elif task_name == "sentihood_single":
count = 0
with open(pred_data_dir + "loc1_general.txt", "r", encoding="utf-8") as f1_general, \
open(pred_data_dir + "loc1_price.txt", "r", encoding="utf-8") as f1_price, \
open(pred_data_dir + "loc1_safety.txt", "r", encoding="utf-8") as f1_safety, \
open(pred_data_dir + "loc1_transit.txt", "r", encoding="utf-8") as f1_transit:
s = f1_general.readline().strip().split()
while s:
count += 1
pred.append(int(s[0]))
score.append([float(s[1]), float(s[2]), float(s[3])])
if count % 4 == 0:
s = f1_general.readline().strip().split()
if count % 4 == 1:
s = f1_price.readline().strip().split()
if count % 4 == 2:
s = f1_safety.readline().strip().split()
if count % 4 == 3:
s = f1_transit.readline().strip().split()
with open(pred_data_dir + "loc2_general.txt", "r", encoding="utf-8") as f2_general, \
open(pred_data_dir + "loc2_price.txt", "r", encoding="utf-8") as f2_price, \
open(pred_data_dir + "loc2_safety.txt", "r", encoding="utf-8") as f2_safety, \
open(pred_data_dir + "loc2_transit.txt", "r", encoding="utf-8") as f2_transit:
s = f2_general.readline().strip().split()
while s:
count += 1
pred.append(int(s[0]))
score.append([float(s[1]), float(s[2]), float(s[3])])
if count % 4 == 0:
s = f2_general.readline().strip().split()
if count % 4 == 1:
s = f2_price.readline().strip().split()
if count % 4 == 2:
s = f2_safety.readline().strip().split()
if count % 4 == 3:
s = f2_transit.readline().strip().split()
elif task_name in ["semeval_NLI_M", "semeval_QA_M"]:
with open(pred_data_dir,"r",encoding="utf-8") as f:
s=f.readline().strip().split()
while s:
pred.append(int(s[0]))
score.append([float(s[1]), float(s[2]), float(s[3]), float(s[4]), float(s[5])])
s = f.readline().strip().split()
elif task_name in ["semeval_NLI_B", "semeval_QA_B"]:
count = 0
tmp = []
with open(pred_data_dir, "r", encoding="utf-8") as f:
s = f.readline().strip().split()
while s:
tmp.append([float(s[2])])
count += 1
if count % 5 == 0:
tmp_sum = np.sum(tmp)
t = []
for i in range(5):
t.append(tmp[i] / tmp_sum)
score.append(t)
if t[0] >= t[1] and t[0] >= t[2] and t[0]>=t[3] and t[0]>=t[4]:
pred.append(0)
elif t[1] >= t[0] and t[1] >= t[2] and t[1]>=t[3] and t[1]>=t[4]:
pred.append(1)
elif t[2] >= t[0] and t[2] >= t[1] and t[2]>=t[3] and t[2]>=t[4]:
pred.append(2)
elif t[3] >= t[0] and t[3] >= t[1] and t[3]>=t[2] and t[3]>=t[4]:
pred.append(3)
else:
pred.append(4)
tmp = []
s = f.readline().strip().split()
else:
count = 0
with open(pred_data_dir+"price.txt","r",encoding="utf-8") as f_price, \
open(pred_data_dir+"anecdotes.txt", "r", encoding="utf-8") as f_anecdotes, \
open(pred_data_dir+"food.txt", "r", encoding="utf-8") as f_food, \
open(pred_data_dir+"ambience.txt", "r", encoding="utf-8") as f_ambience, \
open(pred_data_dir+"service.txt", "r", encoding="utf-8") as f_service:
s = f_price.readline().strip().split()
while s:
count += 1
pred.append(int(s[0]))
score.append([float(s[1]), float(s[2]), float(s[3]), float(s[4]), float(s[5])])
if count % 5 == 0:
s = f_price.readline().strip().split()
if count % 5 == 1:
s = f_anecdotes.readline().strip().split()
if count % 5 == 2:
s = f_food.readline().strip().split()
if count % 5 == 3:
s = f_ambience.readline().strip().split()
if count % 5 == 4:
s = f_service.readline().strip().split()
return pred, score
def sentihood_strict_acc(y_true, y_pred):
"""
Calculate "strict Acc" of aspect detection task of Sentihood.
"""
total_cases=int(len(y_true)/4)
true_cases=0
for i in range(total_cases):
if y_true[i*4]!=y_pred[i*4]:continue
if y_true[i*4+1]!=y_pred[i*4+1]:continue
if y_true[i*4+2]!=y_pred[i*4+2]:continue
if y_true[i*4+3]!=y_pred[i*4+3]:continue
true_cases+=1
aspect_strict_Acc = true_cases/total_cases
return aspect_strict_Acc
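# In other words, each test case contributes four consecutive aspect slots, and a case
# counts as correct only if all four predictions match the gold labels; e.g.
# (hypothetical) y_true = [0, 1, 0, 2] with y_pred = [0, 1, 0, 0] counts as wrong.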
def sentihood_macro_F1(y_true, y_pred):
"""
Calculate "Macro-F1" of aspect detection task of Sentihood.
"""
p_all=0
r_all=0
count=0
for i in range(len(y_pred)//4):
a=set()
b=set()
for j in range(4):
if y_pred[i*4+j]!=0:
a.add(j)
if y_true[i*4+j]!=0:
b.add(j)
if len(b)==0:continue
a_b=a.intersection(b)
if len(a_b)>0:
p=len(a_b)/len(a)
r=len(a_b)/len(b)
else:
p=0
r=0
count+=1
p_all+=p
r_all+=r
Ma_p=p_all/count
Ma_r=r_all/count
aspect_Macro_F1 = 2*Ma_p*Ma_r/(Ma_p+Ma_r)
return aspect_Macro_F1
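# Sketch of the computation above: for each sentence with at least one gold aspect,
# let a be the predicted (non-None) aspect set and b the gold set; then
# p = |a & b| / |a| and r = |a & b| / |b| (both 0 when the intersection is empty),
# Ma_p and Ma_r are their means over such sentences, and
# aspect_Macro_F1 = 2 * Ma_p * Ma_r / (Ma_p + Ma_r).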
def sentihood_AUC_Acc(y_true, score):
"""
Calculate "Macro-AUC" of both aspect detection and sentiment classification tasks of Sentihood.
Calculate "Acc" of sentiment classification task of Sentihood.
"""
# aspect-Macro-AUC
aspect_y_true=[]
aspect_y_score=[]
aspect_y_trues=[[],[],[],[]]
aspect_y_scores=[[],[],[],[]]
for i in range(len(y_true)):
if y_true[i]>0:
aspect_y_true.append(0)
else:
aspect_y_true.append(1) # "None": 1
tmp_score=score[i][0] # probability of "None"
aspect_y_score.append(tmp_score)
aspect_y_trues[i%4].append(aspect_y_true[-1])
aspect_y_scores[i%4].append(aspect_y_score[-1])
aspect_auc=[]
for i in range(4):
aspect_auc.append(metrics.roc_auc_score(aspect_y_trues[i], aspect_y_scores[i]))
aspect_Macro_AUC = np.mean(aspect_auc)
# sentiment-Macro-AUC
sentiment_y_true=[]
sentiment_y_pred=[]
sentiment_y_score=[]
sentiment_y_trues=[[],[],[],[]]
sentiment_y_scores=[[],[],[],[]]
for i in range(len(y_true)):
if y_true[i]>0:
            sentiment_y_true.append(y_true[i]-1) # "Positive":0, "Negative":1
tmp_score=score[i][2]/(score[i][1]+score[i][2]) # probability of "Negative"
sentiment_y_score.append(tmp_score)
if tmp_score>0.5:
sentiment_y_pred.append(1) # "Negative": 1
else:
sentiment_y_pred.append(0)
sentiment_y_trues[i%4].append(sentiment_y_true[-1])
sentiment_y_scores[i%4].append(sentiment_y_score[-1])
sentiment_auc=[]
for i in range(4):
sentiment_auc.append(metrics.roc_auc_score(sentiment_y_trues[i], sentiment_y_scores[i]))
sentiment_Macro_AUC = np.mean(sentiment_auc)
# sentiment Acc
sentiment_y_true = np.array(sentiment_y_true)
sentiment_y_pred = np.array(sentiment_y_pred)
sentiment_Acc = metrics.accuracy_score(sentiment_y_true,sentiment_y_pred)
return aspect_Macro_AUC, sentiment_Acc, sentiment_Macro_AUC
def semeval_PRF(y_true, y_pred):
"""
Calculate "Micro P R F" of aspect detection task of SemEval-2014.
"""
s_all=0
g_all=0
s_g_all=0
for i in range(len(y_pred)//5):
s=set()
g=set()
for j in range(5):
if y_pred[i*5+j]!=4:
s.add(j)
if y_true[i*5+j]!=4:
g.add(j)
if len(g)==0:continue
s_g=s.intersection(g)
s_all+=len(s)
g_all+=len(g)
s_g_all+=len(s_g)
p=s_g_all/s_all
r=s_g_all/g_all
f=2*p*r/(p+r)
return p,r,f
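# Sketch of the computation above: with s the predicted aspect set and g the gold set
# per sentence (sentences with an empty gold set are skipped),
# p = sum|s & g| / sum|s|, r = sum|s & g| / sum|g|, and f = 2 * p * r / (p + r).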
def semeval_Acc(y_true, y_pred, score, classes=4):
"""
Calculate "Acc" of sentiment classification task of SemEval-2014.
"""
assert classes in [2, 3, 4], "classes must be 2 or 3 or 4."
if classes == 4:
total=0
total_right=0
for i in range(len(y_true)-1):
if y_true[i]==4:continue
total+=1
tmp=y_pred[i]
if tmp==4:
if score[i][0]>=score[i][1] and score[i][0]>=score[i][2] and score[i][0]>=score[i][3]:
tmp=0
elif score[i][1]>=score[i][0] and score[i][1]>=score[i][2] and score[i][1]>=score[i][3]:
tmp=1
elif score[i][2]>=score[i][0] and score[i][2]>=score[i][1] and score[i][2]>=score[i][3]:
tmp=2
else:
tmp=3
if y_true[i]==tmp:
total_right+=1
sentiment_Acc = total_right/total
elif classes == 3:
total=0
total_right=0
for i in range(len(y_true)):
if y_true[i]>=3:continue
total+=1
tmp=y_pred[i]
if tmp>=3:
if score[i][0]>=score[i][1] and score[i][0]>=score[i][2]:
tmp=0
elif score[i][1]>=score[i][0] and score[i][1]>=score[i][2]:
tmp=1
else:
tmp=2
if y_true[i]==tmp:
total_right+=1
sentiment_Acc = total_right/total
else:
total=0
total_right=0
for i in range(len(y_true)):
if y_true[i]>=3 or y_true[i]==1:continue
total+=1
tmp=y_pred[i]
if tmp>=3 or tmp==1:
if score[i][0]>=score[i][2]:
tmp=0
else:
tmp=2
if y_true[i]==tmp:
total_right+=1
sentiment_Acc = total_right/total
return sentiment_Acc
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
choices=["sentihood_single", "sentihood_NLI_M", "sentihood_QA_M", \
"sentihood_NLI_B", "sentihood_QA_B", "semeval_single", \
"semeval_NLI_M", "semeval_QA_M", "semeval_NLI_B", "semeval_QA_B"],
help="The name of the task to evalution.")
parser.add_argument("--pred_data_dir",
default=None,
type=str,
required=True,
help="The pred data dir.")
args = parser.parse_args()
result = collections.OrderedDict()
if args.task_name in ["sentihood_single", "sentihood_NLI_M", "sentihood_QA_M", "sentihood_NLI_B", "sentihood_QA_B"]:
y_true = get_y_true(args.task_name)
y_pred, score = get_y_pred(args.task_name, args.pred_data_dir)
aspect_strict_Acc = sentihood_strict_acc(y_true, y_pred)
aspect_Macro_F1 = sentihood_macro_F1(y_true, y_pred)
aspect_Macro_AUC, sentiment_Acc, sentiment_Macro_AUC = sentihood_AUC_Acc(y_true, score)
result = {'aspect_strict_Acc': aspect_strict_Acc,
'aspect_Macro_F1': aspect_Macro_F1,
'aspect_Macro_AUC': aspect_Macro_AUC,
'sentiment_Acc': sentiment_Acc,
'sentiment_Macro_AUC': sentiment_Macro_AUC}
else:
y_true = get_y_true(args.task_name)
y_pred, score = get_y_pred(args.task_name, args.pred_data_dir)
aspect_P, aspect_R, aspect_F = semeval_PRF(y_true, y_pred)
sentiment_Acc_4_classes = semeval_Acc(y_true, y_pred, score, 4)
sentiment_Acc_3_classes = semeval_Acc(y_true, y_pred, score, 3)
sentiment_Acc_2_classes = semeval_Acc(y_true, y_pred, score, 2)
result = {'aspect_P': aspect_P,
'aspect_R': aspect_R,
'aspect_F': aspect_F,
'sentiment_Acc_4_classes': sentiment_Acc_4_classes,
'sentiment_Acc_3_classes': sentiment_Acc_3_classes,
'sentiment_Acc_2_classes': sentiment_Acc_2_classes}
for key in result.keys():
print(key, "=",str(result[key]))
if __name__ == "__main__":
main()
| 37.053488
| 120
| 0.510576
|
bd93ded6ca2faa117a06d147d2cb1fb23a87ba8e
| 3,236
|
py
|
Python
|
news_crawler/crawler_rss.py
|
rodrigocaputo/gpn
|
62632bea13ea912ae8a48bd9a6b6ac3c3664845f
|
[
"MIT"
] | null | null | null |
news_crawler/crawler_rss.py
|
rodrigocaputo/gpn
|
62632bea13ea912ae8a48bd9a6b6ac3c3664845f
|
[
"MIT"
] | null | null | null |
news_crawler/crawler_rss.py
|
rodrigocaputo/gpn
|
62632bea13ea912ae8a48bd9a6b6ac3c3664845f
|
[
"MIT"
] | null | null | null |
import feedparser, mysql.connector, threading, os
from time import mktime, localtime, strftime, sleep
from datetime import datetime
mysql_host = os.environ.get('MYSQL_HOST', 'localhost')
mysql_user = os.environ.get('MYSQL_USER', 'root')
mysql_password = os.environ.get('MYSQL_PASS', 'root')
mysql_database = os.environ.get('MYSQL_DATABASE', 'gpn')
blacklist = ('pequenas-empresas-grandes-negocios',
'banco-do-brasil',
'bb',
'bradesco',
'itau',
'caixa-economica',
'santander',
'cef',
'nubank',
'lula',
'dilma',
'aecio',
'bolsonaro',
'ciro-gomes',
'pt',
'mdb',
'psdb')
print('Iniciando Crawler...')
sleep(30)
while True:
sleep(30)
print(strftime("%Y-%m-%d %H:%M:%S", localtime()) + ' Atualizando G1...')
d = feedparser.parse('http://pox.globo.com/rss/g1/economia/')
registros = []
for noticia in d['entries']:
        #noticia['published_parsed'] # publication date/time (parsed)
        #noticia['summary_detail'] # detailed summary of the news item
        #noticia['links'] # links to the news item
        #noticia['tags'] # tags (only contains term: G1)
        #noticia['summary'] # summary of the news item
        #noticia['guidislink'] # all are False
        #noticia['title_detail'] # detailed title of the news item
        #noticia['link'] # link to the news item
        #noticia['published'] # publication date/time as text
link = noticia['link'][noticia['link'].find('://g1.globo.com/')+16:]
if link[:link.find('/')] == 'economia':
link = link[link.find('/')+1:]
editoria = link[:link.find('/')].replace('-', ' ').upper()
        if editoria in ('BLOG',):  # trailing comma makes this a tuple; ('BLOG') is just a string
continue
elif editoria == 'NOTICIA':
editoria = 'ECONOMIA'
elif editoria == 'PME':
editoria = 'PEQUENAS EMPRESAS GRANDES NEGÓCIOS'
elif editoria == 'EDUCACAO FINANCEIRA':
editoria = 'EDUCAÇÃO FINANCEIRA'
elif editoria == 'AGRONEGOCIOS':
editoria = 'AGRONEGÓCIOS'
texto = link[link.rfind('/'):]
if len(texto) < 10:
continue
incluir = True
for palavra in blacklist:
if palavra in texto:
incluir = False
if not incluir:
continue
registro = {}
registro['id'] = noticia['id'] # Link para a noticia
registro['fonte'] = 'G1'
registro['editoria'] = editoria
registro['titulo'] = noticia['title'] # Titulo da noticia
registro['data_atualizacao'] = datetime.fromtimestamp(mktime(noticia['published_parsed'])).date()
registro['hora_atualizacao'] = datetime.fromtimestamp(mktime(noticia['published_parsed'])).time()
registros.append(registro)
conexao = mysql.connector.connect(host=mysql_host, user=mysql_user, password=mysql_password, database=mysql_database)
cursor = conexao.cursor()
add_noticia = ("REPLACE INTO `noticias` (ID, FONTE, EDITORIA, TITULO, DATA_ATUALIZACAO, HORA_ATUALIZACAO) \
VALUES (%(ID)s, %(FONTE)s, %(EDITORIA)s, %(TITULO)s, %(DATA_ATUALIZACAO)s, %(HORA_ATUALIZACAO)s)")
for noticia in registros:
dados_noticia = {
'ID': noticia['id'],
'FONTE': noticia['fonte'],
'EDITORIA': noticia['editoria'],
'TITULO': noticia['titulo'],
'DATA_ATUALIZACAO': noticia['data_atualizacao'],
'HORA_ATUALIZACAO': noticia['hora_atualizacao']
}
cursor.execute(add_noticia, dados_noticia)
conexao.commit()
cursor.close()
conexao.close()
| 33.020408
| 118
| 0.66471
|
7cca9d5e42589b2be8481f46ed4a362b047d9bed
| 1,367
|
py
|
Python
|
{{cookiecutter.projectname}}/setup.py
|
tobyontour/cookiecutter-django-standalone
|
711b7e1096ebc351eb54a6c254e3bd96c556b75a
|
[
"BSD-2-Clause"
] | null | null | null |
{{cookiecutter.projectname}}/setup.py
|
tobyontour/cookiecutter-django-standalone
|
711b7e1096ebc351eb54a6c254e3bd96c556b75a
|
[
"BSD-2-Clause"
] | 1
|
2020-05-21T21:04:45.000Z
|
2020-05-22T13:24:49.000Z
|
{{cookiecutter.projectname}}/setup.py
|
tobyontour/cookiecutter-django-standalone
|
711b7e1096ebc351eb54a6c254e3bd96c556b75a
|
[
"BSD-2-Clause"
] | null | null | null |
import setuptools
with open("README.rst", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="{{cookiecutter.projectname}}-pkg-{{cookiecutter.pypiusername}}", # Replace with your own username
version="0.0.1",
author="{{ cookiecutter.author}}",
author_email="{{ cookiecutter.author_email}}",
description="A django article app",
long_description=long_description,
    long_description_content_type="text/x-rst",
url="{{cookiecutter.project_url}}",
packages=setuptools.find_packages(),
classifiers=[
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django :: 3.0",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
python_requires='>=3.6',
install_requires=[
'Django>=3.0.0'
],
test_suite='{{cookiecutter.projectname}}.tests.runtests.runtests'
)
| 36.945946
| 107
| 0.624726
|
18cd107fafa5bf21b2e6eb3b98548550c38b8e69
| 4,278
|
py
|
Python
|
pypgdelta/sql/state/_table.py
|
SindreOsnes/pypgdelta
|
00234903a4e3c1c61ac5cc295133b6a69334fbeb
|
[
"MIT"
] | null | null | null |
pypgdelta/sql/state/_table.py
|
SindreOsnes/pypgdelta
|
00234903a4e3c1c61ac5cc295133b6a69334fbeb
|
[
"MIT"
] | null | null | null |
pypgdelta/sql/state/_table.py
|
SindreOsnes/pypgdelta
|
00234903a4e3c1c61ac5cc295133b6a69334fbeb
|
[
"MIT"
] | null | null | null |
import psycopg2
import psycopg2.extras
from collections import OrderedDict
from typing import Dict, List
def get_sql_tables_and_views(connection: psycopg2.extensions.connection) -> List[psycopg2.extras.RealDictRow]:
"""Function for getting the tables and views for a sql database
:param psycopg2.extensions.connection connection: The connection
:return: List of rows using key-value pairs for the data
:rtype: List[psycopg2.extras.RealDictRow]
"""
with connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cursor:
query = """SELECT t.table_schema,
t.table_name,
t.table_type,
c.character_maximum_length,
c.column_name,
c.data_type,
c.is_nullable
FROM information_schema.columns c
INNER JOIN information_schema.tables t
ON t.table_schema = c.table_schema AND t.table_name = c.table_name"""
cursor.execute(query)
results = cursor.fetchall()
return results
def get_table_dict(connection: psycopg2.extensions.connection) -> Dict:
"""Function for getting the tables and views for a sql database a dict
:param psycopg2.extensions.connection connection: The connection
:return: Current database setup as a nested dictionary
:rtype: Dict
"""
configuration = OrderedDict()
table_information = get_sql_tables_and_views(connection)
for table_col in table_information:
# Instantiate the schema object
if table_col['table_schema'] not in configuration:
configuration[table_col['table_schema']] = OrderedDict(
[
('tables', OrderedDict()),
('views', OrderedDict())
]
)
# Limit operations to selected table/view definition
schema_definition = configuration[table_col['table_schema']]
if table_col['table_type'] == 'BASE TABLE':
if table_col['table_name'] not in schema_definition['tables']:
schema_definition['tables'][table_col['table_name']] = OrderedDict(
[
('columns', OrderedDict())
]
)
table_definition = schema_definition['tables'][table_col['table_name']]
else:
            if table_col['table_name'] not in schema_definition['views']:
schema_definition['views'][table_col['table_name']] = OrderedDict(
[
('columns', OrderedDict())
]
)
table_definition = schema_definition['views'][table_col['table_name']]
table_definition['columns'].update(_generate_column_definitions(table_col))
return configuration
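# Illustrative shape of the returned configuration (hypothetical schema, table and
# column names):
#   {"public": {
#       "tables": {"users": {"columns": {"id": {
#           "data_type": "bigint", "character_maximum_length": None,
#           "nullable": False, "data_type_stmt": "bigint"}}}},
#       "views": {}}}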
def _generate_column_definitions(column_definition: psycopg2.extras.RealDictRow) -> Dict:
"""Function for generating the column definition object
:param psycopg2.extras.RealDictRow column_definition: The column definition from the database
:return: The column setup as a dict
:rtype: Dict
"""
column_setup = OrderedDict()
column_information = OrderedDict()
column_setup[column_definition['column_name']] = column_information
column_information['data_type'] = column_definition['data_type']
column_information['character_maximum_length'] = column_definition['character_maximum_length']
column_information['nullable'] = column_definition['is_nullable'] == 'YES'
# Set the data type statement
if column_information['data_type'] == 'bigint':
column_information['data_type_stmt'] = "bigint"
elif column_information['data_type'] == 'character varying':
if column_information['character_maximum_length'] is not None:
column_information['data_type_stmt'] = f"varchar({column_information['character_maximum_length']})"
else:
            column_information['data_type_stmt'] = "varchar"
elif column_information['data_type'] == 'uuid':
column_information['data_type_stmt'] = "uuid"
else:
column_information['data_type_stmt'] = None
return column_setup
| 38.890909
| 111
| 0.64259
|
ee4c82ecf77a18795753c05453232d7f11ae8ab3
| 3,968
|
py
|
Python
|
customSDK/servicefabric/models/node_open_failed_event.py
|
leikong/service-fabric-cli
|
6ec1b1c8445b7cc5a889f3b172b47a6017c8888c
|
[
"MIT"
] | 1
|
2020-06-16T22:32:27.000Z
|
2020-06-16T22:32:27.000Z
|
customSDK/servicefabric/models/node_open_failed_event.py
|
leikong/service-fabric-cli
|
6ec1b1c8445b7cc5a889f3b172b47a6017c8888c
|
[
"MIT"
] | null | null | null |
customSDK/servicefabric/models/node_open_failed_event.py
|
leikong/service-fabric-cli
|
6ec1b1c8445b7cc5a889f3b172b47a6017c8888c
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .node_event import NodeEvent
class NodeOpenFailedEvent(NodeEvent):
"""Node Open Failed event.
:param event_instance_id: The identifier for the FabricEvent instance.
:type event_instance_id: str
:param time_stamp: The time event was logged.
:type time_stamp: datetime
:param has_correlated_events: Shows there is existing related events
available.
:type has_correlated_events: bool
:param kind: Constant filled by server.
:type kind: str
:param node_name: The name of a Service Fabric node.
:type node_name: str
:param node_instance: Id of Node instance.
:type node_instance: long
:param node_id: Id of Node.
:type node_id: str
:param upgrade_domain: Upgrade domain of Node.
:type upgrade_domain: str
:param fault_domain: Fault domain of Node.
:type fault_domain: str
:param ip_address_or_fqdn: IP address or FQDN.
:type ip_address_or_fqdn: str
:param hostname: Name of Host.
:type hostname: str
:param is_seed_node: Indicates if it is seed node.
:type is_seed_node: bool
:param node_version: Version of Node.
:type node_version: str
:param error: Describes the error.
:type error: str
"""
_validation = {
'event_instance_id': {'required': True},
'time_stamp': {'required': True},
'kind': {'required': True},
'node_name': {'required': True},
'node_instance': {'required': True},
'node_id': {'required': True},
'upgrade_domain': {'required': True},
'fault_domain': {'required': True},
'ip_address_or_fqdn': {'required': True},
'hostname': {'required': True},
'is_seed_node': {'required': True},
'node_version': {'required': True},
'error': {'required': True},
}
_attribute_map = {
'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
'kind': {'key': 'Kind', 'type': 'str'},
'node_name': {'key': 'NodeName', 'type': 'str'},
'node_instance': {'key': 'NodeInstance', 'type': 'long'},
'node_id': {'key': 'NodeId', 'type': 'str'},
'upgrade_domain': {'key': 'UpgradeDomain', 'type': 'str'},
'fault_domain': {'key': 'FaultDomain', 'type': 'str'},
'ip_address_or_fqdn': {'key': 'IpAddressOrFQDN', 'type': 'str'},
'hostname': {'key': 'Hostname', 'type': 'str'},
'is_seed_node': {'key': 'IsSeedNode', 'type': 'bool'},
'node_version': {'key': 'NodeVersion', 'type': 'str'},
'error': {'key': 'Error', 'type': 'str'},
}
def __init__(self, event_instance_id, time_stamp, node_name, node_instance, node_id, upgrade_domain, fault_domain, ip_address_or_fqdn, hostname, is_seed_node, node_version, error, has_correlated_events=None):
super(NodeOpenFailedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name)
self.node_instance = node_instance
self.node_id = node_id
self.upgrade_domain = upgrade_domain
self.fault_domain = fault_domain
self.ip_address_or_fqdn = ip_address_or_fqdn
self.hostname = hostname
self.is_seed_node = is_seed_node
self.node_version = node_version
self.error = error
self.kind = 'NodeOpenFailed'
| 42.212766
| 212
| 0.624748
|
b319ed21ff0d8cdee9a4f9a476b616cfce1daeab
| 597
|
py
|
Python
|
backend/puzzle/serializers/comment.py
|
mductran/puzzle
|
c4598f5420dff126fa67db1e0adee1677a8baf8f
|
[
"Apache-2.0"
] | null | null | null |
backend/puzzle/serializers/comment.py
|
mductran/puzzle
|
c4598f5420dff126fa67db1e0adee1677a8baf8f
|
[
"Apache-2.0"
] | null | null | null |
backend/puzzle/serializers/comment.py
|
mductran/puzzle
|
c4598f5420dff126fa67db1e0adee1677a8baf8f
|
[
"Apache-2.0"
] | null | null | null |
from rest_framework import serializers
from puzzle.models import Comment
from puzzle.models import Account
class CommentSerializer(serializers.ModelSerializer):
author_name = serializers.CharField(source="author.user.username", read_only=True)
class Meta:
model = Comment
fields = ["id", "content", "created", "updated", "author_id", "post_id", "author_name"]
def create(self, validated_data):
print('\nVALIDATED DATA: ', validated_data)
comment_instance = Comment.objects.create(**validated_data)
        return comment_instance
| 35.117647
| 95
| 0.723618
|
5eba2aace8768cc661d1f29c24ca967146c00613
| 102
|
py
|
Python
|
recport/main.py
|
CircleOnCircles/recport
|
371f8af612f7a0787eab9267ffe65f372c7badb2
|
[
"MIT"
] | null | null | null |
recport/main.py
|
CircleOnCircles/recport
|
371f8af612f7a0787eab9267ffe65f372c7badb2
|
[
"MIT"
] | null | null | null |
recport/main.py
|
CircleOnCircles/recport
|
371f8af612f7a0787eab9267ffe65f372c7badb2
|
[
"MIT"
] | 1
|
2020-02-03T13:52:22.000Z
|
2020-02-03T13:52:22.000Z
|
import sys
def run1():
print("hello world!")
print(sys.argv)
def run2():
print("bye")
| 9.272727
| 25
| 0.568627
|
a25f818381ff1fca723cecf8b18ecf459efb3565
| 1,530
|
py
|
Python
|
measure/migrations/0001_initial.py
|
mpsk2/tut-backend
|
af467809b79b6c1ee84e506cebc7e5ac3fa675bd
|
[
"MIT"
] | null | null | null |
measure/migrations/0001_initial.py
|
mpsk2/tut-backend
|
af467809b79b6c1ee84e506cebc7e5ac3fa675bd
|
[
"MIT"
] | null | null | null |
measure/migrations/0001_initial.py
|
mpsk2/tut-backend
|
af467809b79b6c1ee84e506cebc7e5ac3fa675bd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-12 13:37
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64, unique=True)),
],
),
migrations.CreateModel(
name='Record',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.FloatField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('taken_at', models.DateTimeField()),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='measure.Category')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='category',
name='records',
field=models.ManyToManyField(through='measure.Record', to=settings.AUTH_USER_MODEL),
),
]
| 35.581395
| 118
| 0.60915
|
ab261b899c032501f4abe5be1d5a574f37cabebb
| 1,719
|
py
|
Python
|
scripts/python_scripts/processcsv/old/fill_in_ip_opt.py
|
akazachk/pha
|
4120f70554cb0a149d5ab52e04409302e78059fa
|
[
"MIT"
] | 1
|
2021-09-16T19:58:35.000Z
|
2021-09-16T19:58:35.000Z
|
scripts/python_scripts/processcsv/old/fill_in_ip_opt.py
|
akazachk/pha
|
4120f70554cb0a149d5ab52e04409302e78059fa
|
[
"MIT"
] | null | null | null |
scripts/python_scripts/processcsv/old/fill_in_ip_opt.py
|
akazachk/pha
|
4120f70554cb0a149d5ab52e04409302e78059fa
|
[
"MIT"
] | null | null | null |
import processcsv
import csv
import utility
import argparse
import sys
import shutil
# Default values
default_ip_opt_fname = "ip_opt.csv"
default_in_fname = "lg-info.csv"
default_out_fname = "lg-info-ip.csv"
inst_col = 0
def fill_in_ip_opt(in_fname, out_fname, ip_opt_fname, overwrite = None):
"""
Fills in IP opt for each instance in the relevant column
Creates a processcsv.ProcessCSV instance for lg_info and ip_opt
Finds IP opt for each row in lg_info, and creates a new file (out_f) with all info
"""
if (overwrite is None):
overwrite = False
# Read IP opt file in
ip_opt_reader = processcsv.ProcessCSV(ip_opt_fname, num_header_lines = 1)
# Read lg info
lg_info_reader = processcsv.ProcessCSV(in_fname, num_header_lines = 2)
# Open out file
out_f = open(out_fname, 'w')
output = csv.writer(out_f)
# Write file line by line
lg_ip_obj_col = lg_info_reader.get_col_index("IP OBJ")
assert lg_ip_obj_col >= 0
# Write header
for i in range(len(lg_info_reader._header)):
output.writerow(lg_info_reader._header[i])
# Write each row with filled-in value
for row in lg_info_reader._reader:
curr_inst = row[inst_col]
# find_first_val returns a table, with a header row
# The first row contains all the column information
val_str = ip_opt_reader.find_first_val(col_info = "IP OBJ", inst_name = curr_inst)[1][1]
if (len(val_str) > 0):
curr_inst_ip_obj = float(val_str)
if __debug__:
print( "Instance: %s\tIP obj: %f" % (curr_inst, curr_inst_ip_obj) )
row[lg_ip_obj_col] = curr_inst_ip_obj
output.writerow(row)
# Close
out_f.close()
if (overwrite):
# Overwite in_fname
shutil.move(out_fname, in_fname)
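# A minimal usage sketch with the default file names defined above, kept as comments
# (assumes the working directory contains lg-info.csv and ip_opt.csv):
#   fill_in_ip_opt(default_in_fname, default_out_fname, default_ip_opt_fname)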
| 27.725806
| 92
| 0.719604
|
d3fe8b4e36c091c3a912d70e39b4ec5fb225dc93
| 20
|
py
|
Python
|
test/__init__.py
|
rata-mahata/python-training
|
369d8e3a494cf25b59e0ced3882463be56eb0905
|
[
"Apache-2.0"
] | null | null | null |
test/__init__.py
|
rata-mahata/python-training
|
369d8e3a494cf25b59e0ced3882463be56eb0905
|
[
"Apache-2.0"
] | null | null | null |
test/__init__.py
|
rata-mahata/python-training
|
369d8e3a494cf25b59e0ced3882463be56eb0905
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'Olga'
| 10
| 19
| 0.7
|
780e8b51ddb99c165719167fb2f527598aac8e02
| 1,977
|
py
|
Python
|
PyEntity/modules/Image.py
|
AncientEntity/Pygine
|
b8a9d4bab645f2886417bf9027a8e26ea15769ec
|
[
"MIT"
] | 2
|
2020-06-01T06:03:16.000Z
|
2022-02-15T20:39:27.000Z
|
PyEntity/modules/Image.py
|
AncientEntity/PyEntity
|
b8a9d4bab645f2886417bf9027a8e26ea15769ec
|
[
"MIT"
] | null | null | null |
PyEntity/modules/Image.py
|
AncientEntity/PyEntity
|
b8a9d4bab645f2886417bf9027a8e26ea15769ec
|
[
"MIT"
] | null | null | null |
import pygame
from PyEntity import Globals
def Image(img,override=-1):
if(isinstance(img,pygame.Surface)):
if(override == -1):
Globals.loadedImages.append(img)
else:
Globals.loadedImages[override] = img
Globals.loadedImageLocations.append("runtime")
return len(Globals.loadedImages) - 1
#if(img in Globals.loadedImageLocations and override==-1):
# return Globals.loadedImageLocations.index(img)
if(override == -1):
Globals.loadedImages.append(pygame.image.load(img))
else:
Globals.loadedImages[override] = pygame.image.load(img)
Globals.loadedImageLocations.append(img)
return len(Globals.loadedImages)-1
def ScaleImage(img, newScale):
if(isinstance(img,pygame.Surface)):
return pygame.transform.scale(img,(round(img.get_width() * newScale.x),round(img.get_height() * newScale.y)))
else:
if(img == None):
return
if(Image(Globals.loadedImageLocations[img]) == None):
return
new = Globals.loadedImages[Image(Globals.loadedImageLocations[img])]
new = pygame.transform.scale(new,(int(new.get_width() * newScale.x), int(new.get_height() * newScale.y)))
Globals.loadedImages[img] = new
return img
def RotateImage(img, rotation,scale):
if(isinstance(img,pygame.Surface)):
return pygame.transform.rotate(img,rotation)
else:
new = Image(Globals.loadedImageLocations[img],override=img)
Globals.loadedImages[img] = ScaleImage(pygame.transform.rotate(Globals.loadedImages[new],rotation),scale)
return img
def FlipImage(img, xFlip, yFlip, scale):
if(isinstance(img,pygame.Surface)):
return pygame.transform.flip(img,xFlip,yFlip)
else:
new = Image(Globals.loadedImageLocations[img],override=img)
Globals.loadedImages[img] = ScaleImage(pygame.transform.flip(Globals.loadedImages[new],xFlip,yFlip),scale)
return img
| 38.764706
| 117
| 0.677795
|
109b1c6c0cd3baafe6326218a0d3e682e989ce70
| 5,651
|
py
|
Python
|
tests/test_formating.py
|
hellock/mmaction2
|
def3b651ab7818ece637d8637dddacbca027910c
|
[
"Apache-2.0"
] | 1
|
2021-11-02T15:21:42.000Z
|
2021-11-02T15:21:42.000Z
|
tests/test_formating.py
|
hellock/mmaction2
|
def3b651ab7818ece637d8637dddacbca027910c
|
[
"Apache-2.0"
] | null | null | null |
tests/test_formating.py
|
hellock/mmaction2
|
def3b651ab7818ece637d8637dddacbca027910c
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pytest
import torch
from mmcv.parallel import DataContainer as DC
from mmaction.datasets.pipelines import (Collect, FormatShape, ImageToTensor,
ToDataContainer, ToTensor, Transpose)
def check_keys_contain(result_keys, target_keys):
"""Check if all elements in target_keys is in result_keys."""
return set(target_keys).issubset(set(result_keys))
def test_to_tensor():
to_tensor = ToTensor(['str'])
with pytest.raises(TypeError):
# str cannot be converted to tensor
results = dict(str='0')
to_tensor(results)
# convert tensor, numpy, squence, int, float to tensor
target_keys = ['tensor', 'numpy', 'sequence', 'int', 'float']
to_tensor = ToTensor(target_keys)
original_results = dict(
tensor=torch.randn(2, 3),
numpy=np.random.randn(2, 3),
sequence=list(range(10)),
int=1,
float=0.1)
results = to_tensor(original_results)
assert check_keys_contain(results.keys(), target_keys)
for key in target_keys:
assert isinstance(results[key], torch.Tensor)
assert torch.equal(results[key].data, original_results[key])
# Add an additional key which is not in keys.
original_results = dict(
tensor=torch.randn(2, 3),
numpy=np.random.randn(2, 3),
sequence=list(range(10)),
int=1,
float=0.1,
str='test')
results = to_tensor(original_results)
assert check_keys_contain(results.keys(), target_keys)
for key in target_keys:
assert isinstance(results[key], torch.Tensor)
assert torch.equal(results[key].data, original_results[key])
assert repr(to_tensor) == to_tensor.__class__.__name__ + \
f'(keys={target_keys})'
def test_to_data_container():
# check user-defined fields
fields = (dict(key='key1', stack=True), dict(key='key2'))
to_data_container = ToDataContainer(fields=fields)
target_keys = ['key1', 'key2']
original_results = dict(key1=np.random.randn(10, 20), key2=['a', 'b'])
results = to_data_container(original_results.copy())
assert check_keys_contain(results.keys(), target_keys)
for key in target_keys:
assert isinstance(results[key], DC)
assert np.all(results[key].data == original_results[key])
assert results['key1'].stack
assert not results['key2'].stack
# Add an additional key which is not in keys.
original_results = dict(
key1=np.random.randn(10, 20), key2=['a', 'b'], key3='value3')
results = to_data_container(original_results.copy())
assert check_keys_contain(results.keys(), target_keys)
for key in target_keys:
assert isinstance(results[key], DC)
assert np.all(results[key].data == original_results[key])
assert results['key1'].stack
assert not results['key2'].stack
assert repr(to_data_container) == (
to_data_container.__class__.__name__ + f'(fields={fields})')
def test_image_to_tensor():
original_results = dict(imgs=np.random.randn(256, 256, 3))
keys = ['imgs']
image_to_tensor = ImageToTensor(keys)
results = image_to_tensor(original_results)
assert results['imgs'].shape == torch.Size([3, 256, 256])
assert isinstance(results['imgs'], torch.Tensor)
assert torch.equal(results['imgs'].data, original_results['imgs'])
assert repr(image_to_tensor) == image_to_tensor.__class__.__name__ + \
f'(keys={keys})'
def test_transpose():
results = dict(imgs=np.random.randn(256, 256, 3))
keys = ['imgs']
order = [2, 0, 1]
transpose = Transpose(keys, order)
results = transpose(results)
assert results['imgs'].shape == (3, 256, 256)
assert repr(transpose) == transpose.__class__.__name__ + \
f'(keys={keys}, order={order})'
def test_collect():
inputs = dict(
imgs=np.random.randn(256, 256, 3),
label=[1],
filename='test.txt',
original_shape=(256, 256, 3),
img_shape=(256, 256, 3),
pad_shape=(256, 256, 3),
flip_direction='vertical',
img_norm_cfg=dict(to_bgr=False))
keys = ['imgs', 'label']
collect = Collect(keys)
results = collect(inputs)
assert sorted(list(results.keys())) == sorted(
['imgs', 'label', 'img_meta'])
inputs.pop('imgs')
assert set(results['img_meta'].data.keys()) == set(inputs.keys())
for key in results['img_meta'].data:
assert results['img_meta'].data[key] == inputs[key]
assert repr(collect) == collect.__class__.__name__ + \
f'(keys={keys}, meta_keys={collect.meta_keys})'
def test_format_shape():
with pytest.raises(ValueError):
# invalid input format
FormatShape('NHWC')
# 'NCHW' input format
results = dict(
imgs=np.random.randn(3, 224, 224, 3), num_clips=1, clip_len=3)
format_shape = FormatShape('NCHW')
assert format_shape(results)['input_shape'] == (3, 3, 224, 224)
# `NCTHW` input format with num_clips=1, clip_len=3
results = dict(
imgs=np.random.randn(3, 224, 224, 3), num_clips=1, clip_len=3)
format_shape = FormatShape('NCTHW')
assert format_shape(results)['input_shape'] == (1, 3, 3, 224, 224)
# `NCTHW` input format with num_clips=2, clip_len=3
results = dict(
imgs=np.random.randn(18, 224, 224, 3), num_clips=2, clip_len=3)
assert format_shape(results)['input_shape'] == (6, 3, 3, 224, 224)
target_keys = ['imgs', 'input_shape']
assert check_keys_contain(results.keys(), target_keys)
assert repr(format_shape) == format_shape.__class__.__name__ + \
"(input_format='NCTHW')"
| 36.224359
| 78
| 0.651035
|
6ef32bccc0c4af6fc68a42b50cc46bf75236ee40
| 2,355
|
py
|
Python
|
data/p4VQE/R4/benchmark/startQiskit_noisy48.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R4/benchmark/startQiskit_noisy48.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R4/benchmark/startQiskit_noisy48.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=3
# total number=9
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.x(input_qubit[2]) # number=6
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=5
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.x(input_qubit[2]) # number=7
prog.x(input_qubit[2]) # number=8
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
    sample_shot = 5600
writefile = open("../data/startQiskit_noisy48.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 26.460674
| 118
| 0.62845
|
804a6b089e2540d8a68e0cb2a84a3c1ee89727be
| 373
|
py
|
Python
|
event/consts.py
|
kthaisse/website
|
be0d0e0763ae2a6b8351c08b432229eae9521f1d
|
[
"MIT"
] | 1
|
2020-03-19T09:44:16.000Z
|
2020-03-19T09:44:16.000Z
|
event/consts.py
|
kthaisse/website
|
be0d0e0763ae2a6b8351c08b432229eae9521f1d
|
[
"MIT"
] | 43
|
2020-02-22T09:32:27.000Z
|
2022-03-22T11:24:51.000Z
|
event/consts.py
|
kthaisse/website
|
be0d0e0763ae2a6b8351c08b432229eae9521f1d
|
[
"MIT"
] | 3
|
2020-03-06T13:27:12.000Z
|
2022-02-07T09:01:07.000Z
|
from event.enums import ScheduleType
SCHEDULE_EMOJIS = {
ScheduleType.GENERAL: "📌",
ScheduleType.CEREMONY: "🎤",
ScheduleType.TALK: "🗣️",
ScheduleType.TEAM_BUILDING: "👋",
ScheduleType.MEAL: "🍔",
ScheduleType.DEMO: "👩🏫",
ScheduleType.EVENT_START: "🏁",
ScheduleType.EVENT_END: "🏁",
ScheduleType.GAME: "🕹️",
ScheduleType.PRIZE: "🏆",
}
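# Example lookup (hypothetical `schedule` object with a `type` attribute of ScheduleType):
# emoji = SCHEDULE_EMOJIS.get(schedule.type, SCHEDULE_EMOJIS[ScheduleType.GENERAL])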
| 24.866667
| 36
| 0.632708
|
af0401da0233e21ba883a6978e9ce589d848293c
| 3,810
|
py
|
Python
|
tests/test_flask.py
|
ShacharOch/anyway
|
dd62eeec19d478aca78bf9eb151110a26690495d
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_flask.py
|
ShacharOch/anyway
|
dd62eeec19d478aca78bf9eb151110a26690495d
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_flask.py
|
ShacharOch/anyway
|
dd62eeec19d478aca78bf9eb151110a26690495d
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from six.moves import http_client
import six
from anyway import app as flask_app
from anyway.utilities import open_utf8
import json
import pytest
from functools import partial
from urlobject import URLObject
from collections import Counter
@pytest.fixture
def app():
return flask_app.test_client()
query_flag = partial(pytest.mark.parametrize, argvalues=["1", ""])
if six.PY2:
_text_data = lambda rv: rv.data
else:
_text_data = lambda rv: rv.data.decode("utf-8")
def test_main(app):
rv = app.get('/')
assert rv.status_code == http_client.OK
assert '<title>ANYWAY - משפיעים בכל דרך</title>' in _text_data(rv)
# It requires parameters to know which markers you want.
def test_markers_empty(app):
rv = app.get('/markers')
assert rv.status_code == http_client.BAD_REQUEST
assert '<title>400 Bad Request</title>' in _text_data(rv)
#print(rv.data)
@pytest.fixture(scope="module")
def marker_counter():
counter = Counter()
yield counter
assert counter['markers'] == 1624
def test_bad_date(app):
rv = app.get("/markers?ne_lat=32.08656790211843&ne_lng=34.80611543655391&sw_lat=32.08003198103277&sw_lng=34.793884563446&zoom=17&thin_markers=false&start_date=a1104537600&end_date=1484697600&show_fatal=1&show_severe=1&show_light=1&approx=1&accurate=1&show_markers=1&show_discussions=1&show_urban=3&show_intersection=3&show_lane=3&show_day=7&show_holiday=0&show_time=24&start_time=25&end_time=25&weather=0&road=0&separation=0&surface=0&acctype=0&controlmeasure=0&district=0&case_type=0")
assert rv.status_code == http_client.BAD_REQUEST
assert rv.headers['Content-Type'] == 'text/html'
def test_markers_2014086707(app):
# clicking on a car image
rv = app.get("/markers/2014086707")
assert rv.status_code == http_client.OK
#print(rv.data)
with open_utf8('tests/markers_2014086707.json') as fh:
assert json.loads(_text_data(rv)) == json.load(fh)
@query_flag("show_fatal")
@query_flag("show_severe")
@query_flag("show_light")
@query_flag("show_approx")
@query_flag("show_accurate")
def test_markers(app, show_fatal, show_severe, show_light, show_accurate, show_approx, marker_counter):
url = URLObject('/markers').set_query_params({
"ne_lat": "32.085413468822", "ne_lng": "34.797736215591385", "sw_lat": "32.07001357040486", "sw_lng": "34.775548982620194", "zoom": "16", "thin_markers": "false",
"start_date": "1104537600", "end_date": "1484697600", "show_fatal": show_fatal, "show_severe": show_severe, "show_light": show_light, "approx": show_approx, "accurate": show_accurate,
"show_markers": "1", "show_accidents": "1", "show_rsa": "0", "show_discussions": "1", "show_urban": "3", "show_intersection": "3", "show_lane": "3", "show_day": "7", "show_holiday": "0",
"show_time": "24", "start_time": "25", "end_time": "25", "weather": "0", "road": "0", "separation": "0", "surface": "0", "acctype": "0", "controlmeasure": "0", "district": "0", "case_type": "0"})
rv = app.get(url)
assert rv.status_code == http_client.OK
assert rv.headers['Content-Type'] == 'application/json'
resp = json.loads(_text_data(rv))
marker_counter["markers"] += len(resp['markers'])
for marker in resp['markers']:
assert show_fatal or marker['severity'] != 1
assert show_severe or marker['severity'] != 2
assert show_light or marker['severity'] != 3
assert show_accurate or marker['locationAccuracy'] != 1
assert show_approx or marker['locationAccuracy'] == 1
def test_single_marker(app):
rv = app.get("/markers/2014027147")
assert rv.status_code == http_client.OK
#print(rv.data)
resp = json.loads(_text_data(rv))
#assert 'clusters' in resp
assert resp[0]['accident_id'] == 2014027147
| 39.278351
| 490
| 0.701837
|
c3e78a66970284a8b85be9a73d650584c2a18653
| 2,004
|
py
|
Python
|
CarlosCardona_Ejercicio10.py
|
CarlosCardona953/Carlos-Cardona-Ejercicio10-LAB
|
4a07fc9c2e7ba4a2da5ad1ffea5ee58af03a4e1d
|
[
"MIT"
] | null | null | null |
CarlosCardona_Ejercicio10.py
|
CarlosCardona953/Carlos-Cardona-Ejercicio10-LAB
|
4a07fc9c2e7ba4a2da5ad1ffea5ee58af03a4e1d
|
[
"MIT"
] | null | null | null |
CarlosCardona_Ejercicio10.py
|
CarlosCardona953/Carlos-Cardona-Ejercicio10-LAB
|
4a07fc9c2e7ba4a2da5ad1ffea5ee58af03a4e1d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[9]:
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import csv
import urllib
from io import StringIO
from io import BytesIO
from datetime import datetime
import scipy.signal as signal
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
os.system("curl https://raw.githubusercontent.com/ComputoCienciasUniandes/FISI2029-201910/master/Seccion_1/Fourier/Datos/transacciones2008.txt https://raw.githubusercontent.com/ComputoCienciasUniandes/FISI2029-201910/master/Seccion_1/Fourier/Datos/transacciones2009.txt https://raw.githubusercontent.com/ComputoCienciasUniandes/FISI2029-201910/master/Seccion_1/Fourier/Datos/transacciones2010.txt > Transacciones.txt")
data = pd.read_csv("Transacciones.txt",delimiter = ";", header = None, decimal=",")
fecha = data[0].str[0:-8:1]
hora = data[1].str[10:]
tiempo = fecha+hora
tiempo =np.array(pd.to_datetime(tiempo,format='%d/%m/%Y %H:%M:%S'))
dinero = np.array(data[2])
data.set_index(tiempo,inplace=True)
# In[11]:
plt.figure(figsize=(20,7))
plt.plot(tiempo,dinero,label="Dinero")
plt.legend()
plt.savefig("Transacciones2008-2010.png")
# In[ ]:
N = 2 # Filter order
Wn = 0.0001 # Cutoff frequency (as a fraction of the Nyquist frequency)
B, A = signal.butter(N, Wn)
dinero_filtrado = signal.filtfilt(B,A, dinero)
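# Note: with no sampling rate given, Wn is normalized to the Nyquist frequency, and
# filtfilt applies the low-pass filter forward and backward for a zero-phase result.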
# In[ ]:
fig = plt.figure(figsize=(30,10))
ax1 = fig.add_subplot(211)
plt.plot(tiempo,dinero, 'b-')
plt.plot(tiempo,dinero_filtrado, 'r-',linewidth=2)
plt.ylabel(r"Dinero $")
plt.legend(['Original','Filtrado'])
plt.title("Transacciones")
ax1.axes.get_xaxis().set_visible(False)
ax1 = fig.add_subplot(212)
plt.plot(tiempo,dinero-dinero_filtrado, 'b-')
plt.ylabel(r"Dinero $")
plt.xlabel("Fecha")
plt.legend(['Residuales'])
plt.savefig("Transacciones con filtro.png")
# In[ ]:
plt.figure(figsize=(20,7))
ruido=dinero-dinero_filtrado
corr=signal.correlate(ruido,ruido,mode="full")
plt.plot(corr[len(corr)//2:])
plt.savefig("Correlacion.png")
| 24.439024
| 418
| 0.749501
|
8df400684ea7581b28972cee8036c41834e03c69
| 177
|
py
|
Python
|
tests/utilities/data_source.py
|
fossabot/sample-excel
|
07644f8d7199f479a50533b3a8d78ac3be3b5ebf
|
[
"MIT"
] | null | null | null |
tests/utilities/data_source.py
|
fossabot/sample-excel
|
07644f8d7199f479a50533b3a8d78ac3be3b5ebf
|
[
"MIT"
] | 3
|
2019-09-04T09:47:34.000Z
|
2021-03-01T02:29:51.000Z
|
tests/utilities/data_source.py
|
fossabot/sample-excel
|
07644f8d7199f479a50533b3a8d78ac3be3b5ebf
|
[
"MIT"
] | 2
|
2021-03-01T02:27:04.000Z
|
2022-03-02T11:37:54.000Z
|
from pathlib import Path
class DataSource:
@classmethod
def data_path(cls, book_name) -> Path:
return Path(__file__).parent.parent.parent.joinpath(book_name)
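# Usage sketch (assumes a workbook such as "sample.xlsx" sits at the repository root):
# book = DataSource.data_path("sample.xlsx")
# print(book.resolve())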
| 19.666667
| 70
| 0.723164
|
bcf7964a07071a350f968fce6123783b8faa9b51
| 2,973
|
py
|
Python
|
pallete-gen.py
|
s0rg/telegram-pywal
|
7ec0e4f363a62ed72984b49d9bf1676e05cdd9fc
|
[
"MIT"
] | null | null | null |
pallete-gen.py
|
s0rg/telegram-pywal
|
7ec0e4f363a62ed72984b49d9bf1676e05cdd9fc
|
[
"MIT"
] | null | null | null |
pallete-gen.py
|
s0rg/telegram-pywal
|
7ec0e4f363a62ed72984b49d9bf1676e05cdd9fc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
import os
import os.path
CONSTANTS = "colors.tpg-constants"
OUT_NAME = "colors.tdesktop-palette"
WAL_COLORS = os.path.expanduser("~/.cache/wal/colors")
OUT_DIR = os.path.expanduser("~/.cache/telegram-palette-gen")
SCALES = [
("20", float(1/5)),
("30", float(3/10)),
("40", float(2/5)),
("50", float(1/2)),
("60", float(3/5)),
]
ALPHAS = {
0: [0x18, 0x3C, 0x03, 0x7F, 0xB0, 0xCC, 0x00, 0x54, 0x56, 0x74, 0x40, 0x4C, 0xB2],
1: [0x10, 0x33],
2: [0xC8, 0x4C, 0x7F, 0x00, 0x87],
3: [0x64],
7: [0x53, 0x7A, 0x1A, 0x2C, 0x7F, 0xBC, 0x4C, 0x6B, 0x14],
}
def clr2hex(c):
return "#{:02X}{:02X}{:02X}".format(c[0], c[1], c[2])
def clr2hex_alpha(c, a):
return "#{:02X}{:02X}{:02X}{:02X}".format(c[0], c[1], c[2], a)
def color(idx, c):
return "color{}: {};".format(idx, clr2hex(c))
def color_light(idx, scale, c):
return "colorLighter{}_{}: {};".format(idx, scale, clr2hex(c))
def color_dark(idx, scale, c):
return "colorDarker{}_{}: {};".format(idx, scale, clr2hex(c))
def color_alpha(idx, alpha, c):
return "colorAlpha{}_{:02x}: {};".format(idx, alpha, clr2hex_alpha(c, alpha))
def hex2clr(h):
c = h[1:]
return (
int(c[0:2], 16),
int(c[2:4], 16),
int(c[4:6], 16),
)
def clamp_byte(v, vmin, vmax):
if v < vmin: v = vmin
elif v > vmax: v = vmax
return v
def scale_byte(b, s):
f = float(b)
v = int(f + (f*s))
return clamp_byte(v, 0, 255)
def scale_color(c, s):
return (
scale_byte(c[0], s),
scale_byte(c[1], s),
scale_byte(c[2], s),
)
def load_colors(colors_path, limit=9):
with open(colors_path, mode="rt", encoding="utf-8") as fd:
return [hex2clr(fd.readline()) for _ in range(limit)]
def load_constants(name=CONSTANTS):
cpath = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
name,
)
with open(cpath, mode="rt", encoding="utf-8") as fd:
return fd.read()
def dump_colors(values, constants):
if not os.path.exists(OUT_DIR):
os.mkdir(OUT_DIR)
opath = os.path.join(OUT_DIR, OUT_NAME)
if os.path.exists(opath):
os.remove(opath)
with open(opath, mode="wt", encoding="utf-8") as fd:
fd.write("\n".join(values))
fd.write("\n\n" + constants)
def main(args):
if not os.path.exists(WAL_COLORS):
print("no wal colors cache has been found!")
return 1
values = []
for i, c in enumerate(load_colors(WAL_COLORS)):
values.append(color(i, c))
for n, v in SCALES:
values.append(color_light(i, n, scale_color(c, v)))
values.append(color_dark(i, n, scale_color(c, -v)))
if i not in ALPHAS:
continue
for a in ALPHAS[i]:
values.append(color_alpha(i, a, c))
dump_colors(values, load_constants())
print("[+] OK")
return 0
sys.exit(main(sys.argv))
| 21.543478
| 86
| 0.572822
|
3b8958c0b43fda31d0f846a4d571c3e73120f367
| 4,546
|
py
|
Python
|
utils.py
|
foamliu/CRNN
|
d74ea032d5daa1d6385c0c3ad3083d89c1740c3a
|
[
"MIT"
] | 6
|
2019-07-27T06:10:40.000Z
|
2020-10-17T06:43:15.000Z
|
utils.py
|
foamliu/CRNN
|
d74ea032d5daa1d6385c0c3ad3083d89c1740c3a
|
[
"MIT"
] | 2
|
2019-08-25T08:13:50.000Z
|
2019-08-25T08:28:10.000Z
|
utils.py
|
foamliu/CRNN
|
d74ea032d5daa1d6385c0c3ad3083d89c1740c3a
|
[
"MIT"
] | 1
|
2020-05-03T07:30:02.000Z
|
2020-05-03T07:30:02.000Z
|
import argparse
import logging
import os
import cv2 as cv
import torch
from config import max_target_len, dict, converter
def clip_gradient(optimizer, grad_clip):
"""
Clips gradients computed during backpropagation to avoid explosion of gradients.
:param optimizer: optimizer with the gradients to be clipped
:param grad_clip: clip value
"""
for group in optimizer.param_groups:
for param in group['params']:
if param.grad is not None:
param.grad.data.clamp_(-grad_clip, grad_clip)
def save_checkpoint(epoch, epochs_since_improvement, model, optimizer, hmean, is_best):
state = {'epoch': epoch,
'epochs_since_improvement': epochs_since_improvement,
'hmean': hmean,
'model': model,
'optimizer': optimizer}
# filename = 'checkpoint_' + str(epoch) + '_' + str(loss) + '.tar'
filename = 'checkpoint.tar'
torch.save(state, filename)
# If this checkpoint is the best so far, store a copy so it doesn't get overwritten by a worse checkpoint
if is_best:
torch.save(state, 'BEST_checkpoint.tar')
class AverageMeter(object):
"""
Keeps track of most recent, average, sum, and count of a metric.
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, shrink_factor):
"""
Shrinks learning rate by a specified factor.
:param optimizer: optimizer whose learning rate must be shrunk.
:param shrink_factor: factor in interval (0, 1) to multiply learning rate with.
"""
print("\nDECAYING learning rate.")
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * shrink_factor
print("The new learning rate is %f\n" % (optimizer.param_groups[0]['lr'],))
def get_learning_rate(optimizer):
return optimizer.param_groups[0]['lr']
def accuracy(inputs, input_lengths, labels, batch_size):
n_correct = 0
# print(preds.size())
_, inputs = inputs.max(2)
# print(preds.size())
# preds = preds.squeeze(2)
inputs = inputs.transpose(1, 0).contiguous().view(-1)
sim_preds = converter.decode(inputs.data, input_lengths.data, raw=False)
for pred, target in zip(sim_preds, labels):
if pred == target:
n_correct += 1
accuracy = n_correct / float(batch_size)
return accuracy
def parse_args():
parser = argparse.ArgumentParser(description='Train CRNN network')
# general
parser.add_argument('--optimizer', default='adam', help='optimizer')
parser.add_argument('--batch-size', type=int, default=64, help='batch size')
parser.add_argument('--lr', type=float, default=0.01, help='start learning rate')
parser.add_argument('--end-epoch', type=int, default=1000, help='training epoch size.')
# optimizer
parser.add_argument('--k', default=0.2, type=float,
help='tunable scalar multiply to learning rate')
parser.add_argument('--warmup_steps', default=4000, type=int,
help='warmup steps')
parser.add_argument('--checkpoint', type=str, default=None, help='checkpoint')
args = parser.parse_args()
return args
def get_logger():
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s %(levelname)s \t%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
def draw_str(dst, target, s):
x, y = target
cv.putText(dst, s, (x + 1, y + 1), cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness=2, lineType=cv.LINE_AA)
cv.putText(dst, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv.LINE_AA)
def ensure_folder(folder):
if not os.path.exists(folder):
os.makedirs(folder)
def encode_target(target):
return [dict[c] for c in target] + [0] * (max_target_len - len(target))
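# e.g. with max_target_len = 4 and dict = {'a': 1, 'b': 2} (hypothetical config values),
# encode_target('ab') returns [1, 2, 0, 0]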
def get_images_for_test():
from config import annotation_files
split = 'test'
print('loading {} annotation data...'.format('test'))
annotation_file = annotation_files[split]
with open(annotation_file, 'r') as file:
lines = file.readlines()
image_paths = [line.split(' ')[0] for line in lines]
return image_paths
| 31.569444
| 111
| 0.655301
|
f3a8bcbe8d37d2803c9a5830298d2943d22347d3
| 6,144
|
py
|
Python
|
minke/migrations/0003_auto_20190326_1648.py
|
django-minke/minke
|
72e6849a1f71d4597724613168d3902df91cbe5f
|
[
"BSD-3-Clause"
] | 2
|
2019-06-17T10:00:27.000Z
|
2019-11-20T11:57:25.000Z
|
minke/migrations/0003_auto_20190326_1648.py
|
thomst/django-minke
|
72e6849a1f71d4597724613168d3902df91cbe5f
|
[
"BSD-3-Clause"
] | 1
|
2020-01-07T13:27:41.000Z
|
2020-01-07T13:33:16.000Z
|
minke/migrations/0003_auto_20190326_1648.py
|
django-minke/minke
|
72e6849a1f71d4597724613168d3902df91cbe5f
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2019-03-26 16:48
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import minke.messages
import minke.utils
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('minke', '0002_auto_20180619_1703'),
]
operations = [
migrations.CreateModel(
name='BaseMessage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('level', models.CharField(choices=[('info', 'info'), ('warning', 'warning'), ('error', 'error')], max_length=128)),
('text', models.TextField()),
('html', models.TextField()),
],
),
migrations.CreateModel(
name='HostGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('comment', models.TextField(blank=True, null=True)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='MinkeSession',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('session_name', models.CharField(max_length=128)),
('session_verbose_name', models.CharField(max_length=128)),
('session_description', models.TextField(blank=True, null=True)),
('session_status', models.CharField(choices=[('success', 'success'), ('warning', 'warning'), ('error', 'error')], max_length=128)),
('session_data', minke.utils.JSONField(blank=True, null=True)),
('minkeobj_id', models.PositiveIntegerField()),
('current', models.BooleanField(default=True)),
('proc_status', models.CharField(choices=[('initialized', 'initialized'), ('running', 'running'), ('done', 'done'), ('aborted', 'aborted')], max_length=128)),
('start_time', models.DateTimeField(blank=True, null=True)),
('end_time', models.DateTimeField(blank=True, null=True)),
('run_time', models.DurationField(blank=True, null=True)),
('minkeobj_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterModelOptions(
name='host',
options={'ordering': ['name']},
),
migrations.RenameField(
model_name='host',
old_name='host',
new_name='name',
),
migrations.RemoveField(
model_name='host',
name='hoststring',
),
migrations.RemoveField(
model_name='host',
name='locked',
),
migrations.RemoveField(
model_name='host',
name='port',
),
migrations.RemoveField(
model_name='host',
name='user',
),
migrations.AddField(
model_name='host',
name='comment',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='host',
name='lock',
field=models.CharField(blank=True, max_length=20, null=True),
),
migrations.AddField(
model_name='host',
name='username',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='host',
name='verbose_name',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='host',
name='hostname',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='basemessage',
name='session',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='messages', to='minke.MinkeSession'),
),
migrations.CreateModel(
name='Message',
fields=[
],
options={
'proxy': True,
'indexes': [],
},
bases=(minke.messages.ProxyMixin, 'minke.basemessage'),
),
migrations.AddField(
model_name='host',
name='group',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='minke.HostGroup'),
),
migrations.CreateModel(
name='PreMessage',
fields=[
],
options={
'proxy': True,
'indexes': [],
},
bases=('minke.message',),
),
migrations.CreateModel(
name='ExceptionMessage',
fields=[
],
options={
'proxy': True,
'indexes': [],
},
bases=('minke.premessage',),
),
migrations.CreateModel(
name='ExecutionMessage',
fields=[
],
options={
'proxy': True,
'indexes': [],
},
bases=('minke.premessage',),
),
migrations.CreateModel(
name='TableMessage',
fields=[
],
options={
'proxy': True,
'indexes': [],
},
bases=('minke.premessage',),
),
]
| 35.929825
| 174
| 0.519368
|
2d5c5cfe6c3dd5de2920cf1f6f2567330181e41c
| 28,234
|
py
|
Python
|
research/object_detection/builders/preprocessor_builder_test.py
|
akineeic/models
|
2912042352009c9993dc05403624100bfe42d9c1
|
[
"Apache-2.0"
] | 18
|
2022-01-14T09:58:27.000Z
|
2022-01-14T09:58:37.000Z
|
research/object_detection/builders/preprocessor_builder_test.py
|
yangxl-2014-fe/models
|
11ea5237818e791a5717716d5413977f4c4db1e3
|
[
"Apache-2.0"
] | 5
|
2020-10-01T09:02:34.000Z
|
2021-02-21T12:50:11.000Z
|
research/object_detection/builders/preprocessor_builder_test.py
|
yangxl-2014-fe/models
|
11ea5237818e791a5717716d5413977f4c4db1e3
|
[
"Apache-2.0"
] | 8
|
2019-06-06T20:37:15.000Z
|
2022-03-04T13:54:38.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for preprocessor_builder."""
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import preprocessor_builder
from object_detection.core import preprocessor
from object_detection.protos import preprocessor_pb2
class PreprocessorBuilderTest(tf.test.TestCase):
def assert_dictionary_close(self, dict1, dict2):
"""Helper to check if two dicts with floatst or integers are close."""
self.assertEqual(sorted(dict1.keys()), sorted(dict2.keys()))
for key in dict1:
value = dict1[key]
if isinstance(value, float):
self.assertAlmostEqual(value, dict2[key])
else:
self.assertEqual(value, dict2[key])
def test_build_normalize_image(self):
preprocessor_text_proto = """
normalize_image {
original_minval: 0.0
original_maxval: 255.0
target_minval: -1.0
target_maxval: 1.0
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.normalize_image)
self.assertEqual(args, {
'original_minval': 0.0,
'original_maxval': 255.0,
'target_minval': -1.0,
'target_maxval': 1.0,
})
def test_build_random_horizontal_flip(self):
preprocessor_text_proto = """
random_horizontal_flip {
keypoint_flip_permutation: 1
keypoint_flip_permutation: 0
keypoint_flip_permutation: 2
keypoint_flip_permutation: 3
keypoint_flip_permutation: 5
keypoint_flip_permutation: 4
probability: 0.5
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_horizontal_flip)
self.assertEqual(args, {'keypoint_flip_permutation': (1, 0, 2, 3, 5, 4),
'probability': 0.5})
def test_build_random_vertical_flip(self):
preprocessor_text_proto = """
random_vertical_flip {
keypoint_flip_permutation: 1
keypoint_flip_permutation: 0
keypoint_flip_permutation: 2
keypoint_flip_permutation: 3
keypoint_flip_permutation: 5
keypoint_flip_permutation: 4
probability: 0.5
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_vertical_flip)
self.assertEqual(args, {'keypoint_flip_permutation': (1, 0, 2, 3, 5, 4),
'probability': 0.5})
def test_build_random_rotation90(self):
preprocessor_text_proto = """
random_rotation90 {
keypoint_rot_permutation: 3
keypoint_rot_permutation: 0
keypoint_rot_permutation: 1
keypoint_rot_permutation: 2
probability: 0.5
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_rotation90)
self.assertEqual(args, {'keypoint_rot_permutation': (3, 0, 1, 2),
'probability': 0.5})
def test_build_random_pixel_value_scale(self):
preprocessor_text_proto = """
random_pixel_value_scale {
minval: 0.8
maxval: 1.2
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_pixel_value_scale)
self.assert_dictionary_close(args, {'minval': 0.8, 'maxval': 1.2})
def test_build_random_image_scale(self):
preprocessor_text_proto = """
random_image_scale {
min_scale_ratio: 0.8
max_scale_ratio: 2.2
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_image_scale)
self.assert_dictionary_close(args, {'min_scale_ratio': 0.8,
'max_scale_ratio': 2.2})
def test_build_random_rgb_to_gray(self):
preprocessor_text_proto = """
random_rgb_to_gray {
probability: 0.8
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_rgb_to_gray)
self.assert_dictionary_close(args, {'probability': 0.8})
def test_build_random_adjust_brightness(self):
preprocessor_text_proto = """
random_adjust_brightness {
max_delta: 0.2
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_adjust_brightness)
self.assert_dictionary_close(args, {'max_delta': 0.2})
def test_build_random_adjust_contrast(self):
preprocessor_text_proto = """
random_adjust_contrast {
min_delta: 0.7
max_delta: 1.1
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_adjust_contrast)
self.assert_dictionary_close(args, {'min_delta': 0.7, 'max_delta': 1.1})
def test_build_random_adjust_hue(self):
preprocessor_text_proto = """
random_adjust_hue {
max_delta: 0.01
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_adjust_hue)
self.assert_dictionary_close(args, {'max_delta': 0.01})
def test_build_random_adjust_saturation(self):
preprocessor_text_proto = """
random_adjust_saturation {
min_delta: 0.75
max_delta: 1.15
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_adjust_saturation)
self.assert_dictionary_close(args, {'min_delta': 0.75, 'max_delta': 1.15})
def test_build_random_distort_color(self):
preprocessor_text_proto = """
random_distort_color {
color_ordering: 1
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_distort_color)
self.assertEqual(args, {'color_ordering': 1})
def test_build_random_jitter_boxes(self):
preprocessor_text_proto = """
random_jitter_boxes {
ratio: 0.1
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_jitter_boxes)
self.assert_dictionary_close(args, {'ratio': 0.1})
def test_build_random_crop_image(self):
preprocessor_text_proto = """
random_crop_image {
min_object_covered: 0.75
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.25
max_area: 0.875
overlap_thresh: 0.5
clip_boxes: False
random_coef: 0.125
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_crop_image)
self.assertEqual(args, {
'min_object_covered': 0.75,
'aspect_ratio_range': (0.75, 1.5),
'area_range': (0.25, 0.875),
'overlap_thresh': 0.5,
'clip_boxes': False,
'random_coef': 0.125,
})
def test_build_random_pad_image(self):
preprocessor_text_proto = """
random_pad_image {
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_pad_image)
self.assertEqual(args, {
'min_image_size': None,
'max_image_size': None,
'pad_color': None,
})
def test_build_random_absolute_pad_image(self):
preprocessor_text_proto = """
random_absolute_pad_image {
max_height_padding: 50
max_width_padding: 100
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_absolute_pad_image)
self.assertEqual(args, {
'max_height_padding': 50,
'max_width_padding': 100,
'pad_color': None,
})
def test_build_random_crop_pad_image(self):
preprocessor_text_proto = """
random_crop_pad_image {
min_object_covered: 0.75
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.25
max_area: 0.875
overlap_thresh: 0.5
clip_boxes: False
random_coef: 0.125
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_crop_pad_image)
self.assertEqual(args, {
'min_object_covered': 0.75,
'aspect_ratio_range': (0.75, 1.5),
'area_range': (0.25, 0.875),
'overlap_thresh': 0.5,
'clip_boxes': False,
'random_coef': 0.125,
'pad_color': None,
})
def test_build_random_crop_pad_image_with_optional_parameters(self):
preprocessor_text_proto = """
random_crop_pad_image {
min_object_covered: 0.75
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.25
max_area: 0.875
overlap_thresh: 0.5
clip_boxes: False
random_coef: 0.125
min_padded_size_ratio: 0.5
min_padded_size_ratio: 0.75
max_padded_size_ratio: 0.5
max_padded_size_ratio: 0.75
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_crop_pad_image)
self.assertEqual(args, {
'min_object_covered': 0.75,
'aspect_ratio_range': (0.75, 1.5),
'area_range': (0.25, 0.875),
'overlap_thresh': 0.5,
'clip_boxes': False,
'random_coef': 0.125,
'min_padded_size_ratio': (0.5, 0.75),
'max_padded_size_ratio': (0.5, 0.75),
'pad_color': None,
})
def test_build_random_crop_to_aspect_ratio(self):
preprocessor_text_proto = """
random_crop_to_aspect_ratio {
aspect_ratio: 0.85
overlap_thresh: 0.35
clip_boxes: False
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_crop_to_aspect_ratio)
self.assert_dictionary_close(args, {'aspect_ratio': 0.85,
'overlap_thresh': 0.35,
'clip_boxes': False})
def test_build_random_black_patches(self):
preprocessor_text_proto = """
random_black_patches {
max_black_patches: 20
probability: 0.95
size_to_image_ratio: 0.12
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_black_patches)
self.assert_dictionary_close(args, {'max_black_patches': 20,
'probability': 0.95,
'size_to_image_ratio': 0.12})
def test_build_random_jpeg_quality(self):
preprocessor_text_proto = """
random_jpeg_quality {
random_coef: 0.5
min_jpeg_quality: 40
max_jpeg_quality: 90
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Parse(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_jpeg_quality)
self.assert_dictionary_close(args, {'random_coef': 0.5,
'min_jpeg_quality': 40,
'max_jpeg_quality': 90})
def test_build_random_downscale_to_target_pixels(self):
preprocessor_text_proto = """
random_downscale_to_target_pixels {
random_coef: 0.5
min_target_pixels: 200
max_target_pixels: 900
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Parse(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_downscale_to_target_pixels)
self.assert_dictionary_close(args, {
'random_coef': 0.5,
'min_target_pixels': 200,
'max_target_pixels': 900
})
def test_build_random_patch_gaussian(self):
preprocessor_text_proto = """
random_patch_gaussian {
random_coef: 0.5
min_patch_size: 10
max_patch_size: 300
min_gaussian_stddev: 0.2
max_gaussian_stddev: 1.5
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Parse(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_patch_gaussian)
self.assert_dictionary_close(args, {
'random_coef': 0.5,
'min_patch_size': 10,
'max_patch_size': 300,
'min_gaussian_stddev': 0.2,
'max_gaussian_stddev': 1.5
})
def test_auto_augment_image(self):
preprocessor_text_proto = """
autoaugment_image {
policy_name: 'v0'
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.autoaugment_image)
self.assert_dictionary_close(args, {'policy_name': 'v0'})
def test_drop_label_probabilistically(self):
preprocessor_text_proto = """
drop_label_probabilistically{
label: 2
drop_probability: 0.5
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.drop_label_probabilistically)
self.assert_dictionary_close(args, {
'dropped_label': 2,
'drop_probability': 0.5
})
def test_remap_labels(self):
preprocessor_text_proto = """
remap_labels{
original_labels: 1
original_labels: 2
new_label: 3
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.remap_labels)
self.assert_dictionary_close(args, {
'original_labels': [1, 2],
'new_label': 3
})
def test_build_random_resize_method(self):
preprocessor_text_proto = """
random_resize_method {
target_height: 75
target_width: 100
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_resize_method)
self.assert_dictionary_close(args, {'target_size': [75, 100]})
def test_build_scale_boxes_to_pixel_coordinates(self):
preprocessor_text_proto = """
scale_boxes_to_pixel_coordinates {}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.scale_boxes_to_pixel_coordinates)
self.assertEqual(args, {})
def test_build_resize_image(self):
preprocessor_text_proto = """
resize_image {
new_height: 75
new_width: 100
method: BICUBIC
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.resize_image)
self.assertEqual(args, {'new_height': 75,
'new_width': 100,
'method': tf.image.ResizeMethod.BICUBIC})
def test_build_rgb_to_gray(self):
preprocessor_text_proto = """
rgb_to_gray {}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.rgb_to_gray)
self.assertEqual(args, {})
def test_build_subtract_channel_mean(self):
preprocessor_text_proto = """
subtract_channel_mean {
means: [1.0, 2.0, 3.0]
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.subtract_channel_mean)
self.assertEqual(args, {'means': [1.0, 2.0, 3.0]})
def test_random_self_concat_image(self):
preprocessor_text_proto = """
random_self_concat_image {
concat_vertical_probability: 0.5
concat_horizontal_probability: 0.25
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_self_concat_image)
self.assertEqual(args, {'concat_vertical_probability': 0.5,
'concat_horizontal_probability': 0.25})
def test_build_ssd_random_crop(self):
preprocessor_text_proto = """
ssd_random_crop {
operations {
min_object_covered: 0.0
min_aspect_ratio: 0.875
max_aspect_ratio: 1.125
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.0
clip_boxes: False
random_coef: 0.375
}
operations {
min_object_covered: 0.25
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.25
clip_boxes: True
random_coef: 0.375
}
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.ssd_random_crop)
self.assertEqual(args, {'min_object_covered': [0.0, 0.25],
'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)],
'area_range': [(0.5, 1.0), (0.5, 1.0)],
'overlap_thresh': [0.0, 0.25],
'clip_boxes': [False, True],
'random_coef': [0.375, 0.375]})
def test_build_ssd_random_crop_empty_operations(self):
preprocessor_text_proto = """
ssd_random_crop {
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.ssd_random_crop)
self.assertEqual(args, {})
def test_build_ssd_random_crop_pad(self):
preprocessor_text_proto = """
ssd_random_crop_pad {
operations {
min_object_covered: 0.0
min_aspect_ratio: 0.875
max_aspect_ratio: 1.125
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.0
clip_boxes: False
random_coef: 0.375
min_padded_size_ratio: [1.0, 1.0]
max_padded_size_ratio: [2.0, 2.0]
pad_color_r: 0.5
pad_color_g: 0.5
pad_color_b: 0.5
}
operations {
min_object_covered: 0.25
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.25
clip_boxes: True
random_coef: 0.375
min_padded_size_ratio: [1.0, 1.0]
max_padded_size_ratio: [2.0, 2.0]
pad_color_r: 0.5
pad_color_g: 0.5
pad_color_b: 0.5
}
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.ssd_random_crop_pad)
self.assertEqual(args, {'min_object_covered': [0.0, 0.25],
'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)],
'area_range': [(0.5, 1.0), (0.5, 1.0)],
'overlap_thresh': [0.0, 0.25],
'clip_boxes': [False, True],
'random_coef': [0.375, 0.375],
'min_padded_size_ratio': [(1.0, 1.0), (1.0, 1.0)],
'max_padded_size_ratio': [(2.0, 2.0), (2.0, 2.0)],
'pad_color': [(0.5, 0.5, 0.5), (0.5, 0.5, 0.5)]})
def test_build_ssd_random_crop_fixed_aspect_ratio(self):
preprocessor_text_proto = """
ssd_random_crop_fixed_aspect_ratio {
operations {
min_object_covered: 0.0
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.0
clip_boxes: False
random_coef: 0.375
}
operations {
min_object_covered: 0.25
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.25
clip_boxes: True
random_coef: 0.375
}
aspect_ratio: 0.875
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.ssd_random_crop_fixed_aspect_ratio)
self.assertEqual(args, {'min_object_covered': [0.0, 0.25],
'aspect_ratio': 0.875,
'area_range': [(0.5, 1.0), (0.5, 1.0)],
'overlap_thresh': [0.0, 0.25],
'clip_boxes': [False, True],
'random_coef': [0.375, 0.375]})
def test_build_ssd_random_crop_pad_fixed_aspect_ratio(self):
preprocessor_text_proto = """
ssd_random_crop_pad_fixed_aspect_ratio {
operations {
min_object_covered: 0.0
min_aspect_ratio: 0.875
max_aspect_ratio: 1.125
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.0
clip_boxes: False
random_coef: 0.375
}
operations {
min_object_covered: 0.25
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.25
clip_boxes: True
random_coef: 0.375
}
aspect_ratio: 0.875
min_padded_size_ratio: [1.0, 1.0]
max_padded_size_ratio: [2.0, 2.0]
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function,
preprocessor.ssd_random_crop_pad_fixed_aspect_ratio)
self.assertEqual(args, {'min_object_covered': [0.0, 0.25],
'aspect_ratio': 0.875,
'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)],
'area_range': [(0.5, 1.0), (0.5, 1.0)],
'overlap_thresh': [0.0, 0.25],
'clip_boxes': [False, True],
'random_coef': [0.375, 0.375],
'min_padded_size_ratio': (1.0, 1.0),
'max_padded_size_ratio': (2.0, 2.0)})
def test_build_normalize_image_convert_class_logits_to_softmax(self):
preprocessor_text_proto = """
convert_class_logits_to_softmax {
temperature: 2
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.convert_class_logits_to_softmax)
self.assertEqual(args, {'temperature': 2})
def test_random_crop_by_scale(self):
preprocessor_text_proto = """
random_square_crop_by_scale {
scale_min: 0.25
scale_max: 2.0
num_scales: 8
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_square_crop_by_scale)
self.assertEqual(args, {
'scale_min': 0.25,
'scale_max': 2.0,
'num_scales': 8,
'max_border': 128
})
def test_adjust_gamma(self):
preprocessor_text_proto = """
adjust_gamma {
gamma: 2.2
gain: 2.0
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Parse(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.adjust_gamma)
self.assert_dictionary_close(args, {'gamma': 2.2, 'gain': 2.0})
if __name__ == '__main__':
tf.test.main()
| 36.572539
| 80
| 0.680456
|
4096fabcc7fe12d59d9828b5f33d658e13899a07
| 9,741
|
py
|
Python
|
src/tequila/circuit/gradient.py
|
dwierichs/tequila
|
3df09f7e710314237aa5474476b1b262293e7873
|
[
"MIT"
] | null | null | null |
src/tequila/circuit/gradient.py
|
dwierichs/tequila
|
3df09f7e710314237aa5474476b1b262293e7873
|
[
"MIT"
] | null | null | null |
src/tequila/circuit/gradient.py
|
dwierichs/tequila
|
3df09f7e710314237aa5474476b1b262293e7873
|
[
"MIT"
] | null | null | null |
from tequila.circuit.compiler import Compiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable,\
assign_variable, identity, VectorObjective
from tequila import TequilaException
from tequila.simulators.simulator_api import compile
import typing
import copy
from numpy import pi
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective,VectorObjective], variable: Variable = None, no_compile=False, *args, **kwargs):
'''
wrapper function for getting the gradients of Objectives,ExpectationValues, Unitaries (including single gates), and Transforms.
:param obj (QCircuit,ParametrizedGateImpl,Objective,ExpectationValue,Transform,Variable): structure to be differentiated
:param variables (list of Variable): parameter with respect to which obj should be differentiated.
default None: total gradient.
return: dictionary of Objectives, if called on gate, circuit, exp.value, or objective; if Variable or Transform, returns number.
'''
if variable is None:
# None means that all components are created
variables = objective.extract_variables()
result = {}
if len(variables) == 0:
raise TequilaException("Error in gradient: Objective has no variables")
for k in variables:
assert (k is not None)
result[k] = grad(objective, k, no_compile=no_compile)
return result
else:
variable = assign_variable(variable)
if variable not in objective.extract_variables():
return 0.0
if no_compile:
compiled = objective
else:
compiler = Compiler(multitarget=True,
trotterized=True,
hadamard_power=True,
power=True,
controlled_phase=True,
controlled_rotation=True,
gradient_mode=True)
compiled = compiler(objective, variables=[variable])
if variable not in compiled.extract_variables():
raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
if isinstance(objective, ExpectationValueImpl):
return __grad_expectationvalue(E=objective, variable=variable)
elif objective.is_expectationvalue():
return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
elif isinstance(compiled, Objective) or isinstance(compiled, VectorObjective):
return __grad_objective(objective=compiled, variable=variable)
else:
raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: typing.Union[Objective, VectorObjective], variable: Variable):
if isinstance(objective, VectorObjective):
return __grad_vector_objective(objective, variable)
else:
args = objective.args
transformation = objective.transformation
dO = None
processed_expectationvalues = {}
for i, arg in enumerate(args):
if __AUTOGRAD__BACKEND__ == "jax":
df = jax.grad(transformation, argnums=i)
elif __AUTOGRAD__BACKEND__ == "autograd":
df = jax.grad(transformation, argnum=i)
else:
raise TequilaException("Can't differentiate without autograd or jax")
# We can detect one simple case where the outer derivative is const=1
if transformation is None or transformation == identity:
outer = 1.0
else:
outer = Objective(args=args, transformation=df)
if hasattr(arg, "U"):
# save redundancies
if arg in processed_expectationvalues:
inner = processed_expectationvalues[arg]
else:
inner = __grad_inner(arg=arg, variable=variable)
processed_expectationvalues[arg] = inner
else:
# this means this inner derivative is purely variable dependent
inner = __grad_inner(arg=arg, variable=variable)
if inner == 0.0:
# don't pile up zero expectationvalues
continue
if dO is None:
dO = outer * inner
else:
dO = dO + outer * inner
if dO is None:
raise TequilaException("caught None in __grad_objective")
return dO
def __grad_vector_objective(objective: typing.Union[Objective,VectorObjective], variable: Variable):
argsets = objective.argsets
transformations = objective._transformations
outputs = []
for pos in range(len(objective)):
args = argsets[pos]
transformation = transformations[pos]
dO = None
processed_expectationvalues = {}
for i, arg in enumerate(args):
if __AUTOGRAD__BACKEND__ == "jax":
df = jax.grad(transformation, argnums=i)
elif __AUTOGRAD__BACKEND__ == "autograd":
df = jax.grad(transformation, argnum=i)
else:
raise TequilaException("Can't differentiate without autograd or jax")
# We can detect one simple case where the outer derivative is const=1
if transformation is None or transformation == identity:
outer = 1.0
else:
outer = Objective(args=args, transformation=df)
if hasattr(arg, "U"):
# save redundancies
if arg in processed_expectationvalues:
inner = processed_expectationvalues[arg]
else:
inner = __grad_inner(arg=arg, variable=variable)
processed_expectationvalues[arg] = inner
else:
# this means this inner derivative is purely variable dependent
inner = __grad_inner(arg=arg, variable=variable)
if inner == 0.0:
# don't pile up zero expectationvalues
continue
if dO is None:
dO = outer * inner
else:
dO = dO + outer * inner
if dO is None:
dO = Objective()
outputs.append(dO)
if len(outputs) == 1:
return outputs[0]
return outputs
def __grad_inner(arg, variable):
'''
a modified loop over __grad_objective, which gets derivatives
    all the way down to variables, returning 1 or 0 when a variable is (isn't) identical to var.
:param arg: a transform or variable object, to be differentiated
:param variable: the Variable with respect to which par should be differentiated.
:ivar var: the string representation of variable
'''
assert (isinstance(variable, Variable))
if isinstance(arg, Variable):
if arg == variable:
return 1.0
else:
return 0.0
elif isinstance(arg, ExpectationValueImpl):
return __grad_expectationvalue(arg, variable=variable)
elif hasattr(arg, "abstract_expectationvalue"):
E = arg.abstract_expectationvalue
dE = __grad_expectationvalue(E, variable=variable)
return compile(dE, **arg._input_args)
else:
return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Implements the analytic partial derivative of a unitary as it appears in an expectation value. See the paper.
    :param E: the ExpectationValueImpl whose gradient should be obtained
    :param variable: the Variable with respect to which differentiation should be performed.
    :return: the derivative dE/dvariable as an Objective (without hamiltonian)
    '''
hamiltonian = E.H
unitary = E.U
if not (unitary.verify()):
raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
# fast return if possible
if variable not in unitary.extract_variables():
return 0.0
param_gates = unitary._parameter_map[variable]
dO = Objective()
for idx_g in param_gates:
idx, g = idx_g
dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
dO += dOinc
assert dO is not None
return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
'''
function for getting the gradients of directly differentiable gates. Expects precompiled circuits.
:param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
:param g: a parametrized: the gate being differentiated
:param i: Int: the position in unitary at which g appears
:param variable: Variable or String: the variable with respect to which gate g is being differentiated
:param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
is contained within an ExpectationValue
:return: an Objective, whose calculation yields the gradient of g w.r.t variable
'''
    # possibility for override in custom gate construction
if hasattr(g, "shifted_gates"):
inner_grad=__grad_inner(g.parameter, variable)
shifted = g.shifted_gates()
dOinc = Objective()
for x in shifted:
w,g = x
Ux = unitary.replace_gates(positions=[i], circuits=[g])
wx = w*inner_grad
Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
dOinc += wx*Ex
return dOinc
else:
raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
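# ---------------------------------------------------------------------------
# Editorial sketch (not part of tequila): a minimal numerical check of the
# two-term parameter-shift rule that __grad_shift_rule relies on, written in
# plain numpy. All names below (_rx, _expectation, _shift_rule_gradient) are
# hypothetical helpers used only for illustration.
# ---------------------------------------------------------------------------
def _shift_rule_selfcheck(theta=0.37):
    import numpy as np
    def _rx(angle):
        # RX(angle) = cos(angle/2) * I - i * sin(angle/2) * X
        c, s = np.cos(angle / 2.0), np.sin(angle / 2.0)
        return np.array([[c, -1j * s], [-1j * s, c]])
    def _expectation(angle):
        # <0| RX(angle)^dagger Z RX(angle) |0> = cos(angle)
        psi = _rx(angle) @ np.array([1.0, 0.0])
        z = np.diag([1.0, -1.0])
        return float(np.real(np.conj(psi) @ (z @ psi)))
    def _shift_rule_gradient(angle):
        # two shifted evaluations with shifts +/- pi/2 and weights +/- 1/2
        return 0.5 * (_expectation(angle + np.pi / 2) - _expectation(angle - np.pi / 2))
    # the analytic derivative of cos(theta) is -sin(theta)
    assert abs(_shift_rule_gradient(theta) - (-np.sin(theta))) < 1e-12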
| 39.278226
| 132
| 0.643979
|
be02a80e3c99fd462f2423d1b01ba2e370850bdd
| 40,744
|
py
|
Python
|
pytorch_lightning/trainer/connectors/accelerator_connector.py
|
neptune-ml/pytorch-lightning
|
3bcaed52454f3e6c3bce5513032e34302e5b1bb6
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/trainer/connectors/accelerator_connector.py
|
neptune-ml/pytorch-lightning
|
3bcaed52454f3e6c3bce5513032e34302e5b1bb6
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/trainer/connectors/accelerator_connector.py
|
neptune-ml/pytorch-lightning
|
3bcaed52454f3e6c3bce5513032e34302e5b1bb6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from collections import Counter
from typing import Dict, List, Optional, Union
import torch
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.accelerators.cpu import CPUAccelerator
from pytorch_lightning.accelerators.gpu import GPUAccelerator
from pytorch_lightning.accelerators.hpu import HPUAccelerator
from pytorch_lightning.accelerators.ipu import IPUAccelerator
from pytorch_lightning.accelerators.registry import AcceleratorRegistry
from pytorch_lightning.accelerators.tpu import TPUAccelerator
from pytorch_lightning.plugins import (
ApexMixedPrecisionPlugin,
CheckpointIO,
DeepSpeedPrecisionPlugin,
DoublePrecisionPlugin,
FullyShardedNativeMixedPrecisionPlugin,
HPUPrecisionPlugin,
IPUPrecisionPlugin,
NativeMixedPrecisionPlugin,
PLUGIN_INPUT,
PrecisionPlugin,
ShardedNativeMixedPrecisionPlugin,
TPUBf16PrecisionPlugin,
TPUPrecisionPlugin,
)
from pytorch_lightning.plugins.environments import (
BaguaEnvironment,
ClusterEnvironment,
KubeflowEnvironment,
LightningEnvironment,
LSFEnvironment,
SLURMEnvironment,
TorchElasticEnvironment,
)
from pytorch_lightning.plugins.layer_sync import LayerSync, NativeSyncBatchNorm
from pytorch_lightning.strategies import (
DDP2Strategy,
DDPFullyShardedStrategy,
DDPShardedStrategy,
DDPSpawnShardedStrategy,
DDPSpawnStrategy,
DDPStrategy,
DeepSpeedStrategy,
HorovodStrategy,
HPUParallelStrategy,
IPUStrategy,
SingleDeviceStrategy,
SingleHPUStrategy,
SingleTPUStrategy,
Strategy,
StrategyRegistry,
TPUSpawnStrategy,
)
from pytorch_lightning.utilities import (
_StrategyType,
AMPType,
device_parser,
LightningEnum,
rank_zero_deprecation,
rank_zero_info,
rank_zero_warn,
)
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _HOROVOD_AVAILABLE, _HPU_AVAILABLE, _IPU_AVAILABLE, _TPU_AVAILABLE
log = logging.getLogger(__name__)
if _HOROVOD_AVAILABLE:
import horovod.torch as hvd
class AcceleratorConnector:
def __init__(
self,
devices: Optional[Union[List[int], str, int]] = None,
num_nodes: int = 1,
accelerator: Optional[Union[str, Accelerator]] = None,
strategy: Optional[Union[str, Strategy]] = None,
plugins: Optional[Union[PLUGIN_INPUT, List[PLUGIN_INPUT]]] = None,
precision: Union[int, str] = 32,
amp_type: str = "native",
amp_level: Optional[str] = None,
sync_batchnorm: bool = False,
benchmark: Optional[bool] = None,
replace_sampler_ddp: bool = True,
deterministic: bool = False,
num_processes: Optional[int] = None, # deprecated
tpu_cores: Optional[Union[List[int], int]] = None, # deprecated
ipus: Optional[int] = None, # deprecated
gpus: Optional[Union[List[int], str, int]] = None, # deprecated
gpu_ids: Optional[List[int]] = None, # TODO can be removed
) -> None:
"""The AcceleratorConnector parses several Trainer arguments and instantiates the Strategy including other
components such as the Accelerator and Precision plugins.
A. accelerator flag could be:
1. strategy class (deprecated in 1.5 will be removed in 1.7)
2. strategy str (deprecated in 1.5 will be removed in 1.7)
3. accelerator class
4. accelerator str
5. accelerator auto
        B. strategy flag could be:
            1. strategy class
            2. strategy str registered with StrategyRegistry
            3. strategy str in the _strategy_type enum, which is listed in each strategy as
               backend (these are registered too, and _strategy_type could be deprecated)
C. plugins flag could be:
1. List of str, which could contain:
i. strategy str
ii. precision str (Not supported in the old accelerator_connector version)
iii. checkpoint_io str (Not supported in the old accelerator_connector version)
iv. cluster_environment str (Not supported in the old accelerator_connector version)
            2. List of classes, which could contain:
i. strategy class (deprecated in 1.5 will be removed in 1.7)
ii. precision class (should be removed, and precision flag should allow user pass classes)
iii. checkpoint_io class
iv. cluster_environment class
        Priorities when multiple flags are set:
            A. Class > str
            B. Strategy > Accelerator/precision/plugins
            C. TODO when multiple flags are set to the same thing
"""
if benchmark and deterministic:
rank_zero_warn(
"You passed `deterministic=True` and `benchmark=True`. Note that PyTorch ignores"
" torch.backends.cudnn.deterministic=True when torch.backends.cudnn.benchmark=True.",
)
self.benchmark = not deterministic if benchmark is None else benchmark
# TODO: move to gpu accelerator
torch.backends.cudnn.benchmark = self.benchmark
self.replace_sampler_ddp = replace_sampler_ddp
self._init_deterministic(deterministic)
# 1. Parsing flags
# Get registered strategies, built-in accelerators and precision plugins
self._registered_strategies = StrategyRegistry.available_strategies()
self._accelerator_types = AcceleratorRegistry.available_accelerators()
self._precision_types = ("16", "32", "64", "bf16", "mixed")
# Raise an exception if there are conflicts between flags
# Set each valid flag to `self._x_flag` after validation
# Example: If accelerator is set to a strategy type, set `self._strategy_flag = accelerator`.
# For devices: Assign gpus, ipus, etc. to the accelerator flag and devices flag
self._strategy_flag: Optional[Union[Strategy, str]] = None
self._accelerator_flag: Optional[Union[Accelerator, str]] = None
self._precision_flag: Optional[Union[int, str]] = None
self._precision_plugin_flag: Optional[PrecisionPlugin] = None
self._cluster_environment_flag: Optional[Union[ClusterEnvironment, str]] = None
self._parallel_devices: List[Union[int, torch.device]] = []
self._layer_sync: Optional[LayerSync] = NativeSyncBatchNorm() if sync_batchnorm else None
self.checkpoint_io: Optional[CheckpointIO] = None
self._amp_type_flag: Optional[LightningEnum] = None
self._amp_level_flag: Optional[str] = amp_level
self._check_config_and_set_final_flags(
strategy=strategy,
accelerator=accelerator,
precision=precision,
plugins=plugins,
amp_type=amp_type,
amp_level=amp_level,
sync_batchnorm=sync_batchnorm,
)
self._check_device_config_and_set_final_flags(
devices=devices, num_nodes=num_nodes, num_processes=num_processes, gpus=gpus, ipus=ipus, tpu_cores=tpu_cores
)
# 2. Instantiate Accelerator
# handle `auto` and `None`
self._set_accelerator_if_ipu_strategy_is_passed()
if self._accelerator_flag == "auto" or self._accelerator_flag is None:
self._accelerator_flag = self._choose_accelerator()
self._set_parallel_devices_and_init_accelerator()
# 3. Instantiate ClusterEnvironment
self.cluster_environment: ClusterEnvironment = self._choose_and_init_cluster_environment()
# 4. Instantiate Strategy - Part 1
if self._strategy_flag is None:
self._strategy_flag = self._choose_strategy()
# In specific cases, ignore user selection and fall back to a different strategy
self._check_strategy_and_fallback()
self._init_strategy()
# 5. Instantiate Precision Plugin
self.precision_plugin = self._check_and_init_precision()
# 6. Instantiate Strategy - Part 2
self._lazy_init_strategy()
def _init_deterministic(self, deterministic: bool) -> None:
self.deterministic = deterministic
torch.use_deterministic_algorithms(deterministic)
if deterministic:
# fixing non-deterministic part of horovod
# https://github.com/PyTorchLightning/pytorch-lightning/pull/1572/files#r420279383
os.environ["HOROVOD_FUSION_THRESHOLD"] = "0"
# https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
def _check_config_and_set_final_flags(
self,
strategy: Optional[Union[str, Strategy]],
accelerator: Optional[Union[str, Accelerator]],
precision: Union[int, str],
plugins: Optional[Union[PLUGIN_INPUT, List[PLUGIN_INPUT]]],
amp_type: str,
amp_level: Optional[str],
sync_batchnorm: bool,
) -> None:
"""This method checks:
1. strategy: strategy, accelerator and plugin can all be set to strategies
2. accelerator: if the value of the accelerator argument is a type of accelerator (instance or string),
set self._accelerator_flag accordingly. If the value is strategy related (instance or string),
it gets handled by 1.
3. precision: The final value of the precision flag may be determined either by the precision argument or
by a plugin instance.
4. plugins: a plugin could occur as a value of the strategy argument (handled by 1), or the precision
argument (handled by 3). We also extract the CheckpointIO and ClusterEnvironment plugins.
"""
if plugins is not None:
plugins = [plugins] if not isinstance(plugins, list) else plugins
if strategy is not None:
self._strategy_flag = strategy
if strategy == "ddp_cpu":
raise MisconfigurationException(
"`Trainer(strategy='ddp_cpu')` is not a valid strategy,"
" you can use `Trainer(strategy='ddp'|'ddp_spawn', accelerator='cpu')` instead."
)
if strategy == "tpu_spawn":
raise MisconfigurationException(
"`Trainer(strategy='tpu_spawn')` is not a valid strategy,"
" you can use `Trainer(strategy='ddp_spawn', accelerator='tpu')` instead."
)
# handle duplications and conflict
if isinstance(accelerator, Strategy) and strategy != accelerator:
raise MisconfigurationException(
f"Incompatible values set in `strategy` and `accelerator` arguments."
f"Received both strategy={strategy} and accelerator={accelerator}"
)
if isinstance(accelerator, str) and accelerator in self._registered_strategies and strategy != accelerator:
raise MisconfigurationException(
f"strategy {strategy} already set through `strategy` flag,"
f" but have also passed {accelerator} in through the accelerator flag."
)
if plugins:
for plugin in plugins:
if isinstance(plugin, Strategy):
raise MisconfigurationException(
f"You have passed `Trainer(strategy={strategy})`"
f" and you can only specify one strategy, but you have passed {plugin} as a plugin."
)
if isinstance(plugin, str) and plugin in self._registered_strategies:
raise MisconfigurationException(
f"You have passed `Trainer(strategy={strategy})`"
f" and you can only specify one strategy, but you have passed {plugin} as a plugin."
)
if accelerator is not None:
if accelerator in self._accelerator_types or accelerator == "auto" or isinstance(accelerator, Accelerator):
self._accelerator_flag = accelerator
elif accelerator in self._registered_strategies or isinstance(accelerator, Strategy):
rank_zero_deprecation(
f"Passing `Trainer(accelerator={accelerator!r})` has been deprecated"
f" in v1.5 and will be removed in v1.7. Use `Trainer(strategy={accelerator!r})` instead."
)
self._strategy_flag = accelerator
elif accelerator == "ddp_cpu" and not self._strategy_flag:
self._strategy_flag = accelerator
if precision is not None:
if str(precision) not in self._precision_types:
raise MisconfigurationException(
f"Precision {repr(precision)} is invalid. Allowed precision values: {self._precision_types}"
)
self._precision_flag = precision
if plugins:
plugins_flags_types: Dict[str, int] = Counter()
for plugin in plugins:
if isinstance(plugin, Strategy) or isinstance(plugin, str) and plugin in self._registered_strategies:
self._strategy_flag = plugin
rank_zero_deprecation(
f"Passing {plugin} `strategy` to the `plugins` flag in Trainer has been deprecated"
f" in v1.5 and will be removed in v1.7. Use `Trainer(strategy={plugin})` instead."
)
plugins_flags_types[Strategy.__name__] += 1
elif isinstance(plugin, PrecisionPlugin):
self._precision_plugin_flag = plugin
plugins_flags_types[PrecisionPlugin.__name__] += 1
elif isinstance(plugin, CheckpointIO):
self.checkpoint_io = plugin
plugins_flags_types[CheckpointIO.__name__] += 1
elif isinstance(plugin, ClusterEnvironment):
self._cluster_environment_flag = plugin
plugins_flags_types[ClusterEnvironment.__name__] += 1
elif isinstance(plugin, LayerSync):
if sync_batchnorm and not isinstance(plugin, NativeSyncBatchNorm):
raise MisconfigurationException(
f"You set `Trainer(sync_batchnorm=True)` and provided a `{plugin.__class__.__name__}`"
" plugin, but this is not allowed. Choose one or the other."
)
self._layer_sync = plugin
plugins_flags_types[NativeSyncBatchNorm.__name__] += 1
else:
raise MisconfigurationException(
f"Found invalid type for plugin {plugin}. Expected one of: PrecisionPlugin, "
"CheckpointIO, ClusterEnviroment, LayerSync, or Strategy."
)
duplicated_plugin_key = [k for k, v in plugins_flags_types.items() if v > 1]
if duplicated_plugin_key:
raise MisconfigurationException(
f"Received multiple values for {', '.join(duplicated_plugin_key)} flags in `plugins`."
" Expected one value for each type at most."
)
# handle the case when the user passes in a strategy instance which has an accelerator, precision,
# checkpoint io or cluster env set up
# TODO: @awaelchli improve the error messages below
if self._strategy_flag and isinstance(self._strategy_flag, Strategy):
if self._strategy_flag._accelerator:
if self._accelerator_flag:
raise MisconfigurationException(
"accelerator set through both strategy class and accelerator flag, choose one"
)
else:
self._accelerator_flag = self._strategy_flag._accelerator
if self._strategy_flag._precision_plugin:
# [RFC] handle precision plugin set up conflict?
if self._precision_plugin_flag:
raise MisconfigurationException("precision set through both strategy class and plugins, choose one")
else:
self._precision_plugin_flag = self._strategy_flag._precision_plugin
if self._strategy_flag._checkpoint_io:
if self.checkpoint_io:
raise MisconfigurationException(
"checkpoint_io set through both strategy class and plugins, choose one"
)
else:
self.checkpoint_io = self._strategy_flag._checkpoint_io
if getattr(self._strategy_flag, "cluster_environment", None):
if self._cluster_environment_flag:
raise MisconfigurationException(
"cluster_environment set through both strategy class and plugins, choose one"
)
else:
self._cluster_environment_flag = getattr(self._strategy_flag, "cluster_environment")
if hasattr(self._strategy_flag, "parallel_devices"):
if self._strategy_flag.parallel_devices:
if self._strategy_flag.parallel_devices[0].type == "cpu":
if self._accelerator_flag and self._accelerator_flag not in ("auto", "cpu"):
raise MisconfigurationException(
f"CPU parallel_devices set through {self._strategy_flag.__class__.__name__} class,"
f" but accelerator set to {self._accelerator_flag}, please choose one device type"
)
self._accelerator_flag = "cpu"
if self._strategy_flag.parallel_devices[0].type == "cuda":
if self._accelerator_flag and self._accelerator_flag not in ("auto", "gpu"):
raise MisconfigurationException(
f"GPU parallel_devices set through {self._strategy_flag.__class__.__name__} class,"
f" but accelerator set to {self._accelerator_flag}, please choose one device type"
)
self._accelerator_flag = "gpu"
self._parallel_devices = self._strategy_flag.parallel_devices
amp_type = amp_type if isinstance(amp_type, str) else None
self._amp_type_flag = AMPType.from_str(amp_type)
if amp_level is not None and self._amp_type_flag != AMPType.APEX:
raise MisconfigurationException(
f"You have asked for `amp_level={amp_level!r}` but it's only supported with `amp_backend='apex'`."
)
def _check_device_config_and_set_final_flags(
self,
devices: Optional[Union[List[int], str, int]],
num_nodes: int,
num_processes: Optional[int],
gpus: Optional[Union[List[int], str, int]],
ipus: Optional[int],
tpu_cores: Optional[Union[List[int], int]],
) -> None:
self._num_nodes_flag = int(num_nodes) if num_nodes is not None else 1
self._devices_flag = devices
# TODO: Delete this method when num_processes, gpus, ipus and tpu_cores gets removed
self._map_deprecated_devices_specfic_info_to_accelerator_and_device_flag(
devices, num_processes, gpus, ipus, tpu_cores
)
if self._devices_flag == "auto" and self._accelerator_flag is None:
raise MisconfigurationException(
f"You passed `devices={devices}` but haven't specified"
" `accelerator=('auto'|'tpu'|'gpu'|'ipu'|'cpu'|'hpu)` for the devices mapping"
)
def _map_deprecated_devices_specfic_info_to_accelerator_and_device_flag(
self,
devices: Optional[Union[List[int], str, int]],
num_processes: Optional[int],
gpus: Optional[Union[List[int], str, int]],
ipus: Optional[int],
tpu_cores: Optional[Union[List[int], str, int]],
) -> None:
"""Sets the `devices_flag` and `accelerator_flag` based on num_processes, gpus, ipus, tpu_cores."""
self._gpus: Optional[Union[List[int], str, int]] = gpus
self._tpu_cores: Optional[Union[List[int], str, int]] = tpu_cores
deprecated_devices_specific_flag = num_processes or gpus or ipus or tpu_cores
if deprecated_devices_specific_flag and deprecated_devices_specific_flag not in ([], 0, "0"):
if devices:
# TODO: @awaelchli improve error message
rank_zero_warn(
f"The flag `devices={devices}` will be ignored, "
f"instead the device specific number {deprecated_devices_specific_flag} will be used"
)
if [(num_processes is not None), (gpus is not None), (ipus is not None), (tpu_cores is not None)].count(
True
) > 1:
# TODO: @awaelchli improve error message
rank_zero_warn("more than one device specific flag has been set")
self._devices_flag = deprecated_devices_specific_flag
if self._accelerator_flag is None:
# set accelerator type based on num_processes, gpus, ipus, tpu_cores
if ipus:
self._accelerator_flag = "ipu"
if tpu_cores:
self._accelerator_flag = "tpu"
if gpus:
self._accelerator_flag = "gpu"
if num_processes:
self._accelerator_flag = "cpu"
def _set_accelerator_if_ipu_strategy_is_passed(self) -> None:
# current logic only apply to object config
# TODO this logic should apply to both str and object config
if isinstance(self._strategy_flag, IPUStrategy):
self._accelerator_flag = "ipu"
def _choose_accelerator(self) -> str:
"""Choose the accelerator type (str) based on availability when ``accelerator='auto'``."""
if self._accelerator_flag == "auto":
if _TPU_AVAILABLE:
return "tpu"
if _IPU_AVAILABLE:
return "ipu"
if _HPU_AVAILABLE:
return "hpu"
if torch.cuda.is_available() and torch.cuda.device_count() > 0:
return "gpu"
return "cpu"
def _set_parallel_devices_and_init_accelerator(self) -> None:
if isinstance(self._accelerator_flag, Accelerator):
self.accelerator: Accelerator = self._accelerator_flag
else:
assert self._accelerator_flag is not None
self._accelerator_flag = self._accelerator_flag.lower()
if self._accelerator_flag not in AcceleratorRegistry:
raise MisconfigurationException(
"When passing string value for the `accelerator` argument of `Trainer`,"
f" it can only be one of {self._accelerator_types}."
)
self.accelerator = AcceleratorRegistry.get(self._accelerator_flag)
if not self.accelerator.is_available():
available_accelerator = [
acc_str for acc_str in self._accelerator_types if AcceleratorRegistry.get(acc_str).is_available()
]
raise MisconfigurationException(
f"{self.accelerator.__class__.__qualname__} can not run on your system"
" since the accelerator is not available. The following accelerator(s)"
" is available and can be passed into `accelerator` argument of"
f" `Trainer`: {available_accelerator}."
)
self._set_devices_flag_if_auto_passed()
self._gpus = self._devices_flag if not self._gpus else self._gpus
self._tpu_cores = self._devices_flag if not self._tpu_cores else self._tpu_cores
self._devices_flag = self.accelerator.parse_devices(self._devices_flag)
if not self._parallel_devices:
self._parallel_devices = self.accelerator.get_parallel_devices(self._devices_flag)
def _set_devices_flag_if_auto_passed(self) -> None:
if self._devices_flag == "auto" or self._devices_flag is None:
self._devices_flag = self.accelerator.auto_device_count()
def _choose_and_init_cluster_environment(self) -> ClusterEnvironment:
if isinstance(self._cluster_environment_flag, ClusterEnvironment):
return self._cluster_environment_flag
if self._is_slurm_managing_tasks():
rank_zero_info("Multiprocessing is handled by SLURM.")
return SLURMEnvironment()
for env_type in (BaguaEnvironment, TorchElasticEnvironment, KubeflowEnvironment, LSFEnvironment):
if env_type.detect():
return env_type()
return LightningEnvironment()
def _is_slurm_managing_tasks(self) -> bool:
"""used by choosing cluster enviroment."""
if not SLURMEnvironment.detect() or SLURMEnvironment.job_name() == "bash":
return False
total_requested_devices = len(self._parallel_devices) * self._num_nodes_flag
num_slurm_tasks = int(os.environ["SLURM_NTASKS"], 0)
return num_slurm_tasks == total_requested_devices
def _choose_strategy(self) -> Union[Strategy, str]:
if self._accelerator_flag == "ipu":
return IPUStrategy.strategy_name
if self._accelerator_flag == "hpu":
if self._parallel_devices and len(self._parallel_devices) > 1:
return HPUParallelStrategy.strategy_name
else:
return SingleHPUStrategy(device=torch.device("hpu"))
if self._accelerator_flag == "tpu":
if self._parallel_devices and len(self._parallel_devices) > 1:
return TPUSpawnStrategy.strategy_name
else:
# TODO: lazy initialized device, then here could be self._strategy_flag = "single_tpu_device"
return SingleTPUStrategy(device=self._parallel_devices[0]) # type: ignore
if _HOROVOD_AVAILABLE and ("OMPI_COMM_WORLD_RANK" in os.environ or "HOROVOD_RANK" in os.environ):
return HorovodStrategy.strategy_name
if self._num_nodes_flag > 1:
return DDPStrategy.strategy_name
if len(self._parallel_devices) <= 1:
device = (
device_parser.determine_root_gpu_device(self._parallel_devices) # type: ignore
if self._accelerator_flag == "gpu"
else "cpu"
)
# TODO: lazy initialized device, then here could be self._strategy_flag = "single_device"
return SingleDeviceStrategy(device=device) # type: ignore
if len(self._parallel_devices) > 1:
return DDPSpawnStrategy.strategy_name
return DDPStrategy.strategy_name
def _check_strategy_and_fallback(self) -> None:
"""Checks edge cases when the strategy selection was a string input, and we need to fall back to a
different choice depending on other parameters or the environment."""
# current fallback and check logic only apply to user pass in str config and object config
# TODO this logic should apply to both str and object config
strategy_flag = "" if isinstance(self._strategy_flag, Strategy) else self._strategy_flag
if strategy_flag == "ddp_cpu":
if _TPU_AVAILABLE:
raise MisconfigurationException(
"`accelerator='ddp_cpu'` is not supported on TPU machines. "
"Learn more: https://github.com/PyTorchLightning/pytorch-lightning/issues/7810"
)
if self._devices_flag == 1 and self._num_nodes_flag > 1:
strategy_flag = DDPStrategy.strategy_name
else:
strategy_flag = "ddp_spawn"
if self._accelerator_flag == "gpu":
rank_zero_warn(
"You requested one or more GPUs, but set `accelerator='ddp_cpu'`. Training will not use GPUs."
)
self._accelerator_flag = "cpu"
self.accelerator = CPUAccelerator()
if strategy_flag in ("ddp_spawn", "ddp_spawn_find_unused_parameters_false") and (
TorchElasticEnvironment.detect() or KubeflowEnvironment.detect() or self._is_slurm_managing_tasks()
):
strategy_flag = "ddp"
if strategy_flag in ("dp", "ddp2") and self._accelerator_flag == "cpu":
rank_zero_warn(f"{strategy_flag!r} is not supported on CPUs, hence setting `strategy='ddp'`.")
strategy_flag = "ddp"
if strategy_flag:
self._strategy_flag = strategy_flag
def _handle_horovod(self) -> None:
if self._num_nodes_flag > 1:
raise MisconfigurationException(
"Horovod does not support setting num_nodes / num_gpus explicitly. Use "
"horovodrun / mpirun to configure the number of processes."
)
if not _HOROVOD_AVAILABLE:
raise MisconfigurationException(
                'Requested `accelerator="horovod"`, but Horovod is not installed. '
"Install with \n $HOROVOD_WITH_PYTORCH=1 pip install horovod[pytorch]"
)
hvd.init()
if isinstance(self.accelerator, GPUAccelerator):
# Horovod assigns one local GPU per process
self._parallel_devices = [torch.device(f"cuda:{i}") for i in range(hvd.local_size())]
else:
self._parallel_devices = [torch.device("cpu")] * hvd.local_size()
def _init_strategy(self) -> None:
"""Instantiate the Strategy given depending on the setting of ``_strategy_flag``."""
if isinstance(self._strategy_flag, HorovodStrategy) or self._strategy_flag == "horovod":
            # handling horovod has to happen before initializing the strategy because HorovodStrategy needs hvd.init() first.
# TODO lazy initialized and setup horovod strategy `global_rank`
self._handle_horovod()
if isinstance(self._strategy_flag, str):
self.strategy = StrategyRegistry.get(self._strategy_flag)
elif isinstance(self._strategy_flag, Strategy):
self.strategy = self._strategy_flag
else:
raise RuntimeError(f"{self.strategy} is not valid type: {self.strategy}")
def _check_and_init_precision(self) -> PrecisionPlugin:
self._validate_precision_choice()
if isinstance(self._precision_plugin_flag, PrecisionPlugin):
return self._precision_plugin_flag
if isinstance(self.accelerator, IPUAccelerator):
return IPUPrecisionPlugin(self._precision_flag) # type: ignore
if isinstance(self.accelerator, HPUAccelerator):
return HPUPrecisionPlugin(self._precision_flag) # type: ignore
if isinstance(self.accelerator, TPUAccelerator):
if self._precision_flag == 32:
return TPUPrecisionPlugin()
elif self._precision_flag in (16, "bf16"):
if self._precision_flag == 16:
rank_zero_warn(
"You passed `Trainer(accelerator='tpu', precision=16)` but AMP"
" is not supported with TPUs. Using `precision='bf16'` instead."
)
return TPUBf16PrecisionPlugin()
if isinstance(self.strategy, DeepSpeedStrategy):
return DeepSpeedPrecisionPlugin(
self._precision_flag, self._amp_type_flag, self._amp_level_flag # type: ignore
)
if self._precision_flag == 32:
return PrecisionPlugin()
if self._precision_flag == 64:
return DoublePrecisionPlugin()
if self._precision_flag == 16 and self._accelerator_flag == "cpu":
rank_zero_warn(
"You passed `Trainer(accelerator='cpu', precision=16)` but native AMP is not supported on CPU."
" Using `precision='bf16'` instead."
)
self._precision_flag = "bf16"
if self._precision_flag in (16, "bf16"):
rank_zero_info(
f"Using 16bit {self._amp_type_flag.value} Automatic Mixed Precision (AMP)" # type: ignore
if self._precision_flag == 16
else "Using bfloat16 Automatic Mixed Precision (AMP)"
)
if self._amp_type_flag == AMPType.NATIVE:
device = "cpu" if self._accelerator_flag == "cpu" else "cuda"
if isinstance(self.strategy, (DDPShardedStrategy, DDPSpawnShardedStrategy)):
return ShardedNativeMixedPrecisionPlugin(self._precision_flag, device)
if isinstance(self.strategy, DDPFullyShardedStrategy):
return FullyShardedNativeMixedPrecisionPlugin(self._precision_flag, device)
return NativeMixedPrecisionPlugin(self._precision_flag, device)
if self._amp_type_flag == AMPType.APEX:
self._amp_level_flag = self._amp_level_flag or "O2"
return ApexMixedPrecisionPlugin(self._amp_level_flag)
raise RuntimeError("No precision set")
def _validate_precision_choice(self) -> None:
"""Validate the combination of choices for precision, AMP type, and accelerator."""
if isinstance(self.accelerator, TPUAccelerator):
if self._precision_flag == 64:
raise MisconfigurationException(
"`Trainer(accelerator='tpu', precision=64)` is not implemented."
" Please, open an issue in `https://github.com/PyTorchLightning/pytorch-lightning/issues`"
" requesting this feature."
)
if self._precision_plugin_flag and not isinstance(
self._precision_plugin_flag, (TPUPrecisionPlugin, TPUBf16PrecisionPlugin)
):
raise ValueError(
f"The `TPUAccelerator` can only be used with a `TPUPrecisionPlugin`,"
f" found: {self._precision_plugin_flag}."
)
if isinstance(self.accelerator, HPUAccelerator):
if self._precision_flag not in (16, "bf16", 32):
raise MisconfigurationException(
f"`Trainer(accelerator='hpu', precision={self._precision_flag!r})` is not supported."
)
if (
self._precision_flag == 16
and isinstance(self.accelerator, CPUAccelerator)
and self._amp_type_flag == AMPType.APEX
):
raise MisconfigurationException(
"You passed `Trainer(accelerator='cpu', precision=16, amp_type='apex')`"
" but apex AMP not supported on CPU."
)
if self._precision_flag == "bf16" and self._amp_type_flag != AMPType.NATIVE:
raise MisconfigurationException(
f"You passed `Trainer(amp_type={self._amp_type_flag.value!r}, precision='bf16')` but " # type: ignore
"it's not supported. Try using `amp_type='native'` instead."
)
if self._precision_flag in (16, "bf16") and self._amp_type_flag == AMPType.APEX:
if isinstance(self.strategy, (DDPShardedStrategy, DDPSpawnShardedStrategy, DDPFullyShardedStrategy)):
raise MisconfigurationException(
"Sharded plugins are not supported with apex, please switch to `amp_backend='native'`."
)
def _lazy_init_strategy(self) -> None:
"""Lazily set missing attributes on the previously instantiated strategy."""
self.strategy.accelerator = self.accelerator
if self.precision_plugin:
self.strategy.precision_plugin = self.precision_plugin
if self.checkpoint_io:
self.strategy.checkpoint_io = self.checkpoint_io
if hasattr(self.strategy, "cluster_environment"):
self.strategy.cluster_environment = self.cluster_environment
if hasattr(self.strategy, "parallel_devices"):
if self.strategy.parallel_devices:
self._parallel_devices = self.strategy.parallel_devices
else:
self.strategy.parallel_devices = self._parallel_devices
if hasattr(self.strategy, "num_nodes"):
self.strategy._num_nodes = self._num_nodes_flag
if hasattr(self.strategy, "_layer_sync"):
self.strategy._layer_sync = self._layer_sync
if hasattr(self.strategy, "set_world_ranks"):
self.strategy.set_world_ranks()
self.strategy._configure_launcher()
from pytorch_lightning.utilities import _IS_INTERACTIVE
if _IS_INTERACTIVE and self.strategy.launcher and not self.strategy.launcher.is_interactive_compatible:
raise MisconfigurationException(
f"`Trainer(strategy={self.strategy.strategy_name!r})` or"
f" `Trainer(accelerator={self.strategy.strategy_name!r})` is not compatible with an interactive"
" environment. Run your code as a script, or choose one of the compatible strategies:"
f" Trainer(strategy=None|{'|'.join(_StrategyType.interactive_compatible_types())})."
" In case you are spawning processes yourself, make sure to include the Trainer"
" creation inside the worker function."
)
# TODO: should be moved to _check_strategy_and_fallback().
# Current test check precision first, so keep this check here to meet error order
if isinstance(self.accelerator, TPUAccelerator) and not isinstance(
self.strategy, (SingleTPUStrategy, TPUSpawnStrategy)
):
raise ValueError(
"The `TPUAccelerator` can only be used with a `SingleTPUStrategy` or `TPUSpawnStrategy`,"
f" found {self.strategy.__class__.__name__}."
)
if isinstance(self.accelerator, HPUAccelerator) and not isinstance(
self.strategy, (SingleHPUStrategy, HPUParallelStrategy)
):
raise ValueError(
"The `HPUAccelerator` can only be used with a `SingleHPUStrategy` or `HPUParallelStrategy`,"
f" found {self.strategy.__class__.__name__}."
)
"""The following properties are here for backward-compatibility and will be deprecated and removed in favor
of accessing this information through the strategy/accelerator directly."""
# TODO: deprecate all properties below
@property
def tpu_cores(self) -> Optional[Union[List[int], int]]:
if isinstance(self.accelerator, TPUAccelerator):
return self._tpu_cores # type: ignore
return 0
@property
def gpus(self) -> Optional[Union[List[int], str, int]]:
return self._gpus
@property
def is_distributed(self) -> bool:
# Used for custom plugins.
# Custom plugins should implement is_distributed property.
if hasattr(self.strategy, "is_distributed") and not isinstance(self.accelerator, TPUAccelerator):
return self.strategy.is_distributed
distributed_strategy = (
DDP2Strategy,
DDPStrategy,
DDPSpawnShardedStrategy,
DDPShardedStrategy,
DDPFullyShardedStrategy,
DDPSpawnStrategy,
DeepSpeedStrategy,
TPUSpawnStrategy,
HorovodStrategy,
HPUParallelStrategy,
)
is_distributed = isinstance(self.strategy, distributed_strategy)
if isinstance(self.accelerator, TPUAccelerator):
is_distributed |= self.strategy.is_distributed
return is_distributed
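# ---------------------------------------------------------------------------
# Editorial sketch (not part of Lightning): the `accelerator="auto"` handling
# above boils down to an availability-ordered lookup. The helper below is a
# hypothetical, framework-free restatement of that precedence, for
# illustration only.
# ---------------------------------------------------------------------------
def _sketch_choose_accelerator(tpu=False, ipu=False, hpu=False, gpu=False):
    # mirrors _choose_accelerator: TPU > IPU > HPU > GPU > CPU
    for name, available in (("tpu", tpu), ("ipu", ipu), ("hpu", hpu), ("gpu", gpu)):
        if available:
            return name
    return "cpu"
# e.g. _sketch_choose_accelerator(gpu=True) -> "gpu"; with no flags -> "cpu"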
| 49.506683
| 120
| 0.634057
|
cdc7f7a1709a788b0511c4f171311963efeb2456
| 671
|
py
|
Python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/__init__.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/__init__.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/__init__.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._compute_management_client import ComputeManagementClient
__all__ = ['ComputeManagementClient']
try:
from ._patch import patch_sdk # type: ignore
patch_sdk()
except ImportError:
pass
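# Editorial sketch (hypothetical, not shipped with this package): the optional
# ``_patch`` module imported above only needs to expose a ``patch_sdk`` hook,
# e.g. a ``_patch.py`` file next to this one containing:
#
#   def patch_sdk():
#       """Apply hand-written customizations on top of the generated client."""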
| 39.470588
| 94
| 0.600596
|
76d94359858a84bbe011ac52251e0bfc64dbf4d2
| 206
|
py
|
Python
|
arpeggio/calibrate.py
|
ronniyjoseph/Arpeggio
|
a3cc043ff1d6305c4407541555a5a20be6c575e5
|
[
"MIT"
] | null | null | null |
arpeggio/calibrate.py
|
ronniyjoseph/Arpeggio
|
a3cc043ff1d6305c4407541555a5a20be6c575e5
|
[
"MIT"
] | null | null | null |
arpeggio/calibrate.py
|
ronniyjoseph/Arpeggio
|
a3cc043ff1d6305c4407541555a5a20be6c575e5
|
[
"MIT"
] | null | null | null |
"""Module that performs the calibration .
Defines a class and relevant functions that interface with corrcal
to calbrate visibilties
"""
class CorrCal:
def __init__(self):
pass
| 18.727273
| 66
| 0.713592
|
8d977cc115cc80d843c5067b60731be5514df532
| 1,245
|
py
|
Python
|
examples/simple_eventlet_receive.py
|
7Geese/kombu
|
b51d1d678e198a80d7e5fd95f32674c7d8e04a75
|
[
"BSD-3-Clause"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
desktop/core/ext-py/kombu-4.3.0/examples/simple_eventlet_receive.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
desktop/core/ext-py/kombu-4.3.0/examples/simple_eventlet_receive.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
"""
Example that receives messages using the simple interface, draining the queue
until it is empty. You can use `simple_eventlet_send.py` to publish the
messages this example consumes.
"""
from __future__ import absolute_import, unicode_literals
import eventlet
from kombu import Connection
eventlet.monkey_patch()
def wait_many(timeout=1):
#: Create connection
#: If hostname, userid, password and virtual_host is not specified
#: the values below are the default, but listed here so it can
#: be easily changed.
with Connection('amqp://guest:guest@localhost:5672//') as connection:
#: SimpleQueue mimics the interface of the Python Queue module.
#: First argument can either be a queue name or a kombu.Queue object.
#: If a name, then the queue will be declared with the name as the
#: queue name, exchange name and routing key.
with connection.SimpleQueue('kombu_demo') as queue:
while True:
try:
message = queue.get(block=False, timeout=timeout)
except queue.Empty:
break
else:
message.ack()
print(message.payload)
eventlet.spawn(wait_many).wait()
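# Editorial sketch (assumption: mirrors kombu's send-side examples such as
# `simple_eventlet_send.py`): a minimal producer publishing a message that the
# receive loop above can consume.
def send_one(payload=None):
    payload = payload if payload is not None else {'hello': 'world'}
    with Connection('amqp://guest:guest@localhost:5672//') as connection:
        with connection.SimpleQueue('kombu_demo') as queue:
            queue.put(payload)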
| 29.642857
| 77
| 0.64498
|
5bb284aa522886558fc7dd66a026ebb1825cf71f
| 1,035
|
py
|
Python
|
src/upcoming_python_events/with_selenium.py
|
codermrhasan/web-scraping-with-python
|
7cd9b6d3d5af3b85a214e8531e5a29cdb68ef405
|
[
"MIT"
] | null | null | null |
src/upcoming_python_events/with_selenium.py
|
codermrhasan/web-scraping-with-python
|
7cd9b6d3d5af3b85a214e8531e5a29cdb68ef405
|
[
"MIT"
] | 1
|
2021-03-31T19:41:22.000Z
|
2021-03-31T19:41:22.000Z
|
src/upcoming_python_events/with_selenium.py
|
codermrhasan/web-scraping-with-python
|
7cd9b6d3d5af3b85a214e8531e5a29cdb68ef405
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
def scraper():
url = 'https://www.python.org/events/python-events/'
driver = webdriver.Chrome(executable_path='chromedriver')
driver.get(url)
events = driver.find_elements_by_xpath('//ul[contains(@class, "list-recent-events")]/li')
print(
f"\n Upcoming Python Events \n" +
'+++++++++++++++++++++++++++++++++++++\n'
)
i=1
for event in events:
event_details = dict()
event_details['name'] = event.find_element_by_xpath('h3[@class="event-title"]/a').text
event_details['time'] = event.find_element_by_xpath('p/time').text
event_details['location'] = event.find_element_by_xpath('p/span[@class="event-location"]').text
print(
f"______________Event {i}______________\n"
f"Event Name: {event_details['name']}\n" +
f"Event Time: {event_details['time']}\n" +
f"Event Location: {event_details['location']}\n"
)
i += 1
driver.close()
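# Editorial note (illustrative, not part of the original script): the
# `find_element(s)_by_xpath` helpers used above belong to the Selenium 3 API;
# with Selenium 4 the equivalent lookups would be written as, e.g.:
#   from selenium.webdriver.common.by import By
#   events = driver.find_elements(By.XPATH,
#       '//ul[contains(@class, "list-recent-events")]/li')
# A hypothetical entry point so the module can also be run directly:
if __name__ == '__main__':
    scraper()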
| 33.387097
| 103
| 0.582609
|
edea597edf4a71359729014fbb75d6264e39f144
| 420
|
py
|
Python
|
configs/_base_/schedules/bdd100k_lane_12e.py
|
XDong18/mmsegmentation
|
9a14288a654b66babfdfe4f6fa77edc4cd127d41
|
[
"Apache-2.0"
] | null | null | null |
configs/_base_/schedules/bdd100k_lane_12e.py
|
XDong18/mmsegmentation
|
9a14288a654b66babfdfe4f6fa77edc4cd127d41
|
[
"Apache-2.0"
] | null | null | null |
configs/_base_/schedules/bdd100k_lane_12e.py
|
XDong18/mmsegmentation
|
9a14288a654b66babfdfe4f6fa77edc4cd127d41
|
[
"Apache-2.0"
] | null | null | null |
# optimizer
optimizer = dict(type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=103824)
checkpoint_config = dict(by_epoch=False, interval=2000)
evaluation = dict(interval=2000, metric='mIoU')
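# Editorial sketch (hypothetical file paths): a top-level mmsegmentation config
# would typically pull in this schedule fragment via `_base_` inheritance, e.g.:
#   _base_ = [
#       '../_base_/models/<model>.py',
#       '../_base_/datasets/<dataset>.py',
#       '../_base_/default_runtime.py',
#       '../_base_/schedules/bdd100k_lane_12e.py',
#   ]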
| 46.666667
| 73
| 0.766667
|
9a4802b78a83f49b14d91ebd9ca42fc781ece10e
| 519
|
py
|
Python
|
Dataset/Leetcode/train/38/306.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/train/38/306.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/train/38/306.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
class Solution:
def XXX(self, n: int) -> str:
if n == 1:
return str(1)
count = 1
result = ''
temp = self.XXX(n-1)
for i in range(len(temp)):
if i == (len(temp)-1):
result += (str(count) + temp[i])
return result
if temp[i] == temp[i+1]:
count += 1
continue
else:
result += (str(count) + temp[i])
count = 1
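# Editorial note (illustrative, not part of the original submission): this is
# the "Count and Say" recursion (LeetCode 38); successive calls expand
# "1" -> "11" -> "21" -> "1211", so e.g. Solution().XXX(4) == "1211".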
| 25.95
| 48
| 0.356455
|
7f171f5ed48bd2d99364419653d38e56803c8424
| 31,607
|
py
|
Python
|
pycqed/measurement/quantum_experiment.py
|
sergimasot/PycQED_py3
|
54ad1b14929ffe5cc87cf59423a970e4b9baa3e1
|
[
"MIT"
] | null | null | null |
pycqed/measurement/quantum_experiment.py
|
sergimasot/PycQED_py3
|
54ad1b14929ffe5cc87cf59423a970e4b9baa3e1
|
[
"MIT"
] | null | null | null |
pycqed/measurement/quantum_experiment.py
|
sergimasot/PycQED_py3
|
54ad1b14929ffe5cc87cf59423a970e4b9baa3e1
|
[
"MIT"
] | null | null | null |
import traceback
import numpy as np
from pycqed.analysis_v3 import helper_functions
from pycqed.measurement.waveform_control.sequence import Sequence
from pycqed.utilities.general import temporary_value
from pycqed.utilities.timer import Timer, Checkpoint
from pycqed.measurement.waveform_control.circuit_builder import CircuitBuilder
import pycqed.measurement.awg_sweep_functions as awg_swf
from pycqed.measurement import multi_qubit_module as mqm
import pycqed.analysis_v2.base_analysis as ba
from copy import deepcopy
import logging
log = logging.getLogger(__name__)
class QuantumExperiment(CircuitBuilder):
"""
Base class for Experiments with pycqed. A QuantumExperiment consists of
3 main parts:
- The __init__(), which takes care of initializing the parent class
(CircuitBuilder) and setting all the attributes of the quantum experiment
- the run_measurement(), which is the skeleton of any measurement in pycqed.
This function should *not* be modified by child classes
- the run_analysis(), which defaults to calling BaseDataAnalysis. This function
may be overwritten by child classes to start measurement-specific analysis
"""
_metadata_params = {'cal_points', 'preparation_params', 'sweep_points',
'channel_map', 'meas_objs'}
def __init__(self, dev=None, qubits=None, operation_dict=None,
meas_objs=None, classified=False, MC=None,
label=None, exp_metadata=None, upload=True, measure=True,
analyze=True, temporary_values=(), drive="timedomain",
sequences=(), sequence_function=None,
sequence_kwargs=None, df_kwargs=None, df_name=None,
timer_kwargs=None,
mc_points=None, sweep_functions=(awg_swf.SegmentHardSweep,
awg_swf.SegmentSoftSweep),
compression_seg_lim=None, force_2D_sweep=True, callback=None,
callback_condition=lambda : True, **kw):
"""
Initializes a QuantumExperiment.
Args:
dev (Device): Device object used for the experiment. Defaults to None.
qubits (list): list of qubits used for the experiment (e.g. a subset of
qubits on the device). Defaults to None. (see circuitBuilder for more
details).
operation_dict (dict): dictionary with operations. Defaults to None.
(see circuitBuilder for more details).
meas_objs (list): list of measure object (e.g., qubits) to be read
out (i.e. for which the detector functions will be
prepared). Defaults to self.qubits (attribute set by
CircuitBuilder). Required for run_measurement() when qubits
is None.
            classified (bool): whether or not to use classified readout,
                i.e. the classifier-based detector functions (see df_name below).
MC (MeasurementControl): MeasurementControl object. Required for
run_measurement() if qubits is None and device is None.
label (str): Measurement label
exp_metadata (dict): experimental metadata saved in hdf5 file
upload (bool): whether or not to upload the sequences to the AWGs
measure (bool): whether or not to measure
analyze (bool): whether or not to analyze
temporary_values (list): list of temporary values with the form:
[(Qcode_param_1, value_1), (Qcode_param_2, value_2), ...]
drive (str): qubit configuration.
sequences (list): list of sequences for the experiment. Note that
even in the case of a single sequence, a list is required.
Required if sequence_function is None.
sequence_function (callable): functions returning the sequences,
see self._prepare_sequences() for more details. Required for
run_measurement if sequences is None
sequence_kwargs (dict): keyword arguments passed to the sequence_function.
see self._prepare_sequences()
df_kwargs (dict): detector function keyword arguments.
timer_kwargs (dict): keyword arguments for timer. See pycqed.utilities.timer.
Timer.
df_name (str): detector function name.
mc_points (tuple): tuple of 2 lists with first and second dimension
measurement control points (previously also called sweep_points,
but name has changed to avoid confusion with SweepPoints):
[first_dim_mc_points, second_dim_mc_points]. MC points
correspond to measurement_control sweep points i.e. sweep points
directly related to the instruments, e.g. segment readout index.
Not required when using sweep_functions SegmentSoftSweep and
SegmentHardSweep as these may be inferred from the sequences objects.
In case other sweep functions are used (e.g. for sweeping instrument
parameters), then the sweep points must be specified. Note that the list
must always have two entries. E.g. for a 1D sweep of LO frequencies,
mc_points should be of the form: (freqs, [])
sweep_functions (tuple): tuple of sweepfunctions. Similarly to mc_points,
sweep_functions has 2 entries, one for each dimension. Defaults to
SegmentHardSweep for the first sweep dimensions and SegmentSoftSweep
for the second dimension.
compression_seg_lim (int): maximal number of segments that can be in a
single sequence. If not None and the QuantumExperiment is a 2D sweep
with more than 1 sequence, and the sweep_functions are
(SegmentHardSweep, SegmentSoftsweep), then the quantumExperiment
will try to compress the sequences, see Sequence.compress_2D_sweep.
force_2D_sweep (bool): whether or not to force a two-dimensional sweep.
In that case, even if there is only one sequence, a second
sweep_function dimension is added. The idea is to use this more
and more to generalize data format passed to the analysis.
callback (func): optional function to call after run_analysis() in
autorun(). All arguments passed to autorun will be passed down to
the callback.
callback_condition (func): function returning a bool to decide whether or
not the callback function should be executed. Defaults to always True.
**kw:
further keyword arguments are passed to the CircuitBuilder __init__
"""
self.timer = Timer('QuantumExperiment', **timer_kwargs if timer_kwargs is
not None else {})
if qubits is None and dev is None and operation_dict is None:
raise NotImplementedError('Experiments without qubits are not '
'implemented yet. Either dev or qubits'
'or operation_dict has to be provided.')
# planned future behavior (but has to be tested in all aspects):
# if no qubits/devive/operation_dict are provided, use empty
# list to skip iterations over qubit lists
# qubits = []
super().__init__(dev=dev, qubits=qubits, operation_dict=operation_dict,
**kw)
self.exp_metadata = exp_metadata
if self.exp_metadata is None:
self.exp_metadata = {}
self.create_meas_objs_list(**kw, meas_objs=meas_objs)
self.MC = MC
self.classified = classified
self.label = label
self.upload = upload
self.measure = measure
self.temporary_values = list(temporary_values)
self.analyze = analyze
self.drive = drive
self.callback = callback
self.callback_condition = callback_condition
self.sequences = list(sequences)
self.sequence_function = sequence_function
self.sequence_kwargs = {} if sequence_kwargs is None else sequence_kwargs
self.sweep_points = self.sequence_kwargs.get("sweep_points", None)
self.mc_points = mc_points if mc_points is not None else [[], []]
self.sweep_functions = sweep_functions
self.force_2D_sweep = force_2D_sweep
self.compression_seg_lim = compression_seg_lim
self.channels_to_upload = []
# The experiment_name might have been set by the user in kw or by a
# child class as an attribute. Otherwise, the default None will
# trigger guess_label to use the sequence name.
self.experiment_name = kw.pop(
'experiment_name', getattr(self, 'experiment_name', None))
self.timestamp = None
self.analysis = None
# detector and sweep functions
default_df_kwargs = {'det_get_values_kws':
{'classified': self.classified,
'correlated': False,
'thresholded': True,
'averaged': True}}
self.df_kwargs = default_df_kwargs if df_kwargs is None else df_kwargs
if df_name is not None:
self.df_name = df_name
if 'classif' in df_name:
self.classified = True
else:
self.df_name = 'int_avg{}_det'.format('_classif' if self.classified else '')
self.df = None
# determine data type
if "log" in self.df_name or not \
self.df_kwargs.get('averaged', True):
data_type = "singleshot"
else:
data_type = "averaged"
self.exp_metadata.update(kw)
self.exp_metadata.update({'classified_ro': self.classified,
'cz_pulse_name': self.cz_pulse_name,
'data_type': data_type})
def create_meas_objs_list(self, meas_objs=None, **kwargs):
"""
Creates a default list for self.meas_objs if meas_objs is not provided,
and creates the list self.meas_obj_names.
Args:
meas_objs (list): a list of measurement objects (or None for
default, which is self.qubits)
"""
self.meas_objs = self.qubits if meas_objs is None else meas_objs
self.meas_obj_names = [m.name for m in self.meas_objs]
def _update_parameters(self, overwrite_dicts=True, **kwargs):
"""
Update all attributes of the quantumExperiment class.
Args:
overwrite_dicts (bool): whether or not to overwrite
attributes that are dictionaries. If False,
then dictionaries are updated.
**kwargs: any attribute of the QuantumExperiment class
"""
for param_name, param_value in kwargs.items():
if hasattr(self, param_name):
if isinstance(param_value, dict) and not overwrite_dicts:
getattr(self, param_name).update(param_value)
else:
setattr(self, param_name, param_value)
@Timer()
def run_measurement(self, save_timers=True, **kw):
"""
Runs a measurement. Any keyword argument passes to this function that
is also an attribute of the QuantumExperiment class will be updated
before starting the experiment
Args:
save_timers (bool): whether timers should be saved to the hdf
file at the end of the measurement (default: True).
Returns:
"""
self._update_parameters(**kw)
assert self.meas_objs is not None, 'Cannot run measurement without ' \
'measure objects.'
if len(self.mc_points) == 1:
self.mc_points = [self.mc_points[0], []]
exception = None
with temporary_value(*self.temporary_values):
            # Prepare all involved qubits. If not available, prepare
# all measure objects.
mos = self.qubits if self.qubits is not None else self.meas_objs
for m in mos:
m.prepare(drive=self.drive)
# create/retrieve sequence to run
self._prepare_sequences(self.sequences, self.sequence_function,
self.sequence_kwargs)
# configure measurement control (mc_points, detector functions)
mode = self._configure_mc()
self.guess_label(**kw)
self.update_metadata()
# run measurement
try:
self.MC.run(name=self.label, exp_metadata=self.exp_metadata,
mode=mode)
except (Exception, KeyboardInterrupt) as e:
exception = e # exception will be raised below
self.extract_timestamp()
if save_timers:
self.save_timers()
if exception is not None:
raise exception
def update_metadata(self):
# make sure that all metadata params are up to date
for name in self._metadata_params:
if hasattr(self, name):
value = getattr(self, name)
try:
if name in ('cal_points', 'sweep_points') and \
value is not None:
old_val = np.get_printoptions()['threshold']
np.set_printoptions(threshold=np.inf)
self.exp_metadata.update({name: repr(value)})
np.set_printoptions(threshold=old_val)
elif name in ('meas_objs', "qubits") and value is not None:
self.exp_metadata.update(
{name: [qb.name for qb in value]})
else:
self.exp_metadata.update({name: value})
except Exception as e:
log.error(
f"Could not add {name} with value {value} to the "
f"metadata")
raise e
def extract_timestamp(self):
try:
self.timestamp = self.MC.data_object._datemark + '_' \
+ self.MC.data_object._timemark
except Exception:
pass # if extraction fails, keep the old value (None from init)
def guess_label(self, **kwargs):
"""
Creates a default label.
Returns:
"""
if self.label is None:
if self.experiment_name is None:
self.experiment_name = self.sequences[0].name
self.label = self.experiment_name
_, qb_names = self.get_qubits(self.qubits)
if self.dev is not None:
self.label += self.dev.get_msmt_suffix(self.meas_obj_names)
else:
# guess_label is called from run_measurement -> we have qubits
self.label += mqm.get_multi_qubit_msmt_suffix(self.meas_objs)
@Timer()
def run_analysis(self, analysis_class=None, analysis_kwargs=None, **kw):
"""
Launches the analysis.
Args:
analysis_class: Class to use for the analysis
analysis_kwargs: keyword arguments passed to the analysis class
Returns: analysis object
"""
if analysis_class is None:
analysis_class = ba.BaseDataAnalysis
if analysis_kwargs is None:
analysis_kwargs = {}
self.analysis = analysis_class(**analysis_kwargs)
return self.analysis
def autorun(self, **kw):
if self.measure:
try:
# Do not save timers here since they will be saved below.
self.run_measurement(save_timers=False, **kw)
except (Exception, KeyboardInterrupt) as e:
self.save_timers()
raise e
# analyze and call callback only when measuring
if self.analyze:
self.run_analysis(**kw)
if self.callback is not None and self.callback_condition():
self.callback(**kw)
self.save_timers() # for now store timers only if creating new file
return self
def serialize(self, omitted_attrs=('MC', 'device', 'qubits')):
"""
Map a Quantum experiment to a large dict for hdf5 storage/pickle object,
etc.
Returns:
"""
raise NotImplementedError()
@Timer()
def _prepare_sequences(self, sequences=None, sequence_function=None,
sequence_kwargs=None):
"""
        Prepares/builds sequences for a measurement.
Args:
sequences (list): list of sequences to run. Optional. If not given
then a sequence_function from which the sequences can be created
is required.
            sequence_function (callable): sequence function to generate sequences.
                Should return one of the following formats:
                - a list of sequences: valid if the first and second
                  sweep functions are SegmentHardSweep and SegmentSoftSweep
                  respectively.
                - a sequence: valid if the sweep function is SegmentHardSweep
- One of the following tuples:
(sequences, mc_points_tuple), where mc_points_tuple is a
tuple in which each entry corresponds to a dimension
of the sweep. This is the preferred option.
For backwards compatibility, the following two tuples are
also accepted:
(sequences, mc_points_first_dim, mc_points_2nd_dim)
(sequences, mc_points_first_dim)
sequence_kwargs (dict): arguments to pass to the sequence
function if sequence_function is not None. If
sequence_function is None, the following entries in this
dict are supported:
- extra_sequences (list): a list of additional sequences to
measure. This is useful for combining sequences that are
automatically generated by a child-class of
QuantumExperiment with user-provided sequences into a
single experiment (e.g., for measuring them in a single
upload by specifying a sufficiently high
compression_seg_lim). The user has to ensure that the
extra sequences are compatible with the normal sequences
of the QuantumExperiment, e.g., in terms of number of
acquisition elements.
Returns:
"""
if sequence_kwargs is None:
sequence_kwargs = {}
if sequence_function is not None:
# build sequence from function
seq_info = sequence_function(**sequence_kwargs)
if isinstance(seq_info, list):
self.sequences = seq_info
elif isinstance(seq_info, Sequence):
self.sequences = [seq_info]
elif len(seq_info) == 3: # backwards compatible 2D sweep
self.sequences, \
(self.mc_points[0], self.mc_points[1]) = seq_info
elif len(seq_info) == 2:
if np.ndim(seq_info[1]) == 1:
# backwards compatible 1D sweep
self.sequences, self.mc_points[0] = seq_info
else:
self.sequences, self.mc_points = seq_info
# ensure self.sequences is a list
if np.ndim(self.sequences) == 0:
self.sequences = [self.sequences]
elif sequences is not None:
extra_seqs = deepcopy(sequence_kwargs.get('extra_sequences', []))
for seq in extra_seqs:
seq.name = 'Extra' + seq.name
self.sequences = sequences + extra_seqs
if len(self.mc_points) > 1 and len(self.mc_points[1]):
# mc_points are set and won't be generated automatically.
# We have to add additional points for the extra sequences.
self.mc_points[1] = np.concatenate([
self.mc_points[1],
np.arange(len(extra_seqs)) + self.mc_points[1][-1] + 1])
# check sequence
assert len(self.sequences) != 0, "No sequence found."
@Timer()
def _configure_mc(self, MC=None):
"""
Configure the measurement control (self.MC) for the measurement.
This includes setting the sweep points and the detector function.
By default, SegmentHardSweep is the sweepfunction used for the first
dimension and SegmentSoftSweep is the sweepfunction used for the second
dimension. In case other sweepfunctions should be used, self.sweep_functions
should be modified prior to the call of this function.
Returns:
mmnt_mode (str): "1D" or "2D"
"""
# ensure measurement control is set
self._set_MC(MC)
# configure mc_points
if len(self.mc_points[0]) == 0: # first dimension mc_points not yet set
if self.sweep_functions[0] == awg_swf.SegmentHardSweep:
# first dimension mc points can be retrieved as
# ro_indices from sequence
self.mc_points[0] = np.arange(self.sequences[0].n_acq_elements())
else:
raise ValueError("The first dimension of mc_points must be provided "
"with sequence if the sweep function isn't "
"'SegmentHardSweep'.")
if len(self.sequences) > 1 and len(self.mc_points[1]) == 0:
if self.sweep_functions[1] == awg_swf.SegmentSoftSweep:
# 2nd dimension mc_points can be retrieved as sequence number
self.mc_points[1] = np.arange(len(self.sequences))
elif self.sweep_points is not None and len(self.sweep_points) > 1:
# second dimension can be inferred from sweep points
self.mc_points[1] = list(self.sweep_points[1].values())[0][0]
else:
raise ValueError("The second dimension of mc_points must be provided "
"if the sweep function isn't 'SegmentSoftSweep' and"
"no sweep_point object is given.")
# force 2D sweep if needed (allow 1D sweep for backwards compatibility)
if len(self.mc_points[1]) == 0 and self.force_2D_sweep:
self.mc_points[1] = np.array([0]) # force 2d with singleton
# set mc points
if len(self.sequences) > 1:
# compress 2D sweep
if self.compression_seg_lim is not None:
if self.sweep_functions == (awg_swf.SegmentHardSweep,
awg_swf.SegmentSoftSweep):
self.sequences, self.mc_points[0], \
self.mc_points[1], cf = \
self.sequences[0].compress_2D_sweep(self.sequences,
self.compression_seg_lim,
True,
self.mc_points[0])
self.exp_metadata.update({'compression_factor': cf})
else:
log.warning("Sequence compression currently does not support"
"sweep_functions different than (SegmentHardSweep,"
" SegmentSoftSweep). This could easily be implemented"
"by modifying Sequence.compress_2D_sweep to accept"
"mc_points and do the appropriate reshaping. Feel"
"free to make a pull request ;). Skipping compression"
"for now.")
try:
sweep_param_name = list(self.sweep_points[0])[0]
unit = list(self.sweep_points[0].values())[0][2]
except TypeError:
sweep_param_name, unit = "None", ""
sweep_func_1st_dim = self.sweep_functions[0](
sequence=self.sequences[0], upload=self.upload,
parameter_name=sweep_param_name, unit=unit)
self.MC.set_sweep_function(sweep_func_1st_dim)
self.MC.set_sweep_points(self.mc_points[0])
# set second dimension sweep function
if len(self.mc_points[1]) > 0: # second dimension exists
try:
sweep_param_name = list(self.sweep_points[1])[0]
unit = list(self.sweep_points[1].values())[0][2]
except TypeError:
sweep_param_name, unit = "None", ""
if len(self.channels_to_upload) == 0:
self.channels_to_upload = "all"
if self.sweep_functions[1] == awg_swf.SegmentSoftSweep:
self.MC.set_sweep_function_2D(self.sweep_functions[1](
sweep_func_1st_dim, self.sequences, sweep_param_name, unit,
self.channels_to_upload))
else:
# In case of an unknown sweep function type, it is assumed
# that self.sweep_functions[1] has already been initialized
# with all required parameters and can be directly passed to
# MC.
self.MC.set_sweep_function_2D(self.sweep_functions[1])
self.MC.set_sweep_points_2D(self.mc_points[1])
# check whether there is at least one measure object
if len(self.meas_objs) == 0:
raise ValueError('No measure objects provided. Cannot '
'configure detector functions')
# Configure detector function
# FIXME: this should be extended to meas_objs that are not qubits
df = mqm.get_multiplexed_readout_detector_functions(
self.meas_objs, **self.df_kwargs)[self.df_name]
self.MC.set_detector_function(df)
if self.dev is not None:
meas_obj_value_names_map = self.dev.get_meas_obj_value_names_map(
self.meas_objs, df)
else:
meas_obj_value_names_map = mqm.get_meas_obj_value_names_map(
self.meas_objs, df)
self.exp_metadata.update(
{'meas_obj_value_names_map': meas_obj_value_names_map})
if 'meas_obj_sweep_points_map' not in self.exp_metadata:
self.exp_metadata['meas_obj_sweep_points_map'] = {}
if len(self.mc_points[1]) > 0:
mmnt_mode = "2D"
else:
mmnt_mode = "1D"
return mmnt_mode
def _set_MC(self, MC=None):
"""
Sets the measurement control and raises an error if no MC
could be retrieved from device/qubits objects
Args:
MC (MeasurementControl):
Returns:
"""
if MC is not None:
self.MC = MC
elif self.MC is None:
try:
self.MC = self.dev.instr_mc.get_instr()
except AttributeError:
try:
self.MC = self.meas_objs[0].instr_mc.get_instr()
except (AttributeError, IndexError):
raise ValueError("The Measurement Control (MC) could not "
"be retrieved because no Device/measure "
"objects were found. Pass the MC to "
"run_measurement() or set the MC attribute"
" of the QuantumExperiment instance.")
# def __setattr__(self, name, value):
# """
# Observes attributes which are set to this class. If they are in the
# _metadata_params then they are automatically added to the experimental
# metadata
# Args:
# name:
# value:
#
# Returns:
#
# """
# if name in self._metadata_params:
# try:
# if name in 'cal_points' and value is not None:
# self.exp_metadata.update({name: repr(value)})
# elif name in ('meas_objs', "qubits") and value is not None:
# self.exp_metadata.update({name: [qb.name for qb in value]})
# else:
# self.exp_metadata.update({name: value})
# except Exception as e:
# log.error(f"Could not add {name} with value {value} to the "
# f"metadata")
# raise e
#
# self.__dict__[name] = value
def save_timers(self, quantum_experiment=True, sequence=True, segments=True, filepath=None):
if self.MC is None or self.MC.skip_measurement():
return
data_file = helper_functions.open_hdf_file(self.timestamp, filepath=filepath, mode="r+")
try:
timer_group = data_file.get(Timer.HDF_GRP_NAME)
if timer_group is None:
timer_group = data_file.create_group(Timer.HDF_GRP_NAME)
if quantum_experiment:
self.timer.save(timer_group)
if sequence:
seq_group = timer_group.create_group('Sequences')
for s in self.sequences:
# save sequence timers
try:
timer_seq_name = s.timer.name
                        # check that the name does not already exist; in case it does, warn that only the last instance is kept
# Note: normally that should not happen (not desirable)
if timer_seq_name in seq_group.keys():
log.warning(f"Timer with name {timer_seq_name} already "
f"exists in Sequences timers. "
f"Only last instance will be kept")
s.timer.save(seq_group)
if segments:
seg_group = seq_group[timer_seq_name].create_group(timer_seq_name + ".segments")
for _, seg in s.segments.items():
try:
timer_seg_name = seg.timer.name
                                    # check that the name does not already exist; in case it does, warn that only the last instance is kept
# Note: normally that should not happen (not desirable)
if timer_seg_name in seg_group.keys():
log.warning(f"Timer with name {timer_seg_name} already "
f"exists in Segments timers. "
f"Only last instance will be kept")
seg.timer.save(seg_group)
except AttributeError:
pass
except AttributeError:
pass # in case some sequences don't have timers
        finally:
            # close the HDF file also when saving succeeds, not only on errors
            data_file.close()
def __repr__(self):
return f"QuantumExperiment(dev={self.dev}, qubits={self.qubits})"
| 47.529323
| 108
| 0.573133
|
d8abb4a61d8af7758ac982e903bc309c627ad90b
| 2,645
|
py
|
Python
|
pysaurus/database/viewport/layers/search_layer.py
|
notoraptor/pysaurus
|
3bf5fe8c15e0e0e580e5edaea05b4a1298641367
|
[
"MIT"
] | null | null | null |
pysaurus/database/viewport/layers/search_layer.py
|
notoraptor/pysaurus
|
3bf5fe8c15e0e0e580e5edaea05b4a1298641367
|
[
"MIT"
] | 4
|
2021-08-13T14:03:02.000Z
|
2022-03-05T16:02:45.000Z
|
pysaurus/database/viewport/layers/search_layer.py
|
notoraptor/pysaurus
|
3bf5fe8c15e0e0e580e5edaea05b4a1298641367
|
[
"MIT"
] | null | null | null |
from typing import Optional
from pysaurus.core import functions
from pysaurus.database.video import Video
from pysaurus.database.video_features import VideoFeatures
from pysaurus.database.viewport.layers.layer import Layer
from pysaurus.database.viewport.layers.source_layer import SourceLayer
from pysaurus.database.viewport.viewtools.group import Group
from pysaurus.database.viewport.viewtools.search_def import SearchDef
from pysaurus.database.viewport.viewtools.video_array import VideoArray
class SearchLayer(Layer):
__slots__ = ()
__props__ = ("search",)
DEFAULT_SEARCH_DEF = SearchDef(None, None) # str text, str cond
def set_search(self, text: Optional[str], cond: Optional[str]):
self._set_parameters(search=SearchDef(text, cond))
def get_search(self) -> SearchDef:
return self.get_parameter("search")
def reset_parameters(self):
self._set_parameters(search=self.DEFAULT_SEARCH_DEF)
def filter(self, data: Group) -> VideoArray:
search_def = self.get_search()
if search_def:
root = self.get_root()
if isinstance(root, SourceLayer):
return self.__filter_from_root_layer(search_def, root, data)
return VideoArray(VideoFeatures.find(search_def, data.videos))
return data.videos
def __filter_from_root_layer(
self, search_def: SearchDef, source_layer: SourceLayer, data: Group
) -> VideoArray:
term_to_videos = source_layer.get_index()
terms = functions.string_to_pieces(search_def.text)
if search_def.cond == "exact":
selection_and = set(data.videos)
for term in terms:
selection_and &= term_to_videos.get(term, set())
video_filter = Video.has_terms_exact
selection = (video for video in selection_and if video_filter(video, terms))
elif search_def.cond == "and":
selection = set(data.videos)
for term in terms:
selection &= term_to_videos.get(term, set())
elif search_def.cond == "id":
(term,) = terms
video_id = int(term)
selection = (video for video in data.videos if video.video_id == video_id)
else: # search_def.cond == 'or'
selection = set(term_to_videos.get(terms[0], set()))
for term in terms[1:]:
selection |= term_to_videos.get(term, set())
selection &= set(data.videos)
return VideoArray(selection)
def remove_from_cache(self, cache: VideoArray, video: Video):
if video in cache:
cache.remove(video)
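# Illustrative sketch, separate from the SearchLayer API above: the same term-index idea
# as __filter_from_root_layer, but on plain strings instead of Video objects so it can
# run standalone. The index and the sample items below are hypothetical.
if __name__ == "__main__":
    index = {
        "cat": {"cat video", "funny cat dog"},
        "dog": {"dog video", "funny cat dog"},
        "funny": {"funny cat dog"},
    }
    def search_sketch(terms, cond):
        if cond == "and":
            return set.intersection(*(index.get(t, set()) for t in terms))
        if cond == "or":
            return set.union(*(index.get(t, set()) for t in terms))
        # "exact": candidates must contain every term and the terms as one phrase
        candidates = set.intersection(*(index.get(t, set()) for t in terms))
        return {item for item in candidates if " ".join(terms) in item}
    assert search_sketch(["cat", "dog"], "and") == {"funny cat dog"}
    assert "dog video" in search_sketch(["cat", "dog"], "or")
    assert search_sketch(["cat", "dog"], "exact") == {"funny cat dog"}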
| 40.692308
| 88
| 0.665784
|
9051576df0dc2d03382dc3e87b1346ceef0baebd
| 7,126
|
py
|
Python
|
zhaquirks/tuya/ts0043.py
|
ha-zig/zha-device-handlers
|
71adabe3912f86e7392d1dcfd70c8a686577da8e
|
[
"Apache-2.0"
] | null | null | null |
zhaquirks/tuya/ts0043.py
|
ha-zig/zha-device-handlers
|
71adabe3912f86e7392d1dcfd70c8a686577da8e
|
[
"Apache-2.0"
] | null | null | null |
zhaquirks/tuya/ts0043.py
|
ha-zig/zha-device-handlers
|
71adabe3912f86e7392d1dcfd70c8a686577da8e
|
[
"Apache-2.0"
] | null | null | null |
"""Tuya 3 Button Remote."""
from zigpy.profiles import zha
from zigpy.quirks import CustomDevice
from zigpy.zcl.clusters.general import Basic, OnOff, Ota, PowerConfiguration, Time
from . import TuyaSmartRemoteOnOffCluster
from ..const import (
BUTTON_1,
BUTTON_2,
BUTTON_3,
COMMAND,
DEVICE_TYPE,
DOUBLE_PRESS,
ENDPOINT_ID,
ENDPOINTS,
INPUT_CLUSTERS,
LONG_PRESS,
MODEL,
OUTPUT_CLUSTERS,
PROFILE_ID,
SHORT_PRESS,
)
class TuyaSmartRemote0043(CustomDevice):
"""Tuya 3-button remote device."""
signature = {
# SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=0, device_version=1, input_clusters=[0, 10, 1, 6], output_clusters=[25]))
# SizePrefixedSimpleDescriptor(endpoint=2, profile=260, device_type=0, device_version=1, input_clusters=[1, 6], output_clusters=[])
# SizePrefixedSimpleDescriptor(endpoint=3, profile=260, device_type=0, device_version=1, input_clusters=[1, 6], output_clusters=[])
MODEL: "TS0043",
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.ON_OFF_SWITCH,
INPUT_CLUSTERS: [
Basic.cluster_id,
PowerConfiguration.cluster_id,
OnOff.cluster_id,
Time.cluster_id,
],
OUTPUT_CLUSTERS: [Ota.cluster_id],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.ON_OFF_SWITCH,
INPUT_CLUSTERS: [
PowerConfiguration.cluster_id,
OnOff.cluster_id,
],
OUTPUT_CLUSTERS: [],
},
3: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.ON_OFF_SWITCH,
INPUT_CLUSTERS: [
PowerConfiguration.cluster_id,
OnOff.cluster_id,
],
OUTPUT_CLUSTERS: [],
},
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.REMOTE_CONTROL,
INPUT_CLUSTERS: [
Basic.cluster_id,
PowerConfiguration.cluster_id,
TuyaSmartRemoteOnOffCluster,
Time.cluster_id,
],
OUTPUT_CLUSTERS: [Ota.cluster_id],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.REMOTE_CONTROL,
INPUT_CLUSTERS: [
PowerConfiguration.cluster_id,
TuyaSmartRemoteOnOffCluster,
],
OUTPUT_CLUSTERS: [],
},
3: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.REMOTE_CONTROL,
INPUT_CLUSTERS: [
PowerConfiguration.cluster_id,
TuyaSmartRemoteOnOffCluster,
],
OUTPUT_CLUSTERS: [],
},
},
}
device_automation_triggers = {
(SHORT_PRESS, BUTTON_1): {ENDPOINT_ID: 1, COMMAND: SHORT_PRESS},
(LONG_PRESS, BUTTON_1): {ENDPOINT_ID: 1, COMMAND: LONG_PRESS},
(DOUBLE_PRESS, BUTTON_1): {ENDPOINT_ID: 1, COMMAND: DOUBLE_PRESS},
(SHORT_PRESS, BUTTON_2): {ENDPOINT_ID: 2, COMMAND: SHORT_PRESS},
(LONG_PRESS, BUTTON_2): {ENDPOINT_ID: 2, COMMAND: LONG_PRESS},
(DOUBLE_PRESS, BUTTON_2): {ENDPOINT_ID: 2, COMMAND: DOUBLE_PRESS},
(SHORT_PRESS, BUTTON_3): {ENDPOINT_ID: 3, COMMAND: SHORT_PRESS},
(LONG_PRESS, BUTTON_3): {ENDPOINT_ID: 3, COMMAND: LONG_PRESS},
(DOUBLE_PRESS, BUTTON_3): {ENDPOINT_ID: 3, COMMAND: DOUBLE_PRESS},
}
class BenexmartRemote0043(CustomDevice):
"""Benexmart/Tuya 3-button remote device."""
signature = {
# SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=0, device_version=1, input_clusters=[0, 1, 6], output_clusters=[10, 25]))
# SizePrefixedSimpleDescriptor(endpoint=2, profile=260, device_type=0, device_version=1, input_clusters=[1, 6], output_clusters=[])
# SizePrefixedSimpleDescriptor(endpoint=3, profile=260, device_type=0, device_version=1, input_clusters=[1, 6], output_clusters=[])
MODEL: "TS0043",
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.ON_OFF_SWITCH,
INPUT_CLUSTERS: [
Basic.cluster_id,
PowerConfiguration.cluster_id,
OnOff.cluster_id,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.ON_OFF_SWITCH,
INPUT_CLUSTERS: [
PowerConfiguration.cluster_id,
OnOff.cluster_id,
],
OUTPUT_CLUSTERS: [],
},
3: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.ON_OFF_SWITCH,
INPUT_CLUSTERS: [
PowerConfiguration.cluster_id,
OnOff.cluster_id,
],
OUTPUT_CLUSTERS: [],
},
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.REMOTE_CONTROL,
INPUT_CLUSTERS: [
Basic.cluster_id,
PowerConfiguration.cluster_id,
TuyaSmartRemoteOnOffCluster,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.REMOTE_CONTROL,
INPUT_CLUSTERS: [
PowerConfiguration.cluster_id,
TuyaSmartRemoteOnOffCluster,
],
OUTPUT_CLUSTERS: [],
},
3: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.REMOTE_CONTROL,
INPUT_CLUSTERS: [
PowerConfiguration.cluster_id,
TuyaSmartRemoteOnOffCluster,
],
OUTPUT_CLUSTERS: [],
},
},
}
device_automation_triggers = {
(SHORT_PRESS, BUTTON_1): {ENDPOINT_ID: 1, COMMAND: SHORT_PRESS},
(DOUBLE_PRESS, BUTTON_1): {ENDPOINT_ID: 1, COMMAND: DOUBLE_PRESS},
(SHORT_PRESS, BUTTON_2): {ENDPOINT_ID: 2, COMMAND: SHORT_PRESS},
(DOUBLE_PRESS, BUTTON_2): {ENDPOINT_ID: 2, COMMAND: DOUBLE_PRESS},
(SHORT_PRESS, BUTTON_3): {ENDPOINT_ID: 3, COMMAND: SHORT_PRESS},
(DOUBLE_PRESS, BUTTON_3): {ENDPOINT_ID: 3, COMMAND: DOUBLE_PRESS},
}
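# Illustrative sketch, separate from the quirk classes above: device_automation_triggers
# is just a mapping from a (press type, button) pair to the endpoint and command that
# produce it. The string constants below are hypothetical stand-ins for the zhaquirks
# constants imported at the top of this file.
if __name__ == "__main__":
    triggers_sketch = {
        ("short_press", "button_1"): {"endpoint_id": 1, "command": "short_press"},
        ("short_press", "button_2"): {"endpoint_id": 2, "command": "short_press"},
    }
    assert triggers_sketch[("short_press", "button_2")]["endpoint_id"] == 2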
| 36.92228
| 149
| 0.53817
|
969c9196a35a6735da0c8826a965743679d3b267
| 4,184
|
py
|
Python
|
pymc/backends/text.py
|
RoyalTS/pymc
|
53aff9951018cdaf1d070f63fa4b42c456b9d5ee
|
[
"Apache-2.0"
] | 2
|
2016-03-07T15:25:10.000Z
|
2020-11-21T18:38:31.000Z
|
pymc/backends/text.py
|
RoyalTS/pymc
|
53aff9951018cdaf1d070f63fa4b42c456b9d5ee
|
[
"Apache-2.0"
] | null | null | null |
pymc/backends/text.py
|
RoyalTS/pymc
|
53aff9951018cdaf1d070f63fa4b42c456b9d5ee
|
[
"Apache-2.0"
] | null | null | null |
"""Text file trace backend
After sampling with NDArray backend, save results as text files.
As with other backends, this one can be used by passing the backend instance
to `sample`.
>>> import pymc as pm
>>> db = pm.backends.Text('test')
>>> trace = pm.sample(..., trace=db)
Or sampling can be performed with the default NDArray backend and then
dumped to text files after.
>>> from pymc.backends import text
>>> trace = pm.sample(...)
>>> text.dump('test', trace)
Database format
---------------
For each chain, a directory named `chain-N` is created. In this
directory, one file per variable is created containing the values of the
object. To deal with multidimensional variables, the array is reshaped
to one dimension before saving with `numpy.savetxt`. The shape
information is saved in a json file in the same directory and is used to
load the database back again using `numpy.loadtxt`.
"""
import os
import glob
import json
import numpy as np
from ..backends import base
from ..backends.ndarray import NDArray
class Text(NDArray):
"""Text storage
Parameters
----------
name : str
Name of directory to store text files
model : Model
If None, the model is taken from the `with` context.
vars : list of variables
Sampling values will be stored for these variables. If None,
`model.unobserved_RVs` is used.
"""
def __init__(self, name, model=None, vars=None):
if not os.path.exists(name):
os.mkdir(name)
super(Text, self).__init__(name, model, vars)
def close(self):
super(Text, self).close()
_dump_trace(self.name, self)
def dump(name, trace, chains=None):
"""Store NDArray trace as text database.
Parameters
----------
name : str
Name of directory to store text files
trace : MultiTrace of NDArray traces
Result of MCMC run with default NDArray backend
chains : list
Chains to dump. If None, all chains are dumped.
"""
if not os.path.exists(name):
os.mkdir(name)
if chains is None:
chains = trace.chains
for chain in chains:
_dump_trace(name, trace._traces[chain])
def _dump_trace(name, trace):
"""Dump a single-chain trace.
"""
chain_name = 'chain-{}'.format(trace.chain)
chain_dir = os.path.join(name, chain_name)
os.mkdir(chain_dir)
shapes = {}
for varname in trace.varnames:
data = trace.get_values(varname)
var_file = os.path.join(chain_dir, varname + '.txt')
np.savetxt(var_file, data.reshape(-1, data.size))
shapes[varname] = data.shape
## Store shape information for reloading.
shape_file = os.path.join(chain_dir, 'shapes.json')
with open(shape_file, 'w') as sfh:
json.dump(shapes, sfh)
def load(name, chains=None, model=None):
"""Load text database.
Parameters
----------
name : str
Path to root directory for text database
chains : list
Chains to load. If None, all chains are loaded.
model : Model
If None, the model is taken from the `with` context.
Returns
-------
ndarray.Trace instance
"""
chain_dirs = _get_chain_dirs(name)
if chains is None:
chains = list(chain_dirs.keys())
traces = []
for chain in chains:
chain_dir = chain_dirs[chain]
shape_file = os.path.join(chain_dir, 'shapes.json')
with open(shape_file, 'r') as sfh:
shapes = json.load(sfh)
samples = {}
for varname, shape in shapes.items():
var_file = os.path.join(chain_dir, varname + '.txt')
samples[varname] = np.loadtxt(var_file).reshape(shape)
trace = NDArray(model=model)
trace.samples = samples
trace.chain = chain
traces.append(trace)
return base.MultiTrace(traces)
def _get_chain_dirs(name):
"""Return mapping of chain number to directory."""
return {_chain_dir_to_chain(chain_dir): chain_dir
for chain_dir in glob.glob(os.path.join(name, 'chain-*'))}
def _chain_dir_to_chain(chain_dir):
return int(os.path.basename(chain_dir).split('-')[1])
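# Illustrative sketch, separate from the backend above: a standalone round trip using
# the same storage scheme as _dump_trace/load (one flattened .txt file per variable plus
# a shapes.json), without needing a pymc model or MultiTrace. The variable names and
# shapes below are hypothetical.
if __name__ == "__main__":
    import tempfile
    samples_in = {'mu': np.random.randn(100), 'beta': np.random.randn(100, 2)}
    chain_dir = tempfile.mkdtemp()
    shapes = {}
    for varname, data in samples_in.items():
        np.savetxt(os.path.join(chain_dir, varname + '.txt'), data.reshape(-1, data.size))
        shapes[varname] = data.shape
    with open(os.path.join(chain_dir, 'shapes.json'), 'w') as sfh:
        json.dump(shapes, sfh)
    with open(os.path.join(chain_dir, 'shapes.json')) as sfh:
        shapes_out = json.load(sfh)
    for varname, shape in shapes_out.items():
        loaded = np.loadtxt(os.path.join(chain_dir, varname + '.txt')).reshape(shape)
        assert loaded.shape == samples_in[varname].shape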
| 28.855172
| 72
| 0.642925
|
245c43bb7e673d8cd464ff98440eac3f0b9383c3
| 1,969
|
py
|
Python
|
django_mailbox/south_migrations/0004_auto__add_field_message_outgoing.py
|
JBwebkrone/django-mailbox-1
|
40263b66703332d82c179d79f5ea0d80fc1ea388
|
[
"MIT"
] | 225
|
2015-01-02T14:53:59.000Z
|
2022-03-04T23:07:34.000Z
|
django_mailbox/south_migrations/0004_auto__add_field_message_outgoing.py
|
JBwebkrone/django-mailbox-1
|
40263b66703332d82c179d79f5ea0d80fc1ea388
|
[
"MIT"
] | 182
|
2015-02-06T23:29:50.000Z
|
2022-01-20T21:50:39.000Z
|
django_mailbox/south_migrations/0004_auto__add_field_message_outgoing.py
|
JBwebkrone/django-mailbox-1
|
40263b66703332d82c179d79f5ea0d80fc1ea388
|
[
"MIT"
] | 138
|
2015-01-18T16:57:34.000Z
|
2022-03-24T19:33:38.000Z
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Message.outgoing'
db.add_column('django_mailbox_message', 'outgoing',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Message.outgoing'
db.delete_column('django_mailbox_message', 'outgoing')
models = {
'django_mailbox.mailbox': {
'Meta': {'object_name': 'Mailbox'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uri': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'django_mailbox.message': {
'Meta': {'object_name': 'Message'},
'body': ('django.db.models.fields.TextField', [], {}),
'from_address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailbox': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'to': "orm['django_mailbox.Mailbox']"}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'outgoing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'received': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['django_mailbox']
| 45.790698
| 145
| 0.577958
|
e1f933e2005ae6cea37fe51d265fe7ef79f7e4b2
| 7,415
|
py
|
Python
|
testscripts/RDKB/component/WIFIHAL/TS_WIFIHAL_5GHzIsApSsidAdvertisementEnabled.py
|
rdkcmf/rdkb-tools-tdkb
|
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/WIFIHAL/TS_WIFIHAL_5GHzIsApSsidAdvertisementEnabled.py
|
rdkcmf/rdkb-tools-tdkb
|
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/WIFIHAL/TS_WIFIHAL_5GHzIsApSsidAdvertisementEnabled.py
|
rdkcmf/rdkb-tools-tdkb
|
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
|
[
"Apache-2.0"
] | null | null | null |
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2017 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>3</version>
<name>TS_WIFIHAL_5GHzIsApSsidAdvertisementEnabled</name>
<primitive_test_id/>
<primitive_test_name>WIFIHAL_GetOrSetParamBoolValue</primitive_test_name>
<primitive_test_version>1</primitive_test_version>
<status>FREE</status>
<synopsis>Check APSSID-Advertisement enable status using wfi_getApSsidAdvertisementEnable HAL API</synopsis>
<groups_id>4</groups_id>
<execution_time>10</execution_time>
<long_duration>false</long_duration>
<advanced_script>false</advanced_script>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
<box_type>Emulator</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_WIFIHAL_89</test_case_id>
<test_objective>Check APSSID-Advertisement enable status using wfi_getApSsidAdvertisementEnable HAL API</test_objective>
<test_type>Positive</test_type>
<test_setup>XB3, XB6, Emulator</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components and TDK Component
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>wifi_getApSsidAdvertisementEnable()</api_or_interface_used>
<input_parameters>methodName : getApSsidAdvertisementEnable
methodName : setApSsidAdvertisementEnable
apIndex : 1</input_parameters>
<automation_approch>1.Configure the Function info in Test Manager GUI which needs to be tested
(WIFIHAL_GetOrSetParamBoolValue - func name - "If not exists already"
WIFIHAL - module name
Necessary I/P args as Mentioned in Input)
2.Python Script will be generated/overridden automatically by Test Manager with provided arguments in configure page (TS_WIFIHAL_5GHzIsApSsidAdvertisementEnabled.py)
3.Execute the generated Script(TS_WIFIHAL_5GHzIsApSsidAdvertisementEnabled.py) using execution page of Test Manager GUI
4.wifihalstub which is a part of TDK Agent process, will be in listening mode to execute TDK Component function named WIFIHAL_GetOrSetParamBoolValue through registered TDK wifihalstub function along with necessary arguments
5.WIFIHAL_GetOrSetParamBoolValue function will call Ccsp Base Function named "ssp_WIFIHALGetOrSetParamBoolValue", that inturn will call WIFIHAL Library Functions
wifi_getApSsidAdvertisementEnable() and wifi_setApSsidAdvertisementEnable()
6.Response(s)(printf) from TDK Component,Ccsp Library function and wifihalstub would be logged in Agent Console log based on the debug info redirected to agent console
7.wifihalstub will validate the available result (from agent console log and Pointer to instance as updated) with expected result
8.Test Manager will publish the result in GUI as SUCCESS/FAILURE based on the response from wifihalstub</automation_approch>
<except_output>"
CheckPoint
1:wifi_getApSsidAdvertisementEnable log from DUT should be available in Agent Console LogCheckPoint
2:TDK agent Test Function will log the test case result as PASS based on API response CheckPoint
3:Test Manager GUI will publish the result as SUCCESS in Execution page"""</except_output>
<priority>High</priority>
<test_stub_interface>WIFIHAL</test_stub_interface>
<test_script>TS_WIFIHAL_5GHzIsApSsidAdvertisementEnabled</test_script>
<skipped>No</skipped>
<release_version/>
<remarks/>
</test_cases>
<script_tags/>
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
from wifiUtility import *;
radio = "5G"
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("wifihal","1");
#IP and Port of box, No need to change,
#This will be replaced with the corresponding Box IP and port while executing the script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_WIFIHAL_5GHzIsApSsidAdvertisementEnabled');
loadmodulestatus =obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus
if "SUCCESS" in loadmodulestatus.upper():
obj.setLoadModuleStatus("SUCCESS");
tdkTestObjTemp, idx = getIndex(obj, radio);
    ## Check if an invalid index is returned
if idx == -1:
print "Failed to get radio index for radio %s\n" %radio;
tdkTestObjTemp.setResultStatus("FAILURE");
else:
expectedresult="SUCCESS";
apIndex = idx
getMethod = "getApSsidAdvertisementEnable"
primitive = 'WIFIHAL_GetOrSetParamBoolValue'
tdkTestObj, actualresult, details = ExecuteWIFIHalCallMethod(obj, primitive, apIndex, 0, getMethod)
if expectedresult in actualresult :
tdkTestObj.setResultStatus("SUCCESS");
enable = details.split(":")[1].strip()
if "Enabled" in enable:
print "Access point SSID Advertisement is Enabled"
oldEnable = 1
newEnable = 0
else:
print "Access point SSID Advertisement is Disabled"
oldEnable = 0
newEnable = 1
setMethod = "setApSsidAdvertisementEnable"
#Toggle the enable status using set
tdkTestObj, actualresult, details = ExecuteWIFIHalCallMethod(obj, primitive, apIndex, newEnable, setMethod)
if expectedresult in actualresult :
print "Enable state toggled using set"
# Get the New enable status
tdkTestObj, actualresult, details = ExecuteWIFIHalCallMethod(obj, primitive, apIndex, 0, getMethod)
if expectedresult in actualresult and enable not in details.split(":")[1].strip():
print "getApSsidAdvertisementEnable Success, verified along with setApSsidAdvertisementEnable() api"
#Revert back to original Enable status
tdkTestObj, actualresult, details = ExecuteWIFIHalCallMethod(obj, primitive, apIndex, oldEnable, setMethod)
if expectedresult in actualresult :
print "Enable status reverted back";
else:
print "Couldn't revert enable status"
tdkTestObj.setResultStatus("FAILURE");
else:
print "getApSsidAdvertisementEnable() failed after set function"
tdkTestObj.setResultStatus("FAILURE");
else:
print "setApSsidAdvertisementEnable() failed"
tdkTestObj.setResultStatus("FAILURE");
else:
print "getApSsidAdvertisementEnable() failed"
tdkTestObj.setResultStatus("FAILURE");
obj.unloadModule("wifihal");
else:
print "Failed to load wifi module";
obj.setLoadModuleStatus("FAILURE");
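# Illustrative sketch, separate from the TDK test flow above: the get -> toggle ->
# verify -> revert pattern used in this script, shown against a hypothetical in-memory
# access point instead of the real wifi_get/setApSsidAdvertisementEnable HAL calls.
if __name__ == "__main__":
    ap_sketch = {"ssid_advertisement": 1}
    def get_enable():
        return ap_sketch["ssid_advertisement"]
    def set_enable(value):
        ap_sketch["ssid_advertisement"] = value
    old_enable = get_enable()
    set_enable(0 if old_enable else 1)      # toggle the advertisement flag
    assert get_enable() != old_enable       # verify the set took effect
    set_enable(old_enable)                  # revert to the original state
    assert get_enable() == old_enable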
| 45.490798
| 224
| 0.73648
|
5bb6b32ace4876b6472a342c4fb44f9bf16064d3
| 5,077
|
py
|
Python
|
Modules/NavWidgets/Wifi.py
|
macromorgan/pocketchip-menu
|
1f824b07ba179b386079528f2bf0496ec0c9c94f
|
[
"MIT"
] | 1
|
2021-11-12T12:57:59.000Z
|
2021-11-12T12:57:59.000Z
|
Modules/NavWidgets/Wifi.py
|
macromorgan/pocketchip-menu
|
1f824b07ba179b386079528f2bf0496ec0c9c94f
|
[
"MIT"
] | null | null | null |
Modules/NavWidgets/Wifi.py
|
macromorgan/pocketchip-menu
|
1f824b07ba179b386079528f2bf0496ec0c9c94f
|
[
"MIT"
] | null | null | null |
import pygame
from Modules.Globals import *
import Modules.DBusMain as DBusMain
import dbus
from Modules.GenWidgets.Widget import *
from multiprocessing import Value
class Wifi(Widget):
def __init__(self, parent=None):
self.parent = parent
self.size = (26, 24)
self.pos = (self.parent.parent.screen.get_width() - self.size[0] - EDGE_PADDING, EDGE_PADDING)
self.image = None
self.page = None
self.persistent = True
self.wifi_device = None
self.wifi_connection = None
self.wifi_signal = Value('i', 100)
self.wifi_status = Value('i', 1)
try:
self.get_wifi_dev()
self.persistent = True
except:
self.persistent = False
if self.wifi_device is not None:
try:
self.get_active_wifi_connection()
DBusMain.DBUS_BUS.add_signal_receiver(self.dbus_signal_handler,
bus_name='org.freedesktop.NetworkManager',
dbus_interface='org.freedesktop.DBus.Properties',
signal_name='PropertiesChanged',
path=self.wifi_device)
except:
self.wifi_connection = None
if self.wifi_connection is not None:
try:
self.get_wifi_connection_strength()
DBusMain.DBUS_BUS.add_signal_receiver(self.dbus_signal_handler,
bus_name='org.freedesktop.NetworkManager',
dbus_interface='org.freedesktop.DBus.Properties',
signal_name='PropertiesChanged',
path=self.wifi_connection)
except:
                self.wifi_signal.value = 0
def dbus_signal_handler(self, interface, data, type):
#added print for testing
print(data)
update = False
if 'Strength' in data and int(data['Strength']) != self.wifi_signal.value:
self.wifi_signal.value = int(data['Strength'])
update = True
if 'ActiveAccessPoint' in data and str(data['ActiveAccessPoint']) != self.wifi_connection:
self.wifi_connection = str(data['ActiveAccessPoint'])
update = True
if update is True:
#added print for testing
print("Updated")
pygame.fastevent.post(pygame.event.Event(pygame.USEREVENT, type="screen_update"))
update = False
def get_wifi_dev(self):
proxy = DBusMain.DBUS_BUS.get_object('org.freedesktop.NetworkManager',
'/org/freedesktop/NetworkManager')
getmanager = dbus.Interface(proxy, 'org.freedesktop.NetworkManager')
devices = getmanager.GetDevices()
for device in devices:
deviceobject = DBusMain.DBUS_BUS.get_object('org.freedesktop.NetworkManager',device)
deviceinterface = dbus.Interface(deviceobject, dbus_interface='org.freedesktop.DBus.Properties')
if deviceinterface.Get('org.freedesktop.NetworkManager.Device', 'DeviceType') == 2:
self.wifi_device = device
def get_active_wifi_connection(self):
proxy = DBusMain.DBUS_BUS.get_object('org.freedesktop.NetworkManager', self.wifi_device)
getmanager = dbus.Interface(proxy, dbus_interface='org.freedesktop.DBus.Properties')
self.wifi_connection = str(getmanager.Get('org.freedesktop.NetworkManager.Device.Wireless','ActiveAccessPoint'))
def get_wifi_connection_strength(self):
apobject = DBusMain.DBUS_BUS.get_object('org.freedesktop.NetworkManager', self.wifi_connection)
apinterface = dbus.Interface(apobject,dbus_interface='org.freedesktop.DBus.Properties')
self.wifi_signal.value = int(apinterface.Get('org.freedesktop.NetworkManager.AccessPoint', 'Strength'))
def update(self):
if self.wifi_device is None or self.persistent is False:
return
if self.wifi_status.value == 0:
self.image = pygame.transform.scale(pygame.image.load(assetpath('wifi-disconnected.png')).convert_alpha(), self.size)
return
if self.wifi_signal.value > 75:
self.image = pygame.transform.scale(pygame.image.load(assetpath('wifi-100.png')).convert_alpha(), self.size)
return
if self.wifi_signal.value > 50:
self.image = pygame.transform.scale(pygame.image.load(assetpath('wifi-75.png')).convert_alpha(), self.size)
return
if self.wifi_signal.value > 25:
self.image = pygame.transform.scale(pygame.image.load(assetpath('wifi-50.png')).convert_alpha(), self.size)
return
else:
self.image = pygame.transform.scale(pygame.image.load(assetpath('wifi-25.png')).convert_alpha(), self.size)
return
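# Illustrative sketch, separate from the widget above: the threshold logic of
# Wifi.update() as a pure function, so the icon choice can be checked without pygame,
# D-Bus or a running NetworkManager. The icon names mirror the asset files used above.
def _icon_for_sketch(status, strength):
    if status == 0:
        return 'wifi-disconnected.png'
    if strength > 75:
        return 'wifi-100.png'
    if strength > 50:
        return 'wifi-75.png'
    if strength > 25:
        return 'wifi-50.png'
    return 'wifi-25.png'
if __name__ == "__main__":
    assert _icon_for_sketch(0, 90) == 'wifi-disconnected.png'
    assert _icon_for_sketch(1, 60) == 'wifi-75.png'
    assert _icon_for_sketch(1, 10) == 'wifi-25.png'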
| 49.77451
| 129
| 0.602521
|
4bdc08710ad4864ce87374daf01cc37ef3e62a5a
| 448
|
py
|
Python
|
Systems/Engine/Scene.py
|
RippeR37/PyPong
|
601db4346f7c27c88226ce79317008941cbc5754
|
[
"MIT"
] | 1
|
2018-12-06T06:16:49.000Z
|
2018-12-06T06:16:49.000Z
|
Systems/Engine/Scene.py
|
RippeR37/PyPong
|
601db4346f7c27c88226ce79317008941cbc5754
|
[
"MIT"
] | 10
|
2016-01-07T19:22:44.000Z
|
2016-01-10T14:32:37.000Z
|
Systems/Engine/Scene.py
|
RippeR37/PyPong
|
601db4346f7c27c88226ce79317008941cbc5754
|
[
"MIT"
] | null | null | null |
class Scene(object):
def __init__(self, stackable=True, stack_usable=True):
self._is_stackable = stackable
self._is_stack_usable = stack_usable
def is_stackable(self):
return self._is_stackable
def is_stack_usable(self):
return self._is_stack_usable
def update(self, dt):
pass
def render(self):
pass
def process_scene_stack(self, scene_stack, scene_index):
pass
| 21.333333
| 60
| 0.660714
|
e709698585e7dfb1d43865fdd8961dafef650846
| 4,015
|
py
|
Python
|
test.py
|
brandonhorst/cdev-client-py
|
42febafa43735e8ff8dae05021037358490c5b3d
|
[
"MIT"
] | 1
|
2015-02-16T19:41:16.000Z
|
2015-02-16T19:41:16.000Z
|
test.py
|
brandonhorst/cdev-client-py
|
42febafa43735e8ff8dae05021037358490c5b3d
|
[
"MIT"
] | null | null | null |
test.py
|
brandonhorst/cdev-client-py
|
42febafa43735e8ff8dae05021037358490c5b3d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import unittest
import cdev
class TestCDEVServer(unittest.TestCase):
def setUp(self):
self.instance = cdev.CacheInstance('bigfoot', 57776, '_SYSTEM', 'SYS')
def get_samples(self):
namespaces = self.instance.get_namespaces()
self.assertIn('SAMPLES', [namespace.name for namespace in namespaces])
samples = [namespace for namespace in namespaces if namespace.name == 'SAMPLES'][0]
return samples
def test_namespaces(self):
self.get_samples()
def test_queries(self):
samples = self.get_samples()
sql = "SELECT Name, SSN FROM Sample.Person"
sqlresult = self.instance.add_query(samples, sql)
self.assertTrue(sqlresult.success)
self.assertIn(sql, sqlresult.query.content)
executeresult = self.instance.execute_query(sqlresult.query)
self.assertTrue(executeresult.success)
self.assertIn("Name", executeresult.resultset)
def test_globals(self):
samples = self.get_samples()
globs = self.instance.get_globals(samples)
self.assertIn('oddDEF', [glob.name for glob in globs])
personglob = [glob for glob in globs if glob.name == 'Sample.PersonD'][0]
personglobfull = self.instance.get_global(personglob)
self.assertIn('1', personglobfull.content)
def test_classes_and_routines(self):
samples = self.get_samples()
files = self.instance.get_files(samples)
self.assertIn('Sample.Person.cls', [file.name for file in files])
self.assertIn('LDAP.mac', [file.name for file in files])
personfile = [file for file in files if file.name == 'Sample.Person.cls'][0]
person = self.instance.get_file(personfile)
self.assertIn('Class Sample.Person', person.content)
ldapfile = [file for file in files if file.name == 'LDAP.mac'][0]
ldap = self.instance.get_file(ldapfile)
self.assertIn('LDAP', ldap.content)
person.content = '///modified by cdev\r\n{0}'.format(person.content)
putmodifiedpersonrequest = self.instance.put_file(person)
self.assertTrue(putmodifiedpersonrequest.success)
ldap.content = '///modified by cdev\r\n{0}'.format(ldap.content)
putmodifiedldaprequest = self.instance.put_file(ldap)
self.assertTrue(putmodifiedldaprequest.success)
newpersoncontent = person.content.replace('Sample.Person','Sample.CDEVPerson').replace('Stored_Procedure_Test','CDEV_Stored_Procedure_Test').replace('SP_Sample_By_Name','CDEV_Sample_By_Name')
newpersonname = 'Sample.CDEVPerson.cls'
newpersonresult = self.instance.add_file(samples, newpersonname, newpersoncontent)
self.assertTrue(newpersonresult.success)
newldapcontent = ldap.content
newldapname = 'CDEVLDAP.mac'
newldapresult = self.instance.add_file(samples, newldapname, newldapcontent)
self.assertTrue(newldapresult.success)
compilationresult = self.instance.compile_file(newpersonresult.file, 'ck')
self.assertTrue(compilationresult.success)
generatedfiles = self.instance.get_generated_files(compilationresult.file)
self.assertIn('Sample.CDEVPerson.1.int', [file.name for file in generatedfiles])
intfile = [file for file in generatedfiles if file.name == 'Sample.CDEVPerson.1.int'][0]
int = self.instance.get_file(intfile)
self.assertIn('Sample.CDEVPerson.1', int.content)
personxml = self.instance.get_xml(person)
self.assertIn('Sample.Person', personxml.content)
personxmlresult = self.instance.put_xml(personxml)
self.assertTrue(personxmlresult.success)
self.assertEqual(personxmlresult.file.name, "Sample.Person.cls")
anonxmlresult = self.instance.add_xml(samples, personxml.content)
self.assertTrue(anonxmlresult.success)
self.assertEqual(anonxmlresult.file.name, "Sample.Person.cls")
if __name__=='__main__':
unittest.main()
| 40.969388
| 199
| 0.694147
|
dd15b4adf5284ee286128267209807e79946353a
| 14,132
|
py
|
Python
|
federated-MPI/mpi_advanced_classifier.py
|
dylan-fan/federated-averaging-tutorials
|
9320d1fce7e4740a8fdaf391f69ca00cbd0d0990
|
[
"Apache-2.0"
] | 1
|
2019-02-10T13:22:00.000Z
|
2019-02-10T13:22:00.000Z
|
federated-MPI/mpi_advanced_classifier.py
|
dylan-fan/federated-averaging-tutorials
|
9320d1fce7e4740a8fdaf391f69ca00cbd0d0990
|
[
"Apache-2.0"
] | null | null | null |
federated-MPI/mpi_advanced_classifier.py
|
dylan-fan/federated-averaging-tutorials
|
9320d1fce7e4740a8fdaf391f69ca00cbd0d0990
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 coMind. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# https://comind.org/
# ==============================================================================
# TensorFlow
import tensorflow as tf
# Helper libraries
import numpy as np
from time import time
from mpi4py import MPI
import sys
import multiprocessing
# You can safely tune these variables
BATCH_SIZE = 128
SHUFFLE_SIZE = BATCH_SIZE * 100
EPOCHS = 250
EPOCHS_PER_DECAY = 50
INTERVAL_STEPS = 100 # Steps between averages
BATCHES_TO_PREFETCH = 1
# -----------------
# Let the code know about the MPI config
comm = MPI.COMM_WORLD
num_workers = comm.size
# Dataset dependent constants
num_train_images = int(50000 / num_workers)
num_test_images = 10000
height = 32
width = 32
channels = 3
num_batch_files = 5
# Path to TFRecord files (check readme for instructions on how to get these files)
cifar10_train_files = ['cifar-10-tf-records/train{}.tfrecords'.format(i) for i in range(num_batch_files)]
cifar10_test_file = 'cifar-10-tf-records/test.tfrecords'
# Shuffle filenames before loading them
np.random.shuffle(cifar10_train_files)
checkpoint_dir='logs_dir/{}'.format(time())
print('Checkpoint directory: ' + checkpoint_dir)
sys.stdout.flush()
global_step = tf.train.get_or_create_global_step()
cpu_count = int(multiprocessing.cpu_count() / num_workers)
# Define input pipeline, place these ops in the cpu
with tf.name_scope('dataset'), tf.device('/cpu:0'):
# Map function to decode data and preprocess it
def preprocess(serialized_examples):
# Parse a batch
features = tf.parse_example(serialized_examples, {'image': tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([], tf.int64)})
        # Decode and reshape image
image = tf.map_fn(lambda img: tf.reshape(tf.decode_raw(img, tf.uint8), tf.stack([height, width, channels])), features['image'], dtype=tf.uint8, name='decode')
# Cast image
casted_image = tf.cast(image, tf.float32, name='input_cast')
# Resize image for testing
resized_image = tf.image.resize_image_with_crop_or_pad(casted_image, 24, 24)
# Augment images for training
distorted_image = tf.map_fn(lambda img: tf.random_crop(img, [24, 24, 3]), casted_image, name='random_crop')
distorted_image = tf.image.random_flip_left_right(distorted_image)
distorted_image = tf.image.random_brightness(distorted_image, 63)
distorted_image = tf.image.random_contrast(distorted_image, 0.2, 1.8)
# Check if test or train mode
result = tf.cond(train_mode, lambda: distorted_image, lambda: resized_image)
# Standardize images
processed_image = tf.map_fn(lambda img: tf.image.per_image_standardization(img), result, name='standardization')
return processed_image, features['label']
# Placeholders for the iterator
filename_placeholder = tf.placeholder(tf.string, name='input_filename')
batch_size = tf.placeholder(tf.int64, name='batch_size')
shuffle_size = tf.placeholder(tf.int64, name='shuffle_size')
train_mode = tf.placeholder(tf.bool, name='train_mode')
# Create dataset, shuffle, repeat, batch, map and prefetch
dataset = tf.data.TFRecordDataset(filename_placeholder)
dataset = dataset.shard(num_workers, comm.rank)
dataset = dataset.shuffle(shuffle_size, reshuffle_each_iteration=True)
dataset = dataset.repeat(EPOCHS)
dataset = dataset.batch(batch_size)
dataset = dataset.map(preprocess, cpu_count)
dataset = dataset.prefetch(BATCHES_TO_PREFETCH)
# Define a feedable iterator and the initialization op
iterator = tf.data.Iterator.from_structure(dataset.output_types, dataset.output_shapes)
dataset_init_op = iterator.make_initializer(dataset, name='dataset_init')
X, y = iterator.get_next()
# Define our model
first_conv = tf.layers.conv2d(X, 64, 5, padding='SAME', activation=tf.nn.relu, kernel_initializer=tf.truncated_normal_initializer(stddev=5e-2), name='first_conv')
first_pool = tf.nn.max_pool(first_conv, [1, 3, 3 ,1], [1, 2, 2, 1], padding='SAME', name='first_pool')
first_norm = tf.nn.lrn(first_pool, 4, alpha=0.001 / 9.0, beta=0.75, name='first_norm')
second_conv = tf.layers.conv2d(first_norm, 64, 5, padding='SAME', activation=tf.nn.relu, kernel_initializer=tf.truncated_normal_initializer(stddev=5e-2), name='second_conv')
second_norm = tf.nn.lrn(second_conv, 4, alpha=0.001 / 9.0, beta=0.75, name='second_norm')
second_pool = tf.nn.max_pool(second_norm, [1, 3, 3, 1], [1, 2, 2, 1], padding='SAME', name='second_pool')
flatten_layer = tf.layers.flatten(second_pool, name='flatten')
first_relu = tf.layers.dense(flatten_layer, 384, activation=tf.nn.relu, kernel_initializer=tf.truncated_normal_initializer(stddev=0.04), name='first_relu')
second_relu = tf.layers.dense(first_relu, 192, activation=tf.nn.relu, kernel_initializer=tf.truncated_normal_initializer(stddev=0.04), name='second_relu')
logits = tf.layers.dense(second_relu, 10, kernel_initializer=tf.truncated_normal_initializer(stddev=1/192.0), name='logits')
# Object to keep moving averages of our metrics (for tensorboard)
summary_averages = tf.train.ExponentialMovingAverage(0.9)
# Define cross_entropy loss
with tf.name_scope('loss'):
base_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits), name='base_loss')
# Add regularization loss to both relu layers
regularizer_loss = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'relu/kernel' in v.name], name='regularizer_loss') * 0.004
loss = tf.add(base_loss, regularizer_loss)
loss_averages_op = summary_averages.apply([loss])
# Store moving average of the loss
tf.summary.scalar('cross_entropy', summary_averages.average(loss))
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
# Compare prediction with actual label
correct_prediction = tf.equal(tf.argmax(logits, 1), y)
# Average correct predictions in the current batch
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy_metric')
accuracy_averages_op = summary_averages.apply([accuracy])
# Store moving average of the accuracy
tf.summary.scalar('accuracy', summary_averages.average(accuracy))
n_batches = int(num_train_images / BATCH_SIZE)
last_step = int(n_batches * EPOCHS)
# Define moving averages of the trainable variables. This sometimes improve
# the performance of the trained model
with tf.name_scope('variable_averages'):
variable_averages = tf.train.ExponentialMovingAverage(0.9999, global_step)
variable_averages_op = variable_averages.apply(tf.trainable_variables())
# Define optimizer and training op
with tf.name_scope('train'):
# Make decaying learning rate
lr = tf.train.exponential_decay(0.1, global_step, n_batches * EPOCHS_PER_DECAY, 0.1, staircase=True)
tf.summary.scalar('learning_rate', lr)
# Make train_op dependent on moving averages ops. Otherwise they will be
# disconnected from the graph
with tf.control_dependencies([loss_averages_op, accuracy_averages_op, variable_averages_op]):
train_op = tf.train.GradientDescentOptimizer(lr).minimize(loss, global_step=global_step)
print('Graph definition finished')
sys.stdout.flush()
sess_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
print('Training {} batches...'.format(last_step))
sys.stdout.flush()
# Logger hook to keep track of the training
class _LoggerHook(tf.train.SessionRunHook):
def begin(self):
self._total_loss = 0
self._total_acc = 0
def before_run(self, run_context):
return tf.train.SessionRunArgs([loss, accuracy, global_step])
def after_run(self, run_context, run_values):
loss_value, acc_value, step_value = run_values.results
self._total_loss += loss_value
self._total_acc += acc_value
if (step_value + 1) % n_batches == 0 and comm.rank == 0:
print("Epoch {}/{} - loss: {:.4f} - acc: {:.4f}".format(int(step_value / n_batches) + 1, EPOCHS, self._total_loss / n_batches, self._total_acc / n_batches))
sys.stdout.flush()
self._total_loss = 0
self._total_acc = 0
# Custom hook
class _FederatedHook(tf.train.SessionRunHook):
def __init__(self, comm):
# Store the MPI config
self._comm = comm
def _create_placeholders(self):
# Create placeholders for all the trainable variables
for v in tf.trainable_variables():
self._placeholders.append(tf.placeholder_with_default(v, v.shape, name="%s/%s" % ("FedAvg", v.op.name)))
def _assign_vars(self, local_vars):
# Assign value feeded to placeholders to local vars
reassign_ops = []
for var, fvar in zip(local_vars, self._placeholders):
reassign_ops.append(tf.assign(var, fvar))
return tf.group(*(reassign_ops))
def _gather_weights(self, session):
# Gather all weights in the chief worker
gathered_weights = []
for v in tf.trainable_variables():
value = session.run(v)
value = self._comm.gather(value, root=0)
gathered_weights.append(np.array(value))
return gathered_weights
def _broadcast_weights(self, session):
# Broadcast averaged weights to all workers
broadcasted_weights = []
for v in tf.trainable_variables():
value = session.run(v)
value = self._comm.bcast(value, root=0)
broadcasted_weights.append(np.array(value))
return broadcasted_weights
def begin(self):
self._placeholders = []
self._create_placeholders()
        # Op to update the local weights from the placeholders
self._update_local_vars_op = self._assign_vars(tf.trainable_variables())
def after_create_session(self, session, coord):
# Broadcast weights
broadcasted_weights = self._broadcast_weights(session)
# Initialize the workers at the same point
if self._comm.rank != 0:
feed_dict = {}
for ph, bw in zip(self._placeholders, broadcasted_weights):
feed_dict[ph] = bw
session.run(self._update_local_vars_op, feed_dict=feed_dict)
def before_run(self, run_context):
return tf.train.SessionRunArgs(global_step)
def after_run(self, run_context, run_values):
step_value = run_values.results
session = run_context.session
# Check if we should average
if step_value % INTERVAL_STEPS == 0 and not step_value == 0:
gathered_weights = self._gather_weights(session)
# Chief gather weights and averages
if self._comm.rank == 0:
print('Average applied, iter: {}/{}'.format(step_value, last_step))
sys.stdout.flush()
for i in range(len(gathered_weights)):
gathered_weights[i] = np.mean(gathered_weights[i], axis=0)
feed_dict = {}
for ph, gw in zip(self._placeholders, gathered_weights):
feed_dict[ph] = gw
session.run(self._update_local_vars_op, feed_dict=feed_dict)
# The rest get the averages and update their local model
broadcasted_weights = self._broadcast_weights(session)
if self._comm.rank != 0:
feed_dict = {}
for ph, bw in zip(self._placeholders, broadcasted_weights):
feed_dict[ph] = bw
session.run(self._update_local_vars_op, feed_dict=feed_dict)
# Hook to initialize the dataset
class _InitHook(tf.train.SessionRunHook):
def after_create_session(self, session, coord):
session.run(dataset_init_op, feed_dict={filename_placeholder: cifar10_train_files, batch_size: BATCH_SIZE, shuffle_size: SHUFFLE_SIZE, train_mode: True})
print("Worker {} ready".format(comm.rank))
sys.stdout.flush()
with tf.name_scope('monitored_session'):
with tf.train.MonitoredTrainingSession(
checkpoint_dir=checkpoint_dir,
hooks=[_LoggerHook(), _InitHook(), _FederatedHook(comm), tf.train.CheckpointSaverHook(checkpoint_dir=checkpoint_dir, save_steps=n_batches, saver=tf.train.Saver(variable_averages.variables_to_restore()))],
config=sess_config,
save_checkpoint_secs=None) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
if comm.rank == 0:
print('--- Begin Evaluation ---')
sys.stdout.flush()
tf.reset_default_graph()
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
saver = tf.train.import_meta_graph(ckpt.model_checkpoint_path + '.meta', clear_devices=True)
saver.restore(sess, ckpt.model_checkpoint_path)
print('Model restored')
sys.stdout.flush()
graph = tf.get_default_graph()
images_placeholder = graph.get_tensor_by_name('dataset/images_placeholder:0')
labels_placeholder = graph.get_tensor_by_name('dataset/labels_placeholder:0')
batch_size = graph.get_tensor_by_name('dataset/batch_size:0')
train_mode = graph.get_tensor_by_name('dataset/train_mode:0')
accuracy = graph.get_tensor_by_name('accuracy/accuracy_metric:0')
dataset_init_op = graph.get_operation_by_name('dataset/dataset_init')
sess.run(dataset_init_op, feed_dict={filename_placeholder: cifar10_test_file, batch_size: num_test_images, shuffle_size: 1, train_mode: False})
print('Test accuracy: {:4f}'.format(sess.run(accuracy)))
sys.stdout.flush()
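# Illustrative sketch, separate from the training graph above: the core of the averaging
# step performed by _FederatedHook (gather per-worker weights, take the element-wise
# mean, broadcast the result back), reduced to plain NumPy so it can be checked without
# MPI or a TensorFlow session. The three "workers" and their weights are hypothetical.
if __name__ == "__main__":
    per_worker_weights = [
        [np.full((2, 2), 1.0), np.full((3,), 0.0)],   # worker 0
        [np.full((2, 2), 2.0), np.full((3,), 3.0)],   # worker 1
        [np.full((2, 2), 3.0), np.full((3,), 6.0)],   # worker 2
    ]
    averaged = [np.mean(np.stack(var_across_workers), axis=0)
                for var_across_workers in zip(*per_worker_weights)]
    assert np.allclose(averaged[0], 2.0) and np.allclose(averaged[1], 3.0)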
| 45.440514
| 216
| 0.706553
|
af52f6f11aaa653ff5ac95411a97fc3b3cf46179
| 2,838
|
py
|
Python
|
MauricioGonzalez_Ejercicio10.py
|
lmgonzalezc/MauricioGonzalez_Ejercicio10Lab
|
862c1f6c7454db229f5eb6e9c136bd43dde088e3
|
[
"MIT"
] | null | null | null |
MauricioGonzalez_Ejercicio10.py
|
lmgonzalezc/MauricioGonzalez_Ejercicio10Lab
|
862c1f6c7454db229f5eb6e9c136bd43dde088e3
|
[
"MIT"
] | null | null | null |
MauricioGonzalez_Ejercicio10.py
|
lmgonzalezc/MauricioGonzalez_Ejercicio10Lab
|
862c1f6c7454db229f5eb6e9c136bd43dde088e3
|
[
"MIT"
] | null | null | null |
import urllib
from io import StringIO
from io import BytesIO
import csv
import numpy as np
from datetime import datetime
import matplotlib.pylab as plt
import pandas as pd
import scipy.signal as signal
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
datos=pd.read_csv('https://raw.githubusercontent.com/ComputoCienciasUniandes/FISI2029-201910/master/Seccion_1/Fourier/Datos/transacciones2008.txt',sep=";",header=None, decimal=",")
datos[0]=pd.to_datetime(datos[0],format='%d/%m/%Y %H:%M:%S')
#datos.set_index([0],inplace=True)
datos[1]=pd.to_datetime(datos[1],format='%d/%m/%Y %H:%M:%S')
#datos.set_index([1],inplace=True)
datos[1]=str(datos[1])
datos[1]=datos[1].str[1:20]
datos2=pd.read_csv('https://raw.githubusercontent.com/ComputoCienciasUniandes/FISI2029-201910/master/Seccion_1/Fourier/Datos/transacciones2009.txt',sep=";",header=None, decimal=",")
datos2[0]=pd.to_datetime(datos2[0],format='%d/%m/%Y %H:%M:%S')
#datos.set_index([0],inplace=True)
datos2[1]=pd.to_datetime(datos2[1],format='%d/%m/%Y %H:%M:%S')
#datos.set_index([1],inplace=True)
datos2[1]=str(datos2[1])
datos2[1]=datos2[1].str[1:20]
datos3=pd.read_csv('https://raw.githubusercontent.com/ComputoCienciasUniandes/FISI2029-201910/master/Seccion_1/Fourier/Datos/transacciones2010.txt',sep=";",header=None, decimal=",")
datos3[0]=pd.to_datetime(datos3[0],format='%d/%m/%Y %H:%M:%S')
#datos.set_index([0],inplace=True)
datos3[1]=pd.to_datetime(datos3[1],format='%d/%m/%Y %H:%M:%S')
#datos.set_index([1],inplace=True)
datos3[1]=str(datos3[1])
datos3[1]=datos3[1].str[1:20]
plt.figure(figsize=(12,12))
plt.subplot(2,2,1)
plt.plot(datos[0],datos[2])
plt.subplot(2,2,2)
plt.plot(datos2[0],datos2[2])
plt.subplot(2,2,3)
plt.plot(datos3[0],datos3[2])
N = 2  # Filter order
Wn = 0.01  # Cutoff frequency (fraction of the Nyquist frequency)
B, A = signal.butter(N, Wn)
cost=pd.concat([datos[2],datos2[2],datos3[2]])
cost=np.array(cost)
cost=cost.astype(np.float)
CostFil=signal.filtfilt(B,A, cost)
date=pd.concat([datos[0],datos2[0],datos3[0]])
fig = plt.figure(figsize=(20,10))
ax1 = fig.add_subplot(211)
plt.plot(date,cost, 'b-')
plt.plot(date,CostFil, 'r-',linewidth=2)
plt.ylabel(r"Costo")
plt.legend(['Original','Filtrado'])
plt.title("Costos en la bolsa de valores")
ax1.axes.get_xaxis().set_visible(False)
ax1 = fig.add_subplot(212)
plt.plot(date,cost-CostFil, 'b-')
plt.ylabel(r"Costo")
plt.xlabel("Fecha")
plt.legend(['Residuales'])
plt.savefig("FiltroCostos.png")
plt.show()
plt.figure(figsize=(20,7))
ruido=cost-CostFil
corr=signal.correlate(ruido,ruido,mode="full")
plt.plot(corr[len(corr)//2:])
plt.show()
plt.figure(figsize=(20,7))
ruido=cost-CostFil
corr=signal.correlate(ruido,ruido,mode="full")
plt.plot(corr[len(corr)//2:])
plt.savefig("Correlacion.png")
plt.show()
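# Illustrative sketch, separate from the analysis above: the same Butterworth + filtfilt
# smoothing applied to a small synthetic signal, so the low-pass behaviour can be checked
# without downloading the transaction files. The signal and frequencies are hypothetical.
if __name__ == "__main__":
    t = np.linspace(0, 1, 1000)
    slow = np.sin(2 * np.pi * 2 * t)                    # low-frequency component to keep
    noisy = slow + 0.5 * np.sin(2 * np.pi * 100 * t)    # plus a fast component to remove
    b, a = signal.butter(2, 0.01)                       # same order and cutoff as above
    smoothed = signal.filtfilt(b, a, noisy)
    # the filtered signal should be much closer to the slow component than the raw one
    assert np.mean((smoothed - slow) ** 2) < np.mean((noisy - slow) ** 2)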
| 37.342105
| 182
| 0.713531
|
357e1a4558c1757165b854d22ee21a014d3ce9ec
| 9,047
|
py
|
Python
|
src/demos/python/chrono-tensorflow/envs/chtrain_pendulum.py
|
rxdu/chrono
|
d7183358f95d74d90f412880894d10a17b9f7bff
|
[
"BSD-3-Clause"
] | null | null | null |
src/demos/python/chrono-tensorflow/envs/chtrain_pendulum.py
|
rxdu/chrono
|
d7183358f95d74d90f412880894d10a17b9f7bff
|
[
"BSD-3-Clause"
] | null | null | null |
src/demos/python/chrono-tensorflow/envs/chtrain_pendulum.py
|
rxdu/chrono
|
d7183358f95d74d90f412880894d10a17b9f7bff
|
[
"BSD-3-Clause"
] | null | null | null |
import pychrono as chrono
from pychrono import irrlicht as chronoirr
import numpy as np
class Model(object):
def __init__(self, render):
self.render = render
self.observation_space= np.empty([4,1])
self.action_space= np.empty([1,1])
self.info = {}
self.timestep = 0.01
# ---------------------------------------------------------------------
#
# Create the simulation system and add items
#
self.rev_pend_sys = chrono.ChSystemNSC()
chrono.ChCollisionModel.SetDefaultSuggestedEnvelope(0.001)
chrono.ChCollisionModel.SetDefaultSuggestedMargin(0.001)
#rev_pend_sys.SetSolverType(chrono.ChSolver.Type_BARZILAIBORWEIN) # more precise, but slower
self.rev_pend_sys.SetSolverMaxIterations(70)
# Create a contact material (surface property)to share between all objects.
self.rod_material = chrono.ChMaterialSurfaceNSC()
self.rod_material.SetFriction(0.5)
self.rod_material.SetDampingF(0.2)
self.rod_material.SetCompliance (0.0000001)
self.rod_material.SetComplianceT(0.0000001)
# Create the set of rods in a vertical stack, along Y axis
self.size_rod_y = 2.0
self.radius_rod = 0.05
self.density_rod = 50 # kg/m^3
self.mass_rod = self.density_rod * self.size_rod_y * chrono.CH_C_PI * (self.radius_rod**2)
self.inertia_rod_y = (self.radius_rod**2) * self.mass_rod / 2
self.inertia_rod_x = (self.mass_rod/12) * ((self.size_rod_y**2) + 3*(self.radius_rod**2))
self.size_table_x = 0.3
self.size_table_y = 0.3
self.size_table_z = 0.3
if self.render:
self.myapplication = chronoirr.ChIrrApp(self.rev_pend_sys)
self.myapplication.AddShadowAll();
self.myapplication.SetStepManage(True)
self.myapplication.SetTimestep(0.01)
self.myapplication.SetTryRealtime(True)
self.myapplication.AddTypicalSky()
self.myapplication.AddTypicalLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
self.myapplication.AddTypicalCamera(chronoirr.vector3df(0.5,0.5,1.0))
self.myapplication.AddLightWithShadow(chronoirr.vector3df(2,4,2), # point
chronoirr.vector3df(0,0,0), # aimpoint
9, # radius (power)
1,9, # near, far
30) # angle of FOV
def reset(self):
#print("reset")
self.isdone = False
self.rev_pend_sys.Clear()
# create it
self.body_rod = chrono.ChBody()
# set initial position
self.body_rod.SetPos(chrono.ChVectorD(0, self.size_rod_y/2, 0 ))
# set mass properties
self.body_rod.SetMass(self.mass_rod)
self.body_rod.SetInertiaXX(chrono.ChVectorD(self.inertia_rod_x,self.inertia_rod_y,self.inertia_rod_x))
# set collision surface properties
self.body_rod.SetMaterialSurface(self.rod_material)
# Visualization shape, for rendering animation
self.cyl_base1= chrono.ChVectorD(0, -self.size_rod_y/2, 0 )
self.cyl_base2= chrono.ChVectorD(0, self.size_rod_y/2, 0 )
self.body_rod_shape = chrono.ChCylinderShape()
self.body_rod_shape.GetCylinderGeometry().p1= self.cyl_base1
self.body_rod_shape.GetCylinderGeometry().p2= self.cyl_base2
self.body_rod_shape.GetCylinderGeometry().rad= self.radius_rod
self.body_rod.AddAsset(self.body_rod_shape)
self.rev_pend_sys.Add(self.body_rod)
self.body_floor = chrono.ChBody()
self.body_floor.SetBodyFixed(True)
self.body_floor.SetPos(chrono.ChVectorD(0, -5, 0 ))
self.body_floor.SetMaterialSurface(self.rod_material)
if self.render:
self.body_floor_shape = chrono.ChBoxShape()
self.body_floor_shape.GetBoxGeometry().Size = chrono.ChVectorD(3, 1, 3)
self.body_floor.GetAssets().push_back(self.body_floor_shape)
self.body_floor_texture = chrono.ChTexture()
self.body_floor_texture.SetTextureFilename(chrono.GetChronoDataFile('concrete.jpg'))
self.body_floor.GetAssets().push_back(self.body_floor_texture)
self.rev_pend_sys.Add(self.body_floor)
self.body_table = chrono.ChBody()
self.body_table.SetPos(chrono.ChVectorD(0, -self.size_table_y/2, 0 ))
self.body_table.SetMaterialSurface(self.rod_material)
if self.render:
self.body_table_shape = chrono.ChBoxShape()
self.body_table_shape.GetBoxGeometry().Size = chrono.ChVectorD(self.size_table_x/2, self.size_table_y/2, self.size_table_z/2)
self.body_table_shape.SetColor(chrono.ChColor(0.4,0.4,0.5))
self.body_table.GetAssets().push_back(self.body_table_shape)
self.body_table_texture = chrono.ChTexture()
self.body_table_texture.SetTextureFilename(chrono.GetChronoDataFile('concrete.jpg'))
self.body_table.GetAssets().push_back(self.body_table_texture)
self.body_table.SetMass(0.1)
self.rev_pend_sys.Add(self.body_table)
self.link_slider = chrono.ChLinkLockPrismatic()
z2x = chrono.ChQuaternionD()
z2x.Q_from_AngAxis(-chrono.CH_C_PI / 2 , chrono.ChVectorD(0, 1, 0))
self.link_slider.Initialize(self.body_table, self.body_floor, chrono.ChCoordsysD(chrono.ChVectorD(0, 0, 0), z2x))
self.rev_pend_sys.Add(self.link_slider)
self.act_initpos = chrono.ChVectorD(0,0,0)
self.actuator = chrono.ChLinkMotorLinearForce()
self.actuator.Initialize(self.body_table, self.body_floor, chrono.ChFrameD(self.act_initpos))
self.rev_pend_sys.Add(self.actuator)
self.rod_pin = chrono.ChMarker()
self.body_rod.AddMarker(self.rod_pin)
self.rod_pin.Impose_Abs_Coord(chrono.ChCoordsysD(chrono.ChVectorD(0,0,0)))
self.table_pin = chrono.ChMarker()
self.body_table.AddMarker(self.table_pin)
self.table_pin.Impose_Abs_Coord(chrono.ChCoordsysD(chrono.ChVectorD(0,0,0)))
self.pin_joint = chrono.ChLinkLockRevolute()
self.pin_joint.Initialize(self.rod_pin, self.table_pin)
self.rev_pend_sys.Add(self.pin_joint)
if self.render:
# ---------------------------------------------------------------------
#
# Create an Irrlicht application to visualize the system
#
# ==IMPORTANT!== Use this function for adding a ChIrrNodeAsset to all items
# in the system. These ChIrrNodeAsset assets are 'proxies' to the Irrlicht meshes.
# If you need finer control over which items really need a visualization proxy
# in Irrlicht, just use application.AssetBind(myitem) on a per-item basis.
self.myapplication.AssetBindAll();
# ==IMPORTANT!== Use this function to convert the assets you added to the bodies
# into Irrlicht meshes, so that they can be visualized by Irrlicht!
self.myapplication.AssetUpdateAll();
self.isdone= False
self.steps= 0
self.step(np.array([[0]]))
return self.get_ob()
def step(self, ac):
action=float(ac[0])
self.steps += 1
self.ac = chrono.ChFunction_Const(action)
self.actuator.SetForceFunction(self.ac)
self.omega = self.pin_joint.GetRelWvel().Length()
if self.render:
self.myapplication.GetDevice().run()
self.myapplication.BeginScene()
self.myapplication.DrawAll()
self.myapplication.DoStep()
else:
self.rev_pend_sys.DoStepDynamics(self.timestep)
self.rew = 1.0
self.obs= self.get_ob()
if self.render:
self.myapplication.EndScene()
self.is_done()
return self.obs, self.rew, self.isdone, self.info
def get_ob(self):
self.state = [self.link_slider.GetDist(), self.link_slider.GetDist_dt(), self.pin_joint.GetRelAngle(), self.omega]
return np.asarray(self.state)
def is_done(self):
if abs(self.link_slider.GetDist()) > 2 or self.steps> 100000 or abs(self.pin_joint.GetRelAngle()) > 0.2 :
self.isdone = True
def ScreenCapture(self, interval):
try:
self.myapplication.SetVideoframeSave(True)
self.myapplication.SetVideoframeSaveInterval(interval)
except:
print('No ChIrrApp found. Cannot save video frames.')
def __del__(self):
if self.render:
self.myapplication.GetDevice().closeDevice()
print('Destructor called, Device deleted.')
else:
print('Destructor called, No device to delete.')
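# Hedged usage sketch (added for illustration, not part of the original demo):
# a short random-action rollout against the Model environment defined above.
# Assumes PyChrono is installed; rendering is disabled so no Irrlicht window is needed.
if __name__ == '__main__':
    env = Model(render=False)
    obs = env.reset()
    done = False
    while not done:
        # apply a random horizontal force to the cart in [-1, 1]
        action = np.random.uniform(-1.0, 1.0, size=(1, 1))
        obs, reward, done, info = env.step(action)
    print('Episode finished after', env.steps, 'steps')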
| 37.695833
| 138
| 0.626948
|
20ef3297771ae316c546c0d93710a87eaaf4c49f
| 925
|
py
|
Python
|
pypy/translator/js/test/test_rpbc.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 12
|
2016-01-06T07:10:28.000Z
|
2021-05-13T23:02:02.000Z
|
pypy/translator/js/test/test_rpbc.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | null | null | null |
pypy/translator/js/test/test_rpbc.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 2
|
2016-07-29T07:09:50.000Z
|
2016-10-16T08:50:26.000Z
|
import py
from pypy.translator.js.test.runtest import JsTest
from pypy.rpython.test.test_rpbc import BaseTestRPBC
# ====> ../../../rpython/test/test_rpbc.py
class TestJsPBC(JsTest, BaseTestRPBC):
def test_single_pbc_getattr(self):
class C:
def __init__(self, v1, v2):
self.v1 = v1
self.v2 = v2
def _freeze_(self):
return True
c1 = C(11, lambda: "hello")
c2 = C(22, lambda: 623)
def f1(l, c):
l.append(c.v1)
def f2(c):
return c.v2
def f3(c):
return c.v2
def g():
l = []
f1(l, c1)
f1(l, c2)
return f2(c1)(), f3(c2)()
res = self.interpret(g, [])
assert res[0] == "hello"
assert res[1] == 623
def test_call_memoized_function_with_bools(self):
py.test.skip("WIP")
| 25
| 53
| 0.492973
|
0893d261c0e7b1fba8c20dba202031608deb5dc2
| 659
|
py
|
Python
|
wecom_material/__init__.py
|
rainbow-studio-solution/wecom
|
937ea9c15c5ef42ba749c67335ede85544292aad
|
[
"MulanPSL-1.0"
] | 5
|
2021-12-17T06:44:41.000Z
|
2022-02-05T03:34:07.000Z
|
wecom_material/__init__.py
|
rainbow-studio-solution/wecom
|
937ea9c15c5ef42ba749c67335ede85544292aad
|
[
"MulanPSL-1.0"
] | null | null | null |
wecom_material/__init__.py
|
rainbow-studio-solution/wecom
|
937ea9c15c5ef42ba749c67335ede85544292aad
|
[
"MulanPSL-1.0"
] | 2
|
2022-02-06T13:27:56.000Z
|
2022-02-27T08:06:59.000Z
|
# -*- coding: utf-8 -*-
from . import models
import os.path
from odoo import api, SUPERUSER_ID, _
from odoo.exceptions import UserError
def pre_init_hook(cr):
env = api.Environment(cr, SUPERUSER_ID, {})
path = env["ir.config_parameter"].get_param("wecom.resources_path")
if path:
if not os.path.exists(path):
try:
os.makedirs(path)
except BaseException as e:
raise UserError(
_("Unable to create WeCom image storage path! Error:%s") % (repr(e))
)
else:
raise UserError(_("WeCom image storage path has not been configured yet!"))
| 26.36
| 88
| 0.596358
|
dd09aaed197588ca392bcce5fc3121f9d5d36aac
| 1,868
|
py
|
Python
|
client/init.py
|
mikaelbrandin/armory
|
222e549fbf2cf89a874cad96a8bb7edd186e4800
|
[
"Apache-2.0"
] | null | null | null |
client/init.py
|
mikaelbrandin/armory
|
222e549fbf2cf89a874cad96a8bb7edd186e4800
|
[
"Apache-2.0"
] | null | null | null |
client/init.py
|
mikaelbrandin/armory
|
222e549fbf2cf89a874cad96a8bb7edd186e4800
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'kra869'
import os
import configparser
from . import utils
def directory_filter(args):
return os.getcwd();
def init(context):
parser = context.register_command('init', command_init, help='Initialize a new repository', directory_filter=directory_filter)
parser.add_argument('repository', metavar='REPOSITORY_URI', help="the repository uri")
return None
def command_init(args, context):
if not utils.confirm("Initialize repository in " + args.directory):
    print("Skipping initialization of local repository")
    return None
initialize(args.directory, args.repository)
return None
def initialize(directory, repository):
db_directory = directory + '.armory' + os.sep
modules_directory = directory + 'modules.d' + os.sep
configuration_directory = directory + 'conf.d' + os.sep
if not os.path.exists(db_directory):
print("Create .armory directory")
os.makedirs(db_directory)
if not os.path.exists(modules_directory):
print("Create modules.d directory")
os.makedirs(modules_directory)
if not os.path.exists(configuration_directory):
print("Create conf.d directory")
os.makedirs(configuration_directory)
repositories = configparser.ConfigParser()
repositories.read(db_directory + 'repositories')
# Modules
if not repositories.has_section('modules'):
repositories.add_section('modules');
#Configurations
if not repositories.has_section('configurations'):
repositories.add_section('configurations');
#Default repository
repositories.set('modules', 'default', repository)
repositories.set('configurations', 'default', repository)
with open(db_directory + 'repositories', "w+") as f:
repositories.write(f);
with open(db_directory + 'local', "w+") as f:
f.write('1.0.0');
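# Hedged usage sketch (added for illustration, not part of the original module):
# lay out a fresh repository structure in a scratch directory. The repository
# URI below is a placeholder, not a real endpoint.
if __name__ == '__main__':
    import tempfile
    scratch = tempfile.mkdtemp() + os.sep
    initialize(scratch, 'https://example.com/armory-repo')
    print('Initialized armory repository in', scratch)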
| 28.738462
| 130
| 0.700214
|
530d06fc39e5f5cc48c5240278254fb6de7f4a50
| 20,113
|
py
|
Python
|
irrd/storage/queries.py
|
morrowc/irrd
|
8a2af9a6648a73fc3c31d21cf07ef80a49031a14
|
[
"BSD-2-Clause"
] | null | null | null |
irrd/storage/queries.py
|
morrowc/irrd
|
8a2af9a6648a73fc3c31d21cf07ef80a49031a14
|
[
"BSD-2-Clause"
] | 1
|
2021-04-20T14:57:52.000Z
|
2021-04-20T14:57:52.000Z
|
irrd/storage/queries.py
|
morrowc/irrd
|
8a2af9a6648a73fc3c31d21cf07ef80a49031a14
|
[
"BSD-2-Clause"
] | null | null | null |
import logging
from typing import List, Optional
import sqlalchemy as sa
from IPy import IP
from sqlalchemy.sql import Select, ColumnCollection
import sqlalchemy.dialects.postgresql as pg
from irrd.conf import get_setting
from irrd.rpki.status import RPKIStatus
from irrd.rpsl.rpsl_objects import lookup_field_names
from irrd.scopefilter.status import ScopeFilterStatus
from irrd.storage.models import (RPSLDatabaseObject, RPSLDatabaseJournal, RPSLDatabaseStatus,
ROADatabaseObject)
from irrd.utils.validators import parse_as_number, ValidationError
logger = logging.getLogger(__name__)
class BaseRPSLObjectDatabaseQuery:
statement: Select
table: sa.Table
columns: ColumnCollection
def __init__(self, ordered_by_sources=True, enable_ordering=True):
self._query_frozen = False
self._sources_list = []
self._ordered_by_sources = ordered_by_sources
self._enable_ordering = enable_ordering
self._set_object_classes = []
def pk(self, pk: str):
"""Filter on an exact object PK (UUID)."""
return self._filter(self.columns.pk == pk)
def rpsl_pk(self, rpsl_pk: str):
"""Filter on an exact RPSL PK (e.g. 192.0.2.0/24,AS65537)."""
return self.rpsl_pks([rpsl_pk])
def rpsl_pks(self, rpsl_pks: List[str]):
"""Filter on an exact RPSL PK (e.g. 192.0.2.0/24,AS65537) - will match any PK in the list."""
rpsl_pks = [p.upper().strip() for p in rpsl_pks]
return self._filter(self.columns.rpsl_pk.in_(rpsl_pks))
def sources(self, sources: List[str]):
"""
Filter on one or more sources.
Sources list must be an iterable. Will match objects from any
of the mentioned sources. Order is used for sorting of results.
"""
sources = [s.upper().strip() for s in sources]
self._sources_list = sources
fltr = self.columns.source.in_(self._sources_list)
return self._filter(fltr)
def object_classes(self, object_classes: List[str]):
"""
Filter on one or more object classes.
Classes list must be an iterable. Will match objects from any
of the mentioned classes.
"""
self._set_object_classes = object_classes
fltr = self.columns.object_class.in_(object_classes)
return self._filter(fltr)
def first_only(self):
"""Only return the first match."""
return self.limit(1)
def limit(self, record_limit: int):
"""Limit the response to a certain number of rows"""
self.statement = self.statement.limit(record_limit)
return self
def finalise_statement(self) -> Select:
"""
Finalise the statement and return it.
This method does some final work on statements that may be dependent on
each other - particularly statements that determine the sort order of
the query, which depends on sources_list() and prioritise_source().
"""
self._query_frozen = True
if self._enable_ordering:
order_by = []
if 'ip_first' in self.columns:
order_by.append(self.columns.ip_first.asc())
if 'asn_first' in self.columns:
order_by.append(self.columns.asn_first.asc())
if 'rpsl_pk' in self.columns:
order_by.append(self.columns.rpsl_pk.asc())
if self._ordered_by_sources and self._sources_list:
case_elements = []
for idx, source in enumerate(self._sources_list):
case_elements.append((self.columns.source == source, idx + 1))
criterion = sa.case(case_elements, else_=100000)
order_by.insert(0, criterion)
self.statement = self.statement.order_by(*order_by)
return self.statement
def _filter(self, fltr):
self._check_query_frozen()
self.statement = self.statement.where(fltr)
return self
def _check_query_frozen(self) -> None:
if self._query_frozen:
raise ValueError('This query was frozen - no more filters can be applied.')
class RPSLDatabaseQuery(BaseRPSLObjectDatabaseQuery):
"""
RPSL data query builder for retrieving RPSL objects.
Offers various ways to filter, which are always constructed in an AND query.
For example:
q = RPSLDatabaseQuery().sources(['NTTCOM']).asn_less_specific(65537)
would match all objects that refer or include AS65537 (i.e. aut-num, route,
as-block, route6) from the NTTCOM source.
For methods taking a prefix or IP address, this should be an IPy.IP object.
"""
table = RPSLDatabaseObject.__table__
columns = RPSLDatabaseObject.__table__.c
lookup_field_names = lookup_field_names()
def __init__(self, column_names=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if column_names is None:
columns = [
self.columns.pk,
self.columns.object_class,
self.columns.rpsl_pk,
self.columns.parsed_data,
self.columns.object_text,
self.columns.source,
self.columns.rpki_status,
self.columns.updated,
self.columns.asn_first,
self.columns.asn_last,
self.columns.ip_first,
self.columns.ip_last,
self.columns.prefix_length,
]
else:
columns = [self.columns.get(name) for name in column_names]
self.statement = sa.select(columns)
self._lookup_attr_counter = 0
def lookup_attr(self, attr_name: str, attr_value: str):
"""
Filter on a lookup attribute, e.g. mnt-by.
At least one of the values for the lookup attribute must match attr_value.
Matching is case-insensitive.
"""
return self.lookup_attrs_in([attr_name], [attr_value])
def lookup_attrs_in(self, attr_names: List[str], attr_values: List[str]):
"""
Filter on one or more lookup attributes, e.g. mnt-by, or ['admin-c', 'tech-c']
At least one of the values for at least one of the lookup attributes must
match one of the items in attr_values. Matching is case-insensitive.
"""
attr_names = [attr_name.lower() for attr_name in attr_names]
for attr_name in attr_names:
if attr_name not in self.lookup_field_names:
raise ValueError(f'Invalid lookup attribute: {attr_name}')
self._check_query_frozen()
value_filters = []
statement_params = {}
for attr_name in attr_names:
for attr_value in attr_values:
counter = self._lookup_attr_counter
self._lookup_attr_counter += 1
value_filters.append(sa.text(f'parsed_data->:lookup_attr_name{counter} ? :lookup_attr_value{counter}'))
statement_params[f'lookup_attr_name{counter}'] = attr_name
statement_params[f'lookup_attr_value{counter}'] = attr_value.upper()
fltr = sa.or_(*value_filters)
self.statement = self.statement.where(fltr).params(**statement_params)
return self
def ip_exact(self, ip: IP):
"""
Filter on an exact prefix or address.
The provided ip should be an IPy.IP class, and can be a prefix or
an address.
"""
fltr = sa.and_(
self.columns.ip_first == str(ip.net()),
self.columns.ip_last == str(ip.broadcast()),
self.columns.ip_version == ip.version()
)
return self._filter(fltr)
def ip_less_specific(self, ip: IP):
"""Filter any less specifics or exact matches of a prefix."""
if self._prefix_query_permitted():
pg_prefix = sa.cast(str(ip), pg.CIDR)
fltr = self.columns.prefix.op(">>=")(pg_prefix)
else:
fltr = sa.and_(
self.columns.ip_first <= str(ip.net()),
self.columns.ip_last >= str(ip.broadcast()),
self.columns.ip_version == ip.version()
)
return self._filter(fltr)
def ip_less_specific_one_level(self, ip: IP):
"""
Filter one level less specific of a prefix.
Due to implementation details around filtering, this must
always be the last call on a query object, or unpredictable
results may occur.
"""
self._check_query_frozen()
# One level less specific could still have multiple objects.
# A subquery determines the smallest possible size less specific object,
# and this is then used to filter for any objects with that size.
fltr = sa.and_(
self.columns.ip_first <= str(ip.net()),
self.columns.ip_last >= str(ip.broadcast()),
self.columns.ip_version == ip.version(),
sa.not_(sa.and_(self.columns.ip_first == str(ip.net()), self.columns.ip_last == str(ip.broadcast()))),
)
self.statement = self.statement.where(fltr)
size_subquery = self.statement.with_only_columns([self.columns.ip_size])
size_subquery = size_subquery.order_by(self.columns.ip_size.asc())
size_subquery = size_subquery.limit(1)
self.statement = self.statement.where(self.columns.ip_size.in_(size_subquery))
self._query_frozen = True
return self
def ip_more_specific(self, ip: IP):
"""Filter any more specifics of a prefix, not including exact matches.
Note that this only finds full more specifics: objects for which their
IP range is fully encompassed by the ip parameter.
"""
if self._prefix_query_permitted():
pg_prefix = sa.cast(str(ip), pg.CIDR)
fltr = self.columns.prefix.op("<<")(pg_prefix)
else:
fltr = sa.and_(
self.columns.ip_first >= str(ip.net()),
self.columns.ip_first <= str(ip.broadcast()),
self.columns.ip_last <= str(ip.broadcast()),
self.columns.ip_last >= str(ip.net()),
self.columns.ip_version == ip.version(),
sa.not_(sa.and_(self.columns.ip_first == str(ip.net()), self.columns.ip_last == str(ip.broadcast()))),
)
return self._filter(fltr)
def ip_any(self, ip: IP):
"""
Filter any less specifics, more specifics or exact matches of a prefix.
Note that this only finds full more specifics: objects for which their
IP range is fully encompassed by the ip parameter - not partial overlaps.
"""
if self._prefix_query_permitted():
pg_prefix = sa.cast(str(ip), pg.CIDR)
fltr = sa.or_(
self.columns.prefix.op(">>=")(pg_prefix),
self.columns.prefix.op("<<")(pg_prefix),
)
else:
fltr = sa.and_(
sa.or_(
sa.and_(
self.columns.ip_first <= str(ip.net()),
self.columns.ip_last >= str(ip.broadcast()),
),
sa.and_(
self.columns.ip_first >= str(ip.net()),
self.columns.ip_first <= str(ip.broadcast()),
self.columns.ip_last <= str(ip.broadcast()),
self.columns.ip_last >= str(ip.net()),
),
),
self.columns.ip_version == ip.version()
)
return self._filter(fltr)
def asn(self, asn: int):
"""
Filter for exact matches on an ASN.
"""
fltr = sa.and_(self.columns.asn_first == asn, self.columns.asn_last == asn)
return self._filter(fltr)
def asns_first(self, asns: List[int]):
"""
Filter for asn_first being in a list of ASNs.
This is useful when also restricting object class to 'route' for instance.
"""
fltr = self.columns.asn_first.in_(asns)
return self._filter(fltr)
def asn_less_specific(self, asn: int):
"""
Filter for a specific ASN, or any less specific matches.
This will match all objects that refer to this ASN, or a block
encompassing it - including route, route6, aut-num and as-block.
"""
fltr = sa.and_(self.columns.asn_first <= asn, self.columns.asn_last >= asn)
return self._filter(fltr)
def rpki_status(self, status: List[RPKIStatus]):
"""
Filter for RPSL objects with a specific RPKI validation status.
"""
fltr = self.columns.rpki_status.in_(status)
return self._filter(fltr)
def scopefilter_status(self, status: List[ScopeFilterStatus]):
"""
Filter for RPSL objects with a specific scope filter status.
"""
fltr = self.columns.scopefilter_status.in_(status)
return self._filter(fltr)
def text_search(self, value: str, extract_asn_ip=True):
"""
Search the database for a specific free text.
In order, this attempts:
- If the value is a valid AS number, return all as-block, as-set, aut-num objects
relating or including that AS number.
- If the value is a valid IP address or network, return all objects that relate to
that resource and any less specifics.
- Otherwise, return all objects where the RPSL primary key is exactly this value,
or it matches part of a person/role name (not nic-hdl, their
actual person/role attribute value).
If extract_asn_ip is False, the first two steps are skipped.
"""
self._check_query_frozen()
if extract_asn_ip:
try:
_, asn = parse_as_number(value)
return self.object_classes(['as-block', 'as-set', 'aut-num']).asn_less_specific(asn)
except ValidationError:
pass
try:
ip = IP(value)
return self.ip_less_specific(ip)
except ValueError:
pass
counter = self._lookup_attr_counter
self._lookup_attr_counter += 1
fltr = sa.or_(
self.columns.rpsl_pk == value.upper(),
sa.and_(
self.columns.object_class == 'person',
sa.text(f"parsed_data->>'person' ILIKE :lookup_attr_text_search{counter}")
),
sa.and_(
self.columns.object_class == 'role',
sa.text(f"parsed_data->>'role' ILIKE :lookup_attr_text_search{counter}")
),
)
self.statement = self.statement.where(fltr).params(
**{f'lookup_attr_text_search{counter}': '%' + value + '%'}
)
return self
def _prefix_query_permitted(self):
return (
get_setting('compatibility.inetnum_search_disabled')
or (self._set_object_classes and 'inetnum' not in self._set_object_classes)
) and not get_setting('compatibility.irrd42_migration_in_progress')
def __repr__(self):
return f'{self.statement}\nPARAMS: {self.statement.compile().params}'
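# Hedged usage sketch (added for illustration, not part of the original module):
# chain a few filters and render the final SQL statement. The source name and
# prefix below are placeholders.
def _example_route_query():  # illustrative only, never called by IRRd itself
    q = RPSLDatabaseQuery(column_names=['rpsl_pk', 'source', 'object_text'])
    q = q.sources(['EXAMPLE']).object_classes(['route']).ip_less_specific(IP('192.0.2.0/24'))
    return q.finalise_statement()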
class RPSLDatabaseJournalQuery(BaseRPSLObjectDatabaseQuery):
"""
RPSL data query builder for retrieving the journal,
analogous to RPSLDatabaseQuery.
"""
table = RPSLDatabaseJournal.__table__
columns = RPSLDatabaseJournal.__table__.c
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.statement = sa.select([
self.columns.pk,
self.columns.rpsl_pk,
self.columns.source,
self.columns.serial_nrtm,
self.columns.operation,
self.columns.object_class,
self.columns.object_text,
self.columns.origin,
self.columns.timestamp,
]).order_by(self.columns.source.asc(), self.columns.serial_nrtm.asc())
def serial_range(self, start: int, end: Optional[int]=None):
"""
Filter for a serials within a specific range, inclusive.
"""
if end is not None:
fltr = sa.and_(self.columns.serial_nrtm >= start, self.columns.serial_nrtm <= end)
else:
fltr = self.columns.serial_nrtm >= start
return self._filter(fltr)
def __repr__(self):
return f'RPSLDatabaseJournalQuery: {self.statement}\nPARAMS: {self.statement.compile().params}'
class DatabaseStatusQuery:
table = RPSLDatabaseStatus.__table__
columns = RPSLDatabaseStatus.__table__.c
def __init__(self):
self._sources_list: List[str] = []
self.statement = sa.select([
self.columns.pk,
self.columns.source,
self.columns.serial_oldest_seen,
self.columns.serial_newest_seen,
self.columns.serial_oldest_journal,
self.columns.serial_newest_journal,
self.columns.serial_last_export,
self.columns.serial_newest_mirror,
self.columns.force_reload,
self.columns.synchronised_serials,
self.columns.last_error,
self.columns.last_error_timestamp,
self.columns.created,
self.columns.updated,
])
def source(self, source: str):
"""Filter on a source."""
return self.sources([source])
def sources(self, sources: List[str]):
"""Filter on one or more sources."""
self._sources_list = [s.upper() for s in sources]
return self
def finalise_statement(self):
order_by = [self.columns.source.asc()]
if self._sources_list:
fltr = self.columns.source.in_(self._sources_list)
self._filter(fltr)
case_elements = []
for idx, source in enumerate(self._sources_list):
case_elements.append((self.columns.source == source, idx + 1))
criterion = sa.case(case_elements, else_=100000)
order_by.insert(0, criterion)
self.statement = self.statement.order_by(*order_by)
return self.statement
def _filter(self, fltr):
self.statement = self.statement.where(fltr)
return self
def __repr__(self):
return f'DatabaseStatusQuery: {self.statement}\nPARAMS: {self.statement.compile().params}'
class RPSLDatabaseObjectStatisticsQuery:
"""
Special statistics query, calculating the number of
objects per object class per source.
"""
table = RPSLDatabaseObject.__table__
columns = RPSLDatabaseObject.__table__.c
def __init__(self):
self.statement = sa.select([
self.columns.source,
self.columns.object_class,
sa.func.count(self.columns.pk).label('count'),
]).group_by(self.columns.source, self.columns.object_class)
def finalise_statement(self):
return self.statement
def __repr__(self):
return f'RPSLDatabaseObjectStatisticsQuery: {self.statement}\nPARAMS: {self.statement.compile().params}'
class ROADatabaseObjectQuery:
"""
Query builder for ROA objects.
"""
table = ROADatabaseObject.__table__
columns = ROADatabaseObject.__table__.c
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.statement = sa.select([
self.columns.pk,
self.columns.prefix,
self.columns.asn,
self.columns.max_length,
self.columns.trust_anchor,
self.columns.ip_version,
])
def ip_less_specific_or_exact(self, ip: IP):
"""Filter any less specifics or exact matches of a prefix."""
fltr = sa.and_(
self.columns.prefix.op('>>=')(str(ip))
)
self.statement = self.statement.where(fltr)
return self
def finalise_statement(self):
return self.statement
def __repr__(self):
return f'ROADatabaseObjectQuery: {self.statement}\nPARAMS: {self.statement.compile().params}'
| 37.454376
| 119
| 0.611843
|
344271dda42c7eaa326c9c84749dc8bf09960da0
| 3,279
|
py
|
Python
|
agents/DDPGActor.py
|
schkip/MLProject_Quadcopter
|
148da1c5ffc4ff409144200be5a943b6ca2e22b2
|
[
"MIT"
] | null | null | null |
agents/DDPGActor.py
|
schkip/MLProject_Quadcopter
|
148da1c5ffc4ff409144200be5a943b6ca2e22b2
|
[
"MIT"
] | null | null | null |
agents/DDPGActor.py
|
schkip/MLProject_Quadcopter
|
148da1c5ffc4ff409144200be5a943b6ca2e22b2
|
[
"MIT"
] | null | null | null |
from keras import layers, models, optimizers, regularizers
from keras import backend as K
class Actor:
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, action_low, action_high):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
action_low (array): Min value of each action dimension
action_high (array): Max value of each action dimension
"""
self.state_size = state_size
self.action_size = action_size
self.action_low = action_low
self.action_high = action_high
self.action_range = self.action_high - self.action_low
# Initialize any other variables here
self.build_model()
def build_model(self):
"""Build an actor (policy) network that maps states -> actions."""
# Define input layer (states)
states = layers.Input(shape=(self.state_size,), name='states')
# Add hidden layers
net = layers.Dense(units=32, activation='relu',
kernel_regularizer=regularizers.l2(0.01), activity_regularizer=regularizers.l1(0.01))(states)
net = layers.BatchNormalization()(net)
net = layers.Activation('relu')(net)
net = layers.Dropout(0.2)(net)
net = layers.Dense(units=64, activation='relu',
kernel_regularizer=regularizers.l2(0.01), activity_regularizer=regularizers.l1(0.01))(net)
net = layers.BatchNormalization()(net)
net = layers.Activation('relu')(net)
net = layers.Dropout(0.2)(net)
net = layers.Dense(units=128, activation='relu',
kernel_regularizer=regularizers.l2(0.01), activity_regularizer=regularizers.l1(0.01))(net)
net = layers.BatchNormalization()(net)
net = layers.Activation('relu')(net)
net = layers.Dropout(0.2)(net)
net = layers.Dense(units=32, activation='relu',
kernel_regularizer=regularizers.l2(0.01), activity_regularizer=regularizers.l1(0.01))(net)
net = layers.BatchNormalization()(net)
net = layers.Activation('relu')(net)
net = layers.Dropout(0.2)(net)
# Add final output layer with sigmoid activation
raw_actions = layers.Dense(units=self.action_size, activation='sigmoid',
name='raw_actions')(net)
# Scale [0, 1] output for each action dimension to proper range
actions = layers.Lambda(lambda x: (x * self.action_range) + self.action_low,
name='actions')(raw_actions)
# Create Keras model
self.model = models.Model(inputs=states, outputs=actions)
# Define loss function using action value (Q value) gradients
action_gradients = layers.Input(shape=(self.action_size,))
loss = K.mean(-action_gradients * actions)
# Define optimizer and training function
optimizer = optimizers.Adam()
updates_op = optimizer.get_updates(params=self.model.trainable_weights, loss=loss)
self.train_fn = K.function(
inputs=[self.model.input, action_gradients, K.learning_phase()],
outputs=[],
updates=updates_op)
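# Hedged usage sketch (added for illustration, not part of the original module):
# build an actor for a small placeholder state/action space and run one forward pass.
if __name__ == '__main__':
    import numpy as np
    actor = Actor(state_size=12, action_size=4,
                  action_low=np.array([0.0] * 4), action_high=np.array([900.0] * 4))
    dummy_state = np.zeros((1, 12))
    print(actor.model.predict(dummy_state).shape)  # expected: (1, 4)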
| 41.506329
| 105
| 0.640439
|
3c8a099c74e9c7b1b5de66c52080337a8c397d98
| 12,865
|
py
|
Python
|
pex/common.py
|
Djailla/pex
|
cf20f8fce16cc5d78962835ecc2824f372f17412
|
[
"Apache-2.0"
] | null | null | null |
pex/common.py
|
Djailla/pex
|
cf20f8fce16cc5d78962835ecc2824f372f17412
|
[
"Apache-2.0"
] | null | null | null |
pex/common.py
|
Djailla/pex
|
cf20f8fce16cc5d78962835ecc2824f372f17412
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, print_function
import atexit
import contextlib
import errno
import os
import shutil
import stat
import sys
import tempfile
import threading
import time
import zipfile
from collections import defaultdict
from datetime import datetime
from uuid import uuid4
# We use the start of MS-DOS time, which is what zipfiles use (see section 4.4.6 of
# https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT).
DETERMINISTIC_DATETIME = datetime(
year=1980, month=1, day=1, hour=0, minute=0, second=0, tzinfo=None
)
def die(msg, exit_code=1):
print(msg, file=sys.stderr)
sys.exit(exit_code)
def safe_copy(source, dest, overwrite=False):
def do_copy():
temp_dest = dest + uuid4().hex
shutil.copy(source, temp_dest)
os.rename(temp_dest, dest)
# If the platform supports hard-linking, use that and fall back to copying.
# Windows does not support hard-linking.
if hasattr(os, 'link'):
try:
os.link(source, dest)
except OSError as e:
if e.errno == errno.EEXIST:
# File already exists. If overwrite=True, write otherwise skip.
if overwrite:
do_copy()
elif e.errno == errno.EXDEV:
# Hard link across devices, fall back on copying
do_copy()
else:
raise
elif os.path.exists(dest):
if overwrite:
do_copy()
else:
do_copy()
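# Hedged usage sketch (added for illustration, not part of the original module):
# copy a file into a scratch directory; a hard link is used when the platform
# and filesystem allow it, otherwise the data is copied.
def _example_safe_copy():  # illustrative helper, file names are placeholders
    with temporary_dir() as td:
        src = os.path.join(td, 'source.txt')
        with open(src, 'w') as fp:
            fp.write('hello')
        safe_copy(src, os.path.join(td, 'dest.txt'))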
# See http://stackoverflow.com/questions/2572172/referencing-other-modules-in-atexit
class MktempTeardownRegistry(object):
def __init__(self):
self._registry = defaultdict(set)
self._getpid = os.getpid
self._lock = threading.RLock()
self._exists = os.path.exists
self._getenv = os.getenv
self._rmtree = shutil.rmtree
atexit.register(self.teardown)
def __del__(self):
self.teardown()
def register(self, path):
with self._lock:
self._registry[self._getpid()].add(path)
return path
def teardown(self):
for td in self._registry.pop(self._getpid(), []):
if self._exists(td):
self._rmtree(td)
_MKDTEMP_SINGLETON = MktempTeardownRegistry()
class PermPreservingZipFile(zipfile.ZipFile, object):
"""A ZipFile that works around https://bugs.python.org/issue15795"""
@classmethod
def zip_info_from_file(cls, filename, arcname=None, date_time=None):
"""Construct a ZipInfo for a file on the filesystem.
Usually this is provided directly as a method of ZipInfo, but it is not implemented in Python
2.7 so we re-implement it here. The main divergence we make from the original is adding a
parameter for the datetime (a time.struct_time), which allows us to use a deterministic
timestamp. See https://github.com/python/cpython/blob/master/Lib/zipfile.py#L495."""
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
if date_time is None:
date_time = time.localtime(st.st_mtime)
zinfo = zipfile.ZipInfo(filename=arcname, date_time=date_time[:6])
zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes
if isdir:
zinfo.file_size = 0
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.file_size = st.st_size
return zinfo
def _extract_member(self, member, targetpath, pwd):
result = super(PermPreservingZipFile, self)._extract_member(member, targetpath, pwd)
info = member if isinstance(member, zipfile.ZipInfo) else self.getinfo(member)
self._chmod(info, result)
return result
def _chmod(self, info, path):
# This magic works to extract perm bits from the 32 bit external file attributes field for
# unix-created zip files, for the layout, see:
# https://www.forensicswiki.org/wiki/ZIP#External_file_attributes
attr = info.external_attr >> 16
os.chmod(path, attr)
@contextlib.contextmanager
def open_zip(path, *args, **kwargs):
"""A contextmanager for zip files. Passes through positional and kwargs to zipfile.ZipFile."""
with contextlib.closing(PermPreservingZipFile(path, *args, **kwargs)) as zip:
yield zip
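# Hedged usage sketch (added for illustration, not part of the original module):
# write a member into an archive and extract it again; PermPreservingZipFile
# restores the unix mode bits on extraction. Paths are placeholders.
def _example_open_zip(archive_path, member_path):
    with open_zip(archive_path, 'w') as zf:
        zf.write(member_path)
    with open_zip(archive_path) as zf:
        zf.extractall(safe_mkdtemp())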
@contextlib.contextmanager
def temporary_dir(cleanup=True):
td = tempfile.mkdtemp()
try:
yield td
finally:
if cleanup:
safe_rmtree(td)
def safe_mkdtemp(**kw):
"""Create a temporary directory that is cleaned up on process exit.
Takes the same parameters as tempfile.mkdtemp.
"""
# proper lock sanitation on fork [issue 6721] would be desirable here.
return _MKDTEMP_SINGLETON.register(tempfile.mkdtemp(**kw))
def register_rmtree(directory):
"""Register an existing directory to be cleaned up at process exit."""
return _MKDTEMP_SINGLETON.register(directory)
def safe_mkdir(directory, clean=False):
"""Safely create a directory.
Ensures a directory is present. If it's not there, it is created. If it
is, it's a no-op. If clean is True, ensures the directory is empty.
"""
if clean:
safe_rmtree(directory)
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def safe_open(filename, *args, **kwargs):
"""Safely open a file.
``safe_open`` ensures that the directory components leading up the
specified file have been created first.
"""
safe_mkdir(os.path.dirname(filename))
return open(filename, *args, **kwargs) # noqa: T802
def safe_delete(filename):
"""Delete a file safely. If it's not present, no-op."""
try:
os.unlink(filename)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def safe_rmtree(directory):
"""Delete a directory if it's present. If it's not present, no-op."""
if os.path.exists(directory):
shutil.rmtree(directory, True)
def safe_sleep(seconds):
"""Ensure that the thread sleeps at a minimum the requested seconds.
Until Python 3.5, there was no guarantee that time.sleep() would actually sleep the requested
time. See https://docs.python.org/3/library/time.html#time.sleep."""
if sys.version_info[0:2] >= (3, 5):
time.sleep(seconds)
else:
start_time = current_time = time.time()
while current_time - start_time < seconds:
remaining_time = seconds - (current_time - start_time)
time.sleep(remaining_time)
current_time = time.time()
def rename_if_empty(src, dest, allowable_errors=(errno.EEXIST, errno.ENOTEMPTY)):
"""Rename `src` to `dest` using `os.rename()`.
If an `OSError` with errno in `allowable_errors` is encountered during the rename, the `dest`
dir is left unchanged and the `src` directory will simply be removed.
"""
try:
os.rename(src, dest)
except OSError as e:
if e.errno in allowable_errors:
safe_rmtree(src)
else:
raise
def chmod_plus_x(path):
"""Equivalent of unix `chmod a+x path`"""
path_mode = os.stat(path).st_mode
path_mode &= int('777', 8)
if path_mode & stat.S_IRUSR:
path_mode |= stat.S_IXUSR
if path_mode & stat.S_IRGRP:
path_mode |= stat.S_IXGRP
if path_mode & stat.S_IROTH:
path_mode |= stat.S_IXOTH
os.chmod(path, path_mode)
def chmod_plus_w(path):
"""Equivalent of unix `chmod +w path`"""
path_mode = os.stat(path).st_mode
path_mode &= int('777', 8)
path_mode |= stat.S_IWRITE
os.chmod(path, path_mode)
def touch(file, times=None):
"""Equivalent of unix `touch path`.
:file The file to touch.
:times Either a tuple of (atime, mtime) or else a single time to use for both. If not
specified both atime and mtime are updated to the current time.
"""
if times:
if len(times) > 2:
raise ValueError('times must either be a tuple of (atime, mtime) or else a single time value '
'to use for both.')
if len(times) == 1:
times = (times[0], times[0])
with safe_open(file, 'a'):
os.utime(file, times)
class Chroot(object):
"""A chroot of files overlayed from one directory to another directory.
Files may be tagged when added in order to keep track of multiple overlays
in the chroot.
"""
class Error(Exception): pass
class ChrootTaggingException(Error):
def __init__(self, filename, orig_tag, new_tag):
super(Chroot.ChrootTaggingException, self).__init__( # noqa: T800
"Trying to add %s to fileset(%s) but already in fileset(%s)!" % (
filename, new_tag, orig_tag))
def __init__(self, chroot_base):
"""Create the chroot.
:chroot_base Directory for the creation of the target chroot.
"""
try:
safe_mkdir(chroot_base)
except OSError as e:
raise self.Error('Unable to create chroot in %s: %s' % (chroot_base, e))
self.chroot = chroot_base
self.filesets = defaultdict(set)
def clone(self, into=None):
"""Clone this chroot.
:keyword into: (optional) An optional destination directory to clone the
Chroot into. If not specified, a temporary directory will be created.
.. versionchanged:: 0.8
The temporary directory created when ``into`` is not specified is now garbage collected on
interpreter exit.
"""
into = into or safe_mkdtemp()
new_chroot = Chroot(into)
for label, fileset in self.filesets.items():
for fn in fileset:
new_chroot.link(os.path.join(self.chroot, fn), fn, label=label)
return new_chroot
def path(self):
"""The path of the chroot."""
return self.chroot
def _normalize(self, dst):
dst = os.path.normpath(dst)
if dst.startswith(os.sep) or dst.startswith('..'):
raise self.Error('Destination path is not a relative path!')
return dst
def _check_tag(self, fn, label):
for fs_label, fs in self.filesets.items():
if fn in fs and fs_label != label:
raise self.ChrootTaggingException(fn, fs_label, label)
def _tag(self, fn, label):
self._check_tag(fn, label)
self.filesets[label].add(fn)
def _ensure_parent(self, path):
safe_mkdir(os.path.dirname(os.path.join(self.chroot, path)))
def copy(self, src, dst, label=None):
"""Copy file ``src`` to ``chroot/dst`` with optional label.
May raise anything shutil.copy can raise, e.g.
IOError(Errno 21 'EISDIR')
May raise ChrootTaggingException if dst is already in a fileset
but with a different label.
"""
dst = self._normalize(dst)
self._tag(dst, label)
self._ensure_parent(dst)
shutil.copy(src, os.path.join(self.chroot, dst))
def link(self, src, dst, label=None):
"""Hard link file from ``src`` to ``chroot/dst`` with optional label.
May raise anything os.link can raise, e.g.
IOError(Errno 21 'EISDIR')
May raise ChrootTaggingException if dst is already in a fileset
but with a different label.
"""
dst = self._normalize(dst)
self._tag(dst, label)
self._ensure_parent(dst)
abs_src = src
abs_dst = os.path.join(self.chroot, dst)
safe_copy(abs_src, abs_dst, overwrite=False)
# TODO: Ensure the target and dest are the same if the file already exists.
def write(self, data, dst, label=None, mode='wb'):
"""Write data to ``chroot/dst`` with optional label.
Has similar exceptional cases as ``Chroot.copy``
"""
dst = self._normalize(dst)
self._tag(dst, label)
self._ensure_parent(dst)
with open(os.path.join(self.chroot, dst), mode) as wp:
wp.write(data)
def touch(self, dst, label=None):
"""Perform 'touch' on ``chroot/dst`` with optional label.
Has similar exceptional cases as Chroot.copy
"""
dst = self._normalize(dst)
self._tag(dst, label)
touch(os.path.join(self.chroot, dst))
def get(self, label):
"""Get all files labeled with ``label``"""
return self.filesets.get(label, set())
def files(self):
"""Get all files in the chroot."""
all_files = set()
for label in self.filesets:
all_files.update(self.filesets[label])
return all_files
def labels(self):
return self.filesets.keys()
def __str__(self):
return 'Chroot(%s {fs:%s})' % (self.chroot,
' '.join('%s' % foo for foo in self.filesets.keys()))
def delete(self):
shutil.rmtree(self.chroot)
def zip(self, filename, mode='w', deterministic_timestamp=False):
with open_zip(filename, mode) as zf:
for f in sorted(self.files()):
full_path = os.path.join(self.chroot, f)
zinfo = zf.zip_info_from_file(
filename=full_path,
arcname=f,
date_time=DETERMINISTIC_DATETIME.timetuple() if deterministic_timestamp else None
)
with open(full_path, 'rb') as open_f:
data = open_f.read()
zf.writestr(zinfo, data, compress_type=zipfile.ZIP_DEFLATED)
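# Hedged usage sketch (added for illustration, not part of the original module):
# assemble a tiny chroot with labeled files and zip it with a deterministic
# timestamp. The file names and labels below are placeholders.
def _example_chroot():
    chroot = Chroot(safe_mkdtemp())
    chroot.write(b'print("hello")\n', 'app/main.py', label='source')
    chroot.touch('app/__init__.py', label='source')
    chroot.zip(os.path.join(safe_mkdtemp(), 'app.zip'), deterministic_timestamp=True)
    return chroot.files()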
| 30.413712
| 100
| 0.68286
|
0d5a87cfa9d015c5f57f58893513f639d838e139
| 6,805
|
py
|
Python
|
metaworld/envs/mujoco/sawyer_xyz/sawyer_push_v2.py
|
Simon0xzx/metaworld
|
2d441eed70b6f5cb1f35883b0517c4bd2812268c
|
[
"MIT"
] | null | null | null |
metaworld/envs/mujoco/sawyer_xyz/sawyer_push_v2.py
|
Simon0xzx/metaworld
|
2d441eed70b6f5cb1f35883b0517c4bd2812268c
|
[
"MIT"
] | null | null | null |
metaworld/envs/mujoco/sawyer_xyz/sawyer_push_v2.py
|
Simon0xzx/metaworld
|
2d441eed70b6f5cb1f35883b0517c4bd2812268c
|
[
"MIT"
] | 1
|
2020-10-28T11:51:08.000Z
|
2020-10-28T11:51:08.000Z
|
import numpy as np
from gym.spaces import Box
from metaworld.envs.env_util import get_asset_full_path
from metaworld.envs.mujoco.sawyer_xyz.base import SawyerXYZEnv, _assert_task_is_set
class SawyerPushEnvV2(SawyerXYZEnv):
"""
Motivation for V2:
V1 was very difficult to solve because the observation didn't say where
to move after reaching the puck.
Changelog from V1 to V2:
- (7/7/20) Removed 3 element vector. Replaced with 3 element position
of the goal (for consistency with other environments)
- (6/15/20) Added a 3 element vector to the observation. This vector
points from the end effector to the goal coordinate.
i.e. (self._state_goal - pos_hand)
- (6/15/20) Separated reach-push-pick-place into 3 separate envs.
"""
def __init__(self):
lift_thresh = 0.04
goal_low = (-0.1, 0.8, 0.05)
goal_high = (0.1, 0.9, 0.3)
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.6, 0.02)
obj_high = (0.1, 0.7, 0.02)
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'obj_init_angle': .3,
'obj_init_pos': np.array([0., 0.6, 0.02]),
'hand_init_pos': np.array([0., 0.6, 0.2]),
}
self.goal = np.array([0.1, 0.8, 0.02])
self.obj_init_angle = self.init_config['obj_init_angle']
self.obj_init_pos = self.init_config['obj_init_pos']
self.hand_init_pos = self.init_config['hand_init_pos']
self.liftThresh = lift_thresh
self.max_path_length = 150
self.action_space = Box(
np.array([-1, -1, -1, -1]),
np.array([+1, +1, +1, +1]),
)
self.obj_and_goal_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
)
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
self.observation_space = Box(
np.hstack((self.hand_low, obj_low, obj_low, goal_low)),
np.hstack((self.hand_high, obj_high, obj_high, goal_high)),
)
self.num_resets = 0
@property
def model_name(self):
return get_asset_full_path('sawyer_xyz/sawyer_push_v2.xml')
@_assert_task_is_set
def step(self, action):
self.set_xyz_action(action[:3])
self.do_simulation([action[-1], -action[-1]])
# The marker seems to get reset every time you do a simulation
self._set_goal_marker(self._state_goal)
ob = self._get_obs()
obs_dict = self._get_obs_dict()
rew, reach_dist, push_dist = self.compute_reward(action, obs_dict)
success = float(push_dist <= 0.07)
info = {
'reachDist': reach_dist,
'epRew': rew,
'goalDist': push_dist,
'success': success,
'goal': self.goal
}
self.curr_path_length += 1
return ob, rew, False, info
def _get_pos_objects(self):
return self.data.get_geom_xpos('objGeom')
def _set_goal_marker(self, goal):
self.data.site_xpos[self.model.site_name2id('goal')] = goal[:3]
def _set_obj_xyz(self, pos):
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9:12] = pos.copy()
qvel[9:15] = 0
self.set_state(qpos, qvel)
def fix_extreme_obj_pos(self, orig_init_pos):
# This accounts for the meshes of the geom and the object not being
# aligned. If this is not done, the object could be initialized in an
# extreme position
diff = self.get_body_com('obj')[:2] - \
self.data.get_geom_xpos('objGeom')[:2]
adjusted_pos = orig_init_pos[:2] + diff
# The convention we follow is that body_com[2] is always 0,
# and geom_pos[2] is the object height
return [
adjusted_pos[0],
adjusted_pos[1],
self.data.get_geom_xpos('objGeom')[-1]
]
def reset_model(self):
self._reset_hand()
self._state_goal = self.goal.copy()
self.obj_init_pos = self.fix_extreme_obj_pos(self.init_config['obj_init_pos'])
self.obj_init_angle = self.init_config['obj_init_angle']
self.objHeight = self.data.get_geom_xpos('objGeom')[2]
self.heightTarget = self.objHeight + self.liftThresh
if self.random_init:
goal_pos = self._get_state_rand_vec()
self._state_goal = goal_pos[3:]
while np.linalg.norm(goal_pos[:2] - self._state_goal[:2]) < 0.15:
goal_pos = self._get_state_rand_vec()
self._state_goal = goal_pos[3:]
self._state_goal = np.concatenate((goal_pos[-3:-1], [self.obj_init_pos[-1]]))
self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))
self._set_goal_marker(self._state_goal)
self._set_obj_xyz(self.obj_init_pos)
self.maxPushDist = np.linalg.norm(
self.obj_init_pos[:2] - np.array(self._state_goal)[:2])
self.target_reward = 1000*self.maxPushDist + 1000*2
self.num_resets += 1
return self._get_obs()
def _reset_hand(self):
for _ in range(10):
self.data.set_mocap_pos('mocap', self.hand_init_pos)
self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
self.do_simulation([-1, 1], self.frame_skip)
finger_right, finger_left = (
self.get_site_pos('rightEndEffector'),
self.get_site_pos('leftEndEffector')
)
self.init_finger_center = (finger_right + finger_left) / 2
self.pickCompleted = False
def compute_reward(self, actions, obs):
obs = obs['state_observation']
pos_obj = obs[3:6]
finger_right, finger_left = (
self.get_site_pos('rightEndEffector'),
self.get_site_pos('leftEndEffector')
)
finger_center = (finger_right + finger_left) / 2
goal = self._state_goal
assert np.all(goal == self.get_site_pos('goal'))
c1 = 1000
c2 = 0.01
c3 = 0.001
reach_dist = np.linalg.norm(finger_center - pos_obj)
reach_rew = -reach_dist
push_dist = np.linalg.norm(pos_obj[:2] - goal[:2])
if reach_dist < 0.05:
push_rew = c1 * (self.maxPushDist - push_dist) + \
c1 * (np.exp(-(push_dist ** 2) / c2) +
np.exp(-(push_dist ** 2) / c3))
push_rew = max(push_rew, 0)
else:
push_rew = 0
reward = reach_rew + push_rew
return [reward, reach_dist, push_dist]
| 34.897436
| 89
| 0.588685
|
e56563cfb4f2d14718f61139ce287ef6ffb4087f
| 24,404
|
py
|
Python
|
pypy/rlib/parsing/makepackrat.py
|
benoitc/pypy
|
a3e1b12d1d01dc29056b7badc051ffc034297658
|
[
"MIT"
] | 1
|
2020-01-21T11:10:51.000Z
|
2020-01-21T11:10:51.000Z
|
pypy/rlib/parsing/makepackrat.py
|
benoitc/pypy
|
a3e1b12d1d01dc29056b7badc051ffc034297658
|
[
"MIT"
] | null | null | null |
pypy/rlib/parsing/makepackrat.py
|
benoitc/pypy
|
a3e1b12d1d01dc29056b7badc051ffc034297658
|
[
"MIT"
] | null | null | null |
from __future__ import with_statement
import py
import sys
from pypy.rlib.parsing.tree import Nonterminal, Symbol, RPythonVisitor
from pypy.rlib.parsing.codebuilder import Codebuilder
from pypy.rlib.objectmodel import we_are_translated
class BacktrackException(Exception):
def __init__(self, error=None):
self.error = error
if not we_are_translated():
Exception.__init__(self, error)
class TreeOptimizer(RPythonVisitor):
def visit_or(self, t):
if len(t.children) == 1:
return self.dispatch(t.children[0])
return self.general_nonterminal_visit(t)
visit_commands = visit_or
def visit_negation(self, t):
child = self.dispatch(t.children[0])
if child.symbol == "negation":
child.symbol = "lookahead"
return child
t.children[0] = child
return t
def general_nonterminal_visit(self, t):
for i in range(len(t.children)):
t.children[i] = self.dispatch(t.children[i])
return t
def general_visit(self, t):
return t
syntax = r"""
NAME:
`[a-zA-Z_][a-zA-Z0-9_]*`;
SPACE:
' ';
COMMENT:
`( *#[^\n]*\n)+`;
IGNORE:
`(#[^\n]*\n)|\n|\t| `;
newline:
COMMENT
| `( *\n *)*`;
REGEX:
r = `\`[^\\\`]*(\\.[^\\\`]*)*\``
return {Symbol('REGEX', r, None)};
QUOTE:
r = `'[^\']*'`
return {Symbol('QUOTE', r, None)};
PYTHONCODE:
r = `\{[^\n\}]*\}`
return {Symbol('PYTHONCODE', r, None)};
EOF:
!__any__;
file:
IGNORE*
list
[EOF];
list:
content = production+
return {Nonterminal('list', content)};
production:
name = NAME
SPACE*
args = productionargs
':'
IGNORE*
what = or_
IGNORE*
';'
IGNORE*
return {Nonterminal('production', [name, args, what])};
productionargs:
'('
IGNORE*
args = (
NAME
[
IGNORE*
','
IGNORE*
]
)*
arg = NAME
IGNORE*
')'
IGNORE*
return {Nonterminal('productionargs', args + [arg])}
| return {Nonterminal('productionargs', [])};
or_:
l = (commands ['|' IGNORE*])+
last = commands
return {Nonterminal('or', l + [last])}
| commands;
commands:
cmd = command
newline
cmds = (command [newline])+
return {Nonterminal('commands', [cmd] + cmds)}
| command;
command:
simplecommand;
simplecommand:
return_
| if_
| named_command
| repetition
| choose
| negation;
return_:
'return'
SPACE*
code = PYTHONCODE
IGNORE*
return {Nonterminal('return', [code])};
if_:
'do'
newline
cmd = command
SPACE*
'if'
SPACE*
condition = PYTHONCODE
IGNORE*
return {Nonterminal('if', [cmd, condition])}
| 'if'
SPACE*
condition = PYTHONCODE
IGNORE*
return {Nonterminal('if', [condition])};
choose:
'choose'
SPACE*
name = NAME
SPACE*
'in'
SPACE*
expr = PYTHONCODE
IGNORE*
cmds = commands
return {Nonterminal('choose', [name, expr, cmds])};
commandchain:
result = simplecommand+
return {Nonterminal('commands', result)};
named_command:
name = NAME
SPACE*
'='
SPACE*
cmd = command
return {Nonterminal('named_command', [name, cmd])};
repetition:
what = enclosed
SPACE* '?' IGNORE*
return {Nonterminal('maybe', [what])}
| what = enclosed
SPACE*
repetition = ('*' | '+')
IGNORE*
return {Nonterminal('repetition', [repetition, what])};
negation:
'!'
SPACE*
what = negation
IGNORE*
return {Nonterminal('negation', [what])}
| enclosed;
enclosed:
'<'
IGNORE*
what = primary
IGNORE*
'>'
IGNORE*
return {Nonterminal('exclusive', [what])}
| '['
IGNORE*
what = or_
IGNORE*
']'
IGNORE*
return {Nonterminal('ignore', [what])}
| ['(' IGNORE*] or_ [')' IGNORE*]
| primary;
primary:
call | REGEX [IGNORE*] | QUOTE [IGNORE*];
call:
x = NAME
args = arguments
IGNORE*
return {Nonterminal("call", [x, args])};
arguments:
'('
IGNORE*
args = (
PYTHONCODE
[IGNORE* ',' IGNORE*]
)*
last = PYTHONCODE
')'
IGNORE*
return {Nonterminal("args", args + [last])}
| return {Nonterminal("args", [])};
"""
class ErrorInformation(object):
def __init__(self, pos, expected=None):
if expected is None:
expected = []
self.expected = expected
self.pos = pos
def __str__(self):
return "ErrorInformation(%s, %s)" % (self.pos, self.expected)
def get_line_column(self, source):
pos = self.pos
assert pos >= 0
uptoerror = source[:pos]
lineno = uptoerror.count("\n")
columnno = pos - uptoerror.rfind("\n")
return lineno, columnno
def nice_error_message(self, filename='<filename>', source=""):
if source:
lineno, columnno = self.get_line_column(source)
result = [" File %s, line %s" % (filename, lineno + 1)]
result.append(source.split("\n")[lineno])
result.append(" " * columnno + "^")
else:
result = ["<couldn't get source>"]
if self.expected:
failure_reasons = self.expected
if len(failure_reasons) > 1:
all_but_one = failure_reasons[:-1]
last = failure_reasons[-1]
expected = "%s or '%s'" % (
", ".join(["'%s'" % e for e in all_but_one]), last)
else:
expected = failure_reasons[0]
result.append("ParseError: expected %s" % (expected, ))
else:
result.append("ParseError")
return "\n".join(result)
class Status(object):
# status codes:
NORMAL = 0
ERROR = 1
INPROGRESS = 2
LEFTRECURSION = 3
SOMESOLUTIONS = 4
_annspecialcase_ = 'specialize:ctr_location' # polymorphic
def __repr__(self):
return "Status(%s, %s, %s, %s)" % (self.pos, self.result, self.error,
self.status)
def __init__(self):
self.pos = 0
self.error = None
self.status = self.INPROGRESS
self.result = None
class ParserBuilder(RPythonVisitor, Codebuilder):
def __init__(self):
Codebuilder.__init__(self)
self.initcode = []
self.names = {}
self.matchers = {}
def make_parser(self):
m = {'Status': Status,
'Nonterminal': Nonterminal,
'Symbol': Symbol,}
exec py.code.Source(self.get_code()).compile() in m
return m['Parser']
def memoize_header(self, name, args):
dictname = "_dict_%s" % (name, )
self.emit_initcode("self.%s = {}" % (dictname, ))
if args:
self.emit("_key = (self._pos, %s)" % (", ".join(args)))
else:
self.emit("_key = self._pos")
self.emit("_status = self.%s.get(_key, None)" % (dictname, ))
with self.block("if _status is None:"):
self.emit("_status = self.%s[_key] = Status()" % (
dictname, ))
with self.block("else:"):
self.emit("_statusstatus = _status.status")
with self.block("if _statusstatus == _status.NORMAL:"):
self.emit("self._pos = _status.pos")
self.emit("return _status")
with self.block("elif _statusstatus == _status.ERROR:"):
self.emit("raise BacktrackException(_status.error)")
if self.have_call:
with self.block(
"elif (_statusstatus == _status.INPROGRESS or\n"
" _statusstatus == _status.LEFTRECURSION):"):
self.emit("_status.status = _status.LEFTRECURSION")
with self.block("if _status.result is not None:"):
self.emit("self._pos = _status.pos")
self.emit("return _status")
with self.block("else:"):
self.emit("raise BacktrackException(None)")
with self.block(
"elif _statusstatus == _status.SOMESOLUTIONS:"):
self.emit("_status.status = _status.INPROGRESS")
self.emit("_startingpos = self._pos")
self.start_block("try:")
self.emit("_result = None")
self.emit("_error = None")
def memoize_footer(self, name, args):
dictname = "_dict_%s" % (name, )
if self.have_call:
with self.block(
"if _status.status == _status.LEFTRECURSION:"):
with self.block("if _status.result is not None:"):
with self.block("if _status.pos >= self._pos:"):
self.emit("_status.status = _status.NORMAL")
self.emit("self._pos = _status.pos")
self.emit("return _status")
self.emit("_status.pos = self._pos")
self.emit("_status.status = _status.SOMESOLUTIONS")
self.emit("_status.result = %s" % (self.resultname, ))
self.emit("_status.error = _error")
self.emit("self._pos = _startingpos")
self.emit("return self._%s(%s)" % (name, ', '.join(args)))
else:
self.emit("assert _status.status != _status.LEFTRECURSION")
self.emit("_status.status = _status.NORMAL")
self.emit("_status.pos = self._pos")
self.emit("_status.result = %s" % (self.resultname, ))
self.emit("_status.error = _error")
self.emit("return _status")
self.end_block("try")
with self.block("except BacktrackException, _exc:"):
self.emit("_status.pos = -1")
self.emit("_status.result = None")
self.combine_error('_exc.error')
self.emit("_status.error = _error")
self.emit("_status.status = _status.ERROR")
self.emit("raise BacktrackException(_error)")
def choice_point(self, name=None):
var = "_choice%s" % (self.namecount, )
self.namecount += 1
self.emit("%s = self._pos" % (var, ))
return var
def revert(self, var):
self.emit("self._pos = %s" % (var, ))
def visit_list(self, t):
self.start_block("class Parser(object):")
for elt in t.children:
self.dispatch(elt)
with self.block("def __init__(self, inputstream):"):
for line in self.initcode:
self.emit(line)
self.emit("self._pos = 0")
self.emit("self._inputstream = inputstream")
if self.matchers:
self.emit_regex_code()
self.end_block("class")
def emit_regex_code(self):
for regex, matcher in self.matchers.iteritems():
with self.block(
"def _regex%s(self):" % (abs(hash(regex)), )):
c = self.choice_point()
self.emit("_runner = self._Runner(self._inputstream, self._pos)")
self.emit("_i = _runner.recognize_%s(self._pos)" % (
abs(hash(regex)), ))
self.start_block("if _runner.last_matched_state == -1:")
self.revert(c)
self.emit("raise BacktrackException")
self.end_block("if")
self.emit("_upto = _runner.last_matched_index + 1")
self.emit("_pos = self._pos")
self.emit("assert _pos >= 0")
self.emit("assert _upto >= 0")
self.emit("_result = self._inputstream[_pos: _upto]")
self.emit("self._pos = _upto")
self.emit("return _result")
with self.block("class _Runner(object):"):
with self.block("def __init__(self, text, pos):"):
self.emit("self.text = text")
self.emit("self.pos = pos")
self.emit("self.last_matched_state = -1")
self.emit("self.last_matched_index = -1")
self.emit("self.state = -1")
for regex, matcher in self.matchers.iteritems():
matcher = str(matcher).replace(
"def recognize(runner, i)",
"def recognize_%s(runner, i)" % (abs(hash(regex)), ))
self.emit(str(matcher))
def visit_production(self, t):
name = t.children[0]
if name in self.names:
raise Exception("name %s appears twice" % (name, ))
self.names[name] = True
otherargs = t.children[1].children
argswithself = ", ".join(["self"] + otherargs)
argswithoutself = ", ".join(otherargs)
with self.block("def %s(%s):" % (name, argswithself)):
self.emit("return self._%s(%s).result" % (name, argswithoutself))
self.start_block("def _%s(%s):" % (name, argswithself, ))
self.namecount = 0
self.resultname = "_result"
self.have_call = False
self.created_error = False
allother = self.store_code_away()
self.dispatch(t.children[-1])
subsequent = self.restore_code(allother)
self.memoize_header(name, otherargs)
self.add_code(subsequent)
self.memoize_footer(name, otherargs)
self.end_block("def")
def visit_or(self, t, first=False):
possibilities = t.children
if len(possibilities) > 1:
self.start_block("while 1:")
for i, p in enumerate(possibilities):
c = self.choice_point()
with self.block("try:"):
self.dispatch(p)
self.emit("break")
with self.block("except BacktrackException, _exc:"):
self.combine_error('_exc.error')
self.revert(c)
if i == len(possibilities) - 1:
self.emit("raise BacktrackException(_error)")
self.dispatch(possibilities[-1])
if len(possibilities) > 1:
self.emit("break")
self.end_block("while")
def visit_commands(self, t):
for elt in t.children:
self.dispatch(elt)
def visit_maybe(self, t):
c = self.choice_point()
with self.block("try:"):
self.dispatch(t.children[0])
with self.block("except BacktrackException:"):
self.revert(c)
def visit_repetition(self, t):
name = "_all%s" % (self.namecount, )
self.namecount += 1
self.emit("%s = []" % (name, ))
if t.children[0] == '+':
self.dispatch(t.children[1])
self.emit("%s.append(_result)" % (name, ))
with self.block("while 1:"):
c = self.choice_point()
with self.block("try:"):
self.dispatch(t.children[1])
self.emit("%s.append(_result)" % (name, ))
with self.block("except BacktrackException, _exc:"):
self.combine_error('_exc.error')
self.revert(c)
self.emit("break")
self.emit("_result = %s" % (name, ))
def visit_exclusive(self, t):
self.resultname = "_enclosed"
self.dispatch(t.children[0])
self.emit("_enclosed = _result")
def visit_ignore(self, t):
resultname = "_before_discard%i" % (self.namecount, )
self.namecount += 1
self.emit("%s = _result" % (resultname, ))
self.dispatch(t.children[0])
self.emit("_result = %s" % (resultname, ))
def visit_negation(self, t):
c = self.choice_point()
resultname = "_stored_result%i" % (self.namecount, )
self.namecount += 1
child = t.children[0]
self.emit("%s = _result" % (resultname, ))
with self.block("try:"):
self.dispatch(child)
with self.block("except BacktrackException:"):
self.revert(c)
self.emit("_result = %s" % (resultname, ))
with self.block("else:"):
# heuristic to get nice error messages sometimes
if isinstance(child, Symbol) and child.symbol == "QUOTE":
error = "self._ErrorInformation(%s, ['NOT %s'])" % (
c, child.additional_info[1:-1], )
else:
error = "None"
self.emit("raise BacktrackException(%s)" % (error, ))
def visit_lookahead(self, t):
resultname = "_stored_result%i" % (self.namecount, )
self.emit("%s = _result" % (resultname, ))
c = self.choice_point()
self.dispatch(t.children[0])
self.revert(c)
self.emit("_result = %s" % (resultname, ))
def visit_named_command(self, t):
name = t.children[0]
self.dispatch(t.children[1])
self.emit("%s = _result" % (name, ))
def visit_return(self, t):
self.emit("_result = (%s)" % (t.children[0].additional_info[1:-1], ))
def visit_if(self, t):
if len(t.children) == 2:
self.dispatch(t.children[0])
with self.block("if not (%s):" % (
t.children[-1].additional_info[1:-1], )):
self.emit("raise BacktrackException(")
self.emit(" self._ErrorInformation(")
self.emit(" _startingpos, ['condition not met']))")
def visit_choose(self, t):
with self.block("for %s in (%s):" % (
t.children[0], t.children[1].additional_info[1:-1], )):
with self.block("try:"):
self.dispatch(t.children[2])
self.emit("break")
with self.block("except BacktrackException, _exc:"):
self.combine_error('_exc.error')
with self.block("else:"):
self.emit("raise BacktrackException(_error)")
def visit_call(self, t):
self.have_call = True
args = ", ".join(['(%s)' % (arg.additional_info[1:-1], )
for arg in t.children[1].children])
if t.children[0].startswith("_"):
callname = t.children[0]
self.emit("_result = self.%s(%s)" % (callname, args))
else:
callname = "_" + t.children[0]
self.emit("_call_status = self.%s(%s)" % (callname, args))
self.emit("_result = _call_status.result")
self.combine_error('_call_status.error')
def visit_REGEX(self, t):
r = t.additional_info[1:-1].replace('\\`', '`')
matcher = self.get_regex(r)
self.emit("_result = self._regex%s()" % (abs(hash(r)), ))
def visit_QUOTE(self, t):
self.emit("_result = self.__chars__(%r)" % (
str(t.additional_info[1:-1]), ))
def get_regex(self, r):
from pypy.rlib.parsing.regexparse import parse_regex
if r in self.matchers:
return self.matchers[r]
regex = parse_regex(r)
if regex is None:
raise ValueError(
"%s is not a valid regular expression" % regextext)
automaton = regex.make_automaton().make_deterministic()
automaton.optimize()
matcher = automaton.make_lexing_code()
self.matchers[r] = py.code.Source(matcher)
return matcher
def combine_error(self, newerror):
if self.created_error:
self.emit(
"_error = self._combine_errors(_error, %s)" % (newerror, ))
else:
self.emit("_error = %s" % (newerror, ))
self.created_error = True
class MetaPackratParser(type):
def __new__(cls, name_, bases, dct):
if '__doc__' not in dct or dct['__doc__'] is None:
return type.__new__(cls, name_, bases, dct)
from pypackrat import PyPackratSyntaxParser
import sys, new, inspect
frame = sys._getframe(1)
source = dct['__doc__']
p = PyPackratSyntaxParser(source)
try:
t = p.file()
except BacktrackException, exc:
print exc.error.nice_error_message("<docstring>", source)
lineno, _ = exc.error.get_line_column(source)
errorline = source.split("\n")[lineno]
try:
code = frame.f_code
source = inspect.getsource(code)
lineno_in_orig = source.split("\n").index(errorline)
if lineno_in_orig >= 0:
print "probable error position:"
print "file:", code.co_filename
print "line:", lineno_in_orig + code.co_firstlineno + 1
except (IOError, ValueError):
pass
raise exc
t = t.visit(TreeOptimizer())
visitor = ParserBuilder()
t.visit(visitor)
pcls = visitor.make_parser()
forbidden = dict.fromkeys(("__weakref__ __doc__ "
"__dict__ __module__").split())
initthere = "__init__" in dct
#XXX XXX XXX
if 'BacktrackException' not in frame.f_globals:
raise Exception("must import BacktrackException")
if 'Status' not in frame.f_globals:
raise Exception("must import Status")
result = type.__new__(cls, name_, bases, dct)
for key, value in pcls.__dict__.iteritems():
if isinstance(value, type):
value.__module__ = result.__module__ #XXX help the annotator
if isinstance(value, type(lambda: None)):
value = new.function(value.func_code, frame.f_globals)
if not hasattr(result, key) and key not in forbidden:
setattr(result, key, value)
if result.__init__ is object.__init__:
result.__init__ = pcls.__dict__['__init__']
result.init_parser = pcls.__dict__['__init__']
result._code = visitor.get_code()
return result
class PackratParser(object):
__metaclass__ = MetaPackratParser
_ErrorInformation = ErrorInformation
_BacktrackException = BacktrackException
def __chars__(self, chars):
#print '__chars__(%s)' % (chars, ), self._pos
try:
for i in range(len(chars)):
if self._inputstream[self._pos + i] != chars[i]:
raise BacktrackException(
self._ErrorInformation(self._pos, [chars]))
self._pos += len(chars)
return chars
except IndexError:
raise BacktrackException(
self._ErrorInformation(self._pos, [chars]))
def __any__(self):
try:
result = self._inputstream[self._pos]
self._pos += 1
return result
except IndexError:
raise BacktrackException(
self._ErrorInformation(self._pos, ['anything']))
def _combine_errors(self, error1, error2):
if error1 is None:
return error2
if (error2 is None or error1.pos > error2.pos or
len(error2.expected) == 0):
return error1
elif error2.pos > error1.pos or len(error1.expected) == 0:
return error2
expected = []
already_there = {}
for ep in [error1.expected, error2.expected]:
for reason in ep:
if reason not in already_there:
already_there[reason] = True
expected.append(reason)
return ErrorInformation(error1.pos, expected)
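    # Editor's note: e.g. combining ErrorInformation(5, ["'a'"]) with
    # ErrorInformation(5, ["'b'"]) yields ErrorInformation(5, ["'a'", "'b'"]);
    # an error at a later position simply wins outright.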
def test_generate():
f = py.path.local(__file__).dirpath().join("pypackrat.py")
from pypackrat import PyPackratSyntaxParser
p = PyPackratSyntaxParser(syntax)
t = p.file()
t = t.visit(TreeOptimizer())
visitor = ParserBuilder()
t.visit(visitor)
code = visitor.get_code()
content = """
from pypy.rlib.parsing.tree import Nonterminal, Symbol
from makepackrat import PackratParser, BacktrackException, Status
%s
class PyPackratSyntaxParser(PackratParser):
def __init__(self, stream):
self.init_parser(stream)
forbidden = dict.fromkeys(("__weakref__ __doc__ "
"__dict__ __module__").split())
initthere = "__init__" in PyPackratSyntaxParser.__dict__
for key, value in Parser.__dict__.iteritems():
if key not in PyPackratSyntaxParser.__dict__ and key not in forbidden:
setattr(PyPackratSyntaxParser, key, value)
PyPackratSyntaxParser.init_parser = Parser.__init__.im_func
""" % (code, )
print content
f.write(content)
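
# --- Editor's sketch (not part of the original module) ------------------------------
# MetaPackratParser compiles the grammar found in a subclass docstring into rule
# methods at class-creation time; the defining module must import BacktrackException
# and Status (the metaclass checks the frame globals for both).  The rule syntax below
# is only a rough guess at the pypackrat DSL, so the whole sketch stays commented out.
#
#   from makepackrat import PackratParser, BacktrackException, Status
#
#   class ToyParser(PackratParser):
#       """
#       greeting:
#           'hello'
#           return {'greeted'};
#       """
#
#   p = ToyParser("hello")     # __init__ is taken from the generated Parser class
#   p.greeting()               # calls the compiled rule; BacktrackException on failure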
| 32.582109
| 81
| 0.550074
|
0f8dfcb7e2239463e5bc74a17c08e393ea0568d9
| 15,120
|
py
|
Python
|
calla/JTG/wind.py
|
warmwaver/calla
|
6667bfc51e3ed66eb0ae3491f827b893e4d8aa0b
|
[
"MIT"
] | 7
|
2018-10-11T09:03:09.000Z
|
2022-02-23T01:34:12.000Z
|
calla/JTG/wind.py
|
warmwaver/calla
|
6667bfc51e3ed66eb0ae3491f827b893e4d8aa0b
|
[
"MIT"
] | null | null | null |
calla/JTG/wind.py
|
warmwaver/calla
|
6667bfc51e3ed66eb0ae3491f827b893e4d8aa0b
|
[
"MIT"
] | 1
|
2021-03-13T11:59:43.000Z
|
2021-03-13T11:59:43.000Z
|
"""JTG/T 3360-01-2018 公路桥梁抗风设计规范"""
__all__ = [
'wind_reference_speed',
'wind_girder',
'wind_element',
'flutter_stability'
]
from calla import abacus, InputError, numeric
from collections import OrderedDict
from math import pi, sqrt, sin, cos, tan
class wind_reference_speed(abacus):
    '''
    Design reference wind speed.
    Wind-resistant Design Specification for Highway Bridges (JTG/T 3360-01-2018), Section 5.2.
    '''
__title__ = '设计基准风速'
__inputs__ = OrderedDict([
# ('bridge_type',('','','0','桥梁类型','',{'0':'I形、π形或箱形截面','1':'桁架梁'})),
# ('B',('<i>B</i>','m',1.0,'主梁的特征宽度')),
# ('D',('<i>D</i>','m',1.0,'主梁的特征高度','主梁梁体的投影高度')),
# ('βd',('<i>β</i><sub>d</sub>','',0,'腹板倾角','腹板与竖直方向的夹角')),
# ('truss_type',('桁架构件类型','','0','','',{'0':'矩形与H形截面','1':'圆柱形','2':'桥面系构造'})),
# ('实面积比',('实面积比','',0.1,'','桁架净面积/桁架轮廓面积',[0.1,0.2,0.3,0.4,0.5])),
# ('间距比',('间距比','',1,'','两桁架中心距/迎风桁架高度',[1,2,3,4,5,6])),
# ('d',('<i>d</i>','m',1.0,'圆柱形构件直径','')),
('U10',('<i>U</i><sub>10</sub>','m/s',10,'基本风速','可按附录A.2或附录A.3取值')),
('kt',('<i>k</i><sub>t</sub>','',1.0,'地形条件系数','不小于1.0。开阔平坦地形取1.0,峡谷山口取1.2~1.5')),
# ('L',('<i>L</i>','m',20,'水平加载长度','成桥状态下为主桥全长')),
('Z',('<i>Z</i>','m',10,'基准高度','按规范4.2.2、4.2.3条取值')),
('地表类别',('地表类别','','A','','''A 海岸、海面、开阔水面、沙漠;
B 田野、乡村、丛林、平坦开阔地及低层建筑稀少区;
C 树木及低层建筑物等密集地区、中高层建筑物稀少地区、平缓的丘陵地;
D 中高层建筑物密集地区、起伏较大的丘陵地''',('A','B','C','D'))),
('ρ',('<i>ρ</i>','kg/m<sup>3</sup>',1.25,'空气密度')),
])
__deriveds__ = OrderedDict([
('GV',('<i>G</i><sub>V</sub>','',1.0,'静阵风系数','查表5.2.1')),
('Ud',('<i>U</i><sub>d</sub>','m/s',0,'设计基准风速','基准高度Z处的设计基准风速')),
('kf',('<i>k</i><sub>f</sub>','',1.0,'抗风风险系数','表4.2.6-1')),
('kh',('<i>k</i><sub>h</sub>','',1.0,'地表类别转换及风速高度修正系数','取1.0~1.77,表4.2.6-2')),
('Ug',('<i>U</i><sub>g</sub>','m/s',0,'等效静阵风风速')),
('ηc',('<i>η</i><sub>c</sub>','m',1.0,'横向力系数的倾角折减系数','')),
('η',('<i>η</i>','m',1.0,'桁架遮挡系数','')),
('Fg',('<i>F</i><sub>g</sub>','N/m',0,'等效静阵风荷载')),
])
_α0 = {'A':0.12,'B':0.16,'C':0.22,'D':0.30}
_z0 = {'A':0.01,'B':0.05,'C':0.3,'D':1.0}
_kc = {'A':1.174,'B':1.0,'C':0.785,'D':0.564}
_kf = {'R1':1.05,'R2':1.02,'R3':1.0}
# _GV = { # 表5.2.1
# 'A':(1.29,1.28,1.26,1.24,1.23,1.22,1.21,1.2,1.19,1.18,1.17,1.16,1.15),
# 'B':(1.35,1.33,1.31,1.29,1.27,1.26,1.25,1.24,1.23,1.22,1.21,1.20,1.18),
# 'C':(1.49,1.48,1.45,1.41,1.39,1.37,1.36,1.34,1.33,1.31,1.30,1.29,1.26),
# 'D':(1.56,1.54,1.51,1.47,1.44,1.42,1.41,1.39,1.37,1.35,1.34,1.32,1.30)
# }
    # Horizontal loading lengths of Table 5.2.1.
    # The ">=2000" implied by the last column is logically wrong; it should be ">1500".
_L = (20,60,100,200,300,400,500,650,800,1000,1200,1500,2000)
table_5_3_2_1 = (
(1.9,1.2,0.7),
(1.8,1.2,0.8),
(1.7,1.2,0.8),
(1.7,1.1,0.8),
(1.6,1.1,0.8)
)
table_5_3_2_2 = (
(1.0,0.9,0.8,0.6,0.45),
(1.0,0.9,0.8,0.65,0.5),
(1.0,0.95,0.8,0.7,0.55),
(1.0,0.95,0.8,0.7,0.6),
(1.0,0.95,0.85,0.75,0.65),
(1.0,0.95,0.9,0.8,0.7)
)
@staticmethod
def _findindex(table, data):
for i in range(0,len(table)):
if data<=table[i]:
return i
return i
@staticmethod
def fUd(kf,kt,kh,U10):
        '''
        Computed with Eq. (4.2.6-2).
        Eq. (4.2.6-1) in the original text is wrong: it omits kt.
        '''
return kf*kt*kh*U10
@staticmethod
def fkh(kc,Z,α0):
        '''
        Computed according to Eqs. (4.2.6-3)-(4.2.6-6).
        '''
return kc*(Z/10)**α0
def solve(self):
self.validate('positive','B', 'H', 'Z')
self.validate('non-negative','βd')
U10 = self.U10
self.R = 'R1' if U10>32.6 else 'R2' if U10>24.5 else 'R3'
self.kf = self._kf[self.R]
self.kc = self._kc[self.地表类别]
self.α0 = self._α0[self.地表类别]
kh = self.fkh(self.kc,self.Z,self.α0)
# 1≤kh≤1.77
kh = max(kh, 1.0)
self.kh = min(kh, 1.77)
self.Ud = self.fUd(self.kf,self.kt,self.kh,self.U10)
# i = self._findindex(self._L, self.L)
# self.GV = self._GV[self.地表类别][i]
# self.Ug = self.GV*self.Ud
def _html(self, digits):
for para in ('U10','Z'):
yield self.format(para, digits=None)
for para in ('kf','kt','kh'):
yield self.format(para, digits)
yield self.format('Ud',eq='kf·kt·kh·U10')
# yield self.format('Ug',eq='GV·Ud')
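
# Editor's worked example for Section 4.2.6 (illustrative values, not from the spec):
# with U10 = 26.3 m/s (risk class R2, so kf = 1.02), kt = 1.0, surface category B
# (kc = 1.0, α0 = 0.16) and Z = 20 m,
#     kh = 1.0 * (20/10)**0.16 ≈ 1.12   (clamped to the range 1.0-1.77)
#     Ud = kf*kt*kh*U10 = 1.02 * 1.0 * 1.12 * 26.3 ≈ 30.0 m/s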
class wind_girder(abacus):
    '''
    Equivalent static gust load on the main girder.
    Wind-resistant Design Specification for Highway Bridges (JTG/T 3360-01-2018), Section 5.3.
    '''
__title__ = '主梁上的风荷载'
__inputs__ = [
('bridge_type','','','girder','桥梁类型','',{'girder':'I形、π形或箱形截面','truss':'桁架梁'}),
('B','<i>B</i>','m',1.0,'主梁的特征宽度'),
('D','<i>D</i>','m',1.0,'主梁的特征高度','主梁梁体的投影高度'),
('βd','<i>β</i><sub>d</sub>','',0,'腹板倾角','腹板与竖直方向的夹角'),
('truss_type','桁架构件类型','','a','','',{'a':'矩形与H形截面','b':'圆柱形','c':'桥面系构造'}),
('实面积比','实面积比','',0.1,'','桁架净面积/桁架轮廓面积',[0.1,0.2,0.3,0.4,0.5]),
('间距比','间距比','',1,'','两桁架中心距/迎风桁架高度',[1,2,3,4,5,6]),
('d','<i>d</i>','m',1.0,'圆柱形构件直径',''),
('U10','<i>U</i><sub>10</sub>','m/s',10,'基本风速','可按附录A.2或附录A.3取值'),
('kt','<i>k</i><sub>t</sub>','',1.0,'地形条件系数','不小于1.0。开阔平坦地形取1.0,峡谷山口取1.2~1.5'),
('L','<i>L</i>','m',20,'水平加载长度','成桥状态下为主桥全长'),
('Z','<i>Z</i>','m',10,'基准高度','按规范4.2.2、4.2.3条取值'),
('地表类别','地表类别','','A','','''A 海岸、海面、开阔水面、沙漠;
B 田野、乡村、丛林、平坦开阔地及低层建筑稀少区;
C 树木及低层建筑物等密集地区、中高层建筑物稀少地区、平缓的丘陵地;
D 中高层建筑物密集地区、起伏较大的丘陵地''',('A','B','C','D')),
('ρ','<i>ρ</i>','kg/m<sup>3</sup>',1.25,'空气密度'),
('CH','<i>C</i><sub>H</sub>','',1.0,'主梁横向力系数',''),
]
__deriveds__ = [
('GV','<i>G</i><sub>V</sub>','',1.0,'等效静阵风系数','查表5.2.1'),
('Ud','<i>U</i><sub>d</sub>','m/s',0,'设计基准风速','基准高度Z处的设计基准风速'),
('kf','<i>k</i><sub>f</sub>','',1.0,'抗风风险系数','表4.2.6-1'),
('kh','<i>k</i><sub>h</sub>','',1.0,'地表类别转换及风速高度修正系数','取1.0~1.77,表4.2.6-2'),
('Ug','<i>U</i><sub>g</sub>','m/s',0,'等效静阵风风速'),
('ηc','<i>η</i><sub>c</sub>','m',1.0,'横向力系数的倾角折减系数',''),
('η','<i>η</i>','m',1.0,'桁架遮挡系数',''),
('Fg','<i>F</i><sub>g</sub>','N/m',0,'等效静阵风荷载'),
]
__toggles__ = [
'bridge_type',{'girder':('CH','truss_type','实面积比','间距比','d'),'truss':('CH', 'B','D','βd')},
'truss_type',{'a':('d',)}
]
_α0 = {'A':0.12,'B':0.16,'C':0.22,'D':0.30}
_z0 = {'A':0.01,'B':0.05,'C':0.3,'D':1.0}
_kc = {'A':1.174,'B':1.0,'C':0.785,'D':0.564}
_kf = {'R1':1.05,'R2':1.02,'R3':1.0}
    _GV = { # Table 5.2.1
'A':(1.29,1.28,1.26,1.24,1.23,1.22,1.21,1.2,1.19,1.18,1.17,1.16,1.15),
'B':(1.35,1.33,1.31,1.29,1.27,1.26,1.25,1.24,1.23,1.22,1.21,1.20,1.18),
'C':(1.49,1.48,1.45,1.41,1.39,1.37,1.36,1.34,1.33,1.31,1.30,1.29,1.26),
'D':(1.56,1.54,1.51,1.47,1.44,1.42,1.41,1.39,1.37,1.35,1.34,1.32,1.30)
}
    # Horizontal loading lengths of Table 5.2.1.
    # The ">=2000" implied by the last column is logically wrong; it should be ">1500".
_L = (20,60,100,200,300,400,500,650,800,1000,1200,1500,2000)
table_5_3_2_1 = (
(1.9,1.2,0.7),
(1.8,1.2,0.8),
(1.7,1.2,0.8),
(1.7,1.1,0.8),
(1.6,1.1,0.8)
)
table_5_3_2_2 = (
(1.0,0.9,0.8,0.6,0.45),
(1.0,0.9,0.8,0.65,0.5),
(1.0,0.95,0.8,0.7,0.55),
(1.0,0.95,0.8,0.7,0.6),
(1.0,0.95,0.85,0.75,0.65),
(1.0,0.95,0.9,0.8,0.7)
)
@staticmethod
def _findindex(table, data):
for i in range(0,len(table)):
if data<=table[i]:
return i
return i
@staticmethod
def fUd(kf,kt,kh,U10):
        '''
        Computed with Eq. (4.2.6-2).
        Eq. (4.2.6-1) in the original text is wrong: it omits kt.
        '''
return kf*kt*kh*U10
@staticmethod
def fkh(kc,Z,α0):
        '''
        Computed according to Eqs. (4.2.6-3)-(4.2.6-6).
        '''
return kc*(Z/10)**α0
@staticmethod
def fFg(ρ, Ug, CH, D):
"""
计算静阵风荷载
《公路桥梁抗风设计规范》5.3.1节,公式(5.3.1)
"""
return 1/2*ρ*Ug**2*CH*D
def solve(self):
self.validate('positive','B', 'H', 'Z')
self.validate('non-negative','βd')
U10 = self.U10
self.R = 'R1' if U10>32.6 else 'R2' if U10>24.5 else 'R3'
self.kf = self._kf[self.R]
self.地表类别 = str(self.地表类别).upper()
self.kc = self._kc[self.地表类别]
self.α0 = self._α0[self.地表类别]
kh = self.fkh(self.kc,self.Z,self.α0)
# 1≤kh≤1.77
kh = max(kh, 1.0)
self.kh = min(kh, 1.77)
self.Ud = self.fUd(self.kf,self.kt,self.kh,self.U10)
i = self._findindex(self._L, self.L)
self.GV = self._GV[self.地表类别][i]
self.Ug = self.GV*self.Ud
B = self.B
D = self.D
if self.bridge_type == 'girder':
βd = self.βd
ηc = 1-0.005*βd if βd<60 else 0.7
CH = 2.1-0.1*(B/D) if B/D<8 else 1.3
self.CH = ηc*CH
else:
i = round(10*self.实面积比)-1
i = min(0 if i<0 else i,4)
j = 0 if self.truss_type == 'a' else 1 if self.d*self.Ud<=6 else 2
CH = self.table_5_3_2_1[i][j]
j = i
i = round(self.间距比)-1
i = min(0 if i<0 else i,5)
η = self.table_5_3_2_2[i][j]
self.CH = 1.3 if self.truss_type == 'c' else η*CH
self.Fg = self.fFg(self.ρ, self.Ug, self.CH, D)
def _html(self, digits):
yield self.format('bridge_type')
if self.bridge_type == 'girder':
yield self.format('B', digits=None)
yield self.format('D', digits=None)
for para in ('U10','GV','Z'):
yield self.format(para, digits=None)
for para in ('kf','kt','kh','CH'):
yield self.format(para, digits)
yield self.format('Ud',eq='kf·kt·kh·U10')
yield self.format('Ug',eq='GV·Ud')
yield self.format('Fg',eq='1/2·ρ·Ug<sup>2</sup>·CH·D')
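
# Editor's worked example for Eq. (5.3.1) (illustrative values): with ρ = 1.25 kg/m³,
# Ug = 35 m/s, CH = 1.3 and D = 2.5 m,
#     Fg = 0.5 * 1.25 * 35**2 * 1.3 * 2.5 ≈ 2.49e3 N/m ≈ 2.5 kN/m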
class wind_element(wind_reference_speed):
    '''
    Equivalent static gust load on piers, towers, stay cables, main cables and hangers.
    Wind-resistant Design Specification for Highway Bridges (JTG/T 3360-01-2018), Section 5.4.
    '''
__title__ = '构件上的风荷载'
__inputs__ = OrderedDict()
__inputs__.update(wind_reference_speed.__inputs__)
__inputs__.update(
OrderedDict([
('H',('<i>H</i>','m',10,'构件高度')),
('CD',('<i>C</i><sub>D</sub>','',1.0,'构件的阻力系数','按5.4.2~5.4.5节取值')),
('An',('<i>A</i><sub>n</sub>','m<sup>2</sup>/m',1.0,'构件单位长度上顺风向的投影面积','对斜拉索、主缆和吊杆取外径计算')),
])
)
__deriveds__ = OrderedDict()
__deriveds__.update(wind_reference_speed.__deriveds__)
__deriveds__.update(
OrderedDict([
('Fg',('<i>F</i><sub>g</sub>','N/m',0,'构件单位长度上的风荷载')),
])
)
    # Table 5.2.2
_H = [40, 60, 80, 100, 150, 200, 300, 400]
table_GV = {
        # structure height: <40, 60, 80, 100, 150, 200, 300, 400
'A':(1.19, 1.18, 1.17, 1.16, 1.14, 1.13, 1.12, 1.11),
'B':(1.24, 1.22, 1.20, 1.19, 1.17, 1.16, 1.14, 1.13),
'C':(1.33, 1.29, 1.27, 1.26, 1.23, 1.21, 1.18, 1.16),
'D':(1.48, 1.42, 1.39, 1.36, 1.31, 1.28, 1.24, 1.22)
}
@staticmethod
def fFg(ρ, Ug, CD, An):
"""
计算静阵风荷载
《公路桥梁抗风设计规范》5.4.1节,公式(5.4.1)
"""
return 1/2*ρ*Ug**2*CD*An
def solve(self):
wind_reference_speed.solve(self)
i = self._findindex(self._H, self.H)
self.GV = self.table_GV[self.地表类别][i]
self.Ug = self.GV*self.Ud
self.Fg = self.fFg(self.ρ, self.Ug, self.CD, self.An)
def _html(self, digits):
for para in ('ρ','H','GV','Ug','CD','An'):
yield self.format(para, digits)
yield self.format('Fg',eq='1/2·ρ·Ug<sup>2</sup>·CD·An')
class flutter_stability(abacus):
"""
颤振稳定性
《公路桥梁抗风设计规范》(JTG/T 3360-01-2018) 第7.5节
"""
__title__ = '颤振稳定性'
__inputs__ = [
('B', '<i>B</i>', 'm', 0, '主梁断面特征宽度'),
('Ud', '<i>U</i><sub>d</sub>', 'm/s', 0, '设计基准风速'),
('Ks', '<i>K</i><sub>s</sub>', 'm', 0, '与截面形状有关的系数'),
('m', '<i>m</i>', 'kg/m', 0, '桥梁单位长度质量'),
('ρ', '<i>ρ</i>','kg/m<sup>3</sup>',1.25,'空气密度'),
('ηs', '<i>η</i><sub>s</sub>', '', 0, '形状系数'),
('ηα', '<i>η</i><sub>α</sub>', '', 0, '攻角效应系数'),
('Im', '<i>I</i><sub>m</sub>', 'kg*m<sup>2</sup>/m', 0, '主梁单位长度质量惯性矩'), # (6.7)
('ft', '<i>f</i><sub>t</sub>', 'Hz', 0, '主梁扭转基频'),
('γf', '<i>γ</i><sub>f</sub>', '', 1.4, '颤振稳定性分项系数'),
('γt', '<i>γ</i><sub>t</sub>', '', 1.0, '风速脉动空间影响系数'),
('γα', '<i>γ</i><sub>α</sub>', '', 1.0, '攻角效应分项系数'),
]
__deriveds__ = [
('b', '', 'm', 0, '主梁断面半宽'),
('If', '<i>I</i><sub>f</sub>', '', 0, '桥梁颤振稳定性指数'),
('r', '<i>r</i>', 'm', 0, '桥梁的惯性半径'),
('μ', '<i>μ</i>', '', 0, '桥梁结构与空气的密度比'),
('Uco', '<i>U</i><sub>co</sub>', 'm/s', 0, '理想平板颤振临界风速'),
('Uf', '<i>U</i><sub>f</sub>', 'm/s', 0, '颤振临界风速'),
('Uf_min', '<i>U</i><sub>f</sub>', 'm/s', 0, '颤振检验风速'),
]
def _solve_(B, Ud, Ks, m, ρ, μ, ft):
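        # NOTE (editor): scratch/manual check with hard-coded example values; it takes
        # no `self` and is never called by solve() below.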
B = 27.8
b=B/2
Ud = 24.4
Ks = 15
m = 16370
ρ = 1.25
μ = m/(pi*ρ*b**2)
ft = 0.95
If = Ks/sqrt(μ)*Ud/ft/B # (7.5.1)
if If<4:
ηs = 0.65
ηα=0.7
Im = 0.2
r = sqrt(Im/m)
Uco = 2.5*sqrt(μ*r/b)*ft*B # (7.5.4-2)
Uf = ηs*ηα*Uco # (7.5.4-1)
γf = 1.4
            γt = 1.33 # Table 7.5.8
γα = 1.0
Uf_min = γf*γt*γα*Ud # (7.5.8)
print(Uf)
print(Uf_min)
def solve(self):
self.b = self.B/2
self.μ = self.m/(pi*self.ρ*self.b**2)
self.If = self.Ks/sqrt(self.μ)*self.Ud/self.ft/self.B # (7.5.1)
if self.If<4:
self.r = sqrt(self.Im/self.m)
self.Uco = 2.5*sqrt(self.μ*self.r/self.b)*self.ft*self.B # (7.5.4-2)
self.Uf = self.ηs*self.ηα*self.Uco # (7.5.4-1)
self.Uf_min = self.γf*self.γt*self.γα*self.Ud # (7.5.8)
def _html(self, digits=2):
disableds = self.disableds()
if hasattr(self, '_inputs_'):
for attr in self._inputs_:
if hasattr(self, attr) and (not attr in disableds):
yield self.format(attr, digits = None)
if hasattr(self, '_deriveds_'):
for attr in self._deriveds_:
if hasattr(self, attr) and (not attr in disableds):
yield self.format(attr, digits = digits)
ok = self.Uf > self.Uf_min
if self.If<4:
yield self.format_conclusion(
ok,
self.format('Uf', digits, eq='ηs*ηα*Uco'),
'>' if ok else '≤',
self.format('Uf_min', digits, eq='γf*γt*γα*Ud'),
'{}满足规范式(7.5.8)的要求。'.format('' if ok else '不')
)
else:
yield '应利用节段模型风洞试验或虚拟风洞试验进行气动选型,并通过节段模型风洞试验或全桥气动弹性模型试验进行检验。'
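
# Editor's worked example for the flutter check of Eq. (7.5.8) (illustrative values):
# with γf = 1.4, γt = 1.33 (Table 7.5.8), γα = 1.0 and Ud = 24.4 m/s,
#     Uf_min = 1.4 * 1.33 * 1.0 * 24.4 ≈ 45.4 m/s,
# and the computed flutter critical speed Uf = ηs*ηα*Uco must exceed this value.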
| 36.258993
| 102
| 0.451587
|
edbc130ffd0d6ed36f3b91e78d9674457d393b2d
| 833
|
py
|
Python
|
iwork/api_urls.py
|
kellyyk/blueking_work1-5
|
3661d96ba12a9884227d2c4c559212398398c973
|
[
"Apache-2.0"
] | null | null | null |
iwork/api_urls.py
|
kellyyk/blueking_work1-5
|
3661d96ba12a9884227d2c4c559212398398c973
|
[
"Apache-2.0"
] | 3
|
2020-02-12T02:55:30.000Z
|
2021-06-10T21:39:23.000Z
|
iwork/api_urls.py
|
kellyyk/blueking_work1-5
|
3661d96ba12a9884227d2c4c559212398398c973
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
from django.conf.urls import patterns
urlpatterns = patterns(
'iwork.api_views',
(r'^get_host_capacity/$', 'get_host_capacity')
)
| 49
| 115
| 0.773109
|
dbc87814589d433494c8f68cc0eee4ee39d0ed77
| 1,091
|
py
|
Python
|
scripts/convert_notebooks.py
|
nnadeau/academic-kickstart
|
1696b6f9fc1c4069731bb1d473787bf772463158
|
[
"MIT"
] | null | null | null |
scripts/convert_notebooks.py
|
nnadeau/academic-kickstart
|
1696b6f9fc1c4069731bb1d473787bf772463158
|
[
"MIT"
] | 21
|
2020-04-08T12:17:11.000Z
|
2021-02-17T21:20:04.000Z
|
scripts/convert_notebooks.py
|
nnadeau/academic-kickstart
|
1696b6f9fc1c4069731bb1d473787bf772463158
|
[
"MIT"
] | null | null | null |
import logging
import subprocess
from pathlib import Path
from typing import Optional
import fire
import nbconvert
import nbformat
def main(path: Optional[str] = None):
if path:
paths = [Path(path)]
else:
# glob notebooks
paths = list((Path.cwd() / "content").rglob("*.ipynb"))
paths = [p for p in paths if ".ipynb_checkpoints" not in str(p.resolve())]
logging.info(f"Globbed {len(paths)} notebooks")
logging.info(f"Paths to convert: {paths}")
# convert
for p in paths:
logging.info(f"Exporting {p}")
args = ["jupyter", "nbconvert", p, "--to", "markdown"]
subprocess.run(args)
output = p.with_suffix(".md")
try:
with open(output) as f:
text = f.read()
except FileNotFoundError as e:
logging.error(e)
exit(1)
text = text.replace('<table border="1"', "<table")
with open(output, "w") as f:
f.write(text)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
fire.Fire(main)
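
# Editor's note: example invocations of the python-fire CLI defined above
# (paths are illustrative):
#     python convert_notebooks.py                                  # convert all notebooks under content/
#     python convert_notebooks.py --path content/post/demo/index.ipynb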
| 24.244444
| 82
| 0.579285
|
5b4b253beb1491b165669b2b289bdc13781af29f
| 909
|
py
|
Python
|
tensorflow_datasets/video/__init__.py
|
suvarnak/datasets
|
682b5adee6c36e9867f397076080ec23d9616dcc
|
[
"Apache-2.0"
] | 1
|
2019-03-02T22:54:29.000Z
|
2019-03-02T22:54:29.000Z
|
tensorflow_datasets/video/__init__.py
|
rsepassi/datasets
|
299f482da52aebe910e91053dbb06a36355f4cde
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/video/__init__.py
|
rsepassi/datasets
|
299f482da52aebe910e91053dbb06a36355f4cde
|
[
"Apache-2.0"
] | 1
|
2020-01-01T04:48:04.000Z
|
2020-01-01T04:48:04.000Z
|
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video datasets."""
from tensorflow_datasets.video.bair_robot_pushing import BairRobotPushingSmall
from tensorflow_datasets.video.moving_mnist import MovingMnist
from tensorflow_datasets.video.starcraft import StarcraftVideo
from tensorflow_datasets.video.starcraft import StarcraftVideoConfig
| 41.318182
| 78
| 0.80308
|
a7a0260d4a4ae9676e24e33944a886e76e03b9e8
| 9,400
|
py
|
Python
|
instrumentation/opentelemetry-instrumentation-pyramid/tests/test_programmatic.py
|
willarmiros/opentelemetry-python-contrib
|
0d34ef26b75f9a3bc275bf828b5a806d39ba1a40
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2021-07-18T07:59:09.000Z
|
2021-07-18T07:59:09.000Z
|
instrumentation/opentelemetry-instrumentation-pyramid/tests/test_programmatic.py
|
willarmiros/opentelemetry-python-contrib
|
0d34ef26b75f9a3bc275bf828b5a806d39ba1a40
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 3
|
2020-12-30T17:37:13.000Z
|
2021-06-06T01:02:30.000Z
|
instrumentation/opentelemetry-instrumentation-pyramid/tests/test_programmatic.py
|
open-o11y/opentelemetry-python-contrib
|
c5c6977584a3661f5698c3c45e3d92231db13f78
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2021-11-20T06:31:17.000Z
|
2021-11-20T06:31:17.000Z
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import Mock, patch
from pyramid.config import Configurator
from opentelemetry import trace
from opentelemetry.instrumentation.propagators import (
TraceResponsePropagator,
get_global_response_propagator,
set_global_response_propagator,
)
from opentelemetry.instrumentation.pyramid import PyramidInstrumentor
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.test.test_base import TestBase
from opentelemetry.test.wsgitestutil import WsgiTestBase
from opentelemetry.util.http import get_excluded_urls
# pylint: disable=import-error
from .pyramid_base_test import InstrumentationTest
def expected_attributes(override_attributes):
default_attributes = {
SpanAttributes.HTTP_METHOD: "GET",
SpanAttributes.HTTP_SERVER_NAME: "localhost",
SpanAttributes.HTTP_SCHEME: "http",
SpanAttributes.NET_HOST_PORT: 80,
SpanAttributes.HTTP_HOST: "localhost",
SpanAttributes.HTTP_TARGET: "/",
SpanAttributes.HTTP_FLAVOR: "1.1",
SpanAttributes.HTTP_STATUS_CODE: 200,
}
for key, val in override_attributes.items():
default_attributes[key] = val
return default_attributes
class TestProgrammatic(InstrumentationTest, TestBase, WsgiTestBase):
def setUp(self):
super().setUp()
config = Configurator()
PyramidInstrumentor().instrument_config(config)
self.config = config
self._common_initialization(self.config)
self.env_patch = patch.dict(
"os.environ",
{
"OTEL_PYTHON_PYRAMID_EXCLUDED_URLS": "http://localhost/excluded_arg/123,excluded_noarg"
},
)
self.env_patch.start()
self.exclude_patch = patch(
"opentelemetry.instrumentation.pyramid.callbacks._excluded_urls",
get_excluded_urls("PYRAMID"),
)
self.exclude_patch.start()
def tearDown(self):
super().tearDown()
with self.disable_logging():
PyramidInstrumentor().uninstrument_config(self.config)
def test_uninstrument(self):
resp = self.client.get("/hello/123")
self.assertEqual(200, resp.status_code)
self.assertEqual([b"Hello: 123"], list(resp.response))
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 1)
PyramidInstrumentor().uninstrument_config(self.config)
# Need to remake the WSGI app export
self._common_initialization(self.config)
resp = self.client.get("/hello/123")
self.assertEqual(200, resp.status_code)
self.assertEqual([b"Hello: 123"], list(resp.response))
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 1)
def test_simple(self):
expected_attrs = expected_attributes(
{
SpanAttributes.HTTP_TARGET: "/hello/123",
SpanAttributes.HTTP_ROUTE: "/hello/{helloid}",
}
)
self.client.get("/hello/123")
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 1)
self.assertEqual(span_list[0].name, "/hello/{helloid}")
self.assertEqual(span_list[0].kind, trace.SpanKind.SERVER)
self.assertEqual(span_list[0].attributes, expected_attrs)
def test_response_headers(self):
orig = get_global_response_propagator()
set_global_response_propagator(TraceResponsePropagator())
response = self.client.get("/hello/500")
headers = response.headers
span = self.memory_exporter.get_finished_spans()[0]
self.assertIn("traceresponse", headers)
self.assertEqual(
headers["access-control-expose-headers"], "traceresponse",
)
self.assertEqual(
headers["traceresponse"],
"00-{0}-{1}-01".format(
trace.format_trace_id(span.get_span_context().trace_id),
trace.format_span_id(span.get_span_context().span_id),
),
)
set_global_response_propagator(orig)
def test_not_recording(self):
mock_tracer = Mock()
mock_span = Mock()
mock_span.is_recording.return_value = False
mock_tracer.start_span.return_value = mock_span
with patch("opentelemetry.trace.get_tracer"):
self.client.get("/hello/123")
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 0)
self.assertFalse(mock_span.is_recording())
self.assertTrue(mock_span.is_recording.called)
self.assertFalse(mock_span.set_attribute.called)
self.assertFalse(mock_span.set_status.called)
def test_404(self):
expected_attrs = expected_attributes(
{
SpanAttributes.HTTP_METHOD: "POST",
SpanAttributes.HTTP_TARGET: "/bye",
SpanAttributes.HTTP_STATUS_CODE: 404,
}
)
resp = self.client.post("/bye")
self.assertEqual(404, resp.status_code)
resp.close()
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 1)
self.assertEqual(span_list[0].name, "HTTP POST")
self.assertEqual(span_list[0].kind, trace.SpanKind.SERVER)
self.assertEqual(span_list[0].attributes, expected_attrs)
def test_internal_error(self):
expected_attrs = expected_attributes(
{
SpanAttributes.HTTP_TARGET: "/hello/500",
SpanAttributes.HTTP_ROUTE: "/hello/{helloid}",
SpanAttributes.HTTP_STATUS_CODE: 500,
}
)
resp = self.client.get("/hello/500")
self.assertEqual(500, resp.status_code)
resp.close()
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 1)
self.assertEqual(span_list[0].name, "/hello/{helloid}")
self.assertEqual(span_list[0].kind, trace.SpanKind.SERVER)
self.assertEqual(span_list[0].attributes, expected_attrs)
def test_tween_list(self):
tween_list = "opentelemetry.instrumentation.pyramid.trace_tween_factory\npyramid.tweens.excview_tween_factory"
config = Configurator(settings={"pyramid.tweens": tween_list})
PyramidInstrumentor().instrument_config(config)
self._common_initialization(config)
resp = self.client.get("/hello/123")
self.assertEqual(200, resp.status_code)
self.assertEqual([b"Hello: 123"], list(resp.response))
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 1)
PyramidInstrumentor().uninstrument_config(config)
# Need to remake the WSGI app export
self._common_initialization(config)
resp = self.client.get("/hello/123")
self.assertEqual(200, resp.status_code)
self.assertEqual([b"Hello: 123"], list(resp.response))
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 1)
@patch("opentelemetry.instrumentation.pyramid.callbacks._logger")
def test_warnings(self, mock_logger):
tween_list = "pyramid.tweens.excview_tween_factory"
config = Configurator(settings={"pyramid.tweens": tween_list})
PyramidInstrumentor().instrument_config(config)
self._common_initialization(config)
self.client.get("/hello/123")
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 0)
self.assertEqual(mock_logger.warning.called, True)
mock_logger.warning.called = False
tween_list = (
"opentelemetry.instrumentation.pyramid.trace_tween_factory"
)
config = Configurator(settings={"pyramid.tweens": tween_list})
self._common_initialization(config)
self.client.get("/hello/123")
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 0)
self.assertEqual(mock_logger.warning.called, True)
def test_exclude_lists(self):
self.client.get("/excluded_arg/123")
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 0)
self.client.get("/excluded_arg/125")
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 1)
self.client.get("/excluded_noarg")
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 1)
self.client.get("/excluded_noarg2")
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 1)
| 38.52459
| 118
| 0.674255
|
55f5e32c3bc577bbed3936951abbd391c2ebc823
| 5,538
|
py
|
Python
|
MLOps/Neptune.AI/examples_ThetaGPU/1Layer_ANN/run_trainTurbModel.py
|
rickybalin/ALCF
|
3696756d2af90f1ba179caa46d2001d07db5e01d
|
[
"BSD-3-Clause"
] | null | null | null |
MLOps/Neptune.AI/examples_ThetaGPU/1Layer_ANN/run_trainTurbModel.py
|
rickybalin/ALCF
|
3696756d2af90f1ba179caa46d2001d07db5e01d
|
[
"BSD-3-Clause"
] | null | null | null |
MLOps/Neptune.AI/examples_ThetaGPU/1Layer_ANN/run_trainTurbModel.py
|
rickybalin/ALCF
|
3696756d2af90f1ba179caa46d2001d07db5e01d
|
[
"BSD-3-Clause"
] | null | null | null |
# General imports
import numpy as np
from time import perf_counter
from datetime import datetime
import logging
import argparse
import torch
# Neptune
import neptune.new as neptune
# Import help functions
from NeuralNets import trainNN, predictNN, timeStats
## Set up logger
def setup_logger(name, log_file, level=logging.INFO):
"""To setup as many loggers as you want"""
handler = logging.FileHandler(log_file,mode='w')
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(level)
logger.addHandler(handler)
return logger
## Main
def main():
# Start timer for entire program
t_start = perf_counter()
# Create log files
now = datetime.now()
date_string = now.strftime("%Y-%m-%d_%H-%M-%S_")
logger_info = setup_logger('info', date_string+'info.log')
logger_conv = setup_logger('convergence', date_string+'convergence.log')
logger_time = setup_logger('time_stats', date_string+'time.log')
# Parse arguments
parser = argparse.ArgumentParser(description='')
parser.add_argument('--device',default='cpu',help='Device to run on')
parser.add_argument('--batch',default=64,type=int,help='Batch size')
parser.add_argument('--precision',default='float',help='Precision to be used for training and inference')
parser.add_argument('--tolerance',default=7.0e-5,help='Tolerance on loss function')
parser.add_argument('--Nepochs',default=10,type=int,help='Number of epochs to train for')
parser.add_argument('--learning_rate',default=0.001,help='Learning rate')
parser.add_argument('--nNeurons',default=20,type=int,help='Number of neurons in network layer')
parser.add_argument('--nSamples',default=100000,type=int,help='Number of training and inference samples')
parser.add_argument('--nInputs',default=6,type=int,help='Number of model input features')
parser.add_argument('--nOutputs',default=6,type=int,help='Number of model output targets')
args = parser.parse_args()
logger_info.info("Training parameters:")
logger_info.info("Precision: %s",args.precision)
logger_info.info("Tolerance: %.12e",args.tolerance)
logger_info.info("Number of epochs: %d",args.Nepochs)
logger_info.info("Training mini-batch size: %d",args.batch)
logger_info.info("Inference mini-batch size: %d",args.batch)
logger_info.info("Learning rate: %.12e",args.learning_rate)
logger_info.info("Number of neurons: %d",args.nNeurons)
logger_info.info("Number of samples: %d",args.nSamples)
logger_info.info("Number of inputs: %d",args.nInputs)
logger_info.info("Number of outputs: %d",args.nOutputs)
logger_info.info("")
# Set device to run on
device = torch.device(args.device)
logger_info.info('Running on device: %s\n', args.device)
# Initialize Neptune logging
run = neptune.init(
project="rickybalin/testALCF",
api_token="eyJhcGlfYWRkcmVzcyI6Imh0dHBzOi8vYXBwLm5lcHR1bmUuYWkiLCJhcGlfdXJsIjoiaHR0cHM6Ly9hcHAubmVwdHVuZS5haSIsImFwaV9rZXkiOiJhMjllYmRkMS1lMzI2LTQ0NzctOWE2MS05M2MwNzE2YzhhYzkifQ==",
custom_run_id='laptop-1',
name='laptop',
description='run on my laptop',
source_files=["*.py"],
) # my credentials taken from the project I created on my Neptune account
# Log training parameters
train_params = {'n_epochs': args.Nepochs, 'mini_batch': args.batch,
'learning_rate': args.learning_rate, 'n_samples': args.nSamples}
model_params = {'n_neurons': args.nNeurons, 'n_inputs': args.nInputs, 'n_outputs': args.nOutputs}
system_params = {'device': args.device}
run['train_params'] = train_params
run['model_params'] = model_params
run['system_params'] = system_params
# Load the test data
logger_info.info("Computing inputs and outputs ...")
inputs = np.random.rand(args.nSamples,args.nInputs)
outputs = np.random.rand(args.nSamples,args.nOutputs)
logger_info.info("Done\n")
print('Generated training data \n')
# Log training data on Neptune
# If loaded data from file: run["dataset/train_data"].upload("./data/train_data.csv")
# Can also ave dataset versions as Neptune artifacts with the track_files() method
# run["dataset/train_data"].track_files('data/train_data.csv')
# Train and output model
logger_info.info("Training model ...")
print("Training model ... \n")
#t_start_train = perf_counter()
model, timeStats = trainNN(inputs, outputs, args, logger_conv, run)
#t_end_train = perf_counter()
logger_info.info("Done\n")
print('Done training \n')
# Make some predictions
logger_info.info("Making Predictions ...")
print("Making predictions ... \n")
inputs = np.random.rand(args.nSamples,args.nInputs)
outputs = np.random.rand(args.nSamples,args.nOutputs)
#t_start_pred = perf_counter()
predictions, accuracy, timeStats = predictNN(model, inputs, outputs, args)
#t_end_pred = perf_counter()
logger_info.info("Done\n")
print('Done\n')
# End timer for entire program
t_end = perf_counter()
# Print some timing information
logger_time.info("Total run time: %.12e", t_end - t_start)
logger_time.info("Total train time: %.12e", timeStats.t_train)
logger_time.info("Total prediction time: %.12e", timeStats.t_inf)
# Stop Neptune logging
run.stop()
if __name__ == '__main__':
main()
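
# Editor's note: example invocation matching the argparse flags defined above
# (all values are illustrative):
#     python run_trainTurbModel.py --device cpu --batch 128 --Nepochs 5 \
#         --nNeurons 20 --nSamples 10000 --nInputs 6 --nOutputs 6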
| 40.423358
| 195
| 0.698989
|
7c33e0364cc970a0e2431daa3333d37b2aee679c
| 4,537
|
py
|
Python
|
src/m2/src/feat38.py
|
pvzteam/pvz_recsys2019
|
3fd14d3b82033474d2e172402abd0ebc5e7b0afc
|
[
"Apache-2.0"
] | 1
|
2019-07-24T08:41:53.000Z
|
2019-07-24T08:41:53.000Z
|
src/m2/src/feat38.py
|
pvzteam/pvz_recsys2019
|
3fd14d3b82033474d2e172402abd0ebc5e7b0afc
|
[
"Apache-2.0"
] | null | null | null |
src/m2/src/feat38.py
|
pvzteam/pvz_recsys2019
|
3fd14d3b82033474d2e172402abd0ebc5e7b0afc
|
[
"Apache-2.0"
] | 1
|
2020-12-02T09:49:12.000Z
|
2020-12-02T09:49:12.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Standard library modules
import os
import sys
import gc
import json
import time
import functools
from datetime import datetime
# Data processing
import numpy as np
import pandas as pd
from math import sqrt
from collections import Counter
from sklearn.feature_extraction.text import CountVectorizer
# Custom utilities
sys.path.append('../tools/')
import loader
import cate_encoding
import custom_cate_encoding
# Set random seed
SEED = 2018
np.random.seed (SEED)
FEA_NUM = 38
input_root_path = '../input/'
output_root_path = '../feature/'
tr_base_path = input_root_path + 'train.ftr'
te_base_path = input_root_path + 'test.ftr'
cv_id_path = input_root_path + 'cv_id.csv.0329'
postfix = 's0_{}'.format(FEA_NUM)
file_type = 'ftr'
# Feature files produced by this script
tr_fea_out_path = output_root_path + 'tr_fea_{}.{}'.format(postfix, file_type)
te_fea_out_path = output_root_path + 'te_fea_{}.{}'.format(postfix, file_type)
# Full training data after merging these features with previously generated ones
tr_out_path = '../../../feat/' + 'm2_tr_{}.{}'.format(postfix, file_type)
te_out_path = '../../../feat/' + 'm2_te_{}.{}'.format(postfix, file_type)
ID_NAMES = ['session_id', 'impressions']
TARGET_NAME = 'target'
def feat_extract(df):
df['item_impr_count'] = \
df.groupby('impressions')['session_id'].transform('nunique')
df_feat = df[ID_NAMES + ['item_impr_count']]
df_feat = cate_encoding.cate_num_stat(df, df_feat, \
['session_id'], 'item_impr_count', ['min', 'max', 'median', 'std'])
df_feat['item_impr_count_sub_session_median'] = \
df_feat['item_impr_count'] - df_feat['session_id_by_item_impr_count_median']
df_feat['item_impr_count_div_session_median'] = \
df_feat['item_impr_count'] / df_feat['session_id_by_item_impr_count_median']
print (df_feat.shape)
print (df_feat.head())
print (df_feat.columns.tolist())
return df_feat
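
# Editor's sketch (standalone, illustrative) of the core groupby-transform used above:
#     import pandas as pd
#     df = pd.DataFrame({'session_id': ['a', 'a', 'b', 'b', 'c'],
#                        'impressions': [1, 2, 1, 3, 1]})
#     df['item_impr_count'] = df.groupby('impressions')['session_id'].transform('nunique')
#     # impression 1 was shown in sessions a, b and c -> item_impr_count == 3 on those rows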
def output_fea(tr, te):
    # Reorder features to keep the output column order consistent
    # ...
    # Keep only the primary keys and the newly added features in the feature file
#primary_keys = ['session_id', 'impressions']
#fea_cols = []
#required_cols = primary_keys + fea_cols
    # Write out the features
#tr = tr[required_cols]
#te = te[required_cols]
print (tr.head())
print (te.head())
loader.save_df(tr, tr_fea_out_path)
loader.save_df(te, te_fea_out_path)
# Generate features
def gen_fea(base_tr_path=None, base_te_path=None):
#tr = loader.load_df('../input/train.ftr')
#te = loader.load_df('../input/test.ftr')
tr = loader.load_df('../input/tr.ftr')
te = loader.load_df('../input/te.ftr')
#tr = loader.load_df('../feature/tr_s0_0.ftr')
#te = loader.load_df('../feature/te_s0_0.ftr')
#tr = loader.load_df('../feature/tr_fea_s0_1.ftr')
#te = loader.load_df('../feature/te_fea_s0_1.ftr')
#tr = tr.head(1000)
#te = te.head(1000)
df_base = pd.concat([tr, te])
df_feat = feat_extract(df_base)
tr_sample = loader.load_df('../feature/tr_s0_0.ftr')
te_sample = loader.load_df('../feature/te_s0_0.ftr')
merge_keys = ['session_id', 'impressions']
#merge_keys = ['session_id']
#merge_keys = ['impressions']
tr = tr_sample[ID_NAMES].merge(df_feat, on=merge_keys, how='left')
te = te_sample[ID_NAMES].merge(df_feat, on=merge_keys, how='left')
float_cols = [c for c in tr.columns if tr[c].dtype == 'float']
tr[float_cols] = tr[float_cols].astype('float32')
te[float_cols] = te[float_cols].astype('float32')
print (tr.shape, te.shape)
print (tr.head())
print (te.head())
print (tr.columns)
output_fea(tr, te)
# Merge existing features
def merge_fea(tr_list, te_list):
tr = loader.merge_fea(tr_list, primary_keys=ID_NAMES)
te = loader.merge_fea(te_list, primary_keys=ID_NAMES)
tr['impressions'] = tr['impressions'].astype('int')
te['impressions'] = te['impressions'].astype('int')
print (tr.head())
print (te.head())
print (tr[ID_NAMES].head())
loader.save_df(tr, tr_out_path)
loader.save_df(te, te_out_path)
if __name__ == "__main__":
print('start time: %s' % datetime.now())
root_path = '../feature/'
base_tr_path = root_path + 'tr_s0_0.ftr'
base_te_path = root_path + 'te_s0_0.ftr'
gen_fea()
# merge fea
prefix = 's0'
fea_list = [1,3,6,8,9,14,15,22,24,26,27,35,36,37,FEA_NUM]
tr_list = [base_tr_path] + \
[root_path + 'tr_fea_{}_{}.ftr'.format(prefix, i) for i in fea_list]
te_list = [base_te_path] + \
[root_path + 'te_fea_{}_{}.ftr'.format(prefix, i) for i in fea_list]
merge_fea(tr_list, te_list)
print('all completed: %s' % datetime.now())
| 26.074713
| 88
| 0.659026
|
6c17dcb7c0626ebfea970e94b97807e2e321e8f5
| 14,818
|
py
|
Python
|
lettuce/features/softwaresupport/kea4_server_bind/functions.py
|
godfryd/forge
|
711cae4c59be06229b6aad09941e643b8ff972fd
|
[
"ISC"
] | null | null | null |
lettuce/features/softwaresupport/kea4_server_bind/functions.py
|
godfryd/forge
|
711cae4c59be06229b6aad09941e643b8ff972fd
|
[
"ISC"
] | null | null | null |
lettuce/features/softwaresupport/kea4_server_bind/functions.py
|
godfryd/forge
|
711cae4c59be06229b6aad09941e643b8ff972fd
|
[
"ISC"
] | null | null | null |
# Copyright (C) 2013 Internet Systems Consortium.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from softwaresupport.multi_server_functions import fabric_run_command, fabric_send_file, remove_local_file,\
copy_configuration_file
from lettuce import world
from logging_facility import *
from textwrap import dedent
from logging_facility import get_common_logger
from softwaresupport.kea6_server_bind.functions import search_for_errors, parsing_bind_stdout, prepare_config_file,\
set_logger, cfg_write, set_time, save_leases, save_logs, clear_all
world.kea_options4 = {"subnet-mask": 1, # ipv4-address (array)
"time-offset": 2,
"routers": 3, # ipv4-address (single)
"time-servers": 4, # ipv4-address (single)
"name-servers": 5, # ipv4-address (array)
"domain-name-servers": 6, # ipv4-address (array)
"log-servers": 7, # ipv4-address (single)
"cookie-servers": 8, # ipv4-address (single)
"lpr-servers": 9, # ipv4-address (single)
"impress-servers": 10, # ipv4-address (single)
"resource-location-servers": 11, # ipv4-address (single)
"host-name": 12, # string
"boot-size": 13,
"merit-dump": 14, # string
"domain-name": 15, # fqdn (single)
"swap-server": 16, # ipv4-address (single)
"root-path": 17, # string
"extensions-path": 18, # string
"ip-forwarding": 19, # boolean
"non-local-source-routing": 20, # boolean
"policy-filter": 21, # ipv4-address (single)
"max-dgram-reassembly": 22,
"default-ip-ttl": 23,
"path-mtu-aging-timeout": 24,
"path-mtu-plateau-table": 25,
"interface-mtu": 26,
"all-subnets-local": 27, # boolean
"broadcast-address": 28, # ipv4-address (single)
"perform-mask-discovery": 29, # boolean
"mask-supplier": 30, # boolean
"router-discovery": 31, # boolean
"router-solicitation-address": 32, # ipv4-address (single)
"static-routes": 33, # ipv4-address (array)
"trailer-encapsulation": 34, # boolean
"arp-cache-timeout": 35,
"ieee802-3-encapsulation": 36,
"default-tcp-ttl": 37,
"tcp-keepalive-interval": 38,
"tcp-keepalive-garbage": 39, # boolean
"nis-domain": 40, # string (single)
"nis-servers": 41, # ipv4-address (array)
"ntp-servers": 42, # ipv4-address (array)
"vendor-encapsulated-options": 43, # empty
"netbios-name-servers": 44, # ipv4-address
"netbios-dd-server": 45, # ipv4-address
"netbios-node-type": 46, # uint8
"netbios-scope": 47, # string
"font-servers": 48, # ipv4-address
"x-display-manager": 49, # ipv4-address
"dhcp-requested-address": 50, # ipv4-address
"dhcp-option-overload": 52, # uint8
"server_id": 54,
"dhcp-message": 56, # string
"dhcp-max-message-size": 57, # uint16
"vendor-class-identifier": 60, # binary
"client_id": 61,
"nwip-domain-name": 62, # string
"nwip-suboptions": 63, # binary
"boot-file-name": 67, #string
"user-class": 77, # binary
"fqdn": 81, # record
"dhcp-agent-options": 82, # empty
"authenticate": 90, # binary
"client-last-transaction-time": 91, # uint32
"associated-ip": 92, # ipv4-address
"subnet-selection": 118, # ipv4-address
"domain-search": 119, # binary
"vivco-suboptions": 124, # binary
"vivso-suboptions": 125, # binary
"end": 255}
def check_empty_value(val):
return ("false", "") if val == "<empty>" else ("true", val)
def prepare_cfg_subnet(step, subnet, pool):
if not "conf" in world.cfg:
world.cfg["conf"] = ""
eth = world.f_cfg.server_iface
    # subnet definition for Kea4
t1 = world.cfg["server_times"]["renew-timer"]
t2 = world.cfg["server_times"]["rebind-timer"]
t3 = world.cfg["server_times"]["valid-lifetime"]
subnetcfg = '''
config set Dhcp4/renew-timer {t1}
config set Dhcp4/rebind-timer {t2}
config set Dhcp4/valid-lifetime {t3}
config add Dhcp4/subnet4
config set Dhcp4/subnet4[0]/subnet "{subnet}"
config set Dhcp4/subnet4[0]/pool [ "{pool}" ]
'''.format(**locals())
if eth != "":
world.cfg["conf"] += '''
config add Dhcp4/interfaces "{eth}"
'''.format(**locals())
world.cfg["conf"] += dedent(subnetcfg)
world.dhcp["subnet_cnt"] += 1
def config_srv_another_subnet(step, subnet, pool, interface):
count = world.dhcp["subnet_cnt"]
subnetcfg = '''
config add Dhcp4/subnet4
config set Dhcp4/subnet4[{count}]/subnet "{subnet}"
config set Dhcp4/subnet4[{count}]/pool [ "{pool}" ]
'''.format(**locals())
if interface is not None:
world.cfg["conf"] += '''
config add Dhcp4/interfaces "{interface}"
'''.format(**locals())
world.cfg["conf"] += dedent(subnetcfg)
world.dhcp["subnet_cnt"] += 1
def config_client_classification(step, subnet, option_value):
world.cfg["conf"] += '''
config set Dhcp4/subnet4[{subnet}]/client-class "{option_value}"
'''.format(**locals())
def prepare_cfg_add_custom_option(step, opt_name, opt_code, opt_type, opt_value, space):
if not "conf" in world.cfg:
world.cfg["conf"] = ""
number = world.dhcp["option_cnt"]
number_def = world.dhcp["option_usr_cnt"]
csv_format, opt_value = check_empty_value(opt_value)
world.cfg["conf"] += '''config add Dhcp4/option-def
config set Dhcp4/option-def[{number_def}]/name "{opt_name}"
config set Dhcp4/option-def[{number_def}]/code {opt_code}
config set Dhcp4/option-def[{number_def}]/type "{opt_type}"
config set Dhcp4/option-def[{number_def}]/array false
config set Dhcp4/option-def[{number_def}]/record-types ""
config set Dhcp4/option-def[{number_def}]/space "{space}"
config set Dhcp4/option-def[{number_def}]/encapsulate ""
config add Dhcp4/option-data
config set Dhcp4/option-data[{number}]/name "{opt_name}"
config set Dhcp4/option-data[{number}]/code {opt_code}
config set Dhcp4/option-data[{number}]/space "{space}"
config set Dhcp4/option-data[{number}]/csv-format {csv_format}
config set Dhcp4/option-data[{number}]/data "{opt_value}"
'''.format(**locals())
world.dhcp["option_usr_cnt"] += 1
world.dhcp["option_cnt"] += 1
def add_siaddr(step, addr, subnet_number):
if subnet_number is None:
world.cfg["conf"] += '''
config set Dhcp4/next-server "{addr}"
'''.format(**locals())
else:
world.cfg["conf"] += '''
config set Dhcp4/subnet4[{subnet_number}]/next-server "{addr}"
'''.format(**locals())
def prepare_cfg_add_option_subnet(step, option_name, subnet, option_value):
assert option_name in world.kea_options4, "Unsupported option name " + option_name
option_code = world.kea_options4.get(option_name)
csv_format, option_value = check_empty_value(option_value)
# need to have numbers for multiple options for each subnet!
world.cfg["conf"] += '''
config add Dhcp4/subnet4[{subnet}]/option-data
config set Dhcp4/subnet4[{subnet}]/option-data[0]/name "{option_name}"
config set Dhcp4/subnet4[{subnet}]/option-data[0]/code {option_code}
config set Dhcp4/subnet4[{subnet}]/option-data[0]/space "dhcp4"
config set Dhcp4/subnet4[{subnet}]/option-data[0]/csv-format {csv_format}
config set Dhcp4/subnet4[{subnet}]/option-data[0]/data "{option_value}"
'''.format(**locals())
def run_command(step, command):
world.cfg["conf"] += ('\n'+command+'\n')
def disable_client_echo(step):
    # After using this, remember to revert it at the end of the test;
    # keep that in mind the first time you use it.
world.cfg["conf"] += '''
config set Dhcp4/echo-client-id False
config commit
'''.format(**locals())
def add_interface(step, interface):
    # not yet tested!
world.cfg["conf"] += '''
config add Dhcp4/interfaces {interface}
'''.format(**locals())
def prepare_cfg_add_option(step, option_name, option_value, space):
if not "conf" in world.cfg:
world.cfg["conf"] = ""
assert option_name in world.kea_options4, "Unsupported option name " + option_name
option_code = world.kea_options4.get(option_name)
csv_format, option_value = check_empty_value(option_value)
option_cnt = world.dhcp["option_cnt"]
options = '''
config add Dhcp4/option-data
config set Dhcp4/option-data[{option_cnt}]/name "{option_name}"
config set Dhcp4/option-data[{option_cnt}]/code {option_code}
config set Dhcp4/option-data[{option_cnt}]/space "{space}"
config set Dhcp4/option-data[{option_cnt}]/csv-format {csv_format}
config set Dhcp4/option-data[{option_cnt}]/data "{option_value}"
'''.format(**locals())
world.cfg["conf"] += dedent(options)
world.dhcp["option_cnt"] += 1
def prepare_cfg_kea4_for_kea4_start(filename):
"""
config file for kea4 start
"""
config = '''
# This config file starts b10-dhcp4 server.
config add Init/components b10-dhcp4
config set Init/components/b10-dhcp4/kind dispensable
config commit
'''
cfg_file = open(filename, "w")
cfg_file.write(config)
cfg_file.close()
def prepare_cfg_kea4_for_kea4_stop(filename):
"""
config file for kea4 clear configuration and stopping
"""
config = '''
# This config file stops b10-dhcp4 server and removes its configuration.
# Get rid of any subnets
config set Dhcp4/subnet4 []
# Get rid of any option format definitions
config set Dhcp4/option-def []
# Get rid of any option values
config set Dhcp4/option-data []
# clear loggers
config set Logging/loggers []
#config set Dhcp4/echo-client-id True
config set Dhcp4/next-server ""
config set Dhcp4/interfaces []
config commit
# Stop b10-dhcp4 server from starting again
config remove Init/components b10-dhcp4
config commit
# And stop it
Dhcp4 shutdown
'''
cfg_file = open(filename, "w")
cfg_file.write(config)
cfg_file.close()
def run_bindctl(succeed, opt):
"""
    Run bindctl with the prepared config file.
"""
world.cfg['leases'] = world.f_cfg.software_install_path + 'var/bind10/kea-leases4.csv'
if opt == "clean":
get_common_logger().debug('cleaning kea configuration')
cfg_file = 'kea4-stop.cfg'
prepare_cfg_kea4_for_kea4_stop(cfg_file)
prepare_config_file(cfg_file)
fabric_send_file(cfg_file + '_processed', cfg_file + '_processed')
remove_local_file(cfg_file + '_processed')
if opt == "start":
if world.f_cfg.save_logs:
set_logger()
get_common_logger().debug('starting fresh kea')
cfg_file = 'kea4-start.cfg'
prepare_cfg_kea4_for_kea4_start(cfg_file)
prepare_config_file(cfg_file)
fabric_send_file(cfg_file + '_processed', cfg_file + '_processed')
remove_local_file(cfg_file + '_processed')
if opt == "configuration":
get_common_logger().debug('kea configuration')
cfg_file = world.cfg["cfg_file"]
prepare_config_file(cfg_file)
add_last = open(cfg_file + "_processed", 'a')
        # append 'config commit'; it is intentionally not added earlier
add_last.write("config commit")
add_last.close()
fabric_send_file(cfg_file + '_processed', cfg_file + '_processed')
copy_configuration_file(cfg_file + '_processed')
remove_local_file(cfg_file + '_processed')
world.cfg["conf"] = ""
if opt == "restart":
restart_srv()
result = fabric_run_command('(echo "execute file ' + cfg_file + '_processed" | '
+ world.f_cfg.software_install_path + 'bin/bindctl ); sleep 1')
search_for_errors(succeed, opt, result, ["ImportError:", '"config revert".', "Error"])
parsing_bind_stdout(result.stdout, opt, ['Broken pipe'])
def start_srv(start, process):
configuration = True
start = True
clean = True
    # Switch one of the three process flags to False; which one is decided by the
    # 'Server failed to start. During (\S+) process.' step.
if process is None and start:
pass
elif process == 'configuration':
configuration = False
elif process == 'start':
start = False
elif process == 'clean':
clean = False
else:
assert False, "Process: '" + process + "' not supported."
cfg_write()
get_common_logger().debug("Bind10, dhcp4 configuration procedure:")
run_bindctl(clean, 'clean') # clean and stop
run_bindctl(start, 'start') # start
run_bindctl(configuration, 'configuration') # conf
def stop_srv(value = False):
    # value is not used but has to be here
run_bindctl(True, 'clean')
def restart_srv():
    # The sleep can't be less than 7 seconds; the server needs time to restart.
fabric_run_command('(echo "Dhcp4 shutdown" | ' + world.f_cfg.software_install_path + 'bin/bindctl ); sleep 10')
def prepare_cfg_prefix(step, prefix, length, delegated_length, subnet):
assert False, "This function can be used only with DHCPv6"
| 39.097625
| 116
| 0.611621
|
36f66bfdecde3132e6cb0c74afc512cddee97f9f
| 3,114
|
py
|
Python
|
droxi/drox/resolver.py
|
andydude/droxtools
|
d608ceb715908fb00398c0d28eee74286fef3750
|
[
"MIT"
] | null | null | null |
droxi/drox/resolver.py
|
andydude/droxtools
|
d608ceb715908fb00398c0d28eee74286fef3750
|
[
"MIT"
] | null | null | null |
droxi/drox/resolver.py
|
andydude/droxtools
|
d608ceb715908fb00398c0d28eee74286fef3750
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# droxi
# Copyright (c) 2014, Andrew Robbins, All rights reserved.
#
# This library ("it") is free software; it is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; you can redistribute it and/or modify it under the terms of the
# GNU Lesser General Public License ("LGPLv3") <https://www.gnu.org/licenses/lgpl.html>.
from __future__ import absolute_import
import importlib
from .config import DEBUG
from .models import Sym
from .etree import etree
class BuiltinResolver(object):
def __init__(self, cdbase=None, package=None):
self.cdbase = cdbase
self.package = package
def __call__(self, sym):
if not isinstance(sym, Sym):
            raise ValueError(sym)
_, cd, name = sym.omsym
module_name = '.'.join([self.package, cd, '_mapping'])
if DEBUG: print("module_name = " + module_name)
DictCls = importlib.import_module(module_name)
if hasattr(DictCls, '_namespace_mapping'): # preferred
ElemCls = getattr(DictCls._namespace_mapping, name)
elif hasattr(DictCls, '__content_dictionary_mapping__'): # legacy (yesterday)
ElemCls = DictCls.__content_dictionary_mapping__[name]
else:
ElemCls = getattr(DictCls, name)
# if hasattr(DictCls, '__content_dictionary_mapping__'):
# ElemCls = DictCls.__content_dictionary_mapping__[name]
# else:
# ElemCls = getattr(DictCls, name)
url = self.cdbase + '/' + cd + '#' + name
ast = ElemCls(url)
return ast
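# Illustrative usage sketch; the package name "mycds" and the symbol
# ('arith1', 'plus') are assumptions used only to show the import path that
# __call__ builds:
#   resolver = BuiltinResolver(cdbase="http://example.org/cd", package="mycds")
#   ast = resolver(sym)   # sym.omsym unpacks to (_, "arith1", "plus")
#   # -> imports mycds.arith1._mapping and returns
#   #    ElemCls("http://example.org/cd/arith1#plus")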
class BuiltinReader(object):
def __init__(self, ns=None, package=None):
self.package = package
self.ns = ns
def __call__(self, sym, tree):
if not etree.iselement(tree):
            raise ValueError(tree)
ns, name = Sym.from_etree(tree.tag).xmlns
if ns != self.ns:
            raise ValueError(ns)
module_name = '.'.join([self.package, '_mapping'])
if DEBUG: print("module_name = " + module_name)
DictCls = importlib.import_module(module_name)
if hasattr(DictCls, '_namespace_mapping'): # preferred
ElemCls = getattr(DictCls._namespace_mapping, name)
elif hasattr(DictCls, '__content_dictionary_mapping__'): # legacy (yesterday)
ElemCls = DictCls.__content_dictionary_mapping__[name]
else:
ElemCls = getattr(DictCls, name)
ast = ElemCls.from_cmathml(tree)
return ast
class BuiltinWriter(object):
def __init__(self, ns=None, package=None):
self.package = package
self.ns = ns
def __call__(self, ast):
try:
tree = ast.__tree__() # preferred
return tree
except Exception as err:
try:
tree = ast.cmathml # legacy (yesterday)
return tree
except Exception as err:
print("cought exception in Writer" + repr(err))
raise
raise NotImplementedError
| 34.21978
| 93
| 0.614644
|
e974bbd0911a9ecec9d892fbeac7384493209056
| 85,324
|
py
|
Python
|
venv/Lib/site-packages/numpy/testing/_private/utils.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 41
|
2021-06-19T13:57:18.000Z
|
2021-12-02T17:08:53.000Z
|
venv/Lib/site-packages/numpy/testing/_private/utils.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 14
|
2021-03-26T20:54:22.000Z
|
2021-04-06T17:18:53.000Z
|
venv/Lib/site-packages/numpy/testing/_private/utils.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 8
|
2021-06-19T14:25:50.000Z
|
2022-03-25T02:00:29.000Z
|
"""
Utility function to facilitate testing.
"""
import os
import sys
import platform
import re
import gc
import operator
import warnings
from functools import partial, wraps
import shutil
import contextlib
from tempfile import mkdtemp, mkstemp
from unittest.case import SkipTest
from warnings import WarningMessage
import pprint
from numpy.core import(
intp, float32, empty, arange, array_repr, ndarray, isnat, array)
import numpy.linalg.lapack_lite
from io import StringIO
__all__ = [
'assert_equal', 'assert_almost_equal', 'assert_approx_equal',
'assert_array_equal', 'assert_array_less', 'assert_string_equal',
'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
'raises', 'rundocs', 'runstring', 'verbose', 'measure',
'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
'_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles',
'break_cycles', 'HAS_LAPACK64'
]
class KnownFailureException(Exception):
'''Raise this exception to mark a test as a known failing test.'''
pass
KnownFailureTest = KnownFailureException # backwards compat
verbose = 0
IS_PYPY = platform.python_implementation() == 'PyPy'
HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None
HAS_LAPACK64 = numpy.linalg.lapack_lite._ilp64
def import_nose():
""" Import nose only when needed.
"""
nose_is_good = True
minimum_nose_version = (1, 0, 0)
try:
import nose
except ImportError:
nose_is_good = False
else:
if nose.__versioninfo__ < minimum_nose_version:
nose_is_good = False
if not nose_is_good:
msg = ('Need nose >= %d.%d.%d for tests - see '
'https://nose.readthedocs.io' %
minimum_nose_version)
raise ImportError(msg)
return nose
def assert_(val, msg=''):
"""
Assert that works in release mode.
Accepts callable msg to allow deferring evaluation until failure.
The Python built-in ``assert`` does not work when executing code in
optimized mode (the ``-O`` flag) - no byte-code is generated for it.
For documentation on usage, refer to the Python documentation.
"""
__tracebackhide__ = True # Hide traceback for py.test
if not val:
try:
smsg = msg()
except TypeError:
smsg = msg
raise AssertionError(smsg)
def gisnan(x):
"""like isnan, but always raise an error if type not supported instead of
returning a TypeError object.
Notes
-----
isnan and other ufunc sometimes return a NotImplementedType object instead
of raising any exception. This function is a wrapper to make sure an
exception is always raised.
This should be removed once this problem is solved at the Ufunc level."""
from numpy.core import isnan
st = isnan(x)
if isinstance(st, type(NotImplemented)):
raise TypeError("isnan not supported for this type")
return st
def gisfinite(x):
"""like isfinite, but always raise an error if type not supported instead
of returning a TypeError object.
Notes
-----
isfinite and other ufunc sometimes return a NotImplementedType object
instead of raising any exception. This function is a wrapper to make sure
an exception is always raised.
This should be removed once this problem is solved at the Ufunc level."""
from numpy.core import isfinite, errstate
with errstate(invalid='ignore'):
st = isfinite(x)
if isinstance(st, type(NotImplemented)):
raise TypeError("isfinite not supported for this type")
return st
def gisinf(x):
"""like isinf, but always raise an error if type not supported instead of
returning a TypeError object.
Notes
-----
isinf and other ufunc sometimes return a NotImplementedType object instead
of raising any exception. This function is a wrapper to make sure an
exception is always raised.
This should be removed once this problem is solved at the Ufunc level."""
from numpy.core import isinf, errstate
with errstate(invalid='ignore'):
st = isinf(x)
if isinstance(st, type(NotImplemented)):
raise TypeError("isinf not supported for this type")
return st
if os.name == 'nt':
# Code "stolen" from enthought/debug/memusage.py
def GetPerformanceAttributes(object, counter, instance=None,
inum=-1, format=None, machine=None):
# NOTE: Many counters require 2 samples to give accurate results,
# including "% Processor Time" (as by definition, at any instant, a
# thread's CPU usage is either 0 or 100). To read counters like this,
# you should copy this function, but keep the counter open, and call
# CollectQueryData() each time you need to know.
# See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp (dead link)
# My older explanation for this was that the "AddCounter" process
# forced the CPU to 100%, but the above makes more sense :)
import win32pdh
if format is None:
format = win32pdh.PDH_FMT_LONG
path = win32pdh.MakeCounterPath( (machine, object, instance, None,
inum, counter))
hq = win32pdh.OpenQuery()
try:
hc = win32pdh.AddCounter(hq, path)
try:
win32pdh.CollectQueryData(hq)
type, val = win32pdh.GetFormattedCounterValue(hc, format)
return val
finally:
win32pdh.RemoveCounter(hc)
finally:
win32pdh.CloseQuery(hq)
def memusage(processName="python", instance=0):
# from win32pdhutil, part of the win32all package
import win32pdh
return GetPerformanceAttributes("Process", "Virtual Bytes",
processName, instance,
win32pdh.PDH_FMT_LONG, None)
elif sys.platform[:5] == 'linux':
def memusage(_proc_pid_stat=f'/proc/{os.getpid()}/stat'):
"""
Return virtual memory size in bytes of the running python.
"""
try:
with open(_proc_pid_stat, 'r') as f:
l = f.readline().split(' ')
return int(l[22])
except Exception:
return
else:
def memusage():
"""
Return memory usage of running python. [Not implemented]
"""
raise NotImplementedError
if sys.platform[:5] == 'linux':
def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]):
"""
Return number of jiffies elapsed.
Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. See man 5 proc.
"""
import time
if not _load_time:
_load_time.append(time.time())
try:
with open(_proc_pid_stat, 'r') as f:
l = f.readline().split(' ')
return int(l[13])
except Exception:
return int(100*(time.time()-_load_time[0]))
else:
# os.getpid is not in all platforms available.
# Using time is safe but inaccurate, especially when process
# was suspended or sleeping.
def jiffies(_load_time=[]):
"""
Return number of jiffies elapsed.
Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. See man 5 proc.
"""
import time
if not _load_time:
_load_time.append(time.time())
return int(100*(time.time()-_load_time[0]))
def build_err_msg(arrays, err_msg, header='Items are not equal:',
verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
msg = ['\n' + header]
if err_msg:
if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header):
msg = [msg[0] + ' ' + err_msg]
else:
msg.append(err_msg)
if verbose:
for i, a in enumerate(arrays):
if isinstance(a, ndarray):
# precision argument is only needed if the objects are ndarrays
r_func = partial(array_repr, precision=precision)
else:
r_func = repr
try:
r = r_func(a)
except Exception as exc:
r = f'[repr failed for <{type(a).__name__}>: {exc}]'
if r.count('\n') > 3:
r = '\n'.join(r.splitlines()[:3])
r += '...'
msg.append(f' {names[i]}: {r}')
return '\n'.join(msg)
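# Illustrative example (not an exact rendering) of the message this helper builds:
#   build_err_msg([array([1., 2.]), array([1., 3.])], 'oops')
#   -> "\nItems are not equal: oops\n ACTUAL: array([1., 2.])\n DESIRED: array([1., 3.])"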
def assert_equal(actual, desired, err_msg='', verbose=True):
"""
Raises an AssertionError if two objects are not equal.
Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
check that all elements of these objects are equal. An exception is raised
at the first conflicting values.
When one of `actual` and `desired` is a scalar and the other is array_like,
the function checks that each element of the array_like object is equal to
the scalar.
This function handles NaN comparisons as if NaN was a "normal" number.
That is, AssertionError is not raised if both objects have NaNs in the same
positions. This is in contrast to the IEEE standard on NaNs, which says
that NaN compared to anything must return False.
Parameters
----------
actual : array_like
The object to check.
desired : array_like
The expected object.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal.
Examples
--------
>>> np.testing.assert_equal([4,5], [4,6])
Traceback (most recent call last):
...
AssertionError:
Items are not equal:
item=1
ACTUAL: 5
DESIRED: 6
The following comparison does not raise an exception. There are NaNs
in the inputs, but they are in the same positions.
>>> np.testing.assert_equal(np.array([1.0, 2.0, np.nan]), [1, 2, np.nan])
"""
__tracebackhide__ = True # Hide traceback for py.test
if isinstance(desired, dict):
if not isinstance(actual, dict):
raise AssertionError(repr(type(actual)))
assert_equal(len(actual), len(desired), err_msg, verbose)
for k, i in desired.items():
if k not in actual:
raise AssertionError(repr(k))
assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}',
verbose)
return
if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
assert_equal(len(actual), len(desired), err_msg, verbose)
for k in range(len(desired)):
assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}',
verbose)
return
from numpy.core import ndarray, isscalar, signbit
from numpy.lib import iscomplexobj, real, imag
if isinstance(actual, ndarray) or isinstance(desired, ndarray):
return assert_array_equal(actual, desired, err_msg, verbose)
msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
# Handle complex numbers: separate into real/imag to handle
# nan/inf/negative zero correctly
# XXX: catch ValueError for subclasses of ndarray where iscomplex fail
try:
usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
except (ValueError, TypeError):
usecomplex = False
if usecomplex:
if iscomplexobj(actual):
actualr = real(actual)
actuali = imag(actual)
else:
actualr = actual
actuali = 0
if iscomplexobj(desired):
desiredr = real(desired)
desiredi = imag(desired)
else:
desiredr = desired
desiredi = 0
try:
assert_equal(actualr, desiredr)
assert_equal(actuali, desiredi)
except AssertionError:
raise AssertionError(msg)
# isscalar test to check cases such as [np.nan] != np.nan
if isscalar(desired) != isscalar(actual):
raise AssertionError(msg)
try:
isdesnat = isnat(desired)
isactnat = isnat(actual)
dtypes_match = array(desired).dtype.type == array(actual).dtype.type
if isdesnat and isactnat:
# If both are NaT (and have the same dtype -- datetime or
# timedelta) they are considered equal.
if dtypes_match:
return
else:
raise AssertionError(msg)
except (TypeError, ValueError, NotImplementedError):
pass
# Inf/nan/negative zero handling
try:
isdesnan = gisnan(desired)
isactnan = gisnan(actual)
if isdesnan and isactnan:
return # both nan, so equal
# handle signed zero specially for floats
array_actual = array(actual)
array_desired = array(desired)
if (array_actual.dtype.char in 'Mm' or
array_desired.dtype.char in 'Mm'):
# version 1.18
# until this version, gisnan failed for datetime64 and timedelta64.
# Now it succeeds but comparison to scalar with a different type
# emits a DeprecationWarning.
# Avoid that by skipping the next check
raise NotImplementedError('cannot compare to a scalar '
'with a different type')
if desired == 0 and actual == 0:
if not signbit(desired) == signbit(actual):
raise AssertionError(msg)
except (TypeError, ValueError, NotImplementedError):
pass
try:
# Explicitly use __eq__ for comparison, gh-2552
if not (desired == actual):
raise AssertionError(msg)
except (DeprecationWarning, FutureWarning) as e:
# this handles the case when the two types are not even comparable
if 'elementwise == comparison' in e.args[0]:
raise AssertionError(msg)
else:
raise
def print_assert_equal(test_string, actual, desired):
"""
Test if two objects are equal, and print an error message if test fails.
The test is performed with ``actual == desired``.
Parameters
----------
test_string : str
The message supplied to AssertionError.
actual : object
The object to test for equality against `desired`.
desired : object
The expected result.
Examples
--------
>>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])
>>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])
Traceback (most recent call last):
...
AssertionError: Test XYZ of func xyz failed
ACTUAL:
[0, 1]
DESIRED:
[0, 2]
"""
__tracebackhide__ = True # Hide traceback for py.test
import pprint
if not (actual == desired):
msg = StringIO()
msg.write(test_string)
msg.write(' failed\nACTUAL: \n')
pprint.pprint(actual, msg)
msg.write('DESIRED: \n')
pprint.pprint(desired, msg)
raise AssertionError(msg.getvalue())
def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
"""
Raises an AssertionError if two items are not equal up to desired
precision.
.. note:: It is recommended to use one of `assert_allclose`,
`assert_array_almost_equal_nulp` or `assert_array_max_ulp`
instead of this function for more consistent floating point
comparisons.
The test verifies that the elements of ``actual`` and ``desired`` satisfy.
``abs(desired-actual) < 1.5 * 10**(-decimal)``
That is a looser test than originally documented, but agrees with what the
actual implementation in `assert_array_almost_equal` did up to rounding
vagaries. An exception is raised at conflicting values. For ndarrays this
delegates to assert_array_almost_equal
Parameters
----------
actual : array_like
The object to check.
desired : array_like
The expected object.
decimal : int, optional
Desired precision, default is 7.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_allclose: Compare two array_like objects for equality with desired
relative and/or absolute precision.
assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
Examples
--------
>>> import numpy.testing as npt
>>> npt.assert_almost_equal(2.3333333333333, 2.33333334)
>>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
Traceback (most recent call last):
...
AssertionError:
Arrays are not almost equal to 10 decimals
ACTUAL: 2.3333333333333
DESIRED: 2.33333334
>>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]),
... np.array([1.0,2.33333334]), decimal=9)
Traceback (most recent call last):
...
AssertionError:
Arrays are not almost equal to 9 decimals
<BLANKLINE>
Mismatched elements: 1 / 2 (50%)
Max absolute difference: 6.66669964e-09
Max relative difference: 2.85715698e-09
x: array([1. , 2.333333333])
y: array([1. , 2.33333334])
"""
__tracebackhide__ = True # Hide traceback for py.test
from numpy.core import ndarray
from numpy.lib import iscomplexobj, real, imag
# Handle complex numbers: separate into real/imag to handle
# nan/inf/negative zero correctly
# XXX: catch ValueError for subclasses of ndarray where iscomplex fail
try:
usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
except ValueError:
usecomplex = False
def _build_err_msg():
header = ('Arrays are not almost equal to %d decimals' % decimal)
return build_err_msg([actual, desired], err_msg, verbose=verbose,
header=header)
if usecomplex:
if iscomplexobj(actual):
actualr = real(actual)
actuali = imag(actual)
else:
actualr = actual
actuali = 0
if iscomplexobj(desired):
desiredr = real(desired)
desiredi = imag(desired)
else:
desiredr = desired
desiredi = 0
try:
assert_almost_equal(actualr, desiredr, decimal=decimal)
assert_almost_equal(actuali, desiredi, decimal=decimal)
except AssertionError:
raise AssertionError(_build_err_msg())
if isinstance(actual, (ndarray, tuple, list)) \
or isinstance(desired, (ndarray, tuple, list)):
return assert_array_almost_equal(actual, desired, decimal, err_msg)
try:
# If one of desired/actual is not finite, handle it specially here:
# check that both are nan if any is a nan, and test for equality
# otherwise
if not (gisfinite(desired) and gisfinite(actual)):
if gisnan(desired) or gisnan(actual):
if not (gisnan(desired) and gisnan(actual)):
raise AssertionError(_build_err_msg())
else:
if not desired == actual:
raise AssertionError(_build_err_msg())
return
except (NotImplementedError, TypeError):
pass
if abs(desired - actual) >= 1.5 * 10.0**(-decimal):
raise AssertionError(_build_err_msg())
def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
"""
Raises an AssertionError if two items are not equal up to significant
digits.
.. note:: It is recommended to use one of `assert_allclose`,
`assert_array_almost_equal_nulp` or `assert_array_max_ulp`
instead of this function for more consistent floating point
comparisons.
Given two numbers, check that they are approximately equal.
Approximately equal is defined as the number of significant digits
that agree.
Parameters
----------
actual : scalar
The object to check.
desired : scalar
The expected object.
significant : int, optional
Desired precision, default is 7.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_allclose: Compare two array_like objects for equality with desired
relative and/or absolute precision.
assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
Examples
--------
>>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
>>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
... significant=8)
>>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
... significant=8)
Traceback (most recent call last):
...
AssertionError:
Items are not equal to 8 significant digits:
ACTUAL: 1.234567e-21
DESIRED: 1.2345672e-21
the evaluated condition that raises the exception is
>>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
True
"""
__tracebackhide__ = True # Hide traceback for py.test
import numpy as np
(actual, desired) = map(float, (actual, desired))
if desired == actual:
return
# Normalized the numbers to be in range (-10.0,10.0)
# scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
with np.errstate(invalid='ignore'):
scale = 0.5*(np.abs(desired) + np.abs(actual))
scale = np.power(10, np.floor(np.log10(scale)))
try:
sc_desired = desired/scale
except ZeroDivisionError:
sc_desired = 0.0
try:
sc_actual = actual/scale
except ZeroDivisionError:
sc_actual = 0.0
msg = build_err_msg(
[actual, desired], err_msg,
header='Items are not equal to %d significant digits:' % significant,
verbose=verbose)
try:
# If one of desired/actual is not finite, handle it specially here:
# check that both are nan if any is a nan, and test for equality
# otherwise
if not (gisfinite(desired) and gisfinite(actual)):
if gisnan(desired) or gisnan(actual):
if not (gisnan(desired) and gisnan(actual)):
raise AssertionError(msg)
else:
if not desired == actual:
raise AssertionError(msg)
return
except (TypeError, NotImplementedError):
pass
if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)):
raise AssertionError(msg)
def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
precision=6, equal_nan=True, equal_inf=True):
__tracebackhide__ = True # Hide traceback for py.test
from numpy.core import array, array2string, isnan, inf, bool_, errstate, all, max, object_
x = array(x, copy=False, subok=True)
y = array(y, copy=False, subok=True)
# original array for output formatting
ox, oy = x, y
def isnumber(x):
return x.dtype.char in '?bhilqpBHILQPefdgFDG'
def istime(x):
return x.dtype.char in "Mm"
def func_assert_same_pos(x, y, func=isnan, hasval='nan'):
"""Handling nan/inf.
Combine results of running func on x and y, checking that they are True
at the same locations.
"""
__tracebackhide__ = True # Hide traceback for py.test
x_id = func(x)
y_id = func(y)
# We include work-arounds here to handle three types of slightly
# pathological ndarray subclasses:
# (1) all() on `masked` array scalars can return masked arrays, so we
# use != True
# (2) __eq__ on some ndarray subclasses returns Python booleans
# instead of element-wise comparisons, so we cast to bool_() and
# use isinstance(..., bool) checks
# (3) subclasses with bare-bones __array_function__ implementations may
# not implement np.all(), so favor using the .all() method
# We are not committed to supporting such subclasses, but it's nice to
# support them if possible.
if bool_(x_id == y_id).all() != True:
msg = build_err_msg([x, y],
err_msg + '\nx and y %s location mismatch:'
% (hasval), verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
# If there is a scalar, then here we know the array has the same
# flag as it everywhere, so we should return the scalar flag.
if isinstance(x_id, bool) or x_id.ndim == 0:
return bool_(x_id)
elif isinstance(y_id, bool) or y_id.ndim == 0:
return bool_(y_id)
else:
return y_id
try:
cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
if not cond:
msg = build_err_msg([x, y],
err_msg
+ f'\n(shapes {x.shape}, {y.shape} mismatch)',
verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
flagged = bool_(False)
if isnumber(x) and isnumber(y):
if equal_nan:
flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan')
if equal_inf:
flagged |= func_assert_same_pos(x, y,
func=lambda xy: xy == +inf,
hasval='+inf')
flagged |= func_assert_same_pos(x, y,
func=lambda xy: xy == -inf,
hasval='-inf')
elif istime(x) and istime(y):
# If one is datetime64 and the other timedelta64 there is no point
if equal_nan and x.dtype.type == y.dtype.type:
flagged = func_assert_same_pos(x, y, func=isnat, hasval="NaT")
if flagged.ndim > 0:
x, y = x[~flagged], y[~flagged]
# Only do the comparison if actual values are left
if x.size == 0:
return
elif flagged:
# no sense doing comparison if everything is flagged.
return
val = comparison(x, y)
if isinstance(val, bool):
cond = val
reduced = array([val])
else:
reduced = val.ravel()
cond = reduced.all()
# The below comparison is a hack to ensure that fully masked
# results, for which val.ravel().all() returns np.ma.masked,
# do not trigger a failure (np.ma.masked != True evaluates as
# np.ma.masked, which is falsy).
if cond != True:
n_mismatch = reduced.size - reduced.sum(dtype=intp)
n_elements = flagged.size if flagged.ndim != 0 else reduced.size
percent_mismatch = 100 * n_mismatch / n_elements
remarks = [
'Mismatched elements: {} / {} ({:.3g}%)'.format(
n_mismatch, n_elements, percent_mismatch)]
with errstate(invalid='ignore', divide='ignore'):
# ignore errors for non-numeric types
with contextlib.suppress(TypeError):
error = abs(x - y)
max_abs_error = max(error)
if getattr(error, 'dtype', object_) == object_:
remarks.append('Max absolute difference: '
+ str(max_abs_error))
else:
remarks.append('Max absolute difference: '
+ array2string(max_abs_error))
# note: this definition of relative error matches that one
# used by assert_allclose (found in np.isclose)
# Filter values where the divisor would be zero
nonzero = bool_(y != 0)
if all(~nonzero):
max_rel_error = array(inf)
else:
max_rel_error = max(error[nonzero] / abs(y[nonzero]))
if getattr(error, 'dtype', object_) == object_:
remarks.append('Max relative difference: '
+ str(max_rel_error))
else:
remarks.append('Max relative difference: '
+ array2string(max_rel_error))
err_msg += '\n' + '\n'.join(remarks)
msg = build_err_msg([ox, oy], err_msg,
verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
except ValueError:
import traceback
efmt = traceback.format_exc()
header = f'error during assertion:\n\n{efmt}\n\n{header}'
msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise ValueError(msg)
def assert_array_equal(x, y, err_msg='', verbose=True):
"""
Raises an AssertionError if two array_like objects are not equal.
Given two array_like objects, check that the shape is equal and all
elements of these objects are equal (but see the Notes for the special
handling of a scalar). An exception is raised at shape mismatch or
conflicting values. In contrast to the standard usage in numpy, NaNs
are compared like numbers, no assertion is raised if both objects have
NaNs in the same positions.
The usual caution for verifying equality with floating point numbers is
advised.
Parameters
----------
x : array_like
The actual object to check.
y : array_like
The desired, expected object.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired objects are not equal.
See Also
--------
assert_allclose: Compare two array_like objects for equality with desired
relative and/or absolute precision.
assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
Notes
-----
When one of `x` and `y` is a scalar and the other is array_like, the
function checks that each element of the array_like object is equal to
the scalar.
Examples
--------
The first assert does not raise an exception:
>>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
... [np.exp(0),2.33333, np.nan])
Assert fails with numerical imprecision with floats:
>>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
... [1, np.sqrt(np.pi)**2, np.nan])
Traceback (most recent call last):
...
AssertionError:
Arrays are not equal
<BLANKLINE>
Mismatched elements: 1 / 3 (33.3%)
Max absolute difference: 4.4408921e-16
Max relative difference: 1.41357986e-16
x: array([1. , 3.141593, nan])
y: array([1. , 3.141593, nan])
Use `assert_allclose` or one of the nulp (number of floating point values)
functions for these cases instead:
>>> np.testing.assert_allclose([1.0,np.pi,np.nan],
... [1, np.sqrt(np.pi)**2, np.nan],
... rtol=1e-10, atol=0)
As mentioned in the Notes section, `assert_array_equal` has special
handling for scalars. Here the test checks that each value in `x` is 3:
>>> x = np.full((2, 5), fill_value=3)
>>> np.testing.assert_array_equal(x, 3)
"""
__tracebackhide__ = True # Hide traceback for py.test
assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
verbose=verbose, header='Arrays are not equal')
def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
"""
Raises an AssertionError if two objects are not equal up to desired
precision.
.. note:: It is recommended to use one of `assert_allclose`,
`assert_array_almost_equal_nulp` or `assert_array_max_ulp`
instead of this function for more consistent floating point
comparisons.
The test verifies identical shapes and that the elements of ``actual`` and
``desired`` satisfy.
``abs(desired-actual) < 1.5 * 10**(-decimal)``
That is a looser test than originally documented, but agrees with what the
actual implementation did up to rounding vagaries. An exception is raised
at shape mismatch or conflicting values. In contrast to the standard usage
in numpy, NaNs are compared like numbers, no assertion is raised if both
objects have NaNs in the same positions.
Parameters
----------
x : array_like
The actual object to check.
y : array_like
The desired, expected object.
decimal : int, optional
Desired precision, default is 6.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_allclose: Compare two array_like objects for equality with desired
relative and/or absolute precision.
assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
Examples
--------
the first assert does not raise an exception
>>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
... [1.0,2.333,np.nan])
>>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
... [1.0,2.33339,np.nan], decimal=5)
Traceback (most recent call last):
...
AssertionError:
Arrays are not almost equal to 5 decimals
<BLANKLINE>
Mismatched elements: 1 / 3 (33.3%)
Max absolute difference: 6.e-05
Max relative difference: 2.57136612e-05
x: array([1. , 2.33333, nan])
y: array([1. , 2.33339, nan])
>>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
... [1.0,2.33333, 5], decimal=5)
Traceback (most recent call last):
...
AssertionError:
Arrays are not almost equal to 5 decimals
<BLANKLINE>
x and y nan location mismatch:
x: array([1. , 2.33333, nan])
y: array([1. , 2.33333, 5. ])
"""
__tracebackhide__ = True # Hide traceback for py.test
from numpy.core import number, float_, result_type, array
from numpy.core.numerictypes import issubdtype
from numpy.core.fromnumeric import any as npany
def compare(x, y):
try:
if npany(gisinf(x)) or npany( gisinf(y)):
xinfid = gisinf(x)
yinfid = gisinf(y)
if not (xinfid == yinfid).all():
return False
# if one item, x and y is +- inf
if x.size == y.size == 1:
return x == y
x = x[~xinfid]
y = y[~yinfid]
except (TypeError, NotImplementedError):
pass
# make sure y is an inexact type to avoid abs(MIN_INT); will cause
# casting of x later.
dtype = result_type(y, 1.)
y = array(y, dtype=dtype, copy=False, subok=True)
z = abs(x - y)
if not issubdtype(z.dtype, number):
z = z.astype(float_) # handle object arrays
return z < 1.5 * 10.0**(-decimal)
assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
header=('Arrays are not almost equal to %d decimals' % decimal),
precision=decimal)
def assert_array_less(x, y, err_msg='', verbose=True):
"""
Raises an AssertionError if two array_like objects are not ordered by less
than.
Given two array_like objects, check that the shape is equal and all
elements of the first object are strictly smaller than those of the
second object. An exception is raised at shape mismatch or incorrectly
ordered values. Shape mismatch does not raise if an object has zero
dimension. In contrast to the standard usage in numpy, NaNs are
compared, no assertion is raised if both objects have NaNs in the same
positions.
Parameters
----------
x : array_like
The smaller object to check.
y : array_like
The larger object to compare.
err_msg : string
The error message to be printed in case of failure.
verbose : bool
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired objects are not equal.
See Also
--------
assert_array_equal: tests objects for equality
assert_array_almost_equal: test objects for equality up to precision
Examples
--------
>>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
>>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
Traceback (most recent call last):
...
AssertionError:
Arrays are not less-ordered
<BLANKLINE>
Mismatched elements: 1 / 3 (33.3%)
Max absolute difference: 1.
Max relative difference: 0.5
x: array([ 1., 1., nan])
y: array([ 1., 2., nan])
>>> np.testing.assert_array_less([1.0, 4.0], 3)
Traceback (most recent call last):
...
AssertionError:
Arrays are not less-ordered
<BLANKLINE>
Mismatched elements: 1 / 2 (50%)
Max absolute difference: 2.
Max relative difference: 0.66666667
x: array([1., 4.])
y: array(3)
>>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
Traceback (most recent call last):
...
AssertionError:
Arrays are not less-ordered
<BLANKLINE>
(shapes (3,), (1,) mismatch)
x: array([1., 2., 3.])
y: array([4])
"""
__tracebackhide__ = True # Hide traceback for py.test
assert_array_compare(operator.__lt__, x, y, err_msg=err_msg,
verbose=verbose,
header='Arrays are not less-ordered',
equal_inf=False)
def runstring(astr, dict):
exec(astr, dict)
def assert_string_equal(actual, desired):
"""
Test if two strings are equal.
If the given strings are equal, `assert_string_equal` does nothing.
If they are not equal, an AssertionError is raised, and the diff
between the strings is shown.
Parameters
----------
actual : str
The string to test for equality against the expected string.
desired : str
The expected string.
Examples
--------
>>> np.testing.assert_string_equal('abc', 'abc')
>>> np.testing.assert_string_equal('abc', 'abcd')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
...
AssertionError: Differences in strings:
- abc+ abcd? +
"""
# delay import of difflib to reduce startup time
__tracebackhide__ = True # Hide traceback for py.test
import difflib
if not isinstance(actual, str):
raise AssertionError(repr(type(actual)))
if not isinstance(desired, str):
raise AssertionError(repr(type(desired)))
if desired == actual:
return
diff = list(difflib.Differ().compare(actual.splitlines(True),
desired.splitlines(True)))
diff_list = []
while diff:
d1 = diff.pop(0)
if d1.startswith(' '):
continue
if d1.startswith('- '):
l = [d1]
d2 = diff.pop(0)
if d2.startswith('? '):
l.append(d2)
d2 = diff.pop(0)
if not d2.startswith('+ '):
raise AssertionError(repr(d2))
l.append(d2)
if diff:
d3 = diff.pop(0)
if d3.startswith('? '):
l.append(d3)
else:
diff.insert(0, d3)
if d2[2:] == d1[2:]:
continue
diff_list.extend(l)
continue
raise AssertionError(repr(d1))
if not diff_list:
return
msg = f"Differences in strings:\n{''.join(diff_list).rstrip()}"
if actual != desired:
raise AssertionError(msg)
def rundocs(filename=None, raise_on_error=True):
"""
Run doctests found in the given file.
By default `rundocs` raises an AssertionError on failure.
Parameters
----------
filename : str
The path to the file for which the doctests are run.
raise_on_error : bool
Whether to raise an AssertionError when a doctest fails. Default is
True.
Notes
-----
The doctests can be run by the user/developer by adding the ``doctests``
argument to the ``test()`` call. For example, to run all tests (including
doctests) for `numpy.lib`:
>>> np.lib.test(doctests=True) # doctest: +SKIP
"""
from numpy.compat import npy_load_module
import doctest
if filename is None:
f = sys._getframe(1)
filename = f.f_globals['__file__']
name = os.path.splitext(os.path.basename(filename))[0]
m = npy_load_module(name, filename)
tests = doctest.DocTestFinder().find(m)
runner = doctest.DocTestRunner(verbose=False)
msg = []
if raise_on_error:
out = lambda s: msg.append(s)
else:
out = None
for test in tests:
runner.run(test, out=out)
if runner.failures > 0 and raise_on_error:
raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
def raises(*args):
"""Decorator to check for raised exceptions.
The decorated test function must raise one of the passed exceptions to
pass. If you want to test many assertions about exceptions in a single
test, you may want to use `assert_raises` instead.
.. warning::
This decorator is nose specific, do not use it if you are using a
different test framework.
Parameters
----------
args : exceptions
The test passes if any of the passed exceptions is raised.
Raises
------
AssertionError
Examples
--------
Usage::
@raises(TypeError, ValueError)
def test_raises_type_error():
raise TypeError("This test passes")
@raises(Exception)
def test_that_fails_by_passing():
pass
"""
nose = import_nose()
return nose.tools.raises(*args)
#
# assert_raises and assert_raises_regex are taken from unittest.
#
import unittest
class _Dummy(unittest.TestCase):
def nop(self):
pass
_d = _Dummy('nop')
def assert_raises(*args, **kwargs):
"""
assert_raises(exception_class, callable, *args, **kwargs)
assert_raises(exception_class)
Fail unless an exception of class exception_class is thrown
by callable when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
thrown, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
Alternatively, `assert_raises` can be used as a context manager:
>>> from numpy.testing import assert_raises
>>> with assert_raises(ZeroDivisionError):
... 1 / 0
is equivalent to
>>> def div(x, y):
... return x / y
>>> assert_raises(ZeroDivisionError, div, 1, 0)
"""
__tracebackhide__ = True # Hide traceback for py.test
return _d.assertRaises(*args,**kwargs)
def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
"""
assert_raises_regex(exception_class, expected_regexp, callable, *args,
**kwargs)
assert_raises_regex(exception_class, expected_regexp)
Fail unless an exception of class exception_class and with message that
matches expected_regexp is thrown by callable when invoked with arguments
args and keyword arguments kwargs.
Alternatively, can be used as a context manager like `assert_raises`.
Name of this function adheres to Python 3.2+ reference, but should work in
all versions down to 2.6.
Notes
-----
.. versionadded:: 1.9.0
"""
__tracebackhide__ = True # Hide traceback for py.test
return _d.assertRaisesRegex(exception_class, expected_regexp, *args, **kwargs)
def decorate_methods(cls, decorator, testmatch=None):
"""
Apply a decorator to all methods in a class matching a regular expression.
The given decorator is applied to all public methods of `cls` that are
matched by the regular expression `testmatch`
(``testmatch.search(methodname)``). Methods that are private, i.e. start
with an underscore, are ignored.
Parameters
----------
cls : class
Class whose methods to decorate.
decorator : function
Decorator to apply to methods
testmatch : compiled regexp or str, optional
The regular expression. Default value is None, in which case the
nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
is used.
If `testmatch` is a string, it is compiled to a regular expression
first.
"""
if testmatch is None:
testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
else:
testmatch = re.compile(testmatch)
cls_attr = cls.__dict__
# delayed import to reduce startup time
from inspect import isfunction
methods = [_m for _m in cls_attr.values() if isfunction(_m)]
for function in methods:
try:
if hasattr(function, 'compat_func_name'):
funcname = function.compat_func_name
else:
funcname = function.__name__
except AttributeError:
# not a function
continue
if testmatch.search(funcname) and not funcname.startswith('_'):
setattr(cls, funcname, decorator(function))
return
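# Illustrative sketch (class and decorator names are assumptions): every public
# method whose name matches the pattern is wrapped in place, while names
# starting with an underscore are skipped.
#   class TestThing:
#       def test_ok(self): ...
#       def _helper(self): ...            # ignored
#   decorate_methods(TestThing, my_decorator, testmatch='test_')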
def measure(code_str, times=1, label=None):
"""
Return elapsed time for executing code in the namespace of the caller.
The supplied code string is compiled with the Python builtin ``compile``.
The precision of the timing is 10 milli-seconds. If the code will execute
fast on this timescale, it can be executed many times to get reasonable
timing accuracy.
Parameters
----------
code_str : str
The code to be timed.
times : int, optional
The number of times the code is executed. Default is 1. The code is
only compiled once.
label : str, optional
A label to identify `code_str` with. This is passed into ``compile``
as the second argument (for run-time error messages).
Returns
-------
elapsed : float
Total elapsed time in seconds for executing `code_str` `times` times.
Examples
--------
>>> times = 10
>>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', times=times)
>>> print("Time for a single execution : ", etime / times, "s") # doctest: +SKIP
Time for a single execution : 0.005 s
"""
frame = sys._getframe(1)
locs, globs = frame.f_locals, frame.f_globals
code = compile(code_str, f'Test name: {label} ', 'exec')
i = 0
elapsed = jiffies()
while i < times:
i += 1
exec(code, globs, locs)
elapsed = jiffies() - elapsed
return 0.01*elapsed
def _assert_valid_refcount(op):
"""
Check that ufuncs don't mishandle refcount of object `1`.
Used in a few regression tests.
"""
if not HAS_REFCOUNT:
return True
import gc
import numpy as np
b = np.arange(100*100).reshape(100, 100)
c = b
i = 1
gc.disable()
try:
rc = sys.getrefcount(i)
for j in range(15):
d = op(b, c)
assert_(sys.getrefcount(i) >= rc)
finally:
gc.enable()
del d # for pyflakes
def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,
err_msg='', verbose=True):
"""
Raises an AssertionError if two objects are not equal up to desired
tolerance.
The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note
that ``allclose`` has different default values). It compares the difference
between `actual` and `desired` to ``atol + rtol * abs(desired)``.
.. versionadded:: 1.5.0
Parameters
----------
actual : array_like
Array obtained.
desired : array_like
Array desired.
rtol : float, optional
Relative tolerance.
atol : float, optional
Absolute tolerance.
equal_nan : bool, optional.
If True, NaNs will compare equal.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_array_almost_equal_nulp, assert_array_max_ulp
Examples
--------
>>> x = [1e-5, 1e-3, 1e-1]
>>> y = np.arccos(np.cos(x))
>>> np.testing.assert_allclose(x, y, rtol=1e-5, atol=0)
"""
__tracebackhide__ = True # Hide traceback for py.test
import numpy as np
def compare(x, y):
return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol,
equal_nan=equal_nan)
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
header = f'Not equal to tolerance rtol={rtol:g}, atol={atol:g}'
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
verbose=verbose, header=header, equal_nan=equal_nan)
def assert_array_almost_equal_nulp(x, y, nulp=1):
"""
Compare two arrays relatively to their spacing.
This is a relatively robust method to compare two arrays whose amplitude
is variable.
Parameters
----------
x, y : array_like
Input arrays.
nulp : int, optional
The maximum number of unit in the last place for tolerance (see Notes).
Default is 1.
Returns
-------
None
Raises
------
AssertionError
If the spacing between `x` and `y` for one or more elements is larger
than `nulp`.
See Also
--------
assert_array_max_ulp : Check that all items of arrays differ in at most
N Units in the Last Place.
spacing : Return the distance between x and the nearest adjacent number.
Notes
-----
An assertion is raised if the following condition is not met::
abs(x - y) <= nulps * spacing(maximum(abs(x), abs(y)))
Examples
--------
>>> x = np.array([1., 1e-10, 1e-20])
>>> eps = np.finfo(x.dtype).eps
>>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)
>>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x)
Traceback (most recent call last):
...
AssertionError: X and Y are not equal to 1 ULP (max is 2)
"""
__tracebackhide__ = True # Hide traceback for py.test
import numpy as np
ax = np.abs(x)
ay = np.abs(y)
ref = nulp * np.spacing(np.where(ax > ay, ax, ay))
if not np.all(np.abs(x-y) <= ref):
if np.iscomplexobj(x) or np.iscomplexobj(y):
msg = "X and Y are not equal to %d ULP" % nulp
else:
max_nulp = np.max(nulp_diff(x, y))
msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp)
raise AssertionError(msg)
def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
"""
Check that all items of arrays differ in at most N Units in the Last Place.
Parameters
----------
a, b : array_like
Input arrays to be compared.
maxulp : int, optional
The maximum number of units in the last place that elements of `a` and
`b` can differ. Default is 1.
dtype : dtype, optional
Data-type to convert `a` and `b` to if given. Default is None.
Returns
-------
ret : ndarray
Array containing number of representable floating point numbers between
items in `a` and `b`.
Raises
------
AssertionError
If one or more elements differ by more than `maxulp`.
Notes
-----
For computing the ULP difference, this API does not differentiate between
various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000
is zero).
See Also
--------
assert_array_almost_equal_nulp : Compare two arrays relatively to their
spacing.
Examples
--------
>>> a = np.linspace(0., 1., 100)
>>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))
"""
__tracebackhide__ = True # Hide traceback for py.test
import numpy as np
ret = nulp_diff(a, b, dtype)
if not np.all(ret <= maxulp):
raise AssertionError("Arrays are not almost equal up to %g "
"ULP (max difference is %g ULP)" %
(maxulp, np.max(ret)))
return ret
def nulp_diff(x, y, dtype=None):
"""For each item in x and y, return the number of representable floating
points between them.
Parameters
----------
x : array_like
first input array
y : array_like
second input array
dtype : dtype, optional
Data-type to convert `x` and `y` to if given. Default is None.
Returns
-------
nulp : array_like
number of representable floating point numbers between each item in x
and y.
Notes
-----
For computing the ULP difference, this API does not differentiate between
various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000
is zero).
Examples
--------
    # By definition, epsilon is the smallest number such that 1 + eps != 1, so
    # there should be exactly one ULP between 1 and 1 + eps
    >>> nulp_diff(1, 1 + np.finfo(np.float64).eps)
1.0
"""
import numpy as np
if dtype:
x = np.array(x, dtype=dtype)
y = np.array(y, dtype=dtype)
else:
x = np.array(x)
y = np.array(y)
t = np.common_type(x, y)
if np.iscomplexobj(x) or np.iscomplexobj(y):
raise NotImplementedError("_nulp not implemented for complex array")
x = np.array([x], dtype=t)
y = np.array([y], dtype=t)
x[np.isnan(x)] = np.nan
y[np.isnan(y)] = np.nan
if not x.shape == y.shape:
raise ValueError("x and y do not have the same shape: %s - %s" %
(x.shape, y.shape))
def _diff(rx, ry, vdt):
diff = np.array(rx-ry, dtype=vdt)
return np.abs(diff)
rx = integer_repr(x)
ry = integer_repr(y)
return _diff(rx, ry, t)
def _integer_repr(x, vdt, comp):
# Reinterpret binary representation of the float as sign-magnitude:
# take into account two-complement representation
# See also
# https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
rx = x.view(vdt)
if not (rx.size == 1):
rx[rx < 0] = comp - rx[rx < 0]
else:
if rx < 0:
rx = comp - rx
return rx
def integer_repr(x):
"""Return the signed-magnitude interpretation of the binary representation
of x."""
import numpy as np
if x.dtype == np.float16:
return _integer_repr(x, np.int16, np.int16(-2**15))
elif x.dtype == np.float32:
return _integer_repr(x, np.int32, np.int32(-2**31))
elif x.dtype == np.float64:
return _integer_repr(x, np.int64, np.int64(-2**63))
else:
raise ValueError(f'Unsupported dtype {x.dtype}')
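# Illustrative worked example (an assumption, not from NumPy's docs): adjacent
# float64 values map to consecutive signed-magnitude integers, so their ULP
# distance is exactly 1.
#   a = np.array([1.0]); b = np.nextafter(a, np.inf)
#   integer_repr(b) - integer_repr(a)     # -> array([1])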
@contextlib.contextmanager
def _assert_warns_context(warning_class, name=None):
__tracebackhide__ = True # Hide traceback for py.test
with suppress_warnings() as sup:
l = sup.record(warning_class)
yield
if not len(l) > 0:
name_str = f' when calling {name}' if name is not None else ''
raise AssertionError("No warning raised" + name_str)
def assert_warns(warning_class, *args, **kwargs):
"""
Fail unless the given callable throws the specified warning.
A warning of class warning_class should be thrown by the callable when
invoked with arguments args and keyword arguments kwargs.
If a different type of warning is thrown, it will not be caught.
If called with all arguments other than the warning class omitted, may be
used as a context manager:
with assert_warns(SomeWarning):
do_something()
The ability to be used as a context manager is new in NumPy v1.11.0.
.. versionadded:: 1.4.0
Parameters
----------
warning_class : class
The class defining the warning that `func` is expected to throw.
func : callable, optional
Callable to test
*args : Arguments
Arguments for `func`.
**kwargs : Kwargs
Keyword arguments for `func`.
Returns
-------
The value returned by `func`.
Examples
--------
>>> import warnings
>>> def deprecated_func(num):
... warnings.warn("Please upgrade", DeprecationWarning)
... return num*num
>>> with np.testing.assert_warns(DeprecationWarning):
... assert deprecated_func(4) == 16
>>> # or passing a func
>>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4)
>>> assert ret == 16
"""
if not args:
return _assert_warns_context(warning_class)
func = args[0]
args = args[1:]
with _assert_warns_context(warning_class, name=func.__name__):
return func(*args, **kwargs)
@contextlib.contextmanager
def _assert_no_warnings_context(name=None):
__tracebackhide__ = True # Hide traceback for py.test
with warnings.catch_warnings(record=True) as l:
warnings.simplefilter('always')
yield
if len(l) > 0:
name_str = f' when calling {name}' if name is not None else ''
raise AssertionError(f'Got warnings{name_str}: {l}')
def assert_no_warnings(*args, **kwargs):
"""
Fail if the given callable produces any warnings.
If called with all arguments omitted, may be used as a context manager:
with assert_no_warnings():
do_something()
The ability to be used as a context manager is new in NumPy v1.11.0.
.. versionadded:: 1.7.0
Parameters
----------
func : callable
The callable to test.
\\*args : Arguments
Arguments passed to `func`.
\\*\\*kwargs : Kwargs
Keyword arguments passed to `func`.
Returns
-------
The value returned by `func`.
"""
if not args:
return _assert_no_warnings_context()
func = args[0]
args = args[1:]
with _assert_no_warnings_context(name=func.__name__):
return func(*args, **kwargs)
def _gen_alignment_data(dtype=float32, type='binary', max_size=24):
"""
generator producing data with different alignment and offsets
to test simd vectorization
Parameters
----------
dtype : dtype
data type to produce
type : string
'unary': create data for unary operations, creates one input
and output array
'binary': create data for unary operations, creates two input
and output array
max_size : integer
maximum size of data to produce
Returns
-------
if type is 'unary' yields one output, one input array and a message
containing information on the data
if type is 'binary' yields one output array, two input array and a message
containing information on the data
"""
ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'
bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'
for o in range(3):
for s in range(o + 2, max(o + 3, max_size)):
if type == 'unary':
inp = lambda: arange(s, dtype=dtype)[o:]
out = empty((s,), dtype=dtype)[o:]
yield out, inp(), ufmt % (o, o, s, dtype, 'out of place')
d = inp()
yield d, d, ufmt % (o, o, s, dtype, 'in place')
yield out[1:], inp()[:-1], ufmt % \
(o + 1, o, s - 1, dtype, 'out of place')
yield out[:-1], inp()[1:], ufmt % \
(o, o + 1, s - 1, dtype, 'out of place')
yield inp()[:-1], inp()[1:], ufmt % \
(o, o + 1, s - 1, dtype, 'aliased')
yield inp()[1:], inp()[:-1], ufmt % \
(o + 1, o, s - 1, dtype, 'aliased')
if type == 'binary':
inp1 = lambda: arange(s, dtype=dtype)[o:]
inp2 = lambda: arange(s, dtype=dtype)[o:]
out = empty((s,), dtype=dtype)[o:]
yield out, inp1(), inp2(), bfmt % \
(o, o, o, s, dtype, 'out of place')
d = inp1()
yield d, d, inp2(), bfmt % \
(o, o, o, s, dtype, 'in place1')
d = inp2()
yield d, inp1(), d, bfmt % \
(o, o, o, s, dtype, 'in place2')
yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \
(o + 1, o, o, s - 1, dtype, 'out of place')
yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \
(o, o + 1, o, s - 1, dtype, 'out of place')
yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \
(o, o, o + 1, s - 1, dtype, 'out of place')
yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \
(o + 1, o, o, s - 1, dtype, 'aliased')
yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \
(o, o + 1, o, s - 1, dtype, 'aliased')
yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \
(o, o, o + 1, s - 1, dtype, 'aliased')
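# Hedged usage sketch (added by the editor, not from the original source): how a
# SIMD test might consume _gen_alignment_data; np.add and the assertion are
# illustrative assumptions, not part of this module.
def _example_gen_alignment_data_usage():
    import numpy as np
    for out, inp1, inp2, msg in _gen_alignment_data(dtype=np.float32,
                                                    type='binary',
                                                    max_size=8):
        expected = inp1 + inp2          # compute before any aliased write to out
        np.add(inp1, inp2, out=out)
        assert np.allclose(out, expected), msg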
class IgnoreException(Exception):
"Ignoring this exception due to disabled feature"
pass
@contextlib.contextmanager
def tempdir(*args, **kwargs):
"""Context manager to provide a temporary test folder.
All arguments are passed through to the underlying tempfile.mkdtemp
function.
"""
tmpdir = mkdtemp(*args, **kwargs)
try:
yield tmpdir
finally:
shutil.rmtree(tmpdir)
@contextlib.contextmanager
def temppath(*args, **kwargs):
"""Context manager for temporary files.
Context manager that returns the path to a closed temporary file. Its
parameters are the same as for tempfile.mkstemp and are passed directly
to that function. The underlying file is removed when the context is
exited, so it should be closed at that time.
Windows does not allow a temporary file to be opened if it is already
open, so the underlying file must be closed after opening before it
can be opened again.
"""
fd, path = mkstemp(*args, **kwargs)
os.close(fd)
try:
yield path
finally:
os.remove(path)
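# Hedged usage sketch (added by the editor, not part of the original module):
# demonstrates the two context managers above; the file names and contents are
# illustrative assumptions.
def _example_tempdir_temppath_usage():
    import numpy as np
    with tempdir() as folder:
        with open(os.path.join(folder, 'notes.txt'), 'w') as fh:
            fh.write('scratch data')
    # the folder and everything inside it has been removed at this point
    with temppath(suffix='.npy') as path:
        np.save(path, np.arange(3))
        loaded = np.load(path)
    # the temporary file has been removed at this point
    return loaded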
class clear_and_catch_warnings(warnings.catch_warnings):
""" Context manager that resets warning registry for catching warnings
Warnings can be slippery, because, whenever a warning is triggered, Python
adds a ``__warningregistry__`` member to the *calling* module. This makes
it impossible to retrigger the warning in this module, whatever you put in
the warnings filters. This context manager accepts a sequence of `modules`
as a keyword argument to its constructor and:
* stores and removes any ``__warningregistry__`` entries in given `modules`
on entry;
* resets ``__warningregistry__`` to its previous state on exit.
This makes it possible to trigger any warning afresh inside the context
manager without disturbing the state of warnings outside.
For compatibility with Python 3.0, please consider all arguments to be
keyword-only.
Parameters
----------
record : bool, optional
Specifies whether warnings should be captured by a custom
implementation of ``warnings.showwarning()`` and be appended to a list
returned by the context manager. Otherwise None is returned by the
context manager. The objects appended to the list are arguments whose
attributes mirror the arguments to ``showwarning()``.
modules : sequence, optional
Sequence of modules for which to reset warnings registry on entry and
restore on exit. To work correctly, all 'ignore' filters should
filter by one of these modules.
Examples
--------
>>> import warnings
>>> with np.testing.clear_and_catch_warnings(
... modules=[np.core.fromnumeric]):
... warnings.simplefilter('always')
... warnings.filterwarnings('ignore', module='np.core.fromnumeric')
... # do something that raises a warning but ignore those in
... # np.core.fromnumeric
"""
class_modules = ()
def __init__(self, record=False, modules=()):
self.modules = set(modules).union(self.class_modules)
self._warnreg_copies = {}
super(clear_and_catch_warnings, self).__init__(record=record)
def __enter__(self):
for mod in self.modules:
if hasattr(mod, '__warningregistry__'):
mod_reg = mod.__warningregistry__
self._warnreg_copies[mod] = mod_reg.copy()
mod_reg.clear()
return super(clear_and_catch_warnings, self).__enter__()
def __exit__(self, *exc_info):
super(clear_and_catch_warnings, self).__exit__(*exc_info)
for mod in self.modules:
if hasattr(mod, '__warningregistry__'):
mod.__warningregistry__.clear()
if mod in self._warnreg_copies:
mod.__warningregistry__.update(self._warnreg_copies[mod])
class suppress_warnings:
"""
Context manager and decorator doing much the same as
``warnings.catch_warnings``.
However, it also provides a filter mechanism to work around
https://bugs.python.org/issue4180.
This bug causes Python before 3.4 to not reliably show warnings again
after they have been ignored once (even within catch_warnings). It
means that no "ignore" filter can be used easily, since following
tests might need to see the warning. Additionally it allows easier
specificity for testing warnings and can be nested.
Parameters
----------
forwarding_rule : str, optional
One of "always", "once", "module", or "location". Analogous to
the usual warnings module filter mode, it is useful to reduce
noise mostly on the outermost level. Unsuppressed and unrecorded
warnings will be forwarded based on this rule. Defaults to "always".
"location" is equivalent to the warnings "default", match by exact
location the warning warning originated from.
Notes
-----
Filters added inside the context manager will be discarded again
when leaving it. Upon entering all filters defined outside a
context will be applied automatically.
When a recording filter is added, matching warnings are stored in the
``log`` attribute as well as in the list returned by ``record``.
If filters are added and the ``module`` keyword is given, the
warning registry of this module will additionally be cleared when
applying it, entering the context, or exiting it. This could cause
warnings to appear a second time after leaving the context if they
were configured to be printed once (default) and were already
printed before the context was entered.
Nesting this context manager will work as expected when the
forwarding rule is "always" (default). Unfiltered and unrecorded
warnings will be passed out and be matched by the outer level.
On the outermost level they will be printed (or caught by another
warnings context). The forwarding rule argument can modify this
behaviour.
Like ``catch_warnings`` this context manager is not threadsafe.
Examples
--------
With a context manager::
with np.testing.suppress_warnings() as sup:
sup.filter(DeprecationWarning, "Some text")
sup.filter(module=np.ma.core)
log = sup.record(FutureWarning, "Does this occur?")
command_giving_warnings()
# The FutureWarning was given once, the filtered warnings were
# ignored. All other warnings abide outside settings (may be
# printed/error)
assert_(len(log) == 1)
assert_(len(sup.log) == 1) # also stored in log attribute
Or as a decorator::
sup = np.testing.suppress_warnings()
sup.filter(module=np.ma.core) # module must match exactly
@sup
def some_function():
# do something which causes a warning in np.ma.core
pass
"""
def __init__(self, forwarding_rule="always"):
self._entered = False
# Suppressions are either instance or defined inside one with block:
self._suppressions = []
if forwarding_rule not in {"always", "module", "once", "location"}:
raise ValueError("unsupported forwarding rule.")
self._forwarding_rule = forwarding_rule
def _clear_registries(self):
if hasattr(warnings, "_filters_mutated"):
# clearing the registry should not be necessary on new pythons,
# instead the filters should be mutated.
warnings._filters_mutated()
return
# Simply clear the registry, this should normally be harmless,
# note that on new pythons it would be invalidated anyway.
for module in self._tmp_modules:
if hasattr(module, "__warningregistry__"):
module.__warningregistry__.clear()
def _filter(self, category=Warning, message="", module=None, record=False):
if record:
record = [] # The log where to store warnings
else:
record = None
if self._entered:
if module is None:
warnings.filterwarnings(
"always", category=category, message=message)
else:
module_regex = module.__name__.replace('.', r'\.') + '$'
warnings.filterwarnings(
"always", category=category, message=message,
module=module_regex)
self._tmp_modules.add(module)
self._clear_registries()
self._tmp_suppressions.append(
(category, message, re.compile(message, re.I), module, record))
else:
self._suppressions.append(
(category, message, re.compile(message, re.I), module, record))
return record
def filter(self, category=Warning, message="", module=None):
"""
Add a new suppressing filter or apply it if the state is entered.
Parameters
----------
category : class, optional
Warning class to filter
message : string, optional
Regular expression matching the warning message.
module : module, optional
Module to filter for. Note that the module (and its file)
must match exactly and cannot be a submodule. This may make
it unreliable for external modules.
Notes
-----
When added within a context, filters are only added inside
the context and will be forgotten when the context is exited.
"""
self._filter(category=category, message=message, module=module,
record=False)
def record(self, category=Warning, message="", module=None):
"""
Append a new recording filter or apply it if the state is entered.
All warnings matching will be appended to the ``log`` attribute.
Parameters
----------
category : class, optional
Warning class to filter
message : string, optional
Regular expression matching the warning message.
module : module, optional
Module to filter for. Note that the module (and its file)
must match exactly and cannot be a submodule. This may make
it unreliable for external modules.
Returns
-------
log : list
A list which will be filled with all matched warnings.
Notes
-----
When added within a context, filters are only added inside
the context and will be forgotten when the context is exited.
"""
return self._filter(category=category, message=message, module=module,
record=True)
def __enter__(self):
if self._entered:
raise RuntimeError("cannot enter suppress_warnings twice.")
self._orig_show = warnings.showwarning
self._filters = warnings.filters
warnings.filters = self._filters[:]
self._entered = True
self._tmp_suppressions = []
self._tmp_modules = set()
self._forwarded = set()
self.log = [] # reset global log (no need to keep same list)
for cat, mess, _, mod, log in self._suppressions:
if log is not None:
del log[:] # clear the log
if mod is None:
warnings.filterwarnings(
"always", category=cat, message=mess)
else:
module_regex = mod.__name__.replace('.', r'\.') + '$'
warnings.filterwarnings(
"always", category=cat, message=mess,
module=module_regex)
self._tmp_modules.add(mod)
warnings.showwarning = self._showwarning
self._clear_registries()
return self
def __exit__(self, *exc_info):
warnings.showwarning = self._orig_show
warnings.filters = self._filters
self._clear_registries()
self._entered = False
del self._orig_show
del self._filters
def _showwarning(self, message, category, filename, lineno,
*args, use_warnmsg=None, **kwargs):
for cat, _, pattern, mod, rec in (
self._suppressions + self._tmp_suppressions)[::-1]:
if (issubclass(category, cat) and
pattern.match(message.args[0]) is not None):
if mod is None:
# Message and category match, either recorded or ignored
if rec is not None:
msg = WarningMessage(message, category, filename,
lineno, **kwargs)
self.log.append(msg)
rec.append(msg)
return
# Use startswith, because warnings strips the c or o from
# .pyc/.pyo files.
elif mod.__file__.startswith(filename):
# The message and module (filename) match
if rec is not None:
msg = WarningMessage(message, category, filename,
lineno, **kwargs)
self.log.append(msg)
rec.append(msg)
return
# There is no filter in place, so pass to the outside handler
# unless we should only pass it once
if self._forwarding_rule == "always":
if use_warnmsg is None:
self._orig_show(message, category, filename, lineno,
*args, **kwargs)
else:
self._orig_showmsg(use_warnmsg)
return
if self._forwarding_rule == "once":
signature = (message.args, category)
elif self._forwarding_rule == "module":
signature = (message.args, category, filename)
elif self._forwarding_rule == "location":
signature = (message.args, category, filename, lineno)
if signature in self._forwarded:
return
self._forwarded.add(signature)
if use_warnmsg is None:
self._orig_show(message, category, filename, lineno, *args,
**kwargs)
else:
self._orig_showmsg(use_warnmsg)
def __call__(self, func):
"""
Function decorator to apply certain suppressions to a whole
function.
"""
@wraps(func)
def new_func(*args, **kwargs):
with self:
return func(*args, **kwargs)
return new_func
@contextlib.contextmanager
def _assert_no_gc_cycles_context(name=None):
__tracebackhide__ = True # Hide traceback for py.test
# not meaningful to test if there is no refcounting
if not HAS_REFCOUNT:
yield
return
assert_(gc.isenabled())
gc.disable()
gc_debug = gc.get_debug()
try:
for i in range(100):
if gc.collect() == 0:
break
else:
raise RuntimeError(
"Unable to fully collect garbage - perhaps a __del__ method "
"is creating more reference cycles?")
gc.set_debug(gc.DEBUG_SAVEALL)
yield
# gc.collect returns the number of unreachable objects in cycles that
# were found -- we are checking that no cycles were created in the context
n_objects_in_cycles = gc.collect()
objects_in_cycles = gc.garbage[:]
finally:
del gc.garbage[:]
gc.set_debug(gc_debug)
gc.enable()
if n_objects_in_cycles:
name_str = f' when calling {name}' if name is not None else ''
raise AssertionError(
"Reference cycles were found{}: {} objects were collected, "
"of which {} are shown below:{}"
.format(
name_str,
n_objects_in_cycles,
len(objects_in_cycles),
''.join(
"\n {} object with id={}:\n {}".format(
type(o).__name__,
id(o),
pprint.pformat(o).replace('\n', '\n ')
) for o in objects_in_cycles
)
)
)
def assert_no_gc_cycles(*args, **kwargs):
"""
Fail if the given callable produces any reference cycles.
If called with all arguments omitted, may be used as a context manager:
with assert_no_gc_cycles():
do_something()
.. versionadded:: 1.15.0
Parameters
----------
func : callable
The callable to test.
\\*args : Arguments
Arguments passed to `func`.
\\*\\*kwargs : Kwargs
Keyword arguments passed to `func`.
Returns
-------
Nothing. The result is deliberately discarded to ensure that all cycles
are found.
"""
if not args:
return _assert_no_gc_cycles_context()
func = args[0]
args = args[1:]
with _assert_no_gc_cycles_context(name=func.__name__):
func(*args, **kwargs)
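# Hedged usage sketch (added by the editor, not from the original source): the
# helper below is an illustrative assumption showing both forms of
# assert_no_gc_cycles documented above.
def _example_assert_no_gc_cycles_usage():
    def make_list():
        return [1, 2, 3]    # plain list of ints, creates no reference cycles
    # Callable form: the return value is deliberately discarded by the helper.
    assert_no_gc_cycles(make_list)
    # Context-manager form.
    with assert_no_gc_cycles():
        make_list()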
def break_cycles():
"""
Break reference cycles by calling gc.collect
Objects can call other objects' methods (for instance, another object's
__del__) inside their own __del__. On PyPy, the interpreter only runs
between calls to gc.collect, so multiple calls are needed to completely
release all cycles.
"""
gc.collect()
if IS_PYPY:
# interpreter runs now, to call deleted objects' __del__ methods
gc.collect()
# two more, just to make sure
gc.collect()
gc.collect()
def requires_memory(free_bytes):
"""Decorator to skip a test if not enough memory is available"""
import pytest
def decorator(func):
@wraps(func)
def wrapper(*a, **kw):
msg = check_free_memory(free_bytes)
if msg is not None:
pytest.skip(msg)
try:
return func(*a, **kw)
except MemoryError:
# Probably ran out of memory regardless: don't regard as failure
pytest.xfail("MemoryError raised")
return wrapper
return decorator
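# Hedged usage sketch (added by the editor, not part of the original module):
# decorating a test so it is skipped unless roughly 2 GB of memory are free;
# the test body is an illustrative assumption and pytest must be importable.
def _example_requires_memory_usage():
    @requires_memory(free_bytes=2 * 1024**3)
    def test_two_gb_allocation():
        import numpy as np
        big = np.zeros(2 * 1024**3 // 8, dtype=np.float64)  # roughly 2 GB
        return big.nbytes
    return test_two_gb_allocation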
def check_free_memory(free_bytes):
"""
Check whether `free_bytes` amount of memory is currently free.
Returns: None if enough memory available, otherwise error message
"""
env_var = 'NPY_AVAILABLE_MEM'
env_value = os.environ.get(env_var)
if env_value is not None:
try:
mem_free = _parse_size(env_value)
except ValueError as exc:
raise ValueError(f'Invalid environment variable {env_var}: {exc}')
msg = (f'{free_bytes/1e9} GB memory required, but environment variable '
f'NPY_AVAILABLE_MEM={env_value} set')
else:
mem_free = _get_mem_available()
if mem_free is None:
msg = ("Could not determine available memory; set NPY_AVAILABLE_MEM "
"environment variable (e.g. NPY_AVAILABLE_MEM=16GB) to run "
"the test.")
mem_free = -1
else:
msg = f'{free_bytes/1e9} GB memory required, but {mem_free/1e9} GB available'
return msg if mem_free < free_bytes else None
def _parse_size(size_str):
"""Convert memory size strings ('12 GB' etc.) to float"""
suffixes = {'': 1, 'b': 1,
'k': 1000, 'm': 1000**2, 'g': 1000**3, 't': 1000**4,
'kb': 1000, 'mb': 1000**2, 'gb': 1000**3, 'tb': 1000**4,
'kib': 1024, 'mib': 1024**2, 'gib': 1024**3, 'tib': 1024**4}
size_re = re.compile(r'^\s*(\d+|\d+\.\d+)\s*({0})\s*$'.format(
'|'.join(suffixes.keys())), re.I)
m = size_re.match(size_str.lower())
if not m or m.group(2) not in suffixes:
raise ValueError(f'value {size_str!r} not a valid size')
return int(float(m.group(1)) * suffixes[m.group(2)])
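# Hedged usage sketch (added by the editor, not from the original source): the
# sizes below are illustrative and simply exercise _parse_size and
# check_free_memory as documented above.
def _example_memory_helpers_usage():
    assert _parse_size('12 GB') == 12 * 1000**3   # decimal suffix
    assert _parse_size('2 GiB') == 2 * 1024**3    # binary suffix
    # None when enough free memory is detected; an explanatory string otherwise.
    return check_free_memory(free_bytes=1)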
def _get_mem_available():
"""Return available memory in bytes, or None if unknown."""
try:
import psutil
return psutil.virtual_memory().available
except (ImportError, AttributeError):
pass
if sys.platform.startswith('linux'):
info = {}
with open('/proc/meminfo', 'r') as f:
for line in f:
p = line.split()
info[p[0].strip(':').lower()] = int(p[1]) * 1024
if 'memavailable' in info:
# Linux >= 3.14
return info['memavailable']
else:
return info['memfree'] + info['cached']
return None
def _no_tracing(func):
"""
Decorator to temporarily turn off tracing for the duration of a test.
Needed in tests that check refcounting, otherwise the tracing itself
influences the refcounts
"""
if not hasattr(sys, 'gettrace'):
return func
else:
@wraps(func)
def wrapper(*args, **kwargs):
original_trace = sys.gettrace()
try:
sys.settrace(None)
return func(*args, **kwargs)
finally:
sys.settrace(original_trace)
return wrapper
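# Hedged usage sketch (added by the editor, not part of the original module): a
# refcount-style probe wrapped with _no_tracing so coverage/debugger tracing
# does not perturb it; the probe body is an illustrative assumption.
@_no_tracing
def _example_refcount_probe():
    import sys
    obj = object()
    # sys.getrefcount is CPython-specific, hence the guarded lookup.
    return getattr(sys, 'getrefcount', lambda o: -1)(obj)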
| 33.85873
| 97
| 0.599374
|
969b817c6eb4bcec28a9b673d20d80823ec3a455
| 29
|
py
|
Python
|
python/testData/refactoring/rename/renameUpdatesImportReferences/before/bar.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/refactoring/rename/renameUpdatesImportReferences/before/bar.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/refactoring/rename/renameUpdatesImportReferences/before/bar.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
import foo
from foo import f
| 9.666667
| 17
| 0.793103
|
911d97d36f228fe8f68be1e7de5a43ee6a3724b6
| 1,073
|
py
|
Python
|
setup.py
|
GAUTAMMISTRY/pybiology
|
ff082055fb6ec973c800f85da5fa4c6ae9992940
|
[
"Unlicense"
] | null | null | null |
setup.py
|
GAUTAMMISTRY/pybiology
|
ff082055fb6ec973c800f85da5fa4c6ae9992940
|
[
"Unlicense"
] | null | null | null |
setup.py
|
GAUTAMMISTRY/pybiology
|
ff082055fb6ec973c800f85da5fa4c6ae9992940
|
[
"Unlicense"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
"Operating System :: OS Independent",
"License :: Freely Distributable",
'Programming Language :: Python :: 3',
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Topic :: Software Development :: Libraries :: Python Modules",
]
setuptools.setup(
name="pybiology", # GAUTAMMISTRY
version="0.0.1",
author="GAUTAM PARMAR",
author_email="gautammistry48@gmail.com",
description="A small example package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/GAUTAMMISTRY/pybiology",
packages=setuptools.find_packages(),
classifiers=classifiers,
python_requires='>=3.6',
)
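# Hedged usage sketch (added by the editor, not part of the original file):
# typical commands one might use to install, build, and publish this package.
# The tools below (build backend via setup.py, twine) are assumptions and are
# not prescribed by this setup.py.
#
#   python -m pip install .            # install locally
#   python setup.py sdist bdist_wheel  # build source and wheel distributions
#   python -m twine upload dist/*      # publish to PyPI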
| 32.515152
| 66
| 0.67288
|
42c5638bf5ecc7d2b6a1c56fa046cf42fb9132b8
| 46,226
|
py
|
Python
|
tests/test_functionality.py
|
Girgitt/yappi
|
f6fa1abaa4ec30c750b615e35176a76cdaaae0cc
|
[
"MIT"
] | null | null | null |
tests/test_functionality.py
|
Girgitt/yappi
|
f6fa1abaa4ec30c750b615e35176a76cdaaae0cc
|
[
"MIT"
] | null | null | null |
tests/test_functionality.py
|
Girgitt/yappi
|
f6fa1abaa4ec30c750b615e35176a76cdaaae0cc
|
[
"MIT"
] | 1
|
2018-03-26T15:30:42.000Z
|
2018-03-26T15:30:42.000Z
|
import os
import sys
import time
import yappi
import _yappi
import utils
import multiprocessing # added to fix http://bugs.python.org/issue15881 for > Py2.6
if sys.version_info < (2, 7): # use unittest2 for < Py2.7
import unittest2 as _unittest
else:
import unittest as _unittest
class BasicUsage(utils.YappiUnitTestCase):
def test_print_formatting(self):
def a():
pass
def b():
a()
func_cols={1:("name",48), 0:("ncall", 5), 2:("tsub", 8),}
thread_cols = {1:("name", 48), 0:("ttot", 8), }
yappi.start()
a(); b();
yappi.stop()
fs = yappi.get_func_stats()
cs = fs[1].children
ts = yappi.get_thread_stats()
#fs.print_all(out=sys.stderr, columns={1:("name", 70), })
#cs.print_all(out=sys.stderr, columns=func_cols)
#ts.print_all(out=sys.stderr, columns=thread_cols)
#cs.print_all(out=sys.stderr, columns={})
self.assertRaises(yappi.YappiError, fs.print_all, columns={1:("namee",9)})
self.assertRaises(yappi.YappiError, cs.print_all, columns={1:("dd",0)})
self.assertRaises(yappi.YappiError, ts.print_all, columns={1:("tidd",0)})
def test_get_clock(self):
yappi.set_clock_type('cpu')
self.assertEqual('cpu', yappi.get_clock_type())
clock_info = yappi.get_clock_info()
self.assertTrue('api' in clock_info)
self.assertTrue('resolution' in clock_info)
yappi.set_clock_type('wall')
self.assertEqual('wall', yappi.get_clock_type())
t0 = yappi.get_clock_time()
time.sleep(0.1)
duration = yappi.get_clock_time() - t0
self.assertAlmostEqual(0.1, duration, places=2)
def test_profile_decorator(self):
def aggregate(func, stats):
fname = "%s.profile" % (func.__name__)
try:
stats.add(fname)
except IOError:
pass
stats.save(fname)
raise Exception("messing around")
@yappi.profile(return_callback=aggregate)
def a(x, y):
if x+y == 25:
raise Exception("")
return x+y
def b():
pass
try:
os.remove("a.profile") # remove the one from prev test, if available
except:
pass
# global profile is on to mess things up
yappi.start()
b()
# assert functionality and call function at same time
try:
self.assertEqual(a(1, 2), 3)
except:
pass
try:
self.assertEqual(a(2, 5), 7)
except:
pass
try:
a(4, 21)
except:
pass
stats = yappi.get_func_stats().add("a.profile")
fsa = utils.find_stat_by_name(stats, 'a')
self.assertEqual(fsa.ncall, 3)
self.assertEqual(len(stats), 1) # b() should be cleared out.
@yappi.profile(return_callback=aggregate)
def count_down_rec(n):
if n == 0:
return
count_down_rec(n-1)
try:
os.remove("count_down_rec.profile") # remove the one from prev test, if available
except:
pass
try:
count_down_rec(4)
except:
pass
try:
count_down_rec(3)
except:
pass
stats = yappi.YFuncStats("count_down_rec.profile")
fsrec = utils.find_stat_by_name(stats, 'count_down_rec')
self.assertEqual(fsrec.ncall, 9)
self.assertEqual(fsrec.nactualcall, 2)
def test_strip_dirs(self):
def a():
pass
stats = utils.run_and_get_func_stats(a,)
stats.strip_dirs()
fsa = utils.find_stat_by_name(stats, "a")
self.assertEqual(fsa.module, os.path.basename(fsa.module))
def test_yappi_overhead(self):
import time
LOOP_COUNT = 10000
def a(): pass
def b():
for i in range(LOOP_COUNT): a()
t0 = time.time()
yappi.start()
b()
yappi.stop()
time_with_yappi = time.time() - t0
t0 = time.time()
b()
time_without_yappi = time.time() - t0
if time_without_yappi == 0:
time_without_yappi = 0.000001
# In the latest v0.82, I measured this as close to 7.0 on my machine.
# However, 83% of that overhead comes from tickcount(); the other 17%
# seems to be evenly distributed across the internal bookkeeping
# structures/algorithms, which seems acceptable. Note that our test only
# profiles one function at a time over a short interval. Profiling a high
# number of functions in a small time window is a different beast (and
# pretty unlikely in most applications). So, as a conclusion: I cannot see
# any optimization window for Yappi that is worth implementing, as we
# would only be optimizing 17% of the time.
sys.stderr.write("\r\nYappi puts %0.1f times overhead to the profiled application in average.\r\n" % \
(time_with_yappi / time_without_yappi))
def test_clear_stats_while_running(self):
def a():
pass
yappi.start()
a()
yappi.clear_stats()
a()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
self.assertEqual(fsa.ncall, 1)
def test_generator(self):
def _gen(n):
while(n > 0):
yield n
n -= 1
yappi.start()
for x in _gen(5):
pass
self.assertTrue(yappi.convert2pstats(yappi.get_func_stats()) is not None)
def test_slice_child_stats_and_strip_dirs(self):
def b():
for i in range(10000000): pass
def a():
b()
yappi.start(builtins=True)
a()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
self.assertTrue(fsa.children[0:1] is not None)
prev_afullname = fsa.full_name
prev_bchildfullname = fsa.children[fsb].full_name
stats.strip_dirs()
self.assertTrue(len(prev_afullname) > len(fsa.full_name))
self.assertTrue(len(prev_bchildfullname) > len(fsa.children[fsb].full_name))
def test_children_stat_functions(self):
_timings = {"a_1":5, "b_1":3, "c_1":1}
_yappi._set_test_timings(_timings)
def b():
pass
def c():
pass
def a():
b()
c()
yappi.start()
a()
b() # non-child call
c() # non-child call
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
childs_of_a = fsa.children.get().sort("tavg", "desc")
prev_item = None
for item in childs_of_a:
if prev_item:
self.assertTrue(prev_item.tavg > item.tavg)
prev_item = item
childs_of_a.sort("name", "desc")
prev_item = None
for item in childs_of_a:
if prev_item:
self.assertTrue(prev_item.name > item.name)
prev_item = item
childs_of_a.clear()
self.assertTrue(childs_of_a.empty())
def test_no_stats_different_clock_type_load(self):
def a(): pass
yappi.start()
a()
yappi.stop()
yappi.get_func_stats().save("ystats1.ys")
yappi.clear_stats()
yappi.set_clock_type("WALL")
yappi.start()
yappi.stop()
stats = yappi.get_func_stats().add("ystats1.ys")
fsa = utils.find_stat_by_name(stats, 'a')
self.assertTrue(fsa is not None)
def test_subsequent_profile(self):
_timings = {"a_1":1, "b_1":1}
_yappi._set_test_timings(_timings)
def a(): pass
def b(): pass
yappi.start()
a()
yappi.stop()
yappi.start()
b()
yappi.stop()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
self.assertTrue(fsa is not None)
self.assertTrue(fsb is not None)
self.assertEqual(fsa.ttot, 1)
self.assertEqual(fsb.ttot, 1)
def test_lambda(self):
import time
f = lambda : time.sleep(0.3)
yappi.set_clock_type("wall")
yappi.start()
f()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, '<lambda>')
self.assertTrue(fsa.ttot > 0.1)
def test_module_stress(self):
self.assertEqual(yappi.is_running(), False)
yappi.start()
yappi.clear_stats()
self.assertRaises(_yappi.error, yappi.set_clock_type, "wall")
yappi.stop()
yappi.clear_stats()
yappi.set_clock_type("cpu")
self.assertRaises(yappi.YappiError, yappi.set_clock_type, "dummy")
self.assertEqual(yappi.is_running(), False)
yappi.clear_stats()
yappi.clear_stats()
def test_stat_sorting(self):
_timings = {"a_1":13,"b_1":10,"a_2":6,"b_2":1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
b()
def b():
if self._ncall == 2:
return
self._ncall += 1
a()
stats = utils.run_and_get_func_stats(a)
stats = stats.sort("totaltime", "desc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.ttot >= stat.ttot)
prev_stat = stat
stats = stats.sort("totaltime", "asc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.ttot <= stat.ttot)
prev_stat = stat
stats = stats.sort("avgtime", "asc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.tavg <= stat.tavg)
prev_stat = stat
stats = stats.sort("name", "asc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.name <= stat.name)
prev_stat = stat
stats = stats.sort("subtime", "asc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.tsub <= stat.tsub)
prev_stat = stat
self.assertRaises(yappi.YappiError, stats.sort, "invalid_func_sorttype_arg")
self.assertRaises(yappi.YappiError, stats.sort, "totaltime", "invalid_func_sortorder_arg")
def test_start_flags(self):
self.assertEqual(_yappi._get_start_flags(), None)
yappi.start()
def a(): pass
a()
self.assertEqual(_yappi._get_start_flags()["profile_builtins"], 0)
self.assertEqual(_yappi._get_start_flags()["profile_multithread"], 1)
self.assertEqual(len(yappi.get_thread_stats()), 1)
def test_builtin_profiling(self):
import threading
def a():
import time
time.sleep(0.4) # is a builtin function
yappi.set_clock_type('wall')
yappi.start(builtins=True)
a()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'sleep')
self.assertTrue(fsa is not None)
self.assertTrue(fsa.ttot > 0.3)
yappi.stop()
yappi.clear_stats()
def a():
pass
yappi.start()
t = threading.Thread(target=a)
t.start()
t.join()
stats = yappi.get_func_stats()
def test_singlethread_profiling(self):
import threading
import time
yappi.set_clock_type('wall')
def a():
time.sleep(0.2)
class Worker1(threading.Thread):
def a(self):
time.sleep(0.3)
def run(self):
self.a()
yappi.start(profile_threads=False)
c = Worker1()
c.start()
c.join()
a()
stats = yappi.get_func_stats()
fsa1 = utils.find_stat_by_name(stats, 'Worker1.a')
fsa2 = utils.find_stat_by_name(stats, 'a')
self.assertTrue(fsa1 is None)
self.assertTrue(fsa2 is not None)
self.assertTrue(fsa2.ttot > 0.1)
class StatSaveScenarios(utils.YappiUnitTestCase):
def test_pstats_conversion(self):
def pstat_id(fs):
return (fs.module, fs.lineno, fs.name)
def a():
d()
def b():
d()
def c():
pass
def d():
pass
_timings = {"a_1":12,"b_1":7,"c_1":5,"d_1":2}
_yappi._set_test_timings(_timings)
stats = utils.run_and_get_func_stats(a,)
stats.strip_dirs()
stats.save("a1.pstats", type="pstat")
fsa_pid = pstat_id(utils.find_stat_by_name(stats, "a"))
fsd_pid = pstat_id(utils.find_stat_by_name(stats, "d"))
yappi.clear_stats()
_yappi._set_test_timings(_timings)
stats = utils.run_and_get_func_stats(a,)
stats.strip_dirs()
stats.save("a2.pstats", type="pstat")
yappi.clear_stats()
_yappi._set_test_timings(_timings)
stats = utils.run_and_get_func_stats(b,)
stats.strip_dirs()
stats.save("b1.pstats", type="pstat")
fsb_pid = pstat_id(utils.find_stat_by_name(stats, "b"))
yappi.clear_stats()
_yappi._set_test_timings(_timings)
stats = utils.run_and_get_func_stats(c,)
stats.strip_dirs()
stats.save("c1.pstats", type="pstat")
fsc_pid = pstat_id(utils.find_stat_by_name(stats, "c"))
# merge saved stats and check pstats values are correct
import pstats
p = pstats.Stats('a1.pstats', 'a2.pstats', 'b1.pstats', 'c1.pstats')
p.strip_dirs()
# ct = ttot, tt = tsub
(cc, nc, tt, ct, callers) = p.stats[fsa_pid]
self.assertEqual(cc, nc, 2)
self.assertEqual(tt, 20)
self.assertEqual(ct, 24)
(cc, nc, tt, ct, callers) = p.stats[fsd_pid]
self.assertEqual(cc, nc, 3)
self.assertEqual(tt, 6)
self.assertEqual(ct, 6)
self.assertEqual(len(callers), 2)
(cc, nc, tt, ct) = callers[fsa_pid]
self.assertEqual(cc, nc, 2)
self.assertEqual(tt, 4)
self.assertEqual(ct, 4)
(cc, nc, tt, ct) = callers[fsb_pid]
self.assertEqual(cc, nc, 1)
self.assertEqual(tt, 2)
self.assertEqual(ct, 2)
def test_merge_stats(self):
_timings = {"a_1":15,"b_1":14,"c_1":12,"d_1":10,"e_1":9,"f_1":7,"g_1":6,"h_1":5,"i_1":1}
_yappi._set_test_timings(_timings)
def a():
b()
def b():
c()
def c():
d()
def d():
e()
def e():
f()
def f():
g()
def g():
h()
def h():
i()
def i():
pass
yappi.start()
a()
a()
yappi.stop()
stats = yappi.get_func_stats()
self.assertRaises(NotImplementedError, stats.save, "", "INVALID_SAVE_TYPE")
stats.save("ystats2.ys")
yappi.clear_stats()
_yappi._set_test_timings(_timings)
yappi.start()
a()
stats = yappi.get_func_stats().add("ystats2.ys")
fsa = utils.find_stat_by_name(stats, "a")
fsb = utils.find_stat_by_name(stats, "b")
fsc = utils.find_stat_by_name(stats, "c")
fsd = utils.find_stat_by_name(stats, "d")
fse = utils.find_stat_by_name(stats, "e")
fsf = utils.find_stat_by_name(stats, "f")
fsg = utils.find_stat_by_name(stats, "g")
fsh = utils.find_stat_by_name(stats, "h")
fsi = utils.find_stat_by_name(stats, "i")
self.assertEqual(fsa.ttot, 45)
self.assertEqual(fsa.ncall, 3)
self.assertEqual(fsa.nactualcall, 3)
self.assertEqual(fsa.tsub, 3)
self.assertEqual(fsa.children[fsb].ttot, fsb.ttot)
self.assertEqual(fsa.children[fsb].tsub, fsb.tsub)
self.assertEqual(fsb.children[fsc].ttot, fsc.ttot)
self.assertEqual(fsb.children[fsc].tsub, fsc.tsub)
self.assertEqual(fsc.tsub, 6)
self.assertEqual(fsc.children[fsd].ttot, fsd.ttot)
self.assertEqual(fsc.children[fsd].tsub, fsd.tsub)
self.assertEqual(fsd.children[fse].ttot, fse.ttot)
self.assertEqual(fsd.children[fse].tsub, fse.tsub)
self.assertEqual(fse.children[fsf].ttot, fsf.ttot)
self.assertEqual(fse.children[fsf].tsub, fsf.tsub)
self.assertEqual(fsf.children[fsg].ttot, fsg.ttot)
self.assertEqual(fsf.children[fsg].tsub, fsg.tsub)
self.assertEqual(fsg.ttot, 18)
self.assertEqual(fsg.tsub, 3)
self.assertEqual(fsg.children[fsh].ttot, fsh.ttot)
self.assertEqual(fsg.children[fsh].tsub, fsh.tsub)
self.assertEqual(fsh.ttot, 15)
self.assertEqual(fsh.tsub, 12)
self.assertEqual(fsh.tavg, 5)
self.assertEqual(fsh.children[fsi].ttot, fsi.ttot)
self.assertEqual(fsh.children[fsi].tsub, fsi.tsub)
#stats.debug_print()
def test_merge_multithreaded_stats(self):
import threading
import _yappi
timings = {"a_1":2, "b_1":1}
_yappi._set_test_timings(timings)
def a(): pass
def b(): pass
yappi.start()
t = threading.Thread(target=a)
t.start()
t.join()
t = threading.Thread(target=b)
t.start()
t.join()
yappi.get_func_stats().save("ystats1.ys")
yappi.clear_stats()
_yappi._set_test_timings(timings)
self.assertEqual(len(yappi.get_func_stats()), 0)
self.assertEqual(len(yappi.get_thread_stats()), 1)
t = threading.Thread(target=a)
t.start()
t.join()
self.assertEqual(_yappi._get_start_flags()["profile_builtins"], 0)
self.assertEqual(_yappi._get_start_flags()["profile_multithread"], 1)
yappi.get_func_stats().save("ystats2.ys")
stats = yappi.YFuncStats(["ystats1.ys", "ystats2.ys",])
fsa = utils.find_stat_by_name(stats, "a")
fsb = utils.find_stat_by_name(stats, "b")
self.assertEqual(fsa.ncall, 2)
self.assertEqual(fsb.ncall, 1)
self.assertEqual(fsa.tsub, fsa.ttot, 4)
self.assertEqual(fsb.tsub, fsb.ttot, 1)
def test_merge_load_different_clock_types(self):
import threading
yappi.start(builtins=True)
def a(): b()
def b(): c()
def c(): pass
t = threading.Thread(target=a)
t.start()
t.join()
yappi.get_func_stats().sort("name", "asc").save("ystats1.ys")
yappi.stop()
yappi.clear_stats()
yappi.start(builtins=False)
t = threading.Thread(target=a)
t.start()
t.join()
yappi.get_func_stats().save("ystats2.ys")
yappi.stop()
self.assertRaises(_yappi.error, yappi.set_clock_type, "wall")
yappi.clear_stats()
yappi.set_clock_type("wall")
yappi.start()
t = threading.Thread(target=a)
t.start()
t.join()
yappi.get_func_stats().save("ystats3.ys")
self.assertRaises(yappi.YappiError, yappi.YFuncStats().add("ystats1.ys").add, "ystats3.ys")
stats = yappi.YFuncStats(["ystats1.ys", "ystats2.ys"]).sort("name")
fsa = utils.find_stat_by_name(stats, "a")
fsb = utils.find_stat_by_name(stats, "b")
fsc = utils.find_stat_by_name(stats, "c")
self.assertEqual(fsa.ncall, 2)
self.assertEqual(fsa.ncall, fsb.ncall, fsc.ncall)
def test_merge_aabab_aabbc(self):
_timings = {"a_1":15,"a_2":14,"b_1":12,"a_3":10,"b_2":9, "c_1":4}
_yappi._set_test_timings(_timings)
def a():
if self._ncall == 1:
self._ncall += 1
a()
elif self._ncall == 5:
self._ncall += 1
a()
else:
b()
def b():
if self._ncall == 2:
self._ncall += 1
a()
elif self._ncall == 6:
self._ncall += 1
b()
elif self._ncall == 7:
c()
else:
return
def c():
pass
self._ncall = 1
stats = utils.run_and_get_func_stats(a,)
stats.save("ystats1.ys")
yappi.clear_stats()
_yappi._set_test_timings(_timings)
#stats.print_all()
self._ncall = 5
stats = utils.run_and_get_func_stats(a,)
stats.save("ystats2.ys")
#stats.print_all()
def a(): # same name but another function(code object)
pass
yappi.start()
a()
stats = yappi.get_func_stats().add(["ystats1.ys", "ystats2.ys"])
#stats.print_all()
self.assertEqual(len(stats), 4)
fsa = None
for stat in stats:
if stat.name == "a" and stat.ttot == 45:
fsa = stat
break
self.assertTrue(fsa is not None)
self.assertEqual(fsa.ncall, 7)
self.assertEqual(fsa.nactualcall, 3)
self.assertEqual(fsa.ttot, 45)
self.assertEqual(fsa.tsub, 10)
fsb = utils.find_stat_by_name(stats, "b")
fsc = utils.find_stat_by_name(stats, "c")
self.assertEqual(fsb.ncall, 6)
self.assertEqual(fsb.nactualcall, 3)
self.assertEqual(fsb.ttot, 36)
self.assertEqual(fsb.tsub, 27)
self.assertEqual(fsb.tavg, 6)
self.assertEqual(fsc.ttot, 8)
self.assertEqual(fsc.tsub, 8)
self.assertEqual(fsc.tavg, 4)
self.assertEqual(fsc.nactualcall, fsc.ncall, 2)
"""
"""
class MultithreadedScenarios(utils.YappiUnitTestCase):
def test_subsequent_profile(self):
import threading
WORKER_COUNT = 5
def a(): pass
def b(): pass
def c(): pass
_timings = {"a_1":3,"b_1":2,"c_1":1,}
yappi.start()
def g(): pass
g()
yappi.stop()
yappi.clear_stats()
_yappi._set_test_timings(_timings)
yappi.start()
_dummy = []
for i in range(WORKER_COUNT):
t = threading.Thread(target=a)
t.start()
t.join()
for i in range(WORKER_COUNT):
t = threading.Thread(target=b)
t.start()
_dummy.append(t)
t.join()
for i in range(WORKER_COUNT):
t = threading.Thread(target=a)
t.start()
t.join()
for i in range(WORKER_COUNT):
t = threading.Thread(target=c)
t.start()
t.join()
yappi.stop()
yappi.start()
def f():
pass
f()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
self.assertEqual(fsa.ncall, 10)
self.assertEqual(fsb.ncall, 5)
self.assertEqual(fsc.ncall, 5)
self.assertEqual(fsa.ttot, fsa.tsub, 30)
self.assertEqual(fsb.ttot, fsb.tsub, 10)
self.assertEqual(fsc.ttot, fsc.tsub, 5)
# MACOSx optimizes by only creating one worker thread
self.assertTrue(len(yappi.get_thread_stats()) >= 2)
def test_basic(self):
import threading
import time
yappi.set_clock_type('wall')
def a():
time.sleep(0.2)
class Worker1(threading.Thread):
def a(self):
time.sleep(0.3)
def run(self):
self.a()
yappi.start(builtins=False, profile_threads=True)
c = Worker1()
c.start()
c.join()
a()
stats = yappi.get_func_stats()
fsa1 = utils.find_stat_by_name(stats, 'Worker1.a')
fsa2 = utils.find_stat_by_name(stats, 'a')
self.assertTrue(fsa1 is not None)
self.assertTrue(fsa2 is not None)
self.assertTrue(fsa1.ttot > 0.2)
self.assertTrue(fsa2.ttot > 0.1)
tstats = yappi.get_thread_stats()
self.assertEqual(len(tstats), 2)
tsa = utils.find_stat_by_name(tstats, 'Worker1')
tsm = utils.find_stat_by_name(tstats, '_MainThread')
self.assertTrue(tsa is not None)
self.assertTrue(tsm is not None) # FIX: I see this fails sometimes?
def test_ctx_stats(self):
from threading import Thread
DUMMY_WORKER_COUNT = 5
yappi.start()
class DummyThread(Thread): pass
def dummy_worker():
pass
for i in range(DUMMY_WORKER_COUNT):
t = DummyThread(target=dummy_worker)
t.start()
t.join()
yappi.stop()
stats = yappi.get_thread_stats()
tsa = utils.find_stat_by_name(stats, "DummyThread")
self.assertTrue(tsa is not None)
yappi.clear_stats()
import time
time.sleep(1.0)
_timings = {"a_1":6,"b_1":5,"c_1":3, "d_1":1, "a_2":4,"b_2":3,"c_2":2, "d_2":1}
_yappi._set_test_timings(_timings)
class Thread1(Thread): pass
class Thread2(Thread): pass
def a():
b()
def b():
c()
def c():
d()
def d():
time.sleep(0.6)
yappi.set_clock_type("wall")
yappi.start()
t1 = Thread1(target=a)
t1.start()
t2 = Thread2(target=a)
t2.start()
t1.join()
t2.join()
stats = yappi.get_thread_stats()
# the first clear_stats clears the context table?
tsa = utils.find_stat_by_name(stats, "DummyThread")
self.assertTrue(tsa is None)
tst1 = utils.find_stat_by_name(stats, "Thread1")
tst2 = utils.find_stat_by_name(stats, "Thread2")
tsmain = utils.find_stat_by_name(stats, "_MainThread")
#stats.print_all()
self.assertTrue(len(stats) == 3)
self.assertTrue(tst1 is not None)
self.assertTrue(tst2 is not None)
self.assertTrue(tsmain is not None) # I see this fail sometimes, probably because
# Py_ImportNoBlock() occasionally fails to import and resolve the thread class name.
self.assertTrue(1.0 > tst2.ttot >= 0.5)
self.assertTrue(1.0 > tst1.ttot >= 0.5)
# test sorting of the ctx stats
stats = stats.sort("totaltime", "desc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.ttot >= stat.ttot)
prev_stat = stat
stats = stats.sort("totaltime", "asc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.ttot <= stat.ttot)
prev_stat = stat
stats = stats.sort("schedcount", "desc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.sched_count >= stat.sched_count)
prev_stat = stat
stats = stats.sort("name", "desc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.name >= stat.name)
prev_stat = stat
self.assertRaises(yappi.YappiError, stats.sort, "invalid_thread_sorttype_arg")
self.assertRaises(yappi.YappiError, stats.sort, "invalid_thread_sortorder_arg")
def test_producer_consumer_with_queues(self):
# we currently just stress yappi, no functionality test is done here.
yappi.start()
import time
if utils.is_py3x():
from queue import Queue
else:
from Queue import Queue
from threading import Thread
WORKER_THREAD_COUNT = 50
WORK_ITEM_COUNT = 2000
def worker():
while True:
item = q.get()
# do the work with item
q.task_done()
q = Queue()
for i in range(WORKER_THREAD_COUNT):
t = Thread(target=worker)
t.daemon = True
t.start()
for item in range(WORK_ITEM_COUNT):
q.put(item)
q.join()# block until all tasks are done
#yappi.get_func_stats().sort("callcount").print_all()
yappi.stop()
def test_temporary_lock_waiting(self):
import threading
import time
yappi.start()
_lock = threading.Lock()
def worker():
_lock.acquire()
try:
time.sleep(1.0)
finally:
_lock.release()
t1 = threading.Thread(target=worker)
t2 = threading.Thread(target=worker)
t1.start()
t2.start()
t1.join()
t2.join()
#yappi.get_func_stats().sort("callcount").print_all()
yappi.stop()
@_unittest.skipIf(os.name != "posix", "requires Posix compliant OS")
def test_signals_with_blocking_calls(self):
import signal, os, time
# just to verify if signal is handled correctly and stats/yappi are not corrupted.
def handler(signum, frame):
raise Exception("Signal handler executed!")
yappi.start()
signal.signal(signal.SIGALRM, handler)
signal.alarm(1)
self.assertRaises(Exception, time.sleep, 2)
stats = yappi.get_func_stats()
fsh = utils.find_stat_by_name(stats, "handler")
self.assertTrue(fsh is not None)
@_unittest.skipIf(not sys.version_info >= (3, 2), "requires Python 3.2")
def test_concurrent_futures(self):
yappi.start()
import time
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(max_workers=5) as executor:
f = executor.submit(pow, 5, 2)
self.assertEqual(f.result(), 25)
time.sleep(1.0)
yappi.stop()
@_unittest.skipIf(not sys.version_info >= (3, 2), "requires Python 3.2")
def test_barrier(self):
yappi.start()
import threading
b = threading.Barrier(2, timeout=1)
def worker():
try:
b.wait()
except threading.BrokenBarrierError:
pass
except Exception:
raise Exception("BrokenBarrierError not raised")
t1 = threading.Thread(target=worker)
t1.start()
#b.wait()
t1.join()
yappi.stop()
class NonRecursiveFunctions(utils.YappiUnitTestCase):
def test_abcd(self):
_timings = {"a_1":6,"b_1":5,"c_1":3, "d_1":1}
_yappi._set_test_timings(_timings)
def a():
b()
def b():
c()
def c():
d()
def d():
pass
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
fsd = utils.find_stat_by_name(stats, 'd')
cfsab = fsa.children[fsb]
cfsbc = fsb.children[fsc]
cfscd = fsc.children[fsd]
self.assertEqual(fsa.ttot , 6)
self.assertEqual(fsa.tsub , 1)
self.assertEqual(fsb.ttot , 5)
self.assertEqual(fsb.tsub , 2)
self.assertEqual(fsc.ttot , 3)
self.assertEqual(fsc.tsub , 2)
self.assertEqual(fsd.ttot , 1)
self.assertEqual(fsd.tsub , 1)
self.assertEqual(cfsab.ttot , 5)
self.assertEqual(cfsab.tsub , 2)
self.assertEqual(cfsbc.ttot , 3)
self.assertEqual(cfsbc.tsub , 2)
self.assertEqual(cfscd.ttot , 1)
self.assertEqual(cfscd.tsub , 1)
def test_stop_in_middle(self):
import time
_timings = {"a_1":6,"b_1":4}
_yappi._set_test_timings(_timings)
def a():
b()
yappi.stop()
def b():
time.sleep(0.2)
yappi.start()
a()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
self.assertEqual(fsa.ncall , 1)
self.assertEqual(fsa.nactualcall, 0)
self.assertEqual(fsa.ttot , 0) # no call_leave called
self.assertEqual(fsa.tsub , 0) # no call_leave called
self.assertEqual(fsb.ttot , 4)
class RecursiveFunctions(utils.YappiUnitTestCase):
def test_fibonacci(self):
def fib(n):
if n > 1:
return fib(n-1) + fib(n-2)
else:
return n
stats = utils.run_and_get_func_stats(fib, 22)
fs = utils.find_stat_by_name(stats, 'fib')
self.assertEqual(fs.ncall, 57313)
self.assertEqual(fs.ttot, fs.tsub)
def test_abcadc(self):
_timings = {"a_1":20,"b_1":19,"c_1":17, "a_2":13, "d_1":12, "c_2":10, "a_3":5}
_yappi._set_test_timings(_timings)
def a(n):
if n == 3:
return
if n == 1 + 1:
d(n)
else:
b(n)
def b(n):
c(n)
def c(n):
a(n+1)
def d(n):
c(n)
stats = utils.run_and_get_func_stats(a, 1)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
fsd = utils.find_stat_by_name(stats, 'd')
self.assertEqual(fsa.ncall, 3)
self.assertEqual(fsa.nactualcall, 1)
self.assertEqual(fsa.ttot, 20)
self.assertEqual(fsa.tsub, 7)
self.assertEqual(fsb.ttot, 19)
self.assertEqual(fsb.tsub, 2)
self.assertEqual(fsc.ttot, 17)
self.assertEqual(fsc.tsub, 9)
self.assertEqual(fsd.ttot, 12)
self.assertEqual(fsd.tsub, 2)
cfsca = fsc.children[fsa]
self.assertEqual(cfsca.nactualcall, 0)
self.assertEqual(cfsca.ncall, 2)
self.assertEqual(cfsca.ttot, 13)
self.assertEqual(cfsca.tsub, 6)
def test_aaaa(self):
_timings = {"d_1":9, "d_2":7, "d_3":3, "d_4":2}
_yappi._set_test_timings(_timings)
def d(n):
if n == 3:
return
d(n+1)
stats = utils.run_and_get_func_stats(d, 0)
fsd = utils.find_stat_by_name(stats, 'd')
self.assertEqual(fsd.ncall , 4)
self.assertEqual(fsd.nactualcall , 1)
self.assertEqual(fsd.ttot , 9)
self.assertEqual(fsd.tsub , 9)
cfsdd = fsd.children[fsd]
self.assertEqual(cfsdd.ttot , 7)
self.assertEqual(cfsdd.tsub , 7)
self.assertEqual(cfsdd.ncall , 3)
self.assertEqual(cfsdd.nactualcall , 0)
def test_abcabc(self):
_timings = {"a_1":20,"b_1":19,"c_1":17, "a_2":13, "b_2":11, "c_2":9, "a_3":6}
_yappi._set_test_timings(_timings)
def a(n):
if n == 3:
return
else:
b(n)
def b(n):
c(n)
def c(n):
a(n+1)
stats = utils.run_and_get_func_stats(a, 1)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
self.assertEqual(fsa.ncall , 3)
self.assertEqual(fsa.nactualcall , 1)
self.assertEqual(fsa.ttot , 20)
self.assertEqual(fsa.tsub , 9)
self.assertEqual(fsb.ttot , 19)
self.assertEqual(fsb.tsub , 4)
self.assertEqual(fsc.ttot , 17)
self.assertEqual(fsc.tsub , 7)
cfsab = fsa.children[fsb]
cfsbc = fsb.children[fsc]
cfsca = fsc.children[fsa]
self.assertEqual(cfsab.ttot , 19)
self.assertEqual(cfsab.tsub , 4)
self.assertEqual(cfsbc.ttot , 17)
self.assertEqual(cfsbc.tsub , 7)
self.assertEqual(cfsca.ttot , 13)
self.assertEqual(cfsca.tsub , 8)
def test_abcbca(self):
_timings = {"a_1":10,"b_1":9,"c_1":7,"b_2":4,"c_2":2,"a_2":1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 1:
b()
else:
return
def b():
c()
def c():
if self._ncall == 1:
self._ncall += 1
b()
else:
a()
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
cfsab = fsa.children[fsb]
cfsbc = fsb.children[fsc]
cfsca = fsc.children[fsa]
self.assertEqual(fsa.ttot , 10)
self.assertEqual(fsa.tsub , 2)
self.assertEqual(fsb.ttot , 9)
self.assertEqual(fsb.tsub , 4)
self.assertEqual(fsc.ttot , 7)
self.assertEqual(fsc.tsub , 4)
self.assertEqual(cfsab.ttot , 9)
self.assertEqual(cfsab.tsub , 2)
self.assertEqual(cfsbc.ttot , 7)
self.assertEqual(cfsbc.tsub , 4)
self.assertEqual(cfsca.ttot , 1)
self.assertEqual(cfsca.tsub , 1)
self.assertEqual(cfsca.ncall , 1)
self.assertEqual(cfsca.nactualcall , 0)
def test_aabccb(self):
_timings = {"a_1":13,"a_2":11,"b_1":9,"c_1":5,"c_2":3,"b_2":1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 1:
self._ncall += 1
a()
else:
b()
def b():
if self._ncall == 3:
return
else:
c()
def c():
if self._ncall == 2:
self._ncall += 1
c()
else:
b()
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
cfsaa = fsa.children[fsa.index]
cfsab = fsa.children[fsb]
cfsbc = fsb.children[fsc.full_name]
cfscc = fsc.children[fsc]
cfscb = fsc.children[fsb]
self.assertEqual(fsb.ttot , 9)
self.assertEqual(fsb.tsub , 5)
self.assertEqual(cfsbc.ttot , 5)
self.assertEqual(cfsbc.tsub , 2)
self.assertEqual(fsa.ttot , 13)
self.assertEqual(fsa.tsub , 4)
self.assertEqual(cfsab.ttot , 9)
self.assertEqual(cfsab.tsub , 4)
self.assertEqual(cfsaa.ttot , 11)
self.assertEqual(cfsaa.tsub , 2)
self.assertEqual(fsc.ttot , 5)
self.assertEqual(fsc.tsub , 4)
def test_abaa(self):
_timings = {"a_1":13,"b_1":10,"a_2":9,"a_3":5}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 1:
b()
elif self._ncall == 2:
self._ncall += 1
a()
else:
return
def b():
self._ncall += 1
a()
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
cfsaa = fsa.children[fsa]
cfsba = fsb.children[fsa]
self.assertEqual(fsb.ttot , 10)
self.assertEqual(fsb.tsub , 1)
self.assertEqual(fsa.ttot , 13)
self.assertEqual(fsa.tsub , 12)
self.assertEqual(cfsaa.ttot , 5)
self.assertEqual(cfsaa.tsub , 5)
self.assertEqual(cfsba.ttot , 9)
self.assertEqual(cfsba.tsub , 4)
def test_aabb(self):
_timings = {"a_1":13,"a_2":10,"b_1":9,"b_2":5}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 1:
self._ncall += 1
a()
elif self._ncall == 2:
b()
else:
return
def b():
if self._ncall == 2:
self._ncall += 1
b()
else:
return
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
cfsaa = fsa.children[fsa]
cfsab = fsa.children[fsb]
cfsbb = fsb.children[fsb]
self.assertEqual(fsa.ttot , 13)
self.assertEqual(fsa.tsub , 4)
self.assertEqual(fsb.ttot , 9)
self.assertEqual(fsb.tsub , 9)
self.assertEqual(cfsaa.ttot , 10)
self.assertEqual(cfsaa.tsub , 1)
self.assertEqual(cfsab.ttot , 9)
self.assertEqual(cfsab.tsub , 4)
self.assertEqual(cfsbb.ttot , 5)
self.assertEqual(cfsbb.tsub , 5)
def test_abbb(self):
_timings = {"a_1":13,"b_1":10,"b_2":6,"b_3":1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 1:
b()
def b():
if self._ncall == 3:
return
self._ncall += 1
b()
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
cfsab = fsa.children[fsb]
cfsbb = fsb.children[fsb]
self.assertEqual(fsa.ttot , 13)
self.assertEqual(fsa.tsub , 3)
self.assertEqual(fsb.ttot , 10)
self.assertEqual(fsb.tsub , 10)
self.assertEqual(fsb.ncall , 3)
self.assertEqual(fsb.nactualcall , 1)
self.assertEqual(cfsab.ttot , 10)
self.assertEqual(cfsab.tsub , 4)
self.assertEqual(cfsbb.ttot , 6)
self.assertEqual(cfsbb.tsub , 6)
self.assertEqual(cfsbb.nactualcall , 0)
self.assertEqual(cfsbb.ncall , 2)
def test_aaab(self):
_timings = {"a_1":13,"a_2":10,"a_3":6,"b_1":1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 3:
b()
return
self._ncall += 1
a()
def b():
return
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
cfsaa = fsa.children[fsa]
cfsab = fsa.children[fsb]
self.assertEqual(fsa.ttot , 13)
self.assertEqual(fsa.tsub , 12)
self.assertEqual(fsb.ttot , 1)
self.assertEqual(fsb.tsub , 1)
self.assertEqual(cfsaa.ttot , 10)
self.assertEqual(cfsaa.tsub , 9)
self.assertEqual(cfsab.ttot , 1)
self.assertEqual(cfsab.tsub , 1)
def test_abab(self):
_timings = {"a_1":13,"b_1":10,"a_2":6,"b_2":1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
b()
def b():
if self._ncall == 2:
return
self._ncall += 1
a()
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
cfsab = fsa.children[fsb]
cfsba = fsb.children[fsa]
self.assertEqual(fsa.ttot , 13)
self.assertEqual(fsa.tsub , 8)
self.assertEqual(fsb.ttot , 10)
self.assertEqual(fsb.tsub , 5)
self.assertEqual(cfsab.ttot , 10)
self.assertEqual(cfsab.tsub , 5)
self.assertEqual(cfsab.ncall , 2)
self.assertEqual(cfsab.nactualcall , 1)
self.assertEqual(cfsba.ttot , 6)
self.assertEqual(cfsba.tsub , 5)
| 34.522778
| 111
| 0.525397
|
23bfec3adb85c5125f40138266a830f1d04896f9
| 1,724
|
py
|
Python
|
recipes/extract_a_sub_table_from_some_big_table.py
|
jdum/odfdo
|
2494d0bed39f5a55974643206e9bafeed40f3a6b
|
[
"Apache-2.0"
] | 18
|
2018-04-19T08:30:48.000Z
|
2022-02-14T11:00:27.000Z
|
recipes/extract_a_sub_table_from_some_big_table.py
|
jdum/odfdo
|
2494d0bed39f5a55974643206e9bafeed40f3a6b
|
[
"Apache-2.0"
] | 15
|
2018-04-22T00:52:41.000Z
|
2021-07-05T10:16:38.000Z
|
recipes/extract_a_sub_table_from_some_big_table.py
|
jdum/odfdo
|
2494d0bed39f5a55974643206e9bafeed40f3a6b
|
[
"Apache-2.0"
] | 6
|
2018-04-22T00:14:12.000Z
|
2021-12-06T01:42:07.000Z
|
#!/usr/bin/env python
"""
Create a table of 1000 lines and 100 columns, extract a sub-table of 100 lines
and 26 columns, and save the result in a spreadsheet document.
"""
import os
from odfdo import Document, Table, Row, Cell
def suite(n):
if n % 2 == 0:
return n / 2
return 3 * n + 1
if __name__ == "__main__":
spreadsheet = Document("spreadsheet")
# Populate the table in the spreadsheet
body = spreadsheet.body
table = Table("Big Table")
body.append(table)
lines = 1000
cols = 100
for line in range(lines):
row = Row()
values = []
n = line
for i in range(cols):
values.append(n)
n = suite(n)
row.set_values(values)
table.append(row)
print("Size of Big Table :", table.size)
# now extract 100 rows of 26 columns :
table1 = Table("Extract 1")
for r in range(800, 900):
row = table.get_row(r)
values = [row.get_value(x) for x in range(50, 76)]
row2 = Row()
row2.set_values(values)
table1.append(row2)
body.append(table1)
print("Size of extracted table 1 :", table1.size)
# other method
table2 = Table("Extract 2")
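# Hedged note (comment added by the editor, not in the original recipe): odfdo
# coordinates are (column, row); the rectangle (50, 800, 75, 899) below spans
# columns 50..75 (26 columns) and rows 800..899 (100 rows), and set_cells
# pastes the block starting at column 2, row 3.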
cells = table.get_cells(coord=(50, 800, 75, 899))
table2.set_cells(coord=(2, 3), cells=cells)
body.append(table2)
print("Size of extracted table 2 :", table2.size)
if not os.path.exists("test_output"):
os.mkdir("test_output")
output = os.path.join("test_output", "my_big_spreadsheet.ods")
spreadsheet.save(target=output, pretty=True)
expected_result = """
Size of Big Table : (100, 1000)
Size of extracted table 1 : (26, 100)
Size of extracted table 2 : (26, 100)
"""
| 23.944444
| 78
| 0.609629
|
8f2c7d2f82cd6cc570dd692b1b9d1f01e95f882c
| 5,239
|
py
|
Python
|
mmdet/models/roi_heads/mask_scoring_roi_head.py
|
hyperlist/mmdetection
|
ba4918de7fb21a96edc373584fa21a17d098a843
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/roi_heads/mask_scoring_roi_head.py
|
hyperlist/mmdetection
|
ba4918de7fb21a96edc373584fa21a17d098a843
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/roi_heads/mask_scoring_roi_head.py
|
hyperlist/mmdetection
|
ba4918de7fb21a96edc373584fa21a17d098a843
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
import paddle
from mmdet.core import bbox2roi
from ..builder import HEADS, build_head
from .standard_roi_head import StandardRoIHead
@HEADS.register_module()
class MaskScoringRoIHead(StandardRoIHead):
"""Mask Scoring RoIHead for Mask Scoring RCNN.
https://arxiv.org/abs/1903.00241
"""
def __init__(self, mask_iou_head, **kwargs):
assert mask_iou_head is not None
super(MaskScoringRoIHead, self).__init__(**kwargs)
self.mask_iou_head = build_head(mask_iou_head)
def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
img_metas):
"""Run forward function and calculate loss for Mask head in
training."""
        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
mask_results = super(MaskScoringRoIHead,
self)._mask_forward_train(x, sampling_results,
bbox_feats, gt_masks,
img_metas)
if mask_results['loss_mask'] is None:
return mask_results
# mask iou head forward and loss
pos_mask_pred = mask_results['mask_pred'][
range(mask_results['mask_pred'].size(0)), pos_labels]
mask_iou_pred = self.mask_iou_head(mask_results['mask_feats'],
pos_mask_pred)
pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)),
pos_labels]
mask_iou_targets = self.mask_iou_head.get_targets(
sampling_results, gt_masks, pos_mask_pred,
mask_results['mask_targets'], self.train_cfg)
loss_mask_iou = self.mask_iou_head.loss(pos_mask_iou_pred,
mask_iou_targets)
mask_results['loss_mask'].update(loss_mask_iou)
return mask_results
def simple_test_mask(self,
x,
img_metas,
det_bboxes,
det_labels,
rescale=False):
"""Obtain mask prediction without augmentation."""
# image shapes of images in the batch
ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
num_imgs = len(det_bboxes)
if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
num_classes = self.mask_head.num_classes
segm_results = [[[] for _ in range(num_classes)]
for _ in range(num_imgs)]
mask_scores = [[[] for _ in range(num_classes)]
for _ in range(num_imgs)]
else:
# if det_bboxes is rescaled to the original image size, we need to
# rescale it back to the testing scale to obtain RoIs.
if rescale and not isinstance(scale_factors[0], float):
scale_factors = [
                    torch.from_numpy(scale_factor).to(det_bboxes[0].device)
for scale_factor in scale_factors
]
_bboxes = [
det_bboxes[i][:, :4] *
scale_factors[i] if rescale else det_bboxes[i]
for i in range(num_imgs)
]
mask_rois = bbox2roi(_bboxes)
mask_results = self._mask_forward(x, mask_rois)
            concat_det_labels = torch.cat(det_labels)
# get mask scores with mask iou head
mask_feats = mask_results['mask_feats']
mask_pred = mask_results['mask_pred']
mask_iou_pred = self.mask_iou_head(
mask_feats, mask_pred[range(concat_det_labels.size(0)),
concat_det_labels])
# split batch mask prediction back to each image
num_bboxes_per_img = tuple(len(_bbox) for _bbox in _bboxes)
mask_preds = mask_pred.split(num_bboxes_per_img, 0)
mask_iou_preds = mask_iou_pred.split(num_bboxes_per_img, 0)
# apply mask post-processing to each image individually
segm_results = []
mask_scores = []
for i in range(num_imgs):
if det_bboxes[i].shape[0] == 0:
segm_results.append(
[[] for _ in range(self.mask_head.num_classes)])
mask_scores.append(
[[] for _ in range(self.mask_head.num_classes)])
else:
segm_result = self.mask_head.get_seg_masks(
mask_preds[i], _bboxes[i], det_labels[i],
self.test_cfg, ori_shapes[i], scale_factors[i],
rescale)
# get mask scores with mask iou head
mask_score = self.mask_iou_head.get_mask_scores(
mask_iou_preds[i], det_bboxes[i], det_labels[i])
segm_results.append(segm_result)
mask_scores.append(mask_score)
return list(zip(segm_results, mask_scores))
| 45.95614
| 83
| 0.563657
|
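MaskScoringRoIHead implements Mask Scoring R-CNN: a MaskIoU head predicts, per detection, how well the predicted mask matches the ground truth, and at test time get_mask_scores folds that prediction into the detection score. A minimal, self-contained sketch of that final rescoring step in plain PyTorch; the tensor names are illustrative, not part of the mmdetection API:

import torch

def rescore_masks(det_scores, det_labels, mask_iou_pred):
    """Combine box classification scores with predicted mask IoU.

    det_scores:    (N,) detection confidence per box
    det_labels:    (N,) predicted class index per box
    mask_iou_pred: (N, num_classes) output of a MaskIoU-style head
    """
    idx = torch.arange(det_scores.size(0))
    per_class_iou = mask_iou_pred[idx, det_labels]  # IoU prediction for each box's own class
    return det_scores * per_class_iou

# toy usage
scores = torch.tensor([0.9, 0.6])
labels = torch.tensor([1, 0])
iou = torch.tensor([[0.2, 0.8], [0.5, 0.3]])
print(rescore_masks(scores, labels, iou))  # tensor([0.7200, 0.3000])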
f35276b28fc8444f642f39de9a8661e85ab36bf2
| 11,867
|
py
|
Python
|
micronet/compression/quantization/wbwtab/quantize.py
|
jay757425789/micronet
|
351d184527e9867e0394878cf91b64ffd5c6b109
|
[
"MIT"
] | 1
|
2021-07-30T08:34:19.000Z
|
2021-07-30T08:34:19.000Z
|
micronet/compression/quantization/wbwtab/quantize.py
|
jay757425789/micronet
|
351d184527e9867e0394878cf91b64ffd5c6b109
|
[
"MIT"
] | null | null | null |
micronet/compression/quantization/wbwtab/quantize.py
|
jay757425789/micronet
|
351d184527e9867e0394878cf91b64ffd5c6b109
|
[
"MIT"
] | null | null | null |
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
# ********************* binary (+-1) ***********************
# activation
class BinaryActivation(Function):
@staticmethod
def forward(self, input):
self.save_for_backward(input)
output = torch.sign(input)
output[output == 0] = 1
        # ******************** A —— 1, 0 *********************
#output = torch.clamp(output, min=0)
return output
@staticmethod
def backward(self, grad_output):
input, = self.saved_tensors
# *******************ste*********************
grad_input = grad_output.clone()
# ****************saturate_ste***************
grad_input[input.ge(1.0)] = 0
grad_input[input.le(-1.0)] = 0
'''
#******************soft_ste*****************
size = input.size()
zeros = torch.zeros(size).cuda()
grad = torch.max(zeros, 1 - torch.abs(input))
grad_input = grad_output * grad
'''
return grad_input
# weight
class BinaryWeight(Function):
@staticmethod
def forward(self, input):
output = torch.sign(input)
output[output == 0] = 1
return output
@staticmethod
def backward(self, grad_output):
# *******************ste*********************
grad_input = grad_output.clone()
return grad_input
# ********************* ternary (+-1, 0) ***********************
class Ternary(Function):
@staticmethod
def forward(self, input):
        # **************** channel-level E(|W|) ****************
E = torch.mean(torch.abs(input), (3, 2, 1), keepdim=True)
        # **************** threshold ****************
threshold = E * 0.7
        # ************** W —— +-1, 0 **************
output = torch.sign(torch.add(torch.sign(torch.add(input, threshold)), torch.sign(torch.add(input, -threshold))))
return output, threshold
@staticmethod
def backward(self, grad_output, grad_threshold):
# *******************ste*********************
grad_input = grad_output.clone()
return grad_input
# ********************* A (activation) quantization (binary) ***********************
class ActivationQuantizer(nn.Module):
def __init__(self, A=2):
super(ActivationQuantizer, self).__init__()
self.A = A
self.relu = nn.ReLU(inplace=True)
def binary(self, input):
output = BinaryActivation.apply(input)
return output
def forward(self, input):
if self.A == 2:
output = self.binary(input)
else:
output = self.relu(input)
return output
# ********************* W (weight) quantization (ternary/binary) ***********************
def meancenter_clamp_convparams(w):
mean = w.data.mean(1, keepdim=True)
    w.data.sub_(mean)  # center W along the channel dimension (C)
    w.data.clamp_(-1.0, 1.0)  # clamp W to [-1, 1]
return w
class WeightQuantizer(nn.Module):
def __init__(self, W=2):
super(WeightQuantizer, self).__init__()
self.W = W
def binary(self, input):
output = BinaryWeight.apply(input)
return output
def ternary(self, input):
output = Ternary.apply(input)
return output
def forward(self, input):
if self.W == 2 or self.W == 3:
            # **************************************** binarize W *****************************************
if self.W == 2:
                output = meancenter_clamp_convparams(input)  # mean-center and clamp W
                # **************** channel-level E(|W|) ****************
E = torch.mean(torch.abs(output), (3, 2, 1), keepdim=True)
                # **************** α (scaling factor) ****************
alpha = E
# ************** W —— +-1 **************
output = self.binary(output)
# ************** W * α **************
                output = output * alpha  # comment out if the scaling factor α is not needed
            # **************************************** ternarize W *****************************************
elif self.W == 3:
output_fp = input.clone()
                # ************** W —— +-1, 0 **************
                output, threshold = self.ternary(input)  # threshold
                # **************** α (scaling factor) ****************
output_abs = torch.abs(output_fp)
mask_le = output_abs.le(threshold)
mask_gt = output_abs.gt(threshold)
output_abs[mask_le] = 0
output_abs_th = output_abs.clone()
output_abs_th_sum = torch.sum(output_abs_th, (3, 2, 1), keepdim=True)
mask_gt_sum = torch.sum(mask_gt, (3, 2, 1), keepdim=True).float()
                alpha = output_abs_th_sum / mask_gt_sum  # α (scaling factor)
# *************** W * α ****************
                output = output * alpha  # comment out if the scaling factor α is not needed
else:
output = input
return output
class QuantConv2d(nn.Conv2d):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
W=2,
quant_inference=False):
super(QuantConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups,
bias, padding_mode)
self.quant_inference = quant_inference
self.weight_quantizer = WeightQuantizer(W=W)
def forward(self, input):
if not self.quant_inference:
tnn_bin_weight = self.weight_quantizer(self.weight)
else:
tnn_bin_weight = self.weight
output = F.conv2d(input, tnn_bin_weight, self.bias, self.stride, self.padding, self.dilation,
self.groups)
return output
class QuantConvTranspose2d(nn.ConvTranspose2d):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
W=2,
quant_inference=False):
super(QuantConvTranspose2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, output_padding,
dilation, groups, bias, padding_mode)
self.quant_inference = quant_inference
self.weight_quantizer = WeightQuantizer(W=W)
def forward(self, input):
if not self.quant_inference:
tnn_bin_weight = self.weight_quantizer(self.weight)
else:
tnn_bin_weight = self.weight
output = F.conv_transpose2d(input, tnn_bin_weight, self.bias, self.stride, self.padding, self.output_padding,
self.groups, self.dilation)
return output
def add_quant_op(module, layer_counter, layer_num, A=2, W=2,
quant_inference=False):
for name, child in module.named_children():
if isinstance(child, nn.Conv2d):
layer_counter[0] += 1
if layer_counter[0] > 1 and layer_counter[0] < layer_num:
if child.bias is not None:
quant_conv = QuantConv2d(child.in_channels, child.out_channels,
child.kernel_size, stride=child.stride,
padding=child.padding, dilation=child.dilation,
groups=child.groups, bias=True, padding_mode=child.padding_mode,
W=W, quant_inference=quant_inference)
quant_conv.bias.data = child.bias
else:
quant_conv = QuantConv2d(child.in_channels, child.out_channels,
child.kernel_size, stride=child.stride,
padding=child.padding, dilation=child.dilation,
groups=child.groups, bias=False, padding_mode=child.padding_mode,
W=W, quant_inference=quant_inference)
quant_conv.weight.data = child.weight
module._modules[name] = quant_conv
elif isinstance(child, nn.ConvTranspose2d):
layer_counter[0] += 1
if layer_counter[0] > 1 and layer_counter[0] < layer_num:
if child.bias is not None:
quant_conv_transpose = QuantConvTranspose2d(child.in_channels,
child.out_channels,
child.kernel_size,
stride=child.stride,
padding=child.padding,
output_padding=child.output_padding,
dilation=child.dilation,
groups=child.groups,
bias=True,
padding_mode=child.padding_mode,
W=W,
quant_inference=quant_inference)
quant_conv_transpose.bias.data = child.bias
else:
quant_conv_transpose = QuantConvTranspose2d(child.in_channels,
child.out_channels,
child.kernel_size,
stride=child.stride,
padding=child.padding,
output_padding=child.output_padding,
dilation=child.dilation,
groups=child.groups,
bias=False,
padding_mode=child.padding_mode,
W=W,
quant_inference=quant_inference)
quant_conv_transpose.weight.data = child.weight
module._modules[name] = quant_conv_transpose
elif isinstance(child, nn.ReLU):
if layer_counter[0] > 0 and layer_counter[0] < layer_num:
quant_relu = ActivationQuantizer(A=A)
module._modules[name] = quant_relu
else:
add_quant_op(child, layer_counter, layer_num, A=A, W=W,
quant_inference=quant_inference)
def prepare(model, inplace=False, A=2, W=2, quant_inference=False):
if not inplace:
model = copy.deepcopy(model)
layer_counter = [0]
layer_num = 0
for m in model.modules():
if isinstance(m, nn.Conv2d):
layer_num += 1
elif isinstance(m, nn.ConvTranspose2d):
layer_num += 1
add_quant_op(model, layer_counter, layer_num, A=A, W=W,
quant_inference=quant_inference)
return model
| 43.468864
| 123
| 0.45142
|
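BinaryWeight, BinaryActivation and Ternary above all rely on the straight-through estimator (STE): the forward pass quantizes with sign (plus a per-channel threshold in the ternary case), while the backward pass copies the incoming gradient through as if the quantizer were the identity, optionally zeroing it where the input saturates. A self-contained sketch of the binary case, mirroring the autograd Function pattern used in the file:

import torch
from torch.autograd import Function

class SignSTE(Function):
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        out = torch.sign(x)
        out[out == 0] = 1                   # map exact zeros to +1, as above
        return out

    @staticmethod
    def backward(ctx, grad_output):
        (x,) = ctx.saved_tensors
        grad_input = grad_output.clone()    # straight-through: pass the gradient unchanged
        grad_input[x.abs() >= 1.0] = 0      # saturated STE: zero the gradient outside (-1, 1)
        return grad_input

w = torch.randn(4, requires_grad=True)
SignSTE.apply(w).sum().backward()
print(w.grad)                               # 1 where |w| < 1, 0 elsewhere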
20aa01f089431e83405bdfefdda891fa467ddc3b
| 17,472
|
py
|
Python
|
tests/app/main/views/test_tour.py
|
alphagov-mirror/notifications-admin
|
04d051df6b85cf596a7d6d0f28474b04673e420a
|
[
"MIT"
] | null | null | null |
tests/app/main/views/test_tour.py
|
alphagov-mirror/notifications-admin
|
04d051df6b85cf596a7d6d0f28474b04673e420a
|
[
"MIT"
] | null | null | null |
tests/app/main/views/test_tour.py
|
alphagov-mirror/notifications-admin
|
04d051df6b85cf596a7d6d0f28474b04673e420a
|
[
"MIT"
] | null | null | null |
import pytest
from flask import url_for
from app import current_user
from tests import validate_route_permission
from tests.conftest import SERVICE_ONE_ID, create_template, normalize_spaces
def test_should_200_for_tour_start(
client_request,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
):
page = client_request.get(
'main.begin_tour',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
)
assert normalize_spaces(
page.select('.banner-tour .heading-medium')[0].text
) == (
'Try sending yourself this example'
)
selected_hint = page.select('.banner-tour .govuk-grid-row')[0]
selected_hint_text = normalize_spaces(selected_hint.select(".govuk-body")[0].text)
assert "greyed-out-step" not in selected_hint["class"]
assert selected_hint_text == 'Every message is sent from a template'
assert normalize_spaces(
page.select('.sms-message-recipient')[0].text
) == (
'To: 07700 900762'
)
assert normalize_spaces(
page.select('.sms-message-wrapper')[0].text
) == (
'service one: ((one)) ((two)) ((three))'
)
assert page.select('a.govuk-button')[0]['href'] == url_for(
'.tour_step', service_id=SERVICE_ONE_ID, template_id=fake_uuid, step_index=1
)
def test_should_clear_session_on_tour_start(
client_request,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
):
with client_request.session_transaction() as session:
session['placeholders'] = {'one': 'hello', 'phone number': '07700 900762'}
client_request.get(
'main.begin_tour',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
)
with client_request.session_transaction() as session:
assert session['placeholders'] == {}
@pytest.mark.parametrize('template_type', ['email', 'letter', 'broadcast'])
def test_should_404_if_non_sms_template_for_tour_start(
client_request,
fake_uuid,
mocker,
template_type,
):
mocker.patch(
'app.service_api_client.get_service_template',
return_value={'data': create_template(template_type=template_type)}
)
client_request.get(
'main.begin_tour',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
_expected_status=404,
)
def test_should_404_if_no_mobile_number_for_tour_start(
client_request,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
active_user_with_permissions_no_mobile
):
client_request.login(active_user_with_permissions_no_mobile)
assert current_user.mobile_number is None
client_request.get(
'main.begin_tour',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
_expected_status=404,
)
def test_should_403_if_user_does_not_have_send_permissions_for_tour_start(
mocker,
app_,
client,
api_user_active,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
):
validate_route_permission(
mocker,
app_,
"GET",
403,
url_for(
'main.begin_tour',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
),
['view_activity'],
api_user_active,
service_one)
def test_should_200_for_get_tour_step(
client_request,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
):
with client_request.session_transaction() as session:
session['placeholders'] = {}
page = client_request.get(
'main.tour_step',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=1
)
assert 'Example text message' in normalize_spaces(page.select_one('title').text)
assert normalize_spaces(
page.select('.banner-tour .heading-medium')[0].text
) == (
'Try sending yourself this example'
)
selected_hint = page.select('.banner-tour .govuk-grid-row')[1]
selected_hint_text = normalize_spaces(selected_hint.select(".govuk-body")[0].text)
assert "greyed-out-step" not in selected_hint["class"]
assert selected_hint_text == 'The template pulls in the data you provide'
assert normalize_spaces(
page.select('.sms-message-recipient')[0].text
) == (
'To: 07700 900762'
)
assert normalize_spaces(
page.select('.sms-message-wrapper')[0].text
) == (
'service one: ((one)) ((two)) ((three))'
)
def test_should_prefill_answers_for_get_tour_step(
client_request,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
):
with client_request.session_transaction() as session:
        session['placeholders'] = {'one': 'hello', 'phone number': '07700 900762'}
page = client_request.get(
'main.tour_step',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=1
)
assert page.select('.govuk-input')[0]['value'] == 'hello'
@pytest.mark.parametrize('template_type', ['email', 'letter', 'broadcast'])
@pytest.mark.parametrize('method', ['get', 'post'])
def test_should_404_if_non_sms_template_for_tour_step(
client_request,
fake_uuid,
mocker,
template_type,
method
):
mocker.patch(
'app.service_api_client.get_service_template',
return_value={'data': create_template(template_type=template_type)}
)
getattr(client_request, method)(
'main.tour_step',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=1,
_expected_status=404
)
def test_should_404_for_get_tour_step_0(
client_request,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
):
with client_request.session_transaction() as session:
session['placeholders'] = {}
client_request.get(
'main.tour_step',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=0,
_expected_status=404
)
@pytest.mark.parametrize('method', ['GET', 'POST'])
def test_should_403_if_user_does_not_have_send_permissions_for_tour_step(
mocker,
app_,
client,
api_user_active,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
method
):
validate_route_permission(
mocker,
app_,
method,
403,
url_for(
'main.tour_step',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=1
),
['view_activity'],
api_user_active,
service_one
)
def test_tour_step_redirects_to_tour_start_if_placeholders_doesnt_exist_in_session(
client_request,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
):
with client_request.session_transaction() as session:
assert 'placeholders' not in session
client_request.get(
'main.tour_step',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=1,
_expected_status=302,
_expected_redirect=url_for(
'main.begin_tour',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
_external=True,
),
)
def test_back_link_from_first_get_tour_step_points_to_tour_start(
client_request,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
):
with client_request.session_transaction() as session:
session['placeholders'] = {}
page = client_request.get(
'main.tour_step',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=1
)
assert page.select('.govuk-back-link')[0]['href'] == url_for(
"main.begin_tour",
service_id=SERVICE_ONE_ID,
template_id=fake_uuid
)
def test_back_link_from_get_tour_step_points_to_previous_step(
client_request,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
):
with client_request.session_transaction() as session:
session['placeholders'] = {}
page = client_request.get(
'main.tour_step',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=2
)
assert page.select('.govuk-back-link')[0]['href'] == url_for(
'main.tour_step',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=1
)
def test_post_tour_step_saves_data_and_redirects_to_next_step(
client_request,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
):
with client_request.session_transaction() as session:
session['placeholders'] = {}
client_request.post(
'main.tour_step',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=1,
_data={'placeholder_value': 'hello'},
_expected_status=302,
_expected_redirect=url_for(
'main.tour_step',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=2,
_external=True,
),
)
with client_request.session_transaction() as session:
assert session['placeholders'] == {'one': 'hello', 'phone number': '07700 900762'}
def test_post_tour_step_adds_data_to_saved_data_and_redirects_to_next_step(
client_request,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
):
with client_request.session_transaction() as session:
session['placeholders'] = {'one': 'hello', 'phone number': '07700 900762'}
client_request.post(
'main.tour_step',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=2,
_data={'placeholder_value': 'is it me you are looking for'},
_expected_status=302,
_expected_redirect=url_for(
'main.tour_step',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=3,
_external=True,
),
)
with client_request.session_transaction() as session:
assert session['placeholders'] == {
'one': 'hello', 'two': 'is it me you are looking for', 'phone number': '07700 900762'
}
def test_post_tour_step_raises_validation_error_for_form_error(
client_request,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
):
with client_request.session_transaction() as session:
session['placeholders'] = {'one': 'hi', 'phone number': '07700 900762'}
page = client_request.post(
'main.tour_step',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=2,
_data={'placeholder_value': ''},
_expected_status=200, # should this be 400
)
assert normalize_spaces(
page.select('.govuk-error-message')[0].text
) == (
'Error: Cannot be empty'
)
assert normalize_spaces(
page.select('.sms-message-recipient')[0].text
) == (
'To: 07700 900762'
)
assert normalize_spaces(
page.select('.sms-message-wrapper')[0].text
) == (
'service one: hi ((two)) ((three))'
)
with client_request.session_transaction() as session:
assert session['placeholders'] == {'one': 'hi', 'phone number': '07700 900762'}
def test_post_final_tour_step_saves_data_and_redirects_to_check_notification(
client_request,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
):
with client_request.session_transaction() as session:
session['placeholders'] = {'one': 'hello', 'two': 'hi', 'phone number': '07700 900762'}
client_request.post(
'main.tour_step',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=3,
_data={'placeholder_value': 'howdy'},
_expected_status=302,
_expected_redirect=url_for(
'main.check_tour_notification',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
_external=True
),
)
with client_request.session_transaction() as session:
assert session['placeholders'] == {
'one': 'hello', 'two': 'hi', 'three': 'howdy', 'phone number': '07700 900762'
}
def test_get_test_step_out_of_index_redirects_to_first_step(
client_request,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
):
with client_request.session_transaction() as session:
session['placeholders'] = {}
client_request.get(
'main.tour_step',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=4,
_expected_status=302,
_expected_redirect=url_for(
'main.tour_step',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=1,
_external=True
),
)
def test_get_test_step_out_of_index_redirects_to_check_notification_if_all_placeholders_filled(
client_request,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
):
with client_request.session_transaction() as session:
session['placeholders'] = {'one': 'hello', 'two': 'hi', 'three': 'howdy', 'phone number': '07700 900762'}
client_request.get(
'main.tour_step',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=4,
_expected_status=302,
_expected_redirect=url_for(
'main.check_tour_notification',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
_external=True
),
)
def test_should_200_for_check_tour_notification(
client_request,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
):
with client_request.session_transaction() as session:
session['placeholders'] = {'one': 'hello', 'two': 'hi', 'three': 'howdy', 'phone number': '07700 900762'}
page = client_request.get(
'main.check_tour_notification',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
)
assert normalize_spaces(
page.select('.banner-tour .heading-medium')[0].text
) == (
'Try sending yourself this example'
)
selected_hint = page.select('.banner-tour .govuk-grid-row')[1]
selected_hint_text = normalize_spaces(selected_hint.select(".govuk-body")[0].text)
assert "greyed-out-step" not in selected_hint["class"]
assert selected_hint_text == 'The template pulls in the data you provide'
assert normalize_spaces(
page.select('.sms-message-recipient')[0].text
) == (
'To: 07700 900762'
)
assert normalize_spaces(
page.select('.sms-message-wrapper')[0].text
) == (
'service one: hello hi howdy'
)
# post to send_notification keeps help argument
assert page.form.attrs['action'] == url_for(
'main.send_notification',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
help='3'
)
def test_back_link_from_check_tour_notification_points_to_last_tour_step(
client_request,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
):
with client_request.session_transaction() as session:
session['placeholders'] = {'one': 'hello', 'two': 'hi', 'three': 'howdy', 'phone number': '07700 900762'}
page = client_request.get(
'main.check_tour_notification',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
)
assert page.select('.govuk-back-link')[0]['href'] == url_for(
"main.tour_step",
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=3
)
def test_check_tour_notification_redirects_to_tour_start_if_placeholders_doesnt_exist_in_session(
client_request,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
):
with client_request.session_transaction() as session:
assert 'placeholders' not in session
client_request.get(
'main.check_tour_notification',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=1,
_expected_status=302,
_expected_redirect=url_for(
'main.begin_tour',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
_external=True,
),
)
def test_check_tour_notification_redirects_to_first_step_if_not_all_placeholders_in_session(
client_request,
mock_get_service_template_with_multiple_placeholders,
service_one,
fake_uuid,
):
with client_request.session_transaction() as session:
session['placeholders'] = {'one': 'hello', 'two': 'hi', 'phone number': '07700 900762'}
client_request.get(
'main.check_tour_notification',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
_expected_status=302,
_expected_redirect=url_for(
'main.tour_step',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
step_index=1,
_external=True
),
)
| 28.226171
| 113
| 0.663004
|
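The tour tests above exercise a session-backed, multi-step form: starting the tour clears session['placeholders'], each POST stores one placeholder value and redirects to the next step, and requests with a missing session or an out-of-range step fall back to the start (or to the check page once every placeholder is filled). A stripped-down sketch of that pattern using only Flask's standard session/redirect primitives; the route names and fields here are illustrative, not the notifications-admin API:

from flask import Flask, abort, redirect, request, session, url_for

app = Flask(__name__)
app.secret_key = "dev"                      # required for session storage

PLACEHOLDERS = ["one", "two", "three"]

@app.route("/tour/start")
def begin_tour():
    session["placeholders"] = {}            # clear any answers from a previous run
    return redirect(url_for("tour_step", step_index=1))

@app.route("/tour/check")
def check_tour_notification():
    return str(session.get("placeholders", {}))

@app.route("/tour/step/<int:step_index>", methods=["GET", "POST"])
def tour_step(step_index):
    if "placeholders" not in session:       # no tour in progress: back to the start
        return redirect(url_for("begin_tour"))
    if step_index < 1:
        abort(404)
    if step_index > len(PLACEHOLDERS):      # past the end: check page if complete, else restart
        if all(name in session["placeholders"] for name in PLACEHOLDERS):
            return redirect(url_for("check_tour_notification"))
        return redirect(url_for("tour_step", step_index=1))
    if request.method == "POST":
        # store this step's answer and move on to the next one
        session["placeholders"][PLACEHOLDERS[step_index - 1]] = request.form["placeholder_value"]
        session.modified = True
        return redirect(url_for("tour_step", step_index=step_index + 1))
    return f"step {step_index}"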