import os
import unittest
import numpy as np
import openmdao.api as om
import numpy.testing as npt
import wisdem.rotorse.rotor_power as rp
# Load in airfoil and blade shape inputs for NREL 5MW
ARCHIVE = os.path.dirname(os.path.abspath(__file__)) + os.path.sep + "regulation.npz"
NPZFILE = np.load(ARCHIVE)
def fillprob(prob, n_pc, n_span):
prob.setup()
for k in NPZFILE.files:
prob[k] = NPZFILE[k]
prob.set_val("v_min", 4.0, units="m/s")
prob.set_val("v_max", 25.0, units="m/s")
prob.set_val("rated_power", 5e6, units="W")
prob.set_val("omega_min", 0.0, units="rpm")
prob.set_val("omega_max", 100.0, units="rpm")
prob.set_val("control_maxTS", 90.0, units="m/s")
prob.set_val("tsr_operational", 10.0)
prob.set_val("control_pitch", 0.0, units="deg")
prob.set_val("gearbox_efficiency", 0.975)
prob.set_val("generator_efficiency", 0.975 * np.ones(n_pc))
prob.set_val("lss_rpm", np.linspace(0.1, 100, n_pc))
prob.set_val("drivetrainType", "GEARED")
prob.set_val("Rhub", 1.0, units="m")
prob.set_val("Rtip", 70.0, units="m")
prob.set_val("hub_height", 100.0, units="m")
prob.set_val("precone", 0.0, units="deg")
prob.set_val("tilt", 0.0, units="deg")
prob.set_val("yaw", 0.0, units="deg")
prob.set_val("precurve", np.zeros(n_span), units="m")
prob.set_val("precurveTip", 0.0, units="m")
prob.set_val("presweep", np.zeros(n_span), units="m")
prob.set_val("presweepTip", 0.0, units="m")
prob.set_val("shearExp", 0.25)
prob.set_val("nSector", 4)
prob.set_val("tiploss", True)
prob.set_val("hubloss", True)
prob.set_val("wakerotation", True)
prob.set_val("usecd", True)
return prob
class TestServo(unittest.TestCase):
def testGust(self):
inputs = {}
outputs = {}
discrete_inputs = {}
discrete_outputs = {}
myobj = rp.GustETM(std=2.5)
inputs["V_mean"] = 10.0
inputs["V_hub"] = 15.0
discrete_inputs["turbulence_class"] = "A"
myobj.compute(inputs, outputs, discrete_inputs, discrete_outputs)
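        # The expected value below appears to follow the IEC 61400-1 extreme turbulence model (ETM):
        # sigma = 2 * I_ref * (0.072 * (V_avg/2 + 3) * (V_hub/2 - 4) + 10); with V_avg=10, V_hub=15 and
        # I_ref=0.16 (class A) the prefactor is 0.32 and the inner term is 0.072*8*3.5 + 10.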
sigma = 0.32 * (0.072 * 8.0 * 3.5 + 10.0)
expect = 15.0 + 2.5 * sigma
self.assertEqual(outputs["V_gust"], expect)
# Test lower case
discrete_inputs["turbulence_class"] = "c"
myobj.compute(inputs, outputs, discrete_inputs, discrete_outputs)
sigma = 0.24 * (0.072 * 8.0 * 3.5 + 10.0)
expect = 15.0 + 2.5 * sigma
self.assertEqual(outputs["V_gust"], expect)
        # Test that an unknown turbulence class raises an error
        discrete_inputs["turbulence_class"] = "d"
        with self.assertRaises(ValueError):
            myobj.compute(inputs, outputs, discrete_inputs, discrete_outputs)
def testRegulationTrajectory(self):
prob = om.Problem()
(n_span, n_aoa, n_Re, n_tab) = NPZFILE["airfoils_cl"].shape
n_pc = 22
modeling_options = {}
modeling_options["WISDEM"] = {}
modeling_options["WISDEM"]["RotorSE"] = {}
modeling_options["WISDEM"]["RotorSE"]["n_span"] = n_span
modeling_options["WISDEM"]["RotorSE"]["n_aoa"] = n_aoa
modeling_options["WISDEM"]["RotorSE"]["n_Re"] = n_Re
modeling_options["WISDEM"]["RotorSE"]["n_tab"] = n_tab
modeling_options["WISDEM"]["RotorSE"]["regulation_reg_III"] = True
modeling_options["WISDEM"]["RotorSE"]["n_pc"] = n_pc
modeling_options["WISDEM"]["RotorSE"]["n_pc_spline"] = n_pc
modeling_options["WISDEM"]["RotorSE"]["peak_thrust_shaving"] = False
prob.model.add_subsystem(
"powercurve", rp.RegulatedPowerCurve(modeling_options=modeling_options), promotes=["*"]
)
prob = fillprob(prob, n_pc, n_span)
# All reg 2: no maxTS, no max rpm, no power limit
prob["omega_max"] = 1e3
prob["control_maxTS"] = 1e5
prob["rated_power"] = 1e16
prob.run_model()
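        # Expected operating points: V is a non-uniform, cosine-spaced grid between v_min and v_max,
        # and in region 2 the rotor speed should track the tip-speed ratio:
        # Omega [rpm] = V * TSR / Rtip * 60 / (2*pi), with TSR = 10 and Rtip = 70 m (Omega_tsr below).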
grid0 = np.cumsum(np.abs(np.diff(np.cos(np.linspace(-np.pi / 4.0, np.pi / 2.0, n_pc)))))
grid1 = (grid0 - grid0[0]) / (grid0[-1] - grid0[0])
V_expect0 = grid1 * (prob["v_max"] - prob["v_min"]) + prob["v_min"]
V_spline = np.linspace(prob["v_min"], prob["v_max"], n_pc)
irated = 12
V_expect1 = np.sort(np.r_[V_expect0, prob["rated_V"]])
Omega_tsr = V_expect1 * 10 * 60 / 70.0 / 2.0 / np.pi
npt.assert_equal(prob["V"], V_expect1)
npt.assert_equal(prob["V_spline"], V_spline.flatten())
npt.assert_allclose(prob["Omega"], Omega_tsr)
npt.assert_equal(prob["pitch"], np.zeros(n_pc))
npt.assert_array_almost_equal(prob["Cp"], prob["Cp_aero"] * 0.975 * 0.975)
npt.assert_allclose(prob["Cp"], prob["Cp"][0])
npt.assert_allclose(prob["Cp_aero"], prob["Cp_aero"][0])
myCp = prob["P"] / (0.5 * 1.225 * V_expect1 ** 3.0 * np.pi * 70 ** 2)
npt.assert_allclose(myCp, myCp[0], rtol=1e-6)
self.assertGreater(myCp[0], 0.4)
self.assertGreater(0.5, myCp[0])
npt.assert_allclose(myCp, prob["Cp"], rtol=1e-6)
npt.assert_array_less(prob["P"][:-2], prob["P"][1:-1])
npt.assert_array_less(prob["Q"][:-2], prob["Q"][1:-1])
npt.assert_array_less(prob["T"][:-2], prob["T"][1:-1])
self.assertEqual(prob["rated_V"], V_expect1[-1])
self.assertAlmostEqual(prob["rated_Omega"][0], Omega_tsr[-1], 5)
self.assertEqual(prob["rated_pitch"], 0.0)
# Test no maxTS, max rpm, no power limit
prob["omega_max"] = 15.0
prob["control_maxTS"] = 1e5
prob["rated_power"] = 1e16
prob.run_model()
V_expect1 = np.sort(np.r_[V_expect0, prob["rated_V"]])
Omega_tsr = V_expect1 * 10 * 60 / 70.0 / 2.0 / np.pi
Omega_expect = np.minimum(Omega_tsr, 15.0)
npt.assert_allclose(prob["V"], V_expect1)
npt.assert_equal(prob["V_spline"], V_spline.flatten())
npt.assert_allclose(prob["Omega"], Omega_expect)
npt.assert_equal(prob["pitch"][:irated], 0.0)
npt.assert_array_less(0.0, np.abs(prob["pitch"][(irated + 1) :]))
npt.assert_array_almost_equal(prob["Cp"], prob["Cp_aero"] * 0.975 * 0.975)
npt.assert_array_less(prob["P"][:-2], prob["P"][1:-1])
npt.assert_array_less(prob["Q"][:-2], prob["Q"][1:-1])
npt.assert_array_less(prob["T"][:-2], prob["T"][1:-1])
self.assertAlmostEqual(prob["rated_V"], V_expect1[-1], 3)
self.assertAlmostEqual(prob["rated_Omega"][0], 15.0)
self.assertEqual(prob["rated_pitch"], 0.0)
myCp = prob["P"] / (0.5 * 1.225 * V_expect1 ** 3.0 * np.pi * 70 ** 2)
npt.assert_allclose(myCp[:irated], myCp[0])
npt.assert_allclose(myCp[:irated], prob["Cp"][:irated])
# Test maxTS, no max rpm, no power limit
prob["omega_max"] = 1e3
prob["control_maxTS"] = 105.0
prob["rated_power"] = 1e16
prob.run_model()
V_expect1 = np.sort(np.r_[V_expect0, prob["rated_V"]])
# V_expect1[irated] = 105./10.
Omega_tsr = V_expect1 * 10 * 60 / 70.0 / 2.0 / np.pi
Omega_expect = np.minimum(Omega_tsr, 105.0 / 70.0 / 2 / np.pi * 60)
npt.assert_allclose(prob["V"], V_expect1)
npt.assert_equal(prob["V_spline"], V_spline.flatten())
npt.assert_allclose(prob["Omega"], Omega_expect)
npt.assert_equal(prob["pitch"][:irated], 0.0)
npt.assert_array_less(0.0, np.abs(prob["pitch"][irated:]))
npt.assert_array_almost_equal(prob["Cp"], prob["Cp_aero"] * 0.975 * 0.975)
npt.assert_array_less(prob["P"][:-2], prob["P"][1:-1])
npt.assert_array_less(prob["Q"][:-2], prob["Q"][1:-1])
npt.assert_array_less(prob["T"][:-2], prob["T"][1:-1])
self.assertEqual(prob["rated_V"], V_expect1[-1])
self.assertAlmostEqual(prob["rated_Omega"][0], Omega_expect[-1])
self.assertEqual(prob["rated_pitch"], 0.0)
myCp = prob["P"] / (0.5 * 1.225 * V_expect1 ** 3.0 * np.pi * 70 ** 2)
npt.assert_allclose(myCp[:irated], myCp[0])
npt.assert_allclose(myCp[:irated], prob["Cp"][:irated])
# Test no maxTS, no max rpm, power limit
prob["omega_max"] = 1e3
prob["control_maxTS"] = 1e4
prob["rated_power"] = 5e6
prob.run_model()
V_expect1 = np.sort(np.r_[V_expect0, prob["rated_V"]])
Omega_tsr = V_expect1 * 10 * 60 / 70.0 / 2.0 / np.pi
Omega_expect = np.minimum(Omega_tsr, prob["rated_Omega"])
npt.assert_allclose(prob["V"], V_expect1)
npt.assert_equal(prob["V_spline"], V_spline.flatten())
npt.assert_allclose(prob["Omega"], Omega_expect)
npt.assert_equal(prob["pitch"][:irated], 0.0)
npt.assert_array_less(0.0, np.abs(prob["pitch"][(irated + 1) :]))
npt.assert_array_almost_equal(prob["Cp"], prob["Cp_aero"] * 0.975 * 0.975)
npt.assert_array_less(prob["P"][:irated], prob["P"][1 : (irated + 1)])
npt.assert_allclose(prob["P"][irated:], 5e6, rtol=1e-4, atol=0)
# npt.assert_array_less(prob['Q'], prob['Q'][1:])
npt.assert_array_less(prob["T"], prob["T"][irated] + 1e-1)
# print('RATED T',prob["T"][irated])
# self.assertEqual(prob['rated_V'], V_expect1[-1])
self.assertAlmostEqual(prob["rated_Omega"][0], Omega_expect[-1])
self.assertEqual(prob["rated_pitch"], 0.0)
myCp = prob["P"] / (0.5 * 1.225 * V_expect1 ** 3.0 * np.pi * 70 ** 2)
npt.assert_allclose(myCp[:irated], myCp[0])
npt.assert_allclose(myCp[:irated], prob["Cp"][:irated])
# Test min & max rpm, no power limit
prob["omega_min"] = 7.0
prob["omega_max"] = 15.0
prob["control_maxTS"] = 1e5
prob["rated_power"] = 1e16
prob.run_model()
V_expect1 = np.sort(np.r_[V_expect0, prob["rated_V"]])
# V_expect1[irated] = 15.*70*2*np.pi/(10.*60.)
Omega_tsr = V_expect1 * 10 * 60 / 70.0 / 2.0 / np.pi
Omega_expect = np.maximum(np.minimum(Omega_tsr, 15.0), 7.0)
npt.assert_allclose(prob["V"], V_expect1)
npt.assert_equal(prob["V_spline"], V_spline.flatten())
npt.assert_allclose(prob["Omega"], Omega_expect)
npt.assert_array_less(0.0, np.abs(prob["pitch"][Omega_expect != Omega_tsr]))
npt.assert_array_almost_equal(prob["Cp"], prob["Cp_aero"] * 0.975 * 0.975)
npt.assert_array_less(prob["P"][:-2], prob["P"][1:-1])
npt.assert_array_less(prob["Q"][:-2], prob["Q"][1:-1])
npt.assert_array_less(prob["T"][:-2], prob["T"][1:-1])
self.assertEqual(prob["rated_V"], V_expect1[-1])
self.assertAlmostEqual(prob["rated_Omega"][0], 15.0)
self.assertEqual(prob["rated_pitch"], 0.0)
myCp = prob["P"] / (0.5 * 1.225 * V_expect1 ** 3.0 * np.pi * 70 ** 2)
npt.assert_allclose(myCp[Omega_expect == Omega_tsr], myCp[6])
npt.assert_allclose(myCp[Omega_expect == Omega_tsr], prob["Cp"][Omega_expect == Omega_tsr])
# Test fixed pitch
prob["omega_min"] = 0.0
prob["omega_max"] = 15.0
prob["control_maxTS"] = 1e5
prob["rated_power"] = 1e16
prob["control_pitch"] = 5.0
prob.run_model()
V_expect1 = np.sort(np.r_[V_expect0, prob["rated_V"]])
# V_expect1[irated] = 15.*70*2*np.pi/(10.*60.)
Omega_tsr = V_expect1 * 10 * 60 / 70.0 / 2.0 / np.pi
Omega_expect = np.minimum(Omega_tsr, 15.0)
npt.assert_allclose(prob["V"], V_expect1)
npt.assert_equal(prob["V_spline"], V_spline.flatten())
npt.assert_allclose(prob["Omega"], Omega_expect)
npt.assert_equal(prob["pitch"][:irated], 5.0)
npt.assert_array_less(0.0, np.abs(prob["pitch"][irated:]))
npt.assert_array_almost_equal(prob["Cp"], prob["Cp_aero"] * 0.975 * 0.975)
npt.assert_array_less(prob["P"][:-2], prob["P"][1:-1])
npt.assert_array_less(prob["Q"][:-2], prob["Q"][1:-1])
npt.assert_array_less(prob["T"][:-2], prob["T"][1:-1])
self.assertAlmostEqual(prob["rated_V"], V_expect1[-1], 3)
self.assertAlmostEqual(prob["rated_Omega"][0], 15.0)
self.assertEqual(prob["rated_pitch"], 5.0)
myCp = prob["P"] / (0.5 * 1.225 * V_expect1 ** 3.0 * np.pi * 70 ** 2)
npt.assert_allclose(myCp[:irated], myCp[0])
npt.assert_allclose(myCp[:irated], prob["Cp"][:irated])
def testRegulationTrajectoryNoRegion3(self):
prob = om.Problem()
# Load in airfoil and blade shape inputs for NREL 5MW
(n_span, n_aoa, n_Re, n_tab) = NPZFILE["airfoils_cl"].shape
n_pc = 22
modeling_options = {}
modeling_options["WISDEM"] = {}
modeling_options["WISDEM"]["RotorSE"] = {}
modeling_options["WISDEM"]["RotorSE"]["n_span"] = n_span
modeling_options["WISDEM"]["RotorSE"]["n_aoa"] = n_aoa
modeling_options["WISDEM"]["RotorSE"]["n_Re"] = n_Re
modeling_options["WISDEM"]["RotorSE"]["n_tab"] = n_tab
modeling_options["WISDEM"]["RotorSE"]["regulation_reg_III"] = False
modeling_options["WISDEM"]["RotorSE"]["n_pc"] = n_pc
modeling_options["WISDEM"]["RotorSE"]["n_pc_spline"] = n_pc
modeling_options["WISDEM"]["RotorSE"]["peak_thrust_shaving"] = False
prob.model.add_subsystem(
"powercurve", rp.RegulatedPowerCurve(modeling_options=modeling_options), promotes=["*"]
)
prob = fillprob(prob, n_pc, n_span)
# All reg 2: no maxTS, no max rpm, no power limit
prob["omega_max"] = 1e3
prob["control_maxTS"] = 1e5
prob["rated_power"] = 1e16
prob.run_model()
grid0 = np.cumsum(np.abs(np.diff(np.cos(np.linspace(-np.pi / 4.0, np.pi / 2.0, n_pc)))))
grid1 = (grid0 - grid0[0]) / (grid0[-1] - grid0[0])
V_expect0 = grid1 * (prob["v_max"] - prob["v_min"]) + prob["v_min"]
V_spline = np.linspace(prob["v_min"], prob["v_max"], n_pc)
irated = 12
V_expect1 = np.sort(np.r_[V_expect0, prob["rated_V"]])
Omega_tsr = V_expect1 * 10 * 60 / 70.0 / 2.0 / np.pi
npt.assert_equal(prob["V"], V_expect1)
npt.assert_equal(prob["V_spline"], V_spline.flatten())
npt.assert_allclose(prob["Omega"], Omega_tsr)
npt.assert_equal(prob["pitch"], np.zeros(n_pc))
npt.assert_array_almost_equal(prob["Cp"], prob["Cp_aero"] * 0.975 * 0.975)
npt.assert_allclose(prob["Cp"], prob["Cp"][0], rtol=1e-6)
npt.assert_allclose(prob["Cp_aero"], prob["Cp_aero"][0])
myCp = prob["P"] / (0.5 * 1.225 * V_expect1 ** 3.0 * np.pi * 70 ** 2)
npt.assert_allclose(myCp, myCp[0], rtol=1e-6)
self.assertGreater(myCp[0], 0.4)
self.assertGreater(0.5, myCp[0])
npt.assert_allclose(myCp, prob["Cp"], rtol=1e-6)
npt.assert_array_less(prob["P"][:-2], prob["P"][1:-1])
npt.assert_array_less(prob["Q"][:-2], prob["Q"][1:-1])
npt.assert_array_less(prob["T"][:-2], prob["T"][1:-1])
self.assertEqual(prob["rated_V"], V_expect1[-1])
self.assertAlmostEqual(prob["rated_Omega"][0], Omega_tsr[-1], 5)
self.assertEqual(prob["rated_pitch"], 0.0)
# Test no maxTS, no max rpm, power limit
prob["omega_max"] = 1e3
prob["control_maxTS"] = 1e4
prob["rated_power"] = 5e6
prob.run_model()
V_expect1 = np.sort(np.r_[V_expect0, prob["rated_V"]])
Omega_tsr = V_expect1 * 10 * 60 / 70.0 / 2.0 / np.pi
Omega_expect = np.minimum(Omega_tsr, prob["rated_Omega"])
npt.assert_allclose(prob["V"], V_expect1)
npt.assert_equal(prob["V_spline"], V_spline.flatten())
npt.assert_allclose(prob["Omega"], Omega_expect)
npt.assert_equal(prob["pitch"], 0.0)
# npt.assert_array_less(0.0, np.abs(prob['pitch'][(irated+1):]))
npt.assert_allclose(prob["Cp"][: (irated + 1)], prob["Cp_aero"][: (irated + 1)] * 0.975 * 0.975)
npt.assert_array_less(prob["P"][:irated], prob["P"][1 : (irated + 1)])
npt.assert_allclose(prob["P"][irated:], 5e6, rtol=1e-6, atol=0)
# npt.assert_equal(prob['Q'][(irated+1):], prob['Q'][irated])
npt.assert_equal(prob["T"][(irated + 1) :], 0.0)
npt.assert_array_less(prob["T"], prob["T"][irated] + 1e-1)
# self.assertEqual(prob['rated_V'], V_expect1[-1])
self.assertAlmostEqual(prob["rated_Omega"][0], Omega_expect[-1])
self.assertEqual(prob["rated_pitch"], 0.0)
myCp = prob["P"] / (0.5 * 1.225 * V_expect1 ** 3.0 * np.pi * 70 ** 2)
npt.assert_allclose(myCp[:irated], myCp[0])
npt.assert_allclose(myCp[:irated], prob["Cp"][:irated])
def testRegulationTrajectory_PeakShaving(self):
prob = om.Problem()
(n_span, n_aoa, n_Re, n_tab) = NPZFILE["airfoils_cl"].shape
n_pc = 22
modeling_options = {}
modeling_options["WISDEM"] = {}
modeling_options["WISDEM"]["RotorSE"] = {}
modeling_options["WISDEM"]["RotorSE"]["n_span"] = n_span
modeling_options["WISDEM"]["RotorSE"]["n_aoa"] = n_aoa
modeling_options["WISDEM"]["RotorSE"]["n_Re"] = n_Re
modeling_options["WISDEM"]["RotorSE"]["n_tab"] = n_tab
modeling_options["WISDEM"]["RotorSE"]["regulation_reg_III"] = True
modeling_options["WISDEM"]["RotorSE"]["n_pc"] = n_pc
modeling_options["WISDEM"]["RotorSE"]["n_pc_spline"] = n_pc
modeling_options["WISDEM"]["RotorSE"]["peak_thrust_shaving"] = True
modeling_options["WISDEM"]["RotorSE"]["thrust_shaving_coeff"] = 0.8
prob.model.add_subsystem(
"powercurve", rp.RegulatedPowerCurve(modeling_options=modeling_options), promotes=["*"]
)
prob = fillprob(prob, n_pc, n_span)
# All reg 2: no maxTS, no max rpm, no power limit
prob["omega_max"] = 1e3
prob["control_maxTS"] = 1e5
prob["rated_power"] = 1e16
prob.run_model()
grid0 = np.cumsum(np.abs(np.diff(np.cos(np.linspace(-np.pi / 4.0, np.pi / 2.0, n_pc)))))
grid1 = (grid0 - grid0[0]) / (grid0[-1] - grid0[0])
V_expect0 = grid1 * (prob["v_max"] - prob["v_min"]) + prob["v_min"]
V_spline = np.linspace(prob["v_min"], prob["v_max"], n_pc)
irated = 12
V_expect1 = np.sort(np.r_[V_expect0, prob["rated_V"]])
Omega_tsr = V_expect1 * 10 * 60 / 70.0 / 2.0 / np.pi
npt.assert_equal(prob["V"], V_expect1)
npt.assert_equal(prob["V_spline"], V_spline.flatten())
npt.assert_allclose(prob["Omega"], Omega_tsr)
npt.assert_equal(prob["pitch"], np.zeros(n_pc))
npt.assert_array_almost_equal(prob["Cp"], prob["Cp_aero"] * 0.975 * 0.975)
npt.assert_allclose(prob["Cp"], prob["Cp"][0], rtol=1e-6)
npt.assert_allclose(prob["Cp_aero"], prob["Cp_aero"][0])
myCp = prob["P"] / (0.5 * 1.225 * V_expect1 ** 3.0 * np.pi * 70 ** 2)
npt.assert_allclose(myCp, myCp[0], rtol=1e-6)
self.assertGreater(myCp[0], 0.4)
self.assertGreater(0.5, myCp[0])
npt.assert_allclose(myCp, prob["Cp"], rtol=1e-6)
npt.assert_array_less(prob["P"][:-2], prob["P"][1:-1])
npt.assert_array_less(prob["Q"][:-2], prob["Q"][1:-1])
npt.assert_array_less(prob["T"][:-2], prob["T"][1:-1])
self.assertEqual(prob["rated_V"], V_expect1[-1])
self.assertAlmostEqual(prob["rated_Omega"][0], Omega_tsr[-1], 5)
self.assertEqual(prob["rated_pitch"], 0.0)
# Test no maxTS, max rpm, no power limit
prob["omega_max"] = 15.0
prob["control_maxTS"] = 1e5
prob["rated_power"] = 1e16
prob.run_model()
V_expect1 = np.sort(np.r_[V_expect0, prob["rated_V"]])
Omega_tsr = V_expect1 * 10 * 60 / 70.0 / 2.0 / np.pi
Omega_expect = np.minimum(Omega_tsr, 15.0)
npt.assert_allclose(prob["V"], V_expect1)
npt.assert_equal(prob["V_spline"], V_spline.flatten())
npt.assert_allclose(prob["Omega"], Omega_expect)
npt.assert_equal(prob["pitch"][:irated], 0.0)
npt.assert_array_less(0.0, np.abs(prob["pitch"][(irated + 1) :]))
npt.assert_array_almost_equal(prob["Cp"], prob["Cp_aero"] * 0.975 * 0.975)
npt.assert_array_less(prob["P"][:-2], prob["P"][1:-1])
npt.assert_array_less(prob["Q"][:-2], prob["Q"][1:-1])
npt.assert_array_less(prob["T"][:-2], prob["T"][1:-1])
self.assertAlmostEqual(prob["rated_V"], V_expect1[-1], 3)
self.assertAlmostEqual(prob["rated_Omega"][0], 15.0)
self.assertEqual(prob["rated_pitch"], 0.0)
myCp = prob["P"] / (0.5 * 1.225 * V_expect1 ** 3.0 * np.pi * 70 ** 2)
npt.assert_allclose(myCp[:irated], myCp[0])
npt.assert_allclose(myCp[:irated], prob["Cp"][:irated])
# Test maxTS, no max rpm, no power limit
prob["omega_max"] = 1e3
prob["control_maxTS"] = 105.0
prob["rated_power"] = 1e16
prob.run_model()
V_expect1 = np.sort(np.r_[V_expect0, prob["rated_V"]])
# V_expect1[irated] = 105./10.
Omega_tsr = V_expect1 * 10 * 60 / 70.0 / 2.0 / np.pi
Omega_expect = np.minimum(Omega_tsr, 105.0 / 70.0 / 2 / np.pi * 60)
npt.assert_allclose(prob["V"], V_expect1)
npt.assert_equal(prob["V_spline"], V_spline.flatten())
npt.assert_allclose(prob["Omega"], Omega_expect)
npt.assert_equal(prob["pitch"][:irated], 0.0)
npt.assert_array_less(0.0, np.abs(prob["pitch"][irated:]))
npt.assert_array_almost_equal(prob["Cp"], prob["Cp_aero"] * 0.975 * 0.975)
npt.assert_array_less(prob["P"][:-2], prob["P"][1:-1])
npt.assert_array_less(prob["Q"][:-2], prob["Q"][1:-1])
npt.assert_array_less(prob["T"][:-2], prob["T"][1:-1])
self.assertEqual(prob["rated_V"], V_expect1[-1])
self.assertAlmostEqual(prob["rated_Omega"][0], Omega_expect[-1])
self.assertEqual(prob["rated_pitch"], 0.0)
myCp = prob["P"] / (0.5 * 1.225 * V_expect1 ** 3.0 * np.pi * 70 ** 2)
npt.assert_allclose(myCp[:irated], myCp[0])
npt.assert_allclose(myCp[:irated], prob["Cp"][:irated])
# Test no maxTS, no max rpm, power limit
prob["omega_max"] = 1e3
prob["control_maxTS"] = 1e4
prob["rated_power"] = 5e6
prob.run_model()
V_expect1 = np.sort(np.r_[V_expect0, prob["rated_V"]])
Omega_tsr = V_expect1 * 10 * 60 / 70.0 / 2.0 / np.pi
Omega_expect = np.minimum(Omega_tsr, prob["rated_Omega"])
npt.assert_allclose(prob["V"], V_expect1)
npt.assert_equal(prob["V_spline"], V_spline.flatten())
npt.assert_allclose(prob["Omega"][:irated], Omega_expect[:irated])
npt.assert_equal(prob["pitch"][: (irated - 1)], 0.0)
npt.assert_array_less(0.0, np.abs(prob["pitch"][irated:]))
npt.assert_array_almost_equal(prob["Cp"], prob["Cp_aero"] * 0.975 * 0.975)
npt.assert_array_less(prob["P"][:irated], prob["P"][1 : (irated + 1)])
npt.assert_allclose(prob["P"][irated:], 5e6, rtol=1e-4, atol=0)
npt.assert_array_less(prob["T"], 0.8 * 880899) # From print out in first test
self.assertAlmostEqual(prob["rated_Omega"][0], Omega_expect[-1])
self.assertGreater(prob["rated_pitch"], 0.0)
myCp = prob["P"] / (0.5 * 1.225 * V_expect1 ** 3.0 * np.pi * 70 ** 2)
npt.assert_allclose(myCp[: (irated - 1)], myCp[0])
npt.assert_allclose(myCp[: (irated - 1)], prob["Cp"][: (irated - 1)])
def testRegulationTrajectory_reindex(self):
prob = om.Problem()
debug_archive = os.path.dirname(os.path.abspath(__file__)) + os.path.sep + "debug.npz"
debug_npz = np.load(debug_archive)
(n_span, n_aoa, n_Re, n_tab) = debug_npz["airfoils_cl"].shape
n_pc = 50
modeling_options = {}
modeling_options["WISDEM"] = {}
modeling_options["WISDEM"]["RotorSE"] = {}
modeling_options["WISDEM"]["RotorSE"]["n_span"] = n_span
modeling_options["WISDEM"]["RotorSE"]["n_aoa"] = n_aoa
modeling_options["WISDEM"]["RotorSE"]["n_Re"] = n_Re
modeling_options["WISDEM"]["RotorSE"]["n_tab"] = n_tab
modeling_options["WISDEM"]["RotorSE"]["regulation_reg_III"] = True
modeling_options["WISDEM"]["RotorSE"]["n_pc"] = n_pc
modeling_options["WISDEM"]["RotorSE"]["n_pc_spline"] = n_pc
modeling_options["WISDEM"]["RotorSE"]["peak_thrust_shaving"] = False
modeling_options["WISDEM"]["RotorSE"]["thrust_shaving_coeff"] = 1.0
prob.model.add_subsystem(
"powercurve", rp.RegulatedPowerCurve(modeling_options=modeling_options), promotes=["*"]
)
prob.setup()
for k in debug_npz.files:
prob[k] = debug_npz[k]
prob.run_model()
grid0 = np.cumsum(np.abs(np.diff(np.cos(np.linspace(-np.pi / 4.0, np.pi / 2.0, n_pc)))))
grid1 = (grid0 - grid0[0]) / (grid0[-1] - grid0[0])
V_expect0 = grid1 * (prob["v_max"] - prob["v_min"]) + prob["v_min"]
V_spline = np.linspace(prob["v_min"], prob["v_max"], n_pc)
V_expect1 = np.sort(np.r_[V_expect0, prob["rated_V"]])
npt.assert_equal(prob["V"], V_expect1)
npt.assert_equal(prob["V_spline"], V_spline.flatten())
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestServo))
return suite
if __name__ == "__main__":
result = unittest.TextTestRunner().run(suite())
if result.wasSuccessful():
exit(0)
else:
exit(1)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ====================
# Set-up
# ====================
# Import required modules
import pyperclip
import regex as re
__author__ = 'Victoria Morris'
__license__ = 'MIT License'
__version__ = '1.0.0'
__status__ = '4 - Beta Development'
# ISBN format codes
# U - unknown
# P - print book
# E - e-book
# A - audio-book
# C - collective
# O - other
# X - contradiction
import json
from time import sleep, time as timestamp
from urllib.request import Request, urlopen
SERVICE_URL = 'https://www.googleapis.com/books/v1/volumes?q=isbn:{isbn}' \
'&fields=items/volumeInfo(title,authors,industryIdentifiers),items/saleInfo&maxResults=1'
URLOPEN_TIMEOUT = 10 # seconds
class WEBService(object):
def __init__(self, url):
self._url = url
self._request = Request(url, None)
self.response = None
def _response(self):
try: self.response = urlopen(self._request, timeout=URLOPEN_TIMEOUT)
except: self.response = None
def data(self):
self._response()
return self.response.read().decode(encoding='utf-8', errors='replace')
class WEBQuery(object):
"""Base class to query a webservice and parse the result to py objects."""
T = {'id': timestamp()}
def __init__(self, service_url):
srv = service_url[8:20]
last = WEBQuery.T[srv] if srv in WEBQuery.T else 0.0
wait = 0 if timestamp() - last > 1 else 1
sleep(wait)
self.url = service_url
self.service = WEBService(service_url)
self.data = self.service.data()
WEBQuery.T[srv] = timestamp()
def check_data(self):
if self.data == '{}':
print('Not found')
if 'No results match your search' in self.data:
print('Not found')
if 'Temporarily out of service' in self.data:
print('Out of service')
return True
def parse_data(self):
decoder = json.JSONDecoder()
return decoder.decode(str(self.data))
def _records(isbn, data):
try: recs = data['items'][0]['volumeInfo']
except: return None
if recs:
ids = recs.get('industryIdentifiers', '')
if 'ISBN_13' in repr(ids) and isbn not in repr(ids): return None
try: return data['items'][0]['saleInfo'].get('isEbook', '')
except: return None
def query(isbn):
"""Query the Google Books (JSON API v1) service for metadata."""
wq = WEBQuery(SERVICE_URL.format(isbn=isbn))
r = wq.parse_data() if wq.check_data() else None
if r:
return _records(isbn, r)
return r
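# Illustrative usage (requires network access; the ISBN below is only an example):
#   query('9780306406157')  # returns the Google Books 'isEbook' flag for the volume, or None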
# ====================
# Constants
# ====================
ISBN_FORMATS = ['U', 'P', 'E', 'A', 'C', 'O', 'X']
# ====================
# Regular expressions
# ====================
RE_ISBN10 = re.compile(r'ISBN\x20(?=.{13}$)\d{1,5}([- ])\d{1,7}\1\d{1,6}\1(\d|X)$|[- 0-9X]{10,16}')
RE_ISBN13 = re.compile(r'97[89]{1}(?:-?\d){10,16}|97[89]{1}[- 0-9]{10,16}')
RE_PUB_PREFIX = re.compile(r'^(?P<pub>0[01][0-9]|'
r'0[2-6][0-9]{2}|'
r'07[0-9]{3}|'
r'08[0-4][0-9]{2}|08[5-9][0-9]{3}|'
r'09[0-4][0-9]{4}|09[5-9][0-9]{5}|'
r'10[0-9]|'
r'1[1-3][0-9]{2}|14[0-9]{3}|'
r'15[0-4][0-9]{2}|15[5-8][0-9]{3}|159[0-8][0-9]{2}|1599[0-8][0-9]|15999[0-9]|'
r'1[67][0-9]{4}|'
r'18[0-5][0-9]{3}|186[0-8][0-9]{2}|1869[0-6][0-9]|18697[0-9]|18698[0-9]{2}|'
r'18699[0-8][0-9]|186999[0-9]|18[7-9][0-9]{4}|'
r'19[0-8][0-9]{4}|199[0-7][0-9]{3}|1998[0-8][0-9]{2}|19989[0-8][0-9]|'
r'199899[0-9]1999[0-8][0-9]{3}|19999[0-8][0-9]{2}|199999[0-8][0-9]|'
r'1999999[0-9]|'
r'[2-5]|6[01][0-9]|62[01]|[7-8]|9[0-4]|'
r'9[5-7][0-9]|98[0-9]|99[0-7][0-9]|998[0-9]|999[0-8][0-9]|9999[0-9])')
# Captures the publisher group for area codes 0 and 1, but only the language area for other ISBNs
RE_PUB_PREFIX_979 = re.compile(r'^(?P<pub>(10(?:[01][0-9]|[2-6][0-9]{2}|[7-8][0-9]{3}|9[0-9]{4}))|'
r'(11(?:[01][0-9]|[2-4][0-9]{2}|[5-7][0-9]{3}|8[0-9]{4}|9[0-9]{5}))|'
r'(12(?:[01][0-9]|[2-6][0-9]{2}|[7-8][0-9]{3}|9[0-9]{4})))')
# ====================
# Classes
# ====================
class Isbn(object):
def __init__(self, content, format='U'):
self.valid = True
self.format = format
if self.format not in ISBN_FORMATS:
self.isbn = content.strip()
self.prefix = ''
self.valid = False
else:
self.isbn = re.sub(r'[^0-9X]', '', content.upper())
if is_isbn_10(self.isbn):
self.isbn = isbn_convert(self.isbn)
if not is_isbn_13(self.isbn):
self.valid = False
if not (len(self.isbn) == 10 or len(self.isbn) == 13):
self.isbn = None
self.prefix = isbn_prefix(self.isbn)
if get_resource_format(content):
self.format = get_resource_format(content)
def set_format(self, format):
self.format = format
def __str__(self):
return '{}\t{}\t{}\t{}'.format(self.isbn, self.prefix, self.format, str(self.valid))
# ====================
# Functions
# ====================
def isbn_10_check_digit(nine_digits):
"""Function to get the check digit for a 10-digit ISBN"""
if len(nine_digits) != 9: return None
try: int(nine_digits)
except: return None
remainder = int(sum((i + 2) * int(x) for i, x in enumerate(reversed(nine_digits))) % 11)
if remainder == 0: tenth_digit = 0
else: tenth_digit = 11 - remainder
if tenth_digit == 10: tenth_digit = 'X'
return str(tenth_digit)
def isbn_13_check_digit(twelve_digits):
"""Function to get the check digit for a 13-digit ISBN"""
if len(twelve_digits) != 12: return None
try: int(twelve_digits)
except: return None
thirteenth_digit = 10 - int(sum((i % 2 * 2 + 1) * int(x) for i, x in enumerate(twelve_digits)) % 10)
if thirteenth_digit == 10: thirteenth_digit = '0'
return str(thirteenth_digit)
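# Worked example of the ISBN-13 check digit (digits weighted alternately by 1 and 3):
#   isbn_13_check_digit('978030640615')
#   -> weighted sum = 9+21+8+0+3+0+6+12+0+18+1+15 = 93, check digit = 10 - (93 % 10) = '7'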
def isbn_10_check_structure(isbn10):
"""Function to check the structure of a 10-digit ISBN"""
return True if re.match(RE_ISBN10, isbn10) else False
def isbn_13_check_structure(isbn13):
"""Function to check the structure of a 13-digit ISBN"""
return True if re.match(RE_ISBN13, isbn13) else False
def is_isbn_10(isbn10):
"""Function to validate a 10-digit ISBN"""
isbn10 = re.sub(r'[^0-9X]', '', isbn10.replace('x', 'X'))
if len(isbn10) != 10: return False
return False if isbn_10_check_digit(isbn10[:-1]) != isbn10[-1] else True
def is_isbn_13(isbn13):
"""Function to validate a 13-digit ISBN"""
isbn13 = re.sub(r'[^0-9X]', '', isbn13.replace('x', 'X'))
if len(isbn13) != 13: return False
if isbn13[0:3] not in ('978', '979'): return False
return False if isbn_13_check_digit(isbn13[:-1]) != isbn13[-1] else True
def isbn_convert(isbn10):
"""Function to convert a 10-digit ISBN to a 13-digit ISBN"""
if not is_isbn_10(isbn10): return None
return '978' + isbn10[:-1] + isbn_13_check_digit('978' + isbn10[:-1])
def isbn13_convert(isbn13):
"""Function to convert a 13-digit ISBN to a 10-digit ISBN"""
if not is_isbn_13(isbn13): return None
return isbn13[3:-1] + isbn_10_check_digit(isbn13[3:-1])
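# Round-trip example; the two conversion helpers reuse the check-digit functions above:
#   isbn_convert('0306406152')      -> '9780306406157'
#   isbn13_convert('9780306406157') -> '0306406152'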
def get_resource_format(s):
if re.search(r'\b(pack|set|seri(es|a))\b', s, re.I):
return 'C'
if re.search(r'\baudio[\-\s]*(b(oo)?k)?\b', s, re.I):
return 'A'
if re.search(r'\b(p(aper)?|h(ar)?d?)b(ac|oo)?k?\b|(hard|soft)[\-\s]*cover|(case|spiral)[\-\s]*bound|cased|'
r'alk(aline)?\.? paper|print(\b|ed)|\bpaper\b|loose[\-\s]*leaf|\b(h|s)b\b|p-?isbn|\bcloth', s, re.I):
return 'P'
if re.search(r'e-?p(ub|df)|\be(-|lectronic)?\s*b(oo)?k|e-?isbn|electronic|'
r'\b(adobe|digital|eb|kindle|mobi(pocket)?|myilibrary|u?pdf|online)\b', s, re.I):
return 'E'
if re.search(r'\b(cassette|cd(-?rom)?|map)\b', s, re.I):
return 'O'
return None
def check_format(isbn, current_format, new_format, checked, skip_check=False):
if 'C' in [current_format, new_format]:
return 'C', True
if checked:
return current_format, True
if current_format == new_format:
return current_format, checked
if new_format == 'U':
return current_format, checked
if current_format == 'U' and new_format in ISBN_FORMATS:
return new_format, checked
if isbn.startswith(('978311', '9783484')):
return 'P', True
if 'E' in [current_format, new_format] and 'P' in [current_format, new_format]:
print('\nTrying Google ...')
try: e = query(isbn)
except: e = None
else:
print('Resolved format of {} using Google Books'.format(isbn))
if e: return 'P', True
else: return 'E', True
if skip_check:
return 'X', False
f = None
while f not in ISBN_FORMATS:
pyperclip.copy(isbn)
f = input('Please enter the format of ISBN {} '
'(current formats are {}, {}): '.format(isbn, current_format, new_format)).upper()
return f, True
def isbn_prefix(isbn):
"""Function to return the publisher prefix from a 13-digit ISBN"""
if is_null(isbn): return ''
if is_isbn_10(isbn): isbn = isbn_convert(isbn)
if not is_isbn_13(isbn): return ''
if isbn.startswith('979'):
isbn = isbn[3:]
try: return '979' + RE_PUB_PREFIX_979.search(isbn).group('pub')
except: return '979' + isbn[3:5]
elif isbn.startswith('978'):
isbn = isbn[3:]
try: return '978' + RE_PUB_PREFIX.search(isbn).group('pub')
except: return ''
return ''
def is_null(var):
"""Function to test whether a variable is null"""
if var is None or not var: return True
if any(isinstance(var, s) for s in [str, list, tuple, set]) and len(var) == 0: return True
if isinstance(var, str) and var == '': return True
if any( isinstance(var, s) for s in [int, float, complex, bool] ) and int(var) == 0: return True
return False
|
from wq.core import wq
import click
import os
import json
import scss as pyScss
import logging
import pystache
from .collect import readfiles
import requirejs
from babeljs import transformer as babeljs
@wq.command()
@wq.pass_config
def optimize(config):
"""
Use r.js to optimize JS and CSS assets. This command requires an
"optimize" section in your configuration file, which will be passed as-is
to r.js for compilation. See http://requirejs.org/docs/optimization.html
for available configuration options.
"""
conf = config.get('optimize', None)
if not conf:
raise click.UsageError(
"optimize section not found in %s" % config.filename
)
# Defer to r.js for actual processing
click.echo("Optimizing with r.js...")
try:
requirejs.optimize(conf)
except requirejs.RJSException as e:
raise click.ClickException(e.args[0])
click.echo("Optimization complete")
@wq.command()
@wq.pass_config
def babel(config):
"""
Use babel.js to compile ES6/2015+. Generates ES5-compatible JavaScript for
older browsers. Note that wq babel is run after wq optimize, on the
compiled modules created by r.js. Support for running babel at other
stages of the build process may be added in a future version of wq.app.
"""
rconf = config.get('optimize', None)
if not rconf:
raise click.UsageError(
"optimize section not found in %s" % config.filename
)
babel = config.get('babel', {})
files = []
if 'modules' in rconf and 'dir' in rconf:
base_url = rconf.get('baseUrl', '.')
for module in rconf['modules']:
path = module['name']
if path in rconf.get('paths', {}):
path = rconf['paths'][path]
path = os.path.join(rconf['dir'], base_url, path)
files.append(path + '.js')
for filename in files:
label = os.path.normpath(filename)
try:
with open(filename) as f:
content = f.read()
except OSError:
raise click.ClickException(
"Error loading %s - run wq optimize first?" % label
)
try:
print("Transforming %s with Babel..." % label)
output = babeljs.transform_string(content, **babel)
except babeljs.TransformError as e:
raise click.ClickException(e.args[0])
with open(filename, 'w') as f:
f.write(output)
@wq.command()
@click.option(
'--indir', type=click.Path(exists=True), default="scss",
help="Path to SCSS/SASS files"
)
@click.option(
'--outdir', type=click.Path(exists=True), default="css",
help="Path to CSS files"
)
def scss(**conf):
"""
Render all SCSS/SASS files into CSS. The input directory will be searched
for *.scss files, which will be compiled to corresponding *.css files in
the output directory.
"""
compiler = pyScss.Scss(scss_opts={'compress': 0})
logging.getLogger("scss").addHandler(logging.StreamHandler())
def compile(path, source):
css = compiler.compile(source)
outfile = open(path, 'w')
outfile.write(css)
outfile.close()
files = readfiles(conf['indir'], "scss")
pyScss.config.LOAD_PATHS = [
conf['indir'],
os.path.join(conf['indir'], 'lib'),
# FIXME: Why aren't these paths automatically picked up on Windows?
os.path.join(conf['indir'], 'lib', 'compass'),
os.path.join(conf['indir'], 'lib', 'compass', 'css3'),
]
for name, source in files.items():
if isinstance(source, dict):
continue
path = "%s/%s.css" % (conf['outdir'], name)
compile(path, source)
click.echo("%s compiled from %s/%s.scss" % (path, conf['indir'], name))
@wq.command()
@click.option('--template', help="Path to template")
@click.option('--partials', help="Path to partials")
@click.option('--context', help="Path to context (JSON or YAML)")
@click.option(
'--output', type=click.Path(), default="output.html",
help="Output filename"
)
def mustache(**conf):
"""
Render a mustache template into static HTML. The template context can be
    provided via a nested object in wq.yml, or by pointing to a folder
containing JSON or YAML files. Similarly, the partials can be defined as a
nested object in wq.yml or by a folder path.
Example YAML configuration:
\b
mustache:
template: "<html><body>{{>header}}{{>footer}}</body></html>"
partials:
header: "<h3>{{title}}</h3>"
footer: "<a href='mailto:{{email}}'>{{email}}</a>"
context:
title: "Example"
email: "email@example.com"
output: index.html
Example command line configuration:
wq mustache --template tmpl.html --partials partials/ --context conf/
"""
template = conf['template']
if template is None:
return
if os.path.exists(template) or template.endswith('.html'):
try:
template = open(template).read()
except IOError as e:
raise click.FileError(template, hint=str(e))
context = conf["context"] or {}
if not isinstance(context, dict):
if context.startswith('{'):
context = json.loads(context)
else:
path = context
context = readfiles(path, "yaml", "yml")
context.update(**readfiles(path, "json"))
partials = conf['partials'] or {}
if not isinstance(partials, dict):
partials = readfiles(partials, "html")
click.echo("Generating %s from %s" % (conf['output'], conf['template']))
renderer = pystache.Renderer(partials=partials)
html = renderer.render(template, context)
f = open(conf['output'], 'w')
f.write(html)
f.close()
|
#!/usr/bin/env python3
""" Holmakefile generator.
This script generates `Holmakefile` files from `Holmakefile.gen` files, in
order to add support for inclusion.
Syntax:
Lines beginning with `include ` will be replaced by the content of all
files whose paths follow on the line, in the order they appear on the line.
For example,
include ../Holmakefile /some/other/file
will be replaced by the content of `../Holmakefile` first, then the content
of `/some/other/file`.
"""
import sys
import os
out_filename = "Holmakefile"
gen_filename = out_filename + ".gen"
src_root = "./src"
def gen_holmakefile_in(dir_path):
""" Generates a `Holmakefile` from the `Holmakefile.gen` file present in
dir_path.
"""
out_path = os.path.join(dir_path, out_filename)
gen_path = os.path.join(dir_path, gen_filename)
assert os.path.isfile(gen_path), \
"Cannot generate '{}': missing '{}'.".format(out_path, gen_path)
print("Generating: {}".format(out_path))
result = ""
with open(gen_path) as gen_file:
for line in gen_file:
if line.startswith("include "):
files_to_include = map(str.strip, line.split(" ")[1:])
for inc_path in files_to_include:
inc_path = os.path.join(dir_path, inc_path)
assert os.path.isfile(inc_path), \
"Cannot include '{}': invalid path.".format(inc_path)
with open(inc_path) as inc_file:
result += "".join(inc_file.readlines())
else:
result += line
with open(out_path, 'w') as f:
f.write(result)
def main():
argc = len(sys.argv)
if len(sys.argv) == 1: # Working in `src_root/`
print("Recursively working in: {}".format(src_root))
dir_paths = []
for path, subdirs, files in os.walk(src_root):
for f in files:
if f == gen_filename:
dir_paths.append(path)
for dir_path in dir_paths:
gen_holmakefile_in(dir_path)
elif argc == 2: # Working in the given directory
path = sys.argv[1]
dir_path = os.path.dirname(os.path.abspath(path))
# print("Working in: {}".format(dir_path))
gen_holmakefile_in(dir_path)
else:
print("Invalid invocation.\nUsage: {} [directory]".format(
sys.argv[0]), file=sys.stderr)
main()
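# Illustrative invocation (paths are examples only):
#   ./gen_holmakefile.py              # process every Holmakefile.gen found under ./src
#   ./gen_holmakefile.py some/dir/x   # process only the directory containing the given path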
|
import logging
import os
import re
class DialogueTextFile:
def __init__(
self,
data_path,
input_paths,
label_name,
eoc_regex,
eoc,
limit=None,
):
self.input_paths = [
os.path.join(data_path, input_path) for input_path in input_paths
]
self.label_name = label_name
self.eoc_regex = eoc_regex
self.eoc = eoc
self.limit = limit
def load_dialogue(self, path: str):
splitter = f"(.*){self.eoc_regex}(.*)"
with open(path, "r") as f:
lines = f.readlines()
res = []
for i, line in enumerate(lines):
if self.limit and i == self.limit:
break
match = re.findall(splitter, line)
if match is None or len(match) == 0 or len(match[0]) != 2:
                logging.warning(
                    f"Could not split line {i} into a context/response pair; "
                    f"got {len(match)} splits:"
                )
logging.warning(line)
else:
match = match[0]
res.append(
[match[0].strip() + " " + self.eoc, {"response": match[1].strip()}]
)
print("Loading data from disk finished")
return res
def __call__(self):
out = []
for path in self.input_paths:
out.append(self.load_dialogue(path))
return out
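# Minimal usage sketch (all arguments below are illustrative assumptions, not project defaults):
#   loader = DialogueTextFile(
#       data_path="data", input_paths=["dialogues.txt"], label_name="response",
#       eoc_regex=r"\t", eoc="<EOC>", limit=1000)
#   datasets = loader()  # one list per input file of [context + " <EOC>", {"response": ...}] pairs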
|
import json
import logging
import tweepy
logger = logging.getLogger()
def createApi():
with open("../config.json", "r") as file:
config = json.load(file)
auth = tweepy.OAuthHandler(config["consumerApiKey"], config["consumerApiSecretKey"])
auth.set_access_token(config["authApiKey"], config["authApiSecret"])
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
try:
api.verify_credentials()
except Exception as e:
logger.error("Error creating API", exc_info=True)
raise e
logger.info("API created")
return api
|
from ppf_date_time import \
weekdays \
, months_of_year \
, nth_kday_of_month \
, year_based_generator
from nth_imm_of_year import *
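# IMM dates are the quarterly futures delivery dates: the third Wednesday of
# March, June, September and December (hence four per year, imm.first..imm.fourth).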
def first_imm_after(start):
'''Find the next IMM date after the given date.
>>> from ppf_date_time import date
>>> from ppf_date_time import months_of_year
>>> Jun = months_of_year.Jun
>>> print first_imm_after(date(2007, Jun, 27))
2007-Sep-19
'''
imm = nth_imm_of_year
last_imm_of_year = imm(imm.fourth).get_date(start.year())
imm_date = None
if start >= last_imm_of_year:
imm_date = imm(imm.first).get_date(start.year() + 1)
else:
for imm_no in [imm.first, imm.second, imm.third, imm.fourth]:
imm_date = imm(imm_no).get_date(start.year())
if imm_date > start:
break
return imm_date
def _test():
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
|
#!/usr/bin/python
import sys
import os
import time
import datetime
from datetime import timedelta
import requests
from bs4 import BeautifulSoup
from ftplib import FTP
#if len(sys.argv) != 2:
# print >>sys.stderr, "Useage: ",sys.argv[0]," [YYYY_MM_DD]"
# quit()
#date = sys.argv[1]
# get current date and time minus one hour
UTC_OFFSET_TIMEDELTA = datetime.datetime.utcnow() - datetime.datetime.now()
date_1_hour_ago = datetime.datetime.now() - timedelta(hours=1) + UTC_OFFSET_TIMEDELTA
date = date_1_hour_ago.strftime("%Y_%m_%d")
dateNoHyphens = date_1_hour_ago.strftime("%Y%m%d")
hour = date_1_hour_ago.strftime("%H")
#nowTime = time.gmtime()
#now = datetime.datetime(nowTime.tm_year, nowTime.tm_mon, nowTime.tm_mday,
# nowTime.tm_hour, nowTime.tm_min, nowTime.tm_sec)
#date = now.strftime("%Y_%m_%d")
#hour = now.strftime("%H")
#date = '2018_11_01'
url = 'https://engineering.arm.gov/~radar/amf1_csapr2_incoming_images/ppi/'+date+'/'
ext = 'png'
homeDir = os.getenv('HOME')
outDir = os.path.join(homeDir, 'radar/csapr2_ppi/' + date)
category = 'radar'
platform = 'DOE_CSapr2'
ftpCatalogServer = 'catalog.eol.ucar.edu'
ftpCatalogUser = 'anonymous'
catalogDestDir = '/pub/incoming/catalog/relampago'
debug = 1
def listFD(url, ext=''):
page = requests.get(url).text
print page
soup = BeautifulSoup(page, 'html.parser')
return [url + '/' + node.get('href') for node in soup.find_all('a') if node.get('href').endswith(ext)]
if not os.path.exists(outDir):
os.makedirs(outDir)
os.chdir(outDir)
for file in listFD(url, ext):
tmp = os.path.basename(file)
(f,e) = os.path.splitext(tmp)
parts = f.split('_')
(fdate,ftime) = parts[3].split('-')
fhour = ftime[0:2]
angleParts = parts[5].split('.')
if angleParts[0] == '00' or angleParts[0] == '03':
if fdate == dateNoHyphens and fhour == hour:
print file
cmd = 'wget '+file
os.system(cmd)
# correct names of -0.0 files
#cmd = 'mmv "*_-0.0.png" "#1_00.0.png"'
#os.system(cmd)
# rename files and ftp them
for file in os.listdir(outDir):
if file.startswith('cor_'):
if debug:
print >>sys.stderr, "file = ",file
(filename, file_ext) = os.path.splitext(file)
parts = filename.split('_')
(date,time) = parts[3].split('-')
angle_parts = parts[5].split('.')
if angle_parts[0] == '00':
angle = '0_5'
else:
angle = '3_x'
product = parts[2]+'_'+parts[4]+'_'+angle
file_cat = category+'.'+platform+'.'+date+time+'.'+product+file_ext
if debug:
print >>sys.stderr, "file_cat = ",file_cat
cmd = 'mv '+file+' '+file_cat
os.system(cmd)
# ftp file
try:
catalogFTP = FTP(ftpCatalogServer,ftpCatalogUser)
catalogFTP.cwd(catalogDestDir)
file = open(file_cat,'rb')
catalogFTP.storbinary('STOR '+file_cat,file)
file.close()
catalogFTP.quit()
except Exception as e:
print >>sys.stderr, "FTP failed, exception: ", e
|
# -*- coding: utf-8 -*-
# Copyright 2022, SERTIT-ICube - France, https://sertit.unistra.fr/
# This file is part of eoreader project
# https://github.com/sertit/eoreader
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Environment variables that can change the processes """
PP_GRAPH = "EOREADER_PP_GRAPH"
"""Environment variable for overriding default pre-processing graph path"""
DSPK_GRAPH = "EOREADER_DSPK_GRAPH"
"""Environment variable for overriding default despeckling graph path"""
SAR_DEF_RES = "EOREADER_SAR_DEFAULT_RES"
"""Environment variable for SAR default resolution, used for SNAP orthorectification to override default resolution."""
DEM_PATH = "EOREADER_DEM_PATH"
"""Environment variable for overriding default DEM path"""
SNAP_DEM_NAME = "EOREADER_SNAP_DEM_NAME"
"""
Environment variable for overriding default DEM name used in SNAP.
Default is :code:`Copernicus 30m Global DEM`.
Can be :code:`GETASSE30`, :code:`SRTM 3Sec`, :code:`External DEM`...
If :code:`EOREADER_SNAP_DEM_NAME` is set to :code:`External DEM`,
SNAP will use your DEM stored in :code:`EOREADER_DEM_PATH` as an external DEM.
"""
S3_DB_URL_ROOT = "S3_DB_URL_ROOT"
"""Environment variable used for specify DB base url (e.g. :code:`https://s3.unistra.fr/bucket_name/`) """
TEST_USING_S3_DB = "TESTING_USING_S3_DB"
"""Environment variable to specify to use external DB as a opposed to local one. (For testing purposes only)"""
CI_EOREADER_BAND_FOLDER = "CI_EOREADER_BAND_FOLDER"
"""
Environment variable used in CI to override the existing band path
in order to bypass SNAP process and DEM reprojection.
"""
USE_DASK = "EOREADER_USE_DASK"
"""
If set and :code:`dask` is installed, EOReader will read products as dask arrays instead of numpy arrays.
"""
|
from ProcessData.Utils import *
from ProcessData.Skeleton import Skeleton
# rotation = rotType.SixDim
# latentDim = 20
# deltaT = 6
# poseComponents = ['R{}'.format(rotation.name), 'Height', 'Root_Position_Velocity', 'Root_HipAngleRad', 'Root_HipTurnVelRad']
# featureComponents = ['Height_last', 'Root_Position_Velocity_last', 'Root_HipAngleRad_last', 'Root_HipTurnVelRad_last', 'Future_past_pos_last', 'Future_past_ori_last', 'Future_past_vel_last', 'Future_past_speed_last', 'X_feet_last', 'V_feet_last', 'X_hands_last', 'V_hands_last']
#name = "N1_Six_20_6"
rotation = rotType.SixDim
latentDim = 25
deltaT = 6
tag = "_new" ##"_adaptive"
epRange = [5,15]
poseComponents = [ 'R{}'.format(rotation.name),
'Height',
'Root_Position_Velocity',
'Root_HipAngleRad',
'Root_HipTurnVelRad',
'Contacts']
featureComponents = [ 'Height_last',
'Root_Position_Velocity_last',
'Root_HipAngleRad_last',
'Root_HipTurnVelRad_last',
"Future_pos_last",
"Future_ori_last",
"Past_pos_last",
"Past_ori_last",
'X_feet_last',
'V_feet_last',
'X_hands_last',
'V_hands_last']
name = "{}_{}_{}".format(rotation.name, latentDim, deltaT) + tag
dropoutseperation = [1 + 2 + 2 + 1 + 6 + 3, 6 + 3 + 6 + 6 + 6 + 6] #[1 + 2 + 2 + 1 + 6 + 3 + 6 + 3, 6 + 3 + 6 + 3 + 6 + 6 + 6 + 6]
footIndices = [3,4,7,8]
skeleton = Skeleton(offsets=[ [ 0.00000000, 0.00000000, 0.00000000],
[ 1.0345e-01, 1.8578e+00, 1.0549e+01],
[ 4.3500e+01, -6.1000e-05, 0.0000e+00],
[ 4.2372e+01, 8.0000e-06, -2.0000e-06],
[ 1.7300e+01, 1.1000e-05, 1.2000e-05],
[ 1.0346e-01, 1.8578e+00, -1.0548e+01],
[ 4.3500e+01, 1.5000e-05, 6.0000e-06],
[ 4.2372e+01, -4.6000e-05, 1.7000e-05],
[ 1.7300e+01, 1.1000e-05, 1.5000e-05],
[ 6.9020e+00, -2.6037e+00, -4.0000e-06],
[ 1.2588e+01, -4.0000e-06, -1.0000e-06],
[ 1.2343e+01, -2.6000e-05, 3.0000e-06],
[ 2.5833e+01, 4.0000e-06, -6.0000e-06],
[ 1.1767e+01, 3.1000e-05, 4.0000e-06],
[ 1.9746e+01, -1.4803e+00, 6.0001e+00],
[ 1.1284e+01, 3.0000e-06, -1.5000e-05],
[ 3.3000e+01, -2.3000e-05, 2.7000e-05],
[ 2.5200e+01, 5.1000e-05, 2.1000e-05],
[ 1.9746e+01, -1.4804e+00, -6.0001e+00],
[ 1.1284e+01, 4.0000e-06, -1.5000e-05],
[ 3.3000e+01, -2.3000e-05, 1.1000e-05],
[ 2.5200e+01, 1.4800e-04, 4.2200e-04] ],
parents= [-1, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 12, 11, 14, 15, 16, 11, 18, 19, 20])
left = [1,2,3,4,14,15,16,17]
right = [5,6,7,8,18,19,20,21]
central = [0,9,10,11,12,13]
training_source_dir = 'TrainingData'
validation_source_dir = 'ValidationData'
|
#!/usr/bin/env python3
import argparse
import serial
from time import sleep
import pynput
import pyautogui
import win32api
import win32con
# for time delaying the input:
from threading import Timer
import time
from math import sqrt
from switchcontroller.switchcontroller import *
screenWidth, screenHeight = pyautogui.size()
x = screenWidth/2
y = screenHeight/2
prevX = 0
prevY = 0
controller = SwitchController()
controller.connect("COM6")
start = time.perf_counter()  # time.clock() was removed in Python 3.8
while True:
prevX = x
prevY = y
x, y = pyautogui.position()
# deltaX = x - prevX
# deltaY = y - prevY
deltaX = x - screenWidth/2
deltaY = y - screenHeight/2
# pyautogui.moveTo(screenWidth/2, screenHeight/2)
length = sqrt(deltaX**2 + deltaY**2)
# if(length > 200):
# pyautogui.moveTo(prevX, prevY)
multiplier = 16
# controller.RX = int((deltaX*multiplier)+128)
# controller.RY = int((deltaY*multiplier)+128)
# clamp:
controller.RX = max(STICK_MIN, min(controller.RX, STICK_MAX))
controller.RY = max(STICK_MIN, min(controller.RY, STICK_MAX))
#sleep(0.01)
controller.reset()
if win32api.GetAsyncKeyState(ord("W")):
controller.LY = STICK_MIN
if win32api.GetAsyncKeyState(ord("S")):
controller.LY = STICK_MAX
if win32api.GetAsyncKeyState(ord("A")):
controller.LX = STICK_MIN
if win32api.GetAsyncKeyState(ord("D")):
controller.LX = STICK_MAX
if win32api.GetAsyncKeyState(ord("I")):
controller.RY = STICK_MIN
if win32api.GetAsyncKeyState(ord("K")):
controller.RY = STICK_MAX
if win32api.GetAsyncKeyState(ord("J")):
controller.RX = STICK_MIN
if win32api.GetAsyncKeyState(ord("L")):
controller.RX = STICK_MAX
if(win32api.GetAsyncKeyState(win32con.VK_RIGHT)):
controller.a = 1
if(win32api.GetAsyncKeyState(win32con.VK_DOWN)):
controller.b = 1
if(win32api.GetAsyncKeyState(win32con.VK_UP)):
controller.x = 1
if(win32api.GetAsyncKeyState(win32con.VK_LEFT)):
controller.y = 1
if win32api.GetAsyncKeyState(ord("T")):
controller.dpad = DPAD_UP
if win32api.GetAsyncKeyState(ord("G")):
controller.dpad = DPAD_DOWN
if win32api.GetAsyncKeyState(ord("F")):
controller.dpad = DPAD_LEFT
if win32api.GetAsyncKeyState(ord("H")):
controller.dpad = DPAD_RIGHT
# l/r and zl/zr:
if win32api.GetAsyncKeyState(ord("O")):
controller.l = 1
if win32api.GetAsyncKeyState(ord("P")):
controller.r = 1
if win32api.GetAsyncKeyState(ord("9")):
controller.zl = 1
if win32api.GetAsyncKeyState(ord("0")):
controller.zr = 1
# minus/plus
if win32api.GetAsyncKeyState(ord("5")):
controller.minus = 1
if win32api.GetAsyncKeyState(ord("6")):
controller.plus = 1
# so I don't get stuck:
if(win32api.GetAsyncKeyState(win32con.VK_ESCAPE)):
controller.send("RELEASE")
controller.ser.close()
exit()
controller.getOutput()
    end = time.perf_counter()
diffInSeconds = end - start
diffInMilliSeconds = diffInSeconds*1000
if(diffInMilliSeconds > 80):
        start = time.perf_counter()
controller.send(controller.output)
|
import os
import urllib.request, urllib.error, urllib.parse
import time
from minerl.dependencies.pySmartDL.pySmartDL import utils
def download(url, dest, startByte=0, endByte=None, headers=None, timeout=4, shared_var=None, thread_shared_cmds=None, logger=None, retries=3):
"The basic download function that runs at each thread."
logger = logger or utils.DummyLogger()
if not headers:
headers = {}
if endByte:
headers['Range'] = 'bytes=%d-%d' % (startByte, endByte)
logger.info("Downloading '{}' to '{}'...".format(url, dest))
req = urllib.request.Request(url, headers=headers)
try:
urlObj = urllib.request.urlopen(req, timeout=timeout)
except urllib.error.HTTPError as e:
if e.code == 416:
'''
HTTP 416 Error: Requested Range Not Satisfiable. Happens when we ask
for a range that is not available on the server. It will happen when
the server will try to send us a .html page that means something like
"you opened too many connections to our server". If this happens, we
will wait for the other threads to finish their connections and try again.
'''
if retries > 0:
logger.warning("Thread didn't got the file it was expecting. Retrying ({} times left)...".format(retries-1))
time.sleep(5)
return download(url, dest, startByte, endByte, headers, timeout, shared_var, thread_shared_cmds, logger, retries-1)
else:
raise
else:
raise
with open(dest, 'wb') as f:
if endByte:
filesize = endByte-startByte
else:
try:
meta = urlObj.info()
filesize = int(urlObj.headers["Content-Length"])
logger.info("Content-Length is {}.".format(filesize))
except (IndexError, KeyError, TypeError):
logger.warning("Server did not send Content-Length.")
filesize_dl = 0 # total downloaded size
limitspeed_timestamp = time.time()
limitspeed_filesize = 0
block_sz = 8192
while True:
if thread_shared_cmds:
if 'stop' in thread_shared_cmds:
logger.info('stop command received. Stopping.')
raise CanceledException()
if 'pause' in thread_shared_cmds:
time.sleep(0.2)
continue
if 'limit' in thread_shared_cmds:
now = time.time()
time_passed = now - limitspeed_timestamp
if time_passed > 0.1: # we only observe the limit after 100ms
                        # if we passed the limit, sleep long enough to fall back under it
if (filesize_dl-limitspeed_filesize)/time_passed >= thread_shared_cmds['limit']:
time_to_sleep = (filesize_dl-limitspeed_filesize) / thread_shared_cmds['limit']
logger.debug('Thread has downloaded {} in {}. Limit is {}/s. Slowing down...'.format(utils.sizeof_human(filesize_dl-limitspeed_filesize), utils.time_human(time_passed, fmt_short=True, show_ms=True), utils.sizeof_human(thread_shared_cmds['limit'])))
time.sleep(time_to_sleep)
continue
else:
limitspeed_timestamp = now
limitspeed_filesize = filesize_dl
try:
buff = urlObj.read(block_sz)
except Exception as e:
logger.error(str(e))
if shared_var:
shared_var.value -= filesize_dl
raise
if not buff:
break
filesize_dl += len(buff)
if shared_var:
shared_var.value += len(buff)
f.write(buff)
urlObj.close()
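# Illustrative call (URL and destination are placeholders): fetch the first MiB of a
# file into a part-file using an HTTP Range request (bytes 0-1048575 inclusive).
#   download('https://example.com/file.bin', '/tmp/file.part0', startByte=0, endByte=1048575)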
|
import sys
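# Each node i has exactly one outgoing edge i -> graph[i] (a functional graph).
# dfs() walks forward from a node, marking the current path with 1; when the walk
# re-enters the path a cycle is found and its members are re-marked 2, while
# non-cycle path nodes are reset to 0. Nodes left with state 2 lie on a cycle.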
def dfs(node):
    if visit[graph[node]] == 0:
        visit[graph[node]] = 1
        dfs(graph[node])
    if visit[graph[node]] == 1:
        visit[graph[node]] = 2
        dfs(graph[node])
    if visit[node] == 1:
        visit[node] = 0
n = int(sys.stdin.readline())
graph = [int(sys.stdin.readline()) - 1 for _ in range(n)]
visit = [0 for _ in range(n)]
for i in range(n):
    if visit[i] == 0:
visit[i] = 1
dfs(i)
print(visit.count(2))
for i,j in enumerate(visit):
    if j == 2:
print(i+1)
|
import re
import urllib.request
from urllib.error import URLError
class Skeleton:
NON_URL_SKELETON = re.compile('^[A-Za-z0-9_-]+$')
_url: str
def __init__(self, skeleton_identified: str) -> None:
self._skeleton_identified = skeleton_identified
def get_url(self) -> str:
try:
return self._url
except AttributeError:
skeleton_url = self._skeleton_identified
if self.NON_URL_SKELETON.match(skeleton_url):
skeleton_url = 'https://git.team23.de/build/b5-skel-{skeleton}.git'.format(
skeleton=self._skeleton_identified,
)
# If it's not a public repository, clone using ssh in order to allow ssh key file auth
if not self._is_public_repository(skeleton_url):
skeleton_url = 'git@git.team23.de:build/b5-skel-{skeleton}.git'.format(
skeleton=self._skeleton_identified,
)
self._url = skeleton_url
return self._url
    def _is_public_repository(self, url: str) -> bool:
        try:
            request = urllib.request.urlopen(url)
        except URLError:
            # Unreachable or non-public repositories fall back to ssh cloning
            return False
        request_url = request.geturl()
        if url == request_url or url.rsplit('.', 1)[0] == request_url:
            if request.getcode() == 200:
                return True
        return False
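# Illustrative usage (hypothetical skeleton name):
#   Skeleton('myproject').get_url()
#   # -> 'https://git.team23.de/build/b5-skel-myproject.git' if that URL is publicly reachable,
#   #    otherwise 'git@git.team23.de:build/b5-skel-myproject.git' for ssh-key based cloning.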
|
from aiovault import Vault
from conftest import async_test
import pytest
@async_test
def test_raw(dev_server):
client = Vault(dev_server.addr, token=dev_server.root_token)
with pytest.raises(KeyError):
yield from client.raw.read('foo')
written = yield from client.raw.write('foo', {'bar': 'baz'})
assert written
response = yield from client.raw.read('foo')
print(response)
assert response['value'] == {'bar': 'baz'}
response = yield from client.raw.delete('foo')
assert response is True
# still absent
response = yield from client.raw.delete('foo')
assert response is True
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import RelaxedOneHotCategorical
import numpy as np
class GumbelSoftmaxWrapper(nn.Module):
"""
Gumbel-Softmax Wrapper for an agent that outputs a single symbol. Assumes that during the forward pass,
the agent returns log-probabilities over the potential output symbols. During training, the wrapper
transforms them into a sample from the Gumbel Softmax (GS) distribution; eval-time it returns greedy one-hot encoding
of the same shape.
The temperature of the GS distribution can be annealed using `update_temp`.
"""
def __init__(self, agent, temperature=1.0, trainable_temperature=False):
"""
:param agent: The agent to be wrapped. agent.forward() has to output log-probabilities over the vocabulary
:param temperature: The temperature of the Gumbel Softmax distribution
:param trainable_temperature: If set to True, the temperature becomes a trainable parameter of the model
"""
super(GumbelSoftmaxWrapper, self).__init__()
self.agent = agent
if not trainable_temperature:
self.temperature = temperature
else:
self.temperature = torch.nn.Parameter(torch.tensor([temperature]), requires_grad=True)
def forward(self, *args, **kwargs):
logits = self.agent(*args, **kwargs)
if self.training:
return RelaxedOneHotCategorical(logits=logits, temperature=self.temperature).rsample()
else:
return torch.zeros_like(logits).scatter_(-1, logits.argmax(dim=-1, keepdim=True), 1.0)
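# Minimal usage sketch (shapes are illustrative): wrap any module that returns
# log-probabilities over the vocabulary, e.g.
#   sender = nn.Sequential(nn.Linear(10, 5), nn.LogSoftmax(dim=-1))
#   wrapped = GumbelSoftmaxWrapper(sender, temperature=0.5)
#   wrapped(torch.randn(4, 10))  # (4, 5): GS relaxed sample in train mode, one-hot argmax in eval mode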
class SymbolGameGS(nn.Module):
"""
Implements one-symbol Sender/Receiver game. The loss must be differentiable wrt the parameters of the agents.
Typically, this assumes Gumbel Softmax relaxation of the communication channel.
>>> class Receiver(nn.Module):
... def forward(self, x, _input=None):
... return x
>>> receiver = Receiver()
>>> sender = nn.Sequential(nn.Linear(10, 10), nn.LogSoftmax(dim=1))
>>> def mse_loss(sender_input, _1, _2, receiver_output, _3):
... return (sender_input - receiver_output).pow(2.0).mean(dim=1), {}
>>> game = SymbolGameGS(sender=sender, receiver=Receiver(), loss=mse_loss)
>>> forward_result = game(torch.ones((2, 10)), None) # the second argument is labels, we don't need any
>>> forward_result[1]
{}
>>> (forward_result[0] > 0).item()
1
"""
def __init__(self, sender, receiver, loss):
"""
:param sender: Sender agent. sender.forward() has to output log-probabilities over the vocabulary.
:param receiver: Receiver agent. receiver.forward() has to accept two parameters: message and receiver_input.
`message` is shaped as (batch_size, vocab_size).
:param loss: Callable that outputs differentiable loss, takes the following parameters:
* sender_input: input to Sender (comes from dataset)
* message: message sent from Sender
* receiver_input: input to Receiver from dataset
* receiver_output: output of Receiver
* labels: labels that come from dataset
"""
super(SymbolGameGS, self).__init__()
self.sender = sender
self.receiver = receiver
self.loss = loss
def forward(self, sender_input, labels, receiver_input=None):
message = self.sender(sender_input)
receiver_output = self.receiver(message, receiver_input)
loss, rest_info = self.loss(sender_input, message, receiver_input, receiver_output, labels)
for k, v in rest_info.items():
if hasattr(v, 'mean'):
rest_info[k] = v.mean().item()
return loss.mean(), rest_info
class RelaxedEmbedding(nn.Embedding):
"""
    A drop-in replacement for `nn.Embedding` such that it can be used _both_ with Reinforce-based training
    and with Gumbel-Softmax-based training.
Important: nn.Linear and nn.Embedding have different initialization strategies, hence replacing nn.Linear with
`RelaxedEmbedding` might change results.
>>> emb = RelaxedEmbedding(15, 10) # vocab size 15, embedding dim 10
>>> long_query = torch.tensor([[1], [2], [3]]).long()
>>> long_query.size()
torch.Size([3, 1])
>>> emb(long_query).size()
torch.Size([3, 1, 10])
>>> float_query = torch.zeros((3, 15)).scatter_(-1, long_query, 1.0).float().unsqueeze(1)
>>> float_query.size()
torch.Size([3, 1, 15])
>>> emb(float_query).size()
torch.Size([3, 1, 10])
# make sure it's the same query, one-hot and symbol-id encoded
>>> (float_query.argmax(dim=-1) == long_query).all().item()
1
>>> (emb(float_query) == emb(long_query)).all().item()
1
"""
def forward(self, x):
if isinstance(x, torch.LongTensor) or (torch.cuda.is_available() and isinstance(x, torch.cuda.LongTensor)):
return F.embedding(x, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse)
else:
return torch.matmul(x, self.weight)
class SymbolReceiverWrapper(nn.Module):
"""
An optional wrapper for single-symbol Receiver, both Gumbel-Softmax and Reinforce. Receives a message, embeds it,
and passes to the wrapped agent.
"""
def __init__(self, agent, vocab_size, agent_input_size):
super(SymbolReceiverWrapper, self).__init__()
self.agent = agent
self.embedding = RelaxedEmbedding(vocab_size, agent_input_size)
def forward(self, message, input=None):
embedded_message = self.embedding(message)
return self.agent(embedded_message, input)
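# --- Hedged usage sketch (not part of the original module; sizes are arbitrary) ---
# SymbolReceiverWrapper embeds the incoming single-symbol message before calling the
# wrapped agent, so the same receiver handles both symbol ids (Reinforce) and
# one-hot / relaxed vectors (Gumbel-Softmax).
def _symbol_receiver_wrapper_demo():
    core = nn.Linear(8, 3)  # toy receiver: 8-d embedding -> 3 outputs
    receiver = SymbolReceiverWrapper(core, vocab_size=5, agent_input_size=8)
    id_message = torch.tensor([1, 4]).long()
    one_hot_message = torch.zeros(2, 5).scatter_(-1, id_message.unsqueeze(1), 1.0)
    return receiver(id_message).shape, receiver(one_hot_message).shape  # both (2, 3)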
class RnnSenderGS(nn.Module):
"""
Gumbel Softmax wrapper for Sender that outputs variable-length sequence of symbols.
The user-defined `agent` takes an input and outputs an initial hidden state vector for the RNN cell;
`RnnSenderGS` then unrolls this RNN for the `max_len` symbols. The end-of-sequence logic
is supposed to be handled by the game implementation. Supports vanilla RNN ('rnn'), GRU ('gru'), and LSTM ('lstm')
cells.
>>> agent = nn.Linear(10, 5) # input size 10, the RNN's hidden size is 5
>>> agent = RnnSenderGS(agent, vocab_size=2, embed_dim=10, hidden_size=5, max_len=3, temperature=1.0, cell='lstm')
>>> output = agent(torch.ones((1, 10)))
>>> output.size() # batch size x max_len x vocab_size
torch.Size([1, 3, 2])
"""
def __init__(self, agent, vocab_size, embed_dim, hidden_size, max_len, temperature, cell='rnn', force_eos=True,
trainable_temperature=False):
super(RnnSenderGS, self).__init__()
self.agent = agent
self.force_eos = force_eos
self.max_len = max_len
if self.force_eos:
self.max_len -= 1
self.hidden_to_output = nn.Linear(hidden_size, vocab_size)
self.embedding = nn.Linear(vocab_size, embed_dim)
self.sos_embedding = nn.Parameter(torch.zeros(embed_dim))
self.embed_dim = embed_dim
self.vocab_size = vocab_size
if not trainable_temperature:
self.temperature = temperature
else:
self.temperature = torch.nn.Parameter(torch.tensor([temperature]), requires_grad=True)
self.cell = None
cell = cell.lower()
if cell == 'rnn':
self.cell = nn.RNNCell(input_size=embed_dim, hidden_size=hidden_size)
elif cell == 'gru':
self.cell = nn.GRUCell(input_size=embed_dim, hidden_size=hidden_size)
elif cell == 'lstm':
self.cell = nn.LSTMCell(input_size=embed_dim, hidden_size=hidden_size)
else:
raise ValueError(f"Unknown RNN Cell: {cell}")
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.sos_embedding, 0.0, 0.01)
def forward(self, x):
prev_hidden = self.agent(x)
prev_c = torch.zeros_like(prev_hidden) # only for LSTM
e_t = torch.stack([self.sos_embedding] * prev_hidden.size(0))
sequence = []
for step in range(self.max_len):
if isinstance(self.cell, nn.LSTMCell):
h_t, prev_c = self.cell(e_t, (prev_hidden, prev_c))
else:
h_t = self.cell(e_t, prev_hidden)
step_logits = F.log_softmax(self.hidden_to_output(h_t), dim=1)
distr = RelaxedOneHotCategorical(logits=step_logits, temperature=self.temperature)
if self.training:
x = distr.rsample()
else:
x = torch.zeros_like(step_logits).scatter_(-1, step_logits.argmax(dim=-1, keepdim=True), 1.0)
prev_hidden = h_t
e_t = self.embedding(x)
sequence.append(x)
sequence = torch.stack(sequence).permute(1, 0, 2)
if self.force_eos:
eos = torch.zeros_like(sequence[:, 0, :]).unsqueeze(1)
eos[:, 0, 0] = 1
sequence = torch.cat([sequence, eos], dim=1)
return sequence
class RnnReceiverGS(nn.Module):
"""
    Gumbel Softmax-based wrapper for the Receiver agent in a variable-length communication game. The user-implemented
    logic is passed in `agent` and is responsible for mapping (RNN's hidden state + Receiver's optional input)
    into the output vector. Since, due to the relaxation, the end-of-sequence symbol might have non-zero probability at
    each timestep of the message, `RnnReceiverGS` is applied at each timestep. The corresponding EOS logic is handled by
    `SenderReceiverRnnGS`.
"""
def __init__(self, agent, vocab_size, embed_dim, hidden_size, cell='rnn'):
super(RnnReceiverGS, self).__init__()
self.agent = agent
self.cell = None
cell = cell.lower()
if cell == 'rnn':
self.cell = nn.RNNCell(input_size=embed_dim, hidden_size=hidden_size)
elif cell == 'gru':
self.cell = nn.GRUCell(input_size=embed_dim, hidden_size=hidden_size)
elif cell == 'lstm':
self.cell = nn.LSTMCell(input_size=embed_dim, hidden_size=hidden_size)
else:
raise ValueError(f"Unknown RNN Cell: {cell}")
self.embedding = nn.Linear(vocab_size, embed_dim)
def forward(self, message, input=None):
outputs = []
emb = self.embedding(message)
prev_hidden = None
prev_c = None
# to get an access to the hidden states, we have to unroll the cell ourselves
for step in range(message.size(1)):
e_t = emb[:, step, ...]
if isinstance(self.cell, nn.LSTMCell):
h_t, prev_c = self.cell(e_t, (prev_hidden, prev_c)) if prev_hidden is not None else \
self.cell(e_t)
else:
h_t = self.cell(e_t, prev_hidden)
outputs.append(self.agent(h_t, input))
prev_hidden = h_t
outputs = torch.stack(outputs).permute(1, 0, 2)
return outputs
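# --- Hedged usage sketch (not part of the original module; sizes are arbitrary) ---
# RnnReceiverGS produces one output per message position, since under the relaxation
# EOS can carry probability mass at every step; the game wrapper decides which step to
# keep. Here the toy agent simply exposes the RNN hidden state.
class _IdentityReceiver(nn.Module):
    def forward(self, hidden, _input=None):
        return hidden
def _rnn_receiver_gs_demo():
    receiver = RnnReceiverGS(_IdentityReceiver(), vocab_size=3, embed_dim=4, hidden_size=6, cell='gru')
    message = torch.zeros(2, 5, 3)
    message[:, :, 0] = 1.0  # batch of 2 messages, 5 steps, every step a hard EOS one-hot
    return receiver(message).shape  # torch.Size([2, 5, 6])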
class SenderReceiverRnnGS(nn.Module):
"""
    This class implements the Sender/Receiver game mechanics for the Sender/Receiver game with variable-length
    communication messages and Gumbel-Softmax relaxation of the channel. The vocabulary term with id `0` is assumed
    to be the end-of-sequence symbol. Communication is stopped either after the entire message is processed
    or when the end-of-sequence symbol is met.
>>> sender = nn.Linear(10, 5)
>>> sender = RnnSenderGS(sender, vocab_size=2, embed_dim=3, hidden_size=5, max_len=3, temperature=5.0, cell='gru')
>>> class Receiver(nn.Module):
... def __init__(self):
... super().__init__()
... self.fc = nn.Linear(7, 10)
... def forward(self, x, _input):
... return self.fc(x)
>>> receiver = RnnReceiverGS(Receiver(), vocab_size=2, embed_dim=4, hidden_size=7, cell='rnn')
>>> def loss(sender_input, _message, _receiver_input, receiver_output, labels):
... return (sender_input - receiver_output).pow(2.0).mean(dim=1), {'aux' : 0}
>>> game = SenderReceiverRnnGS(sender, receiver, loss)
>>> output = game.forward(torch.ones((3, 10)), None, None) # batch of 3 10d vectors
>>> output[1]['aux'].item()
0.0
>>> output[0].item() > 0
True
"""
def __init__(self, sender, receiver, loss, length_cost=0.0):
"""
:param sender: sender agent
:param receiver: receiver agent
:param loss: the optimized loss that accepts
sender_input: input of Sender
            message: the message sent by Sender
receiver_input: input of Receiver from the dataset
receiver_output: output of Receiver
labels: labels assigned to Sender's input data
and outputs a tuple of (1) a loss tensor of shape (batch size, 1) (2) the dict with auxiliary information
of the same shape. The loss will be minimized during training, and the auxiliary information aggregated over
all batches in the dataset.
:param length_cost: the penalty applied to Sender for each symbol produced
"""
super(SenderReceiverRnnGS, self).__init__()
self.sender = sender
self.receiver = receiver
self.loss = loss
self.length_cost = length_cost
def forward(self, sender_input, labels, receiver_input=None):
message = self.sender(sender_input)
receiver_output = self.receiver(message, receiver_input)
loss = 0
not_eosed_before = torch.ones(receiver_output.size(0)).to(receiver_output.device)
expected_length = 0.0
rest = {}
z = 0.0
for step in range(receiver_output.size(1)):
step_loss, step_rest = self.loss(sender_input, message[:, step, ...], receiver_input, receiver_output[:, step, ...], labels)
eos_mask = message[:, step, 0] # always eos == 0
add_mask = eos_mask * not_eosed_before
z += add_mask
loss += step_loss * add_mask + self.length_cost * (1.0 + step) * add_mask
expected_length += add_mask.detach() * (1.0 + step)
for name, value in step_rest.items():
rest[name] = value * add_mask + rest.get(name, 0.0)
not_eosed_before = not_eosed_before * (1.0 - eos_mask)
# the remainder of the probability mass
loss += step_loss * not_eosed_before + self.length_cost * (step + 1.0) * not_eosed_before
expected_length += (step + 1) * not_eosed_before
z += not_eosed_before
assert z.allclose(torch.ones_like(z)), f"lost probability mass, {z.min()}, {z.max()}"
for name, value in step_rest.items():
rest[name] = value * not_eosed_before + rest.get(name, 0.0)
for name, value in rest.items():
rest[name] = value.mean()
rest['mean_length'] = expected_length.mean()
return loss.mean(), rest
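# --- Hedged training-step sketch (not part of the original module) ---
# One optimisation step of the variable-length Gumbel-Softmax game, reusing the toy
# agents from the SenderReceiverRnnGS doctest above; sizes and the learning rate are
# illustrative assumptions only.
class _Reconstructor(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(7, 10)
    def forward(self, x, _input=None):
        return self.fc(x)
def _gs_game_training_step():
    sender = RnnSenderGS(nn.Linear(10, 5), vocab_size=2, embed_dim=3,
                         hidden_size=5, max_len=3, temperature=5.0, cell='gru')
    receiver = RnnReceiverGS(_Reconstructor(), vocab_size=2, embed_dim=4, hidden_size=7, cell='rnn')
    def mse_loss(sender_input, _message, _receiver_input, receiver_output, _labels):
        return (sender_input - receiver_output).pow(2.0).mean(dim=1), {}
    game = SenderReceiverRnnGS(sender, receiver, mse_loss)
    optimizer = torch.optim.Adam(game.parameters(), lr=1e-3)
    batch = torch.ones(3, 10)
    loss, aux = game(batch, labels=None)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item(), aux['mean_length'].item()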
|
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
t = int(input())
for _ in range(t):
n, m = map(int, input().strip().split())
dist = [[float('inf')] * n for _ in range(n)]
for _ in range(m):
x, y, c = map(int, input().strip().split())
x -= 1
y -= 1
dist[x][y] = min(dist[x][y], c)
dist[y][x] = min(dist[y][x], c)
s, a, h = map(lambda v: int(v) - 1, input().strip().split())
for k in range(n):
for i in range(n):
for j in range(n):
if dist[i][j] > dist[i][k] + dist[k][j]:
dist[i][j] = dist[i][k] + dist[k][j]
ans = 0
for i in range(n):
if i != s and i != a and i != h:
ans = max(ans, dist[s][i] + 2 * dist[i][a] + dist[i][h])
print(ans)
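# --- Hedged self-contained sketch (not part of the original submission) ---
# The triple loop above is plain Floyd-Warshall all-pairs shortest paths. The helper
# below repeats that step on a small hard-coded undirected graph so the recurrence can
# be checked without STDIN; the edge list is an arbitrary example and, unlike the
# contest code, the diagonal is initialised to zero.
def _floyd_warshall(n, edges):
    dist = [[float('inf')] * n for _ in range(n)]
    for i in range(n):
        dist[i][i] = 0
    for x, y, c in edges:
        dist[x][y] = min(dist[x][y], c)
        dist[y][x] = min(dist[y][x], c)
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dist[i][j] > dist[i][k] + dist[k][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
    return dist
# Example: the shortest 0 -> 2 path goes through 1 (cost 3), beating the direct edge (cost 7):
# _floyd_warshall(3, [(0, 1, 1), (1, 2, 2), (0, 2, 7)])[0][2] == 3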
|
# -*- coding: utf-8 -*-
import xmind
from xmind.core.const import TOPIC_DETACHED
from xmind.core.markerref import MarkerId
from xmind.core.topic import TopicElement
import os
import time
def gen_my_xmind_file():
workbook = xmind.load("my.xmind")
sheet1 = workbook.getPrimarySheet()
time_name = basic_sheet(sheet1, workbook)
# gen_sheet2(sheet1, workbook)
xmind.save(workbook, path='minmap/{}.xmind'.format(time_name))
def tomato():
os.system("../tomato-clock/tomato.py")
def create_node(get_node, get_web):
b_web = True
while b_web:
task_node = input("create node | end \n at {}->{}\n input node:".format(get_web, get_node))
if task_node == "end":
b_web = False
# dict['node'].append(get_node.copy())
else:
get_node.append(task_node)
return get_node
def creat_child_node(child_node):
    test = {'child_node': "how to pass values to the backend", 'web': [
{"name": "b", "src": "https://github.com/zhuifengshen/xmind"},
{"name": "c", "src": "https://github.com/zhuifengshen/xmind"}
], 'node': [
["fals 阻止頁面更新", "jQuery 動態生成物件"],
["頁面沒有刷新因為 hs6"]
]
}
# return (test)
dict = {"child_node": child_node, "web":[], "node":[]}
b = True
get_web = {"name":"", "src":""}
get_now_web = ""
get_now_node = []
while b:
        task_web = input("w:web | k:keep | ed:edit web | e:end \n current web {}->{} \n enter choice:".format(get_now_web, get_now_node))
if task_web =="w" or task_web =="web":
get_web["name"] = input("web_name:")
get_web["src"] = input("web_src:")
dict['web'].append(get_web.copy())
get_now_web = get_web["name"]
# dict['node'].extend(create_node([], get_now_web).copy())
node = create_node([], get_now_web).copy()
dict['node'].append(node)
get_now_node = node
get_web = {"name": "", "src": ""}
if task_web == 'k' or task_web == 'keep':
tomato()
if task_web == "ed" or task_web == "edic":
web_name = input("web_name:")
b_edic = True
while b_edic:
try:
num = [i for i, _ in enumerate(dict['web']) if _['name'] == web_name][0]
get_now_web = web_name
# dict['node'] = (create_node(dict['node'], get_now_web).copy())
node = create_node(dict['node'][num], get_now_web).copy()
dict['node'][num] = node
get_now_node = node
b_edic = False
except IndexError:
if web_name == "end" or web_name == "e":
b_edic = False
else:
                        web_name = input("please enter a valid web_name | end:")
if task_web =="e" or task_web =="end" or task_web =="q":
b = False
return dict
def get_tree(a):
c = a['child_node']
c2 = a['web']
c3 = a['node']
# for i, val in enumerate(c):
print(".{}".format(c))
for i2, val2 in enumerate(c2):
print("├── {}".format(val2['name']))
for i4, val4 in enumerate(c3):
if i4 == i2:
for i5, val5 in enumerate(val4):
print("│ ├── {}".format(val5))
print("└───")
def painting_time(s1, time, child_node):
t1 = s1.getRootTopic()
t1.setTitle("Time")
t = t1.addSubTopic()
t.setTitle("{}-{}".format(time['st'], time['et']))
tt = t.addSubTopic()
tt.setTitle(child_node)
def painting_child_node(s1, root, dict):
r1 = s1.getRootTopic() # get the root topic of this sheet
r1.setTitle(root) # set its title
c = dict['child_node']
c2 = dict['web']
c3 = dict['node']
a = r1.addSubTopic()
a.setTitle(c) # set its title
print(".{}".format(c))
for i2, val2 in enumerate(c2):
a2 = a.addSubTopic()
# if isinstance(val, list):
print("├── {}".format(val2['name']))
a3 = 'b3' + str(val2['name'])
a3 = a2.addSubTopic()
a3.setTitle(val2['name'])
a3.setURLHyperlink("{}".format(val2['src']))
for i4, val4 in enumerate(c3):
if i4 == i2:
for i5, val5 in enumerate(val4):
print("│ ├── {}".format(val5))
a4 = a3.addSubTopic()
a4.setTitle(val5)
print("└───")
def time_sheet(s2, time, child_node):
t1 = s2.getRootTopic()
t1.setTitle("Time")
t = t1.addSubTopic()
t.setTitle("{}-{}".format(time['st'], time['et']))
tt = t.addSubTopic()
tt.setTitle(child_node)
def basic_sheet(s1, workbook):
s1.setTitle("child_node sheet") # set its title
s2 = workbook.createSheet()
s2.setTitle("time sheet")
creat_child = []
creat_child_time = []
result = True
root = input("create root node: \n")
start_time = time.strftime("%b%d|%H:%M")
while result:
task = input("t:tree | c:create child_node | e:end \ninput:")
child_time = {"st": "", "et": ""}
if task == "t" or task == "tree":
for i, val in enumerate(creat_child):
get_tree(val)
if task == "c" or task == "create":
child_node = input("child_node_name:")
child_time['st'] = time.strftime("%H:%M")
tomato()
creat_child.append(creat_child_node(child_node).copy())
child_time['et'] = time.strftime("%H:%M")
creat_child_time.append(child_time)
if task == "e" or task == "end" or task == "q":
end_time = time.strftime("%H:%M")
for i, val in enumerate(creat_child):
painting_child_node(s1, root, val)
time_sheet(s2, creat_child_time[i], val['child_node'])
result = False
print('\n {}-{}.xmind'.format(start_time, end_time))
return("{}-{}".format(start_time, end_time))
def gen_sheet2(workbook, sheet1):
# ***** second sheet *****
# create a new sheet and add to the workbook by default
sheet2 = workbook.createSheet()
sheet2.setTitle("second sheet")
# a sheet has a blank sheet by default
root_topic2 = sheet2.getRootTopic()
root_topic2.setTitle("root node")
# use other methods to create some sub topic element
topic1 = TopicElement(ownerWorkbook=workbook)
# set a topic hyperlink from this topic to the first sheet given by s1.getID()
topic1.setTopicHyperlink(sheet1.getID())
topic1.setTitle("redirection to the first sheet") # set its title
topic2 = TopicElement(ownerWorkbook=workbook)
topic2.setTitle("topic with an url hyperlink")
topic2.setURLHyperlink("https://github.com/zhuifengshen/xmind") # set an url hyperlink
topic3 = TopicElement(ownerWorkbook=workbook)
topic3.setTitle("third node")
topic3.setPlainNotes("notes for this topic") # set notes (F4 in XMind)
topic3.setTitle("topic with \n notes")
topic4 = TopicElement(ownerWorkbook=workbook)
topic4.setFileHyperlink("logo.png") # set a file hyperlink
topic4.setTitle("topic with a file")
topic1_1 = TopicElement(ownerWorkbook=workbook)
topic1_1.setTitle("sub topic")
    topic1_1.addLabel("a label")  # official XMind supports only one label per topic
topic1_1_1 = TopicElement(ownerWorkbook=workbook)
topic1_1_1.setTitle("topic can add multiple markers")
topic1_1_1.addMarker(MarkerId.starBlue)
topic1_1_1.addMarker(MarkerId.flagGreen)
topic2_1 = TopicElement(ownerWorkbook=workbook)
topic2_1.setTitle("topic can add multiple comments")
topic2_1.addComment("I'm a comment!")
topic2_1.addComment(content="Hello comment!", author='devin')
# then the topics must be added to the root element
root_topic2.addSubTopic(topic1)
root_topic2.addSubTopic(topic2)
root_topic2.addSubTopic(topic3)
root_topic2.addSubTopic(topic4)
topic1.addSubTopic(topic1_1)
topic2.addSubTopic(topic2_1)
topic1_1.addSubTopic(topic1_1_1)
# to loop on the subTopics
topics = root_topic2.getSubTopics()
for index, topic in enumerate(topics):
topic.addMarker("priority-" + str(index + 1))
# create a relationship
sheet2.createRelationship(topic1.getID(), topic2.getID(), "relationship test")
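# --- Hedged, non-interactive sketch (not part of the original script) ---
# The interactive flow above builds sheets from console input; this helper shows the
# same xmind calls on hard-coded data. The output path and topic texts are placeholders.
def gen_minimal_xmind_file(path='minmap/example.xmind'):
    workbook = xmind.load(path)  # creates the workbook if the file does not exist yet
    sheet = workbook.getPrimarySheet()
    sheet.setTitle("example sheet")
    root = sheet.getRootTopic()
    root.setTitle("root node")
    child = root.addSubTopic()
    child.setTitle("child node")
    link = child.addSubTopic()
    link.setTitle("xmind library")
    link.setURLHyperlink("https://github.com/zhuifengshen/xmind")
    xmind.save(workbook, path=path)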
if __name__ == '__main__':
gen_my_xmind_file()
|
import unittest
import pytest
from xpath_string.xpath import Xpath
class TestXpath(unittest.TestCase):
def setUp(self):
self.object_1 = Xpath("//div")
self.object_2 = Xpath("//span")
def test_object_creation(self):
assert isinstance(self.object_1, Xpath), \
'object is not a Xpath class object. Is {} instead'.format(type(self.object_1))
def test_object_arg_check(self):
assert self.object_1.xpath == "//div", \
'object xpath arg has value {} instead of {}'.format(self.object_1.xpath, '//div')
def test_str(self):
assert str(self.object_1) == "//div", \
'__str__ for Xpath is not working correctly. Received {} instead of {}'.format(str(self.object_1), '//div')
def test_arg_format_return(self):
object_1 = Xpath("//div[class='{}']")
expected_string = "//div[class='ONE']"
assert str(object_1.format('ONE')) == expected_string, \
'object_1.format(\'One\') is not {}, but instead {}'.format(expected_string, object_1.format('ONE'))
def test_args_format_return(self):
object_1 = Xpath("//div[class='{}']|//span[type='{}']")
expected_string = "//div[class='ONE']|//span[type='Two']"
assert str(object_1.format('ONE', 'Two')) == expected_string, \
'object_1.format(\'One\', \'Two\') is not {}, but instead {}'.format(expected_string,
object_1.format('ONE', 'Two'))
def test_kwarg_format_return(self):
object_1 = Xpath("//div[class='{it}']")
expected_string = "//div[class='ONE']"
assert str(object_1.format(it='ONE')) == expected_string, \
'object_1.format(it=\'One\') is not {}, but instead {}'.format(expected_string, object_1.format(it='ONE'))
def test_kwargs_format_return(self):
object_1 = Xpath("//div[class='{first}']|//span[type='{second}']")
expected_string = "//div[class='ONE']|//span[type='Two']"
assert str(object_1.format(first='ONE', second='Two')) == expected_string, \
'object_1.format(\'One\', \'Two\') is not {}, but instead {}'.format(
expected_string, object_1.format(first='ONE', second='Two'))
def test_failure_format_kwargs(self):
with pytest.raises(KeyError):
object_1 = Xpath("//div[class='{first}']|//span[type='{second}']")
object_1.format(third='wrong')
def test_failure_format_args_but_kwargs_given(self):
with pytest.raises(IndexError):
object_1 = Xpath("//div[class='{}']|//span[type='{}']")
object_1.format(third='wrong')
def test_too_many_args_format(self):
object_1 = Xpath("//div[class='{}']|//span[type='{}']")
assert str(object_1.format('wrong', 'number', 'of arguments')) == "//div[class='wrong']|//span[type='number']"
def test_failure_format_kwargs_but_args_given(self):
with pytest.raises(KeyError):
object_1 = Xpath("//div[class='{first}']|//span[type='{second}']")
object_1.format('not', 'kwargs')
def test_format_return_type(self):
object_1 = Xpath("//div[class='{}']").format('ONE')
assert isinstance(object_1, Xpath), 'Xpath.format() is not a Xpath class object. Is {} instead.'.format(
type(object_1))
def test_format_return_object(self):
assert Xpath("//div[class='{}']").format('ONE') == Xpath("//div[class='ONE']"), \
'"Xpath(\'//div[class="{}"]).format(\'ONE\')" is not the same object as: "Xpath(\'//div[class="ONE"]\')"'
def test_failure_format_object(self):
assert Xpath("//div[class='{}']").format('ONE') != Xpath("//div[class='TWO']"), \
'"Xpath(\'//div[class="{}"]).format(\'ONE\')" is the same object as: Xpath(\'//div[class="TWO"]\')'
def test_equal(self):
assert self.object_1 == Xpath("//div"), '"self.object_1 == Xpath("//div")" is False'
def test_unequal(self):
assert self.object_1 != self.object_2, '"self.object_1 != self.object_2" is True'
def test_not_supported_type_add(self):
with pytest.raises(TypeError):
self.object_1 + 123
def test_string_add(self):
assert self.object_1 + 'Test' == '//divTest', 'Addition of: "{}" and "Test" gives {} instead of {}'.format(
self.object_1, self.object_1 + 'Test', '//divTest')
def test_string_add_with_or_operator(self):
assert self.object_1 + '|/Test' == '//div|/Test', 'Addition of: "{}" and "Test" gives {} instead of {}'.format(
self.object_1, self.object_1 + 'Test', '//div|/Test')
def test_simple_xpath_add(self):
assert self.object_1 + self.object_2 == '//div//span', \
'Addition of: "{}" and "{}" gives {} instead of {}'.format(self.object_1, self.object_2,
self.object_1 + self.object_2, '//div//span')
def test_xpath_with_square_brackets_add(self):
object_1 = Xpath("//div[@id='timezone']")
object_2 = Xpath("//span[@class='btn btn-default form-control ui-select-toggle']")
add_result = object_1 + object_2
assert add_result.xpath == "//div[@id='timezone']//span[@class='btn btn-default form-control ui-select-toggle']", \
"Addition result of {} and {} is {} instead of " \
"//div[@id='timezone']//span[@class='btn btn-default form-control ui-select-toggle']".format(
object_1, object_2, add_result)
def test_string_add_result_type(self):
result = self.object_1 + 'Test'
assert isinstance(result, Xpath), \
'Addition of Xpath("{}") and "Test" is {} object instead of Xpath object'.format(self.object_1, result)
def test_xpath_add_result_type(self):
result = self.object_1 + self.object_2
assert isinstance(result, Xpath), \
'Addition of Xpath("{}") and Xpath("{}") is {} object instead of Xpath object'.format(
self.object_1, self.object_2, result)
def test_equal_two_xpath(self):
object_2 = Xpath("//div")
assert self.object_1 == object_2, 'Xpath("{}") is not equal to Xpath("{}")'.format(self.object_1, object_2)
def test_equal_xpath_and_string(self):
string_1 = '//div'
assert self.object_1 == string_1, 'Xpath("{}") is not equal to string: "{}"'.format(self.object_1, string_1)
def test_not_equal_two_xpath(self):
assert self.object_1 != self.object_2, 'Xpath("{}") is equal to Xpath("{}")'.format(
self.object_1, self.object_2)
def test_not_equal_xpath_and_string(self):
string_1 = '//span'
assert self.object_1 != string_1, 'Xpath("{}") is equal to string: "{}"'.format(self.object_1, string_1)
def test_equal_not_supported_type(self):
with pytest.raises(TypeError):
assert self.object_1 == 123
def test_not_equal_not_supported_type(self):
with pytest.raises(TypeError):
assert self.object_1 != 123
def test_double_quote_change_to_single_quote(self):
object_1 = Xpath('//div[@class="not"]')
assert object_1 == "//div[@class='not']"
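# --- Hedged usage sketch (mirrors behaviour asserted in the tests above) ---
# Xpath wraps an xpath string and supports str.format-style substitution, '+'
# concatenation with strings or other Xpath objects, and equality against plain strings.
def _xpath_usage_demo():
    row = Xpath("//div[class='{}']").format('ONE')            # -> Xpath("//div[class='ONE']")
    combined = Xpath("//div[@id='timezone']") + Xpath("//span")
    assert row == "//div[class='ONE']"
    assert combined == "//div[@id='timezone']//span"
    assert Xpath('//div[@class="not"]') == "//div[@class='not']"
    return row, combined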
|
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
Builder.load_string('''
<SeedOptionsDialog@Popup>
id: popup
title: _('Seed Options')
size_hint: 0.8, 0.8
pos_hint: {'top':0.9}
BoxLayout:
orientation: 'vertical'
Label:
id: description
text: _('You may extend your seed with custom words')
halign: 'left'
text_size: self.width, None
size: self.texture_size
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.2
Label:
text: _('Extend Seed')
CheckBox:
id:ext
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.2
Label:
text: _('BIP39')
id:bip39_label
CheckBox:
id:bip39
Widget:
size_hint: 1, 0.1
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.2
Button:
text: 'Cancel'
size_hint: 0.5, None
height: '48dp'
on_release: popup.dismiss()
Button:
text: 'OK'
size_hint: 0.5, None
height: '48dp'
on_release:
root.callback(ext.active, bip39.active)
popup.dismiss()
''')
class SeedOptionsDialog(Factory.Popup):
def __init__(self, is_ext, is_bip39, callback):
Factory.Popup.__init__(self)
self.ids.ext.active = is_ext
if is_bip39 is None:
self.ids.bip39.opacity = 0
self.ids.bip39_label.opacity = 0
self.ids.bip39.disabled = True
else:
self.ids.bip39.active = is_bip39
self.callback = callback
|
"""
Show how to use libRocket in Panda3D.
"""
import sys
from panda3d.core import loadPrcFile, loadPrcFileData, Point3,Vec4, Mat4, LoaderOptions # @UnusedImport
from panda3d.core import DirectionalLight, AmbientLight, PointLight
from panda3d.core import Texture, PNMImage
from panda3d.core import PandaSystem
import random
from direct.interval.LerpInterval import LerpHprInterval, LerpPosInterval, LerpFunc
from direct.showbase.ShowBase import ShowBase
# workaround: https://www.panda3d.org/forums/viewtopic.php?t=10062&p=99697#p99054
#from panda3d import rocket
import _rocketcore as rocket
from panda3d.rocket import RocketRegion, RocketInputHandler
loadPrcFileData("", "model-path $MAIN_DIR/assets")
import console
global globalClock
class MyApp(ShowBase):
def __init__(self):
ShowBase.__init__(self)
self.win.setClearColor(Vec4(0.2, 0.2, 0.2, 1))
self.disableMouse()
self.render.setShaderAuto()
dlight = DirectionalLight('dlight')
alight = AmbientLight('alight')
dlnp = self.render.attachNewNode(dlight)
alnp = self.render.attachNewNode(alight)
dlight.setColor((0.8, 0.8, 0.5, 1))
alight.setColor((0.2, 0.2, 0.2, 1))
dlnp.setHpr(0, -60, 0)
self.render.setLight(dlnp)
self.render.setLight(alnp)
# Put lighting on the main scene
plight = PointLight('plight')
plnp = self.render.attachNewNode(plight)
plnp.setPos(0, 0, 10)
self.render.setLight(plnp)
self.render.setLight(alnp)
self.loadRocketFonts()
self.loadingTask = None
#self.startModelLoadingAsync()
self.startModelLoading()
self.inputHandler = RocketInputHandler()
self.mouseWatcher.attachNewNode(self.inputHandler)
self.openLoadingDialog()
def loadRocketFonts(self):
""" Load fonts referenced from e.g. 'font-family' RCSS directives.
Note: the name of the font as used in 'font-family'
is not always the same as the filename;
open the font in your OS to see its display name.
"""
rocket.LoadFontFace("modenine.ttf")
def startModelLoading(self):
self.monitorNP = None
self.keyboardNP = None
self.loadingError = False
self.taskMgr.doMethodLater(1, self.loadModels, 'loadModels')
def loadModels(self, task):
self.monitorNP = self.loader.loadModel("monitor")
self.keyboardNP = self.loader.loadModel("takeyga_kb")
def startModelLoadingAsync(self):
"""
NOTE: this seems to invoke a few bugs (crashes, sporadic model
reading errors, etc) so is disabled for now...
"""
self.monitorNP = None
self.keyboardNP = None
self.loadingError = False
# force the "loading" to take some time after the first run...
options = LoaderOptions()
options.setFlags(options.getFlags() | LoaderOptions.LFNoCache)
def gotMonitorModel(model):
if not model:
self.loadingError = True
self.monitorNP = model
self.loader.loadModel("monitor", loaderOptions=options, callback=gotMonitorModel)
def gotKeyboardModel(model):
if not model:
self.loadingError = True
self.keyboardNP = model
self.loader.loadModel("takeyga_kb", loaderOptions=options, callback=gotKeyboardModel)
def openLoadingDialog(self):
self.userConfirmed = False
self.windowRocketRegion = RocketRegion.make('pandaRocket', self.win)
self.windowRocketRegion.setActive(1)
self.windowRocketRegion.setInputHandler(self.inputHandler)
self.windowContext = self.windowRocketRegion.getContext()
self.loadingDocument = self.windowContext.LoadDocument("loading.rml")
if not self.loadingDocument:
raise AssertionError("did not find loading.rml")
self.loadingDots = 0
el = self.loadingDocument.GetElementById('loadingLabel')
self.loadingText = el.first_child
self.stopLoadingTime = globalClock.getFrameTime() + 3
self.loadingTask = self.taskMgr.add(self.cycleLoading, 'doc changer')
# note: you may encounter errors like 'KeyError: 'document'"
# when invoking events using methods from your own scripts with this
# obvious code:
#
# self.loadingDocument.AddEventListener('aboutToClose',
# self.onLoadingDialogDismissed, True)
#
# A workaround is to define callback methods in standalone Python
# files with event, self, and document defined to None.
#
# see https://www.panda3d.org/forums/viewtopic.php?f=4&t=16412
#
# Or, use this indirection technique to work around the problem,
# by publishing the app into the context, then accessing it through
# the document's context...
self.windowContext.app = self
self.loadingDocument.AddEventListener('aboutToClose',
'document.context.app.handleAboutToClose()', True)
self.loadingDocument.Show()
def handleAboutToClose(self):
self.userConfirmed = True
if self.monitorNP and self.keyboardNP:
self.onLoadingDialogDismissed()
def attachCustomRocketEvent(self, document, rocketEventName, pandaHandler, once=False):
# handle custom event
# note: you may encounter errors like 'KeyError: 'document'"
# when invoking events using methods from your own scripts with this
# obvious code:
#
# self.loadingDocument.AddEventListener('aboutToClose',
# self.onLoadingDialogDismissed, True)
#
# see https://www.panda3d.org/forums/viewtopic.php?f=4&t=16412
# this technique converts Rocket events to Panda3D events
pandaEvent = 'panda.' + rocketEventName
document.AddEventListener(
rocketEventName,
"messenger.send('" + pandaEvent + "', [event])")
if once:
self.acceptOnce(pandaEvent, pandaHandler)
else:
self.accept(pandaEvent, pandaHandler)
def cycleLoading(self, task):
"""
Update the "loading" text in the initial window until
        the user presses Space, Enter, or Escape or clicks (see loading.rml)
or sufficient time has elapsed (self.stopLoadingTime).
"""
text = self.loadingText
now = globalClock.getFrameTime()
if self.monitorNP and self.keyboardNP:
text.text = "Ready"
if now > self.stopLoadingTime or self.userConfirmed:
self.onLoadingDialogDismissed()
return task.done
elif self.loadingError:
text.text = "Assets not found"
else:
count = 5
intv = int(now * 4) % count # @UndefinedVariable
text.text = "Loading" + ("." * (1+intv)) + (" " * (2 - intv))
return task.cont
def onLoadingDialogDismissed(self):
""" Once a models are loaded, stop 'loading' and proceed to 'start' """
if self.loadingDocument:
if self.loadingTask:
self.taskMgr.remove(self.loadingTask)
self.loadingTask = None
self.showStarting()
def fadeOut(self, element, time):
""" Example updating RCSS attributes from code
by modifying the 'color' RCSS attribute to slowly
change from solid to transparent.
element: the Rocket element whose style to modify
time: time in seconds for fadeout
"""
# get the current color from RCSS effective style
color = element.style.color
# convert to RGBA form
prefix = color[:color.rindex(',')+1].replace('rgb(', 'rgba(')
def updateAlpha(t):
# another way of setting style on a specific element
attr = 'color: ' + prefix + str(int(t)) +');'
element.SetAttribute('style', attr)
alphaInterval = LerpFunc(updateAlpha,
duration=time,
fromData=255,
toData=0,
blendType='easeIn')
return alphaInterval
def showStarting(self):
""" Models are loaded, so update the dialog,
fade out, then transition to the console. """
self.loadingText.text = 'Starting...'
alphaInterval = self.fadeOut(self.loadingText, 0.5)
alphaInterval.setDoneEvent('fadeOutFinished')
def fadeOutFinished():
if self.loadingDocument:
self.loadingDocument.Close()
self.loadingDocument = None
self.createConsole()
self.accept('fadeOutFinished', fadeOutFinished)
alphaInterval.start()
def createConsole(self):
""" Create the in-world console, which displays
a RocketRegion in a GraphicsBuffer, which appears
in a Texture on the monitor model. """
self.monitorNP.reparentTo(self.render)
self.monitorNP.setScale(1.5)
self.keyboardNP.reparentTo(self.render)
self.keyboardNP.setHpr(-90, 0, 15)
self.keyboardNP.setScale(20)
self.placeItems()
self.setupRocketConsole()
# re-enable mouse
mat=Mat4(self.camera.getMat())
mat.invertInPlace()
self.mouseInterfaceNode.setMat(mat)
self.enableMouse()
def placeItems(self):
self.camera.setPos(0, -20, 0)
self.camera.setHpr(0, 0, 0)
self.monitorNP.setPos(0, 0, 1)
self.keyboardNP.setPos(0, -5, -2.5)
def setupRocketConsole(self):
"""
Place a new rocket window onto a texture
bound to the front of the monitor.
"""
self.win.setClearColor(Vec4(0.5, 0.5, 0.8, 1))
faceplate = self.monitorNP.find("**/Faceplate")
assert faceplate
mybuffer = self.win.makeTextureBuffer("Console Buffer", 1024, 512)
tex = mybuffer.getTexture()
tex.setMagfilter(Texture.FTLinear)
tex.setMinfilter(Texture.FTLinear)
faceplate.setTexture(tex, 1)
self.rocketConsole = RocketRegion.make('console', mybuffer)
self.rocketConsole.setInputHandler(self.inputHandler)
self.consoleContext = self.rocketConsole.getContext()
self.console = console.Console(self, self.consoleContext, 40, 13, self.handleCommand)
self.console.addLine("Panda DOS")
self.console.addLine("type 'help'")
self.console.addLine("")
self.console.allowEditing(True)
def handleCommand(self, command):
if command is None:
# hack for Ctrl-Break
self.spewInProgress = False
self.console.addLine("*** break ***")
self.console.allowEditing(True)
return
command = command.strip()
if not command:
return
tokens = [x.strip() for x in command.split(' ')]
command = tokens[0].lower()
if command == 'help':
self.console.addLines([
"Sorry, this is utter fakery.",
"You won't get much more",
"out of this simulation unless",
"you program it yourself. :)"
])
elif command == 'dir':
self.console.addLines([
"Directory of C:\\:",
"HELP COM 72 05-06-2015 14:07",
"DIR COM 121 05-06-2015 14:11",
"SPEW COM 666 05-06-2015 15:02",
" 2 Files(s) 859 Bytes.",
" 0 Dirs(s) 7333 Bytes free.",
""])
elif command == 'cls':
self.console.cls()
elif command == 'echo':
self.console.addLine(' '.join(tokens[1:]))
elif command == 'ver':
self.console.addLine('Panda DOS v0.01 in Panda3D ' + PandaSystem.getVersionString())
elif command == 'spew':
self.startSpew()
elif command == 'exit':
self.console.setPrompt("System is shutting down NOW!")
self.terminateMonitor()
else:
self.console.addLine("command not found")
def startSpew(self):
self.console.allowEditing(False)
self.console.addLine("LINE NOISE 1.0")
self.console.addLine("")
self.spewInProgress = True
# note: spewage always occurs in 'doMethodLater';
# time.sleep() would be pointless since the whole
# UI would be frozen during the wait.
self.queueSpew(2)
def queueSpew(self, delay=0.1):
self.taskMgr.doMethodLater(delay, self.spew, 'spew')
def spew(self, task):
# generate random spewage, just like on TV!
if not self.spewInProgress:
return
def randchr():
return chr(int(random.random() < 0.25 and 32 or random.randint(32, 127)))
line = ''.join([randchr() for _ in range(40) ])
self.console.addLine(line)
self.queueSpew()
def terminateMonitor(self):
alphaInterval = self.fadeOut(self.console.getTextContainer(), 2)
alphaInterval.setDoneEvent('fadeOutFinished')
def fadeOutFinished():
sys.exit(0)
self.accept('fadeOutFinished', fadeOutFinished)
alphaInterval.start()
app = MyApp()
app.run()
|
"""
Plotting experimental data.
"""
import numpy as np
import h5py
import microval
class ImageData:
"""
Abstract class for image data.
"""
def _clip_data(self, data_arr, region):
        sel_x = range(region[0], region[1] - 1)
        sel_y = range(region[2], region[3] - 1)
        data_arr = data_arr[np.array(sel_x, dtype=int)[:, None],
                            np.array(sel_y, dtype=int)[None, :]]
return data_arr
class TimeSeriesData(ImageData):
"""
    Abstract class for time series data from a `mat` file.
Attributes
----------
fatigue_data_key : string
key for accessing the data in the `mat` file
fatigue_data
data of the `mat` file
"""
fatigue_data_key = "FatigueDataStruct"
def __init__(self, fatiguedata_mat_file_name):
"""
Initialize the `TimeSeriesData`.
Parameters
----------
fatiguedata_mat_file_name : string
path to the mat file to read data from
"""
self._fatigue_mat_data = self._get_mat_file_contents(fatiguedata_mat_file_name)
self.fatigue_data = self._fatigue_mat_data[self.fatigue_data_key]
def _get_mat_file_contents(self, mat_file_name):
return h5py.File(mat_file_name, 'r')
def _clip_data(self, data_arr, region, scale_factor=1.):
region = [int(r / scale_factor) for r in region]
return super(TimeSeriesData, self)._clip_data(data_arr, region)
    def _invert_blackwhite_colorimage(self, image_data):
        # np.logical_and only combines two arrays at a time (a third argument is the
        # `out` buffer), so chain the channel-wise comparisons explicitly to find
        # pure black and pure white pixels.
        w = np.where(
            (image_data[0] == 0.0) &
            (image_data[1] == 0.0) &
            (image_data[2] == 0.0))
        w2 = np.where(
            (image_data[0] == 1.0) &
            (image_data[1] == 1.0) &
            (image_data[2] == 1.0))
for i in range(3):
image_data[i][w] = 1 - image_data[i][w]
image_data[i][w2] = 1 - image_data[i][w2]
return image_data
def imshow(self, data, region=None, **kwargs):
"""
Plot the data.
Parameters
----------
data : np.array
image data to plot
region : list or None
            optional, plot only a segment of the data if the value is not `None`;
            list or array in the form `[x_min, x_max, y_min, y_max]`
default : None
kwargs
optional, keyword arguments for `scaled_imshow`
"""
if region is None:
offset = [0, 0]
else:
offset = [region[0], region[2]]
return microval.scaled_imshow(
data, offset=offset, **kwargs)
def cyclenumbers(self):
"""
Get cycle numbers.
Returns
-------
cycles : np.array
array of cycle numbers for all time frames
"""
xs, ys = self.fatigue_data['Cyclenumber'].shape
cyclenumbers = []
for x in range(xs):
for y in range(ys):
v = self._fatigue_mat_data[self.fatigue_data['Cyclenumber'][x, y]]
cyclenumbers.append(v[0])
return np.array(cyclenumbers)[:, 0]
class BinaryData(TimeSeriesData):
"""
Data structure for binary data.
Attributes
----------
binary_data_key : string
        key for accessing the binary data in the `mat` file
"""
binary_data_key = 'image_time_series'
def __init__(self, binaraydata_mat_file_name, *args, **kwargs):
super(BinaryData, self).__init__(*args, **kwargs)
self._binary_mat_data = self._get_mat_file_contents(binaraydata_mat_file_name)
self.binary_data = self._binary_mat_data[self.binary_data_key]
def imshow(self, time_step_number=-1, subtract_initial_state=False, **kwargs):
"""
Plot binary data.
Parameters
----------
time_step_number : int
            optional, index into the time step array of the time step to plot
default : -1
subtract_initial_state : bool
optional, subtract first frame to get difference image
default : False
kwargs
optional, keyword arguments for `scaled_imshow` or `get_binary_data`
"""
data = self.get_binary_data(
time_step_number=time_step_number, subtract_initial_state=subtract_initial_state, **kwargs)
        return TimeSeriesData.imshow(self, data, cmap='Greys', **kwargs)
def get_binary_data(
self,
time_step_number=-1,
region=None,
scale_factor=1.0,
subtract_initial_state = False,
**kwargs):
"""
Get data for a defined time step.
Parameters
----------
time_step_number : int
            optional, index into the time step array of the time step to plot
default : -1
region : list or None
            optional, return only a segment of the data if the value is not `None`;
            list or array in the form `[x_min, x_max, y_min, y_max]`
default : None
scale_factor : float
optional, scale factor for clipping the image
default : 1.0
subtract_initial_state : bool
optional, subtract first frame to get difference image
default : False
Returns
-------
np.array
matrix of binarized data
"""
def data_for_timestepno(n):
binary_data_timestep = self._binary_mat_data[self.binary_data[n][0]]
return np.array(binary_data_timestep)/255.
data_arr = data_for_timestepno(time_step_number)
if subtract_initial_state:
data_arr = data_arr - data_for_timestepno(0)
if region is not None:
data_arr = self._clip_data(data_arr, region, scale_factor)
return data_arr
def get_num_binary_data(self):
"""
Get number of frames in the binary data.
Returns
-------
int
number of frames
"""
return len(self.binary_data)
class SegmentedData(TimeSeriesData):
"""
    Data structure for segmented data.
Attributes
----------
segmented_data_key : string
key for accessing the segmented data in the `mat` file
"""
segmented_data_key = 'foreground_mask'
def __init__(self, segmenteddata_mat_file_name, *args, **kwargs):
super(SegmentedData, self).__init__(*args, **kwargs)
self._seg_mat_data = self._get_mat_file_contents(segmenteddata_mat_file_name)
self.segmented_data = self._read_segmented_data()
def _read_segmented_data(self):
segmented_data = np.array(self._seg_mat_data[self.segmented_data_key])
return np.append(segmented_data,[np.zeros(segmented_data[0].shape)],axis=0)
def imshow(self, invert_blackwhite=False, as_binary = False, **kwargs):
"""
Plot segmented data.
        Note that no definition of time step is required, as segmented data only exists for the last frame.
Parameters
----------
invert_blackwhite : bool
optional, change black and white colors in plot
default : False
as_binary : bool
optional, plot as binary data
default : False
kwargs
optional, keyword arguments for `scaled_imshow` or `get_segmented_data`
"""
data = self.get_segmented_data(**kwargs)
if as_binary:
data = data.T
data[np.where(np.any(data==1,axis=2))] = 1
data = data.T
if invert_blackwhite:
data = self._invert_blackwhite_colorimage(data)
super(SegmentedData, self).imshow(data, **kwargs)
def get_dimensions(self, scale_factor=1.):
"""
        Get dimensions of the segmented image.
Parameters
----------
scale_factor : float
optional, scale dimensions by a factor
default : 1
"""
return np.array(self.get_segmented_data().shape) * scale_factor
def get_segmented_data(self, region=None, scale_factor=1., **kwargs):
"""
Get segmented data.
Parameters
----------
region : list or None
            optional, return only a segment of the data if the value is not `None`;
            list or array in the form `[x_min, x_max, y_min, y_max]`
default : None
scale_factor : float
optional, scale factor for clipping the image
default : 1.0
Returns
-------
np.array
matrix of segmented data
"""
data_arr = np.array(self.segmented_data)
if region is not None:
data_arr = np.array(
[self._clip_data(da, region, scale_factor) for da in data_arr])
return data_arr
def imshow_cracks(self, invert_blackwhite=False, **kwargs):
"""
Plot only the cracks of segmented data.
        Note that no definition of time step is required, as segmented data only exists for the last frame.
Parameters
----------
invert_blackwhite : bool
optional, change black and white colors in plot
default : False
kwargs
optional, keyword arguments for `scaled_imshow` or `get_cracks_from_segmented_data`
"""
data = self.get_cracks_from_segmented_data(**kwargs)
if invert_blackwhite:
data = self._invert_blackwhite_colorimage(data)
return super(SegmentedData, self).imshow(data, **kwargs)
def get_cracks_from_segmented_data(
self, region=None, scale_factor=1., **kwargs):
"""
Get only cracks in segmented data.
Parameters
----------
region : list or None
            optional, return only a segment of the data if the value is not `None`;
            list or array in the form `[x_min, x_max, y_min, y_max]`
default : None
scale_factor : float
optional, scale factor for clipping the image
default : 1.0
Returns
-------
np.array
matrix of segmented data
"""
segdat_mod = np.copy(self.segmented_data)
segdat_mod[1] = 0
if region is not None:
segdat_mod = np.array(
[self._clip_data(da, region, scale_factor) for da in segdat_mod])
return segdat_mod
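# --- Hedged usage sketch (not part of the original module; file names are placeholders) ---
# Typical flow, assuming 'binary.mat' holds the image time series and 'fatigue.mat'
# holds the FatigueDataStruct described above.
def _plot_last_binary_frame_demo():
    data = BinaryData('binary.mat', 'fatigue.mat')
    n_frames = data.get_num_binary_data()
    cycles = data.cyclenumbers()
    # difference image of the last frame against the first, clipped to a sub-region
    data.imshow(time_step_number=-1, subtract_initial_state=True, region=[0, 512, 0, 512])
    return n_frames, cycles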
|
'''IBGE submodule containing miscellaneous functions.
This submodule is imported automatically with the `ibge` module.
>>> from DadosAbertosBrasil import ibge
'''
from typing import Union
import pandas as _pd
import requests
from DadosAbertosBrasil._utils import parse
from DadosAbertosBrasil._utils.errors import DAB_LocalidadeError
from DadosAbertosBrasil._utils.get_data import get_data
_normalize = _pd.io.json.json_normalize \
if _pd.__version__[0] == '0' else _pd.json_normalize
def populacao(
projecao: str = None,
localidade: int = None
) -> Union[dict, int]:
    '''Gets the population projection for Brazil.
    Parameters
    ----------
    projecao : str (default=None)
        - 'populacao' returns the projected total population of the locality;
        - 'nascimento' returns the projected number of births in the locality;
        - 'obito' returns the projected number of deaths in the locality;
        - 'incremento' returns the projected population increment.
        - None returns a dictionary with all of the values above.
    localidade : int (default=None)
        Code of the desired locality.
        By default, returns the values for Brazil.
        Use the `ibge.localidades` function to identify
        the desired locality.
    Returns
    -------
    dict or int:
        Projected value(s) for the chosen indicator.
    Raises
    ------
    DAB_LocalidadeError
        If the locality code is invalid.
    ValueError
        If the `projecao` argument is invalid.
    Examples
    --------
    Projected number of deaths for Brazil.
>>> ibge.populacao('obito')
45000
    Get data for Rio de Janeiro (locality 33)
>>> ibge.populacao(localidade=33)
{
'localidade': '33',
'horario': '03/07/2021 19:15:48',
'projecao': {
'populacao': 17459953,
'periodoMedio': {
'incrementoPopulacional': 330508
}
}
}
'''
localidade = parse.localidade(localidade, '')
query = f'https://servicodados.ibge.gov.br/api/v1/projecoes/populacao/{localidade}'
r = requests.get(query).json()
    if projecao is None:
return r
elif projecao == 'populacao':
return r['projecao']['populacao']
elif projecao == 'nascimento':
return r['projecao']['periodoMedio']['nascimento']
elif projecao == 'obito':
return r['projecao']['periodoMedio']['obito']
elif projecao == 'incremento':
return r['projecao']['periodoMedio']['incrementoPopulacional']
else:
        raise ValueError('''The `projecao` argument must be one of the following string values:
            - 'populacao';
            - 'nascimento';
            - 'obito';
            - 'incremento'.''')
def localidades(
nivel: str = 'distritos',
divisoes: str = None,
localidade: Union[int, str, list] = None,
ordenar_por: str = None,
index: bool = False
) -> _pd.DataFrame:
    '''Gets the set of Brazilian localities and their intra-regional divisions.
    Parameters
    ----------
    nivel : str (default='distritos')
        Geographic level of the data.
    divisoes : str (default=None)
        Intra-regional subdivisions of the level.
        If None, captures all records of `nivel`.
    localidade : int | str | list (default=None)
        ID (or list of IDs) of the locality used to filter `nivel`.
    ordenar_por : str (default=None)
        Column by which the table will be sorted.
    index : bool (default=False)
        If True, sets the 'id' column as the DataFrame index.
    Returns
    -------
    pandas.core.frame.DataFrame
        DataFrame containing the desired localities.
    Raises
    ------
    DAB_LocalidadeError
        If the geographic level is invalid.
    Examples
    --------
    Capture all Brazilian states
>>> ibge.localidades(nivel='estados')
id sigla nome regiao_id regiao_sigla regiao_nome
0 11 RO Rondônia 1 N Norte
1 12 AC Acre 1 N Norte
2 13 AM Amazonas 1 N Norte
3 14 RR Roraima 1 N Norte
4 15 PA Pará 1 N Norte
.. ... ... ... ... ... ...
    Capture all Brazilian districts, setting the ID as the index.
>>> ibge.localidades(index=True)
nome municipio_id ... regiao_sigla regiao_nome
id ...
520005005 Abadia de Goiás 5200050 ... CO Centro-Oeste
310010405 Abadia dos Dourados 3100104 ... SE Sudeste
520010005 Abadiânia 5200100 ... CO Centro-Oeste
520010010 Posse d'Abadia 5200100 ... CO Centro-Oeste
310020305 Abaeté 3100203 ... SE Sudeste
... ... ... ... ... ...
    Capture all municipalities of the state of Rio de Janeiro (localidade=33)
>>> ibge.localidades(nivel='estados', divisoes='municipios', localidade=33)
id nome microrregiao_id microrregiao_nome \
0 3300100 Angra dos Reis 33013 Baía da Ilha Grande
1 3300159 Aperibé 33002 Santo Antônio de Pádua
2 3300209 Araruama 33010 Lagos
3 3300225 Areal 33005 Três Rios
4 3300233 Armação dos Búzios 33010 Lagos
.. ... ... ... ...
    Original documentation
    ----------------------
https://servicodados.ibge.gov.br/api/docs/localidades
'''
NIVEIS = {
'distritos',
'estados',
'mesorregioes',
'microrregioes',
'municipios',
'regioes-imediatas',
'regioes-intermediarias',
'regioes',
'paises'
}
nivel = nivel.lower()
if nivel not in NIVEIS:
        raise DAB_LocalidadeError(f'''Invalid level:
            Fill the `nivel` argument with one of the following values:
            {NIVEIS}''')
path = ['localidades', nivel]
params = {}
if localidade is not None:
if isinstance(localidade, list):
localidade = '|'.join([str(loc) for loc in localidade])
path.append(localidade)
if divisoes is not None:
divisoes = divisoes.lower()
if divisoes not in NIVEIS:
            raise DAB_LocalidadeError(f'''Invalid subdivision:
                Fill the `divisoes` argument with one of the following values:
                {NIVEIS}''')
if nivel != divisoes:
path.append(divisoes)
if ordenar_por is not None:
params['orderBy'] = ordenar_por
data = get_data(
endpoint = 'https://servicodados.ibge.gov.br/api/v1/',
path = path,
params = params
)
df = _normalize(data)
def _loc_columns(x: str) -> str:
y = x.replace('-', '_').split('.')
return f'{y[-2]}_{y[-1]}' if len(y)>1 else y[0]
df.columns = df.columns.map(_loc_columns)
if index:
df.set_index('id', inplace=True)
return df
def malha(
localidade: int,
nivel: str = 'estados',
divisoes: str = None,
periodo: int = 2020,
formato: str = 'svg',
qualidade: str = 'maxima'
) -> _pd.DataFrame:
    '''Gets the URL of the mesh corresponding to the locality identifier.
    Parameters
    ----------
    localidade : int (default=None)
        Code of the desired locality.
        Use the `ibge.localidades` function to identify the locality.
    nivel : str (default='estados')
        Geographic level of the data.
    divisoes : str (default=None)
        Intra-regional subdivisions of the level.
        If None, returns the mesh without subdivisions.
    periodo : int (default=2020)
        Year of the mesh revision.
    formato : str {'svg', 'json', 'geojson'} (default='svg')
        Format of the mesh data.
    qualidade : str {'minima', 'intermediaria', 'maxima'} (default='maxima')
        Image quality of the mesh.
    Returns
    -------
    str
        If formato='svg', returns the URL of the mesh of the desired locality.
    json
        If formato='json', returns the mesh in TopoJSON format.
    geojson
        If formato='geojson', returns the mesh in GeoJSON format.
    Raises
    ------
    DAB_LocalidadeError
        If the geographic level is invalid.
    Examples
    --------
    Capture the mesh of the Federal District (localidade=53) in GeoJSON format.
>>> ibge.malha(localidade=53, formato='geojson')
{'type': 'FeatureCollection',
'features': [{'type': 'Feature',
'geometry': {'type': 'Polygon',
'coordinates': [[[-47.31, -16.0363], ...
    Capture the mesh of Joinville in SVG format with minimum quality.
>>> ibge.malha(
... nivel = 'municipios',
... localidade = 4209102,
... formato = 'svg',
... qualidade = 'minima'
... )
'https://servicodados.ibge.gov.br/api/v3/malhas/municipios/4209102?...'
    Capture the mesh of Brazil subdivided by state (UF) in TopoJSON format.
>>> ibge.malha(
... nivel = 'paises',
... localidade = 'BR',
... divisoes = 'uf',
... formato = 'json'
... )
{'type': 'Topology',
'arcs': [[[32967, 111009], [-821, 372]],
[[32146, 111381],
[133, 124],
[15, 106], ...
    Original documentation
    ----------------------
https://servicodados.ibge.gov.br/api/docs/malhas?versao=3
'''
FORMATOS = {
'svg': 'image/svg+xml',
'geojson': 'application/vnd.geo+json',
'json': 'application/json'
}
NIVEIS = {
'estados',
'mesorregioes',
'microrregioes',
'municipios',
'regioes-imediatas',
'regioes-intermediarias',
'regioes',
'paises'
}
DIVISOES = {
'uf',
'mesorregiao',
'microrregiao',
'municipio',
'regiao-imediata',
'regiao-intermediaria',
'regiao'
}
nivel = nivel.lower()
if nivel not in NIVEIS:
        raise DAB_LocalidadeError(f'''Invalid level:
            Fill the `nivel` argument with one of the following values:
            {NIVEIS}''')
path = ['malhas', nivel, localidade]
params = {
'periodo': periodo,
'qualidade': qualidade.lower(),
'formato': FORMATOS[formato.lower()]
}
if divisoes is not None:
divisoes = divisoes.lower()
if divisoes not in DIVISOES:
            raise DAB_LocalidadeError(f'''Invalid subdivision:
                Fill the `divisoes` argument with one of the following values:
                {DIVISOES}''')
if nivel != divisoes:
params['intrarregiao'] = divisoes
url = 'https://servicodados.ibge.gov.br/api/v3/'
url += '/'.join([str(p) for p in path])
data = requests.get(
url = url,
params = params
)
if formato.lower().endswith('json'):
return data.json()
else:
return data.url
def coordenadas() -> _pd.DataFrame:
    '''Gets the coordinates of every Brazilian locality, including
    latitude, longitude and altitude.
    Returns
    -------
    pandas.core.frame.DataFrame
        DataFrame with the coordinates of every Brazilian locality.
    Examples
    --------
>>> ibge.coordenadas()
GM_PONTO ID CD_GEOCODIGO TIPO CD_GEOCODBA NM_BAIRRO \
0 NaN 1 110001505000001 URBANO 1.100015e+11 Redondo \
1 NaN 2 110001515000001 URBANO NaN NaN \
2 NaN 3 110001520000001 URBANO NaN NaN \
3 NaN 4 110001525000001 URBANO NaN NaN \
4 NaN 5 110001530000001 URBANO NaN NaN \
.. ... .. ... ... ... ... \
'''
return _pd.read_csv(
r'https://raw.githubusercontent.com/GusFurtado/DadosAbertosBrasil/master/data/coordenadas.csv',
sep = ';'
)
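# --- Hedged usage sketch (mirrors the docstring examples above) ---
# All calls below query the live IBGE API, so results depend on the service being up;
# the locality codes are the ones used in the examples (33 = Rio de Janeiro, 53 = DF).
def _exemplo_ibge():
    estados = localidades(nivel='estados')              # DataFrame of Brazilian states
    projecao_rj = populacao(localidade=33)               # population projection for Rio de Janeiro
    malha_df_url = malha(localidade=53, formato='svg')   # URL of the Federal District mesh
    coords = coordenadas()                               # coordinates of all localities
    return estados.head(), projecao_rj, malha_df_url, coords.head()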
|
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
while True:
    _, frame = cap.read()
    # convert to grayscale and smooth before edge detection to reduce noise
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blurred_frame = cv2.GaussianBlur(frame, (5, 5), 0)
    # second-derivative (Laplacian) and Canny edge maps of the blurred frame
    laplacian = cv2.Laplacian(blurred_frame, cv2.CV_64F)
    canny = cv2.Canny(blurred_frame, 100, 150)
    cv2.imshow("Frame", frame)
    cv2.imshow("Laplacian", laplacian)
    cv2.imshow("Canny", canny)
    # exit on Esc
    key = cv2.waitKey(1)
    if key == 27:
        break
cap.release()
cv2.destroyAllWindows()
|
test_cases = [
test_case(
cmd='yb_check_db_views.py @{argsdir}/db1 --database_in {db1}'
, exit_code=0
, stdout="""-- Running broken view check.
-- 0 broken view/s in "{db1}".
-- Completed check, found 0 broken view/s in 1 db/s."""
, stderr='')
, test_case(
cmd='yb_check_db_views.py @{argsdir}/db2 --database_in {db2}'
, exit_code=0
, stdout="""-- Running broken view check.
{db2}.dev.broken1_v
{db2}.dev.broken2_v
{db2}.dev."Broken3_v"
{db2}."Prod".broken1_v
-- 4 broken view/s in "{db2}".
-- Completed check, found 4 broken view/s in 1 db/s."""
, stderr='')
]
|
import warnings
import numbers
import collections
import numpy as np
import pickle
import os
from autoscalingsim.scaling.policiesbuilder.metric.scaling_aspect_calculation.calculators.learning_based.model.model import ScalingAspectToQualityMetricModel
class LinearModel(ScalingAspectToQualityMetricModel):
def _internal_predict(self, model_input):
return self._model.predict(model_input)
def _internal_fit(self, model_input, model_output):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if self.kind == 'offline':
self._model.fit(model_input, model_output)
elif self.kind == 'online':
self._model.partial_fit(model_input, model_output)
    def save_to_location(self, path_to_model_file : str):
        with open(path_to_model_file, 'wb') as f:
            pickle.dump(self._model, f)
    def load_from_location(self, path_to_model_file : str):
        if path_to_model_file is not None:
            if os.path.exists(path_to_model_file):
                with open(path_to_model_file, 'rb') as f:
                    self._model = pickle.load(f)
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Denys Duchier, IUT d'Orléans
#==============================================================================
import threading, os, os.path, json, pickle
#==============================================================================
# basic functionality for all databases
#==============================================================================
class BasicDB(dict):
def __init__(self, filename):
self.lock = threading.RLock()
self.json_path = "%s.json" % filename
self.pickle_path = "%s.pckl" % filename
def reset(self):
self.clear()
def json_save(self):
with self.lock:
os.makedirs(os.path.dirname(self.json_path), exist_ok=True)
with open(self.json_path, "w") as f:
json.dump(self, f, ensure_ascii=False, indent=4)
def json_load(self, required=False):
try:
with open(self.json_path, "r") as f:
data = json.load(f)
with self.lock:
self.clear()
self.update(data)
except FileNotFoundError:
if required:
raise
def pickle_save(self):
with self.lock:
os.makedirs(os.path.dirname(self.pickle_path), exist_ok=True)
with open(self.pickle_path, "wb") as f:
pickle.dump(dict(self), f, protocol=-1)
def pickle_load(self, required=False):
try:
with open(self.pickle_path, "rb") as f:
data = pickle.load(f)
with self.lock:
self.clear()
self.update(data)
except FileNotFoundError:
if required:
raise
except EOFError:
if required:
raise
save = pickle_save
load = pickle_load
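# --- Illustrative usage sketch (not part of the original module) ---
# Round trip through the pickle-backed save/load aliases; the "data/users"
# prefix below is a made-up example path.
if __name__ == "__main__":
    db = BasicDB("data/users")
    db["alice"] = {"score": 3}
    db.save()    # alias for pickle_save(); writes data/users.pckl
    db.reset()   # clears the in-memory dict
    db.load()    # alias for pickle_load(); restores the saved data
    assert db["alice"]["score"] == 3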
|
from setuptools import find_packages, setup
setup(
name='baking_cookies',
packages=find_packages(),
version='0.1.0',
description='Tutorials and examples for working with data at Nesta.',
author='Nesta',
license='MIT',
)
|
import os
import shutil
import unittest
from click.testing import CliRunner
from blogger_cli import ROOT_DIR
from blogger_cli.cli import cli
from pkg_resources import resource_filename
class TestBasic(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
HOME_DIR = os.path.expanduser("~")
self.export_dir = os.path.join(HOME_DIR, ".blogger_tmp")
os.mkdir(self.export_dir)
self.blog_dir = os.path.join(self.export_dir, "blog")
self.index_path = os.path.join(self.blog_dir, "index.html")
self.runner.invoke(
cli, ["addblog", "test1"], input=self.export_dir + "\nn \nn \nn \nn \nn \nn"
)
self.runner.invoke(cli, ["config", "-b", "test1", "blog_posts_dir", "blog/"])
self.runner.invoke(cli, ["config", "-b", "test1", "blog_images_dir", "images/"])
self.runner.invoke(cli, ["export", "-b", "test1", "blog_index", "-o", "blog"])
def test_html(self):
html_path = resource_filename("blogger_cli", "tests/tests_resources/html.html")
test_index_path = resource_filename(
"blogger_cli", "tests/tests_resources/index/html_index.html"
)
result = self.runner.invoke(cli, ["convert", "-b", "test1", html_path, "-v"])
self.assertEqual(result.exit_code, 0)
self.assertEqual({"blog", "images"}, set(os.listdir(self.export_dir)))
self.assertEqual({"html.html", "index.html"}, set(os.listdir(self.blog_dir)))
self.assertEqual(
self.read_file(self.index_path), self.read_file(test_index_path)
)
@staticmethod
def read_file(file_path):
with open(file_path, "r", encoding="utf-8") as rf:
data = rf.read()
return data
def test_ipynb(self):
ipynb1_path = resource_filename(
"blogger_cli", "tests/tests_resources/ipynb1.ipynb"
)
test_index_path = resource_filename(
"blogger_cli", "tests/tests_resources/index/ipynb1_index.html"
)
result = self.runner.invoke(cli, ["convert", "-b", "test1", ipynb1_path, "-v"])
self.assertEqual(result.exit_code, 0)
self.assertEqual({"blog", "images"}, set(os.listdir(self.export_dir)))
self.assertEqual(
{"index.html", "ipynb1.html", "ipynb1.ipynb"},
set(os.listdir(self.blog_dir)),
)
self.assertEqual(
self.read_file(self.index_path), self.read_file(test_index_path)
)
def test_ipynb_images_and_index(self):
self.test_ipynb()
ipynb2_path = resource_filename(
"blogger_cli", "tests/tests_resources/ipynb2.ipynb"
)
test_index_path = resource_filename(
"blogger_cli", "tests/tests_resources/index/ipynb2_index.html"
)
result = self.runner.invoke(cli, ["setdefault", "test1"])
result = self.runner.invoke(cli, ["convert", ipynb2_path, "-v"])
self.assertEqual(result.exit_code, 0)
self.assertEqual({"blog", "images"}, set(os.listdir(self.export_dir)))
self.assertEqual(
{
"index.html",
"ipynb1.html",
"ipynb1.ipynb",
"ipynb2.html",
"ipynb2.ipynb",
},
set(os.listdir(self.blog_dir)),
)
self.assertEqual(
self.read_file(self.index_path), self.read_file(test_index_path)
)
images_dir = os.path.join(self.export_dir, "images")
post_image_dir = os.path.join(images_dir, "ipynb2")
self.assertEqual(["ipynb2"], os.listdir(images_dir))
self.assertEqual(["image_1.png"], os.listdir(post_image_dir))
def test_md(self):
md_path = resource_filename("blogger_cli", "tests/tests_resources/md1.md")
test_index_path = resource_filename(
"blogger_cli", "tests/tests_resources/index/md_index.html"
)
result = self.runner.invoke(cli, ["convert", "-b", "test1", md_path, "-v"])
self.assertEqual(result.exit_code, 0)
self.assertEqual({"blog", "images"}, set(os.listdir(self.export_dir)))
self.assertEqual(
{"index.html", "md1.html", "md1.md"}, set(os.listdir(self.blog_dir))
)
self.assertEqual(
self.read_file(self.index_path), self.read_file(test_index_path)
)
def test_md_meta_and_custom_templates(self):
md_path = resource_filename("blogger_cli", "tests/tests_resources/md2.md")
test_results_path = resource_filename(
"blogger_cli", "tests/tests_resources/results/md2.html"
)
test_index_path = resource_filename(
"blogger_cli", "tests/tests_resources/index/meta_and_templates_index.html"
)
templates_path = os.path.join(
ROOT_DIR, "tests", "tests_resources", "_blogger_templates"
)
result = self.runner.invoke(
cli, ["convert", "-b", "test1", md_path, "-v", "-temp", templates_path]
)
self.assertEqual(result.exit_code, 0)
self.assertEqual({"blog", "images"}, set(os.listdir(self.export_dir)))
topic_dir = os.path.join(self.blog_dir, "meta")
self.assertEqual({"md2.html", "md2.md"}, set(os.listdir(topic_dir)))
converted_html = os.path.join(topic_dir, "md2.html")
self.assertEqual(
self.read_file(self.index_path), self.read_file(test_index_path)
)
# os.system('cp '+ converted_html + ' ' + test_results_path)
self.assertEqual(
self.read_file(converted_html), self.read_file(test_results_path)
)
def tearDown(self):
self.runner.invoke(cli, ["rmblog", "test1"])
shutil.rmtree(self.export_dir)
if __name__ == "__main__":
unittest.main()
|
import numpy as np
from typing import Union, Callable, Tuple
__all__ = ['LossLayer', 'FCLayer']
class Layer:
def __init__(self, activation: Union[str, Tuple[Callable[[np.array], np.array],
Callable[[np.array], np.array]]]) -> None:
"""
        Abstract class defining various methods and attributes shared by different Layer types.
:param activation:
Activation function, one out of ['sigmoid', ...] or a tuple of callables with the first being
the activation function and the second its derivative
"""
self._sigmoid = np.vectorize(lambda x: 1 / (1 + np.exp(-x)))
self._sigmoid_der = np.vectorize(lambda x: np.exp(-x) / (1 + np.exp(-x)) ** 2)
if isinstance(activation, str):
activations = {'sigmoid': (self._sigmoid, self._sigmoid_der)}
self.activation = activations[activation]
else:
self.activation = activation
def forward_pass(self, inputs: Tuple[np.array]) -> np.array:
pass
def reverse_pass(self, inputs: Tuple[np.array]) -> np.array:
pass
class LossLayer(Layer):
def __init__(self, loss: Union[str, Tuple[Callable[[np.array], np.array],
Callable[[np.array], np.array]]]) -> None:
"""
Class implementing various loss functions
:param loss:
Loss function, one out of ['MAE', 'RMSE', ...]
"""
        super().__init__(activation=loss)
self.loss_function = {
'RMSE': np.vectorize(lambda x, y: np.sqrt((x - y) ** 2))
}[loss]
def forward_pass(self, inputs: Tuple[np.array]):
        return self.loss_function(inputs[0], inputs[1])
class FCLayer(Layer):
def __init__(self, n_inputs: int, n_outputs: int, activation='sigmoid') -> None:
super().__init__(activation)
        self.weights = np.random.rand(n_inputs + 1, n_outputs)
self.n_inputs = n_inputs
self.n_outputs = n_outputs
def forward_pass(self, inputs: Tuple[np.array]) -> np.array:
        # prepend a constant 1 for the bias row of the weight matrix
        return self.activation[0](np.concatenate([np.ones(1), inputs]) @ self.weights)
def reverse_pass(self, inputs: Tuple[np.array]) -> np.array:
pass
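# --- Illustrative sketch (not part of the original module) ---
# One forward pass through an FCLayer with the built-in sigmoid activation;
# the sizes and input values below are arbitrary assumptions for the demo.
if __name__ == '__main__':
    layer = FCLayer(n_inputs=3, n_outputs=2, activation='sigmoid')
    x = np.array([0.5, -1.0, 2.0])
    out = layer.forward_pass(x)  # a bias of 1 is prepended internally; output shape (2,)
    print(out)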
|
import diagrams
import diagrams.aws.compute as compute
import diagrams.aws.devtools as devtools
import diagrams.aws.management as manage
import diagrams.aws.network as network
import diagrams.aws.storage as storage
diagram_attr = {
'margin': '-0.8,-0.8',
'size': '10,8',
'bgcolor': 'transparent'
}
with diagrams.Diagram(
'',
show=False,
graph_attr=diagram_attr,
filename='docs/overview',
# direction='TB'
direction='LR'
):
attr = {
'margin': '30'
}
with diagrams.Cluster(
'Cloud Development Kit (CDK)', graph_attr=attr):
app = devtools.CloudDevelopmentKit(
'CDK App\n[mcservers]')
with diagrams.Cluster(
'CDK Stack = Network',
graph_attr=attr):
networkStack = manage.CloudformationStack(
'CloudFormation Stack\n[mcservers-env-network]')
with diagrams.Cluster(
'CDK Stack = Hosts',
graph_attr=attr):
hostAStack = manage.CloudformationStack(
'Nested Stack\n[Server A]')
hostBStack = manage.CloudformationStack(
'Nested Stack\n[Server B]')
hostCStack = manage.CloudformationStack(
'Nested Stack\n[Server C]')
hostsStack = manage.CloudformationStack(
'CloudFormation Stack\n[mcservers-env-hosts]')
hostsStack >> [hostCStack, hostBStack, hostAStack]
app >> [hostsStack, networkStack]
with diagrams.Cluster('Networking Resources', graph_attr=attr):
vpc = network.VPC('VPC')
subnet = network.PublicSubnet('Public Subnet')
vpc - subnet
networkStack >> [vpc, subnet]
with diagrams.Cluster('Compute Resources', graph_attr=attr):
ec2a = compute.EC2('Minecraft\nServer A')
ec2b = compute.EC2('Minecraft\nServer B')
ec2c = compute.EC2('Minecraft\nServer C')
hostAStack >> ec2a
hostBStack >> ec2b
hostCStack >> ec2c
ec2a - subnet
ec2b - subnet
ec2c - subnet
|
import importlib
import builtins
from .scope import Scope, allowed_classes
from .vmbuiltins import setBuiltins
# Return scope-before-scope0
def fillScope0(scope0):
scope0.inherits = {}
# Built-in constants
scope0.inherits["False"] = False
scope0.inherits["True"] = True
scope0.inherits["None"] = None
scope0.inherits["NotImplemented"] = NotImplemented
scope0.inherits["Ellipsis"] = Ellipsis
# Types and exceptions
for type_object in allowed_classes:
type_name = type_object.__name__
if hasattr(builtins, type_name):
scope0.inherits[type_name] = getattr(builtins, type_name)
# Secure default functions
funcs = [
"abs", "all", "any", "ascii", "bin", "callable", "chr", "delattr",
"dir", "divmod", "format", "getattr", "hasattr", "hash", "hex", "id",
"isinstance", "issubclass", "iter", "len", "max", "min", "next", "oct",
"ord", "pow", "repr", "round", "setattr", "sorted", "sum"
]
for func_name in funcs:
scope0.inherits[func_name] = getattr(builtins, func_name)
# Now add more builtins
setBuiltins(scope0)
|
"""Various network "heads" for classification.
The design is as follows:
... -> RoI ----\
-> RoIFeatureXform -> prn head -> prn output -> prn loss
... -> Feature /
Map
The PRN head produces a feature representation of the RoI for the purpose
of classifying whether the RoI needs further refinement. The PRN output module
converts the feature representation into classification predictions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from core.config import cfg
from utils.c2 import const_fill
from utils.c2 import gauss_fill
import utils.blob as blob_utils
# ---------------------------------------------------------------------------- #
# PRN outputs and losses
# ---------------------------------------------------------------------------- #
def add_prn_outputs(model, blob_in, dim):
"""Add RoI classification output ops."""
blob_out = model.FC(
blob_in,
'prn_logits',
dim,
model.num_classes,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
if not model.train: # == if test
# Only add sigmoid when testing; during training the sigmoid is
# combined with the label cross entropy loss for numerical stability
blob_out = model.net.Sigmoid('prn_logits', 'prn_probs', engine='CUDNN')
return blob_out
def add_prn_losses(model):
"""Add losses for RoI classification."""
loss_prn = model.net.SigmoidCrossEntropyLoss(
['prn_logits', 'prn_labels_int32'],
'loss_prn',
scale=1. / cfg.NUM_GPUS
)
loss_gradients = blob_utils.get_loss_gradients(model, [loss_prn])
model.AddLosses(['loss_prn'])
# And add some useful metrics
model.net.Sigmoid('prn_logits', 'prn_probs')
model.SigmoidAccuracy(['prn_probs', 'prn_labels_int32'], 'accuracy_prn')
model.AddMetrics('accuracy_prn')
model.AddMetrics('refine_ratio')
return loss_gradients
# ---------------------------------------------------------------------------- #
# PRN heads
# ---------------------------------------------------------------------------- #
def add_prn_head(model, blob_in, dim_in, spatial_scale, prefix):
return add_roi_2mlp_head(
model, blob_in, dim_in, spatial_scale, prefix
)
def add_roi_2mlp_head(model, blob_in, dim_in, spatial_scale, prefix):
"""Add a ReLU MLP with two hidden layers."""
hidden_dim = cfg.PRN.MLP_HEAD_DIM
roi_size = cfg.PRN.ROI_XFORM_RESOLUTION
roi_feat = model.RoIFeatureTransform(
blob_in,
'prn_roi_feat',
blob_rois=prefix + '_rois',
method=cfg.PRN.ROI_XFORM_METHOD,
resolution=roi_size,
sampling_ratio=cfg.PRN.ROI_XFORM_SAMPLING_RATIO,
spatial_scale=spatial_scale
)
model.FC(roi_feat, 'prn_fc1', dim_in * roi_size * roi_size, hidden_dim)
model.Relu('prn_fc1', 'prn_fc1')
model.FC('prn_fc1', 'prn_fc2', hidden_dim, hidden_dim)
model.Relu('prn_fc2', 'prn_fc2')
return 'prn_fc2', hidden_dim
# ---------------------------------------------------------------------------- #
# PRN labels
# ---------------------------------------------------------------------------- #
def add_prn_labels(model):
assert model.train, 'Only valid at training'
model.PrepareLabelsForPRNAndUpdateRefineBlobs()
|
import torch, pdb
import torch.nn
from IPython import embed
import torch.nn.functional as F
import pdb
import random
import numpy as np
import time
import functools
from .utils import nms, add_box_img, nms_worker
from torch.multiprocessing import Pool, Manager
def rpn_cross_entropy(input, target):
r"""
:param input: (15x15x5,2)
:param target: (15x15x5,)
:return:
"""
mask_ignore = target == -1
    mask_calcu = target != -1  # keep only anchors not labelled "ignore"
loss = F.cross_entropy(input=input[mask_calcu], target=target[mask_calcu])
return loss
def rpn_cross_entropy_balance(input, target, num_pos, num_neg, anchors, ohem_pos=None, ohem_neg=None):
r"""
:param input: (N,1125,2)
:param target: (15x15x5,)
:return:
"""
# if ohem:
# final_loss = rpn_cross_entropy_balance_parallel(input, target, num_pos, num_neg, anchors, ohem=True,
# num_threads=4)
# else:
loss_all = []
for batch_id in range(target.shape[0]):
min_pos = min(len(np.where(target[batch_id].cpu() == 1)[0]), num_pos)
min_neg = int(min(len(np.where(target[batch_id].cpu() == 1)[0]) * num_neg / num_pos, num_neg))
pos_index = np.where(target[batch_id].cpu() == 1)[0].tolist()
neg_index = np.where(target[batch_id].cpu() == 0)[0].tolist()
if ohem_pos:
if len(pos_index) > 0:
pos_loss_bid = F.cross_entropy(input=input[batch_id][pos_index],
target=target[batch_id][pos_index], reduction='none')
selected_pos_index = nms(anchors[pos_index], pos_loss_bid.cpu().detach().numpy(), min_pos)
pos_loss_bid_final = pos_loss_bid[selected_pos_index]
else:
pos_loss_bid = torch.FloatTensor([0]).cuda()
pos_loss_bid_final = pos_loss_bid
else:
pos_index_random = random.sample(pos_index, min_pos)
if len(pos_index) > 0:
pos_loss_bid_final = F.cross_entropy(input=input[batch_id][pos_index_random],
target=target[batch_id][pos_index_random], reduction='none')
else:
pos_loss_bid_final = torch.FloatTensor([0]).cuda()
if ohem_neg:
if len(pos_index) > 0:
neg_loss_bid = F.cross_entropy(input=input[batch_id][neg_index],
target=target[batch_id][neg_index], reduction='none')
selected_neg_index = nms(anchors[neg_index], neg_loss_bid.cpu().detach().numpy(), min_neg)
neg_loss_bid_final = neg_loss_bid[selected_neg_index]
else:
neg_loss_bid = F.cross_entropy(input=input[batch_id][neg_index],
target=target[batch_id][neg_index], reduction='none')
selected_neg_index = nms(anchors[neg_index], neg_loss_bid.cpu().detach().numpy(), num_neg)
neg_loss_bid_final = neg_loss_bid[selected_neg_index]
else:
if len(pos_index) > 0:
neg_index_random = random.sample(np.where(target[batch_id].cpu() == 0)[0].tolist(), min_neg)
neg_loss_bid_final = F.cross_entropy(input=input[batch_id][neg_index_random],
target=target[batch_id][neg_index_random], reduction='none')
else:
neg_index_random = random.sample(np.where(target[batch_id].cpu() == 0)[0].tolist(), num_neg)
neg_loss_bid_final = F.cross_entropy(input=input[batch_id][neg_index_random],
target=target[batch_id][neg_index_random], reduction='none')
loss_bid = (pos_loss_bid_final.mean() + neg_loss_bid_final.mean()) / 2
loss_all.append(loss_bid)
final_loss = torch.stack(loss_all).mean()
return final_loss
def rpn_smoothL1(input, target, label, num_pos=16, ohem=None):
r'''
:param input: torch.Size([1, 1125, 4])
:param target: torch.Size([1, 1125, 4])
label: (torch.Size([1, 1125]) pos neg or ignore
:return:
'''
loss_all = []
for batch_id in range(target.shape[0]):
min_pos = min(len(np.where(label[batch_id].cpu() == 1)[0]), num_pos)
if ohem:
pos_index = np.where(label[batch_id].cpu() == 1)[0]
if len(pos_index) > 0:
loss_bid = F.smooth_l1_loss(input[batch_id][pos_index], target[batch_id][pos_index], reduction='none')
sort_index = torch.argsort(loss_bid.mean(1))
loss_bid_ohem = loss_bid[sort_index[-num_pos:]]
else:
loss_bid_ohem = torch.FloatTensor([0]).cuda()[0]
loss_all.append(loss_bid_ohem.mean())
else:
pos_index = np.where(label[batch_id].cpu() == 1)[0]
pos_index = random.sample(pos_index.tolist(), min_pos)
if len(pos_index) > 0:
loss_bid = F.smooth_l1_loss(input[batch_id][pos_index], target[batch_id][pos_index])
else:
loss_bid = torch.FloatTensor([0]).cuda()[0]
loss_all.append(loss_bid.mean())
final_loss = torch.stack(loss_all).mean()
return final_loss
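if __name__ == '__main__':
    # Minimal CPU-only sketch (not from the original training code; run via
    # ``python -m`` from the package root because of the relative import above).
    # Labels of -1 are ignored by rpn_cross_entropy, 0/1 are negatives/positives;
    # the shapes below are toy-sized assumptions, not the real (15x15x5,) layout.
    logits = torch.randn(6, 2)
    labels = torch.tensor([1, 0, -1, 0, 1, -1])
    print('rpn_cross_entropy:', rpn_cross_entropy(logits, labels).item())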
|
# Names scores
import os
def cVal(m):
s = 0
for n in m:
if n != '"':
s += ord(n) - ord("A") + 1
return s
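# Worked example from the problem statement: COLIN -> 3 + 15 + 12 + 9 + 14 = 53,
# and as the 938th name in alphabetical order it contributes 938 * 53 = 49714.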
f = open(os.path.abspath("..") + "\\data\\P022.txt", "r")
names = f.readline().split(",")
f.close()
names.sort()
su = 0
for y in range(len(names)):
su += cVal(names[y]) * (y + 1)
print(su)
# 871198282
|
from .Textos import Textos
from .Opciones import OpcionesCat
from .Rectangulos import Rectangulos
from .Esquinas import DatosEsquinas
from .MovParabolico import *
from .Terreno import Terreno
from .Variables import *
from .Turnos import *
from .Tanques import *
from .IA import *
class Juego:
def juego(Panta,AnguloINPUT,VelocidadINPUT,mapa,turnoJugador):
        #Data block variables:
posiniX=0
posiniY=0
largoBloq = 200
altoBloq = 20
activoAngulo= False
activoVelocidad = False
Validar = False
botonamarillo = False
botonnaranja = False
botonmorado = False
Aux = 0
x = 0
jugador = jugador1
        rectAngulo= Rectangulos.rectangulo(posiniX+largoBloq/2-10, posiniY+1,largoBloq,altoBloq)#Blue angle box
        rectVelocidad= Rectangulos.rectangulo(posiniX+largoBloq/2-10, posiniY+22,largoBloq,altoBloq)#Blue velocity box
        #Main loop helper
if x == 0:
Terreno.dibuja_mapa(Panta,mapa)
DatosEsquinas.generarBloques(posiniX,posiniY,largoBloq,altoBloq, jugador)
            DatosEsquinas.textosEsquinas(AnguloINPUT,rectAngulo)#Red angle
            DatosEsquinas.textosEsquinas(VelocidadINPUT,rectVelocidad)#Red velocity
pygame.display.update()
Terreno.dibuja_mapa(Panta,mapa)
DatosEsquinas.generarBloques(posiniX,posiniY,largoBloq,altoBloq, jugador)
x += 1
salir = False
while salir!=True:
            # Draw the tanks' shot-selection rectangles
botonamarillo1 = Rectangulos.rectangulo(5,50,30,20)
botonnaranja1 = Rectangulos.rectangulo(40,50,30,20)
botonmorado1 = Rectangulos.rectangulo(75,50,30,20)
Rectangulos.dibujaRectangulos(Panta,Amarillo,botonamarillo1,0)
Rectangulos.dibujaRectangulos(Panta,Naranja,botonnaranja1,0)
Rectangulos.dibujaRectangulos(Panta,Morado,botonmorado1,0)
if Viento_Movimiento[0] == True:
Validar = True
Lanzamiento.Viento(Validar)
if Validar == True:
Rectangulos.dibujaRectangulos(Pant,Blanco,(largo[0]-60,20,50,40),0)
Textos.texto_pantalla_rect(str(Viento_Movimiento[0]),Textos.fuentes(None,30),Negro,Pant,largo[0]-50,30)
if jugador1.balas105 == 0 and jugador1.balasperforantes == 0 and jugador1.balas60 == 0:
jugador1.set_Estado(False)
if jugador2.balas105 == 0 and jugador2.balasperforantes == 0 and jugador2.balas60 == 0:
jugador2.set_Estado(False)
if jugador3.balas105 == 0 and jugador3.balasperforantes == 0 and jugador3.balas60 == 0:
jugador3.set_Estado(False)
if jugador4.balas105 == 0 and jugador4.balasperforantes == 0 and jugador4.balas60 == 0:
jugador4.set_Estado(False)
if jugador5.balas105 == 0 and jugador5.balasperforantes == 0 and jugador5.balas60 == 0:
jugador5.set_Estado(False)
if jugador6.balas105 == 0 and jugador6.balasperforantes == 0 and jugador6.balas60 == 0:
jugador6.set_Estado(False)
Contador = 0
for i in range(jugadores[0]+IAR[0]):
if i == 0 and jugador1.get_Estado() == True:
Contador += 1
if i == 1 and jugador2.get_Estado() == True:
Contador += 1
if i == 2 and jugador3.get_Estado() == True:
Contador += 1
if i == 3 and jugador4.get_Estado() == True:
Contador += 1
if i == 4 and jugador5.get_Estado() == True:
Contador += 1
if i == 5 and jugador6.get_Estado() == True:
Contador += 1
if Contador == 1:
Partida[0] = 1
Cantidad_Balas = 0
for i in range(jugadores[0]+IAR[0]):
if i == 0 and jugador1.get_Estado() == True and jugador1.balas105 == 0 and jugador1.balasperforantes == 0 and jugador1.balas60 == 0:
Cantidad_Balas += 1
if i == 1 and jugador2.get_Estado() == True and jugador2.balas105 == 0 and jugador2.balasperforantes == 0 and jugador2.balas60 == 0:
Cantidad_Balas += 1
if i == 2 and jugador3.get_Estado() == True and jugador3.balas105 == 0 and jugador3.balasperforantes == 0 and jugador3.balas60 == 0:
Cantidad_Balas += 1
if i == 3 and jugador4.get_Estado() == True and jugador4.balas105 == 0 and jugador4.balasperforantes == 0 and jugador4.balas60 == 0:
Cantidad_Balas += 1
if i == 4 and jugador5.get_Estado() == True and jugador5.balas105 == 0 and jugador5.balasperforantes == 0 and jugador5.balas60 == 0:
Cantidad_Balas += 1
if i == 5 and jugador6.get_Estado() == True and jugador6.balas105 == 0 and jugador6.balasperforantes == 0 and jugador6.balas60 == 0:
Cantidad_Balas += 1
if Cantidad_Balas == jugadores[0]+IAR[0]:
Partida[0] = 1
if(turnoJugador >= (jugadores[0]+IAR[0])):
turnoJugador = 0
if (Lista_Jugadores[turnoJugador] == 0):
if jugador1.get_Estado() == True:
Textos.texto_pantalla_rect(str(jugador1.kill),Textos.fuentes(None,34),Negro,Panta,220,20)
jugador = jugador1
else:
if(turnoJugador > (jugadores[0]+IAR[0])):
turnoJugador = 0
else:
turnoJugador += 1
if(turnoJugador > (jugadores[0]+IAR[0])):
turnoJugador = 0
if (Lista_Jugadores[turnoJugador] == 1):
if jugador2.get_Estado() == True:
Textos.texto_pantalla_rect(str(jugador2.kill),Textos.fuentes(None,34),Negro,Panta,220,20)
jugador = jugador2
else:
if(turnoJugador > (jugadores[0]+IAR[0])):
turnoJugador = 0
else:
turnoJugador += 1
if(turnoJugador > (jugadores[0]+IAR[0])):
turnoJugador = 0
if (Lista_Jugadores[turnoJugador] == 2):
if jugador3.get_Estado() == True:
Textos.texto_pantalla_rect(str(jugador3.kill),Textos.fuentes(None,34),Negro,Panta,220,20)
jugador = jugador3
else:
if(turnoJugador > (jugadores[0]+IAR[0])):
turnoJugador = 0
else:
turnoJugador += 1
if(turnoJugador > (jugadores[0]+IAR[0])):
turnoJugador = 0
if (Lista_Jugadores[turnoJugador] == 3):
if jugador4.get_Estado() == True:
Textos.texto_pantalla_rect(str(jugador4.kill),Textos.fuentes(None,34),Negro,Panta,220,20)
jugador = jugador4
else:
if(turnoJugador > (jugadores[0]+IAR[0])):
turnoJugador = 0
else:
turnoJugador += 1
if(turnoJugador > (jugadores[0]+IAR[0])):
turnoJugador = 0
if (Lista_Jugadores[turnoJugador] == 4):
if jugador5.get_Estado() == True:
Textos.texto_pantalla_rect(str(jugador5.kill),Textos.fuentes(None,34),Negro,Panta,220,20)
jugador = jugador5
else:
if(turnoJugador > (jugadores[0]+IAR[0])):
turnoJugador = 0
else:
turnoJugador += 1
if(turnoJugador > (jugadores[0]+IAR[0])):
turnoJugador = 0
if (Lista_Jugadores[turnoJugador] == 5):
if jugador6.get_Estado() == True:
Textos.texto_pantalla_rect(str(jugador6.kill),Textos.fuentes(None,34),Negro,Panta,220,20)
jugador = jugador6
else:
if(turnoJugador > (jugadores[0]+IAR[0])):
turnoJugador = 0
else:
turnoJugador += 1
if(turnoJugador > (jugadores[0]+IAR[0])):
turnoJugador = 0
if Partida[0] == 1:
for i in range(jugadores[0]+IAR[0]):
if i == 0:
Lista_Kills[0] = jugador1.kill
if i == 1:
Lista_Kills[1] = jugador2.kill
if i == 2:
Lista_Kills[2] = jugador3.kill
if i == 3:
Lista_Kills[3] = jugador4.kill
if i == 4:
Lista_Kills[4] = jugador5.kill
if i == 5:
Lista_Kills[5] = jugador6.kill
alto = Lista_Kills[0]
for i in range(jugadores[0]+IAR[0]):
if Lista_Kills[i] > alto:
alto = Lista_Kills[i]
for i in range(jugadores[0]+IAR[0]):
if alto == Lista_Kills[i]:
if i == 0:
jugador1.gana = True
if i == 1:
jugador2.gana = True
if i == 2:
jugador3.gana = True
if i == 3:
jugador4.gana = True
if i == 4:
jugador5.gana = True
if i == 5:
jugador6.gana = True
Gana = 0
for i in range(jugadores[0]+IAR[0]):
if i == 0:
if jugador1.gana == True:
jugador = jugador1
Gana += 1
if i == 1:
if jugador2.gana == True:
jugador = jugador2
Gana += 1
if i == 2:
if jugador3.gana == True:
jugador = jugador3
Gana += 1
if i == 3:
if jugador4.gana == True:
jugador = jugador4
Gana += 1
if i == 4:
if jugador5.gana == True:
jugador = jugador5
Gana += 1
if i == 5:
if jugador6.gana == True:
jugador = jugador6
Gana += 1
print(Gana,"hola ganador")
if Gana == 1:
Rectangulos.dibujaRectangulos(Panta, Amarillo, ((Panta.get_width()/2)-150,290,300,50),0)
Textos.texto_pantalla_rect("Ganó el color: ",Textos.fuentes(None,34),Negro,Panta,(Panta.get_width()/2)-150,305)
colorGanador = Rectangulos.rectangulo((Panta.get_width()/2)+80,300,30,30)
Rectangulos.dibujaRectangulos(Panta,jugador.color,colorGanador,0)
elif Gana == jugadores[0]+IAR[0]:
Rectangulos.dibujaRectangulos(Panta, Amarillo, ((Panta.get_width()/2)-200,290,400,50),0)
Textos.texto_pantalla_rect("Han empatado todos los jugadores",Textos.fuentes(None,34),Negro,Panta,(Panta.get_width()/2)-200,305)
elif Gana >= 2 and Gana < 6:
Rectangulos.dibujaRectangulos(Panta, Amarillo, ((Panta.get_width()/2)-150,290,300,50),0)
Textos.texto_pantalla_rect("Han empatado: ",Textos.fuentes(None,34),Negro,Panta,(Panta.get_width()/2)-100,305)
Textos.texto_pantalla_rect(str(Gana),Textos.fuentes(None,34),Negro,Panta,(Panta.get_width()/2)+80,305)
pygame.display.update()
pygame.time.delay(5000)
salir=True
iniciada = False
return iniciada
Turnos.stockbalas(jugador.get_Balas(),jugador.color)
DatosEsquinas.generarBloques(posiniX,posiniY,largoBloq,altoBloq,jugador)
            DatosEsquinas.textosEsquinas(AnguloINPUT,rectAngulo)#Angle
            DatosEsquinas.textosEsquinas(VelocidadINPUT,rectVelocidad)#Velocity
if(jugador1.get_Estado()==True):
Textos.texto_pantalla_rect(str(jugador1.get_Vida()),Textos.fuentes(None,23),Negro,Panta,jugador1.get_X(),jugador1.get_Y()-60)
if(jugador2.get_Estado()==True):
Textos.texto_pantalla_rect(str(jugador2.get_Vida()),Textos.fuentes(None,23),Negro,Panta,jugador2.get_X(),jugador2.get_Y()-60)
if(jugador3.get_Estado()==True):
Textos.texto_pantalla_rect(str(jugador3.get_Vida()),Textos.fuentes(None,23),Negro,Panta,jugador3.get_X(),jugador3.get_Y()-60)
if(jugador4.get_Estado()==True):
Textos.texto_pantalla_rect(str(jugador4.get_Vida()),Textos.fuentes(None,23),Negro,Panta,jugador4.get_X(),jugador4.get_Y()-60)
if(jugador5.get_Estado()==True):
Textos.texto_pantalla_rect(str(jugador5.get_Vida()),Textos.fuentes(None,23),Negro,Panta,jugador5.get_X(),jugador5.get_Y()-60)
if(jugador6.get_Estado()==True):
Textos.texto_pantalla_rect(str(jugador6.get_Vida()),Textos.fuentes(None,23),Negro,Panta,jugador6.get_X(),jugador6.get_Y()-60)
bala105,balaperfo,bala60 = jugador.get_Balas()
pygame.display.update()
if Partida[0] == 0:
for event in pygame.event.get():
                    #Detect the window close event
if event.type==pygame.QUIT:
                        #End the while loop and quit the game
salir=True
pygame.quit()
                    #Detect mouse clicks
if event.type == pygame.MOUSEBUTTONDOWN:
                        #For loop here
                        #Detect a click on the blue angle rectangle
if rectAngulo.collidepoint(event.pos):
activoAngulo= True
activoVelocidad = False
Angulo_Jugador[turnoJugador] = ''
                        #Detect a click on the blue velocity rectangle
elif rectVelocidad.collidepoint(event.pos):
activoVelocidad = True
activoAngulo = False
                        #Reset the variables to False if they were not clicked
elif botonamarillo1.collidepoint(event.pos):
if bala105 > 0:
botonamarillo = True
botonnaranja = False
botonmorado = False
else:
botonamarillo = False
botonnaranja = False
botonmorado = False
elif botonnaranja1.collidepoint(event.pos):
if balaperfo > 0:
botonamarillo= False
botonmorado = False
botonnaranja = True
else:
                                botonamarillo = botonnaranja = botonmorado = False
elif botonmorado1.collidepoint(event.pos):
if bala60 > 0:
botonamarillo = False
botonnaranja = False
botonmorado = True
else:
botonamarillo = False
botonnaranja = False
botonmorado = False
else:
activoAngulo = False
activoVelocidad = False
                    #Detect keyboard input
if event.type == pygame.KEYDOWN:
                        #If the blue angle rectangle was clicked, enable
                        # data entry for the angle
if activoAngulo == True:
                            #Detect the escape key to return to the menu
if event.key == K_ESCAPE:
salir=True
Pantaux = pygame.display.set_mode([(largoaux[0]),(anchoaux[0])])
iniciada = True
return iniciada
                            #Detect the enter key to finish data entry
elif event.key == pygame.K_RETURN:
pygame.display.update()
activoAngulo = False
                            #Detect the backspace key to delete
                            # when necessary
elif event.key == pygame.K_BACKSPACE:
                                #Deletes one character at a time from the user input in the
                                # blue angle box; deleting every character crashes the program
AnguloINPUT = AnguloINPUT[:-1]
DatosEsquinas.generarBloques(posiniX,posiniY,largoBloq,altoBloq, jugador)
                            #Otherwise it keeps appending every key
                            # pressed on the keyboard
elif pygame.K_0 <= event.key <= pygame.K_9:
                                #Make sure the variable is a string before appending the unicode character
AnguloINPUT=str(AnguloINPUT)
                                #Works like a character-by-character input,
                                # so any kind of character can be entered
AnguloINPUT += event.unicode
Angulo_Jugador[turnoJugador] = int(AnguloINPUT)
                        #If the blue velocity rectangle was clicked, enable
                        # data entry for the velocity
if activoVelocidad == True:
                            #Detect the escape key to return to the menu
if event.key == K_ESCAPE:
salir=True
Pantaux = pygame.display.set_mode([(largoaux[0]),(anchoaux[0])])
iniciada = True
return iniciada
                            #Detect the enter key to finish data entry
elif event.key == pygame.K_RETURN:
pygame.display.update()
activoVelocidad = False
                            #Detect the backspace key to delete
                            # when necessary
elif event.key == pygame.K_BACKSPACE:
                                #Deletes one character at a time from the user input in the
                                # blue velocity box; deleting every character crashes the program
VelocidadINPUT = VelocidadINPUT[:-1]
DatosEsquinas.generarBloques(posiniX,posiniY,largoBloq,altoBloq,jugador)
                            #Otherwise it keeps appending every key
                            # pressed on the keyboard
elif pygame.K_0 <= event.key <= pygame.K_9:
                                #Make sure the variable is a string before appending the unicode character
VelocidadINPUT=str(VelocidadINPUT)
                                #Works like a character-by-character input,
                                # so any kind of character can be entered
VelocidadINPUT += event.unicode
Velocidad_Jugador[turnoJugador] = int(VelocidadINPUT)
if event.key == K_SPACE:
if botonamarillo == True or botonnaranja == True or botonmorado == True:
                                if Velocidad_Jugador[turnoJugador] != '' and Angulo_Jugador[turnoJugador] != '':
Velocidad_Jugador[turnoJugador] = int(Velocidad_Jugador[turnoJugador])
if Velocidad_Jugador[turnoJugador] > 0 and Velocidad_Jugador[turnoJugador] < 201:
if Angulo_Jugador[turnoJugador] >= 0 and Angulo_Jugador[turnoJugador] <= 180:
jugador.set_Angulo(Angulo_Jugador[turnoJugador])
jugador.set_Velocidad(Velocidad_Jugador[turnoJugador])
Lanzamiento.lanzamiento(jugador,botonamarillo,botonnaranja,botonmorado,mapa)
VelocidadINPUT = ''
AnguloINPUT = ''
Angulo_Jugador[turnoJugador] = ''
Velocidad_Jugador[turnoJugador] = ''
botonamarillo = False
botonnaranja = False
botonmorado = False
turnoJugador+=1
if(turnoJugador >= (jugadores[0]+IAR[0])):
turnoJugador = 0
if Validar == True:
Viento_Movimiento[0] = True
else:
VelocidadINPUT = ''
AnguloINPUT = ''
Angulo_Jugador[turnoJugador] = ''
Velocidad_Jugador[turnoJugador] = ''
else:
VelocidadINPUT = ''
AnguloINPUT = ''
Angulo_Jugador[turnoJugador] = ''
Velocidad_Jugador[turnoJugador] = ''
else:
VelocidadINPUT = ''
AnguloINPUT = ''
Angulo_Jugador[turnoJugador] = ''
Velocidad_Jugador[turnoJugador] = ''
else:
botonamarillo = False
botonnaranja = False
botonmorado = False
Textos.texto_pantalla_rect("Por favor elija el color y luego presione espacio", Textos.fuentes(None, 40), Negro, Panta,largo[0]//250,100)
                        #Other keys
else:
                            #Detect the escape key to return to the menu
if event.key == K_ESCAPE:
Pantaux = pygame.display.set_mode([(largoaux[0]),(anchoaux[0])])
salir=True
iniciada = True
return iniciada
if jugador.IA == True:
IA.lanzamientoRobot(jugador)
Lanzamiento.lanzamiento(jugador,botonamarilloIA[0],botonnaranjaIA[0],botonmoradoIA[0],mapa)
turnoJugador+=1
if(turnoJugador >= (jugadores[0]+IAR[0])):
turnoJugador = 0
if Validar == True:
Viento_Movimiento[0] = True
|
#!/usr/bin/env python
#Script for adding new specs to an existing DB.
#2014, Korbinian Schweiger
import os
import sys
import backend
def addspec(name, value, workdir):
print workdir
lines = readDB(workdir)
print lines
for i in range(len(lines)):
print lines[i]
lines[i] = lines[i] + name +"$"+value+" "
print lines[i]
print lines
saveDB(workdir, lines)
def readDB(directory):
charset = sys.getfilesystemencoding()
lines = []
#read file "main.db" in the starting directory (dirs[0])
try:
open(os.path.join(directory+'.mimir/', 'main.db'), 'r')
except IOError:
print "The Database does not exist"
return False
with open(os.path.join(directory+'.mimir/', 'main.db'), 'r') as f:
input = f.read()
        #after this you have one string with all lines separated by "\n",
        #so split it! -> lines is a list with the lines from the read file
lines = input.split("\n")
lines.pop()
return lines
def saveDB(workdir, lines):
charset = sys.getfilesystemencoding()
with open(os.path.join(workdir+'.mimir/', 'main.db'), 'w+') as f:
for line in lines:
write_items = f.write(line)
write_items = f.write("\n")
def main(workdir):
print "Project Mimir: Script for adding new spacs to a old Database"
#workdir = raw_input("Input the directory of the DB: ")
newspecnum = int(raw_input("Input number of spec to be added? "))
DBspecs = backend.getcurrentspecs()
for i in range(newspecnum):
newspec = raw_input("Input new spec name: ")
if newspec in DBspecs:
newvalue = raw_input("Input the standardvalue for the new spec: ")
addspec(newspec,newvalue,workdir)
if __name__ == '__main__':
    workdir = raw_input("Input the directory of the DB: ")
    main(workdir)
|
# author: Navneel Singhal
# functionality: extraction, training and validation
import structure_analysis
import utility
from time import time
import pickle
import numpy as np
import random
classes = [
'Benign',
'Malware/Backdoor',
'Malware/Trojan',
'Malware/Trojandownloader',
'Malware/Trojandropper',
'Malware/Virus',
'Malware/Worm'
]
predict_filename = 'struct_temp_prediction_dump.sav'
feature_list_filename = 'struct_temp_feature_dump.sav'
test_predict_filename = 'struct_temp_test_prediction_dump.sav'
test_feature_list_filename = 'struct_temp_test_feature_list_filename_dump.sav'
list_of_filenames = []
model_filename = 'model_struct_parameters.sav'
def extract_features():
start_time = time()
predictions = []
feature_list = []
total = (utility.get_all('Static_Analysis_Data'))
print(len(total))
random.shuffle(total)
train_fraction = 0.90
train = total[:int(train_fraction * len(total))]
test = total[len(train):]
complete = 0
print("now working on train")
for fl in train:
w = -1
for i in range(len(classes)):
if fl.count(classes[i]) > 0:
w = i
break
assert(w != -1)
try:
feature_list.append(structure_analysis.get_feature_dict(fl))
predictions.append(w)
list_of_filenames.append(fl)
complete += 1
if complete % 50 == 0:
print (str(complete) + " done")
#print (fl)
if complete == 1000:
break
except:
w = 0
test_predictions = []
test_feature_list = []
print("now working on test")
complete = 0
for fl in test:
w = -1
for i in range(len(classes)):
if fl.count(classes[i]) > 0:
w = i
break
assert(w != -1)
try:
test_feature_list.append(structure_analysis.get_feature_dict(fl))
test_predictions.append(w)
complete += 1
if complete % 50 == 0:
print (str(complete) + " done")
#print (fl)
if complete == 1000:
break
except:
w = 0
print("len(predictions) = ", len(predictions))
print("len(feature_list) = ", len(feature_list))
print("len(test_predictions) = ", len(test_predictions))
print("len(test_feature_list) = ", len(test_feature_list))
pickle.dump(predictions, open(predict_filename, 'wb'))
pickle.dump(feature_list, open(feature_list_filename, 'wb'))
pickle.dump(test_predictions, open(test_predict_filename, 'wb'))
pickle.dump(test_feature_list, open(test_feature_list_filename, 'wb'))
end_time = time()
print ('String feature extraction complete in ' + str(end_time - start_time) + ' seconds')
def train():
start_time = time()
from sklearn.feature_extraction import FeatureHasher
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
feat = 300
h = FeatureHasher(n_features = feat)
X = h.transform(pickle.load(open(feature_list_filename, 'rb'))).toarray()
y = np.array(pickle.load(open(predict_filename, 'rb')))
import math
file_index = 0
print(X.shape)
print(len(list_of_filenames))
for x in X:
for w in x:
if math.isnan(w) or math.isinf(w):
print(list_of_filenames[file_index])
file_index += 1
clf = RandomForestClassifier()
clf.fit(X, y)
pickle.dump(clf, open(model_filename, 'wb'))
end_time = time()
print ('Training complete in ' + str(end_time - start_time) + ' seconds')
def test():
from sklearn.feature_extraction import FeatureHasher
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
feat = 300
h = FeatureHasher(n_features = feat)
start_time = time()
TX = h.transform(pickle.load(open(test_feature_list_filename, 'rb'))).toarray()
Ty = np.array(pickle.load(open(test_predict_filename, 'rb')))
clf = pickle.load(open(model_filename, 'rb'))
prediction_values = clf.predict(TX)
f = lambda x: 1 if x > 0 else 0
def fromiter(x):
return np.fromiter((f(xi) for xi in x), x.dtype)
prediction_values = fromiter(prediction_values)
Ty = fromiter(Ty)
print("features:", feat)
print("accuracy:", metrics.accuracy_score(prediction_values, Ty))
print("f1 score:", metrics.f1_score(prediction_values, Ty, average = 'micro'))
print("precision score:", metrics.precision_score(prediction_values, Ty, average = 'micro'))
print("recall score:", metrics.recall_score(prediction_values, Ty, average = 'micro'))
print("f1 score (macro):", metrics.f1_score(prediction_values, Ty, average = 'macro'))
print("precision score (macro):", metrics.precision_score(prediction_values, Ty, average = 'macro'))
print("recall score (macro):", metrics.recall_score(prediction_values, Ty, average = 'macro'))
print("prediction is", prediction_values.tolist())
print("y is", Ty.tolist())
end_time = time()
print ('Testing complete in ' + str(end_time - start_time) + ' seconds')
extract_features()
train()
test()
|
from psycopg2 import sql
import psycopg2.extras
class Query():
'''Helper class for PGStorage'''
def __init__(self, conn, schema, pkeys):
self._conn = conn
self._schema = schema
self._pkeys = pkeys
# Some "internal" informations are cached here
self._default_pkey_expr = {}
self._table_columns_data = {}
# DELETE, UPSERT, SELECT: main methods used by PGStorage.
def delete(self, name, pkey_val):
q = self._delete_sql(name)
args = {'pkey_val': pkey_val}
return self._run_query(q, args, False)
def upsert(self, name, data):
q = self._upsert_sql(name, data)
# DATA contains only changed columns, upsert uses all table columns
all_columns_data = {column_name: None for column_name in self.table_columns(name)}
all_columns_data.update(data)
return self._run_query(q, all_columns_data, False)
def select(self, name, cond={}):
q = self._select_sql(name, cond)
return self._run_query(q, cond, True)
# SQL EXECUTION
def _run_query(self, q, args, fetch_result):
cur = self._conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cur.execute(q, args)
if fetch_result:
data = cur.fetchall()
return [dict(x) for x in data]
# SQL QUERIES CREATION
def _delete_sql(self, name):
pkey_name = self._pkeys[name]
q = self._sql_template(name, 'delete')
q = q.format(table_schema=sql.Identifier(self._schema), table_name=sql.Identifier(name),
pkey_name=sql.Identifier(pkey_name))
return q.as_string(self._conn)
def _upsert_sql(self, name, data):
table_columns = self.table_columns(name)
# all table columns
all_columns = [sql.Identifier(column) for column in table_columns]
all_columns_sql = sql.SQL(', ').join(all_columns)
# Columns to be updated
updated_columns = [sql.Identifier(name) for name in data.keys()]
updated_columns_sql = sql.SQL(', ').join(updated_columns)
# Pkey name
pkey_name = self._pkeys[name]
# Values placeholders
all_values = [sql.Placeholder(column) for column in table_columns]
all_values_sql = sql.SQL(', ').join(all_values)
q = self._sql_template(name, 'upsert')
q = q.format(table_schema=sql.Identifier(self._schema), table_name=sql.Identifier(name),
pkey_name=sql.Identifier(pkey_name), all_columns=all_columns_sql,
updated_columns=updated_columns_sql, all_values=all_values_sql)
return q.as_string(self._conn)
def _select_sql(self, name, cond):
select = self._select_all_sql(name)
if cond:
where = self._where_sql(name, cond)
select = 'WITH a AS ({}) SELECT * FROM a {}'.format(select, where)
return select
def _select_all_sql(self, name):
q = self._sql_template(name, 'select')
q = q.format(table_schema=sql.Identifier(self._schema), table_name=sql.Identifier(name))
return q.as_string(self._conn)
def _where_sql(self, name, cond):
parts = []
for key, val in cond.items():
if type(val) is list:
template = sql.SQL('{} = ANY({})')
else:
template = sql.SQL('{} = {}')
template = template.format(sql.Identifier(key), sql.Placeholder(key))
parts.append(template)
where = sql.SQL('WHERE ') + sql.SQL(' AND ').join(parts)
return where.as_string(self._conn)
def _sql_template(self, resource_name, query_name):
return sql.SQL(query_str[query_name])
# Additional methods
def dump_table(self, table_name, column_name):
q = query_str['dump_table']
cur = self._conn.cursor()
cur.execute(sql.SQL(q).format(
sql.Identifier(self._schema),
sql.Identifier(table_name),
sql.Identifier(column_name)
))
return cur.fetchall()
def table_columns(self, table_name):
if table_name not in self._table_columns_data:
cur = self._conn.cursor()
cur.execute(query_str['get_table_columns'], (self._schema, table_name))
self._table_columns_data[table_name] = [x[0] for x in cur.fetchall()]
return self._table_columns_data[table_name]
def default_pkey_expr(self, table_name, column_name):
if table_name not in self._default_pkey_expr:
cur = self._conn.cursor()
cur.execute(query_str['get_default_pkey_expr'], (self._schema, table_name, column_name))
self._default_pkey_expr[table_name] = cur.fetchone()[0]
return self._default_pkey_expr[table_name]
# RAW SQL
query_str = {}
# UPSERT/SELECT/DELETE
query_str['upsert'] = '''
-- NOTE: this works with postgresql9.6 and newer,
-- earlier versions were not tested
WITH
row_data AS (
SELECT (ROW({all_values})::{table_schema}.{table_name}).*
)
INSERT INTO {table_schema}.{table_name} ({updated_columns})
SELECT {updated_columns}
FROM row_data
ON CONFLICT ({pkey_name}) DO UPDATE
SET ({updated_columns}) = (SELECT {updated_columns} FROM (SELECT EXCLUDED.*) excl)
'''
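# For illustration (hypothetical table): with schema "public", table "users"
# (primary key "id", columns id and name) and data {'id': 1, 'name': 'x'},
# the template above renders roughly as:
#   WITH row_data AS (SELECT (ROW(%(id)s, %(name)s)::"public"."users").*)
#   INSERT INTO "public"."users" ("id", "name")
#   SELECT "id", "name" FROM row_data
#   ON CONFLICT ("id") DO UPDATE
#   SET ("id", "name") = (SELECT "id", "name" FROM (SELECT EXCLUDED.*) excl)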
query_str['delete'] = '''
DELETE FROM {table_schema}.{table_name}
WHERE {pkey_name} = %(pkey_val)s
'''
query_str['select'] = '''
SELECT *
FROM {table_schema}.{table_name}
'''
# ADDITIONAL SQL
query_str['dump_table'] = '''
SELECT *
FROM {}.{}
ORDER BY {}
'''
query_str['get_table_columns'] = '''
SELECT column_name
FROM information_schema.columns
WHERE table_schema = %s
AND table_name = %s
ORDER BY ordinal_position
'''
query_str['get_default_pkey_expr'] = '''
SELECT column_default
FROM information_schema.columns
WHERE (table_schema, table_name, column_name) = (%s, %s, %s)
'''
|
##########################################################################
#
# Copyright (c) 2009, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import sys
import IECore
class TestIFFHairReader( unittest.TestCase ) :
def testConstruction( self ) :
r = IECore.Reader.create( "test/IECore/data/iffFiles/hairSystem.mchp" )
self.assert_( r.isInstanceOf( "IFFHairReader" ) )
self.assertEqual( type( r ), IECore.IFFHairReader )
self.assertEqual( r["fileName"].getValue().value, "test/IECore/data/iffFiles/hairSystem.mchp" )
def testRead( self ) :
r = IECore.Reader.create( "test/IECore/data/iffFiles/hairSystem.mchp" )
self.assertEqual( type( r ), IECore.IFFHairReader )
self.assertEqual( r.numHairs(), 64 )
self.assertEqual( len(r.frameTimes()), 4 )
expectedAttrNamesAndTypes = {
"P" : IECore.V3dVectorData,
"velocity" : IECore.V3dVectorData,
}
c = r.read()
self.assertEqual( type( c ), IECore.CurvesPrimitive )
self.assertEqual( len( c ), len( expectedAttrNamesAndTypes ) )
self.assertEqual( len( c.verticesPerCurve() ), r.numHairs() )
expectedDataLength = 0
for i in c.verticesPerCurve() :
self.assertEqual( i, 10 )
expectedDataLength += i
for i in expectedAttrNamesAndTypes.keys() :
self.assert_( i in c )
self.assertEqual( type( c[i].data ), expectedAttrNamesAndTypes[i] )
self.assertEqual( len( c[i].data ), expectedDataLength )
for p in c["P"].data :
self.assert_( abs( p.x ) < 16.123 )
self.assert_( abs( p.y ) < 13.222 )
self.assert_( abs( p.z ) < 19.440 )
def testMultiFrameFiles( self ) :
r = IECore.Reader.create( "test/IECore/data/iffFiles/hairSystem.mchp" )
self.assertEqual( len(r.frameTimes()), 4 )
self.assertEqual( r.numHairs(), 64 )
c = r.read()
self.assertEqual( type( c ), IECore.CurvesPrimitive )
self.assertEqual( len( c ), 2 )
self.assertEqual( len(c.verticesPerCurve()), r.numHairs() )
for p in c["P"].data :
self.assert_( abs( p.x ) < 16.123 )
self.assert_( abs( p.y ) < 13.222 )
self.assert_( abs( p.z ) < 19.440 )
r.parameters()['frameIndex'].setValue( 3 )
self.assertEqual( r.numHairs(), 64 )
expectedAttrNamesAndTypes = {
"P" : IECore.V3dVectorData,
"velocity" : IECore.V3dVectorData,
}
c = r.read()
self.assertEqual( type( c ), IECore.CurvesPrimitive )
self.assertEqual( len( c ), 2 )
self.assertEqual( len( c ), len( expectedAttrNamesAndTypes ) )
self.assertEqual( len( c.verticesPerCurve() ), r.numHairs() )
for i in expectedAttrNamesAndTypes.keys() :
self.assert_( i in c.keys() )
expectedDataLength = 0
for i in c.verticesPerCurve() :
self.assertEqual( i, 10 )
expectedDataLength += i
for i in expectedAttrNamesAndTypes.keys() :
self.assert_( i in c )
self.assertEqual( type( c[i].data ), expectedAttrNamesAndTypes[i] )
self.assertEqual( len( c[i].data ), expectedDataLength )
for p in c["P"].data :
self.assert_( abs( p.x ) < 16.140 )
self.assert_( abs( p.y ) < 13.253 )
self.assert_( abs( p.z ) < 19.456 )
def testConversion( self ) :
r = IECore.Reader.create( "test/IECore/data/iffFiles/hairSystem.mchp" )
self.assertEqual( type( r ), IECore.IFFHairReader )
r.parameters()["realType"].setValue( "float" )
r.parameters()['frameIndex'].setValue( 3 )
expectedAttrNamesAndTypes = {
"P" : IECore.V3fVectorData,
"velocity" : IECore.V3fVectorData,
}
c = r.read()
self.assertEqual( type( c ), IECore.CurvesPrimitive )
self.assertEqual( len( c ), len( expectedAttrNamesAndTypes ) )
expectedDataLength = 0
for i in c.verticesPerCurve() :
self.assertEqual( i, 10 )
expectedDataLength += i
for i in expectedAttrNamesAndTypes.keys() :
self.assert_( i in c )
self.assertEqual( type(c[i].data), expectedAttrNamesAndTypes[i] )
self.assertEqual( len( c[i].data ), expectedDataLength )
for p in c["P"].data :
self.assert_( abs( p.x ) < 16.140 )
self.assert_( abs( p.y ) < 13.253 )
self.assert_( abs( p.z ) < 19.456 )
def testSparseCacheFailure( self ) :
r = IECore.Reader.create( "test/IECore/data/iffFiles/sparseHairSystem.mchp" )
self.assertEqual( r.numHairs(), 64 )
self.assertRaises( RuntimeError, r.read )
def testFileNameChange( self ) :
"""Now Readers are Ops, the filename can be changed and read() can be called
again. So we need to check that that works."""
r = IECore.Reader.create( "test/IECore/data/iffFiles/hairSystem.mchp" )
self.assertEqual( type( r ), IECore.IFFHairReader )
r.parameters()["realType"].setValue( "float" )
self.assertEqual( r.numHairs(), 64 )
expectedAttrNamesAndTypes = {
"P" : IECore.V3fVectorData,
"velocity" : IECore.V3fVectorData,
}
c = r.read()
self.assertEqual( type( c ), IECore.CurvesPrimitive )
self.assertEqual( len( c ), len( expectedAttrNamesAndTypes ) )
expectedDataLength = 0
for i in c.verticesPerCurve() :
self.assertEqual( i, 10 )
expectedDataLength += i
for i in expectedAttrNamesAndTypes.keys() :
self.assert_( i in c )
self.assertEqual( type(c[i].data), expectedAttrNamesAndTypes[i] )
self.assertEqual( len( c[i].data ), expectedDataLength )
for p in c["P"].data :
self.assert_( abs( p.x ) < 16.123 )
self.assert_( abs( p.y ) < 13.222 )
self.assert_( abs( p.z ) < 19.440 )
r["fileName"].setValue( IECore.StringData( "test/IECore/data/iffFiles/sparseHairSystem.mchp" ) )
self.assertEqual( r.numHairs(), 64 )
self.assertRaises( RuntimeError, r.read )
r["fileName"].setValue( IECore.StringData( "test/IECore/data/iffFiles/hairSystem.mchp" ) )
r.parameters()['frameIndex'].setValue( 3 )
self.assertEqual( r.numHairs(), 64 )
expectedAttrNamesAndTypes = {
"P" : IECore.V3fVectorData,
"velocity" : IECore.V3fVectorData,
}
c = r.read()
self.assertEqual( type( c ), IECore.CurvesPrimitive )
self.assertEqual( len( c ), len( expectedAttrNamesAndTypes ) )
expectedDataLength = 0
for i in c.verticesPerCurve() :
self.assertEqual( i, 10 )
expectedDataLength += i
for i in expectedAttrNamesAndTypes.keys() :
self.assert_( i in c )
self.assertEqual( type(c[i].data), expectedAttrNamesAndTypes[i] )
self.assertEqual( len( c[i].data ), expectedDataLength )
for p in c["P"].data :
self.assert_( abs( p.x ) < 16.140 )
self.assert_( abs( p.y ) < 13.253 )
self.assert_( abs( p.z ) < 19.456 )
def testParameterTypes( self ) :
p = IECore.IFFHairReader()
self.assert_( p.resultParameter().isInstanceOf( "ObjectParameter" ) )
self.assertEqual( p.resultParameter().validTypes(), [IECore.TypeId.CurvesPrimitive] )
if __name__ == "__main__":
unittest.main()
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import packer
Packer = packer.Packer
setup(
name = Packer.name,
version = Packer.version,
description = 'A new type of package manager',
long_description = open('README.rst').read(),
url = 'http://github.com/kalail/packer',
author = 'Kashif Malik',
author_email = 'kashif@kalail.com',
license=open('LICENSE').read(),
packages = [
'packer'
],
install_requires=[
'requests',
'docopt',
],
entry_points = {
'console_scripts': ['packer=packer.command_line:main'],
},
zip_safe = False
)
|
# Problem description: https://www.hackerrank.com/challenges/ctci-is-binary-search-tree/problem
""" Node is defined as
class node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
"""
def check(node, min, max):
if node is None:
return True
if node.data <= min or node.data >= max:
return False
    # when recursing left, the lower bound is unchanged and the upper bound
    # becomes the parent's value (and symmetrically for the right subtree)
left_check = check(node.left, min, node.data)
right_check = check(node.right, node.data, max)
return left_check and right_check
def checkBST(root):
    return check(root, float('-inf'), float('inf'))
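if __name__ == '__main__':
    # Quick local sanity check (not part of the HackerRank submission); the node
    # class is re-declared here only because the judge normally provides it.
    class node:
        def __init__(self, data):
            self.data = data
            self.left = None
            self.right = None
    root = node(4)
    root.left, root.right = node(2), node(6)
    print(checkBST(root))  # True
    root.right.data = 3    # violates the BST property
    print(checkBST(root))  # False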
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/protobuf/bfc_memory_map.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/protobuf/bfc_memory_map.proto',
package='tensorflow',
syntax='proto3',
serialized_options=b'ZHgithub.com/tensorflow/tensorflow/tensorflow/go/core/core_protos_go_proto',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n-tensorflow/core/protobuf/bfc_memory_map.proto\x12\ntensorflow\"\x92\x01\n\x11MemAllocatorStats\x12\x12\n\nnum_allocs\x18\x01 \x01(\x03\x12\x14\n\x0c\x62ytes_in_use\x18\x02 \x01(\x03\x12\x19\n\x11peak_bytes_in_use\x18\x03 \x01(\x03\x12\x1a\n\x12largest_alloc_size\x18\x04 \x01(\x03\x12\x1c\n\x14\x66ragmentation_metric\x18\x05 \x01(\x02\"\xae\x01\n\x08MemChunk\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\x04\x12\x0c\n\x04size\x18\x02 \x01(\x03\x12\x16\n\x0erequested_size\x18\x03 \x01(\x03\x12\x0b\n\x03\x62in\x18\x04 \x01(\x05\x12\x0f\n\x07op_name\x18\x05 \x01(\t\x12\x16\n\x0e\x66reed_at_count\x18\x06 \x01(\x04\x12\x14\n\x0c\x61\x63tion_count\x18\x07 \x01(\x04\x12\x0e\n\x06in_use\x18\x08 \x01(\x08\x12\x0f\n\x07step_id\x18\t \x01(\x04\"\x8b\x01\n\nBinSummary\x12\x0b\n\x03\x62in\x18\x01 \x01(\x05\x12\x1a\n\x12total_bytes_in_use\x18\x02 \x01(\x03\x12\x1a\n\x12total_bytes_in_bin\x18\x03 \x01(\x03\x12\x1b\n\x13total_chunks_in_use\x18\x04 \x01(\x03\x12\x1b\n\x13total_chunks_in_bin\x18\x05 \x01(\x03\".\n\x08SnapShot\x12\x14\n\x0c\x61\x63tion_count\x18\x01 \x01(\x04\x12\x0c\n\x04size\x18\x02 \x01(\x03\"\xcd\x01\n\nMemoryDump\x12\x16\n\x0e\x61llocator_name\x18\x01 \x01(\t\x12+\n\x0b\x62in_summary\x18\x02 \x03(\x0b\x32\x16.tensorflow.BinSummary\x12#\n\x05\x63hunk\x18\x03 \x03(\x0b\x32\x14.tensorflow.MemChunk\x12\'\n\tsnap_shot\x18\x04 \x03(\x0b\x32\x14.tensorflow.SnapShot\x12,\n\x05stats\x18\x05 \x01(\x0b\x32\x1d.tensorflow.MemAllocatorStatsBJZHgithub.com/tensorflow/tensorflow/tensorflow/go/core/core_protos_go_protob\x06proto3'
)
_MEMALLOCATORSTATS = _descriptor.Descriptor(
name='MemAllocatorStats',
full_name='tensorflow.MemAllocatorStats',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='num_allocs', full_name='tensorflow.MemAllocatorStats.num_allocs', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bytes_in_use', full_name='tensorflow.MemAllocatorStats.bytes_in_use', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='peak_bytes_in_use', full_name='tensorflow.MemAllocatorStats.peak_bytes_in_use', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='largest_alloc_size', full_name='tensorflow.MemAllocatorStats.largest_alloc_size', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='fragmentation_metric', full_name='tensorflow.MemAllocatorStats.fragmentation_metric', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=62,
serialized_end=208,
)
_MEMCHUNK = _descriptor.Descriptor(
name='MemChunk',
full_name='tensorflow.MemChunk',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='address', full_name='tensorflow.MemChunk.address', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='size', full_name='tensorflow.MemChunk.size', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='requested_size', full_name='tensorflow.MemChunk.requested_size', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bin', full_name='tensorflow.MemChunk.bin', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='op_name', full_name='tensorflow.MemChunk.op_name', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='freed_at_count', full_name='tensorflow.MemChunk.freed_at_count', index=5,
number=6, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='action_count', full_name='tensorflow.MemChunk.action_count', index=6,
number=7, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='in_use', full_name='tensorflow.MemChunk.in_use', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='step_id', full_name='tensorflow.MemChunk.step_id', index=8,
number=9, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=211,
serialized_end=385,
)
_BINSUMMARY = _descriptor.Descriptor(
name='BinSummary',
full_name='tensorflow.BinSummary',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='bin', full_name='tensorflow.BinSummary.bin', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='total_bytes_in_use', full_name='tensorflow.BinSummary.total_bytes_in_use', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='total_bytes_in_bin', full_name='tensorflow.BinSummary.total_bytes_in_bin', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='total_chunks_in_use', full_name='tensorflow.BinSummary.total_chunks_in_use', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='total_chunks_in_bin', full_name='tensorflow.BinSummary.total_chunks_in_bin', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=388,
serialized_end=527,
)
_SNAPSHOT = _descriptor.Descriptor(
name='SnapShot',
full_name='tensorflow.SnapShot',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='action_count', full_name='tensorflow.SnapShot.action_count', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='size', full_name='tensorflow.SnapShot.size', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=529,
serialized_end=575,
)
_MEMORYDUMP = _descriptor.Descriptor(
name='MemoryDump',
full_name='tensorflow.MemoryDump',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='allocator_name', full_name='tensorflow.MemoryDump.allocator_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bin_summary', full_name='tensorflow.MemoryDump.bin_summary', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='chunk', full_name='tensorflow.MemoryDump.chunk', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='snap_shot', full_name='tensorflow.MemoryDump.snap_shot', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='stats', full_name='tensorflow.MemoryDump.stats', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=578,
serialized_end=783,
)
_MEMORYDUMP.fields_by_name['bin_summary'].message_type = _BINSUMMARY
_MEMORYDUMP.fields_by_name['chunk'].message_type = _MEMCHUNK
_MEMORYDUMP.fields_by_name['snap_shot'].message_type = _SNAPSHOT
_MEMORYDUMP.fields_by_name['stats'].message_type = _MEMALLOCATORSTATS
DESCRIPTOR.message_types_by_name['MemAllocatorStats'] = _MEMALLOCATORSTATS
DESCRIPTOR.message_types_by_name['MemChunk'] = _MEMCHUNK
DESCRIPTOR.message_types_by_name['BinSummary'] = _BINSUMMARY
DESCRIPTOR.message_types_by_name['SnapShot'] = _SNAPSHOT
DESCRIPTOR.message_types_by_name['MemoryDump'] = _MEMORYDUMP
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MemAllocatorStats = _reflection.GeneratedProtocolMessageType('MemAllocatorStats', (_message.Message,), {
'DESCRIPTOR' : _MEMALLOCATORSTATS,
'__module__' : 'tensorflow.core.protobuf.bfc_memory_map_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.MemAllocatorStats)
})
_sym_db.RegisterMessage(MemAllocatorStats)
MemChunk = _reflection.GeneratedProtocolMessageType('MemChunk', (_message.Message,), {
'DESCRIPTOR' : _MEMCHUNK,
'__module__' : 'tensorflow.core.protobuf.bfc_memory_map_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.MemChunk)
})
_sym_db.RegisterMessage(MemChunk)
BinSummary = _reflection.GeneratedProtocolMessageType('BinSummary', (_message.Message,), {
'DESCRIPTOR' : _BINSUMMARY,
'__module__' : 'tensorflow.core.protobuf.bfc_memory_map_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.BinSummary)
})
_sym_db.RegisterMessage(BinSummary)
SnapShot = _reflection.GeneratedProtocolMessageType('SnapShot', (_message.Message,), {
'DESCRIPTOR' : _SNAPSHOT,
'__module__' : 'tensorflow.core.protobuf.bfc_memory_map_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.SnapShot)
})
_sym_db.RegisterMessage(SnapShot)
MemoryDump = _reflection.GeneratedProtocolMessageType('MemoryDump', (_message.Message,), {
'DESCRIPTOR' : _MEMORYDUMP,
'__module__' : 'tensorflow.core.protobuf.bfc_memory_map_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.MemoryDump)
})
_sym_db.RegisterMessage(MemoryDump)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
import json
import logging
from hashlib import sha1
from django.conf import settings
from django.http import HttpResponse, JsonResponse
from rest_framework import status
from rest_framework.views import APIView
from pretix.api.models import ApiCall
logger = logging.getLogger(__name__)
class IdempotencyQueryView(APIView):
# Experimental feature, therefore undocumented for now
authentication_classes = ()
permission_classes = ()
def get(self, request, format=None):
idempotency_key = request.GET.get("key")
auth_hash_parts = '{}:{}'.format(
request.headers.get('Authorization', ''),
request.COOKIES.get(settings.SESSION_COOKIE_NAME, '')
)
auth_hash = sha1(auth_hash_parts.encode()).hexdigest()
if not idempotency_key:
return JsonResponse({
'detail': 'No idempotency key given.'
}, status=status.HTTP_404_NOT_FOUND)
try:
call = ApiCall.objects.get(
auth_hash=auth_hash,
idempotency_key=idempotency_key,
)
except ApiCall.DoesNotExist:
return JsonResponse({
'detail': 'Idempotency key not seen before.'
}, status=status.HTTP_404_NOT_FOUND)
if call.locked:
r = JsonResponse(
{'detail': 'Concurrent request with idempotency key.'},
status=status.HTTP_409_CONFLICT,
)
r['Retry-After'] = 5
return r
content = call.response_body
if isinstance(content, memoryview):
content = content.tobytes()
r = HttpResponse(
content=content,
status=call.response_code,
)
        for k, v in json.loads(call.response_headers).items():
r[k] = v
return r
|
from unittest import TestCase
from mock import MagicMock, patch
from tgt_grease.core import GreaseContainer, Configuration
from tgt_grease.enterprise.Model import KafkaSource
import time
import kafka
import multiprocessing as mp
class MockThread():
def __init__(self):
self.alive = False
self.daemon = False
self.is_alive_called = 0
self.start_called = 0
def is_alive(self):
self.is_alive_called += 1
return self.alive
def start(self):
self.start_called += 1
class TestKafka(TestCase):
def setUp(self):
self.ioc = GreaseContainer()
self.good_config = {"source": "kafka", "max_backlog": 20, "min_backlog": 5, "servers": ["server"], "topics": ["topic"]}
self.bad_config = {"source": "not kafka"}
self.mock_thread = MagicMock()
self.ks = KafkaSource()
@patch('tgt_grease.enterprise.Model.KafkaSource.validate_configs')
def test_run_bad_config(self, mock_validate):
mock_validate.return_value = False
self.assertFalse(self.ks.run(self.bad_config))
@patch('tgt_grease.enterprise.Model.KafkaSource.create_consumer_manager_thread')
@patch('tgt_grease.enterprise.Model.KafkaSource.validate_configs')
def test_run_good_config(self, mock_validate, mock_create):
mock_thread = MockThread()
mock_create.return_value = mock_thread
mock_validate.return_value = True
self.assertFalse(self.ks.run(self.good_config))
self.assertEqual(self.ks.configs, [self.good_config])
self.assertEqual(mock_thread.is_alive_called, 1)
@patch('tgt_grease.enterprise.Model.KafkaSource.get_configs')
@patch('tgt_grease.enterprise.Model.KafkaSource.create_consumer_manager_thread')
@patch('tgt_grease.enterprise.Model.KafkaSource.validate_configs')
def test_run_no_config(self, mock_validate, mock_create, mock_get_configs):
mock_get_configs.return_value = [self.good_config]*5
mock_thread = MockThread()
mock_create.return_value = mock_thread
mock_validate.return_value = True
self.assertFalse(self.ks.run())
self.assertEqual(self.ks.configs, [self.good_config]*5)
self.assertEqual(mock_thread.is_alive_called, 5)
@patch('tgt_grease.enterprise.Model.KafkaSource.get_configs')
@patch('tgt_grease.enterprise.Model.KafkaSource.create_consumer_manager_thread')
@patch('tgt_grease.enterprise.Model.KafkaSource.validate_configs')
def test_run_invalid_config(self, mock_validate, mock_create, mock_get_configs):
mock_get_configs.return_value = [self.good_config]*5
mock_thread = MockThread()
mock_create.return_value = mock_thread
mock_validate.return_value = False
self.assertFalse(self.ks.run())
self.assertEqual(self.ks.configs, [self.good_config]*5)
self.assertEqual(mock_thread.is_alive_called, 0)
@patch('tgt_grease.enterprise.Model.KafkaSource.create_consumer')
@patch('tgt_grease.enterprise.Model.KafkaSource.create_consumer_thread')
@patch('tgt_grease.enterprise.Model.KafkaSource.reallocate_consumers')
def test_consumer_manager(self, mock_reallocate, mock_create, mock_make):
mock_make.return_value = []
mock_thread = MockThread()
mock_create.return_value = (mock_thread, None)
self.assertFalse(self.ks.consumer_manager(self.ioc, self.good_config))
self.assertEqual(mock_thread.is_alive_called, 1)
self.assertEqual(mock_reallocate.call_count, 1)
self.assertEqual(mock_make.call_count, 1)
self.assertEqual(mock_create.call_count, 1)
@patch('tgt_grease.enterprise.Model.KafkaSource.create_consumer')
def test_consume_empty_consumer(self, mock_make):
        # It's hard to test something that is designed to run forever, so we test the case where the consumer is empty
mock_make.return_value = []
pipe1, pipe2 = mp.Pipe()
self.assertFalse(self.ks.consume(self.ioc, self.good_config, pipe1))
@patch('tgt_grease.enterprise.Model.KafkaSource.parse_message')
@patch('tgt_grease.enterprise.Model.KafkaSource.create_consumer')
def test_consume_kill_signal(self, mock_make, mock_parse):
        # It's hard to test something that is designed to run forever, so we test the pipe kill signal
mock_make.return_value = ["consumer"]
pipe1, pipe2 = mp.Pipe()
pipe1.send("STOP")
self.assertFalse(self.ks.consume(self.ioc, self.good_config, pipe2))
mock_parse.assert_not_called()
def test_sleep(self):
sleep_time = 1.
now = time.time()
self.ks.sleep(sleep_time)
wake = time.time()
self.assertTrue(wake-now >= sleep_time)
self.assertTrue(wake-now < sleep_time + .1)
def test_parse_message_key_present(self):
parse_config = {
"source": "kafka",
"key_aliases": {"a.b.c": "key"}
}
message = MagicMock()
        message.value = '{"a": {"b": {"c": "value"}}}'
expected = {"key": "value"}
self.assertEqual(self.ks.parse_message(self.ioc, parse_config, message), expected)
def test_parse_message_invalid_json(self):
parse_config = {
"source": "kafka",
"key_aliases": {"a.b.c": "key"}
}
message = MagicMock()
        message.value = '{"a": {"b": {"c": "value"'
expected = {}
self.assertEqual(self.ks.parse_message(self.ioc, parse_config, message), expected)
def test_parse_message_key_present_split(self):
parse_config = {
"source": "kafka",
"key_sep": "@@",
"key_aliases": {"a@@b@@c": "key"}
}
message = MagicMock()
        message.value = '{"a": {"b": {"c": "value"}}}'
expected = {"key": "value"}
self.assertEqual(self.ks.parse_message(self.ioc, parse_config, message), expected)
def test_parse_message_key_missing(self):
parse_config = {
"source": "kafka",
"key_aliases": {"a.b.c": "key"}
}
message = MagicMock()
        message.value = '{"a": {"b": {"d": "value"}}}'
expected = {}
self.assertEqual(self.ks.parse_message(self.ioc, parse_config, message), expected)
def test_parse_message_key_missing_split(self):
        parse_config = {
            "source": "kafka",
            "key_sep": "@@",
"key_aliases": {"a@@b@@c": "key"}
}
message = MagicMock()
        message.value = '{"a": {"b": {"d": "value"}}}'
expected = {}
self.assertEqual(self.ks.parse_message(self.ioc, parse_config, message), expected)
def test_parse_message_keys_present(self):
parse_config = {
"source": "kafka",
"key_aliases": {"a.b.c": "abc_key",
"a.b.d": "abd_key",
"aa": "aa_key"
}
}
message = MagicMock()
        message.value = '{"a": {"b": {"c": "cvalue", "d":"dvalue"}}, "aa": "aavalue"}'
expected = {"abc_key": "cvalue", "abd_key": "dvalue", "aa_key": "aavalue"}
self.assertEqual(self.ks.parse_message(self.ioc, parse_config, message), expected)
def test_parse_message_keys_missing(self):
parse_config = {
"source": "kafka",
"key_aliases": {"a.b.c": "abc_key",
"a.b.d": "abd_key",
"aa": "aa_key"
}
}
message = MagicMock()
        message.value = '{"a": {"b": {"c": "cvalue"}}, "aa": "aavalue"}'
expected = {}
self.assertEqual(self.ks.parse_message(self.ioc, parse_config, message), expected)
@patch('tgt_grease.enterprise.Model.KafkaSource.sleep')
@patch('tgt_grease.enterprise.Model.KafkaSource.kill_consumer_thread')
@patch('tgt_grease.enterprise.Model.KafkaSource.create_consumer_thread')
@patch('tgt_grease.enterprise.Model.KafkaSource.get_backlog')
def test_reallocate_consumers_kill(self, mock_backlog, mock_create, mock_kill, mock_sleep):
mock_backlog.return_value = 3
self.assertEqual(self.ks.reallocate_consumers(self.ioc, self.good_config, None, ["thread1", "thread2"]), -1)
mock_kill.assert_called_once_with(self.ioc, "thread1")
self.assertEqual(mock_backlog.call_count, 2)
mock_create.assert_not_called()
@patch('tgt_grease.enterprise.Model.KafkaSource.sleep')
@patch('tgt_grease.enterprise.Model.KafkaSource.kill_consumer_thread')
@patch('tgt_grease.enterprise.Model.KafkaSource.create_consumer_thread')
@patch('tgt_grease.enterprise.Model.KafkaSource.get_backlog')
def test_reallocate_consumers_kill_1thread(self, mock_backlog, mock_create, mock_kill, mock_sleep):
mock_backlog.return_value = 3
self.assertEqual(self.ks.reallocate_consumers(self.ioc, self.good_config, None, ["thread1"]), 0)
mock_kill.assert_not_called()
self.assertEqual(mock_backlog.call_count, 2)
mock_create.assert_not_called()
@patch('tgt_grease.enterprise.Model.KafkaSource.sleep')
@patch('tgt_grease.enterprise.Model.KafkaSource.kill_consumer_thread')
@patch('tgt_grease.enterprise.Model.KafkaSource.create_consumer_thread')
@patch('tgt_grease.enterprise.Model.KafkaSource.get_backlog')
def test_reallocate_consumers_create(self, mock_backlog, mock_create, mock_kill, mock_sleep):
mock_backlog.return_value = 21
mock_create.return_value = "new_thread"
threads = ["thread1"]
self.assertEqual(self.ks.reallocate_consumers(self.ioc, self.good_config, None, threads), 1)
mock_kill.assert_not_called()
self.assertEqual(mock_backlog.call_count, 2)
mock_create.assert_called_once_with(self.ioc, self.good_config)
self.assertTrue("new_thread" in threads)
@patch('tgt_grease.enterprise.Model.KafkaSource.sleep')
@patch('tgt_grease.enterprise.Model.KafkaSource.kill_consumer_thread')
@patch('tgt_grease.enterprise.Model.KafkaSource.create_consumer_thread')
@patch('tgt_grease.enterprise.Model.KafkaSource.get_backlog')
def test_reallocate_consumers_pass(self, mock_backlog, mock_create, mock_kill, mock_sleep):
mock_backlog.return_value = 10
self.assertEqual(self.ks.reallocate_consumers(self.ioc, self.good_config, None, ["thread1"]), 0)
mock_kill.assert_not_called()
mock_create.assert_not_called()
self.assertEqual(mock_backlog.call_count, 2)
@patch('tgt_grease.enterprise.Model.KafkaSource.sleep')
@patch('tgt_grease.enterprise.Model.KafkaSource.kill_consumer_thread')
@patch('tgt_grease.enterprise.Model.KafkaSource.create_consumer_thread')
@patch('tgt_grease.enterprise.Model.KafkaSource.get_backlog')
def test_reallocate_consumers_max_thread(self, mock_backlog, mock_create, mock_kill, mock_sleep):
mock_backlog.return_value = 30
threads = ["thread" for i in range(0, 32)]
self.assertEqual(self.ks.reallocate_consumers(self.ioc, self.good_config, None, threads), 0)
mock_kill.assert_not_called()
mock_create.assert_not_called()
self.assertEqual(mock_backlog.call_count, 2)
@patch('tgt_grease.enterprise.Model.KafkaSource.sleep')
@patch('tgt_grease.enterprise.Model.KafkaSource.kill_consumer_thread')
@patch('tgt_grease.enterprise.Model.KafkaSource.create_consumer_thread')
@patch('tgt_grease.enterprise.Model.KafkaSource.get_backlog')
def test_reallocate_consumers_max_thread_create(self, mock_backlog, mock_create, mock_kill, mock_sleep):
mock_backlog.return_value = 30
threads = ["thread" for i in range(0, 33)]
config = {"max_consumers": 35, "max_backlog":20, "min_backlog":5}
self.assertEqual(self.ks.reallocate_consumers(self.ioc, config, None, threads), 1)
mock_kill.assert_not_called()
mock_create.assert_called()
self.assertEqual(mock_backlog.call_count, 2)
@patch('tgt_grease.enterprise.Model.KafkaSource.sleep')
@patch('tgt_grease.enterprise.Model.KafkaSource.kill_consumer_thread')
@patch('tgt_grease.enterprise.Model.KafkaSource.create_consumer_thread')
@patch('tgt_grease.enterprise.Model.KafkaSource.get_backlog')
def test_reallocate_consumers_max_thread_pass(self, mock_backlog, mock_create, mock_kill, mock_sleep):
mock_backlog.return_value = 30
threads = ["thread" for i in range(0, 36)]
config = {"max_consumers": 35, "max_backlog":20, "min_backlog":5}
self.assertEqual(self.ks.reallocate_consumers(self.ioc, config, None, threads), 0)
mock_kill.assert_not_called()
mock_create.assert_not_called()
self.assertEqual(mock_backlog.call_count, 2)
@patch('tgt_grease.enterprise.Model.KafkaSource.sleep')
def test_kill_consumer_thread(self, mock_sleep):
conn1, conn2 = mp.Pipe()
self.ks.kill_consumer_thread(self.ioc, (None, conn1))
self.assertEqual(conn2.recv(), "STOP")
def test_get_backlog_happy(self):
mock_consumer = MagicMock()
for part_count in range(1, 10):
for start in range(0, 10):
for end in range(start, 10):
mock_partitions = ["part" + str(part_i) for part_i in range(part_count)] # assignment returns an array of TopicPartitions, but our mocked consumer works with just strings
mock_consumer.assignment.return_value = mock_partitions
mock_consumer.position.return_value = start
mock_consumer.end_offsets.return_value = {part:end for part in mock_partitions}
expected_average = end - start
res = self.ks.get_backlog(self.ioc, mock_consumer)
self.assertTrue(isinstance(res, float))
self.assertEqual(res, expected_average)
def test_get_backlog_not_assigned(self):
mock_consumer = MagicMock()
assigned = {"status": False}
def poll():
assigned["status"] = True
mock_consumer.poll.side_effect = poll
for part_count in range(1, 10):
for start in range(0, 10):
for end in range(start, 10):
mock_partitions = ["part" + str(part_i) for part_i in range(part_count)] # assignment returns an array of TopicPartitions, but our mocked consumer works with just strings
def assignment():
if assigned["status"]:
return mock_partitions
else:
return []
mock_consumer.assignment.side_effect = assignment
mock_consumer.position.return_value = start
mock_consumer.end_offsets.return_value = {part:end for part in mock_partitions}
expected_average = end - start
res = self.ks.get_backlog(self.ioc, mock_consumer)
self.assertTrue(isinstance(res, float))
self.assertEqual(res, expected_average)
def test_get_backlog_no_assign(self):
mock_consumer = MagicMock()
mock_consumer.assignment.return_value = []
self.assertEqual(self.ks.get_backlog(self.ioc, mock_consumer), -1.)
def test_get_backlog_position_error(self):
mock_consumer = MagicMock()
mock_consumer.assignment.return_value = ["part"]
mock_consumer.position.side_effect = kafka.errors.KafkaTimeoutError()
mock_consumer.end_offsets.return_value = {"part": 1}
self.assertEqual(self.ks.get_backlog(self.ioc, mock_consumer), -1.)
def test_get_backlog_end_offsets_error(self):
mock_consumer = MagicMock()
mock_consumer.assignment.return_value = ["part"]
mock_consumer.position.return_value = 1
mock_consumer.end_offsets.side_effect = kafka.errors.UnsupportedVersionError()
self.assertEqual(self.ks.get_backlog(self.ioc, mock_consumer), -1.)
@patch("tgt_grease.enterprise.Model.CentralScheduling.Scheduling.scheduleDetection")
def test_send_to_scheduling_happy(self, mock_scheduling):
config = {
"source": "kafka",
"name": "test_config"
}
mock_msg = {"a": "b"}
mock_scheduling.return_value = True
self.assertTrue(self.ks.send_to_scheduling(self.ioc, config, mock_msg))
mock_scheduling.assert_called_once_with("kafka", "test_config", mock_msg)
@patch("tgt_grease.enterprise.Model.CentralScheduling.Scheduling.scheduleDetection")
def test_send_to_scheduling_sad(self, mock_scheduling):
config = {
"source": "kafka",
"name": "test_config"
}
mock_msg = {"a": "b"}
mock_scheduling.return_value = False
self.assertFalse(self.ks.send_to_scheduling(self.ioc, config, mock_msg))
mock_scheduling.assert_called_once_with("kafka", "test_config", mock_msg)
@patch("threading.Thread")
def test_create_consumer_manager_thread(self, mock_thread):
mockp = MockThread()
mock_thread.return_value = mockp
self.assertEqual(self.ks.create_consumer_manager_thread(self.good_config), mockp)
self.assertEqual(mockp.is_alive_called, 0)
self.assertEqual(mockp.start_called, 1)
self.assertFalse(mockp.daemon)
@patch("threading.Thread")
def test_create_consumer_thread(self, mock_thread):
mockp = MockThread()
mock_thread.return_value = mockp
thread, pipe = self.ks.create_consumer_thread(self.ioc, self.good_config)
self.assertEqual(thread, mockp)
self.assertEqual(type(pipe), type(mp.Pipe()[0]))
self.assertEqual(mockp.is_alive_called, 0)
self.assertEqual(mockp.start_called, 1)
self.assertTrue(mockp.daemon)
def test_validate_configs_happy(self):
good_config = {
"name": "kafka_config",
"source": "kafka",
"key_aliases": {
"a*b*c": "abc_key",
"a*b*d": "abd_key"
},
"key_sep": "*", #opt
"max_consumers": 32, #opt
"topics": [
"topic1",
"topic2"
],
"servers": [
"server.target.com:1234"
],
"max_backlog": 200, #opt
"min_backlog": 100 #opt
}
self.assertTrue(self.ks.validate_configs([good_config]))
self.assertTrue(self.ks.validate_configs([good_config]*5))
def test_validate_configs_wrong_source(self):
config = {
"name": "kafka_config",
"source": "not kafka",
"key_aliases": {
"a*b*c": "abc_key",
"a*b*d": "abd_key"
},
"key_sep": "*", #opt
"max_consumers": 32, #opt
"topics": [
"topic1",
"topic2"
],
"servers": [
"server.target.com:1234"
],
"max_backlog": 200, #opt
"min_backlog": 100 #opt
}
self.assertFalse(self.ks.validate_configs([config]))
self.assertFalse(self.ks.validate_configs([config]*5))
def test_validate_configs_duplicate_aliases(self):
config = {
"name": "kafka_config",
"source": "kafka",
"key_aliases": {
"a*b*c": "key",
"a*b*d": "key"
},
"key_sep": "*", #opt
"max_consumers": 32, #opt
"topics": [
"topic1",
"topic2"
],
"servers": [
"server.target.com:1234"
],
"max_backlog": 200, #opt
"min_backlog": 100 #opt
}
self.assertFalse(self.ks.validate_configs([config]))
self.assertFalse(self.ks.validate_configs([config]*5))
def test_validate_configs_wrong_types(self):
config = {
"name": "kafka_config",
"source": "kafka",
"key_aliases": {
"a*b*c": "abc_key",
"a*b*d": "abd_key"
},
"key_sep": 11, #opt
"max_consumers": "32", #opt
"topics": [
"topic1",
"topic2"
],
"servers": [
"server.target.com:1234"
],
"max_backlog": 200, #opt
"min_backlog": 100 #opt
}
self.assertFalse(self.ks.validate_configs([config]))
self.assertFalse(self.ks.validate_configs([config]*5))
def test_validate_configs_key_missing(self):
config = {
"name": "kafka_config",
"source": "kafka",
"key_sep": "*", #opt
"max_consumers": 32, #opt
"topics": [
"topic1",
"topic2"
],
"servers": [
"server.target.com:1234"
],
"max_backlog": 200, #opt
"min_backlog": 100 #opt
}
self.assertFalse(self.ks.validate_configs([config]))
self.assertFalse(self.ks.validate_configs([config]*5))
def test_validate_configs_empty_keys(self):
config = {
"name": "kafka_config",
"source": "kafka",
"key_aliases": {},
"key_sep": "*", #opt
"max_consumers": 32, #opt
"topics": [],
"servers": [],
"max_backlog": 200, #opt
"min_backlog": 100 #opt
}
self.assertFalse(self.ks.validate_configs([config]))
self.assertFalse(self.ks.validate_configs([config]*5))
def test_validate_configs_no_opt(self):
config = {
"name": "kafka_config",
"source": "kafka",
"key_aliases": {
"a*b*c": "abc_key",
"a*b*d": "abd_key"
},
"topics": [
"topic1",
"topic2"
],
"servers": [
"server.target.com:1234"
]
}
self.assertTrue(self.ks.validate_configs([config]))
self.assertTrue(self.ks.validate_configs([config]*5))
|
import unittest
import unittest.mock as mock
from click.testing import CliRunner
from app import rps_game
def mock_computer_move(cls, *args, **kwargs):
return 0
class TestApp(unittest.TestCase):
def test_rps_game(self):
with mock.patch('random.randint', mock_computer_move):
runner = CliRunner()
result = runner.invoke(rps_game, input='3\nDenis\nrock\npaper\nscissors\n')
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
self.assertIn('Rounds [1]: 3', result.output)
self.assertIn('Denis moved with rock, Computer moved with rock.', result.output)
self.assertIn('Denis moved with paper, Computer moved with rock.', result.output)
self.assertIn('Denis moved with scissors, Computer moved with rock.', result.output)
self.assertIn('Denis:Computer - 1:1, 1 draw(s)', result.output)
|
""" Tests for server defaults
"""
from unittest.mock import Mock
from styler_rest_framework.helpers import aiohttp_defaults
class TestAddMiddlewares:
def test_add_middlewares(self):
app = Mock()
app.middlewares = []
aiohttp_defaults.add_middlewares(app)
assert len(app.middlewares) == 2
assert callable(app.middlewares[0])
assert callable(app.middlewares[1])
def test_add_middlewares_with_custom_error_handler(self):
app = Mock()
app.middlewares = []
error_handler = Mock()
aiohttp_defaults.add_middlewares(app, error_handler=error_handler)
assert len(app.middlewares) == 2
assert callable(app.middlewares[0])
assert callable(app.middlewares[1])
|
from __future__ import absolute_import
import numpy as np
import string
import re
from six.moves import range
def cleanzeros(values):
newlist = []
oldlist = values
while oldlist:
interval = oldlist[0:3]
if interval[0] != 0:
newlist += interval
oldlist = oldlist[3:]
return newlist
from pyparsing import (
Literal,Word,ZeroOrMore,Forward,nums,oneOf,Group,
alphas
)
def Syntax():
# op = Word("+",max=1)
op = "+"
restop = "/"
times = "x"
minsep = ":"
# restop = Word("/",max=1)
# times = Word("x",max=1)
# minsep = Word(":",max=1)
lpar = Literal( '(' ).suppress()
rpar = Literal( ')' ).suppress()
num = Word(nums)
timeordist = Group(num+":"+num) | num
ntimes = num+"x"
unit = Word(alphas)
interval = Group(timeordist+unit) # 5min
multipleinterval = Group(ntimes+interval) # 3x5min
    set_ = multipleinterval | interval # 5min or 3x5min (renamed to avoid shadowing the built-in set)
    intervalwithrest = Group(set_+"/"+interval) # 5min/3min or 3x5min/3min
    expr = Forward()
    atom = intervalwithrest | set_ | multipleinterval | interval | Group(lpar+expr+rpar)
bigset = Group(ntimes+atom) | atom
bigsetwithrest = Group(bigset+"/"+interval)
majorset = bigsetwithrest | bigset
expr << majorset + ZeroOrMore( "+" + expr )
return expr
def getinterval(l):
if len(l)==0:
return []
elif len(l)==2:
try:
value = int(l[0])
unit = l[1]
except TypeError:
valuemin = int(l[0][0])
valuesec = int(l[0][2])
value = 60*valuemin+valuesec
unit = 'sec'
return [value,unit,'work']
elif len(l)==3 and l[1] == '/':
a = getinterval(l[0])
b = getinterval(l[2])
b[2] = 'rest'
return a+b
elif len(l)==3 and l[1] == 'x':
u = []
for i in range(int(l[0])):
u+=getinterval(l[2])
return u
elif len(l)==1:
return getinterval(l[0])
else:
return getinterval(l[0])+getinterval(l[2:])
def pieceparse(v):
value = int(v[0])
unit = 'seconds'
if v[1] in ['meter','meters','m']:
unit = 'meters'
if v[1] in ['km','k','kilometer']:
value *= 1000
unit = 'meters'
if v[1] in ['min','minute','minutes',"'"]:
unit = 'seconds'
value *= 60
return [value,unit]
def getlist(s,sel='value'):
s1=s[0:3]
s2=s[3:]
if s2 != []:
if sel == 'value':
return [s1[0]]+getlist(s2,sel=sel)
if sel == 'unit':
return [s1[1]]+getlist(s2,sel=sel)
if sel == 'type':
return [s1[2]]+getlist(s2,sel=sel)
else:
if sel == 'value':
return [s[0]]
if sel == 'unit':
return [s[1]]
if sel == 'type':
return [s[2]]
return 0
def parse(s):
r = Syntax().parseString(s)
if len(r)==2:
res = getinterval(r)
elif len(r)==1:
res = getinterval(r)
else:
res = getinterval(r[0])+getinterval(r[2:])
xres = []
while len(res):
xres += pieceparse(res[0:2]) + [res[2]]
res = res[3:]
return xres
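# Illustrative usage (a sketch, not part of the original module): parse() flattens a
# workout description into a [value, unit, type, ...] list. For example,
# parse("3x500m/2min") should expand to three repetitions of a 500-meter work piece
# followed by a 120-second rest, roughly:
#   [500, 'meters', 'work', 120, 'seconds', 'rest',
#    500, 'meters', 'work', 120, 'seconds', 'rest',
#    500, 'meters', 'work', 120, 'seconds', 'rest']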
|
"""
This class represents one word belonging to the grammatical class 'Preposition'.
What's a Preposition?
===
A word governing, and usually preceding, a noun or pronoun and expressing a relation
to another word or element in the clause.
Prepositions are often used to express spatial or temporal relations (in, under, towards, before)
"""
from .word import Word
# pylint: disable=too-few-public-methods,missing-docstring,no-self-use
class Preposition(Word):
def has_plural(self):
return False
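# Minimal usage sketch (illustrative; assumes the Word base class can be constructed
# from a surface form, which is not shown in this file):
#   p = Preposition("under")
#   p.has_plural()  # -> False: prepositions have no plural form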
|
from mock import patch
from autodynatrace.wrappers.django.utils import get_host, get_request_uri, get_app_name
@patch("django.http.HttpRequest")
def test_get_host(django_request_mock):
django_request_mock.get_host = lambda: "myhost:80"
assert get_host(django_request_mock) == "myhost:80"
django_request_mock.get_host = lambda: None
django_request_mock.META = {"HTTP_HOST": "myhost:80"}
assert get_host(django_request_mock) == "myhost:80"
django_request_mock.META = {"SERVER_NAME": "myhost", "SERVER_PORT": 80}
assert get_host(django_request_mock) == "myhost:80"
django_request_mock.META = {}
assert get_host(django_request_mock) == "unknown"
django_request_mock = None
assert get_host(django_request_mock) == "unknown"
@patch("django.http.HttpRequest")
def test_get_request_uri(django_request_mock):
django_request_mock.get_host = lambda: "myhost:80"
django_request_mock.scheme = "http"
django_request_mock.path = "/path"
assert get_request_uri(django_request_mock) == "http://myhost:80/path"
@patch("django.http.HttpRequest")
def test_get_app_name(django_request_mock):
django_request_mock.path = "/path"
django_request_mock.META = {"SERVER_NAME": "myhost", "SERVER_PORT": 80}
assert get_app_name(django_request_mock) == "Django (myhost:80)"
|
import random
import numpy as np
from atcenv.definitions import Flight, Airspace
from shapely.geometry import Point, Polygon
def position_scramble(ac_point, probability, min_dist, max_dist, alt = 0):
    '''
    Input: a point, the probability that the point gets scrambled, and the min/max scramble
    distance. Output: a (possibly) scrambled position. Must be within airspace.'''
if min_dist > max_dist:
raise Exception('Minimum distance cannot be greater than maximum distance.')
if probability < 0 or probability > 1:
raise Exception('Probability must be between 0 and 1.')
    # First, roll against the probability to decide whether to scramble at all
probability_roll = random.random()
if probability_roll >= probability:
# Roll failed, return same position
return ac_point
else:
        # We scramble the position in a random direction
random_dir = random.random() * 360
# By a random magnitude
random_mag = min_dist + random.random() * (max_dist - min_dist)
# X direction
new_x = ac_point.x + random_mag * np.cos(np.deg2rad(random_dir))
new_y = ac_point.y + random_mag * np.sin(np.deg2rad(random_dir))
return Point(new_x, new_y)
def apply_wind(flight, intensity, track):
'''Apply the current wind to aircraft.'''
wind_dx = intensity * np.sin(np.deg2rad(track))
wind_dy = intensity * np.cos(np.deg2rad(track))
# Get the components of the aircraft
dx, dy = flight.components
return dx + wind_dx, dy + wind_dy
def apply_position_delay(flight, probability, max_delay, dt, dx, dy):
    # The rolled delay must fit within a single timestep
    if max_delay > dt:
        raise Exception('Maximum delay cannot be greater than dt.')
delay_roll = random.random()
position = flight.position
prev_dx = flight.prev_dx
prev_dy = flight.prev_dy
if delay_roll >= probability:
# Roll failed, return undelayed stuff
newx = position.x + dx * dt
newy = position.y + dy * dt
return newx, newy
else:
# We delay
random_delay = random.random() * max_delay
# We travel the previous speed for the delay amount, then
# the new speed for the remaining time
newx = position.x + (prev_dx * random_delay + dx * (dt - random_delay))
newy = position.y + (prev_dy * random_delay + dy * (dt - random_delay))
return newx, newy
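# Worked example (illustrative): with dt = 10 s and a rolled delay of 4 s, the aircraft
# keeps its previous velocity components (prev_dx, prev_dy) for 4 s and then uses the new
# components (dx, dy) for the remaining 6 s, e.g. newx = position.x + prev_dx * 4 + dx * 6.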
|
test = False
db_user = 'root'
db_password = ''
|
import engine.db_structure as db_py
import os
filename = "crud.vdb"
if os.path.isfile(filename):
os.remove(filename)
db = db_py.Database(False, filename)
def test_create():
    expected_table = db_py.Table()
    expected_table.name = "vadik_table"
    expected_table.fields = ["zhenya1", "zhenya2"]
    expected_table.fields_count = 2
    expected_table.types = [db_py.Type("int", 4), db_py.Type("str", 256)]
    expected_table.positions = {"row_id": 1, "zhenya1": 4, "zhenya2": 8}
    expected_table.row_length = 305
    result_table = db.create_table("vadik_table", {"zhenya1": "int", "zhenya2": "str"})
    assert expected_table == result_table
def test_show_create():
fields_names = ["zhenya1", "zhenya2"]
types_names = ["int", "str"]
fields = [
"'" + v + "' " + types_names[i]
for i, v in enumerate(fields_names)
]
table_name = "vadik_table"
    expected_string = "--------------------------------------------------------\n"
    expected_string += "Create table: \n"
    expected_string += "CREATE TABLE '" + table_name + "' ("
    expected_string += ", ".join(fields) + ")\n"
    expected_string += "--------------------------------------------------------"
    result_string = db.tables[0].show_create()
    assert result_string == expected_string
def test_insert():
db.tables[0].insert(["zhenya1", "zhenya2"], [5000, "b"])
db.tables[0].insert(["zhenya1", "zhenya2"], [929, "a"])
assert db.tables[0].count_rows() == 2
def test_delete():
db.tables[0].delete([db.tables[0].get_row_by_id(0).index_in_file])
assert db.tables[0].count_rows() == 1
db.tables[0].delete()
assert db.tables[0].count_rows() == 0
def test_update():
db.tables[0].insert(["zhenya1", "zhenya2"], [99, "test_string_123"])
assert db.tables[0].get_row_by_id(0).fields_values_dict["zhenya2"] == "test_string_123"
db.tables[0].update(["zhenya2"], [["lovetsov"]], [db.tables[0].get_row_by_id(0)])
assert db.tables[0].get_row_by_id(0).fields_values_dict["zhenya1"] == 99
assert db.tables[0].get_row_by_id(0).fields_values_dict["zhenya2"] == "lovetsov"
def test_select():
db.tables[0].insert(["zhenya1", "zhenya2"], [218, "vadik_vadik"])
max_id = db.tables[0].count_rows()
rows_list = []
for id in range(max_id):
rows_list.append(db.tables[0].get_row_by_id(id))
result_rows_1 = db.tables[0].select(db.tables[0].fields, rows_list)
assert len(result_rows_1) == 2
result_rows_2 = db.tables[0].select(["zhenya1"], [db.tables[0].get_row_by_id(1)])
assert len(result_rows_2) == 1
assert result_rows_2[0].fields_values_dict["zhenya1"] == 218
|
import snakypy.{{ cookiecutter.project_slug }} # noqa: F401
|
# -*- coding: utf-8 -*-
from flask import jsonify, make_response, request
import json
from app.api import bp, GET, YES
from app.lib.phonetize import phonetize2
@bp.route('/api/phonetize/<path:text>',
methods=[GET], strict_slashes=False)
def phonetize(text):
level = request.args.get('level')
syllables = request.args.get('syllables')
kwargs = {}
if level:
kwargs['level'] = int(level)
if syllables:
kwargs['syllables'] = syllables.lower() in YES
res = phonetize2(text, **kwargs)
return make_response(jsonify(res), 200)
|
import jwt
import datetime
import random
from flask import current_app as app
from core import db
from .models import Verifications as verify
# class Auth():
# def encode_auth_token(self, user_id):
# """
# Generates the Auth Token
# :return: string
# """
# try:
# payload = {
# 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=0, seconds=5),
# 'iat': datetime.datetime.utcnow(),
# 'sub': user_id
# }
# return jwt.encode(
# payload,
# app.config['SECRET_KEY'],
# algorithm='HS256'
# )
# except Exception as e:
# return e
# @staticmethod
# def decode_auth_token(auth_token):
# """
# Decodes the auth token
# :param auth_token:
# :return: integer|string
# """
# try:
# payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))
# return payload['sub']
# except jwt.ExpiredSignatureError:
# return 'Signature expired. Please log in again.'
# except jwt.InvalidTokenError:
# return 'Invalid token. Please log in again.'
class OTP():
    @staticmethod
    def generate_otp(phone):
        code = random.randint(111111, 999999)
        otp_data = verify(phone=phone, code=code)
        db.session.add(otp_data)
        db.session.commit()
    @staticmethod
    def cancel_code(phone, code):
        return
|
"""
Tests that C++ member and static variables are available where they should be.
"""
import lldb
from lldbtest import *
import lldbutil
class CPPThisTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipUnlessDarwin
#rdar://problem/9962849
#@expectedFailureClang
@dsym_test
def test_with_dsym_and_run_command(self):
"""Test that the appropriate member variables are available when stopped in C++ static, inline, and const methods"""
self.buildDsym()
self.static_method_commands()
#rdar://problem/9962849
@expectedFailureGcc # llvm.org/pr15439 The 'this' pointer isn't available during expression evaluation when stopped in an inlined member function.
@expectedFailureIcc # ICC doesn't emit correct DWARF inline debug info for inlined member functions
@dwarf_test
@expectedFlakeyClang(bugnumber='llvm.org/pr23012', compiler_version=['>=','3.6']) # failed with totclang - clang3.7
def test_with_dwarf_and_run_command(self):
"""Test that the appropriate member variables are available when stopped in C++ static, inline, and const methods"""
self.buildDwarf()
self.static_method_commands()
def setUp(self):
TestBase.setUp(self)
def set_breakpoint(self, line):
lldbutil.run_break_set_by_file_and_line (self, "main.cpp", line, num_expected_locations=1, loc_exact=False)
def static_method_commands(self):
"""Test that the appropriate member variables are available when stopped in C++ static, inline, and const methods"""
self.runCmd("file a.out", CURRENT_EXECUTABLE_SET)
self.set_breakpoint(line_number('main.cpp', '// breakpoint 1'))
self.set_breakpoint(line_number('main.cpp', '// breakpoint 2'))
self.set_breakpoint(line_number('main.cpp', '// breakpoint 3'))
self.set_breakpoint(line_number('main.cpp', '// breakpoint 4'))
self.runCmd("process launch", RUN_SUCCEEDED)
self.expect("expression -- m_a = 2",
startstr = "(int) $0 = 2")
self.runCmd("process continue")
# This would be disallowed if we enforced const. But we don't.
self.expect("expression -- m_a = 2",
startstr = "(int) $1 = 2")
self.expect("expression -- (int)getpid(); m_a",
startstr = "(int) $2 = 2")
self.runCmd("process continue")
self.expect("expression -- s_a",
startstr = "(int) $3 = 5")
self.runCmd("process continue")
self.expect("expression -- m_a",
startstr = "(int) $4 = 2")
if __name__ == '__main__':
    import atexit
    import unittest2
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
import pyglet
from .game import Game
pyglet.resource.path = ['../resources']
pyglet.resource.reindex()
def main():
game = Game(fullscreen=True)
game.run()
if __name__ == '__main__':
main()
|
from django.shortcuts import render, redirect
from django.views.generic.edit import FormView
from .forms import ExelForm
# from .exchange import DataFrame
import matplotlib.pyplot as plt
from scipy import stats
import os
class FileFieldView(FormView):
    form_class = ExelForm  # Form from forms.py used by the template
template_name = os.path.join("exelchange", "index.html") # Basic template
def get(self, request, **kwargs):
form = self.form_class()
check(request)
path = os.path.join('exelchange', 'static', 'media')
        path = os.path.join(path, request.COOKIES.get('sessionid'), 'graph.png')
cook = os.path.join(request.COOKIES['sessionid'], 'graph.png') if os.path.exists(path) else None
return render(request, os.path.join("exelchange", "index.html"), {'form': form,
'img': cook,
'result': request.session.get('result', '')},)
def post(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class) # Getting form for template
if form.is_valid():
x = list(map(float, request.POST['x_coord'].split()))
y = list(map(float, request.POST['y_coord'].split()))
appr = stats.linregress(x, y)
a = round(appr.slope, 4)
b = round(appr.intercept, 4)
y1 = [a * i + b for i in x]
            label = f'y = {a}*x {b}' if b < 0 else f'y = {a}*x + {b}'
result = [((float(i) - b) / a, float(i)) for i in request.POST['absorb'].split()]
request.session['result'] = result
fig = plt.figure(figsize=(15, 10))
ax = fig.add_subplot(111)
ax.set_title('Approximation curve',
fontsize=25,
color='black',
pad=10)
ax.plot(x, y, label='initial', color='black')
ax.plot(x, y1, label=label, color='orange')
for i, j in result:
ax.plot(i, j, 'o', color='green', markersize=20)
plt.legend(fontsize='xx-large')
plt.grid()
check(request)
fig.savefig(os.path.join('exelchange', 'static', 'media', request.COOKIES['sessionid'], 'graph.png'))
# print(request.COOKIES['result'])
return redirect(request.path)
def check(request):
    if not request.COOKIES.get('sessionid'):  # set the session cookie if it is missing
request.session.create()
request.COOKIES['sessionid'] = request.session.session_key
path = os.path.join('exelchange', 'static')
if not os.path.exists(path):
os.mkdir(path)
path = os.path.join(path, 'media')
if not os.path.exists(path):
os.mkdir(path)
    path = os.path.join(path, request.COOKIES.get('sessionid'))
if not os.path.exists(path):
os.mkdir(path)
|
"""
2019/12/2 21:43
134. [Python magic methods] Attribute access control magic methods
"""
"""
Attribute access control magic methods:
    __getattr__:
        Called when an attribute is accessed on an object and that attribute does not exist;
        the attribute name is passed in. If the attribute exists, __getattr__ is not called.
    __setattr__:
        Called whenever a value is assigned to an attribute of the object. Be careful not to
        use the self.xxx = xxx form inside this method, because that recurses. There are two
        ways to actually set the attribute:
        1) Use the magic attribute self.__dict__[name] = value.
        2) Use super().__setattr__(name, value) and rely on the base class to do the assignment.
    __getattribute__:
        Called on every attribute access, whether or not the attribute exists, so be careful
        about infinite recursion when implementing it. Use
        `super(ClassName, self).__getattribute__(item)` to avoid recursing forever; this method
        is only available on new-style classes, not on old-style classes.
"""
"""
Difference between __getattr__ and __getattribute__:
    1. __getattr__ is called only when the attribute does not exist.
    2. __getattribute__ is called whether or not the attribute exists.
"""
import logging
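# A minimal sketch of the difference described above (illustrative only; the Person
# class below is the full worked example from the original file):
# class Demo(object):
#     def __init__(self):
#         self.x = 1
#     def __getattr__(self, item):        # called only when `item` does not exist
#         return 'missing: %s' % item
#     def __getattribute__(self, item):   # called for every attribute access
#         return super(Demo, self).__getattribute__(item)
# d = Demo()
# d.x  # -> 1: __getattribute__ runs, __getattr__ is never called
# d.y  # -> 'missing: y': __getattribute__ raises AttributeError, then __getattr__ runs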
class Person(object):
def __init__(self, name):
self.name = name
self.age = 0
        self.is_adult = False  # TODO: whether this person is an adult
def __getattr__(self, item):
        # if item == 'age':
        #     return 18
        # else:
        #     raise AttributeError('%s attribute does not exist.' % item)
        """
        # 0.1 pname
        # 0.2 pname still works but logs a warning; from this version on the name attribute is recommended
        # 0.3 pname can no longer be used
        """
        if item == 'pname':
            logging.warning('pname still works, but this attribute will be dropped in the next version; from this version on, use the name attribute instead...')
            return self.name
        else:
            raise AttributeError('%s attribute does not exist.' % item)
def __setattr__(self, key, value):
# TODO: RecursionError: maximum recursion depth exceeded while calling a Python object
        # TODO: maximum recursion depth reached
# if key == 'name':
# self.name = 'zhiliao'
        # TODO: option 1: use the __dict__ magic attribute
# self.__dict__[key] = value
        # TODO: option 2: use super().__setattr__ and let the base class do the assignment
super(Person, self).__setattr__(key, value)
if key == 'age':
            # TODO: no recursion here, so the plain rewritten form below would also work
# if value >= 18:
# self.is_adult = True
# else:
# self.is_adult = False
# print(self.is_adult)
if value >= 18:
self.__dict__['is_adult'] = True
else:
self.__dict__['is_adult'] = False
def __getattribute__(self, item):
# TODO: RecursionError: maximum recursion depth exceeded while calling a Python object
        # TODO: maximum recursion depth reached
# return self.__dict__[item]
return super(Person, self).__getattribute__(item)
p1 = Person('zhiliao')
# print(p1.name) # TODO: zhiliao
# print(p1.age) # TODO: 18
# print(p1.country)  # TODO: AttributeError: country attribute does not exist.
# print(p1.pname) # TODO: zhiliao
p1.age = 19
print(p1.pname) # TODO: zhiliao
print(p1.age) # TODO: 19
print(p1.is_adult) # TODO: True
print(p1.abc)  # TODO: AttributeError: abc attribute does not exist.
|
from tkinter import Tk, Button
import pygame
pygame.init()
sound = r'alarm.mp3'
def alarm_ring_gui():
root = Tk()
    btn = Button(text="Turn off the alarm",
background="#f00",
foreground="#000",
activebackground='#00f',
activeforeground='#fff',
padx="20",
pady="20",
font="Arial 72",
command=root.destroy)
btn.pack()
pygame.mixer_music.load(sound)
pygame.mixer_music.play(5)
    root.title("Alarm clock")
root.mainloop()
pygame.mixer_music.stop()
if __name__ == '__main__':
alarm_ring_gui()
|
"""
Given a string, determine if it is a palindrome, considering only alphanumeric characters and ignoring cases.
Note: For the purpose of this problem, we define empty string as valid palindrome.
EX:
f("madam") yield True since reverse("madam") == "madam"
"""
def invert_str(s: str) -> str:
return s[::-1]
def is_palindrome(s: str) -> bool:
    # Keep only alphanumeric characters and ignore case, per the problem statement above.
    cleaned = "".join(ch.lower() for ch in s if ch.isalnum())
    return cleaned == invert_str(cleaned)
if __name__ == "__main__":
print(is_palindrome("madam")) # True
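    # Additional check (illustrative): punctuation and case are ignored, as described above.
    print(is_palindrome("A man, a plan, a canal: Panama"))  # True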
|
numbers = (1, 2, 3, 4)
print(numbers)
cheeses = ('swiss', 'cheddar','ricotta', 'gouda')
print(cheeses)
|
from __future__ import print_function
from __future__ import division
from past.utils import old_div
import supereeg as se
import glob
from supereeg.helpers import *
from scipy.stats import kurtosis, zscore
import os
## Not sure why these explicit imports of private helpers are needed:
from supereeg.helpers import _std, _gray, _resample_nii, _apply_by_file_index, _kurt_vals, _get_corrmat, _z2r, _r2z, \
_log_rbf, \
_timeseries_recon, _chunker, \
_corr_column, _normalize_Y, _near_neighbor, _vox_size, _count_overlapping, _resample, \
_nifti_to_brain, _brain_to_nifti, _to_log_complex, _to_exp_real, _logsubexp
from supereeg.model import _recover_model
locs = np.array([[-61., -77., -3.],
[-41., -77., -23.],
[-21., -97., 17.],
[-21., -37., 77.],
[-21., 63., -3.],
[ -1., -37., 37.],
[ -1., 23., 17.],
[ 19., -57., -23.],
[ 19., 23., -3.],
[ 39., -57., 17.],
[ 39., 3., 37.],
[ 59., -17., 17.]])
# number of timeseries samples
n_samples = 10
# number of subjects
n_subs = 3
# number of electrodes
n_elecs = 5
# full brain object to parse and compare
bo_full = se.simulate_bo(n_samples=10, sessions=2, sample_rate=10, locs=locs)
# create brain object from subset of locations
sub_locs = bo_full.locs.iloc[6:]
sub_data = bo_full.data.iloc[:, sub_locs.index]
bo = se.Brain(data=sub_data.values, sessions=bo_full.sessions, locs=sub_locs, sample_rate=10,
meta={'brain object locs sampled': 2})
# simulate correlation matrix
data = [se.simulate_model_bos(n_samples=10, locs=locs, sample_locs=n_elecs) for x in range(n_subs)]
# test model to compare
test_model = se.Model(data=data, locs=locs, rbf_width=100)
bo_nii = se.Brain(_gray(20))
nii = _brain_to_nifti(bo_nii, _gray(20))
a = np.array([[1,2,3],[4,5,6],[7,8,9,]])
b = np.array([[-1,2,2],[-4,5,5],[-7,8,8,]])
c = np.array([[ 0, 4, 5], [ 0, 10, 11],[ 0, 16, 17]])
add_log = _to_log_complex(a)
a_log = _to_log_complex(a)
b_log = _to_log_complex(b)
c_log = _to_log_complex(c)
add_log.real = np.logaddexp(a_log.real,b_log.real)
add_log.imag = np.logaddexp(a_log.imag,b_log.imag)
def test_std():
nii = _std(20)
assert isinstance(nii, se.Nifti)
def test_gray():
nii = _gray(20)
assert isinstance(nii, se.Nifti)
def test_resample_nii():
nii = _resample_nii(_gray(), 20, precision=5)
assert isinstance(nii, se.Nifti)
def test_apply_by_file_index():
def vstack_aggregate(prev, next):
return np.max(np.vstack((prev, next)), axis=0)
def kurtosis_xform(bo):
return kurtosis(bo.data)
max_kurtosis_vals = _apply_by_file_index(data[0], kurtosis_xform, vstack_aggregate)
assert isinstance(max_kurtosis_vals, np.ndarray)
def test_kurt_vals():
kurts_2 = _kurt_vals(data[0])
assert isinstance(kurts_2, np.ndarray)
# NOTE: This test won't run because apply_by_file_index calls kurtosis, but kurtosis doesn't support brain objects
# def test_kurt_vals_compare():
# def aggregate(prev, next):
# return np.max(np.vstack((prev, next)), axis=0)
#
# kurts_1 = _apply_by_file_index(data[0], kurtosis, aggregate)
# kurts_2 = _kurt_vals(data[0])
# assert np.allclose(kurts_1, kurts_2)
def test_logsubexp():
b_try = _to_exp_real(_logsubexp(c_log, a_log))
assert np.allclose(b_try, b)
def test_get_corrmat():
corrmat = _get_corrmat(data[0])
assert isinstance(corrmat, np.ndarray)
def test_z_score():
z_help = bo_full.get_zscore_data()
z = np.vstack(
(zscore(bo_full.get_data()[bo_full.sessions == 1]), zscore(bo_full.get_data()[bo_full.sessions == 2])))
assert np.allclose(z, z_help)
def test_int_z2r():
z = 1
test_val = old_div((np.exp(2 * z) - 1), (np.exp(2 * z) + 1))
input_val = _z2r(z)
assert isinstance(input_val, (float, int))
assert test_val == input_val
def test_array_z2r():
z = np.array([1, 2, 3])
test_val = old_div((np.exp(2 * z) - 1), (np.exp(2 * z) + 1))
test_fun = _z2r(z)
assert isinstance(test_fun, np.ndarray)
assert np.allclose(test_val, test_fun)
def _r2z_z2r():
z = np.array([1, 2, 3])
test_fun = _r2z(_z2r(z))
assert isinstance(test_fun, (int, np.ndarray))
assert z == test_fun
def test_int_r2z():
r = .1
test_val = 0.5 * (np.log(1 + r) - np.log(1 - r))
test_fun = _r2z(r)
assert isinstance(test_fun, (float, int))
assert test_val == test_fun
def test_array_r2z():
r = np.array([.1, .2, .3])
test_val = 0.5 * (np.log(1 + r) - np.log(1 - r))
test_fun = _r2z(r)
assert isinstance(test_fun, np.ndarray)
assert np.allclose(test_val, test_fun)
def test_log_rbf():
weights = _log_rbf(locs, locs[:10])
assert isinstance(weights, np.ndarray)
assert np.allclose(np.diag(weights), 0)
def test_tal2mni():
tal_vals = tal2mni(locs)
assert isinstance(tal_vals, np.ndarray)
def test_reconstruct():
recon_test = test_model.predict(bo, nearest_neighbor=False, force_update=True)
actual_test = bo_full.data.iloc[:, recon_test.locs.index]
# actual_test: the true data
# recon_test: the reconstructed data (using Model.predict)
corr_vals = _corr_column(actual_test.values, recon_test.data.values)
assert np.all(corr_vals[~np.isnan(corr_vals)] <= 1) and np.all(corr_vals[~np.isnan(corr_vals)] >= -1)
def test_filter_elecs():
bo_f = filter_elecs(bo)
assert isinstance(bo_f, se.Brain)
def test_corr_column():
X = np.matrix([[1, 2, 3], [1, 2, 3]])
corr_vals = _corr_column(np.array([[.1, .4], [.2, .5], [.3, .6]]), np.array([[.1, .4], [.2, .5], [.3, .6]]))
print(corr_vals)
assert isinstance(corr_vals, (float, np.ndarray))
def test_normalize_Y():
normed_y = _normalize_Y(np.array([[.1, .4], [.2, .5], [.3, .6]]))
assert isinstance(normed_y, pd.DataFrame)
assert normed_y.iloc[1][0] == 1.0
assert normed_y.iloc[1][1] == 2.0
def test_model_compile(tmpdir):
p = tmpdir.mkdir("sub")
for m in range(len(data)):
model = se.Model(data=data[m], locs=locs)
model.save(fname=os.path.join(p.strpath, str(m)))
model_data = glob.glob(os.path.join(p.strpath, '*.mo'))
mo = se.Model(model_data)
assert isinstance(mo, se.Model)
assert np.allclose(mo.numerator.real, test_model.numerator.real, equal_nan=True)
assert np.allclose(mo.numerator.imag, test_model.numerator.imag, equal_nan=True)
assert np.allclose(mo.denominator, test_model.denominator, equal_nan=True)
def test_timeseries_recon():
recon = _timeseries_recon(bo, test_model, 2)
assert isinstance(recon, np.ndarray)
assert np.shape(recon)[1] == np.shape(test_model.get_locs())[0]
def test_chunker():
chunked = _chunker([1,2,3,4,5], 2)
print(chunked)
assert isinstance(chunked, list)
assert chunked == [(1, 2), (3, 4), (5, None)]
def test_near_neighbor_auto():
new_bo = _near_neighbor(bo, test_model, match_threshold='auto')
assert isinstance(new_bo, se.Brain)
def test_near_neighbor_none():
new_bo = _near_neighbor(bo, test_model, match_threshold=0)
assert isinstance(new_bo, se.Brain)
def test_near_neighbor_int():
new_bo = _near_neighbor(bo, test_model, match_threshold=10)
assert isinstance(new_bo, se.Brain)
def test_vox_size():
v_size = _vox_size(test_model.locs)
assert isinstance(v_size, np.ndarray)
def test_count_overlapping():
bool_overlap = _count_overlapping(bo_full.get_locs(), bo.get_locs())
assert sum(bool_overlap)==bo.get_locs().shape[0]
assert isinstance(bool_overlap, np.ndarray)
def test_resample():
samp_data, samp_sess, samp_rate = _resample(bo, 8)
assert isinstance(samp_data, pd.DataFrame)
assert isinstance(samp_sess, pd.Series)
assert isinstance(samp_rate, list)
assert samp_rate==[8,8]
def test_nifti_to_brain():
b_d, b_l, b_h, affine = _nifti_to_brain(_gray(20))
assert isinstance(b_d, np.ndarray)
assert isinstance(b_l, np.ndarray)
assert isinstance(b_h, dict)
def test_brain_to_nifti():
nii = _brain_to_nifti(bo, _gray(20))
assert isinstance(nii, se.Nifti)
def test_bo_nii_bo():
nii = _brain_to_nifti(bo, _gray(20))
print(type(str(nii.header['descrip'])))
b_d, b_l, b_h, affine =_nifti_to_brain(nii)
assert np.allclose(bo.get_locs(), b_l)
def test_nii_bo_nii():
bo_nii = se.Brain(_gray(20))
nii = _brain_to_nifti(bo_nii, _gray(20))
nii_0 = _gray(20).get_data().flatten()
nii_0[np.isnan(nii_0)] = 0
assert np.allclose(nii_0, nii.get_data().flatten())
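# End-to-end sketch (illustrative, not an additional test): the module-level objects above
# already walk through the typical supereeg workflow -- simulate full-brain data, subsample
# electrodes, fit a correlation model, and reconstruct activity at unobserved locations.
def _example_reconstruction():
    reconstructed = test_model.predict(bo, nearest_neighbor=False)
    return reconstructed.get_data().shape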
|
# Copyright 2021 Norwegian University of Science and Technology.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pythreejs
from pythreejs import (
Group,
Mesh,
Geometry,
SphereGeometry,
BufferGeometry,
PlaneGeometry,
BufferAttribute,
Points,
PointsMaterial,
LineBasicMaterial,
MeshLambertMaterial,
MeshPhongMaterial,
MeshBasicMaterial,
)
from matplotlib.colors import to_hex
from collada import Collada
class Grid(Group):
def __init__(self, num_cells=5, color="#cccccc", linewidth=1, cellsize=0.5):
Group.__init__(self)
material = LineBasicMaterial(color=color, linewidth=linewidth)
for i in range(num_cells + 1):
edge = cellsize * num_cells / 2
position = edge - (i * cellsize)
geometry_h = Geometry(vertices=[(-edge, position, 0), (edge, position, 0)])
geometry_v = Geometry(vertices=[(position, -edge, 0), (position, edge, 0)])
self.add(pythreejs.Line(geometry=geometry_h, material=material))
self.add(pythreejs.Line(geometry=geometry_v, material=material))
class Ball(Mesh):
def __init__(self, color="red", radius=0.025):
Mesh.__init__(
self,
geometry=SphereGeometry(radius=radius),
material=MeshLambertMaterial(color=color),
)
class ColladaMesh(Group):
def __init__(self, filename, scale=1.0):
Group.__init__(self)
self._dae = Collada(filename)
self._load_mesh(self._dae, scale=scale)
def _load_mesh(self, dae, scale):
materials = self._load_material(dae)
for geometry in dae.geometries:
for primitive in geometry.primitives:
vertices = primitive.vertex[primitive.vertex_index] * scale
normals = primitive.normal[primitive.normal_index]
buffer_geometry = BufferGeometry(
attributes={
"position": BufferAttribute(array=vertices),
"normal": BufferAttribute(array=normals),
}
)
material = materials[primitive.material]
mesh = Mesh(geometry=buffer_geometry, material=material)
self.add(mesh)
def _load_material(self, dae):
materials = {}
for material in dae.materials:
name = material.id
color = to_hex(material.effect.diffuse)
specular = to_hex(material.effect.specular)
materials[name] = MeshPhongMaterial(
color=color, specular=specular, shininess=30
)
return materials
class Plane(Mesh):
def __init__(self, color="pink", transparent=False):
Mesh.__init__(
self, geometry=PlaneGeometry(), material=MeshBasicMaterial(color=color)
)
self.material.side = "DoubleSide"
if transparent:
self.material.transparent = transparent
self.material.opacity = 0.5
class Line(pythreejs.Line):
def __init__(self, start, end, color="white", linewidth=1):
geometry = BufferGeometry(
attributes={
"position": BufferAttribute(
np.vstack((start, end)).astype(np.float32), normalized=False
)
}
)
material = LineBasicMaterial(color=color, linewidth=linewidth)
pythreejs.Line.__init__(self, geometry=geometry, material=material)
class Triangle(Mesh):
def __init__(self, p1, p2, p3, color="yellow"):
geometry = BufferGeometry(
attributes={
"position": BufferAttribute(
np.vstack((p1, p2, p3)).reshape(3, 3).astype(np.float32),
normalized=False,
)
}
)
material = MeshBasicMaterial(color=color)
material.side = "DoubleSide"
Mesh.__init__(self, geometry=geometry, material=material)
class PointCloud(Points):
def __init__(self, points, point_size=0.001, color="green"):
geometry = BufferGeometry(
attributes={"position": BufferAttribute(array=points.astype(np.float32))}
)
material = PointsMaterial(size=point_size, color=color)
Points.__init__(self, geometry=geometry, material=material)
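# Usage sketch (illustrative; assumes a Jupyter environment with the pythreejs widget
# extension enabled). It wires the helpers above into a minimal interactive scene with a
# ground grid and a ball; the camera/lighting layout here is one reasonable choice, not
# part of the original module.
def make_example_scene(width=600, height=400):
    from pythreejs import Scene, PerspectiveCamera, DirectionalLight, AmbientLight, Renderer, OrbitControls

    camera = PerspectiveCamera(
        position=[2.0, 2.0, 2.0],
        up=[0, 0, 1],
        children=[DirectionalLight(color="white", position=[3, 5, 1], intensity=0.6)],
    )
    scene = Scene(
        children=[Grid(num_cells=10), Ball(color="red", radius=0.05), AmbientLight(intensity=0.5), camera]
    )
    return Renderer(
        camera=camera,
        scene=scene,
        controls=[OrbitControls(controlling=camera)],
        width=width,
        height=height,
    )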
|
import random
from scipy.stats import zipf
DEFAULT_SIZE = 1
def randint_array(low_bound, up_bound, size=None, seed=0):
"""
Get randint array
Args:
low_bound(int): lowest bound
up_bound(int): highest bound
size(int): size of array
seed(int): the seed of random
"""
size = size if size is not None else DEFAULT_SIZE
array = list()
    for index in range(size):
random.seed(index+seed)
array.append(random.randint(low_bound, up_bound))
return array
# def zipf_array(a, low_bound=None, up_bound=None, size=None, seed=100000):
# """
# Get random variables that satisfies Zipf law.
#
# Args:
# a(float): parameter of zipf law, > 1
# low_bound(int): lowest bound
# up_bound(int): highest bound
# size(int): size of array
# seed(int): the seed of random
# """
# size = size if size is not None else DEFAULT_SIZE
# counter, array = -1, list()
# while len(array) < size:
# counter += 1
# candidate = int(zipf.rvs(a, size=1, random_state=counter+seed))
# if low_bound and candidate < low_bound:
# continue
# if up_bound and candidate > up_bound:
# continue
# array.append(candidate)
# return array
def zipf_probabilities(a, low_bound=None, up_bound=None):
"""
Zipf probabilities
"""
candidates = range(low_bound+1, up_bound+1)
aggregation = sum([_ ** (-a) for _ in candidates])
probabilities = list()
for _ in candidates:
probabilities.append(_ ** (-a) / aggregation)
return probabilities
def probability_random(values, probabilities, seed=0):
"""
Generate probability random number
"""
random.seed(seed)
x = random.uniform(0, 1)
cumulative_probability = 0.0
for item, item_probability in zip(values, probabilities):
cumulative_probability += item_probability
if x < cumulative_probability:
break
item = locals().get('item')
return item
def zipf_array(a, low_bound=None, up_bound=None, size=None, seed=100000):
"""
Get random variables that satisfies Zipf law.
Args:
a(float): parameter of zipf law
low_bound(int): lowest bound
up_bound(int): highest bound
size(int): size of array
seed(int): the seed of random
"""
values = range(low_bound+1, up_bound+1)
probabilities = zipf_probabilities(a, low_bound=low_bound, up_bound=up_bound)
size = size if size is not None else DEFAULT_SIZE
counter, array = -1, list()
while len(array) < size:
counter += 1
candidate = probability_random(values, probabilities, seed=(counter + seed))
array.append(candidate)
return array
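# Usage sketch (illustrative): draw a reproducible Zipf-distributed sample between the
# bounds; values near low_bound occur far more often than values near up_bound.
if __name__ == '__main__':
    sample = zipf_array(a=1.5, low_bound=0, up_bound=100, size=20, seed=42)
    print(sample)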
|
from app.db import db
class Transaction(db.Model):
__tablename__ = 'transactions'
id = db.Column(
db.Integer,
primary_key = True,
autoincrement = True
)
    user_id = db.Column(
        db.Integer,
        db.ForeignKey('users.id'),
        nullable = False
    )
    shop_id = db.Column(
        db.Integer,
        db.ForeignKey('shops.id'),
        nullable = False
    )
    payment_method = db.Column(
        db.String(50),
        nullable = False
    )
    pos_ref_number = db.Column(
        db.String(100)
    )
    status = db.Column(
        db.String(50),
        server_default="PENDING"
    )
|
# Generated by Django 2.2.1 on 2019-05-23 03:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0009_auto_20180316_1758'),
]
operations = [
migrations.AlterField(
model_name='persona',
name='info_contacto',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.Contacto', verbose_name='contacto'),
),
]
|
import pytest
from polygon import Polygon
def test_polygon_class():
points = [(0, 0), (2, 0), (2, 2), (0, 2)]
polygon = Polygon(points)
assert isinstance(polygon, Polygon)
assert isinstance(polygon.points, list)
assert polygon.points == points
def test_area():
points = [(0, 0), (2, 0), (2, 2), (0, 2)]
polygon = Polygon(points)
with pytest.raises(NotImplementedError) as e:
polygon.area()
assert str(e.value) == 'Method area should be defined in the child class.'
def test_perimeter():
points = [(0, 0), (2, 0), (2, 2), (0, 2)]
polygon = Polygon(points)
assert polygon.perimeter() == 8
def test_polygon_empty_points():
points = []
polygon = Polygon(points)
assert isinstance(polygon, Polygon)
assert isinstance(polygon.points, list)
assert polygon.points == points
assert polygon.perimeter() == 0
def test_polygon_2_points():
points = [(0, 0), (10, 9)]
polygon = Polygon(points)
assert isinstance(polygon, Polygon)
assert isinstance(polygon.points, list)
assert polygon.points == points
assert polygon.perimeter() == 0
|
import json
import spacy
from sponsors_detector.process_text import find_video_sponsors
from util.util import (get_model_path, is_video_processed,
process_sponsors, add_new_watch_event)
HEADERS = {'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Headers': 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token',
'Access-Control-Allow-Credentials': 'true',
'Content-Type': 'application/json'
}
model = get_model_path("en_core_web_sm-3.0.0")
nlp = spacy.load(model)
def lambda_handler(event, context):
userId = event.get('userId')
videoId = event.get('youtubeVideoId')
timestamp = event.get('timestamp')
if not userId or not videoId or not timestamp:
return {
'statusCode': 400,
'headers': HEADERS
}
if is_video_processed(videoId) == "true":
add_new_watch_event(userId, videoId, timestamp)
return {
'statusCode': 200,
'headers': HEADERS
}
result = find_video_sponsors(videoId, nlp)
if result:
process_sponsors(result, videoId, userId, timestamp)
return {
"statusCode": 200,
"body": json.dumps(f"Sponsorships: {result.get('sponsorships', [])}")
}
|
from .discrete import Discrete
|
from .settings import * # noqa
import os
dbhost = os.environ['PGHOST']
dbname = os.environ['PGDATABASE']
dbuser = os.environ['PGUSER']
dbpassword = os.environ['PGPASSWORD']
dbport = os.environ['PGPORT']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': dbname,
'USER': dbuser,
'PASSWORD': dbpassword,
'HOST': dbhost,
'PORT': dbport,
}
}
|
"""Frontend URL Configuration
path / load react app
"""
from django.conf.urls import url
from django.urls import path
from .views import index
urlpatterns = [
path('', index, name='index'),
url(r'^(?!(api|docs)).*$', index),
]
|
some_list = ['a', 'b', 'c', 'b', 'd', 'm', 'n', 'n']
some_dictionary = {}
for element in some_list:
if element not in some_dictionary:
some_dictionary[element] = 1
else:
some_dictionary[element] += 1
for key, value in some_dictionary.items():
if value > 1:
print(key, end=' ')
# another way
duplicates = []
for x in some_list:
if some_list.count(x) > 1 and x not in duplicates:
duplicates.append(x)
print()
print(duplicates)
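# Equivalent sketch using collections.Counter: the counting loop above collapses into a
# single Counter call, then a filter for items seen more than once.
from collections import Counter
counts = Counter(some_list)
print([item for item, count in counts.items() if count > 1])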
|
expected_output = {
"GigabitEthernet0/0/1": {
"service_policy": {
"input": {"policy_name": {"TEST": {}}},
"output": {"policy_name": {"TEST2": {}}},
}
},
"TenGigabitEthernet0/3/0.41": {
"service_policy": {
"output": {
"policy_name": {
"VLAN51_QoS": {
"class_map": {
"VLAN51_QoS": {
"match_evaluation": "match-all",
"packets": 0,
"bytes": 0,
"rate": {
"interval": 300,
"offered_rate_bps": 0,
"drop_rate_bps": 0,
},
"match": ["access-group name VLAN51_QoS"],
"queueing": True,
"queue_limit_packets": "64",
"queue_depth": 0,
"total_drops": 0,
"no_buffer_drops": 0,
"pkts_output": 0,
"bytes_output": 0,
"shape_type": "average",
"shape_cir_bps": 80000,
"shape_bc_bps": 320,
"shape_be_bps": 0,
"target_shape_rate": 80000,
"police": {
"conformed": {
"packets": 0,
"bytes": 0,
"actions": {"transmit": True},
"bps": 0,
},
"exceeded": {
"packets": 0,
"bytes": 0,
"actions": {"transmit": True},
"bps": 0,
},
"violated": {
"packets": 0,
"bytes": 0,
"actions": {"drop": True},
"bps": 0,
},
},
}
}
}
}
}
}
},
}
|
from wrapper import SeleniumWrapper
from selenium.webdriver.common.by import By
from datetime import datetime
import time
import re
class MeetingHandler:
def __init__(self, driver):
self.selenium = SeleniumWrapper(driver)
self.current_meeting = {}
def check_ongoing_meeting(self):
if self.selenium.wait_for_presence(locator=(By.ID, "hangup-button"), timeout=5):
return True
return False
def prepare_calendar(self):
calendar_button = self.selenium.wait_for_clickable(
locator=(By.CSS_SELECTOR, "[aria-label='Calendar Toolbar']"),
timeout=5
)
if not calendar_button: raise Exception("[-] Unable to find Calendar button. Make sure the browser window is maximized and it is visible in the navigation bar.")
calendar_button.click()
view_list_button = self.selenium.wait_for_clickable(
locator=(By.CSS_SELECTOR, "button[title='Switch your calendar view']"),
timeout=5
)
if not view_list_button: raise Exception("[-] Unable to switch to day view.")
view_list_button.click()
time.sleep(1)
day_view_button = self.selenium.wait_for_presence(
locator=(By.CSS_SELECTOR, "button[aria-label='Day view']"),
timeout=5
)
if not day_view_button: raise Exception("[-] Unable to switch to day view.")
day_view_button.click()
def get_next_meeting_element(self):
# All meeting card elements contain "from <> to <>" in their title attributes
available_meetings = self.selenium.wait_for_presence_all(
locator=(By.CSS_SELECTOR, "[title*='from'"), timeout=5
)
if not available_meetings: raise Exception("[-] Could not find any meetings on today's calendar")
for meeting in available_meetings:
result = re.search(".*from (.*?) to (.*?)$", meeting.get_attribute("title"))
meeting_start = datetime.strptime(result.group(1), "%I:%M %p").time()
meeting_end = datetime.strptime(result.group(2), "%I:%M %p").time()
current_time = datetime.now().time()
if meeting_start <= current_time < meeting_end:
return {
"ele": meeting,
"title": meeting.get_attribute("title"),
"starts_at": meeting_start,
"ends_at": meeting_end
}
return {}
def attend_new_meeting(self):
self.current_meeting.get("ele").click()
# Click on "Join" button
join_button = self.selenium.wait_for_presence(
locator=(By.CSS_SELECTOR, "[data-tid='calv2-peek-join-button']"),
timeout=5
)
if not join_button: raise Exception("[-] Could not find Join button after clicking meeting card. Make sure it is visible on the screen.")
join_button.click()
# Click on "Continue without audio or video" button if it appears
print("[+] Checking for appearance of 'Continue without audio or video' button")
continue_without_audio_button = self.selenium.wait_for_presence(
locator=(By.CSS_SELECTOR, "[ng-click='getUserMedia.passWithoutMedia()']"),
timeout=3
)
if continue_without_audio_button: continue_without_audio_button.click()
# Turn off mic
print("[+] Turning off mic (if not already off)")
mic_off_button = self.selenium.wait_for_presence(
locator=(By.CSS_SELECTOR, "toggle-button[title='Mute microphone']"),
timeout=3
)
if mic_off_button: mic_off_button.click()
# Turn off webcam
print("[+] Turning off webcam (if not already off)")
webcam_off_button = self.selenium.wait_for_presence(
locator=(By.CSS_SELECTOR, "toggle-button[title='Turn camera off']"),
timeout=3
)
if webcam_off_button: webcam_off_button.click()
# Join meeting
print("[+] Joining meeting")
pre_join_button = self.selenium.wait_for_clickable(
locator=(By.CSS_SELECTOR, "button[aria-label='Join the meeting']"),
timeout=3
)
if not pre_join_button: raise Exception("[-] Could not find Join button. Make sure it is visibile on the screen.")
pre_join_button.click()
def end_meeting(self):
hangup_button = self.selenium.wait_for_clickable(locator=(By.ID, "hangup-button"), timeout=5)
if not hangup_button: raise Exception("[-] Could not find hangup button to exit meeting. Make sure it is visible on the screen, or exit the meeting manually.")
hangup_button.click()
def handle(self):
if not self.check_ongoing_meeting():
print("[+] Currently not in a meeting. Will proceed to join the next meeting on the calendar.")
self.prepare_calendar()
next_meeting = self.get_next_meeting_element()
if next_meeting:
print("[+] Found meeting: {}. Will proceed to attending meeting.".format(next_meeting.get("title")))
self.current_meeting = next_meeting
self.attend_new_meeting()
else:
current_time = datetime.now().time()
if self.current_meeting and self.current_meeting["starts_at"] <= current_time < self.current_meeting["ends_at"]:
print("[+] Currently attending: {}. Will check for a new meeting at {}".format(
self.current_meeting["title"],
self.current_meeting["ends_at"].strftime("%I:%M %p")
))
else:
print("[+] Exiting meeting: {}".format(self.current_meeting.get("title")))
self.end_meeting()
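# Usage sketch (illustrative): the handler is meant to be polled periodically against a
# browser session that is already signed in to Teams. webdriver.Chrome() is an assumption
# about how the driver is created elsewhere in this project.
if __name__ == '__main__':
    from selenium import webdriver
    handler = MeetingHandler(webdriver.Chrome())
    while True:
        handler.handle()
        time.sleep(60)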
|
# import functions from packages
import os
import time
import pickle
import numpy as np
import scipy
import matplotlib.pyplot as plt
import multiprocessing as mp
# import local variables
from .. import _allowed_colors, _image_size, _correction_folder
# import local functions
from ..io_tools.load import correct_fov_image
from ..spot_tools.fitting import fit_fov_image
from ..io_tools.crop import crop_neighboring_area
from .chromatic import generate_polynomial_data
# default parameters for bleedthrough profiles
_bleedthrough_channels=['750', '647', '561']
_bleedthrough_default_correction_args = {
'correction_folder': _correction_folder,
'single_im_size':_image_size,
'all_channels':_allowed_colors,
'bleed_corr':False,
'illumination_corr':False,
'chromatic_corr':False,
'z_shift_corr':True,
}
_bleedthrough_default_fitting_args = {
'max_num_seeds':500,
'th_seed': 300,
'use_dynamic_th':True,
}
def check_bleedthrough_info(
_info,
_rsq_th=0.81, _intensity_th=150.,
_check_center_position=True, _center_radius=1.):
"""Function to check one bleedthrough pair"""
if _info['rsquare'] < _rsq_th:
return False
elif 'spot' in _info and _info['spot'][0] < _intensity_th:
return False
elif _check_center_position:
_max_inds = np.array(np.unravel_index(np.argmax(_info['ref_im']), np.shape(_info['ref_im'])))
_im_ct_inds = (np.array(np.shape(_info['ref_im']))-1)/2
if (_max_inds < _im_ct_inds-_center_radius).all() or (_max_inds > _im_ct_inds+_center_radius).all():
return False
return True
def find_bleedthrough_pairs(filename, channel,
corr_channels=_bleedthrough_channels,
correction_args=_bleedthrough_default_correction_args,
fitting_args=_bleedthrough_default_fitting_args,
intensity_th=1.,
crop_size=9, rsq_th=0.81,
check_center_position=True,
save_temp=True, save_name=None,
overwrite=True, verbose=True,
):
"""Function to generate bleedthrough spot pairs"""
if 'LinearRegression' not in locals():
from sklearn.linear_model import LinearRegression
## check inputs
_channel = str(channel)
if _channel not in corr_channels:
raise ValueError(f"{_channel} should be within {corr_channels}")
_info_dict = {}
_load_flags = []
for _ch in corr_channels:
if _ch != _channel:
_basename = os.path.basename(filename).replace('.dax', f'_ref_{_channel}_to_{_ch}.pkl')
_basename = 'bleedthrough_'+_basename
_temp_filename = os.path.join(os.path.dirname(filename), _basename)
if os.path.isfile(_temp_filename) and not overwrite:
_infos = pickle.load(open(_temp_filename, 'rb'))
_kept_infos = [_info for _info in _infos
if check_bleedthrough_info(_info, _rsq_th=rsq_th,
_intensity_th=intensity_th,
_check_center_position=check_center_position)]
_info_dict[f"{_channel}_to_{_ch}"] = _kept_infos
_load_flags.append(1)
else:
_info_dict[f"{_channel}_to_{_ch}"] = []
_load_flags.append(0)
if np.mean(_load_flags) == 1:
if verbose:
print(f"-- directly load from saved tempfile in folder: {os.path.dirname(filename)}")
return _info_dict
## 1. load this file
_ims, _ = correct_fov_image(filename, corr_channels,
calculate_drift=False, warp_image=False,
**correction_args,
return_drift=False, verbose=verbose,
)
## 2. fit centers for target channel
_ref_im = _ims[list(corr_channels).index(_channel)]
_tar_channels = [_ch for _ch, _im in zip(corr_channels, _ims) if _ch != _channel]
_tar_ims = [_im for _ch, _im in zip(corr_channels, _ims) if _ch != _channel]
_ref_spots = fit_fov_image(_ref_im, _channel, #normalize_background=True,
**fitting_args, verbose=verbose)
# threshold intensities
#_ref_spots = _ref_spots[_ref_spots[:,0] >= intensity_th]
## crop
for _ch, _im in zip(_tar_channels, _tar_ims):
_key = f"{_channel}_to_{_ch}"
if verbose:
print(f"--- finding matched bleedthrough pairs for {_key}")
# loop through centers to crop
for _spot in _ref_spots:
_rim = crop_neighboring_area(_ref_im, _spot[1:4], crop_sizes=crop_size)
_cim = crop_neighboring_area(_im, _spot[1:4], crop_sizes=crop_size)
# calculate r-square
_x = np.ravel(_rim)[:,np.newaxis]
_y = np.ravel(_cim)
_reg = LinearRegression().fit(_x,_y)
_rsq = _reg.score(_x,_y)
#print(_reg.coef_, _rsq)
_info = {
'coord': _spot[1:4],
'spot': _spot,
'ref_im': _rim,
'bleed_im': _cim,
'rsquare': _rsq,
'slope': _reg.coef_[0],
'intercept': _reg.intercept_,
'file':filename,
}
_info_dict[_key].append(_info)
if save_temp:
for _ch in corr_channels:
if _ch != _channel:
_key = f"{_channel}_to_{_ch}"
_basename = os.path.basename(filename).replace('.dax', f'_ref_{_channel}_to_{_ch}.pkl')
_basename = 'bleedthrough_'+_basename
_temp_filename = os.path.join(os.path.dirname(filename), _basename)
if verbose:
print(f"--- saving {len(_info_dict[_key])} points to file:{_temp_filename}")
pickle.dump(_info_dict[_key], open(_temp_filename, 'wb'))
else:
if verbose:
print(f"-- channel {_ch} doesn't match {_channel}, skip saving.")
# only return the information with rsquare large enough
_kept_info_dict = {_key:[] for _key in _info_dict}
for _key, _infos in _info_dict.items():
_kept_infos = [_info for _info in _infos
if check_bleedthrough_info(_info, _rsq_th=rsq_th,
_intensity_th=intensity_th,
_check_center_position=check_center_position)]
_kept_info_dict[_key] = _kept_infos
return _kept_info_dict
def check_bleedthrough_pairs(info_list, outlier_sigma=2, keep_per_th=0.95, max_iter=20,
verbose=True,):
"""Function to check bleedthrough pairs"""
from scipy.spatial import Delaunay
# prepare inputs
if verbose:
print(f"- check {len(info_list)} bleedthrough pairs.")
_coords = np.array([_info['coord'] for _info in info_list])
_slopes = np.array([_info['slope'] for _info in info_list])
_intercepts = np.array([_info['intercept'] for _info in info_list])
if verbose:
print(f"- start iteration with outlier_sigma={outlier_sigma:.2f}, keep_percentage={keep_per_th:.2f}")
_n_iter = 0
    _kept_flags = np.ones(len(_coords), dtype=bool)
_flags = []
while(len(_flags) == 0 or np.mean(_flags) < keep_per_th):
_n_iter += 1
_start_time = time.time()
_flags = []
_tri = Delaunay(_coords[_kept_flags])
for _i, (_coord, _slope, _intercept) in enumerate(zip(_coords[_kept_flags],
_slopes[_kept_flags],
_intercepts[_kept_flags])):
# get neighboring center ids
            _nb_ids = np.array([_simplex for _simplex in _tri.simplices.copy()
                                if _i in _simplex], dtype=int)
_nb_ids = np.unique(_nb_ids)
# remove itself
_nb_ids = _nb_ids[(_nb_ids != _i) & (_nb_ids != -1)]
#print(_nb_ids)
# get neighbor slopes
_nb_coords = _coords[_nb_ids]
_nb_slopes = _slopes[_nb_ids]
_nb_intercepts = _intercepts[_nb_ids]
_nb_weights = 1 / np.linalg.norm(_nb_coords-_coord, axis=1)
_nb_weights = _nb_weights / np.sum(_nb_weights)
#print(_nb_slopes)
# calculate expected slope and compare
_exp_slope = np.dot(_nb_slopes.T, _nb_weights)
_keep_slope = (np.abs(_exp_slope - _slope) <= outlier_sigma * np.std(_nb_slopes))
# calculate expected intercept and compare
_exp_intercept = np.dot(_nb_intercepts.T, _nb_weights)
_keep_intercept = (np.abs(_exp_intercept - _intercept) <= outlier_sigma * np.std(_nb_intercepts))
# append keep flags
_flags.append((_keep_slope and _keep_intercept))
# update _kept_flags
_updating_inds = np.where(_kept_flags)[0]
        _kept_flags[_updating_inds] = np.array(_flags, dtype=bool)
if verbose:
print(f"-- iter: {_n_iter}, kept in this round: {np.mean(_flags):.3f}, total: {np.mean(_kept_flags):.3f} in {time.time()-_start_time:.3f}s")
if _n_iter > max_iter:
if verbose:
print(f"-- exceed maximum number of iterations, exit.")
break
# selected infos
kept_info_list = [_info for _info, _flag in zip(info_list, _kept_flags) if _flag]
if verbose:
print(f"- {len(kept_info_list)} pairs passed.")
return kept_info_list
def interploate_bleedthrough_correction_from_channel(
info_dicts, ref_channel, target_channel,
check_info=True, check_params={},
max_num_spots=1000, min_num_spots=100,
single_im_size=_image_size, ref_center=None,
fitting_order=2, allow_intercept=True,
save_temp=True, save_folder=None,
make_plots=True, save_plots=True,
overwrite=False, verbose=True,
):
"""Function to interpolate and generate the bleedthrough correction profiles between two channels
"""
_key = f"{ref_channel}_to_{target_channel}"
# extract info list of correct channels
_info_list = []
for _dict in info_dicts:
if _key in _dict:
_info_list += list(_dict[_key])
if len(_info_list) < min_num_spots:
if verbose:
print(f"-- not enough spots ({len(_info_list)}) from {ref_channel} to {target_channel}")
return np.zeros(single_im_size), np.zeros(single_im_size)
# keep the spot pairs with the highest rsquares
if len(_info_list) > max_num_spots:
if verbose:
print(f"-- only keep the top {max_num_spots} spots from {len(_info_list)} for bleedthrough interpolation.")
if len(_info_list) > int(max_num_spots):
_rsquares = np.array([_info['rsquare'] for _info in _info_list])
_rsq_th = np.sort(_rsquares)[-int(max_num_spots)]
_info_list = [_info for _info in _info_list if _info['rsquare']>= _rsq_th]
# check
if check_info:
_info_list = check_bleedthrough_pairs(_info_list, **check_params)
# extract information
_coords = []
_slopes = []
_intercepts = []
for _info in _info_list:
_coords.append(_info['coord'])
_slopes.append(_info['slope'])
_intercepts.append(_info['intercept'])
if len(_coords) < min_num_spots:
if verbose:
print(f"-- not enough spots f({len(_coords)}) from {ref_channel} to {target_channel}")
return np.zeros(single_im_size), np.zeros(single_im_size)
else:
if verbose:
print(f"-- {len(_coords)} spots are used to generate profiles from {ref_channel} to {target_channel}")
_coords = np.array(_coords)
_slopes = np.array(_slopes)
_intercepts = np.array(_intercepts)
# adjust ref_coords with ref center
if ref_center is None:
_ref_center = np.array(single_im_size)[:np.shape(_coords)[1]] / 2
else:
_ref_center = np.array(ref_center)[:np.shape(_coords)[1]]
_ref_coords = _coords - _ref_center[np.newaxis, :]
# generate_polynomial_data
    _X = generate_polynomial_data(_ref_coords, fitting_order)
# do the least-square optimization for slope
_C_slope, _r,_r2,_r3 = scipy.linalg.lstsq(_X, _slopes)
_rsq_slope = 1 - np.sum((_X.dot(_C_slope) - _slopes)**2)\
/ np.sum((_slopes-np.mean(_slopes))**2) # r2 = 1 - SSR/SST
print(_C_slope, _rsq_slope)
# do the least-square optimization for intercept
_C_intercept, _r,_r2,_r3 = scipy.linalg.lstsq(_X, _intercepts)
_rsq_intercept = 1 - np.sum((_X.dot(_C_intercept) - _intercepts)**2)\
/ np.sum((_intercepts-np.mean(_intercepts))**2) # r2 = 1 - SSR/SST
print(_C_intercept, _rsq_intercept)
## generate profiles
_pixel_coords = np.indices(single_im_size)
_pixel_coords = _pixel_coords.reshape(np.shape(_pixel_coords)[0], -1)
_pixel_coords = _pixel_coords - _ref_center[:, np.newaxis]
# generate predictive pixel coordinates
_pX = generate_polynomial_data(_pixel_coords.transpose(), fitting_order)
_p_slope = np.dot(_pX, _C_slope).reshape(single_im_size)
_p_intercept = np.dot(_pX, _C_intercept).reshape(single_im_size)
## save temp if necessary
if save_temp:
if save_folder is not None and os.path.isdir(save_folder):
if verbose:
print(f"-- saving bleedthrough temp profile from channel: {ref_channel} to channel: {target_channel}.")
else:
print(f"-- save_folder is not given or not valid, skip.")
## make plots if applicable
if make_plots:
plt.figure(dpi=150, figsize=(4,3))
plt.imshow(_p_slope.mean(0))
plt.colorbar()
plt.title(f"{ref_channel} to {target_channel}, slope, rsq={_rsq_slope:.3f}")
if save_plots and (save_folder is not None and os.path.isdir(save_folder)):
plt.savefig(os.path.join(save_folder, f'bleedthrough_profile_{ref_channel}_to_{target_channel}_slope.png'),
transparent=True)
plt.show()
plt.figure(dpi=150, figsize=(4,3))
plt.imshow(_p_intercept.mean(0))
plt.colorbar()
plt.title(f"{ref_channel} to {target_channel}, intercept, rsq={_rsq_intercept:.3f}")
if save_plots and (save_folder is not None and os.path.isdir(save_folder)):
plt.savefig(os.path.join(save_folder, f'bleedthrough_profile_{ref_channel}_to_{target_channel}_intercept.png'),
transparent=True)
plt.show()
return _p_slope, _p_intercept
# Final function to be called to generate bleedthrough correction
def Generate_bleedthrough_correction(bleed_folders,
corr_channels=_bleedthrough_channels,
parallel=True, num_threads=12,
start_fov=1, num_images=40,
correction_args={'single_im_size':_image_size,
'illumination_corr':False,
'chromatic_corr':False},
fitting_args={}, intensity_th=150.,
crop_size=9, rsq_th=0.81, check_center=True,
fitting_order=2, generate_2d=True,
interpolate_args={},
make_plots=True, save_plots=True,
save_folder=None,
save_name='bleedthrough_correction',
overwrite_temp=False, overwrite_profile=False,
verbose=True,
):
"""Function to generate bleedthrough profiles """
## 0. inputs
_correction_args = {_k:_v for _k,_v in _bleedthrough_default_correction_args.items()}
_correction_args.update(correction_args) # update with input info
_fitting_args = {_k:_v for _k,_v in _bleedthrough_default_fitting_args.items()}
_fitting_args.update(fitting_args) # update with input info
## 1. savefiles
if save_folder is None:
save_folder = bleed_folders[0]
filename_base = save_name
# add channel info
for _ch in corr_channels:
filename_base += f"_{_ch}"
# add dimension info
if generate_2d:
for _d in _correction_args['single_im_size'][-2:]:
filename_base += f'_{int(_d)}'
else:
for _d in _correction_args['single_im_size']:
filename_base += f'_{int(_d)}'
saved_profile_filename = os.path.join(save_folder, filename_base+'.npy')
# check existance
if os.path.isfile(saved_profile_filename) and not overwrite_profile:
if verbose:
print(f"+ bleedthrough correction profiles already exists. direct load the profile")
_bleed_profiles = np.load(saved_profile_filename, allow_pickle=True)
### not exist: start processing
else:
if verbose:
print(f"+ generating bleedthrough profiles.")
## 2. select_fov_names
fov_names = [_fl for _fl in os.listdir(bleed_folders[0])
if _fl.split('.')[-1]=='dax']
sel_fov_names = [_fl for _fl in sorted(fov_names, key=lambda v:int(v.split('.dax')[0].split('_')[-1]))]
sel_fov_names = sel_fov_names[int(start_fov):int(start_fov)+int(num_images)]
## 3. prepare args to generate info
# assemble args
_bleed_args = []
for _fov in sel_fov_names:
for _folder, _ch in zip(bleed_folders, corr_channels):
_bleed_args.append(
(
os.path.join(_folder, _fov),
_ch,
corr_channels,
_correction_args,
_fitting_args,
intensity_th,
crop_size,
rsq_th,
check_center,
True, None,
overwrite_temp, verbose,
)
)
## 4. multi-processing
if parallel:
with mp.Pool(num_threads) as _ca_pool:
if verbose:
print(f"++ generating bleedthrough info for {len(sel_fov_names)} images with {num_threads} threads in", end=' ')
_multi_start = time.time()
_info_dicts = _ca_pool.starmap(find_bleedthrough_pairs,
_bleed_args, chunksize=1)
_ca_pool.close()
_ca_pool.join()
_ca_pool.terminate()
if verbose:
print(f"{time.time()-_multi_start:.3f}s.")
else:
if verbose:
print(f"++ generating bleedthrough info for {len(sel_fov_names)} images in", end=' ')
_multi_start = time.time()
_info_dicts = [find_bleedthrough_pairs(*_arg) for _arg in _bleed_args]
if verbose:
print(f"{time.time()-_multi_start:.3f}s.")
## 5. generate_the_whole_profile
profile_shape = np.concatenate([np.array([len(corr_channels),
len(corr_channels)]),
_correction_args['single_im_size']])
bld_corr_profile = np.zeros(profile_shape)
# loop through two images
for _ref_i, _ref_ch in enumerate(corr_channels):
for _tar_i, _tar_ch in enumerate(corr_channels):
if _ref_ch == _tar_ch:
bld_corr_profile[_tar_i, _ref_i] = np.ones(_correction_args['single_im_size'])
else:
_slope_pf, _intercept_pf = interploate_bleedthrough_correction_from_channel(
_info_dicts, _ref_ch, _tar_ch,
single_im_size=_correction_args['single_im_size'],
fitting_order=fitting_order,
save_folder=save_folder,
verbose=True, **interpolate_args,
)
bld_corr_profile[_tar_i, _ref_i] = _slope_pf
# compress to 2d
if generate_2d:
bld_corr_profile = bld_corr_profile.mean(2)
# calculate inverse matrix for each pixel
if verbose:
print(f"-- generating inverse matrix.")
        _bleed_profiles = np.zeros(np.shape(bld_corr_profile), dtype=float)
for _i in range(np.shape(bld_corr_profile)[-2]):
for _j in range(np.shape(bld_corr_profile)[-1]):
if generate_2d:
_bleed_profiles[:,:,_i,_j] = np.linalg.inv(bld_corr_profile[:,:,_i,_j])
else:
for _z in range(np.shape(bld_corr_profile)[-3]):
_bleed_profiles[:,:,_z,_i,_j] = np.linalg.inv(bld_corr_profile[:,:,_z,_i,_j])
# reshape the profile
## 5. save
if verbose:
print(f"-- saving to file:{saved_profile_filename}")
np.save(saved_profile_filename, _bleed_profiles.reshape(np.concatenate([[len(corr_channels)**2],
np.shape(_bleed_profiles)[2:]])
))
return _bleed_profiles
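# Application sketch (illustrative, not part of the original pipeline): the saved profile
# holds a per-pixel inverse mixing matrix, so bleedthrough-corrected images come from a
# per-pixel matrix-vector product with the measured channel stack. `measured_ims` is a
# hypothetical array of shape (n_ch, y, x) matching a profile generated with generate_2d=True.
def apply_bleedthrough_correction_2d(measured_ims, profile_filename):
    n_ch = measured_ims.shape[0]
    inv_profiles = np.load(profile_filename).reshape(n_ch, n_ch, *measured_ims.shape[1:])
    # corrected[c, i, j] = sum_r inv_profiles[c, r, i, j] * measured_ims[r, i, j]
    return np.einsum('crij,rij->cij', inv_profiles, measured_ims)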
|
import logging
# log format
format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
format_without_levelname = logging.Formatter('%(asctime)s - %(name)s - %(message)s')
# the root logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# file Log
f_handler = logging.FileHandler("log.txt", encoding='utf-8')
f_handler.setLevel(logging.INFO)
f_handler.setFormatter(format)
# console Log
s_handler = logging.StreamHandler()
s_handler.setLevel(logging.WARNING)
s_handler.setFormatter(format_without_levelname)
# add handler to logger
logger.addHandler(f_handler)
logger.addHandler(s_handler)
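# Usage sketch (illustrative): importing this module configures the root logger, so any
# logger created elsewhere inherits the handlers above. INFO messages go to log.txt only,
# while WARNING and above also reach the console.
if __name__ == '__main__':
    example_logger = logging.getLogger(__name__)
    example_logger.info("written to log.txt only")
    example_logger.warning("written to log.txt and the console")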
|
from . import api
v1 = [
(r"attendance/terminal", api.TerminalViewSet, "attendance-terminal"),
(
r"attendance/campusonlineholding",
api.CampusOnlineHoldingViewSet,
"attendance-campusonline-holding",
),
(
r"attendance/campusonlineentry",
api.CampusOnlineEntryViewSet,
"attendance-campusonline-entry",
),
(
r"attendance/manualcampusonlineentry",
api.ManualCampusOnlineEntryViewSet,
"attendance-manualcampusonline-entry",
),
(r"attendance/roomstate", api.RoomStateViewSet, "attendance-roomstate"),
(r"attendance/statistics", api.StatisticsViewSet, "attendance-statistics"),
]
|
"""Task: Test the check co-ordinates function."""
# Imports --------------------------------------------------------------
from modules.check_coordinates import check_coordinates
# Classes --------------------------------------------------------------
# Functions ------------------------------------------------------------
def test_check_coordinates():
"""This function checks our 'test coordinates' function."""
x_error = "X axis out of bounds error."
y_error = "Y axis out of bounds error."
xy_error = "X and Y axis out of bounds error."
neg_error = "Negative coordinate not valid."
expected_message = "No errors."
image_size = (200, 200)
out_of_bounds_x = [[0, 0], [0, 100], [300, 0], [300, 100]]
out_of_bounds_y = [[0, 0], [0, 300], [100, 0], [100, 300]]
out_of_bounds_xy = [[0, 0], [300, 300], [300, 0], [0, 300]]
assert check_coordinates(image_size, out_of_bounds_x) == x_error
assert check_coordinates(image_size, out_of_bounds_y) == y_error
assert check_coordinates(image_size, out_of_bounds_xy) == xy_error
negative_x = [[0, 0], [0, 100], [-100, 0], [-100, 100]]
negative_y = [[0, 0], [0, -100], [100, 0], [100, -100]]
negative_xy = [[0, 0], [0, 100], [100, 0], [-100, -100]]
assert check_coordinates(image_size, negative_x) == neg_error
assert check_coordinates(image_size, negative_y) == neg_error
assert check_coordinates(image_size, negative_xy) == neg_error
expected = [[0, 0], [0, 100], [100, 0], [100, 100]]
assert check_coordinates(image_size, expected) == expected_message
# Methods --------------------------------------------------------------
# Define Variables -----------------------------------------------------
# Execute Code ---------------------------------------------------------
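# Reference sketch (illustrative; not the actual modules.check_coordinates implementation):
# one implementation consistent with the assertions above rejects negative coordinates
# first, then reports which axes fall outside the image bounds.
def _check_coordinates_sketch(image_size, coordinates):
    width, height = image_size
    if any(x < 0 or y < 0 for x, y in coordinates):
        return "Negative coordinate not valid."
    x_out = any(x >= width for x, _ in coordinates)
    y_out = any(y >= height for _, y in coordinates)
    if x_out and y_out:
        return "X and Y axis out of bounds error."
    if x_out:
        return "X axis out of bounds error."
    if y_out:
        return "Y axis out of bounds error."
    return "No errors."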
|
"""
This module contains all the hyperparameters of the model.
It contains a function for each module in the project (that uses at least one hyperparameter);
each function returns a named tuple with the parameters.
"""
from collections import namedtuple
_NetworkT = namedtuple('_NetworkT', ['topk'])
_COCODatasetT = namedtuple('_COCODatasetT',
['FlipProb', 'RotationProb', 'ColorJitterProb', 'RotationDegrees', 'ColorJitterBrightness',
'ColorJitterContrast', 'ColorJitterSaturation', 'ColorJitterHue'])
_utilsT = namedtuple('_utilsT', ['FCReLUSlope'])
_mainT = namedtuple('_mainT',
['workersNum', 'batchSize', 'maxCaptionLenDelta', 'adamLr', 'adamBetas', 'epochsNum', 'evalEvery'])
_Network = _NetworkT(25)
_COCODataset = _COCODatasetT(.5, .3, .3, 10, .1, .1, .1, .1)
_utils = _utilsT(.01)
_main = _mainT(0, 55, 5, .001, (0.9, 0.999), 3, 1)
def network():
return _Network
def COCODataset():
return _COCODataset
def utils():
return _utils
def main():
return _main
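# Usage sketch (illustrative): each consumer calls the accessor for its own parameter
# group and reads fields off the returned named tuple.
if __name__ == '__main__':
    _params = main()
    print(_params.batchSize, _params.adamLr, _params.epochsNum)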
|
# =========================================================================
# This code built for MCEN90048 project
# Written by Zhuo LI
# The University of Melbourne
# zhuol7@student.unimelb.edu.au
# =========================================================================
import tensorflow as tf
import os
import json
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.client import timeline
class TimeLiner:
_timeline_dict = None
def update_timeline(self, old_chrome_trace):
# convert chrome trace to python dict
chrome_trace_dict = json.loads(old_chrome_trace)
# for first run store full trace
if self._timeline_dict is None:
self._timeline_dict = chrome_trace_dict
# for other - update only time consumption, not definitions
else:
for event in chrome_trace_dict['traceEvents']:
# events time consumption started with 'ts' prefix
if 'ts' in event:
self._timeline_dict['traceEvents'].append(event)
def save(self, f_name):
with open(f_name, 'w') as f:
json.dump(self._timeline_dict, f)
data = input_data.read_data_sets('data/fashion', one_hot=True)
BATCH_SIZE = 100
TRAIN_DATA_SIZE = 55000
EPOCH = int(TRAIN_DATA_SIZE / BATCH_SIZE) # 55000 / 100 = 550
X_VAL = data.validation.images
Y_VAL = data.validation.labels
X_TEST = data.test.images
Y_TEST = data.test.labels
LEARNING_RATE = 0.001
ckpt_path = 'results/MLPBaseline/AdamOptimizer/AdamOptimizer.ckpt'
with tf.name_scope('inputs'):
xs = tf.placeholder(tf.float32, [None, 784]) / 255. # 28x28
ys = tf.placeholder(tf.float32, [None, 10])
# fc1 layer
h_fc1 = tf.layers.dense(xs, 1024, tf.nn.relu) # hidden layer
tf.summary.histogram('/h_fc1_output', h_fc1)
# fc2 layer
prediction = tf.layers.dense(h_fc1, 10) # output layer
tf.summary.histogram('/prediction', prediction)
# the error between prediction and real data
with tf.name_scope('cross_entropy'):
cross_entropy = tf.losses.softmax_cross_entropy(onehot_labels=ys, logits=prediction) # loss
tf.summary.scalar('cross_entropy', cross_entropy)
with tf.name_scope('train'):
train_step = tf.train.AdamOptimizer(LEARNING_RATE, epsilon=1e-8).minimize(cross_entropy)
with tf.name_scope("accuracy"):
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(ys, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar("accuracy", accuracy)
sess = tf.Session()
init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
# merge all the summaries
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("results/MLPBaseline/AdamOptimizer/", sess.graph)
saver = tf.train.Saver()
# first check whether there is a model to be restored
# if not, start training. So if Richard wants to re-run in this training condition, please delete the existing files first.
# otherwise, restore the model and test it on the TEST data set; this will just save another event file
if not os.path.exists('results/MLPBaseline/AdamOptimizer/checkpoint'): # if no model to restore
print("no model to restore, start training.")
sess.run(init)
EPOCHS = EPOCH * 20
for step in range(EPOCHS):
batch_xs, batch_ys = data.train.next_batch(BATCH_SIZE)
sess.run(train_step,
feed_dict={xs: batch_xs, ys: batch_ys},
options=None,
run_metadata=None)
if (step+1) % 500 == 0:
current_accuracy = sess.run(accuracy, feed_dict={xs: X_VAL, ys: Y_VAL})
train_result = sess.run(merged, feed_dict={xs: X_VAL, ys: Y_VAL})
writer.add_summary(train_result, step+1)
print("step {}:\tTraining Accuracy={:.4f}".format(step+1, current_accuracy))
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
many_runs_timeline = TimeLiner()
for step in range(5):
batch_xs, batch_ys = data.train.next_batch(BATCH_SIZE)
sess.run(train_step,
feed_dict={xs: batch_xs, ys: batch_ys},
options=options,
run_metadata=run_metadata)
fetched_timeline = timeline.Timeline(run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
many_runs_timeline.update_timeline(chrome_trace)
many_runs_timeline.save('results/MLPBaseline/AdamOptimizer/timeline_merged_of_%d_iterations.json' % 5)
address = saver.save(sess, ckpt_path)
print("Finish training, we'v stored the model's parameters to", address)
final_accuracy = sess.run(accuracy, feed_dict={xs: X_TEST, ys: Y_TEST})
print('test accuracy: %.4f' % final_accuracy)
sess.close()
else:
# restore the parameters and test the model
saver.restore(sess, ckpt_path)
print("Model restore successfully.")
final_accuracy = sess.run(accuracy, feed_dict={xs: X_TEST, ys: Y_TEST})
print('test accuracy: %.4f' % final_accuracy)
sess.close()
|
# -*- coding: utf-8 -*-
# @Time : 2019-10-15 14:32
# @Author : Jason
# @FileName: utils.py
import scipy.sparse as sp
import numpy as np
from scipy.sparse.linalg import eigsh, ArpackNoConvergence
import random
import matplotlib.pyplot as plt
import os
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)), dtype=np.int32)
return labels_onehot
def load_data(path, dataset, config):
"""Load citation network dataset (cora dataset only for now)"""
print('Loading {} dataset...'.format(dataset))
idx_features_labels = np.genfromtxt("{}{}.features".format(path, dataset), dtype=np.dtype(str))
features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
labels = encode_onehot(idx_features_labels[:, -1])
# build graph
idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
idx_map = {j: i for i, j in enumerate(idx)}
edges_unordered = np.genfromtxt("{}{}.edges".format(path, dataset), dtype=np.int32)
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
dtype=np.int32).reshape(edges_unordered.shape)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
shape=(labels.shape[0], labels.shape[0]), dtype=np.float32)
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
print('Dataset has {} nodes, {} edges, {} features.'.format(adj.shape[0], edges.shape[0], features.shape[1]))
y_train, y_val, y_test, idx_train, idx_val, idx_test, train_mask = get_splits(labels)
features /= features.sum(1).reshape(-1, 1) # normalization
graph = [features, adj]
if not os.path.exists(config.adj_path):
np.savetxt(config.adj_path, adj.A)
inputs = [graph, adj, features, y_train, y_val, y_test, idx_train, idx_val, idx_test, labels]
return features, adj, labels, inputs
def normalize_adj(adj, symmetric=True):
if symmetric:
d = sp.diags(np.power(np.array(adj.sum(1)), -0.5).flatten(), 0)
a_norm = adj.dot(d).transpose().dot(d).tocsr()
else:
d = sp.diags(np.power(np.array(adj.sum(1)), -1).flatten(), 0)
a_norm = d.dot(adj).tocsr()
return a_norm
def preprocess_adj(adj, symmetric=True):
adj = adj + sp.eye(adj.shape[0])
adj = normalize_adj(adj, symmetric)
return adj
def sample_mask(idx, l):
mask = np.zeros(l)
mask[idx] = 1
    return np.array(mask, dtype=bool)
def get_splits(y):
num = [i for i in range(y.shape[0])]
random.shuffle(num)
n = len(num)
idx_train = num[:int(n * 0.6)]
idx_val = num[int(n * 0.6):int(n * 0.8)]
idx_test = num[int(n * 0.8):]
y_train = np.zeros(y.shape, dtype=np.int32)
y_val = np.zeros(y.shape, dtype=np.int32)
y_test = np.zeros(y.shape, dtype=np.int32)
y_train[idx_train] = y[idx_train]
y_val[idx_val] = y[idx_val]
y_test[idx_test] = y[idx_test]
# print("idx_train: ", idx_train)
train_mask = sample_mask(idx_train, y.shape[0])
return y_train, y_val, y_test, idx_train, idx_val, idx_test, train_mask
def categorical_crossentropy(preds, labels):
temp = np.extract(labels, preds)
epsilon = 1e-5
return np.mean(-np.log(temp + epsilon))
def accuracy(preds, labels):
return np.mean(np.equal(np.argmax(labels, 1), np.argmax(preds, 1)))
def evaluate_preds(preds, labels, indices):
split_loss = list()
split_acc = list()
for y_split, idx_split in zip(labels, indices):
split_loss.append(categorical_crossentropy(preds[idx_split], y_split[idx_split]))
split_acc.append(accuracy(preds[idx_split], y_split[idx_split]))
return split_loss, split_acc
def normalized_laplacian(adj, symmetric=True):
adj_normalized = normalize_adj(adj, symmetric)
laplacian = sp.eye(adj.shape[0]) - adj_normalized
return laplacian
def rescale_laplacian(laplacian):
try:
print('Calculating largest eigenvalue of normalized graph Laplacian...')
largest_eigval = eigsh(laplacian, 1, which='LM', return_eigenvectors=False)[0]
except ArpackNoConvergence:
print('Eigenvalue calculation did not converge! Using largest_eigval=2 instead.')
largest_eigval = 2
scaled_laplacian = (2. / largest_eigval) * laplacian - sp.eye(laplacian.shape[0])
return scaled_laplacian
def chebyshev_polynomial(X, k):
"""Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices."""
print("Calculating Chebyshev polynomials up to order {}...".format(k))
T_k = list()
T_k.append(sp.eye(X.shape[0]).tocsr())
T_k.append(X)
def chebyshev_recurrence(T_k_minus_one, T_k_minus_two, X):
X_ = sp.csr_matrix(X, copy=True)
return 2 * X_.dot(T_k_minus_one) - T_k_minus_two
for i in range(2, k + 1):
T_k.append(chebyshev_recurrence(T_k[-1], T_k[-2], X))
return T_k
def sparse_to_tuple(sparse_mx):
if not sp.isspmatrix_coo(sparse_mx):
sparse_mx = sparse_mx.tocoo()
coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose()
values = sparse_mx.data
shape = sparse_mx.shape
return coords, values, shape
def pltFig(n, x1, x2, x1_label, x2_label, xlabel, ylabel, title, path, dpi):
fig = plt.figure()
plt.plot(range(1, n + 1), x1, label=x1_label)
plt.plot(range(1, n + 1), x2, label=x2_label)
plt.legend(loc="best")
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
fig.savefig(path, dpi=dpi)
plt.show()
def pltFigSingle(n, x, x_label, xlabel, ylabel, title, path, dpi):
fig = plt.figure()
plt.plot(range(1, n + 1), x, label=x_label)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
fig.savefig(path, dpi=dpi)
plt.show()
def getNeighbor(node, adj):
neighbors = []
for i in range(len(adj[node])):
if adj[node][i] == 1 and i != node:
neighbors.append(i)
return neighbors
def calWeight(w, threshold):
if w > threshold:
if w >= 1:
weight = 1
else:
weight = w
else:
weight = abs(0.5 - (1 / (1 + np.exp(-w))))
return weight
def int2str(n):
s = str(n).split(".")
return s[0] + s[-1]
def randomList(edgeList, percent):
l = []
for _ in range(min(len(edgeList), percent)):
l.append(edgeList[min(len(edgeList) - 1, int(random.random() * len(edgeList)))])
return l
def subMatrix(ADJ_MATRIX_PATH, WEIGHT_PATH, node, threshold):
adj = np.loadtxt(ADJ_MATRIX_PATH)
weights = np.loadtxt(WEIGHT_PATH)
dic = {}
neighbors = getNeighbor(node, adj)
dic[node] = neighbors
for n in neighbors:
dic[n] = getNeighbor(n, adj)
all_nodes = []
for v in dic.values():
all_nodes += v
all_nodes = list(set(all_nodes))
n = len(all_nodes)
matrix = np.zeros(shape=(n, n))
for i in range(n):
for j in range(n):
if weights[all_nodes[i]][all_nodes[j]] > threshold:
if weights[all_nodes[i]][all_nodes[j]] >= 1.0:
matrix[i][j] = 1
else:
matrix[i][j] = weights[all_nodes[i]][all_nodes[j]]
else:
matrix[i][j] = abs(0.5 - (1 / (1 + np.exp(-weights[all_nodes[i]][all_nodes[j]]))))
return matrix, adj
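# Usage sketch (illustrative): apply the renormalization trick used above (A + I followed
# by symmetric degree normalization) to a tiny adjacency matrix.
if __name__ == '__main__':
    toy_adj = sp.coo_matrix(np.array([[0., 1., 0.],
                                      [1., 0., 1.],
                                      [0., 1., 0.]], dtype=np.float32))
    print(preprocess_adj(toy_adj).toarray())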
|
"""
this script is used to process the CCPD_FR labels and make them fit the required training format
"""
from collections import deque
import cv2
import numpy as np
from src.img_utility import pts_to_BBCor, read_img_from_dir, pixel_to_ratio, IoU
from src.dataset_utility import CCPD_FR_vertices_info
from src.data_aug import data_aug
from src.geometry_calc import pt_within_polygon
"""
label pre-processing
"""
# return the mean value of LP size in a dataset of CCPD_FR format images
# need the fixed training input dimension (square for input images)
# can pass total_stride argument (total stride of model)
def mean_size_LP(img_folder, training_dim, total_stride=1):
imgs_path = read_img_from_dir(img_folder)
imgs_amount = len(imgs_path)
W, H = 0., 0.
for img_path in imgs_path:
img_size = cv2.imread(img_path).shape # cv2.imread.shape -> (h, w, ch)
vertices = CCPD_FR_vertices_info(img_path)
BBCor = pts_to_BBCor(*vertices)
BBCor = pixel_to_ratio(img_size, *BBCor)
w_ratio, h_ratio = BBCor[1][0] - BBCor[0][0], BBCor[1][1] - BBCor[0][1]
W += w_ratio * training_dim
H += h_ratio * training_dim
return (W + H) / 2 / imgs_amount / total_stride
# read the CCPD_FR images and return the label for training (WPOD-based encoding)
# need to give the dimension for training and the total stride in the model
# label shape -> [y, x, 1 + 2*4], y and x are the downsampled output map size
# label format -> [object_1or0, x1, y1, x2, y2, x3, y3, x4, y4] pts from bottom right and clockwise
def CCDP_FR_to_training_label(img_path, training_dim, stride, side=3.5):
# side = 3.5 calculated by training_dim = 208 and stride = 16 in 746 dataset
# side = 16. calculated by training_dim = 256 and stride = 4 in 2333 dataset
img_shape = cv2.imread(img_path).shape
    out_size = training_dim // stride
    assert training_dim % stride == 0, 'training_dim divided by stride must be an integer'
vertices = CCPD_FR_vertices_info(img_path)
LP_Cor = np.array(pixel_to_ratio(img_shape, *vertices)) * training_dim
LP_BB = np.array(pts_to_BBCor(*LP_Cor))
LP_Cor_outdim = LP_Cor / stride
LP_BB_outdim = [np.maximum(LP_BB[0] / stride, 0).astype(int), np.minimum(LP_BB[1] / stride, out_size).astype(int)]
label = np.zeros((out_size, out_size, 1 + 2 * 4))
for y in range(LP_BB_outdim[0][1], LP_BB_outdim[1][1]):
for x in range(LP_BB_outdim[0][0], LP_BB_outdim[1][0]):
now_pixel = np.array([x + 0.5, y + 0.5])
LP_BB_wh = LP_BB_outdim[1] - LP_BB_outdim[0]
same_BB_on_now_pixel = [now_pixel - LP_BB_wh / 2., now_pixel + LP_BB_wh / 2.]
# print LP_BB_outdim
# print same_BB_on_now_pixel
iou = IoU(LP_BB_outdim, same_BB_on_now_pixel)
if iou > 0.7:
LP_Cor_recenter = (np.array(LP_Cor_outdim) - now_pixel) / side
label[y, x, 0] = 1
label[y, x, 1:] = LP_Cor_recenter.flatten()
return label
# batch version of label conversion, with data augmentation!
# WPOD-based encoding
def batch_CCPD_to_training_label(img_paths, training_dim, stride, side=3.5):
x_labels = []
y_labels = []
imgs_aug, vertices_aug = data_aug(img_paths)
for img_aug, vertice_aug in zip(imgs_aug, vertices_aug):
# side = 3.5 calculated by training_dim = 208 and stride = 16 in 746 dataset
# side = 16. calculated by training_dim = 256 and stride = 4 in 2333 dataset
img_shape = img_aug.shape
        out_size = training_dim // stride
        assert training_dim % stride == 0, 'training_dim divided by stride must be an integer'
LP_Cor = np.array(pixel_to_ratio(img_shape, *vertice_aug)) * training_dim
LP_BB = np.array(pts_to_BBCor(*LP_Cor))
LP_Cor_outdim = LP_Cor / stride
LP_BB_outdim = [np.maximum(LP_BB[0] / stride, 0).astype(int), np.minimum(LP_BB[1] / stride, out_size).astype(int)]
y_label = np.zeros((out_size, out_size, 1 + 2 * 4))
for y in range(LP_BB_outdim[0][1], LP_BB_outdim[1][1]):
for x in range(LP_BB_outdim[0][0], LP_BB_outdim[1][0]):
now_pixel = np.array([x + 0.5, y + 0.5])
LP_BB_wh = LP_BB_outdim[1] - LP_BB_outdim[0]
same_BB_on_now_pixel = [now_pixel - LP_BB_wh / 2., now_pixel + LP_BB_wh / 2.]
iou = IoU(LP_BB_outdim, same_BB_on_now_pixel)
if iou > 0.7:
LP_Cor_recenter = (np.array(LP_Cor_outdim) - now_pixel) / side
y_label[y, x, 0] = 1
y_label[y, x, 1:] = LP_Cor_recenter.flatten()
x_label = cv2.resize(img_aug, (training_dim, training_dim)) / 255. # 255 for normalization
x_labels.append(x_label)
y_labels.append(y_label)
return x_labels, y_labels
# batch version of label conversion, with data augmentation!
# vernex-based encoding
# label format -> [prob_lp, prob_fr, x1, y1, x2, y2, x3, y3, x4, y4, ->> lp coordinates
# x1, y1, x2, y2, x3, y3, x4, y4, ->> fr coordinates
# class_bg, class_front, class_rear] ->> for classification
# pts from bottom right and clockwise
def batch_CCPD_to_training_label_vernex_lpfr(img_paths, training_dim, stride, side=3.5):
x_labels = []
y_labels = []
imgs_aug, vertices_aug, fr_classes = data_aug(img_paths)
for img_aug, vertice_aug, fr_class in zip(imgs_aug, vertices_aug, fr_classes):
# side = 3.5 calculated by training_dim = 208 and stride = 16 in 746 dataset
# side = 16. calculated by training_dim = 256 and stride = 4 in 2333 dataset
img_shape = img_aug.shape
        out_size = training_dim // stride
        assert training_dim % stride == 0, 'training_dim divided by stride must be an integer'
LP_Cor = vertice_aug[0:4]
FR_Cor = vertice_aug[4:8]
LP_Cor = np.array(pixel_to_ratio(img_shape, *LP_Cor)) * training_dim
FR_Cor = np.array(pixel_to_ratio(img_shape, *FR_Cor)) * training_dim
LP_BB = np.array(pts_to_BBCor(*LP_Cor))
FR_BB = np.array(pts_to_BBCor(*FR_Cor))
LP_Cor_outdim = LP_Cor / stride
FR_Cor_outdim = FR_Cor / stride
LP_BB_outdim = [np.maximum(LP_BB[0] / stride, 0).astype(int), np.minimum(LP_BB[1] / stride, out_size).astype(int)]
FR_BB_outdim = [np.maximum(FR_BB[0] / stride, 0).astype(int), np.minimum(FR_BB[1] / stride, out_size).astype(int)]
y_label = np.zeros((out_size, out_size, 2 + 2 * 4 + 2 * 4 + 3))
y_label[..., 18] = 1 # class_bg encoding
# LP encoding
for y in range(LP_BB_outdim[0][1], LP_BB_outdim[1][1]):
for x in range(LP_BB_outdim[0][0], LP_BB_outdim[1][0]):
now_pixel = np.array([x + 0.5, y + 0.5])
LP_BB_wh = LP_BB_outdim[1] - LP_BB_outdim[0]
same_BB_on_now_pixel = [now_pixel - LP_BB_wh / 2., now_pixel + LP_BB_wh / 2.]
iou = IoU(LP_BB_outdim, same_BB_on_now_pixel)
if iou > 0.7:
LP_Cor_recenter = (np.array(LP_Cor_outdim) - now_pixel) / side
y_label[y, x, 0] = 1
y_label[y, x, 2:10] = LP_Cor_recenter.flatten()
# FR encoding
for y in range(FR_BB_outdim[0][1], FR_BB_outdim[1][1]):
for x in range(FR_BB_outdim[0][0], FR_BB_outdim[1][0]):
now_pixel = np.array([x + 0.5, y + 0.5])
FR_BB_wh = FR_BB_outdim[1] - FR_BB_outdim[0]
same_BB_on_now_pixel = [now_pixel - FR_BB_wh / 2., now_pixel + FR_BB_wh / 2.]
iou = IoU(FR_BB_outdim, same_BB_on_now_pixel)
if iou > 0.4:
FR_Cor_recenter = (np.array(FR_Cor_outdim) - now_pixel) / side
y_label[y, x, 1] = 1
y_label[y, x, 10:18] = FR_Cor_recenter.flatten()
# used for context-auxiliary training and classification, so the region needs to be as accurate as possible
# if polygon_iou(FR_Cor_outdim, now_pixel) > 0.2:
if pt_within_polygon(now_pixel, FR_Cor_outdim):
if fr_class in ['front']:
y_label[y, x, 18] = 0
y_label[y, x, 19] = 1 # front one-hot encoding
elif fr_class in ['rear']:
y_label[y, x, 18] = 0
y_label[y, x, 20] = 1 # rear one-hot encoding
x_label = cv2.resize(img_aug, (training_dim, training_dim)) / 255. # 255 for normalization
x_labels.append(x_label)
y_labels.append(y_label)
return x_labels, y_labels
# combine the labels of four images and make it a huge training label
# be sure the labels share the same training dim and model stride
def label_splicing(label1, label2, label3, label4):
w = label1.shape[1]
h = label1.shape[0]
label2 += np.array([0, w, 0, w, 0, w, 0, w, 0])
label3 += np.array([0, 0, h, 0, h, 0, h, 0, h])
label4 += np.array([0, w, h, w, h, w, h, w, h])
top_row = np.concatenate([label1, label2], axis=1)
bottom_row = np.concatenate([label3, label4], axis=1)
final_label = np.concatenate([top_row, bottom_row], axis=0)
return final_label
# also splicing function but for images
def img_splicing(img1, img2, img3, img4):
top_row = np.concatenate([img1, img2], axis=1)
bottom_row = np.concatenate([img3, img4], axis=1)
final_img = np.concatenate([top_row, bottom_row], axis=0)
return final_img
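# --- Hedged usage sketch (added for illustration, not part of the original pipeline) ---
# A minimal check of the 2x2 mosaic helper above, assuming numpy is imported as np in
# this module (as used throughout). Four equally sized dummy "images" are spliced into
# one mosaic that is twice as wide and twice as tall.
def _demo_img_splicing():
    dummy = [np.full((128, 128, 3), fill, dtype=np.uint8) for fill in (0, 64, 128, 255)]
    mosaic = img_splicing(*dummy)
    assert mosaic.shape == (256, 256, 3)
    return mosaic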
"""
label post-processing
"""
# receive the output of the network and map the label to the original image
# currently this function only works with a single-image prediction label
# in each label -> [prob, cor_after_affine]
def predicted_label_to_origin_image_WPOD(ori_image_shape, label, stride, prob_threshold=0.9, use_nms=True, side=3.5):
# side = 3.5 calculated by training_dim = 208 and stride = 16
# side = 16. calculated by training_dim = 256 and stride = 4 in 2333 dataset
out_w = label.shape[1]
out_h = label.shape[0]
label_to_origin = []
for y in range(out_h):
for x in range(out_w):
prob = label[y, x, 0]  # objectness probability
if prob >= prob_threshold:
now_pixel = np.array([x + 0.5, y + 0.5])
affinex = label[y, x, 2:5] # shape = [3, ]
affiney = label[y, x, 5:] # shape = [3, ]
affinex[0] = max(affinex[0], 0)
affiney[1] = max(affiney[1], 0)
# base rectangle from br and clock-wise
base_rectangle = np.array([[0.5, 0.5, 1], [-0.5, 0.5, 1], [-0.5, -0.5, 1], [0.5, -0.5, 1]])
# cor_after_affine -> [[x1, y1], [x2, y2], [x3, y3], [x4, y4]]
cor_after_affine = np.stack([np.sum(affinex * base_rectangle, axis=1),
np.sum(affiney * base_rectangle, axis=1)], axis=1) # shape = [4, 2]
cor_after_affine = cor_after_affine * side
cor_after_affine += now_pixel
cor_after_affine *= stride
cor_after_affine *= np.array([ori_image_shape[1] / (1. * out_w * stride),
ori_image_shape[0] / (1. * out_h * stride)])
cor_after_affine = cor_after_affine.astype(int)
# clip to the original image size
for pts in cor_after_affine:
pts[0] = np.clip(pts[0], 0, ori_image_shape[1])
pts[1] = np.clip(pts[1], 0, ori_image_shape[0])
label_to_origin.append([prob, cor_after_affine])
if use_nms:
label_to_origin = nms(label_to_origin)
return label_to_origin
# receive the output of the network and map the label to the original image
# currently this function only works with a single-image prediction label
# in each label -> [prob, vertex_predicted]
def predicted_label_to_origin_image_Vernex_lp(ori_image_shape, label, stride, prob_threshold=0.9, use_nms=True, side=3.5):
# side = 3.5 calculated by training_dim = 208 and stride = 16
# side = 16. calculated by training_dim = 256 and stride = 4 in 2333 dataset
out_w = label.shape[1]
out_h = label.shape[0]
label_to_origin = []
for y in range(out_h):
for x in range(out_w):
prob = label[y, x, 0]  # objectness probability
if prob >= prob_threshold:
now_pixel = np.array([x + 0.5, y + 0.5])
ratio = label[y, x, 2:]
ratio = np.reshape(ratio, (4, 2))
# base vectors from br and clock-wise
base_vector = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]])
# predicted vertices -> [[x1, y1], [x2, y2], [x3, y3], [x4, y4]]
vertices = base_vector * ratio # shape = [4, 2]
vertices = vertices * side
vertices += now_pixel
vertices *= stride
vertices *= np.array([ori_image_shape[1] / (1. * out_w * stride),
ori_image_shape[0] / (1. * out_h * stride)])
vertices = vertices.astype(int)
# clip to the original image size
for pts in vertices:
pts[0] = np.clip(pts[0], 0, ori_image_shape[1])
pts[1] = np.clip(pts[1], 0, ori_image_shape[0])
label_to_origin.append([prob, vertices])
if use_nms:
label_to_origin = nms(label_to_origin)
return label_to_origin
# receive the output of the network and map the label to the original image
# currently this function only works with a single-image prediction label
# in each label -> [prob, vertex_predicted_lp, vertex_predicted_fr, [fr_class, class_prob]]
def predicted_label_to_origin_image_Vernex_lpfr(ori_image_shape, label, stride, prob_threshold=0.9, use_nms=True, side=3.5):
out_w = label.shape[1]
out_h = label.shape[0]
label_to_origin = []
for y in range(out_h):
for x in range(out_w):
prob = label[y, x, 0]  # objectness probability
if prob >= prob_threshold:
single_label = []
single_label.append(prob)
now_pixel = np.array([x + 0.5, y + 0.5])
ratio_lp = label[y, x, 1:9]
ratio_fr = label[y, x, 9:17]
ratio_lp, ratio_fr = np.reshape(ratio_lp, (4, 2)), np.reshape(ratio_fr, (4, 2))
# base vectors from br and clock-wise
base_vector = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]])
# predicted vertices -> [[x1, y1], [x2, y2], [x3, y3], [x4, y4]]
for ratio in [ratio_lp, ratio_fr]:
vertices = base_vector * ratio # shape = [4, 2]
vertices = vertices * side
vertices += now_pixel
vertices *= stride
vertices *= np.array([ori_image_shape[1] / (1. * out_w * stride),
ori_image_shape[0] / (1. * out_h * stride)])
vertices = vertices.astype(int)
# clip to the original image size
for pts in vertices:
pts[0] = np.clip(pts[0], 0, ori_image_shape[1])
pts[1] = np.clip(pts[1], 0, ori_image_shape[0])
single_label.append(vertices)
fr_class_prob = label[y, x, 17:20]
fr_class = np.argmax(fr_class_prob)
single_label.append([fr_class, fr_class_prob[fr_class]])
label_to_origin.append(single_label)
if use_nms:
label_to_origin = nms(label_to_origin)
return label_to_origin
# nms function, labels -> a list of labels, its element is [probability, vertex_predicted_lp, vertex_predicted_fr,
# [fr_class, class_prob]]
def nms(labels, threshold=0.1, nms_ref='fr'):
labels.sort(key=lambda x: x[0], reverse=True)
labels = deque(labels)
labels_nms = []
assert nms_ref in ['lp', 'fr'], 'NMS reference must be lp or fr!'
if nms_ref in ['lp']:
index = 1
elif nms_ref in ['fr']:
index = 2
while len(labels) > 0:
now_handle = labels.popleft()
overlap = False
for label_nms in labels_nms:
if IoU(pts_to_BBCor(*now_handle[index]), pts_to_BBCor(*label_nms[index])) > threshold:
overlap = True
break
if not overlap:
labels_nms.append(now_handle)
return labels_nms
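# --- Hedged usage sketch (added for illustration, not part of the original pipeline) ---
# Synthetic end-to-end decoding example, assuming numpy (np), deque, IoU and pts_to_BBCor
# are available in this module, as they already are for the functions above. A real label
# would come from the network; here a single output cell is filled by hand so that
# predicted_label_to_origin_image_Vernex_lpfr plus nms can be exercised.
def _demo_decode_vernex_lpfr():
    out_size, stride, side = 16, 4, 3.5
    label = np.zeros((out_size, out_size, 20))   # prob, 8 lp ratios, 8 fr ratios, 3 class scores
    label[8, 8, 0] = 0.95                        # objectness at cell (8, 8)
    label[8, 8, 1:9] = 0.5                       # lp vertex ratios
    label[8, 8, 9:17] = 1.0                      # fr vertex ratios
    label[8, 8, 17:20] = [0.1, 0.8, 0.1]         # class scores: bg / front / rear
    detections = predicted_label_to_origin_image_Vernex_lpfr(
        (480, 640, 3), label, stride, prob_threshold=0.9, use_nms=True, side=side)
    for prob, lp_vertices, fr_vertices, (fr_class, class_prob) in detections:
        print(prob, lp_vertices.tolist(), fr_vertices.tolist(), fr_class, class_prob)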
|
a = (
1,
<caret>2,
)
|
# The MIT License (MIT)
#
# Copyright (c) 2015 Brian Wray (brian@wrocket.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import subprocess
import json
import unittest
def call_tulip(args):
cmd = ['../../src/tulip']
cmd.extend(args)
out = subprocess.check_output(cmd)
return out.decode('utf-8')
class TestBasicMoveApplication(unittest.TestCase):
def setUp(self):
pass
def print_hex(self, n):
h = hex(n).replace('0x', '')
zeros = '0' * (16 - len(h))
return zeros + h.upper()
def validate_state(self, json):
state = json['resultingState']
adjusted_hash = state['hash']
calculated_hash = state['recalculatedHash']
self.assertEqual(adjusted_hash, calculated_hash, 'Hash value computed from the move and the value computed "from scratch" differ.')
return state
def make_move(self, fen, move):
result = call_tulip(['-makemove', move, fen])
parsed_output = json.loads(result)
return self.validate_state(parsed_output)
def null_move(self, fen):
result = call_tulip(['-nullmove', fen])
parsed_output = json.loads(result)
return self.validate_state(parsed_output)
def test_initial_position_nf3(self):
result = self.make_move('rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1', 'g1f3')
self.assertEqual('black', result['toMove'])
self.assertEqual(1, result['halfMoveCount'])
self.assertEqual(1, result['fiftyMoveCount'])
board = result['board']
self.assertEqual(32, len(board.keys()))
self.assertTrue('g1' not in board.keys())
self.assertEqual('N', board['f3'])
self.assertEqual('0000000000200002', result['bitboards']['N'])
self.assertEqual('0000FFFFFFDF0040', result['bitboards']['-'])
self.assertEqual('none', result['epFile'])
def test_initial_position_e2e4(self):
result = self.make_move('rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1', 'e2e4')
self.assertEqual('black', result['toMove'])
self.assertEqual(1, result['halfMoveCount'])
self.assertEqual(0, result['fiftyMoveCount'])
board = result['board']
self.assertEqual(32, len(board.keys()))
self.assertTrue('e2' not in board.keys())
self.assertEqual('P', board['e4'])
self.assertEqual('000000001000EF00', result['bitboards']['P'])
self.assertEqual('0000FFFFEFFF1000', result['bitboards']['-'])
self.assertEqual('e', result['epFile'])
def test_basic_capture_01(self):
result = self.make_move('8/4k3/2r5/8/1N2K3/8/8/8 w - - 13 1', 'b4c6') # note the fifty-move count of 13
board = result['board']
bitboards = result['bitboards']
piece_counts = result['pieceCounts']
self.assertEqual(0, result['fiftyMoveCount']) # reset the fifty-move count on capture
self.assertEqual(3, len(board))
self.assertTrue('b4' not in board.keys())
self.assertEqual('N', board['c6'])
self.assertTrue('r' not in board.values())
self.assertEqual(0, piece_counts['r'])
self.assertEqual(1, piece_counts['N'])
self.assertEqual(2, result['whitePieceCount'])
self.assertEqual(1, result['blackPieceCount'])
self.assertEqual(61, piece_counts['-'])
self.assertEqual('0000000000000000', bitboards['n'])
self.assertEqual('0000040000000000', bitboards['N'])
self.assertEqual('FFEFFBFFEFFFFFFF', bitboards['-'])
self.assertEqual('none', result['epFile'])
def test_move_pawn_resets_counter(self):
result = self.make_move('8/4k3/2r5/8/1P2K3/8/8/8 w - - 26 1', 'b4b5')
self.assertEqual(0, result['fiftyMoveCount'])
def test_white_castle_kingside(self):
result = self.make_move('r3k2r/pppppppp/8/8/8/8/PPPPPPPP/R3K2R w KQkq - 0 1', 'e1g1')
board = result['board']
bitboards = result['bitboards']
self.assertEqual(22, len(board))
self.assertEqual('R', board['f1'])
self.assertEqual('R', board['a1'])
self.assertEqual('K', board['g1'])
self.assertTrue('h1' not in board.keys())
self.assertFalse(result['castleWhiteKingside'])
self.assertFalse(result['castleWhiteQueenside'])
self.assertTrue(result['castleBlackKingside'])
self.assertTrue(result['castleBlackQueenside'])
self.assertEqual('0000000000000021', bitboards['R'])
self.assertEqual('0000000000000040', bitboards['K'])
self.assertEqual('6E00FFFFFFFF009E', bitboards['-'])
self.assertEqual('g1', result['whiteKingSquare'])
self.assertEqual('e8', result['blackKingSquare'])
def test_white_castle_queenside(self):
result = self.make_move('r3k2r/pppppppp/8/8/8/8/PPPPPPPP/R3K2R w KQkq - 0 1', 'e1c1')
board = result['board']
bitboards = result['bitboards']
self.assertEqual(22, len(board))
self.assertEqual('R', board['h1'])
self.assertEqual('R', board['d1'])
self.assertEqual('K', board['c1'])
self.assertTrue('a1' not in board.keys())
self.assertFalse(result['castleWhiteKingside'])
self.assertFalse(result['castleWhiteQueenside'])
self.assertTrue(result['castleBlackKingside'])
self.assertTrue(result['castleBlackQueenside'])
self.assertEqual('0000000000000088', bitboards['R'])
self.assertEqual('0000000000000004', bitboards['K'])
self.assertEqual('6E00FFFFFFFF0073', bitboards['-'])
self.assertEqual('c1', result['whiteKingSquare'])
self.assertEqual('e8', result['blackKingSquare'])
def test_black_castle_queenside(self):
result = self.make_move('r3k2r/pppppppp/8/8/8/8/PPPPPPPP/R3K2R b KQkq - 0 1', 'e8c8')
board = result['board']
bitboards = result['bitboards']
self.assertEqual(22, len(board))
self.assertEqual('r', board['h8'])
self.assertEqual('r', board['d8'])
self.assertEqual('k', board['c8'])
self.assertTrue('a8' not in board.keys())
self.assertTrue(result['castleWhiteKingside'])
self.assertTrue(result['castleWhiteQueenside'])
self.assertFalse(result['castleBlackKingside'])
self.assertFalse(result['castleBlackQueenside'])
self.assertEqual('8800000000000000', bitboards['r'])
self.assertEqual('0400000000000000', bitboards['k'])
self.assertEqual('7300FFFFFFFF006E', bitboards['-'])
self.assertEqual('e1', result['whiteKingSquare'])
self.assertEqual('c8', result['blackKingSquare'])
def test_black_castle_kingside(self):
result = self.make_move('r3k2r/pppppppp/8/8/8/8/PPPPPPPP/R3K2R b KQkq - 0 1', 'e8g8')
board = result['board']
bitboards = result['bitboards']
self.assertEqual(22, len(board))
self.assertEqual('r', board['a8'])
self.assertEqual('r', board['f8'])
self.assertEqual('k', board['g8'])
self.assertTrue('h8' not in board.keys())
self.assertTrue(result['castleWhiteKingside'])
self.assertTrue(result['castleWhiteQueenside'])
self.assertFalse(result['castleBlackKingside'])
self.assertFalse(result['castleBlackQueenside'])
self.assertEqual('2100000000000000', bitboards['r'])
self.assertEqual('4000000000000000', bitboards['k'])
self.assertEqual('9E00FFFFFFFF006E', bitboards['-'])
self.assertEqual('e1', result['whiteKingSquare'])
self.assertEqual('g8', result['blackKingSquare'])
def test_white_move_rook_kingside(self):
result = self.make_move('r3k2r/pppppppp/8/8/8/8/PPPPPPPP/R3K2R w KQkq - 0 1', 'h1g1')
board = result['board']
bitboards = result['bitboards']
self.assertEqual(22, len(board))
self.assertEqual('R', board['g1'])
self.assertEqual('R', board['a1'])
self.assertEqual('K', board['e1'])
self.assertTrue('h1' not in board.keys())
self.assertFalse(result['castleWhiteKingside'])
self.assertTrue(result['castleWhiteQueenside'])
self.assertTrue(result['castleBlackKingside'])
self.assertTrue(result['castleBlackQueenside'])
self.assertEqual('0000000000000041', bitboards['R'])
self.assertEqual('0000000000000010', bitboards['K'])
self.assertEqual('6E00FFFFFFFF00AE', bitboards['-'])
def test_white_move_rook_queenside(self):
result = self.make_move('r3k2r/pppppppp/8/8/8/8/PPPPPPPP/R3K2R w KQkq - 0 1', 'a1b1')
board = result['board']
bitboards = result['bitboards']
self.assertEqual(22, len(board))
self.assertEqual('R', board['h1'])
self.assertEqual('R', board['b1'])
self.assertEqual('K', board['e1'])
self.assertTrue('a1' not in board.keys())
self.assertTrue(result['castleWhiteKingside'])
self.assertFalse(result['castleWhiteQueenside'])
self.assertTrue(result['castleBlackKingside'])
self.assertTrue(result['castleBlackQueenside'])
self.assertEqual('0000000000000082', bitboards['R'])
self.assertEqual('0000000000000010', bitboards['K'])
self.assertEqual('6E00FFFFFFFF006D', bitboards['-'])
def test_black_move_rook_kingside(self):
result = self.make_move('r3k2r/pppppppp/8/8/8/8/PPPPPPPP/R3K2R b KQkq - 0 1', 'h8g8')
board = result['board']
bitboards = result['bitboards']
self.assertEqual(22, len(board))
self.assertEqual('r', board['g8'])
self.assertEqual('r', board['a8'])
self.assertEqual('k', board['e8'])
self.assertTrue('h8' not in board.keys())
self.assertTrue(result['castleWhiteKingside'])
self.assertTrue(result['castleWhiteQueenside'])
self.assertFalse(result['castleBlackKingside'])
self.assertTrue(result['castleBlackQueenside'])
self.assertEqual('4100000000000000', bitboards['r'])
self.assertEqual('1000000000000000', bitboards['k'])
self.assertEqual('AE00FFFFFFFF006E', bitboards['-'])
def test_black_move_rook_queenside(self):
result = self.make_move('r3k2r/pppppppp/8/8/8/8/PPPPPPPP/R3K2R b KQkq - 0 1', 'a8b8')
board = result['board']
bitboards = result['bitboards']
self.assertEqual(22, len(board))
self.assertEqual('r', board['b8'])
self.assertEqual('r', board['h8'])
self.assertEqual('k', board['e8'])
self.assertTrue('a8' not in board.keys())
self.assertTrue(result['castleWhiteKingside'])
self.assertTrue(result['castleWhiteQueenside'])
self.assertTrue(result['castleBlackKingside'])
self.assertFalse(result['castleBlackQueenside'])
self.assertEqual('8200000000000000', bitboards['r'])
self.assertEqual('1000000000000000', bitboards['k'])
self.assertEqual('6D00FFFFFFFF006E', bitboards['-'])
def test_promote_pawn_white_q_no_capture(self):
result = self.make_move('8/5P2/8/8/1k6/8/2K5/8 w - - 0 1', 'f7f8=q')
board = result['board']
bitboards = result['bitboards']
piece_counts = result['pieceCounts']
self.assertEqual(3, len(board))
self.assertEqual('Q', board['f8'])
self.assertEqual('k', board['b4'])
self.assertEqual('K', board['c2'])
self.assertEqual('2000000000000000', bitboards['Q'])
self.assertEqual('0000000000000000', bitboards['P'])
self.assertEqual('DFFFFFFFFDFFFBFF', bitboards['-'])
self.assertEqual(0, piece_counts['P'])
self.assertEqual(1, piece_counts['Q'])
def test_promote_pawn_white_n_no_capture(self):
result = self.make_move('8/5P2/8/8/1k6/8/2K5/8 w - - 0 1', 'f7f8=n')
board = result['board']
bitboards = result['bitboards']
piece_counts = result['pieceCounts']
self.assertEqual(3, len(board))
self.assertEqual('N', board['f8'])
self.assertEqual('k', board['b4'])
self.assertEqual('K', board['c2'])
self.assertEqual('2000000000000000', bitboards['N'])
self.assertEqual('0000000000000000', bitboards['P'])
self.assertEqual('0000000000000000', bitboards['Q'])
self.assertEqual('DFFFFFFFFDFFFBFF', bitboards['-'])
self.assertEqual(0, piece_counts['P'])
self.assertEqual(1, piece_counts['N'])
def test_promote_pawn_white_q_with_capture(self):
result = self.make_move('5r2/6P1/8/8/1k6/8/2K5/8 w - - 0 1', 'g7f8=q')
board = result['board']
bitboards = result['bitboards']
piece_counts = result['pieceCounts']
self.assertEqual(3, len(board))
self.assertEqual('Q', board['f8'])
self.assertEqual('k', board['b4'])
self.assertEqual('K', board['c2'])
self.assertEqual('2000000000000000', bitboards['Q'])
self.assertEqual('0000000000000000', bitboards['P'])
self.assertEqual('0000000000000000', bitboards['n'])
self.assertEqual('DFFFFFFFFDFFFBFF', bitboards['-'])
self.assertEqual(0, piece_counts['P'])
self.assertEqual(0, piece_counts['r'])
self.assertEqual(1, piece_counts['Q'])
def test_promote_pawn_black_q_no_capture(self):
result = self.make_move('8/8/8/8/1k6/8/2K2p2/8 b - - 0 1', 'f2f1=q')
board = result['board']
bitboards = result['bitboards']
piece_counts = result['pieceCounts']
self.assertEqual(3, len(board))
self.assertEqual('q', board['f1'])
self.assertEqual('k', board['b4'])
self.assertEqual('K', board['c2'])
self.assertEqual('0000000000000020', bitboards['q'])
self.assertEqual('0000000000000000', bitboards['p'])
self.assertEqual('FFFFFFFFFDFFFBDF', bitboards['-'])
self.assertEqual(0, piece_counts['p'])
self.assertEqual(1, piece_counts['q'])
def test_promote_pawn_black_n_no_capture(self):
result = self.make_move('8/8/8/8/1k6/8/2K2p2/8 b - - 0 1', 'f2f1=n')
board = result['board']
bitboards = result['bitboards']
piece_counts = result['pieceCounts']
self.assertEqual(3, len(board))
self.assertEqual('n', board['f1'])
self.assertEqual('k', board['b4'])
self.assertEqual('K', board['c2'])
self.assertEqual('0000000000000020', bitboards['n'])
self.assertEqual('0000000000000000', bitboards['p'])
self.assertEqual('FFFFFFFFFDFFFBDF', bitboards['-'])
self.assertEqual(0, piece_counts['p'])
self.assertEqual(1, piece_counts['n'])
def test_promote_pawn_black_q_with_capture(self):
result = self.make_move('8/8/8/8/1k6/8/2K3p1/5R2 b - - 0 1', 'g2f1=q')
board = result['board']
bitboards = result['bitboards']
piece_counts = result['pieceCounts']
self.assertEqual(3, len(board))
self.assertEqual('q', board['f1'])
self.assertEqual('k', board['b4'])
self.assertEqual('K', board['c2'])
self.assertEqual('0000000000000020', bitboards['q'])
self.assertEqual('0000000000000000', bitboards['p'])
self.assertEqual('0000000000000000', bitboards['R'])
self.assertEqual('FFFFFFFFFDFFFBDF', bitboards['-'])
self.assertEqual(0, piece_counts['p'])
self.assertEqual(0, piece_counts['R'])
self.assertEqual(1, piece_counts['q'])
def test_enpassant_white(self):
result = self.make_move('4k3/8/8/3Pp3/8/8/8/4K3 w - e6 0 1', 'd5e6')
board = result['board']
bitboards = result['bitboards']
piece_counts = result['pieceCounts']
self.assertEqual(3, len(board))
self.assertEqual(0, piece_counts['p'])
self.assertEqual(1, piece_counts['P'])
self.assertEqual('0000000000000000', bitboards['p'])
self.assertEqual('0000100000000000', bitboards['P'])
self.assertEqual('EFFFEFFFFFFFFFEF', bitboards['-'])
self.assertEqual('none', result['epFile'])
self.assertEqual(61, piece_counts['-'])
def test_enpassant_black(self):
result = self.make_move('4k3/8/8/8/3pP3/8/8/4K3 b - e3 0 1', 'd4e3')
board = result['board']
bitboards = result['bitboards']
piece_counts = result['pieceCounts']
self.assertEqual(3, len(board))
self.assertEqual(0, piece_counts['P'])
self.assertEqual(1, piece_counts['p'])
self.assertEqual('0000000000100000', bitboards['p'])
self.assertEqual('0000000000000000', bitboards['P'])
self.assertEqual('EFFFFFFFFFEFFFEF', bitboards['-'])
self.assertEqual('none', result['epFile'])
self.assertEqual(61, piece_counts['-'])
def test_null_move_epfile(self):
result = self.null_move('rnbqkbnr/pppppppp/8/8/4P3/8/PPPP1PPP/RNBQKBNR b KQkq e3 0 1')
self.assertEqual('none', result['epFile'])
self.assertEqual('white', result['toMove'])
def test_hash_simple_move(self):
result = self.make_move('4k3/8/8/8/8/1Q6/8/4K3 w - - 0 1', 'b3c4')
orig_hash = 0x41E9B629C07711F8
wqueen_b3 = 0xb23fe2851af11c0b
wqueen_c4 = 0x4cf17ca889590e6e
empty_b3 = 0x577f452b5eb3fc01
empty_c4 = 0x620a3972d8fd6daf
white_to_move = 0x77e554c3ddafb8c6
mask = wqueen_c4 ^ wqueen_b3 ^ empty_c4 ^ empty_b3 ^ white_to_move
self.assertEqual(self.print_hex(orig_hash ^ mask), result['hash'])
def test_hash_white_kingside_castle(self):
result = self.make_move('4k3/8/8/8/8/8/8/4K2R w K - 0 1', 'e1g1')
orig_hash = 0xD3841F3775536C23
mask = 0xc8df942dcdd9d3e3 # K on e1
mask ^= 0x5165bdb57f3e5d48 # K on g1
mask ^= 0xca56b19fbc285239 # Empty on e1
mask ^= 0xd6a2781d1760be4e # Empty on g1
mask ^= 0xe9f98801eded53f7 # Rook on h1
mask ^= 0x394345456ac4fb80 # Rook on f1
mask ^= 0x381c599bd1a38fd8 # Empty on h1
mask ^= 0x70d7c44806003548 # Empty on f1
mask ^= 0x77e554c3ddafb8c6 # White to move
mask ^= 0x7ac3fac33fa2a123 # Original K castle flags
mask ^= 0xdc0b25e9f28ae0dd # The "no" castle flag
desired_hash = orig_hash ^ mask
self.assertEqual(self.print_hex(desired_hash), result['hash'])
def test_hash_white_queenside_castle(self):
result = self.make_move('4k3/8/8/8/8/8/8/R3K3 w Q - 0 1', 'e1c1')
orig_hash = 0x3020CDC8EB4B3031
mask = 0xc8df942dcdd9d3e3 # K on E1
mask ^= 0x63dab1c7ac4a2570 # K on C1
mask ^= 0xca56b19fbc285239 # Empty on E1
mask ^= 0x0c5d579da3336099 # Empty on C1
mask ^= 0xfc0f0e854e8a0fcf # Rook on A1
mask ^= 0x17d426030a67bc7c # Rook on D1
mask ^= 0x82c3e240217eb87c # empty on A1
mask ^= 0x0d25aae264535842 # empty on D1
mask ^= 0x77e554c3ddafb8c6 # White to move
mask ^= 0x364e1563f20096ad # Original Q castle flag
mask ^= 0xdc0b25e9f28ae0dd # The "no" castle flag
desired_hash = orig_hash ^ mask
self.assertEqual(self.print_hex(desired_hash), result['hash'])
def test_hash_black_kingside_castle(self):
result = self.make_move('4k2r/8/8/8/8/8/8/4K3 b k - 0 1', 'e8g8')
orig_hash = 0xB87D98648B2ED9E9
mask = 0x79ab469f2417a80c # K on e8
mask ^= 0x498757567d0304f6 # K on g8
mask ^= 0xe88948c88c965e82 # Empty on e8
mask ^= 0x6e9b57f33fde478c # Empty on g8
mask ^= 0x1c8a15dfafd8d934 # Rook on h8
mask ^= 0x66c959bbb905a316 # Rook on f8
mask ^= 0x5e8d1435579d024a # Empty on h8
mask ^= 0x57cd66545b16183c # Empty on f8
mask ^= 0x77e554c3ddafb8c6 # White to move
mask ^= 0xf53df923d87bab7e # Original k castle flag
mask ^= 0xdc0b25e9f28ae0dd # The "no" castle flag
desired_hash = orig_hash ^ mask
self.assertEqual(self.print_hex(desired_hash), result['hash'])
def test_hash_black_queenside_castle(self):
result = self.make_move('r3k3/8/8/8/8/8/8/4K3 b q - 0 1', 'e8c8')
orig_hash = 0xFD3E3E716D786956
mask = 0x79ab469f2417a80c # K on e8
mask ^= 0x2c09faad8766ee13 # K on c8
mask ^= 0xe88948c88c965e82 # Empty on e8
mask ^= 0x44e15261d0800493 # Empty on c8
mask ^= 0x04325c381abf132e # Rook on a8
mask ^= 0xd9653edc79fd29b7 # Rook on d8
mask ^= 0xf0aa7d665aa3a8fa # Empty on a8
mask ^= 0xd53bcbf41a7b69ba # Empty on d8
mask ^= 0x77e554c3ddafb8c6 # White to move
mask ^= 0x06e17f8286747b6b # Original q castle flag
mask ^= 0xdc0b25e9f28ae0dd # The "no" castle flag
desired_hash = orig_hash ^ mask
self.assertEqual(self.print_hex(desired_hash), result['hash'])
def test_hash_white_kingside_rookmove(self):
result = self.make_move('4k3/8/8/8/8/8/8/R3K2R w KQ - 0 1', 'h1h2')
orig_hash = 0x289B2A5081C378CC
mask = 0xe9f98801eded53f7 # R on h1
mask ^= 0x381c599bd1a38fd8 # empty on h1
mask ^= 0x76ae5b205f2a0ce5 # R on h2
mask ^= 0x030bbce86b62bf3a # empty on h2
mask ^= 0xff102361a4c6027f # Original KQ castle flags
mask ^= 0x364e1563f20096ad # The Q only castle flag
mask ^= 0x77e554c3ddafb8c6 # White to move
desired_hash = orig_hash ^ mask
self.assertEqual(self.print_hex(desired_hash), result['hash'])
def test_hash_white_queenside_rookmove(self):
result = self.make_move('4k3/8/8/8/8/8/8/R3K2R w KQ - 0 1', 'a1a2')
orig_hash = 0x289B2A5081C378CC
mask = 0xfc0f0e854e8a0fcf # R on a1
mask ^= 0x82c3e240217eb87c # empty on a1
mask ^= 0x6b42ccbfd1b71e51 # R on a2
mask ^= 0x475bf0f7b7e0c7a9 # empty on a2
mask ^= 0xff102361a4c6027f # Original KQ castle flags
mask ^= 0x7ac3fac33fa2a123 # The K only castle flag
mask ^= 0x77e554c3ddafb8c6 # White to move
desired_hash = orig_hash ^ mask
self.assertEqual(self.print_hex(desired_hash), result['hash'])
def test_hash_black_kingside_rookmove(self):
result = self.make_move('r3k2r/8/8/8/8/8/8/4K3 b kq - 0 1', 'h8h7')
orig_hash = 0xDF96DE76A42F81CD
mask = 0x1c8a15dfafd8d934 # R on h8
mask ^= 0x5e8d1435579d024a # empty on h8
mask ^= 0xec99b3b03f612fe3 # R on h7
mask ^= 0xcd241170c085def9 # empty on h7
mask ^= 0x664e9e6fb766488e # Original kq castle flags
mask ^= 0x06e17f8286747b6b # The q only castle flag
mask ^= 0x77e554c3ddafb8c6 # White to move
desired_hash = orig_hash ^ mask
self.assertEqual(self.print_hex(desired_hash), result['hash'])
def test_hash_black_queenside_rookmove(self):
result = self.make_move('r3k2r/8/8/8/8/8/8/4K3 b kq - 0 1', 'a8a7')
orig_hash = 0xDF96DE76A42F81CD
mask = 0x04325c381abf132e # R on a8
mask ^= 0xf0aa7d665aa3a8fa # empty on a8
mask ^= 0xcf3ae6995915a1e8 # R on a7
mask ^= 0xcfa2125d8bc81998 # empty on a7
mask ^= 0x664e9e6fb766488e # Original kq castle flags
mask ^= 0xf53df923d87bab7e # The k only castle flag
mask ^= 0x77e554c3ddafb8c6 # White to move
desired_hash = orig_hash ^ mask
self.assertEqual(self.print_hex(desired_hash), result['hash'])
if __name__ == '__main__':
unittest.main()
|
import gzip
import os
import argparse
import re
__author__ = "Peter Chovanec"
def parse_args():
parser = argparse.ArgumentParser(description='Remove short barcodes')
parser.add_argument('--r1', dest='read_1', type=str, required=True,
help='Fastq read 1')
opts = parser.parse_args()
return opts
def main():
#argparse
opts = parse_args()
read_1_path = opts.read_1
full_out_path = os.path.splitext(os.path.splitext(read_1_path)[0])[0] + '_full.fastq.gz'
short_out_path = os.path.splitext(os.path.splitext(read_1_path)[0])[0] + '_short.fastq.gz'
full_count = 0
incomplete = 0
pattern = re.compile(r'\[([a-zA-Z0-9_\-]+)\]')
with file_open(read_1_path) as read_1, \
gzip.open(full_out_path, 'wt') as dpm_out, \
gzip.open(short_out_path, 'wt') as short_out:
for qname, seq, thrd, qual in fastq_parse(read_1):
barcodes = pattern.findall(qname)
if 'NOT_FOUND' in barcodes:
incomplete += 1
short_out.write(qname + '\n' + seq + '\n' + thrd + '\n' + qual + '\n')
else:
full_count += 1
dpm_out.write(qname + '\n' + seq + '\n' + thrd + '\n' + qual + '\n')
print('Reads without full barcode:', incomplete)
print('Full reads out:', full_count)
def file_open(filename):
"""
Open as normal or as gzip
Faster using zcat?
"""
#does file exist?
f = open(filename,'rb')
if f.read(2) == b'\x1f\x8b':  # gzip-compressed files always start with these two bytes
f.seek(0) #return to start of file
return gzip.GzipFile(fileobj=f, mode='rb')
else:
f.seek(0)
return f
def fastq_parse(fp):
"""
Parse fastq file.
"""
linecount = 0
name, seq, thrd, qual = [None] * 4
for line in fp:
linecount += 1
if linecount % 4 == 1:
try:
name = line.decode('UTF-8').rstrip()
except AttributeError:
name = line.rstrip()
assert name.startswith('@'),\
"ERROR: The 1st line in fastq element does not start with '@'.\n\
Please check FastQ file near line number %s" % (linecount)
elif linecount % 4 == 2:
try:
seq = line.decode('UTF-8').rstrip()
except AttributeError:
seq = line.rstrip()
elif linecount % 4 == 3:
try:
thrd = line.decode('UTF-8').rstrip()
except AttributeError:
thrd = line.rstrip()
assert thrd.startswith('+'),\
"ERROR: The 3st line in fastq element does not start with '+'.\n\
Please check FastQ file near line number %s" % (linecount)
elif linecount % 4 == 0:
try:
qual = line.decode('UTF-8').rstrip()
except AttributeError:
qual = line.rstrip()
assert len(seq) == len(qual),\
"ERROR: The length of Sequence and Quality aren't equal.\n\
Please check FastQ file near line number %s" % (linecount)
yield name, seq, thrd, qual,
name, seq, thrd, qual = [None] * 4
if __name__ == "__main__":
main()
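# Hedged usage note (the script and file names below are placeholders, not from the original source):
#   python remove_incomplete_barcodes.py --r1 sample_R1.fastq.gz
# writes sample_R1_full.fastq.gz (reads whose header barcodes were all found) and
# sample_R1_short.fastq.gz (reads whose header contains a NOT_FOUND barcode).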
|
VERSION='1.0.2'
|
from compas_slicer.slicers import BaseSlicer
from compas_slicer.slicers.curved_slicing import find_desired_number_of_isocurves
import logging
from compas_slicer.slicers.curved_slicing import IsocurvesGenerator
from compas_slicer.parameters import get_param
logger = logging.getLogger('logger')
__all__ = ['CurvedSlicer']
class CurvedSlicer(BaseSlicer):
"""
Generates non-planar contours that interpolate user-defined boundaries.
Attributes
----------
mesh : :class: 'compas.datastructures.Mesh'
Input mesh, it must be a triangular mesh (i.e. no quads or n-gons allowed)
Note that the topology of the mesh matters; irregular tessellation can lead to undesired results.
We recommend that you 1) retopologize, 2) triangulate, and 3) weld your mesh in advance.
preprocessor : :class: 'compas_slicer.pre_processing.CurvedSlicingPreprocessor'
parameters : dict
"""
def __init__(self, mesh, preprocessor=None, parameters=None):
BaseSlicer.__init__(self, mesh)
if preprocessor:
# make sure the mesh of the preprocessor and the mesh of the slicer match
assert len(list(mesh.vertices())) == len(list(preprocessor.mesh.vertices()))
self.parameters = parameters
self.preprocessor = preprocessor
self.n_multiplier = 1.0
def generate_paths(self):
""" Generates curved paths-polylines. """
assert self.preprocessor, 'You need to provide a pre-processor in order to generate paths.'
assert self.parameters, 'You need to provide a parameters dict in order to generate paths.'
avg_layer_height = get_param(self.parameters, key='avg_layer_height', defaults_type='curved_slicing')
n = find_desired_number_of_isocurves(self.preprocessor.target_LOW, self.preprocessor.target_HIGH,
avg_layer_height)
logger.info('%d paths will be generated' % n)
isocurves_generator = IsocurvesGenerator(self.mesh, self.preprocessor.target_LOW,
self.preprocessor.target_HIGH, n * self.n_multiplier)
self.layers = isocurves_generator.segments
if __name__ == "__main__":
pass
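# Hedged usage sketch (needs a triangular compas Mesh plus a prepared
# CurvedSlicingPreprocessor with target_LOW/target_HIGH, so it is left in outline only):
# slicer = CurvedSlicer(mesh, preprocessor=preprocessor,
#                       parameters={'avg_layer_height': 2.0})
# slicer.generate_paths()
# print(len(slicer.layers), 'isocurve segments generated')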
|
import Levenshtein as Lev
def cer_single(s1, s2):
"""
Computes the character-level edit distance used for CER (spaces are ignored).
Arguments:
s1 (string): space-separated sentence
s2 (string): space-separated sentence
"""
s1, s2 = s1.replace(" ", ""), s2.replace(" ", "")
return Lev.distance(s1, s2)
def calculate_cer(source, target):
try:
cer_local = cer_single(source, target)
except Exception:
return len(source.replace(" ", ""))
return cer_local
def cer_for_evaluate(ground_truth, predictions):
num_tokens = []
fcer = []
dict_gt = {}
for item in ground_truth:
gt_file_name = item["text_file_name"].split("/")[-1].split(".")[0]
dict_gt[gt_file_name] = item
for pred in predictions:
pred_file_name = pred["file"].split("/")[-1].split(".")[0]
gt = dict_gt[pred_file_name]
fcer.append(cer_single(gt["text_file_content"], pred["transcription"]))
num_tokens.append(len(gt["text_file_content"].replace(" ", "")))
cer = sum(fcer) / sum(num_tokens) * 100
return cer
def cer(ground_truth, predictions):
num_chars = []
fcer = []
for gt, pred in zip(ground_truth, predictions):
fcer.append(cer_single(gt, pred))
num_chars.append(len(gt.replace(" ", "")))
cer = sum(fcer) / sum(num_chars) * 100
return cer
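# --- Hedged usage example (added for illustration, not part of the original module) ---
# Corpus-level CER with the helpers above: "helo wrld" differs from "hello world"
# by two character edits over ten non-space characters, i.e. 20%.
if __name__ == '__main__':
    print(cer(["hello world"], ["helo wrld"]))  # -> 20.0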
|
#!/bin/env python
from app import create_app, socketio
from flask_debugtoolbar import DebugToolbarExtension
app = create_app(debug=True)
toolbar = DebugToolbarExtension(app)
if __name__ == '__main__':
socketio.run(app, host=app.config['WEB_IP'])
# End File: chat.py
|
def swap_case(s):
s1 = ''
for i in s:
if i.isupper():
s1 = s1 + i.lower()
elif i.islower():
s1 = s1 + i.upper()
else:
s1 = s1 + i
return s1
if __name__ == '__main__':
s = input()
result = swap_case(s)
print(result)
|
import json
import pandas as pd
from .base import MultiLevelComputer, STANDARDS_FILE
from .ssrtmodel import SSRTmodel
from .sequence import PostStopSlow, Violations
class StopSummary(MultiLevelComputer):
def __init__(self, ssrt_model='replacement',
pss_correct_go_only=True, pss_filter_columns=True,
violations_mean_thresh=200, violations_ssd_quantity_thresh=5,
violations_n_pair_thresh=2, violations_verbose=False):
super().__init__()
self._SSRTmodel = SSRTmodel(model=ssrt_model)
self._PostStopSlow = PostStopSlow(
correct_go_only=pss_correct_go_only,
filter_columns=pss_filter_columns)
self._Violations = Violations(
mean_thresh=violations_mean_thresh,
ssd_quantity_thresh=violations_ssd_quantity_thresh,
n_pair_thresh=violations_n_pair_thresh,
verbose=violations_verbose)
self.args = {
'ssrt_model': ssrt_model,
'pss_correct_go_only': pss_correct_go_only,
'pss_filter_columns': pss_filter_columns,
'violations_mean_thresh': violations_mean_thresh,
'violations_ssd_quantity_thresh': violations_ssd_quantity_thresh,
'violations_n_pair_thresh': violations_n_pair_thresh,
'violations_verbose': violations_verbose
}
with open(STANDARDS_FILE) as json_file:
standards = json.load(json_file)
self._cols = standards['columns']
self._codes = standards['key_codes']
def _fit_individual(self, data_df):
"""Calculate all available metrics for an individual."""
self._raw_data = data_df.copy()
metrics = self._SSRTmodel.fit_transform(self._raw_data).copy()
metrics['post_stop_slow'] = self._get_mean_pss()
metrics['post_stop_success_slow'] = self._get_mean_pss(
stop_type='success')
metrics['post_stop_fail_slow'] = self._get_mean_pss(
stop_type='fail')
metrics['mean_violation'] = self._Violations.fit(
self._raw_data).get_mean_below_thresh()
self._transformed_data = metrics.copy()
def _fit_group(self, data_df):
"""Calculate all available metrics for a group."""
self._raw_data = data_df.copy()
summary_helper = StopSummary(**self.args)
metrics = self._raw_data.groupby('ID').apply(
summary_helper.fit_transform).apply(pd.Series)
if self._SSRTmodel.model == 'all':
metrics = pd.concat(
[metrics['SSRT'].apply(pd.Series).add_prefix('SSRT_'), metrics],
axis=1)
del metrics['SSRT']
self._transformed_data = metrics.copy()
def _get_mean_pss(self, stop_type='all'):
"""Get a subject's mean PSS, after fitting to a stop type."""
return self._PostStopSlow.fit(self._raw_data,
stop_type=stop_type).get_mean_pss()
|
from typing import List
import requests
from iexcloud.constants import IEX_CLOUD, IEX_TOKEN
class Reference(object):
def __init__(self):
self.msg_limit: int = self.get_msg_limit()
self.msg_used: int = self.get_msg_used()
self.msg_balance: int = self.msg_limit - self.msg_used
self.symbols: List[str] = self.get_symbols()
@staticmethod
def get_msg_limit() -> int:
"""https://iexcloud.io/docs/api/#metadata
Parameters
----------
Returns
-------
"""
api_url = f"{IEX_CLOUD}/account/metadata?token={IEX_TOKEN}"
response = requests.get(api_url)
if response:
return response.json()["messageLimit"]
@staticmethod
def get_msg_used() -> int:
"""https://iexcloud.io/docs/api/#metadata
Parameters
----------
Returns
-------
"""
api_url = f"{IEX_CLOUD}/account/metadata?token={IEX_TOKEN}"
response = requests.get(api_url)
if response:
return response.json()["messagesUsed"]
@staticmethod
def get_symbols():
api_url = f"{IEX_CLOUD}/ref-data/iex/symbols?token={IEX_TOKEN}"
response = requests.get(api_url)
if response:
return [symbol["symbol"] for symbol in response.json()]
def update_msg_limit(self):
self.msg_limit = self.get_msg_limit()
def update_msg_used(self):
self.msg_used = self.get_msg_used()
def update_msg_balance(self):
self.msg_limit = self.get_msg_limit()
self.msg_used = self.get_msg_used()
self.msg_balance = self.msg_limit - self.msg_used
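# Hedged usage sketch (needs a valid IEX_TOKEN and network access, so left commented out):
# ref = Reference()
# print(ref.msg_balance, 'messages left;', len(ref.symbols), 'symbols listed')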
|
from ...common_descs import *
from .objs.effe import EffeTag
from supyr_struct.defs.tag_def import TagDef
part_scale_modifiers = (
"velocity",
"velocity_delta",
"velocity_cone_angle",
"angular_velocity",
"angular_velocity_delta",
"type_specific_scale"
)
particle_scale_modifiers = (
"velocity",
"velocity_delta",
"velocity_cone_angle",
"angular_velocity",
"angular_velocity_delta",
"count",
"count_delta",
"distribution_radius",
"distribution_radius_delta",
"particle_radius",
"particle_radius_delta",
"tint"
)
create_in_env = SEnum16("create_in_env",
"any_environment",
"air_only",
"water_only",
"space_only",
)
create_in_mode = SEnum16("create_in_mode",
"either_mode",
"violent_mode_only",
"nonviolent_mode_only",
)
part = Struct("part",
create_in_env,
create_in_mode,
dyn_senum16("location",
DYN_NAME_PATH="........locations.locations_array[DYN_I].marker_name"),
Bool16("flags",
{NAME:"face_down", GUI_NAME:"face down regardless of location(decals)"}
),
Pad(12),
UEnum32("effect_class", INCLUDE=valid_tags_os, VISIBLE=False),
dependency("type", valid_effect_events),
Pad(24),
from_to_wu_sec("velocity_bounds"), # world units/sec
float_rad("velocity_cone_angle"), # radians
from_to_rad_sec("angular_velocity_bounds"), # radians/sec
QStruct("radius_modifier_bounds"),
Bool32("A_scales_values", *part_scale_modifiers),
Bool32("B_scales_values", *part_scale_modifiers),
SIZE=104,
)
particle = Struct("particle",
create_in_env,
create_in_mode,
SEnum16("create_in_camera",
"either",
"first_person_only",
"third_person_only",
"first_person_if_possible",
),
FlSInt16("unknown0", VISIBLE=False),
dyn_senum16("location",
DYN_NAME_PATH="........locations.locations_array[DYN_I].marker_name"),
FlSInt16("unknown1", VISIBLE=False),
yp_float_rad("relative_direction"), # radians
QStruct("relative_offset", INCLUDE=ijk_float),
QStruct("relative_direction_vector", INCLUDE=xyz_float, VISIBLE=False),
Pad(40),
dependency("particle_type", "part"),
Bool32("flags",
"stay_attached_to_marker",
"random_initial_angle",
"tint_from_object_color",
{NAME: "tint_as_hsv", GUI_NAME: "interpolate tint as hsv"},
{NAME: "use_long_hue_path", GUI_NAME: "...across the long hue path"},
),
SEnum16("distribution_function",
"start",
"end",
"constant",
"buildup",
"falloff",
"buildup_and_falloff",
),
Pad(2),
QStruct("created_count",
SInt16("from", GUI_NAME=""),
SInt16("to"), ORIENT='h'
),
from_to_wu("distribution_radius"),
Pad(12),
from_to_wu_sec("velocity"),
float_rad("velocity_cone_angle"), # radians
from_to_rad_sec("angular_velocity"), # radians
Pad(8),
from_to_wu("radius"),
Pad(8),
QStruct("tint_lower_bound", INCLUDE=argb_float),
QStruct("tint_upper_bound", INCLUDE=argb_float),
Pad(16),
Bool32("A_scales_values", *particle_scale_modifiers),
Bool32("B_scales_values", *particle_scale_modifiers),
SIZE=232
)
location = Struct("location",
ascii_str32("marker_name"),
)
event = Struct("event",
Pad(4),
Float("skip_fraction"),
from_to_sec("delay_bounds"),
from_to_sec("duration_bounds"),
Pad(20),
reflexive("parts", part, 32, DYN_NAME_PATH='.type.filepath'),
reflexive("particles", particle, 32, DYN_NAME_PATH='.particle_type.filepath'),
SIZE=68
)
effe_body = Struct("tagdata",
Bool32("flags",
{NAME: "deleted_when_inactive", GUI_NAME: "deleted when attachment deactivates"},
{NAME: "required", GUI_NAME: "required for gameplay(cannot optimize)"},
"use_in_dedicated_servers"
),
dyn_senum16("loop_start_event",
DYN_NAME_PATH=".events.events_array[DYN_I].NAME"),
dyn_senum16("loop_stop_event",
DYN_NAME_PATH=".events.events_array[DYN_I].NAME"),
Pad(32),
reflexive("locations", location, 32, DYN_NAME_PATH='.marker_name'),
reflexive("events", event, 32),
SIZE=64,
)
def get():
return effe_def
effe_def = TagDef("effe",
blam_header("effe", 4),
effe_body,
ext=".effect", endian=">", tag_cls=EffeTag,
)
|
'''
Created on Jan 26, 2018
@author: enerve
'''
from __future__ import division
import logging
import numpy as np
import random
import matplotlib.pyplot as plt
class Perceptron(object):
'''
The Perceptron classifier, intended to be run on a dataset of two classes
that are linearly separable.
'''
def __init__(self, X, Y, is_stochastic, step_size, max_steps,
reg_constant=0):
""" Initializes the Perceptron classifier.
X and Y is the training data over which to learn the hyperplane
If is_stochastic is True then the perceptron gradient steps will be
stochastic not batch.
step_size is the learning rate to be used.
max_steps is the maximum number of iterations to use before giving up
reg_constant is the regularization multiplier to be used.
"""
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.DEBUG)
self.X = X
self.Y = Y
self.is_stochastic = is_stochastic
if is_stochastic:
self.logger.debug("Running Stochastic Perceptron...")
self.step_size = step_size
self.max_steps = max_steps
self.reg_constant = reg_constant
self.w = None
def learn(self):
""" Learn the separating hyperplane on the training data
"""
Xt = np.append(np.ones((self.X.shape[0], 1)), self.X, axis=1)
Yt = self.Y * 2 - 1
w = np.ones(Xt.shape[1]) # avoiding random init, for debugging
lw = [[] for k in range(len(w))]
for iter in range(self.max_steps):
P = Yt * np.dot(Xt, w)
M = np.where(P <= 0)[0] # indices of misclassified datapoints
if len(M) == 0:
self.logger.debug("Found linearly separable hyperplane!")
break
if self.is_stochastic:
# just pick one randomly from M
M = [M[random.randint(0, len(M)-1)]]
grad = -1 * np.sum((Yt[M] * Xt[M].T), axis=1) / len(M)
if self.reg_constant > 0:
grad += self.reg_constant * w
eta = self.step_size * 10000 / (10000 + iter)
w = w - grad * eta
if iter % 100 == 0:
for k in range(len(w)):
lw[k].append(w[k])
if iter % 1000 == 0:
self.logger.debug("Iter %s:\t %f %f %f" %(iter, w[0], w[1], w[2]))
self.logger.debug("Iterations: %s" %(iter))
# x_range = range(len(lw[0]))
# fig = plt.figure()
# ax1 = fig.add_subplot(111)
# for j, lwn in enumerate(lw):
# if j % 3 >= 2: # plot an arbitrary subset of features
# a = w[j]
# ax1.plot(x_range, [(x-a) for x in lwn], label=str(j))
#
# plt.xlabel("Iteration")
# plt.ylabel("Feature weight")
# plt.show()
#self.logger.debug("%s" % np.array2string(w, precision=2, separator=','))
self.w = w
def classify(self, X_test, Y_test):
""" Classify the given test set using the learned perceptron (and
learning the perceptron first if it has not already been learned).
Returns confusion matrix of test result accuracy.
"""
if self.w is None: self.learn()
c_matrix = np.asarray([[0, 0],[0,0]])
Xt = np.append(np.ones((X_test.shape[0], 1)), X_test, axis=1)
Yt = Y_test
class_prediction = np.sign(np.dot(Xt, self.w))
class_prediction = ((class_prediction + 1) / 2).astype(int)
for i, y in enumerate(Yt):
c_matrix[y, class_prediction[i]] += 1
return c_matrix
def predict(self, X_test):
""" Predict class for the given data, and return for each, a quantity
that is positive if class is 1 and that is proportional to the
likelihood of it being so.
"""
if self.w is None: self.learn()
Xt = np.append(np.ones((X_test.shape[0], 1)), X_test, axis=1)
return np.dot(Xt, self.w)
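# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Toy linearly separable problem; only numpy and logging are needed, both imported above.
# Classes are labelled 0/1, as expected by classify().
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    X = np.array([[1.0, 2.0], [2.0, 3.0], [3.0, 1.5],
                  [-1.0, -2.0], [-2.0, -1.0], [-1.5, -3.0]])
    Y = np.array([1, 1, 1, 0, 0, 0])
    model = Perceptron(X, Y, is_stochastic=False, step_size=0.1, max_steps=1000)
    print(model.classify(X, Y))  # 2x2 confusion matrix on the training data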
|
from . build_state import SvgBuildState
|
# -*- coding: utf-8 -*-
"""User models."""
import datetime as dt
from enum import auto, Enum
from flask_login import UserMixin
from flask_fsm_test.database import (
Column,
Model,
SurrogatePK,
db,
reference_col,
relationship,
)
from flask_fsm_test.extensions import bcrypt
from transitions import Machine
from transitions.core import MachineError
class SignupStates(Enum):
STEP_1 = auto()
STEP_2 = auto()
STEP_3 = auto()
COMPLETE = auto()
SIGNUP_TRANSITIONS = [
["next", SignupStates.STEP_1, SignupStates.STEP_2],
["next", SignupStates.STEP_2, SignupStates.STEP_3],
["next", SignupStates.STEP_3, SignupStates.COMPLETE],
["prev", SignupStates.STEP_3, SignupStates.STEP_2],
["prev", SignupStates.STEP_2, SignupStates.STEP_1],
]
class Role(SurrogatePK, Model):
"""A role for a user."""
__tablename__ = "roles"
name = Column(db.String(80), unique=True, nullable=False)
user_id = reference_col("users", nullable=True)
user = relationship("User", backref="roles")
def __init__(self, name, **kwargs):
"""Create instance."""
db.Model.__init__(self, name=name, **kwargs)
def __repr__(self):
"""Represent instance as a unique string."""
return f"<Role({self.name})>"
class Cred(SurrogatePK, Model):
"""Provide Cred for a user."""
__tablename__ = "creds"
name = Column(db.String(80), unique=True, nullable=False)
user_id = reference_col("users", nullable=True)
user = relationship("User", backref="creds")
def __init__(self, name, **kwargs):
"""Create instance."""
db.Model.__init__(self, name=name, **kwargs)
def __repr__(self):
"""Represent instance as a unique string."""
return f"<Cred({self.name})>"
class User(UserMixin, SurrogatePK, Model):
"""A user of the app."""
__tablename__ = "users"
username = Column(db.String(80), unique=True, nullable=False)
email = Column(db.String(80), unique=True, nullable=False)
#: The hashed password
password = Column(db.LargeBinary(128), nullable=True)
created_at = Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
first_name = Column(db.String(30), nullable=True)
last_name = Column(db.String(30), nullable=True)
active = Column(db.Boolean(), default=False)
is_admin = Column(db.Boolean(), default=False)
status = db.Column(
db.Enum(SignupStates), default=SignupStates.STEP_1
)
def __init__(self, username, email, password=None, **kwargs):
"""Create instance."""
db.Model.__init__(self, username=username, email=email, **kwargs)
if password:
self.set_password(password)
else:
self.password = None
def set_password(self, password):
"""Set password."""
self.password = bcrypt.generate_password_hash(password)
def check_password(self, value):
"""Check password."""
return bcrypt.check_password_hash(self.password, value)
@property
def full_name(self):
"""Full user name."""
return f"{self.first_name} {self.last_name}"
@property
def state(self):
return self.status
@state.setter
def state(self, value):
self.status = value
db.session.add(self)
db.session.commit()
def go_next(self):
machine = Machine(
model=self, states=SignupStates, transitions=SIGNUP_TRANSITIONS, initial=self.state
)
try:
self.next()
return True
except MachineError:
return False
def go_prev(self):
machine = Machine(
model=self, states=SignupStates, transitions=SIGNUP_TRANSITIONS, initial=self.state
)
try:
self.prev()
return True
except MachineError:
return False
def __repr__(self):
"""Represent instance as a unique string."""
return f"<User({self.username!r})>"
|
__author__ = 'Killua'
import numpy as np
class RidgeRegression:
"""
Implementation of ridge regression
"""
# properties
beta = None # estimated coefficient
y = None # regression output
data = None # regression input
alpha = 0 # penalty term
intercept = False # whether to include intercept
def __init__(self, y=None, data=None, alpha=0, intercept=False):
""" Construction method
:param data: regression input
:param y: regression output
:param alpha: penalty term
:param intercept: whether to include intercept
:return:
"""
self.data = data
self.y = y
self.alpha = alpha
self.intercept = intercept
if data is not None and y is not None:
self.fit(y, data)
def fit(self, y, data):
""" Fit ridge regression model
:param data: regression input
:param y: regression output
:return: fitted beta
"""
if self.intercept:
data = np.pad(data, ((0, 0), (1, 0)), "constant", constant_values=1)
# Process
y = np.matrix(y)
self.y = y
data = np.matrix(data)
self.data = data
# Fit
_, n_features = data.shape
self.beta = np.linalg.inv(np.transpose(data) * data + self.alpha * np.eye(n_features)) * np.transpose(data) * y
return self.beta
def predict(self, new_data):
""" Predict for new data
:param new_data: new data input
:return: estimated regression output
"""
if self.intercept:
new_data = np.pad(new_data, ((0, 0), (1, 0)), "constant", constant_values=1)
new_data = np.matrix(new_data)
return new_data * self.beta
def fit_and_predict(self, data, y):
self.fit(y, data)
return self.predict(data)
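# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Closed-form ridge fit on a tiny 1-D problem where y is roughly 2*x; only numpy is needed.
if __name__ == "__main__":
    X = np.array([[1.0], [2.0], [3.0], [4.0]])
    y = np.array([[2.1], [3.9], [6.2], [7.8]])
    model = RidgeRegression(y=y, data=X, alpha=0.1, intercept=True)
    print(model.beta)                        # [intercept, slope], slope close to 2
    print(model.predict(np.array([[5.0]])))  # prediction close to 10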
|
import multiprocessing
from .event_bus import EventBus
from .settings import Settings
with multiprocessing.Pool() as pool:
from ui import qtx
app = qtx.QApplication([])
settings = Settings('redstork-sample')
bus = EventBus()
from .view.main_view import MainView
mainView = MainView(app)
from .dialogs import Dialogs
dialogs = Dialogs(mainView)
from .view.file_menu import FileMenu
fileMenu = FileMenu(mainView)
from .controller.project_controller import ProjectController
projectController = ProjectController()
from .presenter.project_presenter import ProjectPresenter
project_presenter = ProjectPresenter(bus, fileMenu, projectController, dialogs, settings)
from .view.toolbar import Toolbar
toolbar = Toolbar(mainView)
from .view.page_view import PageView
pageView = PageView()
mainView.setCentralWidget(pageView)
from .presenter.page_presenter import PagePresenter
pagePresenter = PagePresenter(bus, pageView, toolbar, None, pool)
from .view.tools_menu import ToolsMenu
toolsMenu = ToolsMenu(mainView)
from .presenter.tools_presenter import ToolsPresenter
toolsPresenter = ToolsPresenter(bus, toolsMenu, dialogs)
from .controller.annot_controller import AnnotController
annotController = AnnotController()
from .presenter.annot_presenter import AnnotPresenter
annotPresenter = AnnotPresenter(bus, pageView.scene(), annotController)
from .presenter.settings_presenter import SettingsPresenter
settingsPresenter = SettingsPresenter(mainView, settings)
mainView.show()
mainView.exec_()
|