Dataset schema (one row per source file; ⌀ marks nullable columns):
hexsha: string (length 40) | size: int64 (4 to 1.02M) | ext: string (8 classes) | lang: string (1 class)
max_stars_repo_path: string (4 to 209) | max_stars_repo_name: string (5 to 121) | max_stars_repo_head_hexsha: string (length 40) | max_stars_repo_licenses: list (1 to 10) | max_stars_count: int64 (1 to 191k, ⌀) | max_stars_repo_stars_event_min_datetime: string (24, ⌀) | max_stars_repo_stars_event_max_datetime: string (24, ⌀)
max_issues_repo_path: string (4 to 209) | max_issues_repo_name: string (5 to 121) | max_issues_repo_head_hexsha: string (length 40) | max_issues_repo_licenses: list (1 to 10) | max_issues_count: int64 (1 to 67k, ⌀) | max_issues_repo_issues_event_min_datetime: string (24, ⌀) | max_issues_repo_issues_event_max_datetime: string (24, ⌀)
max_forks_repo_path: string (4 to 209) | max_forks_repo_name: string (5 to 121) | max_forks_repo_head_hexsha: string (length 40) | max_forks_repo_licenses: list (1 to 10) | max_forks_count: int64 (1 to 105k, ⌀) | max_forks_repo_forks_event_min_datetime: string (24, ⌀) | max_forks_repo_forks_event_max_datetime: string (24, ⌀)
content: string (4 to 1.02M) | avg_line_length: float64 (1.07 to 66.1k) | max_line_length: int64 (4 to 266k) | alphanum_fraction: float64 (0.01 to 1)

hexsha: 08770b4a8025d1b0a3e992f79b4bec044b49fab8 | size: 25,224 | ext: py | lang: Python
repo (stars/issues/forks): rwoodsrobinson/pymatgen @ f9226b79b8f9ccd0b305e557c9e4878e096bfc77 | path: pymatgen/core/tests/test_composition.py | licenses: ["MIT"]
max_stars_count: 2 (2021-04-28T13:28:52.000Z to 2021-04-28T13:29:06.000Z) | max_issues_count: null | max_forks_count: 3 (2018-10-17T19:08:09.000Z to 2021-12-02T20:26:58.000Z)
content:
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Created on Nov 10, 2012
@author: shyue
"""
from pymatgen.util.testing import PymatgenTest
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Nov 10, 2012"
import unittest
from pymatgen.core.periodic_table import Element, Specie
from pymatgen.core.composition import Composition, CompositionError, \
ChemicalPotential
import random
class CompositionTest(PymatgenTest):
def setUp(self):
self.comp = list()
self.comp.append(Composition("Li3Fe2(PO4)3"))
self.comp.append(Composition("Li3Fe(PO4)O"))
self.comp.append(Composition("LiMn2O4"))
self.comp.append(Composition("Li4O4"))
self.comp.append(Composition("Li3Fe2Mo3O12"))
self.comp.append(Composition("Li3Fe2((PO4)3(CO3)5)2"))
self.comp.append(Composition("Li1.5Si0.5"))
self.comp.append(Composition("ZnOH"))
self.indeterminate_comp = []
self.indeterminate_comp.append(
Composition.ranked_compositions_from_indeterminate_formula("Co1",
True)
)
self.indeterminate_comp.append(
Composition.ranked_compositions_from_indeterminate_formula("Co1",
False)
)
self.indeterminate_comp.append(
Composition.ranked_compositions_from_indeterminate_formula("co2o3")
)
self.indeterminate_comp.append(
Composition.ranked_compositions_from_indeterminate_formula("ncalu")
)
self.indeterminate_comp.append(
Composition.ranked_compositions_from_indeterminate_formula("calun")
)
self.indeterminate_comp.append(
Composition.ranked_compositions_from_indeterminate_formula(
"liCoo2n (pO4)2")
)
self.indeterminate_comp.append(
Composition.ranked_compositions_from_indeterminate_formula(
"(co)2 (PO)4")
)
self.indeterminate_comp.append(
Composition.ranked_compositions_from_indeterminate_formula("Fee3"))
def test_immutable(self):
try:
self.comp[0]["Fe"] = 1
except Exception as ex:
self.assertIsInstance(ex, TypeError)
try:
del self.comp[0]["Fe"]
except Exception as ex:
self.assertIsInstance(ex, TypeError)
def test_in(self):
self.assertIn("Fe", self.comp[0])
self.assertNotIn("Fe", self.comp[2])
self.assertIn(Element("Fe"), self.comp[0])
self.assertEqual(self.comp[0]["Fe"], 2)
self.assertEqual(self.comp[0]["Mn"], 0)
self.assertRaises(TypeError, self.comp[0].__getitem__, "Hello")
self.assertRaises(TypeError, self.comp[0].__getitem__, "Vac")
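    # Hill notation: carbon first, then hydrogen, then the remaining elements alphabetically.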
def test_hill_formula(self):
c = Composition("CaCO3")
self.assertEqual(c.hill_formula, "C Ca O3")
c = Composition("C2H5OH")
self.assertEqual(c.hill_formula, "C2 H6 O")
def test_init_(self):
self.assertRaises(CompositionError, Composition, {"H": -0.1})
f = {'Fe': 4, 'Li': 4, 'O': 16, 'P': 4}
self.assertEqual("Li4 Fe4 P4 O16", Composition(f).formula)
f = {None: 4, 'Li': 4, 'O': 16, 'P': 4}
self.assertRaises(TypeError, Composition, f)
f = {1: 2, 8: 1}
self.assertEqual("H2 O1", Composition(f).formula)
self.assertEqual("Na2 O1", Composition(Na=2, O=1).formula)
c = Composition({'S': Composition.amount_tolerance / 2})
self.assertEqual(len(c.elements), 0)
def test_average_electroneg(self):
val = [2.7224999999999997, 2.4160000000000004, 2.5485714285714285,
2.21, 2.718, 3.08, 1.21, 2.43]
for i, c in enumerate(self.comp):
self.assertAlmostEqual(c.average_electroneg,
val[i])
def test_total_electrons(self):
test_cases = {'C': 6, 'SrTiO3': 84}
for item in test_cases.keys():
c = Composition(item)
self.assertAlmostEqual(c.total_electrons, test_cases[item])
def test_formula(self):
correct_formulas = ['Li3 Fe2 P3 O12', 'Li3 Fe1 P1 O5', 'Li1 Mn2 O4',
'Li4 O4', 'Li3 Fe2 Mo3 O12', 'Li3 Fe2 P6 C10 O54',
'Li1.5 Si0.5', 'Zn1 H1 O1']
all_formulas = [c.formula for c in self.comp]
self.assertEqual(all_formulas, correct_formulas)
self.assertRaises(CompositionError, Composition,
"(co2)(po4)2")
self.assertEqual(Composition("K Na 2").reduced_formula, "KNa2")
self.assertEqual(Composition("K3 Na 2").reduced_formula, "K3Na2")
self.assertEqual(Composition("Na 3 Zr (PO 4) 3").reduced_formula,
"Na3Zr(PO4)3")
def test_iupac_formula(self):
correct_formulas = ['Li3 Fe2 P3 O12', 'Li3 Fe1 P1 O5', 'Li1 Mn2 O4',
'Li4 O4', 'Li3 Mo3 Fe2 O12', 'Li3 Fe2 C10 P6 O54',
'Li1.5 Si0.5', 'Zn1 H1 O1']
all_formulas = [c.iupac_formula for c in self.comp]
self.assertEqual(all_formulas, correct_formulas)
def test_mixed_valence(self):
comp = Composition({"Fe2+": 2, "Fe3+": 4, "Li+": 8})
self.assertEqual(comp.reduced_formula, "Li4Fe3")
self.assertEqual(comp.alphabetical_formula, "Fe6 Li8")
self.assertEqual(comp.formula, "Li8 Fe6")
def test_indeterminate_formula(self):
correct_formulas = [["Co1"], ["Co1", "C1 O1"], ["Co2 O3", "C1 O5"],
["N1 Ca1 Lu1", "U1 Al1 C1 N1"],
["N1 Ca1 Lu1", "U1 Al1 C1 N1"],
["Li1 Co1 P2 N1 O10", "Li1 Co1 Po8 N1 O2",
"Li1 P2 C1 N1 O11", "Li1 Po8 C1 N1 O3"],
["Co2 P4 O4", "Co2 Po4", "P4 C2 O6",
"Po4 C2 O2"], []]
for i, c in enumerate(correct_formulas):
self.assertEqual([Composition(comp) for comp in c],
self.indeterminate_comp[i])
def test_alphabetical_formula(self):
correct_formulas = ['Fe2 Li3 O12 P3', 'Fe1 Li3 O5 P1', 'Li1 Mn2 O4',
'Li4 O4', 'Fe2 Li3 Mo3 O12', 'C10 Fe2 Li3 O54 P6',
'Li1.5 Si0.5', 'H1 O1 Zn1']
all_formulas = [c.alphabetical_formula for c in self.comp]
self.assertEqual(all_formulas, correct_formulas)
def test_reduced_composition(self):
correct_reduced_formulas = ['Li3Fe2(PO4)3', 'Li3FePO5', 'LiMn2O4',
'Li2O2', 'Li3Fe2(MoO4)3',
'Li3Fe2P6(C5O27)2', 'Li1.5Si0.5', 'ZnHO']
for i in range(len(self.comp)):
self.assertEqual(self.comp[i]
.get_reduced_composition_and_factor()[0],
Composition(correct_reduced_formulas[i]))
def test_reduced_formula(self):
correct_reduced_formulas = ['Li3Fe2(PO4)3', 'Li3FePO5', 'LiMn2O4',
'Li2O2', 'Li3Fe2(MoO4)3',
'Li3Fe2P6(C5O27)2', 'Li1.5Si0.5', 'ZnHO']
all_formulas = [c.reduced_formula for c in self.comp]
self.assertEqual(all_formulas, correct_reduced_formulas)
# test iupac reduced formula (polyanions should still appear at the end)
all_formulas = [c.get_reduced_formula_and_factor(iupac_ordering=True)[0]
for c in self.comp]
self.assertEqual(all_formulas, correct_reduced_formulas)
self.assertEqual(
Composition('H6CN').get_integer_formula_and_factor(
iupac_ordering=True)[0],
'CNH6')
# test rounding
c = Composition({'Na': 2 - Composition.amount_tolerance / 2, 'Cl': 2})
self.assertEqual('NaCl', c.reduced_formula)
def test_integer_formula(self):
correct_reduced_formulas = ['Li3Fe2(PO4)3', 'Li3FePO5', 'LiMn2O4',
'Li2O2', 'Li3Fe2(MoO4)3',
'Li3Fe2P6(C5O27)2', 'Li3Si', 'ZnHO']
all_formulas = [c.get_integer_formula_and_factor()[0]
for c in self.comp]
self.assertEqual(all_formulas, correct_reduced_formulas)
self.assertEqual(Composition('Li0.5O0.25').get_integer_formula_and_factor(),
('Li2O', 0.25))
self.assertEqual(Composition('O0.25').get_integer_formula_and_factor(),
('O2', 0.125))
formula, factor = Composition(
"Li0.16666667B1.0H1.0").get_integer_formula_and_factor()
self.assertEqual(formula, 'Li(BH)6')
self.assertAlmostEqual(factor, 1 / 6)
# test iupac reduced formula (polyanions should still appear at the end)
all_formulas = [c.get_integer_formula_and_factor(iupac_ordering=True)[0]
for c in self.comp]
self.assertEqual(all_formulas, correct_reduced_formulas)
self.assertEqual(
Composition('H6CN0.5').get_integer_formula_and_factor(
iupac_ordering=True),
('C2NH12', 0.5))
def test_num_atoms(self):
correct_num_atoms = [20, 10, 7, 8, 20, 75, 2, 3]
all_natoms = [c.num_atoms for c in self.comp]
self.assertEqual(all_natoms, correct_num_atoms)
def test_weight(self):
correct_weights = [417.427086, 187.63876199999999, 180.81469, 91.7616,
612.3258, 1302.430172, 24.454250000000002, 82.41634]
all_weights = [c.weight for c in self.comp]
self.assertArrayAlmostEqual(all_weights, correct_weights, 5)
def test_get_atomic_fraction(self):
correct_at_frac = {"Li": 0.15, "Fe": 0.1, "P": 0.15, "O": 0.6}
for el in ["Li", "Fe", "P", "O"]:
self.assertEqual(self.comp[0].get_atomic_fraction(el),
correct_at_frac[el],
"Wrong computed atomic fractions")
self.assertEqual(self.comp[0].get_atomic_fraction("S"), 0,
"Wrong computed atomic fractions")
def test_anonymized_formula(self):
expected_formulas = ['A2B3C3D12', 'ABC3D5', 'AB2C4', 'AB',
'A2B3C3D12', 'A2B3C6D10E54', 'A0.5B1.5', 'ABC']
for i in range(len(self.comp)):
self.assertEqual(self.comp[i].anonymized_formula,
expected_formulas[i])
def test_get_wt_fraction(self):
correct_wt_frac = {"Li": 0.0498841610868, "Fe": 0.267567687258,
"P": 0.222604831158, "O": 0.459943320496}
for el in ["Li", "Fe", "P", "O"]:
self.assertAlmostEqual(correct_wt_frac[el],
self.comp[0].get_wt_fraction(el),
5, "Wrong computed weight fraction")
self.assertEqual(self.comp[0].get_wt_fraction(Element("S")), 0,
"Wrong computed weight fractions")
def test_from_dict(self):
sym_dict = {"Fe": 6, "O": 8}
self.assertEqual(Composition.from_dict(sym_dict).reduced_formula,
"Fe3O4",
"Creation form sym_amount dictionary failed!")
comp = Composition({"Fe2+": 2, "Fe3+": 4, "O2-": 8})
comp2 = Composition.from_dict(comp.as_dict())
self.assertEqual(comp, comp2)
def test_as_dict(self):
c = Composition.from_dict({'Fe': 4, 'O': 6})
d = c.as_dict()
correct_dict = {'Fe': 4.0, 'O': 6.0}
self.assertEqual(d['Fe'], correct_dict['Fe'])
self.assertEqual(d['O'], correct_dict['O'])
correct_dict = {'Fe': 2.0, 'O': 3.0}
d = c.to_reduced_dict
self.assertEqual(d['Fe'], correct_dict['Fe'])
self.assertEqual(d['O'], correct_dict['O'])
def test_pickle(self):
for c in self.comp:
self.serialize_with_pickle(c, test_eq=True)
self.serialize_with_pickle(c.to_data_dict, test_eq=True)
def test_to_data_dict(self):
comp = Composition('Fe0.00009Ni0.99991')
d = comp.to_data_dict
self.assertAlmostEqual(d["reduced_cell_composition"]["Fe"], 9e-5)
def test_add(self):
self.assertEqual((self.comp[0] + self.comp[2]).formula,
"Li4 Mn2 Fe2 P3 O16",
"Incorrect composition after addition!")
self.assertEqual((self.comp[3] + {"Fe": 4, "O": 4}).formula,
"Li4 Fe4 O8", "Incorrect composition after addition!")
def test_sub(self):
self.assertEqual((self.comp[0]
- Composition("Li2O")).formula,
"Li1 Fe2 P3 O11",
"Incorrect composition after addition!")
self.assertEqual((self.comp[0] - {"Fe": 2, "O": 3}).formula,
"Li3 P3 O9")
self.assertRaises(CompositionError, Composition('O').__sub__,
Composition('H'))
# check that S is completely removed by subtraction
c1 = Composition({'S': 1 + Composition.amount_tolerance / 2, 'O': 1})
c2 = Composition({'S': 1})
self.assertEqual(len((c1 - c2).elements), 1)
def test_mul(self):
self.assertEqual((self.comp[0] * 4).formula, "Li12 Fe8 P12 O48")
self.assertEqual((3 * self.comp[1]).formula, "Li9 Fe3 P3 O15")
def test_div(self):
self.assertEqual((self.comp[0] / 4).formula, 'Li0.75 Fe0.5 P0.75 O3')
def test_equals(self):
random_z = random.randint(1, 92)
fixed_el = Element.from_Z(random_z)
other_z = random.randint(1, 92)
while other_z == random_z:
other_z = random.randint(1, 92)
comp1 = Composition({fixed_el: 1, Element.from_Z(other_z): 0})
other_z = random.randint(1, 92)
while other_z == random_z:
other_z = random.randint(1, 92)
comp2 = Composition({fixed_el: 1, Element.from_Z(other_z): 0})
self.assertEqual(comp1, comp2,
"Composition equality test failed. " +
"%s should be equal to %s" % (comp1.formula,
comp2.formula))
self.assertEqual(comp1.__hash__(), comp2.__hash__(),
"Hashcode equality test failed!")
def test_comparisons(self):
c1 = Composition({'S': 1})
c1_1 = Composition({'S': 1.00000000000001})
c2 = Composition({'S': 2})
c3 = Composition({'O': 1})
c4 = Composition({'O': 1, 'S': 1})
self.assertFalse(c1 > c2)
self.assertFalse(c1_1 > c1)
self.assertFalse(c1_1 < c1)
self.assertTrue(c1 > c3)
self.assertTrue(c3 < c1)
self.assertTrue(c4 > c1)
self.assertEqual(sorted([c1, c1_1, c2, c4, c3]),
[c3, c1, c1_1, c4, c2])
def test_almost_equals(self):
c1 = Composition({'Fe': 2.0, 'O': 3.0, 'Mn': 0})
c2 = Composition({'O': 3.2, 'Fe': 1.9, 'Zn': 0})
c3 = Composition({'Ag': 2.0, 'O': 3.0})
c4 = Composition({'Fe': 2.0, 'O': 3.0, 'Ag': 2.0})
self.assertTrue(c1.almost_equals(c2, rtol=0.1))
self.assertFalse(c1.almost_equals(c2, rtol=0.01))
self.assertFalse(c1.almost_equals(c3, rtol=0.1))
self.assertFalse(c1.almost_equals(c4, rtol=0.1))
def test_equality(self):
self.assertTrue(self.comp[0].__eq__(self.comp[0]))
self.assertFalse(self.comp[0].__eq__(self.comp[1]))
self.assertFalse(self.comp[0].__ne__(self.comp[0]))
self.assertTrue(self.comp[0].__ne__(self.comp[1]))
def test_fractional_composition(self):
for c in self.comp:
self.assertAlmostEqual(c.fractional_composition.num_atoms, 1)
def test_init_numerical_tolerance(self):
self.assertEqual(Composition({'B': 1, 'C': -1e-12}), Composition('B'))
def test_negative_compositions(self):
self.assertEqual(Composition('Li-1(PO-1)4', allow_negative=True).formula,
'Li-1 P4 O-4')
self.assertEqual(Composition('Li-1(PO-1)4', allow_negative=True).reduced_formula,
'Li-1(PO-1)4')
self.assertEqual(Composition('Li-2Mg4', allow_negative=True).reduced_composition,
Composition('Li-1Mg2', allow_negative=True))
self.assertEqual(Composition('Li-2.5Mg4', allow_negative=True).reduced_composition,
Composition('Li-2.5Mg4', allow_negative=True))
# test math
c1 = Composition('LiCl', allow_negative=True)
c2 = Composition('Li')
self.assertEqual(c1 - 2 * c2, Composition({'Li': -1, 'Cl': 1},
allow_negative=True))
self.assertEqual((c1 + c2).allow_negative, True)
self.assertEqual(c1 / -1, Composition('Li-1Cl-1', allow_negative=True))
# test num_atoms
c1 = Composition('Mg-1Li', allow_negative=True)
self.assertEqual(c1.num_atoms, 2)
self.assertEqual(c1.get_atomic_fraction('Mg'), 0.5)
self.assertEqual(c1.get_atomic_fraction('Li'), 0.5)
self.assertEqual(c1.fractional_composition,
Composition('Mg-0.5Li0.5', allow_negative=True))
# test copy
self.assertEqual(c1.copy(), c1)
# test species
c1 = Composition({'Mg': 1, 'Mg2+': -1}, allow_negative=True)
self.assertEqual(c1.num_atoms, 2)
self.assertEqual(c1.element_composition, Composition())
self.assertEqual(c1.average_electroneg, 1.31)
def test_special_formulas(self):
special_formulas = {"LiO": "Li2O2", "NaO": "Na2O2", "KO": "K2O2",
"HO": "H2O2", "CsO": "Cs2O2", "RbO": "Rb2O2",
"O": "O2", "N": "N2", "F": "F2", "Cl": "Cl2",
"H": "H2"}
for k, v in special_formulas.items():
self.assertEqual(Composition(k).reduced_formula, v)
def test_oxi_state_guesses(self):
self.assertEqual(Composition("LiFeO2").oxi_state_guesses(),
({"Li": 1, "Fe": 3, "O": -2},))
self.assertEqual(Composition("Fe4O5").oxi_state_guesses(),
({"Fe": 2.5, "O": -2},))
self.assertEqual(Composition("V2O3").oxi_state_guesses(),
({"V": 3, "O": -2},))
# all_oxidation_states produces *many* possible responses
self.assertEqual(len(Composition("MnO").oxi_state_guesses(
all_oxi_states=True)), 4)
# can't balance b/c missing V4+
self.assertEqual(Composition("VO2").oxi_state_guesses(
oxi_states_override={"V": [2, 3, 5]}), [])
# missing V4+, but can balance due to additional sites
self.assertEqual(Composition("V2O4").oxi_state_guesses(
oxi_states_override={"V": [2, 3, 5]}), ({"V": 4, "O": -2},))
# multiple solutions - Mn/Fe = 2+/4+ or 3+/3+ or 4+/2+
self.assertEqual(len(Composition("MnFeO3").oxi_state_guesses(
oxi_states_override={"Mn": [2, 3, 4], "Fe": [2, 3, 4]})), 3)
# multiple solutions prefers 3/3 over 2/4 or 4/2
self.assertEqual(Composition("MnFeO3").oxi_state_guesses(
oxi_states_override={"Mn": [2, 3, 4], "Fe": [2, 3, 4]})[0],
{"Mn": 3, "Fe": 3, "O": -2})
# target charge of 1
self.assertEqual(Composition("V2O6").oxi_state_guesses(
oxi_states_override={"V": [2, 3, 4, 5]}, target_charge=-2),
({"V": 5, "O": -2},))
# max_sites for very large composition - should timeout if incorrect
self.assertEqual(Composition("Li10000Fe10000P10000O40000").
oxi_state_guesses(max_sites=7)[0],
{"Li": 1, "Fe": 2, "P": 5, "O": -2})
# max_sites for very large composition - should timeout if incorrect
self.assertEqual(Composition("Li10000Fe10000P10000O40000").
oxi_state_guesses(max_sites=-1)[0],
{"Li": 1, "Fe": 2, "P": 5, "O": -2})
# negative max_sites less than -1 - should throw error if cannot reduce
# to under the abs(max_sites) number of sites. Will also timeout if
# incorrect.
self.assertEqual(
Composition("Sb10000O10000F10000").oxi_state_guesses(
max_sites=-3)[0],
{"Sb": 3, "O": -2, "F": -1})
self.assertRaises(ValueError, Composition("LiOF").oxi_state_guesses,
max_sites=-2)
self.assertRaises(ValueError, Composition("V2O3").
oxi_state_guesses, max_sites=1)
def test_oxi_state_decoration(self):
# Basic test: Get compositions where each element is in a single charge state
decorated = Composition("H2O").add_charges_from_oxi_state_guesses()
self.assertIn(Specie("H", 1), decorated)
self.assertEqual(2, decorated.get(Specie("H", 1)))
# Test: More than one charge state per element
decorated = Composition("Fe3O4").add_charges_from_oxi_state_guesses()
self.assertEqual(1, decorated.get(Specie("Fe", 2)))
self.assertEqual(2, decorated.get(Specie("Fe", 3)))
self.assertEqual(4, decorated.get(Specie("O", -2)))
# Test: No possible charge states
# It should return an uncharged composition
decorated = Composition("NiAl").add_charges_from_oxi_state_guesses()
self.assertEqual(1, decorated.get(Specie("Ni", 0)))
self.assertEqual(1, decorated.get(Specie("Al", 0)))
def test_Metallofullerene(self):
# Test: Parse Metallofullerene formula (e.g. Y3N@C80)
formula = "Y3N@C80"
sym_dict = {"Y": 3, "N": 1, "C": 80}
cmp = Composition(formula)
cmp2 = Composition.from_dict(sym_dict)
self.assertEqual(cmp, cmp2)
def test_contains_element_type(self):
formula = "EuTiO3"
cmp = Composition(formula)
self.assertTrue(cmp.contains_element_type("lanthanoid"))
self.assertFalse(cmp.contains_element_type("noble_gas"))
self.assertTrue(cmp.contains_element_type("f-block"))
self.assertFalse(cmp.contains_element_type("s-block"))
def test_chemical_system(self):
formula = "NaCl"
cmp = Composition(formula)
self.assertEqual(cmp.chemical_system, "Cl-Na")
def test_is_valid(self):
formula = "NaCl"
cmp = Composition(formula)
self.assertTrue(cmp.valid)
formula = "NaClX"
cmp = Composition(formula)
self.assertFalse(cmp.valid)
self.assertRaises(ValueError,
Composition,
"NaClX", strict=True)
def test_remove_charges(self):
cmp1 = Composition({'Al3+': 2.0, 'O2-': 3.0})
cmp2 = Composition({'Al': 2.0, 'O': 3.0})
self.assertNotEqual(str(cmp1), str(cmp2))
cmp1 = cmp1.remove_charges()
self.assertEqual(str(cmp1), str(cmp2))
cmp1 = cmp1.remove_charges()
self.assertEqual(str(cmp1), str(cmp2))
cmp1 = Composition({'Fe3+': 2.0, 'Fe2+': 3.0, 'O2-': 6.0})
cmp2 = Composition({'Fe': 5.0, 'O': 6.0})
self.assertNotEqual(str(cmp1), str(cmp2))
cmp1 = cmp1.remove_charges()
self.assertEqual(str(cmp1), str(cmp2))
class ChemicalPotentialTest(unittest.TestCase):
def test_init(self):
d = {'Fe': 1, Element('Fe'): 1}
self.assertRaises(ValueError, ChemicalPotential, d)
for k in ChemicalPotential(Fe=1).keys():
self.assertIsInstance(k, Element)
def test_math(self):
fepot = ChemicalPotential({'Fe': 1})
opot = ChemicalPotential({'O': 2.1})
pots = ChemicalPotential({'Fe': 1, 'O': 2.1})
potsx2 = ChemicalPotential({'Fe': 2, 'O': 4.2})
feo2 = Composition('FeO2')
# test get_energy()
self.assertAlmostEqual(pots.get_energy(feo2), 5.2)
self.assertAlmostEqual(fepot.get_energy(feo2, False), 1)
self.assertRaises(ValueError, fepot.get_energy, feo2)
# test multiplication
self.assertRaises(NotImplementedError, lambda: (pots * pots))
self.assertDictEqual(pots * 2, potsx2)
self.assertDictEqual(2 * pots, potsx2)
# test division
self.assertDictEqual(potsx2 / 2, pots)
self.assertRaises(NotImplementedError, lambda: (pots / pots))
self.assertRaises(NotImplementedError, lambda: (pots / feo2))
# test add/subtract
self.assertDictEqual(pots + pots, potsx2)
self.assertDictEqual(potsx2 - pots, pots)
self.assertDictEqual(fepot + opot, pots)
self.assertDictEqual(fepot - opot, pots - opot - opot)
if __name__ == "__main__":
unittest.main()
avg_line_length: 42.322148 | max_line_length: 91 | alphanum_fraction: 0.575642

hexsha: 82ccec3c057780f5609a01408de46842f8509a49 | size: 2,120 | ext: py | lang: Python
repo (stars/issues/forks): starpirate2203/BROODY-S-LAST-SCRIPT @ 7b5a9eab02c782ebf39b0f3edf69536fae8289c6 | path: data/scripts/cragCastleDefs.py | licenses: ["MIT"]
max_stars_count: 2 (2021-09-07T16:04:30.000Z to 2021-09-16T03:30:16.000Z) | max_issues_count: null | max_forks_count: 1 (2021-09-21T12:42:28.000Z to 2021-09-21T12:42:28.000Z)
content:
# This file was automatically generated from "cragCastle.ma"
points = {}
boxes = {}
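# Note: the "+" below concatenates tuples, so each box entry is stored as one flat
# 9-tuple (apparently center + rotation + extents) and the sized spawn points as
# 6-tuples (position + extents); the exact semantics depend on the engine that loads these defs.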
boxes['areaOfInterestBounds'] = (0.7033834902, 6.55869393, -3.153439808) + (0.0, 0.0, 0.0) + (16.73648528, 14.94789935, 11.60063102)
points['ffaSpawn1'] = (-4.04166076, 7.54589296, -3.542792409) + (2.471508516, 1.156019141, 0.1791707664)
points['ffaSpawn2'] = (5.429881832, 7.582951102, -3.497145747) + (2.415753564, 1.12871694, 0.17898173)
points['ffaSpawn3'] = (4.8635999, 9.311949436, -6.013939259) + (1.61785329, 1.12871694, 0.17898173)
points['ffaSpawn4'] = (-3.628023052, 9.311949436, -6.013939259) + (1.61785329, 1.12871694, 0.17898173)
points['ffaSpawn5'] = (-2.414363536, 5.930994442, 0.03036413701) + (1.61785329, 1.12871694, 0.17898173)
points['ffaSpawn6'] = (3.520989196, 5.930994442, 0.03036413701) + (1.61785329, 1.12871694, 0.17898173)
points['flag1'] = (-1.900164924, 9.363050076, -6.441041548)
points['flag2'] = (3.240019982, 9.319215955, -6.392759924)
points['flag3'] = (-6.883672142, 7.475761129, 0.2098388241)
points['flag4'] = (8.193957063, 7.478129652, 0.1536410508)
points['flagDefault'] = (0.6296142785, 6.221901832, -0.0435909658)
boxes['levelBounds'] = (0.4799042306, 9.085075529, -3.267604531) + (0.0, 0.0, 0.0) + (22.9573075, 9.908550511, 14.17997333)
points['powerupSpawn1'] = (7.916483636, 7.83853949, -5.990841203)
points['powerupSpawn2'] = (-0.6978591232, 7.883836528, -6.066674247)
points['powerupSpawn3'] = (1.858093733, 7.893059862, -6.076932659)
points['powerupSpawn4'] = (-6.671997388, 7.992307645, -6.121432603)
points['spawn1'] = (-5.169730601, 7.54589296, -3.542792409) + (1.057384557, 1.156019141, 0.1791707664)
points['spawn2'] = (6.203092708, 7.582951102, -3.497145747) + (1.009865407, 1.12871694, 0.17898173)
points['spawnByFlag1'] = (-2.872146219, 9.363050076, -6.041110823)
points['spawnByFlag2'] = (4.313355684, 9.363050076, -6.041110823)
points['spawnByFlag3'] = (-6.634074097, 7.508585058, -0.5918910315)
points['spawnByFlag4'] = (7.868759529, 7.508585058, -0.5918910315)
points['tnt1'] = (-5.038090498, 10.0136642, -6.158580823)
points['tnt2'] = (6.203368846, 10.0136642, -6.158580823)
avg_line_length: 73.103448 | max_line_length: 132 | alphanum_fraction: 0.705189

hexsha: f917edca32b5a5b00edd294d9b8cc8ed1c8e1747 | size: 915 | ext: py | lang: Python
repo (stars/issues/forks): lukaszmitka/obd-tool @ fedf94d3a3254f1b6497811bbc6da9d1a2934375 | path: obd_client.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from time import sleep
import RPi.GPIO as GPIO
import serial
ser = serial.Serial('/dev/ttyUSB0', 38400, timeout=1)
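# ELM327 initialisation: ATZ resets the adapter, ATE0 turns command echo off,
# ATH1 enables message headers in responses, ATL0 disables linefeeds.
# PID 0100 (supported PIDs) is then polled until the ECU answers with the expected frame.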
ser.write(b'ATZ\r')
data = ser.read(100)
ser.write(b'ATE0\r')
data = ser.read(100)
ser.write(b'ATH1\r')
data = ser.read(100)
ser.write(b'ATL0\r')
data = ser.read(100)
ser.write(b'0100\r')
data = ser.read(100)
while data.decode()[0:24] != "7E8 06 41 00 98 3B A0 13":
ser.write(b'0100\r')
data = ser.read(100)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.OUT, initial=GPIO.LOW)
while True:
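    # PID 010D requests vehicle speed (km/h); the trailing '1' tells the ELM327
    # to return after a single response line.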
ser.write(b'010D1\r')
speed_data = ser.read(100)
if len(speed_data) == 19:
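        # With headers on the reply looks like "7E8 03 41 0D <hex speed>",
        # so characters 13:15 hold the speed byte.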
vehicle_speed = int(speed_data[13:15], base=16)
print(vehicle_speed)
if vehicle_speed > 20:
print("Turn OFF parking sensor")
GPIO.output(11, GPIO.LOW)
else:
print("Turn ON parking sensor")
GPIO.output(11, GPIO.HIGH)
sleep(1)
avg_line_length: 22.317073 | max_line_length: 56 | alphanum_fraction: 0.625137

hexsha: e227baad59d08c625045f06043d649b12c6aebe4 | size: 3,475 | ext: py | lang: Python
repo (stars/issues/forks): BreastGAN/experiment1 @ edc55b93f214997c2ef0654aeeedcdc2f11b554c | path: resources/synthetic_data.py | licenses: ["Apache-2.0"]
max_stars_count: 7 (2018-12-05T21:40:16.000Z to 2022-01-09T11:11:02.000Z) | max_issues_count: 2 (2021-01-31T12:14:31.000Z to 2022-02-09T23:28:39.000Z) | max_forks_count: 1 (2018-11-26T01:44:19.000Z to 2018-11-26T01:44:19.000Z)
content:
# Copyright 2018 Lukas Jendele and Ondrej Skopek. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from dotmap import DotMap
import skimage.filters
# Do not modify img!
def gen_element(img, center, min_rad, max_rad, max_thresh, eps=0.1):
def is_inside(x, y, center, rad1, rad2):
x0 = (x - center[0])
y0 = (y - center[1])
return x0**2 / rad1 + y0**2 / rad2 <= 1 + np.random.normal(0, eps)
mask = np.zeros_like(img)
min_thresh = 0.9 # smallest number I can actually see in the resulting image
max_area = max_rad**2
# print("max_area =", max_area)
min_area = max(1, min_rad**2)
# print("min_area =", min_area)
area = np.random.uniform(min_area, max_area)
# print("area =", area)
rad1 = np.random.randint(min_rad, max_rad + 1)
# print("r1 =", rad1)
rad2 = max(1, int(area / rad1))
area = rad1 * rad2
# print("area =", area)
# print("r2 =", rad2)
for x in range(max(0, center[0] - rad1), min(center[0] + rad1, img.shape[0])):
for y in range(max(0, center[1] - rad2), min(center[1] + rad2, img.shape[1])):
if is_inside(x, y, center, rad1, rad2):
mask[x, y] = 1
# print("max_rad =", max_rad)
thresh = area # x in [0, max_area]
thresh = thresh / max_area # x in [0, 1]
# print("ratio =", thresh)
thresh = 1 - thresh
thresh = (thresh * (max_thresh - min_thresh)) + min_thresh # x in [min_thresh, max_thresh]
# print("thresh =", thresh)
mask *= thresh
return mask
# doesn't work well under (30, 30)
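# Infinite generator: yields (image, mask, metadata) triples with one synthetic mass per image.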
def generate_synth(size=(256, 256), max_thresh=2.5): # $max_thresh sigma
def gen_img(size, max_thresh, masses=1):
img_meta = DotMap()
img_meta.laterality = 'L'
img_meta.view = 'CC'
img = np.random.standard_normal(size=size)
img = skimage.filters.gaussian(img, sigma=1)
min_rad = max(1, int(min(size) / 80))
max_rad = max(min_rad, int(min(size) / 10))
mask = np.zeros(size)
for i in range(masses):
x = np.random.randint(0, size[0])
y = np.random.randint(0, size[1])
mask += gen_element(img, (x, y), min_rad, max_rad, max_thresh)
return img, mask, img_meta
while True:
yield gen_img(size=size, max_thresh=max_thresh, masses=1)
def read_synth(n_samples, size=(256, 256), no_gpu=False):
# In this case, synth data + batch and shuffle it. In our case, it will be quite different.
imgs = np.zeros((n_samples, size[0], size[1]))
masks = np.zeros((n_samples, size[0], size[1]))
data_gen = generate_synth(size=size)
for i in range(n_samples):
imgs[i], masks[i], _ = next(data_gen)
# imgs = np.reshape(imgs, (n_samples, size[0] * size[1]))
# masks = np.reshape(masks, (n_samples, size[0] * size[1]))
return imgs, masks
avg_line_length: 34.405941 | max_line_length: 95 | alphanum_fraction: 0.610935

hexsha: 42e778de4b98377ea3438075d8ad85f654837949 | size: 4,043 | ext: py | lang: Python
repo (stars/issues/forks): miguel-fresh/geoip-translation @ ccf9dbc0330e597704e57d8b2967fc9be16017ed | path: .history/run_update_20220328115000.py | licenses: ["Info-ZIP"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from asyncio.subprocess import STDOUT
from fileinput import filename
from genericpath import exists
import subprocess
from pathlib import Path
from os import getcwd, remove, rename, path
import yaml
def removeFileIfExists(file_path):
if path.exists(file_path):
remove(file_path)
def checkExistence(path_to_check):
if not path.exists(path_to_check):
raise(Exception(f"No existe la ruta \'{path_to_check}\'"))
return True
def good_msg(msg):
return f"+ {msg}"
def bad_msg(msg):
return f"- {msg}"
def neutral_msg(msg):
return f"~ {msg}"
# LEGACY
ZIP_LEGACY_NAME = 'GeoLite2-City-CSV.zip'
# Default values
ONSTART_DOWNLOAD = False
ONSTART_CONVERT = False
CURRENT_DIR = Path(__file__).parent.resolve()
CONFIG_FILENAME = 'config.yml'
ZIP_NAME = 'GeoLite2-City-CSV.zip'
DAT_NAME = 'GeoLiteCity.dat'
DOWNLOAD_DIRNAME = './data'
OUTPUT_DIRNAME = './output'
LICENSE_KEY = ''
DB_EDITION = 'GeoLite2-City-CSV'
# Get config from config.yml file
try:
    with open(CURRENT_DIR.joinpath(CONFIG_FILENAME)) as cfg_file:
documents = yaml.full_load(cfg_file)
paths = documents['paths']
names = documents['names']
on_start = documents['on_start']
max_mind = documents['max_mind']
OUTPUT_DIRNAME = paths['output']
DOWNLOAD_DIRNAME = paths['data']
if ('relative_to' in paths and paths['relative_to'] == 'cwd'):
CURRENT_DIR = Path(getcwd())
ZIP_NAME = names['zip']
DAT_NAME = names['dat']
ONSTART_DOWNLOAD = on_start['download_zip']
ONSTART_CONVERT = on_start['convert_to_dat']
LICENSE_KEY = max_mind['license-key']
DB_EDITION = max_mind['edition'] if 'edition' in max_mind else DB_EDITION
except:
print(neutral_msg('No se encontró un archivo config.yml válido, usando valores por defecto...'))
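# A minimal config.yml sketch (layout assumed; inferred only from the keys read
# above, not taken from the original repository):
#
#   paths:
#     relative_to: cwd
#     data: ./data
#     output: ./output
#   names:
#     zip: GeoLite2-City-CSV.zip
#     dat: GeoLiteCity.dat
#   on_start:
#     download_zip: true
#     convert_to_dat: true
#   max_mind:
#     license-key: YOUR_LICENSE_KEY   # placeholder
#     edition: GeoLite2-City-CSV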
if (not ONSTART_CONVERT and not ONSTART_DOWNLOAD):
print(good_msg("No se especificó ninguna acción (download_zip, convert_to_dat). Saliendo..."))
exit(0)
# Setting paths
DOWNLOAD_ABSPATH = CURRENT_DIR.joinpath(DOWNLOAD_DIRNAME)
OUTPUT_ABSPATH = CURRENT_DIR.joinpath(OUTPUT_DIRNAME)
ZIP_ABSPATH = DOWNLOAD_ABSPATH.joinpath(ZIP_LEGACY_NAME)
DAT_ABSPATH = OUTPUT_ABSPATH.joinpath(DAT_NAME)
# Download .zip
if ONSTART_DOWNLOAD:
# Check if download folder exists
checkExistence(DOWNLOAD_ABSPATH)
# Remove previous .zip file if exists
removeFileIfExists(ZIP_ABSPATH)
print(good_msg(f'Descargando {ZIP_LEGACY_NAME}...'))
# Download .zip
download_output = subprocess.run(['php', 'download.php',
'--license-key', LICENSE_KEY,
'--output-path', DOWNLOAD_ABSPATH,
'--edition', DB_EDITION],
cwd=CURRENT_DIR.joinpath('./geoip2-update'), stderr=STDOUT)
# Rename .zip if necessary
    if (ZIP_LEGACY_NAME != ZIP_NAME):
        rename(ZIP_ABSPATH, DOWNLOAD_ABSPATH.joinpath(ZIP_NAME))
        ZIP_ABSPATH = DOWNLOAD_ABSPATH.joinpath(ZIP_NAME)
# Check if download was successful
if (download_output.returncode != 0):
raise(Exception(bad_msg('Error en la descarga :(')))
checkExistence(ZIP_ABSPATH)
print(good_msg(f'Descarga exitosa :) -> {ZIP_ABSPATH}'))
# Convert format
if ONSTART_CONVERT:
# Check if .zip exists
checkExistence(ZIP_ABSPATH)
# Check if output folder exists
checkExistence(OUTPUT_ABSPATH)
# python geolite2legacy.py -i GeoLite2-City-CSV.zip -o GeoLiteCity.dat -f geoname2fips.csv
update_output = subprocess.run(['python', 'geolite2legacy.py',
'-i', ZIP_ABSPATH,
'-o', DAT_ABSPATH,
'-f', 'geoname2fips.csv'],
cwd='./geolite2legacy')
# Check convertion was successful
if update_output.returncode != 0:
raise(Exception(bad_msg('Error en la conversión de formato :(')))
    print(good_msg(f'Conversión exitosa :) -> {DAT_ABSPATH}'))
avg_line_length: 29.086331 | max_line_length: 100 | alphanum_fraction: 0.65026

hexsha: ffb13af6cf3eb4f6ed85f2de4405cb7cee07b789 | size: 6,039 | ext: py | lang: Python
repo (stars/issues/forks): hanskrupakar/MRI-tumor-segmentation-Brats @ 5a1a51b159a556261cd485db45f4d705974c86f4 | path: tf_models.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import tensorflow as tf
from tensorflow.keras.layers import Conv3D, MaxPooling3D, UpSampling3D, Activation, Conv3DTranspose
from tf_layers import *
def PlainCounterpart(input, name):
x = Conv3DWithBN(input, filters=24, ksize=3, strides=1, padding='same', name=name + '_conv_15rf_1x')
x = Conv3DWithBN(x, filters=36, ksize=3, strides=1, padding='same', name=name + '_conv_15rf_2x')
x = Conv3DWithBN(x, filters=48, ksize=3, strides=1, padding='same', name=name + '_conv_15rf_3x')
x = Conv3DWithBN(x, filters=60, ksize=3, strides=1, padding='same', name=name + '_conv_15rf_4x')
x = Conv3DWithBN(x, filters=72, ksize=3, strides=1, padding='same', name=name + '_conv_15rf_5x')
x = Conv3DWithBN(x, filters=84, ksize=3, strides=1, padding='same', name=name + '_conv_15rf_6x')
x = Conv3DWithBN(x, filters=96, ksize=3, strides=1, padding='same', name=name + '_conv_15rf_7x')
out_15rf = x
x = Conv3DWithBN(x, filters=108, ksize=3, strides=1, padding='same', name=name + '_conv_27rf_1x')
x = Conv3DWithBN(x, filters=120, ksize=3, strides=1, padding='same', name=name + '_conv_27rf_2x')
x = Conv3DWithBN(x, filters=132, ksize=3, strides=1, padding='same', name=name + '_conv_27rf_3x')
x = Conv3DWithBN(x, filters=144, ksize=3, strides=1, padding='same', name=name + '_conv_27rf_4x')
x = Conv3DWithBN(x, filters=156, ksize=3, strides=1, padding='same', name=name + '_conv_27rf_5x')
x = Conv3DWithBN(x, filters=168, ksize=3, strides=1, padding='same', name=name + '_conv_27rf_6x')
out_27rf = x
return out_15rf, out_27rf
def BraTS2ScaleDenseNetConcat(input, name):
x = Conv3D(filters=24, kernel_size=3, strides=1, padding='same', name=name+'_conv_init')(input)
x = DenseNetUnit3D(x, growth_rate=12, ksize=3, rep=6, name=name+'_denseblock1')
out_15rf = BatchNormalization(center=True, scale=True)(x)
out_15rf = Activation('relu')(out_15rf)
out_15rf = Conv3DWithBN(out_15rf, filters=96, ksize=1, strides=1, name=name + '_out_15_postconv')
x = DenseNetUnit3D(x, growth_rate=12, ksize=3, rep=6, name=name+'_denseblock2')
out_27rf = BatchNormalization(center=True, scale=True)(x)
out_27rf = Activation('relu')(out_27rf)
out_27rf = Conv3DWithBN(out_27rf, filters=168, ksize=1, strides=1, name=name + '_out_27_postconv')
return out_15rf, out_27rf
def BraTS2ScaleDenseNetConcat_large(input, name):
x = Conv3D(filters=48, kernel_size=3, strides=1, padding='same', name=name+'_conv_init')(input)
x = DenseNetUnit3D(x, growth_rate=12, ksize=3, rep=6, name=name+'_denseblock1')
out_15rf = BatchNormalization(center=True, scale=True)(x)
out_15rf = Activation('relu')(out_15rf)
out_15rf = Conv3DWithBN(out_15rf, filters=192, ksize=1, strides=1, name=name + '_out_15_postconv')
x = DenseNetUnit3D(x, growth_rate=24, ksize=3, rep=6, name=name+'_denseblock2')
out_27rf = BatchNormalization(center=True, scale=True)(x)
out_27rf = Activation('relu')(out_27rf)
out_27rf = Conv3DWithBN(out_27rf, filters=336, ksize=1, strides=1, name=name + '_out_27_postconv')
return out_15rf, out_27rf
def BraTS2ScaleDenseNet(input, num_labels):
x = Conv3D(filters=24, kernel_size=3, strides=1, padding='same')(input)
x = DenseNetUnit3D(x, growth_rate=12, ksize=3, rep=6)
out_15rf = BatchNormalization(center=True, scale=True)(x)
out_15rf = Activation('relu')(out_15rf)
out_15rf = Conv3DWithBN(out_15rf, filters=96, ksize=1, strides=1, name='out_15_postconv')
x = DenseNetUnit3D(x, growth_rate=12, ksize=3, rep=6)
out_27rf = BatchNormalization(center=True, scale=True)(x)
out_27rf = Activation('relu')(out_27rf)
out_27rf = Conv3DWithBN(out_27rf, filters=168, ksize=1, strides=1, name='out_27_postconv')
score_15rf = Conv3D(num_labels, kernel_size=1, strides=1, padding='same')(out_15rf)
score_27rf = Conv3D(num_labels, kernel_size=1, strides=1, padding='same')(out_27rf)
score = score_15rf[:, 13:25, 13:25, 13:25, :] + \
score_27rf[:, 13:25, 13:25, 13:25, :]
return score
def BraTS3ScaleDenseNet(input, num_labels):
x = Conv3D(filters=24, kernel_size=3, strides=1, padding='same')(input)
x = DenseNetUnit3D(x, growth_rate=12, ksize=3, rep=5)
out_13rf = BatchNormalization(center=True, scale=True)(x)
out_13rf = Activation('relu')(out_13rf)
out_13rf = Conv3DWithBN(out_13rf, filters=84, ksize=1, strides=1, name='out_13_postconv')
x = DenseNetUnit3D(x, growth_rate=12, ksize=3, rep=5)
out_23rf = BatchNormalization(center=True, scale=True)(x)
out_23rf = Activation('relu')(out_23rf)
out_23rf = Conv3DWithBN(out_23rf, filters=144, ksize=1, strides=1, name='out_23_postconv')
x = DenseNetUnit3D(x, growth_rate=12, ksize=3, rep=5)
out_33rf = BatchNormalization(center=True, scale=True)(x)
out_33rf = Activation('relu')(out_33rf)
out_33rf = Conv3DWithBN(out_33rf, filters=204, ksize=1, strides=1, name='out_33_postconv')
score_13rf = Conv3D(num_labels, kernel_size=1, strides=1, padding='same')(out_13rf)
score_23rf = Conv3D(num_labels, kernel_size=1, strides=1, padding='same')(out_23rf)
score_33rf = Conv3D(num_labels, kernel_size=1, strides=1, padding='same')(out_33rf)
score = score_13rf[:, 16:28, 16:28, 16:28, :] + \
score_23rf[:, 16:28, 16:28, 16:28, :] + \
score_33rf[:, 16:28, 16:28, 16:28, :]
return score
def BraTS1ScaleDenseNet(input, num_labels):
x = Conv3D(filters=36, kernel_size=5, strides=1, padding='same')(input)
x = DenseNetUnit3D(x, growth_rate=18, ksize=3, rep=6)
out_15rf = BatchNormalization(center=True, scale=True)(x)
out_15rf = Activation('relu')(out_15rf)
out_15rf = Conv3DWithBN(out_15rf, filters=144, ksize=1, strides=1, name='out_17_postconv1')
out_15rf = Conv3DWithBN(out_15rf, filters=144, ksize=1, strides=1, name='out_17_postconv2')
score_15rf = Conv3D(num_labels, kernel_size=1, strides=1, padding='same')(out_15rf)
score = score_15rf[:, 8:20, 8:20, 8:20, :]
return score
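# Hypothetical usage sketch (patch size and label count are assumptions, not taken from this file):
#   import tensorflow as tf
#   patch = tf.keras.Input(shape=(38, 38, 38, 4))      # e.g. 4 MRI modalities
#   scores = BraTS2ScaleDenseNet(patch, num_labels=5)  # central 12x12x12 voxel scores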
avg_line_length: 44.404412 | max_line_length: 104 | alphanum_fraction: 0.700116

hexsha: 86a65c50784fa6a2f7c65b5254a4b8a03d01aa0e | size: 4,025 | ext: py | lang: Python
repo (stars/issues/forks): Kilo59/mass_replace @ f44627f99f09433af9368e7a5fd6f7152905d484 | path: mass_replace/mass_replace.py | licenses: ["MIT"]
max_stars_count: 1 (2019-01-06T01:32:41.000Z to 2019-01-06T01:32:41.000Z) | max_issues_count: 14 (2018-04-25T00:06:58.000Z to 2020-03-31T00:55:23.000Z) | max_forks_count: null
content:
# -*- coding: utf-8 -*-
"""
mass_replace.py
~~~~~~~~~~~~~~~
Python Application for multiple simultaneous find and replace operations in a directory.
"""
import pathlib
from sys import version_info
import os
import fileinput
import yaml
from pprint import pprint as pp
PYTHON_VER = (version_info.major, version_info.minor)
ROOT = pathlib.Path(__file__).joinpath("..").resolve()
def resolve_wd(target_dir="mass_replace"):
if target_dir in get_dirs():
os.chdir(target_dir)
resolve_wd(target_dir=target_dir)
def load_config(filename="config.yaml"):
"""Load a .yml config file as a dictionary and return it."""
with open(filename, "r") as f_in:
return yaml.safe_load(f_in)
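# A minimal config.yaml sketch (layout assumed; inferred from the keys used by
# discover_filetypes() and mass_replace() below, not taken from the project docs):
#
#   root_folder: ./my_project
#   filetypes: [py, txt, md]
#   replacement_pairs:
#     old_text: new_text
#     colour: color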
def get_items():
"""Returns a list of files and folders in a directory"""
return [x for x in os.listdir()]
def get_dirs():
"""Returns a list of all folders in the current working directory."""
return [x for x in get_items() if os.path.isdir(x)]
def get_files():
"""Returns a list of all files in the current working directory."""
return [x for x in get_items() if os.path.isfile(x)]
def file_find_replace(filename, text_to_search, replacement_text):
with fileinput.FileInput(filename, inplace=True) as file:
for line in file:
print(line.replace(text_to_search, replacement_text), end="")
def many_find_replace(filename, text_search_replace_dicts):
for text_to_search, replacement_text in text_search_replace_dicts.items():
file_find_replace(filename, text_to_search, replacement_text)
def discover_filetypes(root_folder=None, hard_copy="file_exts.txt"):
"""Walks through the specified `root_folder` and collects all file
extension types.
Writes the extension types to `file_exts.txt`."""
if not root_folder:
try:
root_folder = load_config("config.yaml")["root_folder"]
if root_folder is None:
raise (FileNotFoundError)
except FileNotFoundError:
root_folder = os.getcwd()
file_types = set()
for _, _, filenames in os.walk(root_folder):
f_types = [".{}".format(ext.split(".")[-1]) for ext in filenames]
file_types.update(f_types)
if hard_copy:
with open(hard_copy, "w") as f_out:
f_out.writelines("\n".join(file_types))
return file_types
def mass_replace(root_folder=None, config=None, verbose=False):
"""Performs find and replace operations on files nested in a root direcotry
according to settings in the `config.yaml` file."""
if not config:
try:
config = load_config("config.yaml")
except FileNotFoundError:
raise FileNotFoundError(
"Could not find a 'config.yaml' file and no alternative config provided"
)
if not root_folder:
root_folder = config["root_folder"]
print("ROOT: {}".format(root_folder))
replacement_pairs = config["replacement_pairs"]
for i in replacement_pairs.items():
print(i)
counter = 0
for dirpath, dirnames, filenames in os.walk(root_folder):
valid_files = [f for f in filenames if f.split(".")[-1] in config["filetypes"]]
if verbose:
print("=" * 79)
print("\tCurrent Path - STEP:{}".format(counter))
pp(dirpath)
print("\tDirectories - STEP:{}".format(counter))
pp(dirnames)
print("\tFiles: - STEP:{}".format(counter))
pp(filenames)
print()
counter += 1
for fname in valid_files:
print("|----{}".format(fname))
many_find_replace("{}/{}".format(dirpath, fname), replacement_pairs)
if __name__ == "__main__":
print("{0}\n{1}".format(__doc__, "*" * 79))
print("discover_filetypes()\n", discover_filetypes.__doc__)
pp(discover_filetypes(hard_copy=True))
print(
"{}\nmass_replace()\n{}\n{}".format(
"*" * 79, mass_replace.__doc__, mass_replace(verbose=True)
)
)
avg_line_length: 32.991803 | max_line_length: 89 | alphanum_fraction: 0.644472

hexsha: 9f0422b5b39e15216c044f2bbab22c6e71c4608e | size: 1,444 | ext: py | lang: Python
repo (stars/issues/forks): rovany706/pysubgroup @ ed20c47fd3b82f34109e5451552986c3349e6aa4 | path: pysubgroup/tests/algorithms_testing.py | licenses: ["Apache-2.0"]
max_stars_count: 40 (2018-11-25T14:00:32.000Z to 2022-03-17T07:24:22.000Z) | max_issues_count: 25 (2018-12-05T13:56:17.000Z to 2022-03-02T10:53:49.000Z) | max_forks_count: 21 (2018-12-21T11:19:37.000Z to 2022-01-14T03:41:36.000Z)
content:
from timeit import default_timer as timer
import abc
import pysubgroup as ps
class TestAlgorithmsBase(abc.ABC):
# pylint: disable=no-member
def evaluate_result(self, algorithm_result, result, qualities):
self.assertTrue(isinstance(algorithm_result, ps.SubgroupDiscoveryResult))
algorithm_result.to_dataframe()
algorithm_result = algorithm_result.to_descriptions()
for (q, sg) in algorithm_result:
print(" " + str(q) + ":\t" + str(sg))
# compare length such that zip works correctly
self.assertEqual(len(algorithm_result), len(result))
self.assertEqual(len(algorithm_result), len(qualities))
for (algorithm_q, algorithm_SG), expected_q, expected_SGD in zip(algorithm_result, qualities, result):
self.assertEqual(repr(algorithm_SG), repr(expected_SGD))
self.assertEqual(algorithm_q, expected_q)
def runAlgorithm(self, algorithm, name, result, qualities, task):
print()
print("Running " + name)
start = timer()
algorithm_result = algorithm.execute(task)
end = timer()
print(" Runtime for {}: {}".format(name, end - start))
if hasattr(self.task.qf, 'calls'):
print(' Number of call to qf:', self.task.qf.calls)
print()
self.evaluate_result(algorithm_result, result, qualities)
return algorithm_result
# pylint: enable=no-member
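# A hypothetical concrete test sketch (assumed usage; the real pysubgroup test
# modules define their own task/result/qualities fixtures):
#
#   import unittest
#
#   class AprioriTest(TestAlgorithmsBase, unittest.TestCase):
#       def setUp(self):
#           self.task = ...        # a ps.SubgroupDiscoveryTask built from a dataframe
#           self.result = [...]    # expected subgroup descriptions
#           self.qualities = [...] # expected quality values
#
#       def test_apriori(self):
#           self.runAlgorithm(ps.Apriori(), "Apriori", self.result, self.qualities, self.task)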
avg_line_length: 39.027027 | max_line_length: 110 | alphanum_fraction: 0.662742

hexsha: 6eab9e9ceae63a82d5a82c4f277843db888be921 | size: 11,925 | ext: py | lang: Python
repo (stars/issues/forks): elwoodxblues/saleor @ 5e4e4a4259a011d24b04ebd24c77c689de843fa1 | path: saleor/graphql/account/types.py | licenses: ["CC-BY-4.0"]
max_stars_count: 8 (2018-07-17T13:13:21.000Z to 2022-03-01T17:02:34.000Z) | max_issues_count: 2 (2021-03-09T18:11:34.000Z to 2021-05-10T15:05:35.000Z) | max_forks_count: 1 (2019-09-03T03:04:50.000Z to 2019-09-03T03:04:50.000Z)
content:
import graphene
import graphene_django_optimizer as gql_optimizer
from django.contrib.auth import get_user_model
from graphene import relay
from graphql_jwt.decorators import login_required
from ...account import models
from ...checkout.utils import get_user_checkout
from ...core.permissions import get_permissions
from ...order import models as order_models
from ..checkout.types import Checkout
from ..core.connection import CountableDjangoObjectType
from ..core.fields import PrefetchingConnectionField
from ..core.resolvers import resolve_meta, resolve_private_meta
from ..core.types import CountryDisplay, Image, MetadataObjectType, PermissionDisplay
from ..core.utils import get_node_optimized
from ..decorators import permission_required
from ..utils import format_permissions_for_display
from .enums import CustomerEventsEnum
class AddressInput(graphene.InputObjectType):
first_name = graphene.String(description="Given name.")
last_name = graphene.String(description="Family name.")
company_name = graphene.String(description="Company or organization.")
street_address_1 = graphene.String(description="Address.")
street_address_2 = graphene.String(description="Address.")
city = graphene.String(description="City.")
city_area = graphene.String(description="District.")
postal_code = graphene.String(description="Postal code.")
country = graphene.String(description="Country.")
country_area = graphene.String(description="State or province.")
phone = graphene.String(description="Phone number.")
class Address(CountableDjangoObjectType):
country = graphene.Field(
CountryDisplay, required=True, description="Default shop's country"
)
is_default_shipping_address = graphene.Boolean(
required=False, description="Address is user's default shipping address"
)
is_default_billing_address = graphene.Boolean(
required=False, description="Address is user's default billing address"
)
class Meta:
description = "Represents user address data."
interfaces = [relay.Node]
model = models.Address
only_fields = [
"city",
"city_area",
"company_name",
"country",
"country_area",
"first_name",
"id",
"last_name",
"phone",
"postal_code",
"street_address_1",
"street_address_2",
]
@staticmethod
def resolve_country(root: models.Address, _info):
return CountryDisplay(code=root.country.code, country=root.country.name)
@staticmethod
def resolve_is_default_shipping_address(root: models.Address, _info):
"""Look if the address is the default shipping address of the user.
This field is added through annotation when using the
`resolve_addresses` resolver. It's invalid for
`resolve_default_shipping_address` and
`resolve_default_billing_address`
"""
if not hasattr(root, "user_default_shipping_address_pk"):
return None
user_default_shipping_address_pk = getattr(
root, "user_default_shipping_address_pk"
)
if user_default_shipping_address_pk == root.pk:
return True
return False
@staticmethod
def resolve_is_default_billing_address(root: models.Address, _info):
"""Look if the address is the default billing address of the user.
This field is added through annotation when using the
`resolve_addresses` resolver. It's invalid for
`resolve_default_shipping_address` and
`resolve_default_billing_address`
"""
if not hasattr(root, "user_default_billing_address_pk"):
return None
user_default_billing_address_pk = getattr(
root, "user_default_billing_address_pk"
)
if user_default_billing_address_pk == root.pk:
return True
return False
class CustomerEvent(CountableDjangoObjectType):
date = graphene.types.datetime.DateTime(
description="Date when event happened at in ISO 8601 format."
)
type = CustomerEventsEnum(description="Customer event type")
user = graphene.Field(
lambda: User,
id=graphene.Argument(graphene.ID),
description="User who performed the action.",
)
message = graphene.String(description="Content of the event.")
count = graphene.Int(description="Number of objects concerned by the event.")
order = gql_optimizer.field(
graphene.Field(
"saleor.graphql.order.types.Order", description="The concerned order."
),
model_field="order",
)
order_line = graphene.Field(
"saleor.graphql.order.types.OrderLine", description="The concerned order line."
)
class Meta:
description = "History log of the customer."
model = models.CustomerEvent
interfaces = [relay.Node]
only_fields = ["id"]
@staticmethod
def resolve_message(root: models.CustomerEvent, _info):
return root.parameters.get("message", None)
@staticmethod
def resolve_count(root: models.CustomerEvent, _info):
return root.parameters.get("count", None)
@staticmethod
def resolve_order_line(root: models.CustomerEvent, info):
if "order_line_pk" in root.parameters:
try:
qs = order_models.OrderLine.objects
order_line_pk = root.parameters["order_line_pk"]
return get_node_optimized(qs, {"pk": order_line_pk}, info)
except order_models.OrderLine.DoesNotExist:
pass
return None
class ServiceAccount(MetadataObjectType, CountableDjangoObjectType):
permissions = graphene.List(
PermissionDisplay, description="List of the service's permissions."
)
created = graphene.DateTime(
description="The date and time when the service account was created."
)
is_active = graphene.Boolean(
description="Determine if service account will be set active or not."
)
name = graphene.String(description="Name of the service account.")
auth_token = graphene.String(description="Last 4 characters of the token")
class Meta:
description = "Represents service account data."
interfaces = [relay.Node]
model = models.ServiceAccount
permissions = ("account.manage_service_accounts",)
only_fields = ["name" "permissions", "created", "is_active", "auth_token", "id"]
@staticmethod
def resolve_permissions(root: models.ServiceAccount, _info, **_kwargs):
permissions = root.permissions.prefetch_related("content_type").order_by(
"codename"
)
return format_permissions_for_display(permissions)
@staticmethod
def resolve_auth_token(root: models.ServiceAccount, _info, **_kwargs):
return root.auth_token[-4:]
@staticmethod
def resolve_meta(root, info):
return resolve_meta(root, info)
class User(MetadataObjectType, CountableDjangoObjectType):
addresses = gql_optimizer.field(
graphene.List(Address, description="List of all user's addresses."),
model_field="addresses",
)
checkout = graphene.Field(
Checkout, description="Returns the last open checkout of this user."
)
gift_cards = gql_optimizer.field(
PrefetchingConnectionField(
"saleor.graphql.giftcard.types.GiftCard",
description="List of the user gift cards.",
),
model_field="gift_cards",
)
note = graphene.String(description="A note about the customer")
orders = gql_optimizer.field(
PrefetchingConnectionField(
"saleor.graphql.order.types.Order", description="List of user's orders."
),
model_field="orders",
)
permissions = graphene.List(
PermissionDisplay, description="List of user's permissions."
)
avatar = graphene.Field(Image, size=graphene.Int(description="Size of the avatar."))
events = gql_optimizer.field(
graphene.List(
CustomerEvent, description="List of events associated with the user."
),
model_field="events",
)
stored_payment_sources = graphene.List(
"saleor.graphql.payment.types.PaymentSource",
description="List of stored payment sources",
)
class Meta:
description = "Represents user data."
interfaces = [relay.Node]
model = get_user_model()
only_fields = [
"date_joined",
"default_billing_address",
"default_shipping_address",
"email",
"first_name",
"id",
"is_active",
"is_staff",
"last_login",
"last_name",
"note",
"token",
]
@staticmethod
def resolve_addresses(root: models.User, _info, **_kwargs):
return root.addresses.annotate_default(root).all()
@staticmethod
def resolve_checkout(root: models.User, _info, **_kwargs):
return get_user_checkout(root)[0]
@staticmethod
def resolve_gift_cards(root: models.User, info, **_kwargs):
return root.gift_cards.all()
@staticmethod
def resolve_permissions(root: models.User, _info, **_kwargs):
if root.is_superuser:
permissions = get_permissions()
else:
permissions = root.user_permissions.prefetch_related(
"content_type"
).order_by("codename")
return format_permissions_for_display(permissions)
@staticmethod
@permission_required("account.manage_users")
def resolve_note(root: models.User, _info):
return root.note
@staticmethod
@permission_required("account.manage_users")
def resolve_events(root: models.User, _info):
return root.events.all()
@staticmethod
def resolve_orders(root: models.User, info, **_kwargs):
viewer = info.context.user
if viewer.has_perm("order.manage_orders"):
return root.orders.all()
return root.orders.confirmed()
@staticmethod
def resolve_avatar(root: models.User, info, size=None, **_kwargs):
if root.avatar:
return Image.get_adjusted(
image=root.avatar,
alt=None,
size=size,
rendition_key_set="user_avatars",
info=info,
)
@staticmethod
@login_required
def resolve_stored_payment_sources(root: models.User, _info):
from .resolvers import resolve_payment_sources
return resolve_payment_sources(root)
@staticmethod
@permission_required("account.manage_users")
def resolve_private_meta(root, _info):
return resolve_private_meta(root, _info)
@staticmethod
def resolve_meta(root, _info):
return resolve_meta(root, _info)
class ChoiceValue(graphene.ObjectType):
raw = graphene.String()
verbose = graphene.String()
class AddressValidationData(graphene.ObjectType):
country_code = graphene.String()
country_name = graphene.String()
address_format = graphene.String()
address_latin_format = graphene.String()
allowed_fields = graphene.List(graphene.String)
required_fields = graphene.List(graphene.String)
upper_fields = graphene.List(graphene.String)
country_area_type = graphene.String()
country_area_choices = graphene.List(ChoiceValue)
city_type = graphene.String()
city_choices = graphene.List(ChoiceValue)
city_area_type = graphene.String()
city_area_choices = graphene.List(ChoiceValue)
postal_code_type = graphene.String()
postal_code_matchers = graphene.List(graphene.String)
postal_code_examples = graphene.List(graphene.String)
postal_code_prefix = graphene.String()
avg_line_length: 35.176991 | max_line_length: 88 | alphanum_fraction: 0.672537

hexsha: 167910c3c017cf3d680531e8cf3f1de6b9153e4f | size: 2,345 | ext: py | lang: Python
repo (stars/issues/forks): penguinmenac3/babilim @ d3b1dd7c38a9de8f1e553cc5c0b2dfa62fe25c27 | path: babilim/model/layers/activation.py | licenses: ["MIT"]
max_stars_count: 1 (2020-05-04T15:20:55.000Z to 2020-05-04T15:20:55.000Z) | max_issues_count: 1 (2019-11-28T09:03:20.000Z to 2019-11-28T09:03:20.000Z) | max_forks_count: 1 (2019-11-28T08:30:13.000Z to 2019-11-28T08:30:13.000Z)
content:
# AUTOGENERATED FROM: babilim/model/layers/activation.ipynb
# Cell: 0
"""doc
# babilim.model.layers.activation
> Compute an activation function.
"""
# Cell: 1
from babilim.core.annotations import RunOnlyOnce
from babilim.core.module_native import ModuleNative
# Cell: 2
class Activation(ModuleNative):
def __init__(self, activation, axis):
"""
        Applies an activation function to the input tensor.

        :param activation: A string specifying the activation function to use. Supported values are
            "relu", "softmax", "sigmoid", "tanh", "elu", "prelu" and None (identity).
        :param axis: The axis along which the softmax is computed (ignored by the other activations).
"""
super().__init__()
self.activation = activation
self.axis = axis
@RunOnlyOnce
def _build_pytorch(self, features):
        if self.activation is None:
            pass  # identity: nothing to resolve
elif self.activation == "relu":
from torch.nn.functional import relu
self.activation = relu
elif self.activation == "softmax":
from torch.nn.functional import softmax
self.activation = softmax
elif self.activation == "sigmoid":
from torch.nn.functional import sigmoid
self.activation = sigmoid
elif self.activation == "tanh":
from torch.nn.functional import tanh
self.activation = tanh
elif self.activation == "elu":
from torch.nn.functional import elu
self.activation = elu
elif self.activation == "prelu":
from torch.nn.functional import prelu
self.activation = prelu
else:
raise NotImplementedError("Activation '{}' not implemented.".format(self.activation))
    def _call_pytorch(self, features):
        if self.activation is None:
            return features
        elif self.activation.__name__ == "softmax":
            # Only softmax accepts an axis argument in torch.nn.functional.
            return self.activation(features, dim=self.axis)
        else:
            return self.activation(features)
@RunOnlyOnce
def _build_tf(self, features):
        if self.activation is None:
            pass  # identity: nothing to build
else:
from tensorflow.keras.layers import Activation as _Activation
self.activation = _Activation(self.activation)
def _call_tf(self, features):
if self.activation is None:
return features
else:
return self.activation(features)
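# A minimal usage sketch, assuming PyTorch is installed. It exercises the PyTorch code
# path directly; in normal babilim use the module is invoked through the ModuleNative
# call machinery rather than by calling the _build/_call methods by hand.
if __name__ == "__main__":
    import torch

    act = Activation("softmax", axis=-1)
    features = torch.randn(2, 5)
    act._build_pytorch(features)           # resolves torch.nn.functional.softmax once
    print(act._call_pytorch(features))     # softmax along the last axis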
| 32.569444
| 116
| 0.620896
|
b77ece63a9d10aaa8c7958e2c24b2f525b0c9747
| 2,994
|
py
|
Python
|
python/television/consumers.py
|
pztrick/django-television
|
5857200a0871702052827a5d603db7d60bca1406
|
[
"MIT"
] | 1
|
2018-07-16T16:21:44.000Z
|
2018-07-16T16:21:44.000Z
|
python/television/consumers.py
|
pztrick/django-television
|
5857200a0871702052827a5d603db7d60bca1406
|
[
"MIT"
] | 4
|
2019-12-22T11:20:56.000Z
|
2021-06-10T19:38:46.000Z
|
python/television/consumers.py
|
pztrick/django-television
|
5857200a0871702052827a5d603db7d60bca1406
|
[
"MIT"
] | null | null | null |
import json
import traceback
from django.http import HttpResponse
from channels.handler import AsgiHandler
from channels import Group
from .decorators import add_listener, call_listener
from django.conf import settings
from django.db import connection
from channels.auth import http_session_user, channel_session_user, channel_session_user_from_http
from .utils import timestamp
@channel_session_user_from_http
def ws_connect(message):
if message.user.is_authenticated:
        Group("users").add(message.reply_channel)
        Group(f"users.{message.user.id}").add(message.reply_channel)
        print("ws_connect: added to group 'users'")
if message.user.is_superuser:
Group("superusers").add(message.reply_channel)
print("ws_connect: added to group 'superusers'")
if message.user.is_staff:
Group("staff").add(message.reply_channel)
print("ws_connect: added to group 'staff'")
Group("chat").add(message.reply_channel)
message.reply_channel.send({"accept": True})
@channel_session_user
def ws_message(message):
try:
request = json.loads(message['text'])
replyTo = request.get('replyTo', None)
channel = request['channel']
payload = request.get('payload', [])
if settings.DEBUG:
print(f"[{timestamp()}] ws_message received on channel '{channel}'")
n_queries = len(connection.queries)
result = call_listener(channel, message, *payload)
n_queries = len(connection.queries) - n_queries
response = {
'replyTo': replyTo,
'payload': result
}
message.reply_channel.send({
'text': json.dumps(response, default=str)
})
if settings.DEBUG:
print(f"[{timestamp()}] ws_message replied to on channel '{channel}' ({n_queries} SQL queries)")
except Exception as ex:
print(traceback.format_exc())
try:
errorTo = request.get('errorTo', None)
        except Exception:
errorTo = None
formatted_lines = traceback.format_exc().splitlines()
if settings.DEBUG:
result = "Backend Error\n%s\n%s" % (formatted_lines[1], formatted_lines[-1])
else:
result = "Backend Error\n%s" % (formatted_lines[-1], )
response = {
'replyTo': errorTo,
'payload': result
}
message.reply_channel.send({
'text': json.dumps(response, default=str)
})
raise ex
@channel_session_user
def ws_disconnect(message):
if message.user.is_authenticated:
print("ws_disconnect: %s" % message.user.email)
        Group("users").discard(message.reply_channel)
Group(f"users.{message.user.id}").discard(message.reply_channel)
if message.user.is_superuser:
Group("superusers").discard(message.reply_channel)
if message.user.is_staff:
Group("staff").discard(message.reply_channel)
Group("chat").discard(message.reply_channel)
| 37.425
| 108
| 0.654977
|
b3bfece2cb14151382aaeaa2726981e45f3fd08b
| 2,446
|
py
|
Python
|
eshop/store/views/signup.py
|
shruti0419/templates-hacktoberfest
|
748b8d8a335dc53dc6b80a928ca38c53d33b9798
|
[
"MIT"
] | 8
|
2021-10-01T13:29:58.000Z
|
2021-10-06T16:19:50.000Z
|
eshop/store/views/signup.py
|
shruti0419/templates-hacktoberfest
|
748b8d8a335dc53dc6b80a928ca38c53d33b9798
|
[
"MIT"
] | 31
|
2021-09-30T17:45:36.000Z
|
2021-10-12T04:57:21.000Z
|
eshop/store/views/signup.py
|
shruti0419/templates-hacktoberfest
|
748b8d8a335dc53dc6b80a928ca38c53d33b9798
|
[
"MIT"
] | 31
|
2021-09-30T19:49:54.000Z
|
2021-10-05T17:32:42.000Z
|
from django.shortcuts import render, redirect
from django.contrib.auth.hashers import make_password
from store.models.customer import Customer
from django.views import View
class Signup(View):
def get(self, request):
return render(request, 'signup.html')
def post(self, request):
postData = request.POST
first_name = postData.get('firstname')
last_name = postData.get('lastname')
phone = postData.get('phone')
email = postData.get('email')
password = postData.get('password')
# validation
value = {
'first_name': first_name,
'last_name': last_name,
'phone': phone,
'email': email
}
error_message = None
customer = Customer(first_name=first_name,
last_name=last_name,
phone=phone,
email=email,
password=password)
error_message = self.validateCustomer(customer)
if not error_message:
print(first_name, last_name, phone, email, password)
customer.password = make_password(customer.password)
customer.register()
return redirect('homepage')
else:
data = {
'error': error_message,
'values': value
}
return render(request, 'signup.html', data)
def validateCustomer(self, customer):
        error_message = None
        if not customer.first_name:
            error_message = "First Name Required !!"
elif len(customer.first_name) < 4:
error_message = 'First Name must be 4 char long or more'
elif not customer.last_name:
error_message = 'Last Name Required'
elif len(customer.last_name) < 4:
error_message = 'Last Name must be 4 char long or more'
elif not customer.phone:
error_message = 'Phone Number required'
elif len(customer.phone) < 10:
error_message = 'Phone Number must be 10 char Long'
elif len(customer.password) < 6:
error_message = 'Password must be 6 char long'
elif len(customer.email) < 5:
error_message = 'Email must be 5 char long'
elif customer.isExists():
error_message = 'Email Address Already Registered..'
# saving
return error_message
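# A hedged wiring sketch for the project's URLconf (for example store/urls.py); the
# route and the name are assumptions, not taken from this project.
from django.urls import path
from store.views.signup import Signup

urlpatterns = [
    path("signup/", Signup.as_view(), name="signup"),
]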
| 35.449275
| 68
| 0.577269
|
dbcd4dab1960e21a0df909987ff91044f5e61c00
| 3,136
|
py
|
Python
|
http3/dispatch/threaded.py
|
didip/http3
|
7c6fb5c6ca44b69fd221d626c9e2fb60530fa1bc
|
[
"BSD-3-Clause"
] | 1
|
2021-10-07T01:41:33.000Z
|
2021-10-07T01:41:33.000Z
|
http3/dispatch/threaded.py
|
didip/http3
|
7c6fb5c6ca44b69fd221d626c9e2fb60530fa1bc
|
[
"BSD-3-Clause"
] | null | null | null |
http3/dispatch/threaded.py
|
didip/http3
|
7c6fb5c6ca44b69fd221d626c9e2fb60530fa1bc
|
[
"BSD-3-Clause"
] | null | null | null |
from ..config import CertTypes, TimeoutTypes, VerifyTypes
from ..interfaces import AsyncDispatcher, ConcurrencyBackend, Dispatcher
from ..models import (
AsyncRequest,
AsyncRequestData,
AsyncResponse,
AsyncResponseContent,
Request,
RequestData,
Response,
ResponseContent,
)
class ThreadedDispatcher(AsyncDispatcher):
"""
The ThreadedDispatcher class is used to mediate between the Client
(which always uses async under the hood), and a synchronous `Dispatch`
class.
"""
def __init__(self, dispatch: Dispatcher, backend: ConcurrencyBackend) -> None:
self.sync_dispatcher = dispatch
self.backend = backend
async def send(
self,
request: AsyncRequest,
verify: VerifyTypes = None,
cert: CertTypes = None,
timeout: TimeoutTypes = None,
) -> AsyncResponse:
concurrency_backend = self.backend
data = getattr(request, "content", getattr(request, "content_aiter", None))
sync_data = self._sync_request_data(data)
sync_request = Request(
method=request.method,
url=request.url,
headers=request.headers,
data=sync_data,
)
func = self.sync_dispatcher.send
kwargs = {
"request": sync_request,
"verify": verify,
"cert": cert,
"timeout": timeout,
}
sync_response = await self.backend.run_in_threadpool(func, **kwargs)
assert isinstance(sync_response, Response)
content = getattr(
sync_response, "_raw_content", getattr(sync_response, "_raw_stream", None)
)
async_content = self._async_response_content(content)
async def async_on_close() -> None:
nonlocal concurrency_backend, sync_response
await concurrency_backend.run_in_threadpool(sync_response.close)
return AsyncResponse(
status_code=sync_response.status_code,
reason_phrase=sync_response.reason_phrase,
protocol=sync_response.protocol,
headers=sync_response.headers,
content=async_content,
on_close=async_on_close,
request=request,
history=sync_response.history,
)
async def close(self) -> None:
"""
The `.close()` method runs the `Dispatcher.close()` within a threadpool,
so as not to block the async event loop.
"""
func = self.sync_dispatcher.close
await self.backend.run_in_threadpool(func)
def _async_response_content(self, content: ResponseContent) -> AsyncResponseContent:
if isinstance(content, bytes):
return content
        # Coerce the synchronous iterator into an async iterator, with each item
        # of the iteration fetched in a threadpool so the event loop is not blocked.
assert hasattr(content, "__iter__")
return self.backend.iterate_in_threadpool(content)
def _sync_request_data(self, data: AsyncRequestData) -> RequestData:
if isinstance(data, bytes):
return data
return self.backend.iterate(data)
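# A hedged composition sketch of what the class docstring describes: an async client
# hands requests to a ThreadedDispatcher, which runs a synchronous Dispatcher's
# blocking send() on a thread pool. The concrete names below are assumptions; use
# whichever Dispatcher and ConcurrencyBackend implementations the installed version
# provides.
#
#   dispatcher = ThreadedDispatcher(dispatch=some_sync_dispatcher, backend=some_backend)
#   response = await dispatcher.send(async_request)   # blocking work runs off the event loop
#   await dispatcher.close()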
| 32
| 88
| 0.641582
|
1ae194ea8b07d1ae0e68649aff67396b47e8ee02
| 2,673
|
py
|
Python
|
src/OTLMOW/OTLModel/Classes/VRBAZ.py
|
davidvlaminck/OTLClassPython
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | 2
|
2022-02-01T08:58:11.000Z
|
2022-02-08T13:35:17.000Z
|
src/OTLMOW/OTLModel/Classes/VRBAZ.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
src/OTLMOW/OTLModel/Classes/VRBAZ.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Classes.VRModuleZFirmware import VRModuleZFirmware
from OTLMOW.OTLModel.Datatypes.KlVRBAZMerk import KlVRBAZMerk
from OTLMOW.OTLModel.Datatypes.KlVRBAZModelnaam import KlVRBAZModelnaam
# Generated with OTLClassCreator. To modify: extend, do not edit
class VRBAZ(VRModuleZFirmware):
"""Het bedien- en aanzichttoestel, ook wel bedienings- en diagnosepaneel, is ingebouwd achter een politiedeur. Het bevat een display en bedieningstoetsen om de verkeersregelaar te bedienen.
De display van de BAZ geeft de actuele werktoestand aan, inclusief eventuele toestandswisselingen. De actieve defecten zijn onmiddellijk, zonder enige manipulatie, zichtbaar op de display. Met de bedieningsknoppen moet het werkingsregime (online, offline, handbediening, oranjegeel knipperlicht, integraal rood of volledig gedoofd) gekozen kunnen worden. Tevens moet het regime "handbediening" bediend kunnen worden met de bedieningsknoppen. Met een knop wordt er overgeschakeld naar de volgende fase. Een getuigen-LED geeft aan wanneer de overgangsfase is afgerond en er kan overgegaan worden naar de volgende fase"""
typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#VRBAZ'
"""De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI."""
def __init__(self):
super().__init__()
self._merk = OTLAttribuut(field=KlVRBAZMerk,
naam='merk',
label='merk',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#VRBAZ.merk',
definition='De merknaam van de VR-BAZ.',
owner=self)
self._modelnaam = OTLAttribuut(field=KlVRBAZModelnaam,
naam='modelnaam',
label='modelnaam',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#VRBAZ.modelnaam',
definition='De modelnaam van de VR-BAZ.',
owner=self)
@property
def merk(self):
"""De merknaam van de VR-BAZ."""
return self._merk.get_waarde()
@merk.setter
def merk(self, value):
self._merk.set_waarde(value, owner=self)
@property
def modelnaam(self):
"""De modelnaam van de VR-BAZ."""
return self._modelnaam.get_waarde()
@modelnaam.setter
def modelnaam(self, value):
self._modelnaam.set_waarde(value, owner=self)
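# A minimal usage sketch. The attribute setters validate values against the
# KlVRBAZMerk / KlVRBAZModelnaam option lists, so the literals below are hypothetical
# placeholders and may be rejected by the real lists.
if __name__ == "__main__":
    baz = VRBAZ()
    print(baz.typeURI)
    baz.merk = 'hypothetical-merk'            # assumption: must be a KlVRBAZMerk key
    baz.modelnaam = 'hypothetical-modelnaam'  # assumption: must be a KlVRBAZModelnaam key
    print(baz.merk, baz.modelnaam)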
| 53.46
| 618
| 0.657688
|
9c1e5bd0068d03e2cea45cd6468dc3939626770f
| 3,967
|
py
|
Python
|
cellengine/payloads/gate_utils/ellipse_gate.py
|
primitybio/cellengine-python-toolk
|
1f9dd168f1f27e2beba69f02e340371190857b33
|
[
"MIT"
] | 4
|
2021-01-12T17:03:37.000Z
|
2021-12-16T13:23:57.000Z
|
cellengine/payloads/gate_utils/ellipse_gate.py
|
primitybio/cellengine-python-toolk
|
1f9dd168f1f27e2beba69f02e340371190857b33
|
[
"MIT"
] | 61
|
2021-01-11T05:27:16.000Z
|
2022-03-08T01:50:09.000Z
|
cellengine/payloads/gate_utils/ellipse_gate.py
|
primitybio/cellengine-python-toolkit
|
1f9dd168f1f27e2beba69f02e340371190857b33
|
[
"MIT"
] | null | null | null |
from typing import List
from cellengine.payloads.gate_utils import format_common_gate
from cellengine.utils.generate_id import generate_id
def format_ellipse_gate(
experiment_id: str,
x_channel: str,
y_channel: str,
name: str,
x: float,
y: float,
angle: float,
major: float,
minor: float,
label: List = [],
gid: str = None,
locked: bool = False,
parent_population_id: str = None,
parent_population: str = None,
tailored_per_file: bool = False,
fcs_file_id: str = None,
fcs_file: str = None,
create_population: bool = True,
):
"""Formats an ellipse gate for posting to the CellEngine API.
Args:
x_channel (str): The name of the x channel to which the gate applies.
y_channel (str): The name of the y channel to which the gate applies.
name (str): The name of the gate
x (float): The x centerpoint of the gate.
y (float): The y centerpoint of the gate.
angle (float): The angle of the ellipse in radians.
major (float): The major radius of the ellipse.
minor (float): The minor radius of the ellipse.
        label (List, optional): Position of the label as ``[x, y]``. Defaults to
            the midpoint of the gate.
gid (str, optional): Group ID of the gate, used for tailoring. If this
is not specified, then a new Group ID will be created. To create a
tailored gate, the gid of the global tailored gate must be
specified.
locked (bool, optional): Prevents modification of the gate via the web
interface.
parent_population_id (Optional[str]): ID of the parent population. Use
``None`` for the "ungated" population. If specified, do not specify
``parent_population``.
parent_population (str, optional): Name of the parent population. An
attempt will be made to find the population by name. If zero or
more than one population exists with the name, an error will be
thrown. If specified, do not specify ``parent_population_id``.
        tailored_per_file (bool, optional): Whether or not this gate is
            tailored per FCS file.
        fcs_file_id (str, optional): ID of FCS file, if tailored per file. Use
            ``None`` for the global gate in a tailored gate group. If
            specified, do not specify ``fcs_file``.
fcs_file (str, optional): Name of FCS file, if tailored per file. An
attempt will be made to find the file by name. If zero or more than
one file exists with the name, an error will be thrown. Looking up
files by name is slower than using the ID, as this requires
additional requests to the server. If specified, do not specify
``fcs_file_id``.
create_population (optional, bool): Automatically create corresponding
population.
Returns:
EllipseGate: An EllipseGate object.
Examples:
```python
cellengine.Gate.create_ellipse_gate(experiment_id, x_channel="FSC-A",
y_channel="FSC-W", name="my gate", x=260000, y=64000, angle=0,
major=120000, minor=70000)
```
"""
if label == []:
label = [x, y]
if gid is None:
gid = generate_id()
model = {
"locked": locked,
"label": label,
"ellipse": {"angle": angle, "major": major, "minor": minor, "center": [x, y]},
}
body = {
"experimentId": experiment_id,
"name": name,
"type": "EllipseGate",
"gid": gid,
"xChannel": x_channel,
"yChannel": y_channel,
"parentPopulationId": parent_population_id,
"model": model,
}
return format_common_gate(
experiment_id,
body=body,
tailored_per_file=tailored_per_file,
fcs_file_id=fcs_file_id,
fcs_file=fcs_file,
create_population=create_population,
)
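# A hedged usage sketch: calling the formatter directly with the same values as the
# docstring example. The experiment id is a placeholder, and depending on the
# arguments format_common_gate may need an authenticated CellEngine session to
# resolve names into IDs.
if __name__ == "__main__":
    body = format_ellipse_gate(
        experiment_id="5d38a6f79fae87499999a74b",  # hypothetical id
        x_channel="FSC-A",
        y_channel="FSC-W",
        name="my gate",
        x=260000,
        y=64000,
        angle=0,
        major=120000,
        minor=70000,
    )
    print(body)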
| 37.424528
| 86
| 0.624653
|
bac9bb6c55d7d93cdaf4803c44758361de8192df
| 253
|
py
|
Python
|
chatnoir_api/types.py
|
chatnoir-eu/chatnoir-api
|
aff5059d2caafc892d0ba750889dc9938fbb0493
|
[
"MIT"
] | 3
|
2022-01-25T10:31:19.000Z
|
2022-01-27T10:21:51.000Z
|
chatnoir_api/types.py
|
chatnoir-eu/chatnoir-api
|
aff5059d2caafc892d0ba750889dc9938fbb0493
|
[
"MIT"
] | 1
|
2022-01-22T17:28:47.000Z
|
2022-01-24T12:57:30.000Z
|
chatnoir_api/types.py
|
heinrichreimer/chatnoir-api
|
aff5059d2caafc892d0ba750889dc9938fbb0493
|
[
"MIT"
] | 1
|
2022-03-31T14:59:44.000Z
|
2022-03-31T14:59:44.000Z
|
try:
from functools import cached_property # noqa: F401
except ImportError:
cached_property = property # noqa: F401
try:
from typing import Literal # noqa: F401
except ImportError:
from typing_extensions import Literal # noqa: F401
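# A small usage sketch: the fallbacks above let the rest of the package use
# cached_property and Literal uniformly across Python versions (on interpreters
# without functools.cached_property it simply degrades to a plain property).
if __name__ == "__main__":
    class Answer:
        @cached_property
        def value(self) -> Literal[42]:
            return 42

    print(Answer().value)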
| 25.3
| 55
| 0.735178
|
0645ee37440f90b8a168915191e648b994f53786
| 3,087
|
py
|
Python
|
sdk/storagepool/azure-mgmt-storagepool/setup.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 3
|
2020-06-23T02:25:27.000Z
|
2021-09-07T18:48:11.000Z
|
sdk/storagepool/azure-mgmt-storagepool/setup.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 510
|
2019-07-17T16:11:19.000Z
|
2021-08-02T08:38:32.000Z
|
sdk/storagepool/azure-mgmt-storagepool/setup.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 5
|
2019-09-04T12:51:37.000Z
|
2020-09-16T07:28:40.000Z
|
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-mgmt-storagepool"
PACKAGE_PPRINT_NAME = "Storagepool Management"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py')
if os.path.exists(os.path.join(package_folder_path, 'version.py'))
else os.path.join(package_folder_path, '_version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + changelog,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=[
'tests',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
'azure.mgmt',
]),
install_requires=[
'msrest>=0.6.21',
'azure-common~=1.1',
'azure-mgmt-core>=1.2.0,<2.0.0',
],
extras_require={
":python_version<'3.0'": ['azure-mgmt-nspkg'],
}
)
| 33.554348
| 91
| 0.60609
|
190e0debcfd77d78928700924dcb09254669831c
| 760
|
py
|
Python
|
venv/Lib/site-packages/joblib/test/test_module.py
|
deerajnagothu/pyenf_extraction
|
f5c3ebc9657133e1bea102b41768152ba0b30e1b
|
[
"MIT"
] | 6
|
2021-10-30T09:00:55.000Z
|
2022-03-07T16:33:25.000Z
|
venv/Lib/site-packages/joblib/test/test_module.py
|
deerajnagothu/pyenf_extraction
|
f5c3ebc9657133e1bea102b41768152ba0b30e1b
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/joblib/test/test_module.py
|
deerajnagothu/pyenf_extraction
|
f5c3ebc9657133e1bea102b41768152ba0b30e1b
|
[
"MIT"
] | 1
|
2022-03-16T11:33:50.000Z
|
2022-03-16T11:33:50.000Z
|
import sys
import joblib
import pytest
from joblib.testing import check_subprocess_call
def test_version():
    assert hasattr(joblib, '__version__'), (
        "The joblib module has no __version__ attribute")
@pytest.mark.skipif(sys.version_info < (3, 3), reason="Need python3.3+")
def test_no_start_method_side_effect_on_import():
# check that importing joblib does not implicitly set the global
# start_method for multiprocessing.
code = """if True:
import joblib
import multiprocessing as mp
# The following line would raise RuntimeError if the
# start_method is already set.
mp.set_start_method("loky")
"""
check_subprocess_call([sys.executable, '-c', code])
| 31.666667
| 73
| 0.686842
|
5c161b119fcfae68866c5d3ae296b129b348e9ff
| 1,230
|
py
|
Python
|
src/pyscript/matmul.py
|
JohndeVostok/tftest
|
b63123357b935db155394ccec156862f0cacee6e
|
[
"MIT"
] | null | null | null |
src/pyscript/matmul.py
|
JohndeVostok/tftest
|
b63123357b935db155394ccec156862f0cacee6e
|
[
"MIT"
] | null | null | null |
src/pyscript/matmul.py
|
JohndeVostok/tftest
|
b63123357b935db155394ccec156862f0cacee6e
|
[
"MIT"
] | null | null | null |
import os
import json
import tensorflow as tf
from tensorflow.python.client import timeline
if __name__ == "__main__":
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
os.chdir("/home/mazx/git/tftest")
a = tf.random_normal([5000, 5000], name = "random_normal_0")
b = tf.random_normal([5000, 5000], name = "random_normal_1")
c = tf.matmul(a, b, name = "matmul_0")
d = tf.matmul(b, c, name = "matmul_1")
sess = tf.Session()
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
writer=tf.summary.FileWriter("logs", sess.graph)
sess.run(d, options=run_options, run_metadata=run_metadata)
writer.add_run_metadata(run_metadata, 'step %03d' % 0)
writer.close()
tl = timeline.Timeline(run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format()
with open('matmul_timeline.json', 'w') as f:
f.write(ctf)
with open('matmul_graph.json', "w") as f:
nodes = []
for n in tf.get_default_graph().as_graph_def().node:
nodes.append("{\"name\":\"" + str(n.name) + "\",\"input\":\"" + str(n.input) + "\"}")
f.write("{\"nodes\":[\n")
f.write(",".join(nodes))
f.write("]}")
| 31.538462
| 97
| 0.629268
|
20451fbf46bb28f5207462324ffc5900dcdbf7b9
| 8,071
|
py
|
Python
|
PaddleCV/gan/network/SPADE_network.py
|
suytingwan/models
|
ccdbfe77d071cc19b55fb9f4b738912e35d982ef
|
[
"Apache-2.0"
] | 5
|
2021-09-28T13:28:01.000Z
|
2021-12-21T07:25:44.000Z
|
PaddleCV/gan/network/SPADE_network.py
|
suytingwan/models
|
ccdbfe77d071cc19b55fb9f4b738912e35d982ef
|
[
"Apache-2.0"
] | 1
|
2019-11-18T03:03:37.000Z
|
2019-11-18T03:03:37.000Z
|
PaddleCV/gan/network/SPADE_network.py
|
suytingwan/models
|
ccdbfe77d071cc19b55fb9f4b738912e35d982ef
|
[
"Apache-2.0"
] | 4
|
2021-08-11T08:25:10.000Z
|
2021-10-16T07:41:59.000Z
|
#copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .base_network import conv2d, deconv2d, norm_layer, conv2d_spectral_norm
import paddle.fluid as fluid
import numpy as np
class SPADE_model(object):
def __init__(self):
pass
def network_G(self, input, name, cfg, is_test=False):
nf = cfg.ngf
num_up_layers = 5
sw = cfg.crop_width // (2**num_up_layers)
sh = cfg.crop_height // (2**num_up_layers)
seg = input
x = fluid.layers.resize_nearest(
seg, out_shape=(sh, sw), align_corners=False)
x = conv2d(
x,
16 * nf,
3,
padding=1,
name=name + "_fc",
use_bias=True,
initial="kaiming",
is_test=is_test)
x = self.SPADEResnetBlock(
x,
seg,
16 * nf,
16 * nf,
cfg,
name=name + "_head_0",
is_test=is_test)
x = fluid.layers.resize_nearest(x, scale=2.0, align_corners=False)
x = self.SPADEResnetBlock(
x,
seg,
16 * nf,
16 * nf,
cfg,
name=name + "_G_middle_0",
is_test=is_test)
x = self.SPADEResnetBlock(
x,
seg,
16 * nf,
16 * nf,
cfg,
name=name + "_G_middle_1",
is_test=is_test)
x = fluid.layers.resize_nearest(x, scale=2.0, align_corners=False)
x = self.SPADEResnetBlock(
x, seg, 16 * nf, 8 * nf, cfg, name=name + "_up_0", is_test=is_test)
x = fluid.layers.resize_nearest(x, scale=2.0, align_corners=False)
x = self.SPADEResnetBlock(
x, seg, 8 * nf, 4 * nf, cfg, name=name + "_up_1", is_test=is_test)
x = fluid.layers.resize_nearest(x, scale=2.0, align_corners=False)
x = self.SPADEResnetBlock(
x, seg, 4 * nf, 2 * nf, cfg, name=name + "_up_2", is_test=is_test)
x = fluid.layers.resize_nearest(x, scale=2.0, align_corners=False)
x = self.SPADEResnetBlock(
x, seg, 2 * nf, 1 * nf, cfg, name=name + "_up_3", is_test=is_test)
x = fluid.layers.leaky_relu(
x, alpha=0.2, name=name + '_conv_img_leaky_relu')
x = conv2d(
x,
3,
3,
padding=1,
name=name + "_conv_img",
use_bias=True,
initial="kaiming",
is_test=is_test)
x = fluid.layers.tanh(x)
return x
def SPADEResnetBlock(self, x, seg, fin, fout, opt, name, is_test=False):
learn_shortcut = (fin != fout)
fmiddle = min(fin, fout)
semantic_nc = opt.label_nc + (0 if opt.no_instance else 1)
if learn_shortcut:
x_s = self.SPADE(
x, seg, fin, name=name + ".norm_s", is_test=is_test)
x_s = conv2d_spectral_norm(
x_s,
fout,
1,
use_bias=False,
name=name + ".conv_s",
is_test=is_test)
else:
x_s = x
dx = self.SPADE(x, seg, fin, name=name + ".norm_0", is_test=is_test)
dx = fluid.layers.leaky_relu(dx, alpha=0.2, name=name + '_leaky_relu0')
dx = conv2d_spectral_norm(
dx,
fmiddle,
3,
padding=1,
name=name + ".conv_0",
use_bias=True,
is_test=is_test)
dx = self.SPADE(
dx, seg, fmiddle, name=name + ".norm_1", is_test=is_test)
dx = fluid.layers.leaky_relu(dx, alpha=0.2, name=name + '_leaky_relu1')
dx = conv2d_spectral_norm(
dx,
fout,
3,
padding=1,
name=name + ".conv_1",
use_bias=True,
is_test=is_test)
output = dx + x_s
return output
def SPADE(self, input, seg_map, norm_nc, name, is_test=False):
nhidden = 128
ks = 3
pw = ks // 2
seg_map = fluid.layers.resize_nearest(
seg_map, out_shape=input.shape[2:], align_corners=False)
actv = conv2d(
seg_map,
nhidden,
ks,
padding=pw,
activation_fn='relu',
name=name + ".mlp_shared.0",
initial="kaiming",
use_bias=True)
gamma = conv2d(
actv,
norm_nc,
ks,
padding=pw,
name=name + ".mlp_gamma",
initial="kaiming",
use_bias=True)
beta = conv2d(
actv,
norm_nc,
ks,
padding=pw,
name=name + ".mlp_beta",
initial="kaiming",
use_bias=True)
param_attr = fluid.ParamAttr(
name=name + ".param_free_norm.weight",
initializer=fluid.initializer.Constant(value=1.0),
trainable=False)
bias_attr = fluid.ParamAttr(
name=name + ".param_free_norm.bias",
initializer=fluid.initializer.Constant(0.0),
trainable=False)
norm = fluid.layers.batch_norm(
input=input,
name=name,
param_attr=param_attr,
bias_attr=bias_attr,
moving_mean_name=name + ".param_free_norm.running_mean",
moving_variance_name=name + ".param_free_norm.running_var",
is_test=is_test)
out = norm * (1 + gamma) + beta
return out
def network_D(self, input, name, cfg):
num_D = 2
result = []
for i in range(num_D):
out = build_discriminator_Nlayers(input, name=name + "_%d" % i)
result.append(out)
input = fluid.layers.pool2d(
input,
pool_size=3,
pool_type="avg",
pool_stride=2,
pool_padding=1,
name=name + "_pool%d" % i)
return result
def build_discriminator_Nlayers(input,
name="discriminator",
d_nlayers=4,
d_base_dims=64,
norm_type='instance_norm'):
kw = 4
padw = int(np.ceil((kw - 1.0) / 2))
nf = d_base_dims
res_list = []
res1 = conv2d(
input,
nf,
kw,
2,
0.02,
1,
name=name + ".model0.0",
activation_fn='leaky_relu',
relufactor=0.2,
initial="kaiming",
use_bias=True)
d_dims = d_base_dims
res_list.append(res1)
for i in range(1, d_nlayers):
conv_name = name + ".model{}.0.0".format(i)
nf = min(nf * 2, 512)
stride = 1 if i == d_nlayers - 1 else 2
dis_output = conv2d_spectral_norm(
res_list[-1],
nf,
kw,
stride,
0.02,
1,
name=conv_name,
norm=norm_type,
activation_fn='leaky_relu',
relufactor=0.2,
use_bias=False,
norm_affine=False)
res_list.append(dis_output)
o_c4 = conv2d(
res_list[-1],
1,
4,
1,
0.02,
1,
name + ".model{}.0".format(d_nlayers),
initial="kaiming",
use_bias=True)
res_list.append(o_c4)
return res_list
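# A hedged build sketch for the generator graph. The cfg object is a stand-in that
# exposes only the fields network_G and SPADE read (real training configs carry many
# more options), and the input shape is an assumption; on older Paddle releases use
# fluid.layers.data instead of fluid.data.
if __name__ == "__main__":
    from types import SimpleNamespace

    cfg = SimpleNamespace(ngf=64, crop_width=256, crop_height=256,
                          label_nc=12, no_instance=False)
    seg = fluid.data(name="seg", shape=[-1, cfg.label_nc + 1, 256, 256], dtype="float32")
    fake = SPADE_model().network_G(seg, "generator_spade", cfg, is_test=True)
    print(fake.shape)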
| 30.923372
| 79
| 0.515673
|
701fade2ab871d586a73ad7aeda563e0d20f5df4
| 3,707
|
py
|
Python
|
core/utils/frame_utils.py
|
gallif/raft
|
11a35ff5ede31918a360eca2f1481bc5fec9b5e5
|
[
"BSD-3-Clause"
] | 1
|
2021-11-28T09:52:49.000Z
|
2021-11-28T09:52:49.000Z
|
core/utils/frame_utils.py
|
Kolin96/RAFT
|
3fac6470f487c85bcc03ef102f86e1542262108e
|
[
"BSD-3-Clause"
] | null | null | null |
core/utils/frame_utils.py
|
Kolin96/RAFT
|
3fac6470f487c85bcc03ef102f86e1542262108e
|
[
"BSD-3-Clause"
] | 1
|
2021-03-10T03:43:18.000Z
|
2021-03-10T03:43:18.000Z
|
import numpy as np
from PIL import Image
from os.path import *
import re
import cv2
TAG_CHAR = np.array([202021.25], np.float32)
def readFlow(fn):
""" Read .flo file in Middlebury format"""
# Code adapted from:
# http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy
# WARNING: this will work on little-endian architectures (eg Intel x86) only!
# print 'fn = %s'%(fn)
with open(fn, 'rb') as f:
magic = np.fromfile(f, np.float32, count=1)
if 202021.25 != magic:
print('Magic number incorrect. Invalid .flo file')
return None
else:
w = np.fromfile(f, np.int32, count=1)
h = np.fromfile(f, np.int32, count=1)
# print 'Reading %d x %d flo file\n' % (w, h)
data = np.fromfile(f, np.float32, count=2*int(w)*int(h))
# Reshape data into 3D array (columns, rows, bands)
# The reshape here is for visualization, the original code is (w,h,2)
return np.resize(data, (int(h), int(w), 2))
def readPFM(file):
file = open(file, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
if header == b'PF':
color = True
elif header == b'Pf':
color = False
else:
raise Exception('Not a PFM file.')
dim_match = re.match(rb'^(\d+)\s(\d+)\s$', file.readline())
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header.')
scale = float(file.readline().rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
return data
def writeFlow(filename,uv,v=None):
""" Write optical flow to file.
If v is None, uv is assumed to contain both u and v channels,
stacked in depth.
Original code by Deqing Sun, adapted from Daniel Scharstein.
"""
nBands = 2
if v is None:
assert(uv.ndim == 3)
assert(uv.shape[2] == 2)
u = uv[:,:,0]
v = uv[:,:,1]
else:
u = uv
assert(u.shape == v.shape)
height,width = u.shape
f = open(filename,'wb')
# write the header
f.write(TAG_CHAR)
np.array(width).astype(np.int32).tofile(f)
np.array(height).astype(np.int32).tofile(f)
# arrange into matrix form
tmp = np.zeros((height, width*nBands))
tmp[:,np.arange(width)*2] = u
tmp[:,np.arange(width)*2 + 1] = v
tmp.astype(np.float32).tofile(f)
f.close()
def readFlowKITTI(filename):
flow = cv2.imread(filename, cv2.IMREAD_ANYDEPTH|cv2.IMREAD_COLOR)
flow = flow[:,:,::-1].astype(np.float32)
flow, valid = flow[:, :, :2], flow[:, :, 2]
flow = (flow - 2**15) / 64.0
return flow, valid
def writeFlowKITTI(filename, uv):
uv = 64.0 * uv + 2**15
valid = np.ones([uv.shape[0], uv.shape[1], 1])
uv = np.concatenate([uv, valid], axis=-1).astype(np.uint16)
cv2.imwrite(filename, uv[..., ::-1])
def read_gen(file_name, pil=False):
ext = splitext(file_name)[-1]
if ext == '.png' or ext == '.jpeg' or ext == '.ppm' or ext == '.jpg':
return Image.open(file_name)
elif ext == '.bin' or ext == '.raw':
return np.load(file_name)
elif ext == '.flo':
return readFlow(file_name).astype(np.float32)
elif ext == '.pfm':
flow = readPFM(file_name).astype(np.float32)
return flow[:, :, :-1]
return []
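# A small round-trip sketch: write a random flow field with writeFlow and read it
# back with readFlow. The file name is arbitrary.
if __name__ == "__main__":
    flow_out = np.random.rand(4, 6, 2).astype(np.float32)
    writeFlow("example.flo", flow_out)
    flow_in = readFlow("example.flo")
    assert flow_in.shape == (4, 6, 2)
    assert np.allclose(flow_in, flow_out)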
| 29.895161
| 109
| 0.578635
|
cfd526ec4e91e34a6efb4c2fd9ca547cdcc828d6
| 497
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/validators/heatmapgl/colorbar/tickformatstop/_name.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
env/lib/python3.8/site-packages/plotly/validators/heatmapgl/colorbar/tickformatstop/_name.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
env/lib/python3.8/site-packages/plotly/validators/heatmapgl/colorbar/tickformatstop/_name.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="name",
parent_name="heatmapgl.colorbar.tickformatstop",
**kwargs
):
super(NameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
| 27.611111
| 66
| 0.61167
|
891453bacdbb537eddab14e1b54cd58ad577b562
| 11,070
|
py
|
Python
|
aim/storage/structured/sql_engine/entities.py
|
fairhopeweb/aim
|
f17b309e0e415e8798b6330b9ee71436a1b3994e
|
[
"Apache-2.0"
] | null | null | null |
aim/storage/structured/sql_engine/entities.py
|
fairhopeweb/aim
|
f17b309e0e415e8798b6330b9ee71436a1b3994e
|
[
"Apache-2.0"
] | null | null | null |
aim/storage/structured/sql_engine/entities.py
|
fairhopeweb/aim
|
f17b309e0e415e8798b6330b9ee71436a1b3994e
|
[
"Apache-2.0"
] | null | null | null |
from typing import Collection, Union
from sqlalchemy.orm import joinedload
from aim.storage.types import SafeNone
from aim.storage.structured.entities import \
Run as IRun, \
Experiment as IExperiment, \
Tag as ITag,\
RunCollection, TagCollection
from aim.storage.structured.sql_engine.models import \
Run as RunModel,\
Experiment as ExperimentModel,\
Tag as TagModel
from aim.storage.structured.sql_engine.utils import ModelMappedClassMeta, ModelMappedCollection
from aim.storage.structured.sql_engine.utils import ModelMappedProperty as Property
def timestamp_or_none(dt):
if dt is None:
return None
return dt.timestamp()
class ModelMappedRun(IRun, metaclass=ModelMappedClassMeta):
__model__ = RunModel
__mapped_properties__ = [
Property('name'),
Property('description'),
Property('archived', 'is_archived'),
Property('created_at', with_setter=False),
Property('creation_time', 'created_at', get_modifier=timestamp_or_none, with_setter=False),
Property('finalized_at'),
Property('end_time', 'finalized_at', get_modifier=timestamp_or_none, with_setter=False),
Property('updated_at', with_setter=False),
Property('hash', with_setter=False),
Property('experiment', autogenerate=False),
Property('tags', autogenerate=False),
]
def __init__(self, model: RunModel, session):
self._model = model
self._id = model.id
self._session = session
def __repr__(self) -> str:
return f'<ModelMappedRun id={self.hash}, name=\'{self.name}\'>'
@classmethod
def from_model(cls, model_obj, session) -> 'ModelMappedRun':
return ModelMappedRun(model_obj, session)
@classmethod
def from_hash(cls, runhash: str, session) -> 'ModelMappedRun':
if session.query(RunModel).filter(RunModel.hash == runhash).scalar():
raise ValueError(f'Run with hash \'{runhash}\' already exists.')
run = RunModel(runhash)
session.add(run)
session.flush()
return ModelMappedRun(run, session)
@classmethod
def find(cls, _id: str, **kwargs) -> Union[IRun, SafeNone]:
session = kwargs.get('session')
if not session:
return SafeNone()
model_obj = session.query(RunModel).options([
joinedload(RunModel.experiment),
joinedload(RunModel.tags),
]).filter(RunModel.hash == _id).first()
if model_obj:
return ModelMappedRun.from_model(model_obj, session)
return SafeNone()
@classmethod
def all(cls, **kwargs) -> Collection[IRun]:
session = kwargs.get('session')
if not session:
return []
q = session.query(RunModel).options([
joinedload(RunModel.experiment),
joinedload(RunModel.tags),
]).order_by(RunModel.created_at)
return ModelMappedRunCollection(session, query=q)
@classmethod
def search(cls, term: str, **kwargs) -> Collection[IRun]:
session = kwargs.get('session')
if not session:
return []
term = f'%{term}%'
q = session.query(RunModel).options([
joinedload(RunModel.experiment),
joinedload(RunModel.tags),
]).filter(RunModel.name.like(term))
return ModelMappedRunCollection(session, query=q)
@property
def experiment(self) -> Union[IExperiment, SafeNone]:
if self._model and self._model.experiment:
return ModelMappedExperiment(self._model.experiment, self._session)
else:
return SafeNone()
@experiment.setter
def experiment(self, value: str):
session = self._session
if value is None:
exp = None
else:
exp = session.query(ExperimentModel).filter(ExperimentModel.name == value).first()
if not exp:
exp = ExperimentModel(value)
session.add(exp)
self._model.experiment = exp
session.add(self._model)
session.flush()
@property
def tags(self) -> TagCollection:
if self._model:
return ModelMappedTagCollection(self._session,
collection=[t for t in self._model.tags if t.is_archived is not True])
else:
return []
def add_tag(self, value: str) -> ITag:
session = self._session
tag = session.query(TagModel).filter(TagModel.name == value).first()
if not tag:
tag = TagModel(value)
session.add(tag)
self._model.tags.append(tag)
session.add(self._model)
session.flush()
return ModelMappedTag.from_model(tag, session)
def remove_tag(self, tag_id: str) -> bool:
session = self._session
tag_removed = False
for tag in self._model.tags:
if tag.uuid == tag_id:
self._model.tags.remove(tag)
tag_removed = True
break
session.add(self._model)
session.flush()
return tag_removed
class ModelMappedExperiment(IExperiment, metaclass=ModelMappedClassMeta):
__model__ = ExperimentModel
__mapped_properties__ = [
Property('name'),
Property('uuid', with_setter=False),
Property('archived', 'is_archived'),
Property('created_at', with_setter=False),
Property('updated_at', with_setter=False),
]
def __init__(self, model_inst: ExperimentModel, session):
self._model = model_inst
self._id = model_inst.id
self._session = session
def __repr__(self) -> str:
return f'<ModelMappedExperiment id={self.uuid}, name=\'{self.name}\'>'
def __eq__(self, other) -> bool:
if isinstance(other, str):
return self._model.name == other
elif isinstance(other, ModelMappedExperiment):
return self._model.id == other._model.id
return False
@classmethod
def from_model(cls, model_obj, session) -> 'ModelMappedExperiment':
return ModelMappedExperiment(model_obj, session)
@classmethod
def from_name(cls, name: str, session) -> 'ModelMappedExperiment':
if session.query(ExperimentModel).filter(ExperimentModel.name == name).scalar():
raise ValueError(f'Experiment with name \'{name}\' already exists.')
exp = ExperimentModel(name)
session.add(exp)
session.flush()
return ModelMappedExperiment(exp, session)
@property
def runs(self) -> RunCollection:
return ModelMappedRunCollection(self._session, collection=self._model.runs)
@classmethod
def find(cls, _id: str, **kwargs) -> Union[IExperiment, SafeNone]:
session = kwargs.get('session')
if not session:
return SafeNone()
model_obj = session.query(ExperimentModel).options([
joinedload(ExperimentModel.runs),
]).filter(ExperimentModel.uuid == _id).first()
if model_obj:
return ModelMappedExperiment(model_obj, session)
return SafeNone()
@classmethod
def all(cls, **kwargs) -> Collection[IExperiment]:
session = kwargs.get('session')
if not session:
return []
q = session.query(ExperimentModel).options([
joinedload(ExperimentModel.runs),
])
return ModelMappedExperimentCollection(session, query=q)
@classmethod
def search(cls, term: str, **kwargs) -> Collection[IExperiment]:
session = kwargs.get('session')
if not session:
return []
term = f'%{term}%'
q = session.query(ExperimentModel).options([
joinedload(ExperimentModel.runs),
]).filter(ExperimentModel.name.like(term))
return ModelMappedExperimentCollection(session, query=q)
class ModelMappedTag(ITag, metaclass=ModelMappedClassMeta):
__model__ = TagModel
__mapped_properties__ = [
Property('name'),
Property('color'),
Property('description'),
Property('archived', 'is_archived'),
Property('uuid', with_setter=False),
Property('created_at', with_setter=False),
Property('updated_at', with_setter=False),
]
def __init__(self, model_inst: TagModel, session):
self._model = model_inst
self._id = model_inst.id
self._session = session
def __repr__(self) -> str:
return f'<ModelMappedTag id={self.uuid}, name=\'{self.name}\'>'
def __eq__(self, other) -> bool:
if isinstance(other, str):
return self._model.name == other
elif isinstance(other, ModelMappedTag):
return self._model.id == other._model.id
return False
@classmethod
def from_model(cls, model_obj, session) -> 'ModelMappedTag':
return ModelMappedTag(model_obj, session)
@classmethod
def from_name(cls, name: str, session) -> 'ModelMappedTag':
if session.query(TagModel).filter(TagModel.name == name).scalar():
raise ValueError(f'Tag with name \'{name}\' already exists.')
tag = TagModel(name)
session.add(tag)
session.flush()
return ModelMappedTag(tag, session)
@classmethod
def find(cls, _id: str, **kwargs) -> Union[ITag, SafeNone]:
session = kwargs.get('session')
if not session:
return SafeNone()
model_obj = session.query(TagModel).options([
joinedload(TagModel.runs),
]).filter(TagModel.uuid == _id).first()
if model_obj:
return ModelMappedTag(model_obj, session)
return SafeNone()
@classmethod
def all(cls, **kwargs) -> Collection[ITag]:
session = kwargs.get('session')
if not session:
return []
q = session.query(TagModel).options([
joinedload(TagModel.runs),
])
return ModelMappedTagCollection(session, query=q)
@classmethod
def search(cls, term: str, **kwargs) -> Collection[ITag]:
session = kwargs.get('session')
if not session:
return []
term = f'%{term}%'
q = session.query(TagModel).options([
joinedload(TagModel.runs),
]).filter(TagModel.name.like(term))
return ModelMappedTagCollection(session, query=q)
@classmethod
def delete(cls, _id: str, **kwargs) -> bool:
session = kwargs.get('session')
if not session:
return False
model_obj = session.query(TagModel).filter(TagModel.uuid == _id).first()
if model_obj:
session.delete(model_obj)
return True
return False
@property
def runs(self) -> RunCollection:
return ModelMappedRunCollection(self._session, collection=self._model.runs)
ModelMappedRunCollection = ModelMappedCollection[ModelMappedRun]
ModelMappedExperimentCollection = ModelMappedCollection[ModelMappedExperiment]
ModelMappedTagCollection = ModelMappedCollection[ModelMappedTag]
| 34.811321
| 114
| 0.628636
|
0d51d0dee13bb281a3e3e357e611cd70a6008c43
| 10,563
|
py
|
Python
|
tensorflow/contrib/image/python/ops/image_ops.py
|
xincao79/tensorflow
|
7fa0cf39f854d5fdaaa19ad6425dfed02f5fea64
|
[
"Apache-2.0"
] | 1
|
2019-01-22T19:43:27.000Z
|
2019-01-22T19:43:27.000Z
|
tensorflow/contrib/image/python/ops/image_ops.py
|
xincao79/tensorflow
|
7fa0cf39f854d5fdaaa19ad6425dfed02f5fea64
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/image/python/ops/image_ops.py
|
xincao79/tensorflow
|
7fa0cf39f854d5fdaaa19ad6425dfed02f5fea64
|
[
"Apache-2.0"
] | 1
|
2019-03-03T15:17:50.000Z
|
2019-03-03T15:17:50.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python layer for image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.image.ops import gen_image_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import resource_loader
_image_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_image_ops.so"))
_IMAGE_DTYPES = set(
[dtypes.uint8, dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64])
ops.RegisterShape("ImageProjectiveTransform")(common_shapes.call_cpp_shape_fn)
def rotate(images, angles, interpolation="NEAREST"):
"""Rotate image(s) by the passed angle(s) in radians.
Args:
images: A tensor of shape (num_images, num_rows, num_columns, num_channels)
(NHWC), (num_rows, num_columns, num_channels) (HWC), or
(num_rows, num_columns) (HW).
angles: A scalar angle to rotate all images by, or (if images has rank 4)
a vector of length num_images, with an angle for each image in the batch.
interpolation: Interpolation mode. Supported values: "NEAREST", "BILINEAR".
Returns:
Image(s) with the same type and shape as `images`, rotated by the given
angle(s). Empty space due to the rotation will be filled with zeros.
Raises:
TypeError: If `image` is an invalid type.
"""
image_or_images = ops.convert_to_tensor(images, name="images")
if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:
raise TypeError("Invalid dtype %s." % image_or_images.dtype)
if len(image_or_images.get_shape()) == 2:
images = image_or_images[None, :, :, None]
elif len(image_or_images.get_shape()) == 3:
images = image_or_images[None, :, :, :]
elif len(image_or_images.get_shape()) == 4:
images = image_or_images
else:
raise TypeError("Images should have rank between 2 and 4.")
image_height = math_ops.cast(array_ops.shape(images)[1], dtypes.float32)[None]
image_width = math_ops.cast(array_ops.shape(images)[2], dtypes.float32)[None]
output = transform(
images,
angles_to_projective_transforms(angles, image_height, image_width),
interpolation=interpolation)
if len(image_or_images.get_shape()) == 2:
return output[0, :, :, 0]
elif len(image_or_images.get_shape()) == 3:
return output[0, :, :, :]
else:
return output
def angles_to_projective_transforms(angles, image_height, image_width):
"""Returns projective transform(s) for the given angle(s).
Args:
angles: A scalar angle to rotate all images by, or (for batches of images)
a vector with an angle to rotate each image in the batch.
image_height: Height of the image(s) to be transformed.
image_width: Width of the image(s) to be transformed.
Returns:
A tensor of shape (num_images, 8). Projective transforms which can be given
to `tf.contrib.image.transform`.
"""
angle_or_angles = ops.convert_to_tensor(
angles, name="angles", dtype=dtypes.float32)
if len(angle_or_angles.get_shape()) == 0: # pylint: disable=g-explicit-length-test
angles = angle_or_angles[None]
elif len(angle_or_angles.get_shape()) == 1:
angles = angle_or_angles
else:
raise TypeError("Angles should have rank 0 or 1.")
x_offset = ((image_width - 1) - (math_ops.cos(angles) *
(image_width - 1) - math_ops.sin(angles) *
(image_height - 1))) / 2.0
y_offset = ((image_height - 1) - (math_ops.sin(angles) *
(image_width - 1) + math_ops.cos(angles) *
(image_height - 1))) / 2.0
num_angles = array_ops.shape(angles)[0]
return array_ops.concat(
values=[
math_ops.cos(angles)[:, None],
-math_ops.sin(angles)[:, None],
x_offset[:, None],
math_ops.sin(angles)[:, None],
math_ops.cos(angles)[:, None],
y_offset[:, None],
array_ops.zeros((num_angles, 2), dtypes.float32),
],
axis=1)
def transform(images, transforms, interpolation="NEAREST"):
"""Applies the given transform(s) to the image(s).
Args:
images: A tensor of shape (num_images, num_rows, num_columns, num_channels)
(NHWC), (num_rows, num_columns, num_channels) (HWC), or
(num_rows, num_columns) (HW).
transforms: Projective transform matrix/matrices. A vector of length 8 or
tensor of size N x 8. If one row of transforms is
[a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point
`(x, y)` to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
where `k = c0 x + c1 y + 1`. The transforms are *inverted* compared to
the transform mapping input points to output points.
interpolation: Interpolation mode. Supported values: "NEAREST", "BILINEAR".
Returns:
Image(s) with the same type and shape as `images`, with the given
transform(s) applied. Transformed coordinates outside of the input image
will be filled with zeros.
Raises:
TypeError: If `image` is an invalid type.
"""
image_or_images = ops.convert_to_tensor(images, name="images")
transform_or_transforms = ops.convert_to_tensor(
transforms, name="transforms", dtype=dtypes.float32)
if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:
raise TypeError("Invalid dtype %s." % image_or_images.dtype)
if len(image_or_images.get_shape()) == 2:
images = image_or_images[None, :, :, None]
elif len(image_or_images.get_shape()) == 3:
images = image_or_images[None, :, :, :]
elif len(image_or_images.get_shape()) == 4:
images = image_or_images
else:
raise TypeError("Images should have rank between 2 and 4.")
if len(transform_or_transforms.get_shape()) == 1:
transforms = transform_or_transforms[None]
elif len(transform_or_transforms.get_shape()) == 2:
transforms = transform_or_transforms
else:
raise TypeError("Transforms should have rank 1 or 2.")
output = gen_image_ops.image_projective_transform(
images, transforms, interpolation=interpolation.upper())
if len(image_or_images.get_shape()) == 2:
return output[0, :, :, 0]
elif len(image_or_images.get_shape()) == 3:
return output[0, :, :, :]
else:
return output
def compose_transforms(*transforms):
"""Composes the transforms tensors.
Args:
*transforms: List of image projective transforms to be composed. Each
transform is length 8 (single transform) or shape (N, 8) (batched
transforms). The shapes of all inputs must be equal, and at least one
input must be given.
Returns:
A composed transform tensor. When passed to `tf.contrib.image.transform`,
equivalent to applying each of the given transforms to the image in
order.
"""
assert transforms, "transforms cannot be empty"
composed = _flat_transforms_to_matrices(transforms[0])
for tr in transforms[1:]:
# Multiply batches of matrices.
composed = math_ops.matmul(composed, _flat_transforms_to_matrices(tr))
return _transform_matrices_to_flat(composed)
def _flat_transforms_to_matrices(transforms):
# Make the transform(s) 2D in case the input is a single transform.
transforms = array_ops.reshape(transforms, constant_op.constant([-1, 8]))
num_transforms = array_ops.shape(transforms)[0]
# Add a column of ones for the implicit last entry in the matrix.
return array_ops.reshape(
array_ops.concat(
[transforms, array_ops.ones([num_transforms, 1])], axis=1),
constant_op.constant([-1, 3, 3]))
def _transform_matrices_to_flat(transform_matrices):
# Flatten each matrix.
transforms = array_ops.reshape(
transform_matrices, constant_op.constant([-1, 9]))
# Divide each matrix by the last entry (normally 1).
transforms /= transforms[:, 8:9]
return transforms[:, :8]
@ops.RegisterGradient("ImageProjectiveTransform")
def _image_projective_transform_grad(op, grad):
"""Computes the gradient for ImageProjectiveTransform."""
images = op.inputs[0]
transforms = op.inputs[1]
interpolation = op.get_attr("interpolation")
image_or_images = ops.convert_to_tensor(images, name="images")
transform_or_transforms = ops.convert_to_tensor(
transforms, name="transforms", dtype=dtypes.float32)
if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:
raise TypeError("Invalid dtype %s." % image_or_images.dtype)
if len(image_or_images.get_shape()) == 2:
images = image_or_images[None, :, :, None]
elif len(image_or_images.get_shape()) == 3:
images = image_or_images[None, :, :, :]
elif len(image_or_images.get_shape()) == 4:
images = image_or_images
else:
raise TypeError("Images should have rank between 2 and 4")
if len(transform_or_transforms.get_shape()) == 1:
transforms = transform_or_transforms[None]
elif len(transform_or_transforms.get_shape()) == 2:
transforms = transform_or_transforms
else:
raise TypeError("Transforms should have rank 1 or 2.")
# Invert transformations
transforms = _flat_transforms_to_matrices(transforms=transforms)
inverse = linalg_ops.matrix_inverse(transforms)
transforms = _transform_matrices_to_flat(inverse)
output = gen_image_ops.image_projective_transform(
grad, transforms, interpolation=interpolation)
if len(image_or_images.get_shape()) == 2:
return [output[0, :, :, 0], None]
elif len(image_or_images.get_shape()) == 3:
return [output[0, :, :, :], None]
else:
return [output, None]
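# A hedged usage sketch (TF 1.x graph mode, matching the contrib-era API above):
# rotate a batch of images and compose two smaller rotations into a single transform.
if __name__ == "__main__":
  import math
  import numpy as np
  import tensorflow as tf

  images = tf.constant(np.random.rand(2, 32, 32, 3), dtype=tf.float32)
  rotated = rotate(images, math.pi / 4, interpolation="BILINEAR")
  half = angles_to_projective_transforms(math.pi / 8, 32.0, 32.0)
  composed = transform(images, compose_transforms(half, half))  # two pi/8 turns = pi/4
  with tf.Session() as sess:
    rotated_np, composed_np = sess.run([rotated, composed])
    print(rotated_np.shape, composed_np.shape)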
| 40.471264
| 85
| 0.700275
|
b3d0c43cc11f39cd6883c4f3d54a3405ef96fe8e
| 2,438
|
py
|
Python
|
cfripper/rules/SecurityGroupOpenToWorldRule.py
|
harry738/cfripper
|
83e4a2eb8015aad7178b67b112bcd99b1f60b09f
|
[
"Apache-2.0"
] | null | null | null |
cfripper/rules/SecurityGroupOpenToWorldRule.py
|
harry738/cfripper
|
83e4a2eb8015aad7178b67b112bcd99b1f60b09f
|
[
"Apache-2.0"
] | null | null | null |
cfripper/rules/SecurityGroupOpenToWorldRule.py
|
harry738/cfripper
|
83e4a2eb8015aad7178b67b112bcd99b1f60b09f
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2018 Skyscanner Ltd
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from cfripper.config.logger import get_logger
from cfripper.model.rule_processor import Rule
logger = get_logger()
class SecurityGroupOpenToWorldRule(Rule):
def invoke(self, resources, parameters):
rs = resources.get("AWS::EC2::SecurityGroup", [])
for resource in rs:
self.process_resource(resource.logical_id, resource)
def process_resource(self, logical_name, properties):
if not properties:
return
for ingress in properties.security_group_ingress:
if ingress.ipv4_slash_zero() or ingress.ipv6_slash_zero():
self.check_ports(logical_name, ingress)
def check_ports(self, logical_name, ingress_rule):
        from_port = int(ingress_rule.from_port) if ingress_rule.from_port is not None else None
        to_port = int(ingress_rule.to_port) if ingress_rule.to_port is not None else None
if from_port == to_port:
the_port = from_port
self.check_single_port(logical_name, the_port)
else:
self.check_port_range(logical_name, from_port, to_port)
def check_single_port(self, logical_name, the_port):
if str(the_port) not in self._config.ALLOWED_WORLD_OPEN_PORTS:
reason = "Port {} open to the world in security group \"{}\"".format(
the_port,
logical_name,
)
self.add_failure(type(self).__name__, reason)
def check_port_range(self, logical_name, from_port, to_port):
for port in range(from_port, to_port + 1):
if str(port) not in self._config.ALLOWED_WORLD_OPEN_PORTS:
reason = "Ports {} - {} open in Security Group {}".format(
from_port,
to_port,
logical_name,
)
self.add_failure(type(self).__name__, reason)
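# A minimal sketch of the decision this rule makes, assuming
# ALLOWED_WORLD_OPEN_PORTS is a collection of port numbers stored as strings.
# The helper and the example ports below are illustrative only.
def _example_world_open_ports(from_port, to_port, allowed=("80", "443")):
    """Return the ports in [from_port, to_port] that would be flagged."""
    return [port for port in range(from_port, to_port + 1) if str(port) not in allowed]
# e.g. _example_world_open_ports(22, 25) -> [22, 23, 24, 25]
#      _example_world_open_ports(443, 443) -> []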
| 37.507692
| 95
| 0.671452
|
c8c3e1cd08a4b913da3e4c4698c66b556e93c9d8
| 3,171
|
py
|
Python
|
emanate/cli.py
|
ritlew/emanate
|
9081e382242a9f7d27d8ee9027b4c891af1b3f1a
|
[
"MIT"
] | null | null | null |
emanate/cli.py
|
ritlew/emanate
|
9081e382242a9f7d27d8ee9027b4c891af1b3f1a
|
[
"MIT"
] | null | null | null |
emanate/cli.py
|
ritlew/emanate
|
9081e382242a9f7d27d8ee9027b4c891af1b3f1a
|
[
"MIT"
] | null | null | null |
"""Command-line interface for the Emanate symbolic link manager.
Examples:
Emanate defaults to using the current directory as source,
and the current user's home directory as destination::
~/.dotfiles $ emanate
would create symbolic links in ~ for files in ~/.dotfiles
Emanate also defaults to looking for a configuration file in the source
directory, allowing usages such as::
$ cat software/foo/emanate.json
        { "destination": "/usr/local" }
$ emanate --source software/foo
${PWD}/software/foo/bin/foo -> /usr/local/bin/foo
${PWD}/software/foo/lib/library.so -> /usr/local/lib/library.so
See `emanate --help` for all command-line options.
"""
from argparse import ArgumentParser, SUPPRESS
from pathlib import Path
from . import Emanate, config
def _parse_args(args=None):
argparser = ArgumentParser(
description="Link files from one directory to another",
argument_default=SUPPRESS,
)
argparser.add_argument("--destination",
metavar="DESTINATION",
help="Directory containing the symbolic links.")
argparser.add_argument("--dry-run",
action="store_false",
default=True,
dest="exec",
help="Only display the actions that would be taken.")
argparser.add_argument("--source",
metavar="SOURCE",
type=Path,
help="Directory holding the files to symlink.")
argparser.add_argument("--no-confirm",
action="store_false",
dest="confirm",
help="Don't prompt before replacing a file.")
argparser.add_argument("--config",
metavar="CONFIG_FILE",
default=None,
type=Path,
help="Configuration file to use.")
subcommands = argparser.add_subparsers(dest='command')
subcommands.add_parser('clean')
subcommands.add_parser('create')
return argparser.parse_args(args)
def main(args=None):
"""Invoke Emanate from command-line arguments.
Emanate prioritizes configuration sources in the following order:
- default values have lowest priority;
- the configuration file overrides defaults;
- command-line arguments override everything.
"""
args = _parse_args(args)
if args.config is None:
if 'source' in args:
args.config = args.source / "emanate.json"
else:
args.config = Path.cwd() / "emanate.json"
emanate = Emanate(
config.from_json(args.config) if args.config.exists() else None,
config.resolve(vars(args)),
src=vars(args).get("source", None),
)
if args.command is None or args.command == 'create':
execute = emanate.create()
elif args.command == 'clean':
execute = emanate.clean()
else:
assert False
if args.exec:
execute.run()
else:
execute.dry()
| 33.378947
| 80
| 0.58562
|
c8684e5bb8c448810e0191bf6a0e26457171c4f5
| 2,416
|
py
|
Python
|
tests/tests_models/test_ensemble.py
|
michaelneale/mljar-supervised
|
8d1b5fdd56e994a7f13ec5f6d2033830744f3d6f
|
[
"MIT"
] | null | null | null |
tests/tests_models/test_ensemble.py
|
michaelneale/mljar-supervised
|
8d1b5fdd56e994a7f13ec5f6d2033830744f3d6f
|
[
"MIT"
] | null | null | null |
tests/tests_models/test_ensemble.py
|
michaelneale/mljar-supervised
|
8d1b5fdd56e994a7f13ec5f6d2033830744f3d6f
|
[
"MIT"
] | 1
|
2021-03-12T05:48:45.000Z
|
2021-03-12T05:48:45.000Z
|
import unittest
import tempfile
import json
import numpy as np
import pandas as pd
from numpy.testing import assert_almost_equal
from sklearn import datasets
from supervised.models.ensemble import Ensemble
from supervised.metric import Metric
from supervised.models.learner_factory import LearnerFactory
class SimpleFramework:
def __init__(self, params):
pass
def predict(self, X):
return np.array([0.1, 0.2, 0.8, 0.9])
def to_json(self):
return {
"params": {
"model_type": "simple",
"learner": {"model_type": "simple"},
"validation": {
"validation_type": "kfold",
"k_folds": 5,
"shuffle": True,
},
}
}
def from_json(self, json_desc):
pass
def load(self, json_desc):
pass
class EnsembleTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.X = pd.DataFrame(
{
"model_0": [0.1, 0.2, 0.8, 0.9],
"model_1": [0.2, 0.1, 0.9, 0.8],
"model_2": [0.8, 0.8, 0.1, 0.1],
"model_3": [0.8, 0.8, 0.1, 0.1],
"model_4": [0.8, 0.8, 0.1, 0.1],
"model_5": [0.8, 0.8, 0.1, 0.1],
}
)
cls.y = np.array([0, 0, 1, 1])
def test_fit_predict(self):
ensemble = Ensemble()
ensemble.models = [SimpleFramework({})] * 5
ensemble.fit(self.X, self.y)
        self.assertEqual(1, ensemble.selected_models[0]["repeat"])
self.assertEqual(1, ensemble.selected_models[1]["repeat"])
self.assertTrue(len(ensemble.selected_models) == 2)
y = ensemble.predict(self.X)
assert_almost_equal(y[0], 0.1)
assert_almost_equal(y[1], 0.2)
assert_almost_equal(y[2], 0.8)
assert_almost_equal(y[3], 0.9)
"""
def test_save_load(self):
ensemble = Ensemble()
ensemble.models = [SimpleFramework({})] * 5
ensemble.fit(self.X, self.y)
y = ensemble.predict(self.X)
assert_almost_equal(y[0], 0.1)
ensemble_json = ensemble.to_json()
ensemble2 = Ensemble()
ensemble2.from_json(ensemble_json)
y2 = ensemble2.predict(self.X)
assert_almost_equal(y2[0], 0.1)
"""
if __name__ == "__main__":
unittest.main()
| 27.146067
| 66
| 0.54346
|
8ed269914a494a7b6bf3195f956b482c907d43bc
| 4,017
|
py
|
Python
|
Source/Qt5/modules/syscheck.py
|
TheEnvironmentGuy/noodle-pipe
|
175ee86618ff202c3291f1bfa3952a14b9d6ac75
|
[
"MIT"
] | null | null | null |
Source/Qt5/modules/syscheck.py
|
TheEnvironmentGuy/noodle-pipe
|
175ee86618ff202c3291f1bfa3952a14b9d6ac75
|
[
"MIT"
] | 12
|
2015-01-31T02:37:45.000Z
|
2015-02-05T04:18:21.000Z
|
Source/Qt5/modules/syscheck.py
|
TheEnvironmentGuy/noodle-pipe
|
175ee86618ff202c3291f1bfa3952a14b9d6ac75
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#Load modules
try:
import imp
import sys
import os
import time
import prefcheck
import gui
#import update
import json
except ImportError:
raise Exception("Error: Faild to import critical modules.")
sys.exit(1)
def sysCheck():
#start writing to sysCheck.log
unlog = sys.stdout
sysCheckLog = open("sysCheck.log", 'w')
sys.stdout = sysCheckLog
print("\nNote : Doing system check...")
#Print OS and time
print("Note : Timestamp %s, %s" %(time.strftime("%d/%m/%Y"), time.strftime("%H:%M:%S")))
print("Note : Running on platform '%s'" %(sys.platform))
#Test Python Version
pyVersion = sys.hexversion
    if (pyVersion >= 0x03000000) and (pyVersion < 0x04000000):
print("Note : Running Python version '%s' " %(pyVersion))
else:
print("Error: Running Python version '%s'. Python 3.x is required " %(pyVersion))
sys.exit(1)
print("Note : Rolling updates enabled, '%s'" %('no'))
print("Note : noodle-pipe release version '%s'" %('0.0.0'))
#Check core dependencies
jsonFile = open('../modules/data/dependencies.json', 'r')
jsonData = json.load(jsonFile)
jsonFile.close()
for keys, values in jsonData.items():
for items in jsonData[keys]:
try:
imp.find_module(items)
except:
print("Error: Found core module '%s', no" %(items))
raise Exception("Found core module '%s', no" %(items))
sys.exit(1)
else:
print("Note : Found core module '%s', yes" %(items))
#Check plugin dependencies
dirList = os.listdir('../plugins/')
fileList = []
plugModules = []
for items in dirList:
print("Note : Found plugin '%s' in '%s'" %(items, "../plugins/%s/" %(items)))
if os.path.isfile("../plugins/%s/data" %(items)):
jsonFile = open("../plugins/%s/data" %(items), 'r')
jsonData = json.load(jsonFile)
            jsonFile.close()
for keys, values in jsonData.items():
for items in jsonData[keys]:
try:
imp.find_module(items)
except:
print("Error: Found plugin module '%s', no" %(items))
else:
print("Note : Found plugin module '%s', yes" %(items))
else:
print("Warn : Found dependencies.json for plugin '%s', no" %(items))
#raise Exception("Found dependencies.json for plugin '%s', no" %(items))
#Stop writing to sysCheck.log
sys.stdout = unlog
sysCheckLog.close()
print("Note : Read sysCheck.log for details")
#Queries all noodle-pipe modules for a list of dependency modules
def modGather():
if os.path.isdir("../modules/data/"):
print("Note : Found path '../modules/data/', yes")
else:
print("Warn : Found path '../modules/data/', no")
print("Warn : Can't determine core module dependencies")
#get list of noodle-pipe core modules
dirList = os.listdir('../modules/')
coreModules = []
for items in dirList:
if items[-2:] == 'py':
coreModules.append(items[:-3])
#get list of plugin modules
dirList = os.listdir('../plugins/')
fileList = []
plugModules = []
for items in dirList:
fileList.append(os.listdir("../plugins/%s/data" %(items)))
#if items[-2:] == 'py':
# modules.append(items[:-3])
print(fileList)
#Determines which modules are available and which aren't
def modCheck(modules=None):
    if (modules is None) or (len(modules) < 1):
raise Exception("Expected type 'List' with length > 0")
else:
moduleDic = {}
for mods in modules:
try:
imp.find_module(mods)
except:
moduleDic[str(mods)] = False
else:
moduleDic[str(mods)] = True
return(moduleDic)
#Prints the results of modCheck() to the terminal
def modPrint(modules=None):
    if (modules is None) or (len(modules) < 1):
raise Exception("Expected type 'List' with length > 0")
else:
for keys, values in modules.items():
if values is False:
print("Error: Found module '%s', no" %(keys))
else:
print("Note : Found module '%s', yes" %(keys))
if __name__ == '__main__':
print("Error: Improper usage of 'syscheck', See documentaion for proper usage")
raise Exception("Improper usage of 'syscheck', See documentaion for proper usage")
| 28.090909
| 89
| 0.656958
|
f396f4dea2b71f999b99b58cc46819ef7711d4bb
| 357
|
py
|
Python
|
Web_Pages_Manipulating_html/FindClass_on_SpecificTag.py
|
OblackatO/Network-Security
|
c954676453d0767e2f27cea622835e3e353b1134
|
[
"MIT"
] | null | null | null |
Web_Pages_Manipulating_html/FindClass_on_SpecificTag.py
|
OblackatO/Network-Security
|
c954676453d0767e2f27cea622835e3e353b1134
|
[
"MIT"
] | null | null | null |
Web_Pages_Manipulating_html/FindClass_on_SpecificTag.py
|
OblackatO/Network-Security
|
c954676453d0767e2f27cea622835e3e353b1134
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
bsinstance = BeautifulSoup(open('ltps_parse.html'),"lxml")
#Searches for all div tags with the class 'label_text' (limit 5); passing class_=True instead would match any class.
classimg = bsinstance.find_all('div',class_='label_text',limit=5) #can set a limit to the number of classes you want
for classi in classimg:
    print(classi)
| 51
| 117
| 0.770308
|
52d2af3fca052544302d2230ff5c1761b18aca1f
| 1,195
|
py
|
Python
|
2020/21/1.py
|
chaserobertson/advent
|
30205dcc20e46e5a48aaa80cbf8025d947045d93
|
[
"MIT"
] | null | null | null |
2020/21/1.py
|
chaserobertson/advent
|
30205dcc20e46e5a48aaa80cbf8025d947045d93
|
[
"MIT"
] | null | null | null |
2020/21/1.py
|
chaserobertson/advent
|
30205dcc20e46e5a48aaa80cbf8025d947045d93
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
ingredients = []
allergens = []
with open('advent2020/21/input.txt', 'r') as f:
for line in f.readlines():
all_str = line[line.find('(')+10:line.find(')')]
if line.count(',') > 0:
allergenz = all_str.split(', ')
else:
allergenz = [all_str]
allergens.append(allergenz)
ingred = line[:line.find('(')].strip().split()
ingredients.append(ingred)
itoa = dict()
ing = ingredients.copy()
alr = allergens.copy()
n = -1
while len(itoa) != n:
n = len(itoa)
for i in range(len(ing)):
for j in range(i, len(ing)):
iset = set(ing[i]).intersection(ing[j])
aset = set(alr[i]).intersection(alr[j])
if len(iset) == 1 and len(aset) == 1:
a_match = aset.pop()
i_match = iset.pop()
itoa.setdefault(i_match, a_match)
for i in range(len(ing)):
    for ingr in list(ing[i]):  # iterate over a copy; ing[i] is mutated below
        if ingr in itoa:
ing[i].remove(ingr)
if itoa[ingr] in alr[i]:
alr[i].remove(itoa[ingr])
print(itoa)
#print(ing)
cnt = 0
for ig in ing:
cnt += len(ig)
print(cnt)
| 25.978261
| 56
| 0.512971
|
12d0f48978959f6e0bbcf26835b8c3109ae82cd7
| 19,488
|
py
|
Python
|
visualization/visualization_utils.py
|
abfleishman/CameraTraps
|
eb509ac6a5305217266647b518d5383dc48f0b72
|
[
"MIT"
] | null | null | null |
visualization/visualization_utils.py
|
abfleishman/CameraTraps
|
eb509ac6a5305217266647b518d5383dc48f0b72
|
[
"MIT"
] | null | null | null |
visualization/visualization_utils.py
|
abfleishman/CameraTraps
|
eb509ac6a5305217266647b518d5383dc48f0b72
|
[
"MIT"
] | null | null | null |
#####
#
# visualization_utils.py
#
# Core rendering functions shared across visualization scripts
#
#####
#%% Constants and imports
import numpy as np
from PIL import Image, ImageFile, ImageFont, ImageDraw
import matplotlib.pyplot as plt
from data_management.annotations import annotation_constants
ImageFile.LOAD_TRUNCATED_IMAGES = True
#%% Functions
def open_image(input):
"""
    Opens an image in binary format using PIL.Image and converts it to RGB mode. This operation is lazy; the image will
not be actually loaded until the first operation that needs to load it (for example, resizing), so file opening
errors can show up later.
Args:
input: an image in binary format read from the POST request's body or
path to an image file (anything that PIL can open)
Returns:
        a PIL Image object in RGB mode
"""
image = Image.open(input)
if image.mode not in ('RGBA', 'RGB', 'L'):
raise AttributeError('Input image {} uses unsupported mode {}'.format(input,image.mode))
if image.mode == 'RGBA' or image.mode == 'L':
# PIL.Image.convert() returns a converted copy of this image
image = image.convert(mode='RGB')
return image
def resize_image(image, target_width, target_height=-1):
"""
Resizes a PIL image object to the specified width and height; does not resize
in place. If either width or height are -1, resizes with aspect ratio preservation.
If both are -1, returns the original image (does not copy in this case).
"""
# Null operation
if target_width == -1 and target_height == -1:
return image
elif target_width == -1 or target_height == -1:
# Aspect ratio as width over height
aspect_ratio = image.size[0] / image.size[1]
if target_width != -1:
# ar = w / h
# h = w / ar
target_height = int(target_width / aspect_ratio)
else:
# ar = w / h
# w = ar * h
target_width = int(aspect_ratio * target_height)
resized_image = image.resize((target_width, target_height), Image.ANTIALIAS)
return resized_image
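# A minimal usage sketch for the two helpers above; the file name below is a
# placeholder and this helper is illustrative only.
def _example_open_and_resize(path='example_image.jpg'):
    """Open an image lazily and scale it to 800 px wide, preserving aspect ratio."""
    image = open_image(path)
    return resize_image(image, target_width=800)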
def render_iMerit_boxes(boxes, classes, image,
label_map=annotation_constants.bbox_category_id_to_name):
"""
Renders bounding boxes and their category labels on a PIL image.
Args:
boxes: bounding box annotations from iMerit, format is [x_rel, y_rel, w_rel, h_rel] (rel = relative coords)
image: PIL.Image object to annotate on
label_map: optional dict mapping classes to a string for display
Returns:
image will be altered in place
"""
display_boxes = []
display_strs = [] # list of list, one list of strings for each bounding box (to accommodate multiple labels)
for box, clss in zip(boxes, classes):
x_rel, y_rel, w_rel, h_rel = box
ymin, xmin = y_rel, x_rel
ymax = ymin + h_rel
xmax = xmin + w_rel
display_boxes.append([ymin, xmin, ymax, xmax])
if label_map:
clss = label_map[int(clss)]
display_strs.append([clss])
display_boxes = np.array(display_boxes)
draw_bounding_boxes_on_image(image, display_boxes, classes, display_strs=display_strs)
def render_db_bounding_boxes(boxes, classes, image, original_size=None,
label_map=None, thickness=4):
"""
Render bounding boxes (with class labels) on [image]. This is a wrapper for
draw_bounding_boxes_on_image, allowing the caller to operate on a resized image
by providing the original size of the image; bboxes will be scaled accordingly.
"""
display_boxes = []
display_strs = []
if original_size is not None:
image_size = original_size
else:
image_size = image.size
img_width, img_height = image_size
for box, clss in zip(boxes, classes):
x_min_abs, y_min_abs, width_abs, height_abs = box
ymin = y_min_abs / img_height
ymax = ymin + height_abs / img_height
xmin = x_min_abs / img_width
xmax = xmin + width_abs / img_width
display_boxes.append([ymin, xmin, ymax, xmax])
if label_map:
clss = label_map[int(clss)]
display_strs.append([clss])
display_boxes = np.array(display_boxes)
draw_bounding_boxes_on_image(image, display_boxes, classes, display_strs=display_strs,
thickness=thickness)
def render_detection_bounding_boxes(detections, image,
label_map={},
classification_label_map={},
confidence_threshold=0.8, thickness=4,
classification_confidence_threshold=0.3,
max_classifications=3):
"""
Renders bounding boxes, label, and confidence on an image if confidence is above the threshold.
This works with the output of the batch processing API.
Supports classification, if the detection contains classification results according to the
API output version 1.0.
Args:
detections: detections on the image, example content:
[
{
"category": "2",
"conf": 0.996,
"bbox": [
0.0,
0.2762,
0.1234,
0.2458
]
}
]
where the bbox coordinates are [x, y, width_box, height_box], (x, y) is upper left.
Supports classification results, if *detections* have the format
[
{
"category": "2",
"conf": 0.996,
"bbox": [
0.0,
0.2762,
0.1234,
0.2458
]
"classifications": [
["3", 0.901],
["1", 0.071],
["4", 0.025]
]
}
]
image: PIL.Image object, output of generate_detections.
label_map: optional, mapping the numerical label to a string name. The type of the numerical label
(default string) needs to be consistent with the keys in label_map; no casting is carried out.
classification_label_map: optional, mapping of the string class labels to the actual class names.
The type of the numerical label (default string) needs to be consistent with the keys in
label_map; no casting is carried out.
confidence_threshold: optional, threshold above which the bounding box is rendered.
thickness: optional, rendering line thickness.
image is modified in place.
"""
display_boxes = []
display_strs = [] # list of lists, one list of strings for each bounding box (to accommodate multiple labels)
classes = [] # for color selection
for detection in detections:
score = detection['conf']
if score > confidence_threshold:
x1, y1, w_box, h_box = detection['bbox']
display_boxes.append([y1, x1, y1 + h_box, x1 + w_box])
clss = detection['category']
label = label_map[clss] if clss in label_map else clss
displayed_label = ['{}: {}%'.format(label, round(100 * score))]
if 'classifications' in detection:
# To avoid duplicate colors with detection-only visualization, offset
# the classification class index by the number of detection classes
clss = len(annotation_constants.bbox_categories) + int(detection['classifications'][0][0])
classifications = detection['classifications']
if len(classifications) > max_classifications:
classifications = classifications[0:max_classifications]
for classification in classifications:
p = classification[1]
if p < classification_confidence_threshold:
continue
class_key = classification[0]
if class_key in classification_label_map:
class_name = classification_label_map[class_key]
else:
class_name = class_key
displayed_label += ['{}: {:5.1%}'.format(class_name.lower(), classification[1])]
# ...if we have detection results
display_strs.append(displayed_label)
classes.append(clss)
# ...if the confidence of this detection is above threshold
# ...for each detection
display_boxes = np.array(display_boxes)
draw_bounding_boxes_on_image(image, display_boxes, classes,
display_strs=display_strs, thickness=thickness)
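# A minimal usage sketch for render_detection_bounding_boxes; the detection,
# file name and label map below are made up for illustration.
def _example_render_detections(path='example_image.jpg'):
    """Draw one fake above-threshold detection onto an image and return it."""
    detections = [{'category': '1', 'conf': 0.95, 'bbox': [0.1, 0.2, 0.3, 0.4]}]
    image = open_image(path)
    render_detection_bounding_boxes(detections, image, label_map={'1': 'animal'})
    return image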
# The following functions are modified versions of those at:
#
# https://github.com/tensorflow/models/blob/master/research/object_detection/utils/visualization_utils.py
COLORS = [
'AliceBlue', 'Red', 'RoyalBlue', 'Gold', 'Chartreuse', 'Aqua', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'RosyBrown', 'Aquamarine', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def draw_bounding_boxes_on_image(image,
boxes,
classes,
thickness=4,
display_strs=()):
"""
Draws bounding boxes on image.
Args:
image: a PIL.Image object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
classes: a list of ints or strings (which can be cast to ints) corresponding to the class labels of the boxes.
This is only used for selecting the color to render the bounding box in.
thickness: line thickness. Default value is 4.
display_strs: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
"""
boxes_shape = boxes.shape
if not boxes_shape:
return
if len(boxes_shape) != 2 or boxes_shape[1] != 4:
# print('Input must be of size [N, 4], but is ' + str(boxes_shape))
return # no object detection on this image, return
for i in range(boxes_shape[0]):
if display_strs:
display_str_list = display_strs[i]
draw_bounding_box_on_image(image,
boxes[i, 0], boxes[i, 1], boxes[i, 2], boxes[i, 3],
classes[i],
thickness=thickness, display_str_list=display_str_list)
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
clss=None,
thickness=4,
display_str_list=(),
use_normalized_coordinates=True,
label_font_size=16):
"""
Adds a bounding box to an image.
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Each string in display_str_list is displayed on a separate line above the
bounding box in black text on a rectangle filled with the input 'color'.
If the top of the bounding box extends to the edge of the image, the strings
are displayed below the bounding box.
Args:
image: a PIL.Image object.
ymin: ymin of bounding box - upper left.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
clss: the class of the object in this bounding box - will be cast to an int.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
if clss is None:
color = COLORS[1]
else:
color = COLORS[int(clss) % len(COLORS)]
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
if use_normalized_coordinates:
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
draw.line([(left, top), (left, bottom), (right, bottom),
(right, top), (left, top)], width=thickness, fill=color)
try:
font = ImageFont.truetype('arial.ttf', label_font_size)
except IOError:
font = ImageFont.load_default()
# If the total height of the display strings added to the top of the bounding
# box exceeds the top of the image, stack the strings below the bounding box
# instead of above.
display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
# Each display_str has a top and bottom margin of 0.05x.
total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = bottom + total_display_str_height
# Reverse list and print from bottom to top.
for display_str in display_str_list[::-1]:
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[(left, text_bottom - text_height - 2 * margin), (left + text_width,
text_bottom)],
fill=color)
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
text_bottom -= (text_height + 2 * margin)
def plot_confusion_matrix(matrix, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues,
vmax=None,
use_colorbar=True,
y_label = True):
"""
This function plots a confusion matrix.
Normalization can be applied by setting `normalize=True`.
Args:
matrix: confusion matrix as a numpy 2D matrix. Rows are ground-truth classes
and columns the predicted classes. Number of rows and columns have to match
classes: list of strings, which contain the corresponding class names for each row/column
normalize: boolean indicating whether to perform row-wise normalization to sum 1
title: string which will be used as title
cmap: pyplot colormap, default: matplotlib.pyplot.cm.Blues
vmax: float, specifies the value that corresponds to the largest value of the colormap.
If None, the maximum value in *matrix* will be used. Default: None
use_colorbar: boolean indicating if a colorbar should be plotted
y_label: boolean indicating whether class names should be plotted on the y-axis as well
Returns a reference to the figure
"""
assert matrix.shape[0] == matrix.shape[1]
fig = plt.figure(figsize=[3 + 0.5 * len(classes)] * 2)
if normalize:
matrix = matrix.astype(np.double) / (matrix.sum(axis=1, keepdims=True) + 1e-7)
plt.imshow(matrix, interpolation='nearest', cmap=cmap, vmax=vmax)
plt.title(title) #,fontsize=22)
if use_colorbar:
plt.colorbar(fraction=0.046, pad=0.04, ticks=[0.0,0.25,0.5,0.75,1.0]).set_ticklabels(['0%','25%','50%','75%','100%'])
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=90)
if y_label:
plt.yticks(tick_marks, classes)
else:
plt.yticks(tick_marks, ['' for cn in classes])
for i, j in np.ndindex(matrix.shape):
plt.text(j, i, '{:.0f}%'.format(matrix[i, j]*100),
horizontalalignment="center",
verticalalignment="center",
color="white" if matrix[i, j] > 0.5 else "black",
fontsize='x-small')
if y_label:
plt.ylabel('Ground-truth class')
plt.xlabel('Predicted class')
#plt.grid(False)
plt.tight_layout()
return fig
def plot_precision_recall_curve(precisions, recalls, title='Precision/Recall curve'):
"""
Plots the precision recall curve given lists of (ordered) precision
and recall values
Args:
precisions: list of floats, the precision for the corresponding recall values.
Should have same length as *recalls*.
recalls: list of floats, the recall values for corresponding precision values.
Should have same length as *precisions*.
        title: string that will be used as the plot title
Returns a reference to the figure
"""
step_kwargs = ({'step': 'post'})
fig = plt.figure()
plt.title(title)
plt.step(recalls, precisions, color='b', alpha=0.2,
where='post')
plt.fill_between(recalls, precisions, alpha=0.2, color='b', **step_kwargs)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.05])
return fig
| 39.054108
| 125
| 0.594982
|
68d6d328bf9064bb508b93ae3a4620575cc30b52
| 2,426
|
py
|
Python
|
CODE/downloader/downloader.py
|
teamboxcoxrox/teamboxcoxrox.gitlab.io
|
626074a46cb1b2dba6d367cfb63c9bfdb9cb3a92
|
[
"Unlicense"
] | null | null | null |
CODE/downloader/downloader.py
|
teamboxcoxrox/teamboxcoxrox.gitlab.io
|
626074a46cb1b2dba6d367cfb63c9bfdb9cb3a92
|
[
"Unlicense"
] | null | null | null |
CODE/downloader/downloader.py
|
teamboxcoxrox/teamboxcoxrox.gitlab.io
|
626074a46cb1b2dba6d367cfb63c9bfdb9cb3a92
|
[
"Unlicense"
] | 1
|
2021-04-22T21:29:44.000Z
|
2021-04-22T21:29:44.000Z
|
from downloader.review_helper import ReviewHelper
from downloader.product_helper import ProductHelper
from downloader.download_helper import DownloadHelper
from datetime import datetime
class Downloader():
def __init__(self):
pass
def run(self, database_location):
print("--------------- Downloading data --------------------")
start_time = datetime.now()
review_source = "http://deepyeti.ucsd.edu/jianmo/amazon/categoryFiles/Pet_Supplies.json.gz"
product_source = "http://deepyeti.ucsd.edu/jianmo/amazon/metaFiles/meta_Pet_Supplies.json.gz"
dh = DownloadHelper()
dh.download(review_source, product_source)
print("Time to download and decompress: {}".format(datetime.now()-start_time))
print("Finished.")
print("--------------- Processing reviews --------------------")
rh = ReviewHelper()
rh.create_db_connection(database_location)
rh.drop_review_table_sql()
print("Table Dropped.")
rh.create_review_table_sql()
print("Table Created.")
rh.DEBUG = False
print("Reading data...")
with open("reviews.json", "r") as reviews:
review_lines = reviews.readlines()
print("Found {} reviews.".format(len(review_lines)))
print("Inserting reviews...")
rh.insert_json_lines(review_lines)
print("Creating index. Be patient.")
rh.create_index()
rh.close_db()
print("Total reviews imported: {}".format(len(review_lines)))
print("--------------- Processing products --------------------")
ph = ProductHelper()
ph.create_db_connection(database_location)
ph.drop_product_table()
print("Table Dropped.")
ph.create_product_table()
print("Table Created.")
ph.DEBUG = False
print("Reading data...")
with open("products.json", "r") as products:
product_lines = products.readlines()
print("Found {} products.".format(len(product_lines)))
print("Inserting products...")
ph.insert_json_lines(product_lines)
print("Creating index. Be patient.")
ph.create_index()
ph.close_db()
print("Total product imported: {}".format(len(product_lines)))
print("Data download complete.")
print("Full time to import data: {}".format(datetime.now() - start_time))
| 37.323077
| 101
| 0.612531
|
4627f9f04b186bcb0487853879cbe9bbcb6e0156
| 275
|
py
|
Python
|
tests/test_api.py
|
concepticon/pynorare
|
3cf5ea2d1597c5acc84963f781ff49d96b4d7e02
|
[
"MIT"
] | null | null | null |
tests/test_api.py
|
concepticon/pynorare
|
3cf5ea2d1597c5acc84963f781ff49d96b4d7e02
|
[
"MIT"
] | 5
|
2020-07-20T11:05:07.000Z
|
2022-03-11T15:51:52.000Z
|
tests/test_api.py
|
concepticon/pynorare
|
3cf5ea2d1597c5acc84963f781ff49d96b4d7e02
|
[
"MIT"
] | null | null | null |
from pynorare import NoRaRe
def test_NoRaRe(repos, concepticon_api):
api = NoRaRe(repos=repos, concepticon=concepticon_api)
assert len(api.refs) == 1
assert len(api.datasets) == 3
assert api.datasets['dsid'].columns
assert api.datasets['dsid'].concepts
| 27.5
| 58
| 0.72
|
4428772b23fee354ea92116c67ac970f5f4b8755
| 1,994
|
py
|
Python
|
profiles_api/models.py
|
Aravind2468/Django-Profiles-Rest-API
|
25909a614c9b715c424abbd02924ea1d3cfba17e
|
[
"MIT"
] | null | null | null |
profiles_api/models.py
|
Aravind2468/Django-Profiles-Rest-API
|
25909a614c9b715c424abbd02924ea1d3cfba17e
|
[
"MIT"
] | null | null | null |
profiles_api/models.py
|
Aravind2468/Django-Profiles-Rest-API
|
25909a614c9b715c424abbd02924ea1d3cfba17e
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
# Create your models here.
class UserProfileManager(BaseUserManager):
"""Manager for user profiles"""
def create_user(self, email, name, password=None):
""" Create a new user profile """
if not email:
raise ValueError('User must have an email address')
email = self.normalize_email(email)
user = self.model(email=email, name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
""" Create super user """
user = self.create_user(email, name, password)
user.is_superuser = True
        user.is_staff = True
user.save(using=self._db)
return user
class MODELNAME(models.Model):
"""Model definition for MODELNAME."""
# TODO: Define fields here
class Meta:
"""Meta definition for MODELNAME."""
verbose_name = 'MODELNAME'
verbose_name_plural = 'MODELNAMEs'
def __str__(self):
"""Unicode representation of MODELNAME."""
pass
class UserProfile(AbstractBaseUser, PermissionsMixin):
"""Database model for users in the system"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name']
def get_full_name(self):
"""Retrieve full name of user """
return self.name
def get_short_name(self):
"""Retrieve shot name of user """
return self.name
def __str__(self):
"""Return string representation of our user"""
return self.email
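# A minimal usage sketch, assuming AUTH_USER_MODEL points at UserProfile and a
# database is configured; the credentials below are placeholders.
def example_create_superuser():
    """Create a superuser through the custom manager."""
    return UserProfile.objects.create_superuser(
        email='admin@example.com', name='Admin', password='change-me')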
| 28.084507
| 63
| 0.657472
|
892a9979a4daf180bbe497580e0ee399b7ed1bfa
| 414
|
py
|
Python
|
experiments/fdtd-2d/tmp_files/3651.py
|
LoopTilingBenchmark/benchmark
|
52a3d2e70216552a498fd91de02a2fa9cb62122c
|
[
"BSD-2-Clause"
] | null | null | null |
experiments/fdtd-2d/tmp_files/3651.py
|
LoopTilingBenchmark/benchmark
|
52a3d2e70216552a498fd91de02a2fa9cb62122c
|
[
"BSD-2-Clause"
] | null | null | null |
experiments/fdtd-2d/tmp_files/3651.py
|
LoopTilingBenchmark/benchmark
|
52a3d2e70216552a498fd91de02a2fa9cb62122c
|
[
"BSD-2-Clause"
] | null | null | null |
from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/fdtd-2d/tmp_files/3651.c')
procedure('kernel_fdtd_2d')
loop(0)
known(' nx > 1 ')
known(' ny > 1 ')
tile(1,2,128,2)
tile(1,4,16,4)
tile(2,2,128,2)
tile(2,4,16,4)
tile(3,2,128,2)
tile(3,4,16,4)
| 23
| 116
| 0.722222
|
8c49b944da7c4f7cfc3fc708e9283849868ef216
| 1,912
|
py
|
Python
|
base/internedcollection.py
|
squahtx/hal9000
|
80e13911d0cf240c786f016993cd18bb063e687f
|
[
"MIT"
] | null | null | null |
base/internedcollection.py
|
squahtx/hal9000
|
80e13911d0cf240c786f016993cd18bb063e687f
|
[
"MIT"
] | null | null | null |
base/internedcollection.py
|
squahtx/hal9000
|
80e13911d0cf240c786f016993cd18bb063e687f
|
[
"MIT"
] | null | null | null |
from .event import Event
class InternedCollection(object):
def __init__(self, factory, identity = None):
super(InternedCollection, self).__init__()
if identity is None: identity = lambda x: x
self._itemAdded = Event("itemAdded")
self._itemRemoved = Event("itemRemoved")
self.factory = factory
self.identity = identity
self.items = {}
def __getitem__(self, id):
return self.items[id]
def __iter__(self):
return iter(self.items.values())
def __len__(self):
return len(self.items)
# Events
@property
def itemAdded(self):
return self._itemAdded
@property
def itemRemoved(self):
return self._itemRemoved
# Collection
    def clear(self):
        removedIds = list(self.items.keys())
[self.remove(self.items[id]) for id in removedIds]
def get(self, id, default = None):
return self.items.get(id, default)
def intern(self, item):
return self.add(item)
def inject(self, item, id = None):
if id is None: id = self.identity(item)
if id in self.items: self.remove(self.items[id])
self.items[id] = item
self.onItemAdded(item)
self.itemAdded(item)
return item
def update(self, enumerable):
ids = set()
[ids.add(self.identity(item)) for item in enumerable]
[self.intern(item) for item in enumerable]
removedIds = [id for id in self.items.keys() if id not in ids]
[self.remove(self.items[id]) for id in removedIds]
# Internal
def onItemAdded(self, item): pass
def onItemRemoved(self, item): pass
# Internal
def add(self, item):
id = self.identity(item)
if id in self.items: return self.items[id]
item = self.factory(item)
self.items[id] = item
self.onItemAdded(item)
self.itemAdded(item)
return item
def remove(self, item):
id = self.identity(item)
if id not in self.items: return
del self.items[id]
self.onItemRemoved(item)
self.itemRemoved(item)
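# A minimal usage sketch, assuming plain dicts keyed by 'id' as items; the
# values below are made up and this helper is not used elsewhere.
def _example_usage():
    people = InternedCollection(factory=dict, identity=lambda item: item["id"])
    people.intern({"id": 1, "name": "Alice"})
    people.update([{"id": 1, "name": "Alice"}, {"id": 2, "name": "Bob"}])
    assert len(people) == 2
    assert people[2]["name"] == "Bob"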
| 21.010989
| 64
| 0.675209
|
dd7460808a52a4b92a2dd6c926adc18145be3007
| 23,305
|
py
|
Python
|
src/evaluation_system/model/solr_core.py
|
FREVA-CLINT/Freva
|
53c6d0951a8dcfe985c8f33cbb3fbac7e8a3db04
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2020-06-12T18:18:48.000Z
|
2021-12-18T03:35:08.000Z
|
src/evaluation_system/model/solr_core.py
|
FREVA-CLINT/Freva
|
53c6d0951a8dcfe985c8f33cbb3fbac7e8a3db04
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
src/evaluation_system/model/solr_core.py
|
FREVA-CLINT/Freva
|
53c6d0951a8dcfe985c8f33cbb3fbac7e8a3db04
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
"""
Created on 11.03.2013
@author: Sebastian Illing / estani
This package encapsulates access to a Solr instance (not for search but for administration)
We define two cores::
* files: all files - id is file (full file path)
    * latest: only those files from the latest dataset version - id is file_no_version (full file path *without* version information)
"""
import os
import shutil
import urllib2
import json
from datetime import datetime
import logging
log = logging.getLogger(__name__)
from evaluation_system.model.file import DRSFile
from evaluation_system.misc import config
class META_DATA(object):
"""This class just holds some values for the dump file parsing/writing. Here a small example::
crawl_dir /some/dir
data
/some/dir/some/other/subdir/file1.nc,123123.0
/some/dir/some/other/subdir/file2.nc,123123.230
/some/dir/yet/another/subdir/file.nc,123123.230
...
See more info on :class:`SolrCore.dump_fs_to_file` and :class:`SolrCore.load_fs_from_file`"""
CRAWL_DIR = 'crawl_dir'
"The path to the directory that was crawled and from which the following list of files comes."
DATA = 'data'
"Marks the end of the metadata area. What follows is a list of filepaths and timestamps separated by a line break."
class SolrCore(object):
"""Encapsulate access to a Solr instance"""
def __init__(self, core=None, host=None, port=None, echo=False, instance_dir=None, data_dir=None, get_status=True):
"""Create the connection pointing to the proper solr url and core.
:param core: The name of the core referred (default: loaded from config file)
:param host: the hostname of the Solr server (default: loaded from config file)
:param port: The port number of the Solr Server (default: loaded from config file)
:param echo: If True, show all urls before issuing them.
:param instance_dir: the core instance directory (if empty but the core exists it will get downloaded from Solr)
:param data_dir: the directory where the data is being kept (if empty but the core exists it will
get downloaded from Solr)"""
if host is None:
host = config.get(config.SOLR_HOST)
if port is None:
port = config.get(config.SOLR_PORT)
if core is None:
core = config.get(config.SOLR_CORE)
self.solr_url = 'http://%s:%s/solr/' % (host, port)
self.core = core
self.core_url = self.solr_url + core + '/'
self.echo = echo
self.instance_dir = instance_dir
self.data_dir = data_dir
if get_status:
st = self.status()
else:
st = {}
if self.instance_dir is None and 'instanceDir' in st:
self.instance_dir = st['instanceDir']
if self.data_dir is None and 'dataDir' in st:
self.data_dir = st['dataDir']
else:
self.data_dir = 'data'
# Other Defaults
import socket
socket.setdefaulttimeout(20)
def __str__(self):
return '<SolrCore %s>' % self.core_url
def post(self, list_of_dicts, auto_list=True, commit=True):
"""Sends some json to Solr for ingestion.
:param list_of_dicts: either a json or more normally a list of json instances that will be sent to Solr for ingestion
        :param auto_list: if True, wrap list_of_dicts in a list when a single dict is passed
:param commit: send also a Solr commit so that changes can be seen immediately."""
if auto_list and not isinstance(list_of_dicts, list):
list_of_dicts = [list_of_dicts]
endpoint = 'update/json?'
if commit:
endpoint += 'commit=true'
query = self.core_url + endpoint
if self.echo:
log.debug(query)
req = urllib2.Request(query, json.dumps(list_of_dicts))
req.add_header("Content-type", "application/json")
return urllib2.urlopen(req).read()
def get_json(self, endpoint, use_core=True, check_response=True):
"""Return some json from server. Is the raw access to Solr.
:param endpoint: The endpoint, path missing after the core url and all parameters encoded in it (e.g. 'select?q=*')
:param use_core: if the core info is used for generating the endpoint. (if False, then == self.core + '/' + endpoint)
:param check_response: If the response should be checked for errors. If True, raise an exception if something is
wrong (default: True)"""
if '?' in endpoint:
endpoint += '&wt=json'
else:
endpoint += '?wt=json'
if use_core:
query = self.core_url + endpoint
else:
query = self.solr_url + endpoint
if self.echo:
log.debug(query)
req = urllib2.Request(query)
response = json.loads(urllib2.urlopen(req).read())
if response['responseHeader']['status'] != 0:
raise Exception("Error while accessing Core %s. Response: %s" % (self.core, response))
return response
def get_solr_fields(self):
"""Return information about the Solr fields. This is dynamically generated and because of
dynamicFiled entries in the Schema, this information cannot be inferred from anywhere else."""
return self.get_json('admin/luke')['fields']
def create(self, instance_dir=None, data_dir=None, config='solrconfig.xml', schema='schema.xml'):
"""Creates (actually "register") this core. The Core configuration and directories must
be generated beforehand (not the data one). You may clone an existing one or start from scratch.
:param instance_dir: main directory for this core
:param data_dir: Data directory for this core (if left unset, a local "data" directory in instance_dir will be used)
:param config: The configuration file (expected in instance_dir/conf)
:param schema: The schema file (expected in instance_dir/conf)"""
# check basic configuration (it must exists!)
if instance_dir is None and self.instance_dir is None:
raise Exception("No Instance directory defined!")
elif instance_dir is not None:
self.instance_dir = instance_dir
if not os.path.isdir(self.instance_dir):
raise Exception("Expected Solr Core configuration not found in %s" % self.instance_dir)
if data_dir is not None:
self.data_dir = data_dir
return self.get_json('admin/cores?action=CREATE&name=%s' % self.core
+ '&instanceDir=%s' % self.instance_dir
+ '&config=%s' % config
+ '&schema=%s' % schema
+ '&dataDir=%s' % self.data_dir, use_core=False
)
def reload(self):
"""Reload the core. Useful after schema changes.
Be aware that you might need to re-ingest everything if there were changes to the indexing part of the schema."""
return self.get_json('admin/cores?action=RELOAD&core=' + self.core, use_core=False)
def unload(self):
"""Unload the core."""
return self.get_json('admin/cores?action=UNLOAD&core=' + self.core, use_core=False)
def swap(self, other_core):
"""Will swap this core with the given one (that means rename their references)
:param other_core: the name of the other core that this will be swapped with."""
return self.get_json('admin/cores?action=SWAP&core=%s&other=%s' % (self.core, other_core), use_core=False)
def status(self, general=False):
"""Return status information about this core or the whole Solr server.
:param general: If True return all information as provided by the server, otherwise just the status info
from this core."""
url_str = 'admin/cores?action=STATUS'
if not general:
url_str += '&core=' + self.core
response = self.get_json(url_str, use_core=False)
if general:
return response
else:
return response['status'][self.core]
def clone(self, new_instance_dir, data_dir='data', copy_data=False):
"""Copies a core somewhere else.
:param new_instance_dir: the new location for the clone.
:param data_dir: the location of the data directory for this new clone.
        :param copy_data: If the data should also be copied (Warning: this is done on-the-fly, so be sure to unload the core
            first, or otherwise ensure there's no chance of getting corrupted data; I don't know any other way besides
            unloading the original core)."""
try:
os.makedirs(new_instance_dir)
except:
pass
shutil.copytree(os.path.join(self.instance_dir, 'conf'), os.path.join(new_instance_dir, 'conf'))
if copy_data:
shutil.copytree(os.path.join(self.instance_dir, self.data_dir), os.path.join(new_instance_dir, data_dir))
def delete(self, query):
"""Issue a delete command, there's no default query for this to avoid unintentional deletion."""
self.post(dict(delete=dict(query=query)), auto_list=False)
@staticmethod
def dump_fs_to_file(start_dir, dump_file, batch_size=1000, check=False, abort_on_errors=False):
"""This is the currently used method for ingestion. This method generates a file with
a listing of paths and timestamps from the file system. The syntax of the file looks like this::
crawl_dir /path/to/some/directory
data
/path/to/a/file,1239879.0
/path/to/another/file,1239879.0
...
The crawl_dir indicates the directory being crawled and results in the deletion of all files whose path starts with
that one (i.e. everything under that path will be *replaced*).
Generating this file takes at least 8 hours for the whole /miklip/integration/data4miklip directory. It would be
nice to generate it in a different manner (e.g. using the gpfs policy API).
:param start_dir: The directory from which the file system will be crawled
:param dump_file: the path to the file that will contain the dump. if the file ends with '.gz' the resulting file
will be gziped (preferred)
:param batch_size: number of entries that will be written to disk at once. This might help pin-pointing crashes.
:param check: if the paths should be checked. While checking path the resulting paths are guaranteed to be accepted
later on normally this is too slow for this phase, so the default is False.
:param abort_on_errors: If dumping should get aborted as soon as an error is found, i.e. a file that can't be ingested.
            Most of the time, many of the files found are not data files at all.
log.debug('starting sequential ingest')
if dump_file.endswith('.gz'):
# print "Using gzip"
import gzip
# the with statement support started with python 2.7 (http://docs.python.org/2/library/gzip.html)
# Let's leave this python 2.6 compatible...
f = gzip.open(dump_file, 'wb')
else:
f = open(dump_file, 'w')
try:
batch_count = 0
# store metadata
f.write('%s\t%s\n' % (META_DATA.CRAWL_DIR, start_dir))
# store data
f.write('\n%s\n' % META_DATA.DATA)
for path in dir_iter(start_dir):
if check:
try:
DRSFile.from_path(path)
except:
if abort_on_errors:
raise
else:
print "Error ingesting %s" % path
continue
ts = os.path.getmtime(path)
f.write('%s,%s\n' % (path, ts))
batch_count += 1
                if batch_count >= batch_size:
                    f.flush()
                    batch_count = 0  # reset so we only flush once per batch_size entries
finally:
f.close()
@staticmethod
def load_fs_from_file(dump_file, batch_size=10000, abort_on_errors=False, core_all_files=None, core_latest=None):
"""This is the opposite method of :class:`SolrCore.dump_fs_to_file`. It loads the files system information to Solr
from the given file. The syntax is defined already in the mentioned dump method.
Contrary to what was previously done, this method loads the information from a file and decides if it should be added
to just the common file core, holding the index of all files, or also to the *latest* core, holding information
about the latest version of all files (remember that in CMIP5 not all files are version, just the datasets).
:param dump_file: the path to the file that contains the dump. if the file ends with '.gz' the file is assumed to
be gziped.
:param batch_size: number of entries that will be written to the Solr main core (the latest core will be flushed at
the same time and is guaranteed to have at most as many as the other.
:param abort_on_errors: If dumping should get aborted as soon as an error is found, i.e. a file that can't be ingested.
            Most of the time, many of the files found in the dump file are not data files at all
:param core_all_files: if desired you can pass the SolrCore managing all the files (if not the one named 'files'will
be used, using the configuration from the config file).
:param core_latest: if desired you can pass the SolrCore managing the latest file versions (if not the one named
'latest' will be used, using the configuration from the config file).
"""
if dump_file.endswith('.gz'):
# print "Using gzip"
import gzip
# the with statement support started with python 2.7 (http://docs.python.org/2/library/gzip.html)
# Let's leave this python 2.6 compatible...
f = gzip.open(dump_file, 'rb')
else:
f = open(dump_file, 'r')
if core_latest is None:
core_latest = SolrCore(core='latest')
if core_all_files is None:
core_all_files = SolrCore(core='files')
try:
batch_count = 0
batch = []
batch_latest = []
batch_latest_new = {}
latest_versions = {}
header = True
import re
meta = re.compile('[^ \t]{1,}[ \t]{1,}(.*)$')
for line in f:
line = line.strip()
if not line or line.startswith('#'):
continue
if header:
if line.startswith(META_DATA.CRAWL_DIR):
crawl_dir = meta.match(line).group(1).strip()
# we should delete these. We need to scape the first slash since Solr
# will expect a regexp if not (new to Solr 4.0)
core_all_files.delete('file:\\%s*' % crawl_dir)
core_latest.delete('file:\\%s*' % crawl_dir)
elif line.startswith(META_DATA.DATA):
header = False
continue
else:
file_path, timestamp = line.split(',')
try:
drs_file = DRSFile.from_path(file_path)
metadata = SolrCore.to_solr_dict(drs_file)
ts = float(timestamp)
metadata['timestamp'] = ts
metadata['creation_time'] = timestamp_to_solr_date(ts)
batch.append(metadata)
if drs_file.is_versioned():
version = latest_versions.get(drs_file.to_dataset(versioned=False), None)
if version is None or drs_file.get_version() > version:
# unknown or new version, update
version = drs_file.get_version()
latest_versions[drs_file.to_dataset(versioned=False)] = version
batch_latest_new[drs_file.to_dataset(versioned=False)] = metadata
#batch_latest = batch_latest_new.values()
if not drs_file.get_version() < version:
# print latest_versions
#print drs_file.get_version(), version, metadata
batch_latest.append(metadata)
else:
# if not version always add to latest
batch_latest_new[drs_file.to_dataset(versioned=False)] = metadata
batch_latest.append(metadata)
if len(batch) >= batch_size:
print "Sending entries %s-%s" % (batch_count * batch_size, (batch_count+1) * batch_size)
core_all_files.post(batch)
batch = []
batch_count += 1
if batch_latest:
core_latest.post(batch_latest)
batch_latest = []
batch_latest_new = {}
except:
print "Can't ingest file %s" % file_path
if abort_on_errors:
raise
# flush
if len(batch) > 0:
print "Sending last %s entries and %s entries to latest core" % (len(batch), len(batch_latest))
#print len(batch_latest)
core_all_files.post(batch)
batch = []
batch_count += 1
if batch_latest:
core_latest.post(batch_latest)
batch_latest = []
finally:
f.close()
@staticmethod
def to_solr_dict(drs_file):
"""Extracts from a DRSFile the information that will be stored in Solr"""
metadata = drs_file.dict['parts'].copy()
metadata['file'] = drs_file.to_path()
if 'version' in metadata:
metadata['file_no_version'] = metadata['file'].replace('/%s/' % metadata['version'], '/')
else:
metadata['file_no_version'] = metadata['file']
metadata['data_type'] = drs_file.drs_structure
# metadata['timestamp'] = float(timestamp)
# metadata['dataset'] = drs_file.to_dataset()
return metadata
# def dump(self, dump_file=None, batch_size=10000, sort_results=False):
# """Dump a list of files and their timestamps that can be ingested afterwards"""
# if dump_file is None:
# # just to store where and how we are storing this
# dump_file = datetime.now().strftime('/miklip/integration/infrastructure/solr/backup_data/%Y%m%d.csv.gz')
#
# def cache(batch_size):
# offset = 0
# while True:
# url_query = 'select?fl=file,timestamp&start=%s&rows=%s&q=*' % (offset, batch_size)
# if sort_results:
# url_query += '&sort=file+desc'
# print "Calling %s" % url_query
# answer = self.get_json(url_query)
# offset = answer['response']['start']
# total = answer['response']['numFound']
# for item in answer['response']['docs']:
# yield (item['file'], item['timestamp'],)
# if total - offset <= batch_size:
# break # we are done
# else:
# offset += batch_size
#
# if dump_file.endswith('.gz'):
# import gzip
# # the with statement support started with python 2.7 (http://docs.python.org/2/library/gzip.html)
# # Let's leave this python 2.6 compatible...
# f = gzip.open(dump_file, 'wb')
# else:
# f = open(dump_file, 'w')
#
# try:
# # store metadata
# f.write('%s\t%s\n' % (META_DATA.CRAWL_DIR, '/'))
#
# # store data
# f.write('\n%s\n' % META_DATA.DATA)
# for file_path, timestamp in cache(batch_size):
# f.write('%s,%s\n' % (file_path, timestamp))
# finally:
# f.close()
def timestamp_to_solr_date(timestamp):
"""Transform a timestamp (float) into a string parsable by Solr"""
return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%dT%H:%M:%SZ')
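# A minimal usage sketch against a local Solr instance; host, port and the
# document below are hypothetical, and get_status=False skips the status call.
def example_post_single_file():
    """Post one hand-built document to the 'files' core and commit it."""
    core = SolrCore(core='files', host='localhost', port='8983', get_status=False)
    core.post([{'file': '/some/path/file.nc',
                'timestamp': 1363000000.0,
                'creation_time': timestamp_to_solr_date(1363000000.0)}])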
# -- These are for multiple processes...
# but There's no benefit for having multiple threads at this time
# on the contrary, it's worse :-/
# There's no improvement for not having this construct either, so I'm leaving it
# here. It might help in the future...
# def search_iter(data_types, search_dict):
# if not isinstance(data_types, list):
# data_types = [data_types]
# for data_type in data_types:
# for file_path in DRSFile.search(data_type, latest_version=False, path_only=True, **search_dict):
# yield file_path
# # yield SolrCore.to_solr_dict(drs_file)
#
#
def dir_iter(start_dir, abort_on_error=True, followlinks=True):
for base_dir, dirs, files in os.walk(start_dir, followlinks=followlinks):
# make sure we walk them in the proper order (latest version first)
dirs.sort(reverse=True)
files.sort(reverse=True) # just for consistency
for f in files:
yield os.path.join(base_dir, f)
#
#
# def enqueue_from_search(q, data_types, search_dir):
# for metadata in search_iter(data_types, search_dir):
# q.put(metadata)
#
#
# def enqueue_from_dir(q, start_dir, abort_on_error=True):
# for metadata in dir_iter(start_dir, abort_on_error=abort_on_error):
# q.put(metadata)
#
#
# def handle_file_init(q, core, batch_size=10000):
# handle_file.batch_size = batch_size
# handle_file.running = True
# handle_file.q = q
# handle_file.core = core
#
#
# def handle_file(number, end_token):
# print "starting proc %s" % number
# batch_count = 0
# batch = []
# solr = SolrCore(core=handle_file.core)
# while handle_file.running:
# path = handle_file.q.get()
# if path == end_token:
# handle_file.q.put(end_token)
# break
# value = SolrCore.to_solr_dict(DRSFile.from_path(path))
# # import scipy.io.netcdf
# # with scipy.io.netcdf.netcdf_file(value['file'], 'r') as f:
# # value.update(f._attributes)
# ts = os.path.getmtime(value['file'])
# value['timestamp'] = ts
# value['creation_time'] = timestamp_to_solr_date(ts)
# batch.append(value)
# if len(batch) >= handle_file.batch_size:
# print "Sending entries %s-%s from %s" % (batch_count * handle_file.batch_size,
# (batch_count+1) * handle_file.batch_size, number)
# solr.post(batch)
# batch = []
# batch_count += 1
#
# if batch:
# print "Sending last %s entries from %s." % (len(batch), number)
# solr.post(batch)
# print "proc %s done!" % number
| 43.724203
| 129
| 0.599828
|
472a3793ba217b49590f65f650e3fc7d9c6f2c41
| 1,052
|
py
|
Python
|
src/baselines/random_baseline.py
|
Michael-Beukman/NEATNoveltyPCG
|
2441d80eb0f6dd288a00ebb56c432963cefc879d
|
[
"MIT"
] | 5
|
2022-01-26T23:19:46.000Z
|
2022-02-10T20:24:19.000Z
|
src/baselines/random_baseline.py
|
Michael-Beukman/NEATNoveltyPCG
|
2441d80eb0f6dd288a00ebb56c432963cefc879d
|
[
"MIT"
] | null | null | null |
src/baselines/random_baseline.py
|
Michael-Beukman/NEATNoveltyPCG
|
2441d80eb0f6dd288a00ebb56c432963cefc879d
|
[
"MIT"
] | null | null | null |
from common.methods.pcg_method import PCGMethod
from games.game import Game
from games.level import Level
import numpy as np
class RandomBaseline(PCGMethod):
# This is a simple random baseline
def __init__(self, game: Game, init_level: Level) -> None:
super().__init__(game, init_level)
def generate_level(self) -> Level:
low = 0
high = len(self.init_level.tile_types)
new_map = np.random.randint(low, high, size=self.init_level.map.shape)
return self.init_level.from_map(new_map)
if __name__ == '__main__':
from games.mario.mario_game import MarioGame, MarioLevel
from games.maze.maze_game import MazeGame
from games.maze.maze_level import MazeLevel
import matplotlib.pyplot as plt
L = MarioLevel()
baseline = RandomBaseline(MarioGame(L), L)
L = MazeLevel()
baseline = RandomBaseline(MazeGame(L), L)
plt.imshow(baseline.generate_level().show())
plt.show()
plt.imshow(baseline.generate_level().show())
plt.show()
| 30.941176
| 78
| 0.68251
|
2d1d70618803015027d0ed67167745f601348cb2
| 1,920
|
py
|
Python
|
games/forms.py
|
dabreese00/clue-solver-django
|
ff5f33782a21cd1642362f48207f759342767a0e
|
[
"MIT"
] | null | null | null |
games/forms.py
|
dabreese00/clue-solver-django
|
ff5f33782a21cd1642362f48207f759342767a0e
|
[
"MIT"
] | null | null | null |
games/forms.py
|
dabreese00/clue-solver-django
|
ff5f33782a21cd1642362f48207f759342767a0e
|
[
"MIT"
] | null | null | null |
from django import forms
from django.core.exceptions import ValidationError
from .models import ClueRelation, Player, GameCard
from cards.models import CardSet
class ClueRelationForm(forms.ModelForm):
def __init__(self, game, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['player'].queryset = Player.objects.filter(game=game)
self.fields['cards'].queryset = GameCard.objects.filter(game=game)
class Meta:
model = ClueRelation
fields = ['rel_type', 'player', 'cards']
def clean(self):
cleaned_data = super().clean()
rel_type = cleaned_data.get('rel_type')
cards = cleaned_data.get('cards')
if cards:
            # Skip the count/type checks below if 'cards' is already invalid
if (rel_type in (
ClueRelation.RelationType.HAVE,
ClueRelation.RelationType.PASS)):
if len(cards.all()) != 1:
raise ValidationError(
"A Have or Pass must contain exactly 1 card.",
code='invalid-haveorpass-card-count'
)
else:
if len(cards.all()) != 3:
raise ValidationError(
"A Show must contain exactly 3 cards.",
code='invalid-show-card-count'
)
if not ClueRelation.validate_show_card_types(cards.all()):
raise ValidationError(
"A Show must contain 1 card of each type "
"(person, weapon, room).",
code='invalid-show-card-types'
)
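# A minimal usage sketch (illustrative; `game`, `some_player` and `some_card`
# are hypothetical objects belonging to an existing game): the form takes the
# game as its first argument so the player/cards querysets are limited to that
# game, and clean() enforces the 1-card (Have/Pass) vs 3-card (Show) rules above.
#
#     form = ClueRelationForm(game, data={
#         'rel_type': ClueRelation.RelationType.HAVE,
#         'player': some_player.pk,
#         'cards': [some_card.pk],
#     })
#     form.is_valid()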
class CreateGameForm(forms.Form):
card_set = forms.ModelChoiceField(queryset=CardSet.objects.all())
class PlayerForm(forms.ModelForm):
class Meta:
model = Player
fields = ['name', 'hand_size']
| 35.555556
| 74
| 0.548438
|
a04c86953bfb4ac7c27f6b36d94f792478766c43
| 2,340
|
py
|
Python
|
colosseumrl/matchmaking/MatchmakingClient.py
|
carletonz/colosseumrl
|
878f0459731511d716672aee8a5adafcb96cf0a7
|
[
"MIT"
] | 8
|
2019-06-04T00:22:30.000Z
|
2022-02-14T15:27:17.000Z
|
colosseumrl/matchmaking/MatchmakingClient.py
|
carletonz/colosseumrl
|
878f0459731511d716672aee8a5adafcb96cf0a7
|
[
"MIT"
] | 1
|
2019-07-23T03:32:59.000Z
|
2019-07-23T06:16:35.000Z
|
colosseumrl/matchmaking/MatchmakingClient.py
|
carletonz/colosseumrl
|
878f0459731511d716672aee8a5adafcb96cf0a7
|
[
"MIT"
] | 3
|
2020-01-13T08:09:27.000Z
|
2021-11-14T01:30:25.000Z
|
from typing import NamedTuple
from hashlib import sha256
import grpc
from .grpc_gen.server_pb2 import QuickMatchRequest
from .grpc_gen.server_pb2_grpc import MatchmakerStub
class GameResponse(NamedTuple):
""" The container object for the response from the server. """
host: str
port: int
username: str
token: str
ranking: float
def hash_password(username: str, password: str):
""" Has a password and salt it with the username. """
m = sha256()
m.update(password.encode())
m.update(username.encode())
return m.digest()
def request_game(hostname: str, port: int, username: str, password: str = "") -> GameResponse:
""" Contact a matchmaking server and ask for a new game.
This function will block until enough players connect to create a server.
Parameters
----------
hostname: str
port: int
Hostname and port of the remote matchmaking server
username: str
Username that will identify you in the game.
password: str
Password to confirm your identity for ranking and other metadata.
Returns
-------
GameResponse
host: str
Hostname of the assigned gamer server.
port: int
Port of the game server that was created for you
username: str
Your username again to verify.
token: str
Authentication string you will need to provide to connect to the match server
See Also
--------
colosseumrl.matchmaking.MatchmakingClient.GameResponse
The named tuple that will be returned by this function.
"""
username = username.lower()
try:
with grpc.insecure_channel('{}:{}'.format(hostname, port)) as channel:
hashed_password = hash_password(username, password)
response = MatchmakerStub(channel).GetMatch(QuickMatchRequest(username=username, password=hashed_password))
except grpc.RpcError as e:
raise ConnectionError("Failed to connect to specified host. {}:{}".format(e.code(), e.details()))
if response.server == "FAIL":
raise ConnectionError("Could not connect to matchmaking server. Error message: {}".format(response.response))
_, port = response.server.split(":")
return GameResponse(hostname, int(port), username, response.auth_key, response.ranking)
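# A minimal usage sketch (host, port and credentials below are hypothetical):
#
#     response = request_game("matchmaker.example.org", 50051, "alice", "s3cret")
#     print(response.host, response.port, response.token)
#
# The call blocks until the matchmaker has gathered enough players; the
# returned host/port/token are then used to join the created game server.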
| 32.5
| 119
| 0.675214
|
a0cc62a5fe266341fafaaae59a4b133b5049a3e1
| 1,759
|
py
|
Python
|
pychron/lasers/tasks/laser_calibration_panes.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 31
|
2016-03-07T02:38:17.000Z
|
2022-02-14T18:23:43.000Z
|
pychron/lasers/tasks/laser_calibration_panes.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 1,626
|
2015-01-07T04:52:35.000Z
|
2022-03-25T19:15:59.000Z
|
pychron/lasers/tasks/laser_calibration_panes.py
|
UIllinoisHALPychron/pychron
|
f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc
|
[
"Apache-2.0"
] | 26
|
2015-05-23T00:10:06.000Z
|
2022-03-07T16:51:57.000Z
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import Any
from traitsui.api import View, UItem, InstanceEditor, ButtonEditor
# from pyface.tasks.traits_task_pane import TraitsTaskPane
from pyface.tasks.traits_dock_pane import TraitsDockPane
# ============= standard library imports ========================
# ============= local library imports ==========================
class LaserCalibrationExecutePane(TraitsDockPane):
name = "Execute"
id = "pychron.laser_calibration.execute"
def traits_view(self):
v = View(UItem("execute", editor=ButtonEditor(label_value="execute_label")))
return v
class LaserCalibrationControlPane(TraitsDockPane):
name = "Control"
id = "pychron.laser_calibration.control"
editor = Any
def traits_view(self):
v = View(UItem("editor", style="custom", editor=InstanceEditor()))
return v
# ============= EOF =============================================
| 35.18
| 84
| 0.604889
|
da2c3eb3c7af20c3785eb039f29ed74302fd116b
| 768
|
py
|
Python
|
packages/micropython-official/v1.9.4/esp8266/stubs/uctypes.py
|
TheVinhLuong102/micropy-stubs
|
55ff1773008f7c4dfc3d70a403986486226eb6b3
|
[
"MIT"
] | 18
|
2019-07-11T13:31:09.000Z
|
2022-01-27T06:38:40.000Z
|
packages/micropython-official/v1.9.4/esp8266/stubs/uctypes.py
|
TheVinhLuong102/micropy-stubs
|
55ff1773008f7c4dfc3d70a403986486226eb6b3
|
[
"MIT"
] | 9
|
2019-09-01T21:44:49.000Z
|
2022-02-04T20:55:08.000Z
|
packages/micropython-official/v1.9.4/esp8266/stubs/uctypes.py
|
TheVinhLuong102/micropy-stubs
|
55ff1773008f7c4dfc3d70a403986486226eb6b3
|
[
"MIT"
] | 6
|
2019-10-08T05:31:21.000Z
|
2021-04-22T10:21:01.000Z
|
"""
Module: 'uctypes' on esp8266 v1.9.4
"""
# MCU: (sysname='esp8266', nodename='esp8266', release='2.2.0-dev(9422289)', version='v1.9.4-8-ga9a3caad0 on 2018-05-11', machine='ESP module with ESP8266')
# Stubber: 1.1.2
ARRAY = -1073741824
BFINT16 = -671088640
BFINT32 = -402653184
BFINT8 = -939524096
BFUINT16 = -805306368
BFUINT32 = -536870912
BFUINT8 = -1073741824
BF_LEN = 22
BF_POS = 17
BIG_ENDIAN = 1
FLOAT32 = -268435456
FLOAT64 = -134217728
INT16 = 402653184
INT32 = 671088640
INT64 = 939524096
INT8 = 134217728
LITTLE_ENDIAN = 0
NATIVE = 2
PTR = 536870912
UINT16 = 268435456
UINT32 = 536870912
UINT64 = 805306368
UINT8 = 0
VOID = 0
def addressof():
pass
def bytearray_at():
pass
def bytes_at():
pass
def sizeof():
pass
class struct:
''
| 17.066667
| 156
| 0.699219
|
5aae8edea28495fe0a38dd31a1a555722d128185
| 43,831
|
py
|
Python
|
gnumpy/npmat.py
|
ExoCTK/gnumpy3
|
e8366dee8c25ab621c1a8780eb917afc407df0a8
|
[
"BSD-3-Clause"
] | 1
|
2020-12-26T13:22:33.000Z
|
2020-12-26T13:22:33.000Z
|
gnumpy/npmat.py
|
ExoCTK/gnumpy3
|
e8366dee8c25ab621c1a8780eb917afc407df0a8
|
[
"BSD-3-Clause"
] | null | null | null |
gnumpy/npmat.py
|
ExoCTK/gnumpy3
|
e8366dee8c25ab621c1a8780eb917afc407df0a8
|
[
"BSD-3-Clause"
] | 1
|
2020-05-10T00:49:36.000Z
|
2020-05-10T00:49:36.000Z
|
import os, pdb, time, warnings
import numpy as np
__DTYPE__ = np.float64
def dummy():
return CUDAMatrix(np.zeros((1, 1)))
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emmitted
when the function is used."""
def newFunc(*args, **kwargs):
warnings.warn("Call to deprecated function %s." % func.__name__,
category=DeprecationWarning)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
#from cudamat import CUDAMatException
class CUDAMatException(Exception):
pass
IncompatibleDimensionsException = CUDAMatException("Incompatible matrix dimensions.")
InvalidConfig = CUDAMatException("Invalid Configuration Error (i.e., a dim of the array must be smaller than 2**16.")
## TODO: Figure out which functions produce an invalid config error. These are the ones that allocate a thread per col/row/elem.
## Those that allocate a bunch of rows per thread, like mult, add, sub, etc., should be immune to the invalid
## configuration error. PS: this error occurs on the real cudamat, which is why it is mirrored here.
## Sum/Max/Cumsum
MAX_DIM = 2**16
class CUDAMatrix(object):
"""
A CUDAMatrix object represents a matrix of single precision floating point
numbers on a GPU.
"""
def __init__(self, array, ref=True):
if ref:
self.numpy_array = reformat(array)
else:
self.numpy_array = array
assert self.numpy_array.ndim == 2
self.trans = False
def __del__(self):
pass
@staticmethod
def init_random(seed):
import numpy.random as random
random.seed(seed)
@property
def num_elems(self):
return self.numpy_array.size
@property
def shape(self):
return self.numpy_array.shape
def cheap_transpose(self):
return CUDAMatrix(self.reshape((self.shape[1], self.shape[0])))
def reshape(self, shape):
assert shape[0]*shape[1] == self.shape[0]*self.shape[1]
#self.numpy_array.resize(shape)
#self.numpy_array = self.numpy_array.reshape(shape, order='F')
self.numpy_array.resize(*shape)
return self
def copy(self):
return empty().assign(self)
def set_np_array(self, X):
assert X.shape == self.shape
self.numpy_array[:] = X
self.copy_to_device()
return self
def zero_copy(self):
return self.copy().assign(0)
def resize(self, shape):
if self.shape != shape:
print('CUDAMatrix: resize (%s -> %s)' % (self.shape, shape))
#self.numpy_array = np.resize(self.numpy_array, shape).astype(__DTYPE__)
self.numpy_array.resize(shape)
self.numpy_array[:] = 0
return self
@property
def T(self):
return CUDAMatrix(self.numpy_array.T)
@property
def mat(self):
return self.numpy_array
@deprecated
def set_shape(self, shape):
return self.resize(shape)
def asarray(self):
"""
Copies the matrix to an ndarray on the CPU and returns it.
"""
#return reformat(self.numpy_array.copy())
return self.numpy_array
def copy_to_device(self):
"""
Copy the matrix to the GPU.
"""
pass
def select_columns(self, indices, target):
"""
copies some columns of self into target.
<indices> must be a row vector. Its elements are float32's representing integers, e.g. "34.0" means the integer "34".
after this call, for all r,c, target[r,c]=self[r,indices[c]].
This returns target.
Negative indices are interpreted in the usual Python way: all elements of <indices> had better be in the range [-self.shape[1], self.shape[1]-1].
This does bounds checking, but out of bounds indices do not raise an exception (because the programmer was lazy). Instead, they result in NaN values in <target>.
"""
assert target.shape[0]==self.shape[0]
assert indices.shape[0]==1
assert indices.shape[1] == target.shape[1]
for c in range(target.shape[1]):
try:
target.numpy_array[:,c] = self.numpy_array[:, int(indices.numpy_array.ravel()[c])]
except IndexError:
target.numpy_array[:,c] = np.nan
return target
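    # A minimal usage sketch for select_columns (shapes/values are illustrative):
    #
    #     src = CUDAMatrix(np.arange(6.).reshape(2, 3))   # 2 x 3
    #     idx = CUDAMatrix(np.array([[2., 0.]]))          # 1 x 2 row vector of column indices
    #     out = empty((2, 2))
    #     src.select_columns(idx, out)  # out[:, 0] == src[:, 2], out[:, 1] == src[:, 0]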
def set_selected_columns(self, indices, source):
"""
copies all columns of source into some columns of self.
<indices> must be a row vector. Its elements are float32's representing
integers, e.g. "34.0" means the integer "34". after this call, for all
r,c, self[r,indices[c]]=source[r,c]. This returns self.
Negative indices are interpreted in the usual Python way: all elements
of <indices> had better be in the range [-self.shape[1], self.shape[1]-1].
This does bounds checking, but out of bounds indices do not raise an
exception (because the programmer was lazy). Instead, they result in NaN
values in <self>.
"""
assert self.shape[0]==source.shape[0]
assert indices.shape[0]==1
assert indices.shape[1]==source.shape[1]
for c in range(source.shape[1]):
try:
self.numpy_array[:,int(indices.numpy_array.ravel()[c])] = source.numpy_array[:,c]
except IndexError:
self.numpy_array[:,int(indices.numpy_array.ravel()[c])] = np.nan
return self
def copy_to_host(self):
"""
Copy the matrix to the CPU.
"""
return self.asarray()
def np(self):
return self.copy_to_host()
def assign(self, val):
"""Assign val to self, where val can be a scalar or a CUDAMatrix
with the same dimensions as self. """
if isinstance(val, CUDAMatrix):
self.resize(val.shape)
self.numpy_array[:] = val.numpy_array
elif isinstance(val, (int, float, __DTYPE__)):
self.numpy_array[:] = val
return self
def free_device_memory(self):
"""
Free memory used up by the matrix on the GPU.
"""
pass
def set_trans(self, is_trans):
"""
Set the transposedness flag to is_trans.
"""
if is_trans is True:
self.numpy_array = self.numpy_array.T
def slice(self, first_col, last_col):
return CUDAMatrix(self.numpy_array[:, first_col:last_col], ref=False)
def get_row_slice(self, start, end, target = None):
"""
Get the rows with indices start through end. If target is not provided
memory for a new matrix will be allocated.
"""
ans = CUDAMatrix(self.numpy_array[start:end, :].copy())
if target is not None:
target.assign(ans)
else:
target = ans
return target
def set_row_slice(self, start, end, mat):
try:
self.numpy_array[start:end] = mat.numpy_array
except ValueError:
raise IncompatibleDimensionsException
return self
def get_col_slice(self, start, end, target = None):
## NOTE: no .copy()
ans = self.slice(start, end)
if target is not None:
target.assign(ans)
else:
target = ans
return target
def set_col_slice(self, start, end, mat):
return self.slice(start, end).assign(mat)
# def select_columns(self, indices, target):
# """
# Copies selected columns into a target matrix.
# <self>, <indices>, and <target> are all cudamat matrices.
# <self> is an M by K matrix.
# <indices> is of shape 1 by N. All elements x are expected to be
# 0<=x<K, and are expected to have nothing after the decimal point (i.e.
# to be floats representing integers).
# <target> is an M by N matrix that will be filled with the result.
# After the operation, for all i,j, target[i, j] = self[i, int(indices[j])]
# This returns <target>.
# ? idea: No bounds checking is done.
# """
# M, K = self.shape
# one, N = indices.shape
# assert one == 1
# M_, N_ = target.shape
# assert M_ == M and N == N_
# np_ints = indices.numpy_array.astype(int)
# if not (np_ints.max() < K and np_ints.min() >= 0):
# raise ValueError("Index out of bounds.")
# target.numpy_array[:] = self.numpy_array[:, np_ints.flatten()]
# return target
def transpose(self, target = None):
if target is None:
return CUDAMatrix(self.numpy_array.T.copy())
else:
target.numpy_array.resize((self.shape[1], self.shape[0]))
target.numpy_array[:] = self.numpy_array.T
return target
def assign_transpose(self, t):
return t.transpose(target = self)
def fill_with_rand(self):
"""
Fill matrix on the GPU with random numbers drawn from the uniform
distribution over the (0,1) interval.
"""
self.numpy_array[:] = np.random.rand(*self.shape)
return self
def fill_with_randn(self):
"""
Fill matrix on the GPU with random numbers drawn from the standard normal
distribution.
"""
self.numpy_array[:] = np.random.randn(*self.shape)
return self
def add_col_vec(self, vec, target = None):
"""
Add vector vec to every column of the matrix. If a target is provided,
it is used to store the result instead of self.
"""
a, b = self.shape
a_, b_ = vec.shape
if not (b_ == 1 and a_ == a):
raise IncompatibleDimensionsException
if target is None:
target = self
target.resize(self.shape)
target.numpy_array[:] = self.numpy_array + vec.numpy_array
return target
def assign_add_col_vec(self, a, b):
return a.add_col_vec(b, target = self)
def add_col_mult(self, vec, mult, target = None):
"""
Add a multiple of vector vec to every column of the matrix. If a target
is provided, it is used to store the result instead of self.
"""
a, b = self.shape
a_, b_ = vec.shape
if not (b_ == 1 and a_ == a):
raise IncompatibleDimensionsException
if target is None:
target = self
target.resize(self.shape)
target.numpy_array[:] = self.numpy_array + vec.numpy_array * mult
return target
def assign_add_col_mult(self, a, m, b):
return a.add_col_vec(b, m, target = self)
def add_row_vec(self, vec, target = None):
"""
Add vector vec to every row of the matrix. If a target is provided,
it is used to store the result instead of self.
"""
a, b = self.shape
a_, b_ = vec.shape
if not (a_ == 1 and b_ == b):
raise IncompatibleDimensionsException
if target is None:
target = self
target.resize(self.shape)
target.numpy_array[:] = vec.numpy_array + self.numpy_array
return target
def assign_add_row_vec(self, a, b):
return a.add_row_vec(b, target = self)
def mult_by_col(self, vec, target = None):
"""
Multiply vector vec into every column of the matrix. If a target is
provided, it is used to store the result instead of self.
"""
a, b = self.shape
a_, b_ = vec.shape
if not (b_ == 1 and a_ == a):
raise IncompatibleDimensionsException
if target is None:
target = self
target.resize(self.shape)
target.numpy_array[:] = vec.numpy_array * self.numpy_array
return target
def mult_by_row(self, vec, target = None):
"""
Multiply vector vec into every row of the matrix. If a target is
provided, it is used to store the result instead of self.
"""
a, b = self.shape
a_, b_ = vec.shape
if not (b_ == b and a_ == 1):
raise IncompatibleDimensionsException
if target is None:
target = self
target.resize(self.shape)
target.numpy_array[:] = vec.numpy_array * self.numpy_array
return target
def sum(self, axis, target = None):
"""
Sum the matrix along the given dimension, where 0 represents the leading
dimension and 1 represents the non-leading dimension. If a target is
        not provided, a new vector is created for storing the result.
"""
if axis == 0:
ans = self.numpy_array.sum(0)[np.newaxis, :]
elif axis == 1:
ans = self.numpy_array.sum(1)[:, np.newaxis]
else:
raise ValueError("axis must be only 0 or 1; instead, got %s\n", axis)
ans = CUDAMatrix(ans)
if target is not None:
target.assign(ans)
else:
target = ans
return target
def mean(self, axis, target = None):
if axis == 0:
ans = self.numpy_array.mean(0)[np.newaxis, :]
elif axis == 1:
ans = self.numpy_array.mean(1)[:, np.newaxis]
else:
raise ValueError("axis must be only 0 or 1; instead, got %s\n", axis)
ans = CUDAMatrix(ans)
if target is not None:
target.assign(ans)
else:
target = ans
return target
def assign_sum(self, mat, axis):
return mat.sum(axis, target = self)
def assign_mean(self, mat, axis):
return mat.mean(axis, target = self)
def add_sums(self, mat, axis, mult = 1.):
"""
Add a multiple of the sums of the matrix mat along the given dimension
to self.
"""
        # the non-summed axis of self must line up with mat's for the add below
        if self.shape[1 - axis] != mat.shape[1 - axis]:
            raise IncompatibleDimensionsException
sum = mat.sum(axis)
sum.numpy_array *= mult
if axis == 0:
self.add_row_vec(sum)
elif axis == 1:
self.add_col_vec(sum)
return self
def less_than(self, val, target = None):
"""
Perform the operation target = 1. * (self < val), where val can be a matrix or a scalar.
"""
if target is None:
target = self
target.resize(self.shape)
if isinstance(val, (int, float, __DTYPE__)):
target.numpy_array[:] = self.numpy_array < val
else:
if val.shape != self.shape:
raise IncompatibleDimensionsException
target.numpy_array[:] = (self.numpy_array < val.numpy_array).astype(__DTYPE__)
return target
def assign_less_than(self, mat, val):
return mat.less_than(val, self)
def greater_than(self, val, target = None):
"""
Perform the operation target = 1. * (self > val), where val can be a matrix or a scalar.
"""
if target is None:
target = self
target.resize(self.shape)
if isinstance(val, (int, float, __DTYPE__)):
target.numpy_array[:] = (self.numpy_array > val).astype(__DTYPE__)
else:
if val.shape != self.shape:
raise IncompatibleDimensionsException
target.numpy_array[:] = (self.numpy_array > val.numpy_array).astype(__DTYPE__)
return target
def assign_greater_than(self, mat, val):
return mat.greater_than(val, self)
def max(self, axis, target = None, transpose_aux=None):
"""
Find the maximum value along the given dimension, where 0 represents the
leading dimension and 1 represents the non-leading dimension. If a target
        is not provided, a new vector is created for storing the result.
"""
m, n = self.shape
if axis == 0:
if target is None:
target = empty((1, n))
target.resize((1, n))
target.numpy_array[:] = self.numpy_array.max(0)
elif axis == 1:
            # In theory, we are supposed to do this:
# if not target:
# #target = CUDAMatrix(np.empty((m, 1), dtype=np.float32, order = 'F'))
# target = empty((m, 1))
# else:
# target.resize((m, 1))
# err_code = _cudamat.max_by_axis(self.p_mat, target.p_mat, ct.c_int(axis))
# if err_code:
# raise generate_exception(err_code)
            assert transpose_aux is not None
self.transpose(target = transpose_aux)
target.reshape(target.shape[::-1])
transpose_aux.max(axis = 0, target = target)
target.reshape(target.shape[::-1])
return target
def assign_max(self, mat, axis, transpose_aux=None):
return mat.max(axis, target = self, transpose_aux = transpose_aux)
def total_max(self):
row_maxes = empty((1, 1)).assign_max(self, axis = 0)
return row_maxes.reshape((row_maxes.shape[1], row_maxes.shape[0])).max(axis = 0).asarray()[0,0]
def total_sum(self):
return self.numpy_array.sum()
def sign(self, target = None):
if target is None:
target = empty(self.shape)
target.resize(self.shape)
target.numpy_array[:] = np.sign(self.numpy_array)
return target
def assign_sign(self, a):
return a.sign(target = self)
def apply_sigmoid(self, target = None):
"""
Apply the logistic sigmoid to each element of the matrix.
"""
return sigmoid(self, target)
def sigmoid(self, target = None):
"""
Apply the logistic sigmoid to each element of the matrix.
"""
return sigmoid(self, target)
def assign_sigmoid(self, t):
return sigmoid(t, self)
def log(self, target = None):
return log(self, target)
def assign_log(self, t):
return log(t, self)
def exp(self, target = None):
return exp(self, target)
def assign_exp(self, t):
return exp(t, self)
def pow(self, p, target = None):
return pow(self, p, target)
def assign_pow(self, mat, p):
return pow(mat, p, self)
def sqrt(self, target = None):
return sqrt(self, target)
def assign_sqrt(self, mat):
return sqrt(mat, self)
def reciprocal(self, target = None):
"""
Find the reciprocal of each element of the matrix.
"""
if not target:
target = self
target.resize(self.shape)
target.numpy_array[:] = 1./self.numpy_array[:]
return target
def assign_reciprocal(self, mat):
return mat.reciprocal(target = self)
def dot(self, mat2, target = None):
"""
Multiply the matrix by mat2 from the right.
"""
return dot(self, mat2, target)
def assign_dot(self, m1, m2):
m1.dot(m2, target = self)
return self
def add_dot(self, m1, m2):
"""
Add the dot product of m1 and m2 to the matrix.
"""
m3 = dot(m1, m2)
if m3.shape != self.shape:
raise IncompatibleDimensionsException
self.numpy_array += m3.numpy_array
return self
def subtract_dot(self, m1, m2):
"""
Subtract the dot product of m1 and m2 from the matrix.
"""
m3 = dot(m1, m2)
if m3.shape != self.shape:
raise IncompatibleDimensionsException
self.numpy_array -= m3.numpy_array
return self
def add_mult(self, mat2, alpha = 1.):
"""
Add multiple of mat2 to the matrix.
"""
if mat2.shape != self.shape:
raise IncompatibleDimensionsException
self.numpy_array += mat2.numpy_array * alpha
return self
def assign_mult(self, mat2, alpha):
self.resize(mat2.shape)
self.assign(0)
self.add_mult(mat2, alpha)
return self
def subtract_mult(self, mat2, alpha = 1.):
"""
Subtract a multiple of mat2 from the matrix.
"""
if mat2.shape != self.shape:
raise IncompatibleDimensionsException
self.numpy_array -= mat2.numpy_array * alpha
return self
def add(self, val, target = None):
"""Add val to self, where val can be a scalar or a CUDAMatrix with the
same dimensions as self. """
if not target:
target = self
target.resize(self.shape)
if isinstance(val, CUDAMatrix):
if target.shape != val.shape:
raise IncompatibleDimensionsException
target.numpy_array[:] = self.numpy_array + val.numpy_array
elif isinstance(val, (int, float, __DTYPE__)):
target.numpy_array[:] = self.numpy_array + val
else:
raise ValueError("Value must be of type CUDAMatrix, int, or float.")
return target
def assign_add(self, a, b):
a.add(b, target = self)
return self
def subtract(self, val, target = None):
"""Subtract val from self, where val can be a scalar or a CUDAMatrix with
the same dimensions as self. """
if not target:
target = self
target.resize(self.shape)
if isinstance(val, CUDAMatrix):
if target.shape != val.shape:
raise IncompatibleDimensionsException
target.numpy_array[:] = self.numpy_array - val.numpy_array
elif isinstance(val, (int, float, __DTYPE__)):
target.numpy_array[:] = self.numpy_array - val
else:
raise ValueError("Value must be of type CUDAMatrix, int, or float.")
return target
def assign_subtract(self, a, b):
a.subtract(b, target = self)
return self
def divide(self, val, target = None):
"""Divide self by val, where val can be a scalar or a CUDAMatrix with the
same dimensions as self. """
if not target:
target = self
target.resize(self.shape)
if isinstance(val, CUDAMatrix):
if target.shape != val.shape:
raise IncompatibleDimensionsException
target.numpy_array[:] = self.numpy_array / val.numpy_array
elif isinstance(val, (int, float, __DTYPE__)):
target.numpy_array[:] = self.numpy_array / val
else:
raise ValueError("Value must be of type CUDAMatrix, int, or float.")
return target
def assign_divide(self, a, b):
a.divide(b, target = self)
return self
def mult(self, val, target = None):
"""Multiply self by val, where val can be a scalar or a CUDAMatrix with
the same dimensions as self. """
if not target:
target = self
target.resize(self.shape)
if isinstance(val, CUDAMatrix):
if target.shape != val.shape:
raise IncompatibleDimensionsException
target.numpy_array[:] = self.numpy_array * val.numpy_array
elif isinstance(val, (int, float, __DTYPE__)):
target.numpy_array[:] = self.numpy_array * val
else:
raise ValueError("Value must be of type CUDAMatrix, int, or float.")
return target
def assign_mult(self, a, b):
a.mult(b, target = self)
return self
@deprecated
def assign_scalar(self, alpha):
"""
Assign scalar alpha to every element of the matrix.
"""
self.assign(alpha)
return self
@deprecated
def mult_by_scalar(self, alpha, target = None):
"""
Multiply the matrix by a scalar.
"""
return self.mult(alpha, target)
@deprecated
def div_by_scalar(self, alpha, target = None):
"""
Divide the matrix by a scalar.
"""
return self.divide(alpha, target)
@deprecated
def add_scalar(self, alpha, target = None):
"""
Increment the matrix by a scalar.
"""
return self.add(alpha, target)
def euclid_norm(self):
return np.sqrt((self.numpy_array**2).sum())
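# A minimal usage sketch of this CPU-backed CUDAMatrix stand-in (values are
# illustrative):
#
#     a = CUDAMatrix(np.array([[1., 2.], [3., 4.]]))
#     b = a.copy().mult(2.)   # elementwise scaling, in place on the copy
#     c = dot(a, b)           # matrix product into a fresh CUDAMatrix
#     c.asarray()             # back to a plain numpy array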
def empty(shape=None):
"""
Creates and returns a new CUDAMatrix with the given shape.
"""
if shape is None:
shape = (1, 1)
return CUDAMatrix(np.empty(shape))
def zeros(shape):
return empty(shape).assign(0)
def randn(a, b):
ans = empty((a, b)).fill_with_randn()
return ans
def sum(mat, axis, target = None):
"""
Sum the matrix along the given dimension, where 0 represents the leading
dimension and 1 represents the non-leading dimension. If a target is
    not provided, a new vector is created for storing the result.
"""
return mat.sum(axis, target)
def dot(m1, m2, target = None):
"""
Find the dot product between m1 and m2.
"""
m = m1.shape[0]
n = m2.shape[1]
target_shape = (m, n)
if not target:
target = empty(target_shape)
target.resize(target_shape)
try:
target.numpy_array[:] = np.dot(m1.numpy_array, m2.numpy_array)
except ValueError:
raise IncompatibleDimensionsException
return target
def vdot(m1, m2):
assert m1.shape == m2.shape
return (m1.asarray() * m2.asarray()).sum()
def sigmoid(mat, target = None):
"""
Apply the logistic sigmoid to each element of the matrix mat.
"""
if not target:
target = mat
target.resize(mat.shape)
target.numpy_array[:] = 1. / (1 + np.exp(-mat.numpy_array))
return target
def tanh(mat, target = None):
"""
    Apply the hyperbolic tangent to each element of the matrix mat.
"""
if not target:
target = mat
target.resize(mat.shape)
target.numpy_array[:] = np.tanh(mat.numpy_array)
return target
def gammaln(mat, target = None):
if not target:
target = mat
target.resize(mat.shape)
import scipy.special
target.numpy_array[:] = scipy.special.gammaln(mat.numpy_array)
return target
def abs(mat, target = None):
"""
    Take the absolute value of each element of the matrix mat.
"""
if not target:
target = mat
target.resize(mat.shape)
    target.numpy_array[:] = np.abs(mat.numpy_array)  # use np.abs; the module-level abs() defined here would recurse
return target
def log_1_plus_exp(mat, target = None):
"""
Apply log(1+exp(x)) to each element of the matrix mat.
"""
if not target:
target = mat
mask = mat.numpy_array > 0
target.numpy_array[mask] = mat.numpy_array[mask] + np.log(1+np.exp(-mat.numpy_array[mask]))
mask = np.logical_not(mask)
target.numpy_array[mask] = np.log(1+np.exp(mat.numpy_array[mask]))
return target
log_1_sum_exp = log_1_plus_exp
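# Note on log_1_plus_exp: the mask split keeps np.exp() from overflowing. For
# x > 0 it uses log(1 + exp(x)) = x + log(1 + exp(-x)), so the exponential is
# always evaluated at a non-positive argument. A quick illustrative check:
#
#     m = CUDAMatrix(np.array([[1000., -1000.]]))
#     log_1_plus_exp(m)   # ~[[1000., 0.]] with no overflow warnings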
def log(mat, target = None):
"""
Find the natural logarithm of each element of the matrix mat.
"""
if not target:
target = mat
target.resize(mat.shape)
target.numpy_array[:] = np.log(mat.numpy_array)
return target
def exp(mat, target = None):
"""
Apply the exponential function to each element of the matrix mat.
"""
if not target:
target = mat
target.resize(mat.shape)
target.numpy_array[:] = np.exp(mat.numpy_array)
return target
def sqrt(mat, target = None):
"""
Compute the square root of each element of the matrix mat.
"""
if not target:
target = mat
target.resize(mat.shape)
target.numpy_array[:] = np.sqrt(mat.numpy_array)
return target
def pow(mat, p, target = None):
"""
Compute the 'p'th power of each element of the matrix mat.
"""
if not target:
target = mat
target.resize(mat.shape)
target.numpy_array[:] = mat.numpy_array[:] ** p
return target
def cuda_sync_threads():
pass
def reformat(array):
"""
Returns array as a float32 array in FORTRAN order.
"""
return np.array(array, dtype=__DTYPE__, order='F')
def cuda_set_some_device():
return 0
def cuda_set_device(dev_id):
"""
Selects the CUDA device with the given ID.
"""
return 0
def cuda_get_free_device():
"""
Returns the ID of the first free CUDA device.
"""
return 0
def cublas_init():
"""
Initialize Cublas.
"""
return 0
def cublas_shutdown():
"""
Shut down Cublas.
"""
return 0
# The following functions are for implementing things like coarse filters and
# models with replicated local filters. At the moment they are quite slow.
def sum_superpixels(source, target, w, temp = None):
    raise NotImplementedError()
def kronecker(mat1, mat2, target = None):
    raise NotImplementedError()
def flat_to_tiled(source, target, stride):
    raise NotImplementedError()
def tiled_to_flat(source, target, stride, temp = None):
    raise NotImplementedError()
def flat_to_tiled3(source, target, stride):
    raise NotImplementedError()
def get_item_from_each_row(source, target, inds, num_rows, num_cols):
if source.numpy_array.shape == (num_cols, num_rows):
src = source.numpy_array.T
else:
src = source.numpy_array.reshape(num_rows, num_cols)
ix = inds.numpy_array.reshape(num_rows).astype(int)
t = target.numpy_array.reshape(num_rows)
for i in range(num_rows):
t[i] = src[i,ix[i]]
return target
def set_item_to_each_row(source, target, inds, num_rows, num_cols):
if source.numpy_array.shape == (num_cols, num_rows):
src = source.numpy_array.T
else:
src = source.numpy_array.reshape(num_rows, num_cols)
ix = inds.numpy_array.reshape(num_rows).astype(int)
t = target.numpy_array.reshape(num_rows)
for i in range(num_rows):
src[i,ix[i]] = t[i]
return source
def abs(X, aux):
return aux.assign_mult(X, X).sqrt()
def total_sum(X):
return X.total_sum()
def mean(mat, axis, target = None):
target = sum(mat, axis, target)
target.mult_by_scalar(1. / mat.shape[axis])
return target
def total_mean(mat):
s = total_sum(mat)
return s / mat.num_elems
def cumsum(mat, target):
target.resize(mat.shape)
target.numpy_array[:] = mat.numpy_array.cumsum(1)
return target
# def multi_transpose(IN, OUT, w, h, batch_size):
# """
# the order of w, h seems wrong, but it is consistent with the one on cudamat.py
# """
# assert IN.shape == (w*h, batch_size)
# assert OUT.shape == (w*h, batch_size)
# from pylab import amap, transpose
# OUT.numpy_array[:] = amap(transpose,IN.numpy_array.reshape(h, w, batch_size).transpose([2,0,1])).transpose([1,2,0]).reshape(w*h, batch_size)
def multi_transpose(IN, OUT, w, h, batch_size):
i = IN.numpy_array
o = OUT.numpy_array
# o = o.reshape(batch_size, w, h)
# o[:] = i.reshape(batch_size, h, w).transpose([0,2,1])
# OUT.numpy_array[:] = o.reshape(*OUT.numpy_array.shape)
o = o.ravel()
o[:] = i.reshape(h, w, batch_size).transpose([1,0,2]).ravel()
OUT.numpy_array[:] = o.reshape(*OUT.numpy_array.shape)
return OUT
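# A minimal call sketch for multi_transpose (sizes are hypothetical; both
# matrices have shape (w*h, batch_size), matching the commented-out asserts
# above, and each column is interpreted as one flattened image to transpose):
#
#     w, h, batch_size = 4, 3, 2
#     IN  = CUDAMatrix(np.random.rand(w * h, batch_size))
#     OUT = empty((w * h, batch_size))
#     multi_transpose(IN, OUT, w, h, batch_size)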
def ind_incr(target, inds, axis):
assert target.shape[1] == inds.shape[0] * inds.shape[1]
assert inds.shape[1] == 1 or inds.shape[0] == 1
if axis == 1:
try:
for i in inds:
target.numpy_array[:, i] += 1
except IndexError:
raise IncompatibleDimensionsException
return target
elif axis == 0:
try:
for i in inds:
target.numpy_array[i, :] += 1
except IndexError:
raise IncompatibleDimensionsException
return target
else:
        raise Exception("bad axis.")
## The code below has been lifted from cudamat. It needs to work with numpy.
MAX_ELEMS = 2 ** 16 - 10
class softmax:
def __init__(self, axis):
self.axis = axis
self.transpose_aux = empty()
self.neg_max = empty()
self.mat = empty()
self.exp = empty()
self.Z = empty()
self.probs = empty()
self.transpose_aux_small = empty()
self.neg_max_small = empty()
self.mat_small = empty()
self.exp_small = empty()
self.Z_small = empty()
self.probs_small = empty()
def __call__(self, mat, target):
if mat.shape != target.shape:
target.resize(mat.shape)
if self.axis == 1:
return self.__call_helper_small__(mat, target)
pos = 0
step = MAX_ELEMS
## width is how many elems we have to work with.
width = mat.shape[1 - self.axis]
while pos < width:
next = min(width, pos + step)
step_size = next - pos
if step_size == step:
self.__call_helper__(mat.slice(pos, next),
target.slice(pos, next))
else:
self.__call_helper_small__(mat.slice(pos, next),
target.slice(pos, next))
pos += step_size
return target
def __call_helper__(self, mat, target):
self.neg_max.\
assign_max(mat,
axis = self.axis,
transpose_aux = self.transpose_aux).\
mult(-1)
if self.axis == 0:
self.mat.assign_add_row_vec(mat, self.neg_max)
else:
self.mat.assign_add_col_vec(mat, self.neg_max)
self.exp.assign_exp(self.mat)
self.Z.assign_sum(self.exp, self.axis).reciprocal()
self.probs.assign(self.exp)
if self.axis == 0:
self.probs.mult_by_row(self.Z)
else:
self.probs.mult_by_col(self.Z)
target.assign(self.probs)
def __call_helper_small__(self, mat, target):
self.neg_max_small.\
assign_max(mat,
axis = self.axis,
transpose_aux = self.transpose_aux_small).\
mult(-1)
if self.axis == 0:
self.mat_small.assign_add_row_vec(mat, self.neg_max_small)
else:
self.mat_small.assign_add_col_vec(mat, self.neg_max_small)
self.exp_small.assign_exp(self.mat_small)
self.Z_small.assign_sum(self.exp_small, self.axis).reciprocal()
self.probs_small.assign(self.exp_small)
if self.axis == 0:
self.probs_small.mult_by_row(self.Z_small)
else:
self.probs_small.mult_by_col(self.Z_small)
target.assign(self.probs_small)
def log_Zs(self, mat, target):
self.neg_max.\
assign_max(mat,
axis = self.axis,
transpose_aux = self.transpose_aux).\
mult(-1)
if self.axis == 0:
self.mat.assign_add_row_vec(mat, self.neg_max)
else:
self.mat.assign_add_col_vec(mat, self.neg_max)
## the exps without the max
self.exp.assign_exp(self.mat)
        ## take the sums of the exps, take the log, and add back the maxes.
target.assign_sum(self.exp, self.axis).log().add(self.neg_max.mult(-1))
return target
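# A minimal usage sketch for the softmax helper (shapes and `logits` are
# hypothetical):
#
#     sm = softmax(axis=0)                       # normalise down each column
#     probs = empty((num_classes, batch_size))
#     sm(CUDAMatrix(logits), probs)              # columns of probs now sum to 1
#
# The MAX_ELEMS slicing in __call__ only matters for very wide inputs; narrow
# matrices are handled by __call_helper_small__ directly.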
class sample_multinomial:
def __init__(self, probs, axis):
raise NotImplementedError("use robust_multinomial instead.")
self.axis = axis
self.cumsums = empty()
self.cumsums_t = empty()
self.probs_t = empty()
self.cumsums_small = empty()
self.cumsums_t_small = empty()
self.probs_t_small = empty()
self.set_probs(probs)
self.samples = empty()
self.samples_small = empty()
if axis == 0:
width = probs.shape[1]
std_width = min(width, MAX_ELEMS)
self.rand_vals = empty((1, std_width))
self.ones = empty((probs.shape[0], 1)).assign(1.)
small_width = max(0, width % MAX_ELEMS)
self.rand_vals_small = empty((1, small_width))
self.ones_small = empty((probs.shape[1], 1)).assign(1.)
elif axis == 1:
width = probs.shape[0]
std_width = min(width, MAX_ELEMS)
self.rand_vals = empty((std_width, 1))
self.ones = empty((1, probs.shape[1])).assign(1.)
small_width = max(0, width % MAX_ELEMS)
self.rand_vals_small = empty((small_width, 1))
self.ones_small = empty((1, probs.shape[1])).assign(1.)
self.rand_mat = empty()
self.threshs = empty()
self.rand_mat_small = empty()
self.threshs_small = empty()
def set_probs(self, probs):
if self.axis == 1:
cumsum(probs, self.cumsums)
else:
probs.transpose(target = self.probs_t)
cumsum(self.probs_t, self.cumsums_t)
self.cumsums_t.transpose(target = self.cumsums)
def multi_sample(self, target, k):
target.resize(self.cumsums.shape)
for i in range(k):
self.rand_vals.fill_with_rand()
if self.axis == 1:
self.rand_mat.assign_dot(self.rand_vals, self.ones)
else:
self.rand_mat.assign_dot(self.ones, self.rand_vals)
self.threshs.\
assign_less_than(self.cumsums, self.rand_mat).\
sum(self.axis, target = self.samples)
ind_incr(target, self.samples, self.axis)
return target
def set_probs_helper_small(self, probs):
self.probs = probs
if self.axis == 1:
cumsum(probs, self.cumsums_small)
else:
probs.transpose(target = self.probs_t_small)
cumsum(self.probs_t_small, self.cumsums_t_small)
self.cumsums_t_small.transpose(target = self.cumsums_small)
def multi_sample_helper_small(self, target, k):
target.resize(self.cumsums_small.shape)
for i in range(k):
self.rand_vals_small.fill_with_rand()
if self.axis == 1:
self.rand_mat_small.assign_dot(self.rand_vals_small, self.ones_small)
else:
self.rand_mat_small.assign_dot(self.ones_small, self.rand_vals_small)
self.threshs_small.\
assign_less_than(self.cumsums_small, self.rand_mat_small).\
sum(self.axis, target = self.samples_small)
ind_incr(target, self.samples_small, self.axis)
return target
def sample_from_probs(self, probs, target):
if probs.shape != target.shape:
target.resize(probs.shape)
## yes: we make a loop.
pos = 0
step = MAX_ELEMS
width = probs.shape[1]
while pos < width:
next = min(width, pos + step)
step_size = next - pos
if step_size == step:
p = probs.slice(pos, next)
t = target.slice(pos, next)
self.set_probs(p)
self.multi_sample(t, 1)
else:
p = probs.slice(pos, next)
t = target.slice(pos, next)
self.set_probs_helper_small(probs)
self.multi_sample_helper_small(t, 1)
pos += step_size
return target
class robust_multinomial:
def __init__(self, shape, axis):
self.axis = axis
self.cumsums = empty()
self.cumsums_t = empty()
self.probs_t = empty()
self.cumsums_small = empty()
self.cumsums_t_small = empty()
self.probs_t_small = empty()
self.samples = empty()
self.samples_small = empty()
if axis == 0:
width = shape[1]
std_width = min(width, MAX_ELEMS)
self.rand_vals = empty((1, std_width))
self.ones = empty((shape[0], 1)).assign(1.)
small_width = max(0, width % MAX_ELEMS)
self.rand_vals_small = empty((1, small_width))
self.ones_small = empty((shape[0], 1)).assign(1.)
elif axis == 1:
width = shape[0]
std_width = min(width, MAX_ELEMS)
self.rand_vals = empty((std_width, 1))
self.ones = empty((1, shape[1])).assign(1.)
small_width = max(0, width % MAX_ELEMS)
self.rand_vals_small = empty((small_width, 1))
self.ones_small = empty((1, shape[1])).assign(1.)
self.rand_mat = empty()
self.threshs = empty()
self.rand_mat_small = empty()
self.threshs_small = empty()
def set_probs(self, probs):
self.probs = probs
if self.axis == 1:
cumsum(probs, self.cumsums)
else:
probs.transpose(target = self.probs_t)
cumsum(self.probs_t, self.cumsums_t)
self.cumsums_t.transpose(target = self.cumsums)
def multi_sample(self, target, k):
target.resize(self.cumsums.shape)
for i in range(k):
self.rand_vals.fill_with_rand()
if self.axis == 1:
self.rand_mat.assign_dot(self.rand_vals, self.ones)
else:
self.rand_mat.assign_dot(self.ones, self.rand_vals)
self.threshs.\
assign_less_than(self.cumsums, self.rand_mat).\
sum(self.axis, target = self.samples)
ind_incr(target, self.samples, self.axis)
return target
def set_probs_helper_small(self, probs):
if self.axis == 1:
cumsum(probs, self.cumsums_small)
else:
probs.transpose(target = self.probs_t_small)
cumsum(self.probs_t_small, self.cumsums_t_small)
self.cumsums_t_small.transpose(target = self.cumsums_small)
def multi_sample_helper_small(self, target, k):
target.resize(self.cumsums_small.shape)
for i in range(k):
self.rand_vals_small.fill_with_rand()
if self.axis == 1:
self.rand_mat_small.assign_dot(self.rand_vals_small, self.ones_small)
else:
self.rand_mat_small.assign_dot(self.ones_small, self.rand_vals_small)
self.threshs_small.\
assign_less_than(self.cumsums_small, self.rand_mat_small).\
sum(self.axis, target = self.samples_small)
ind_incr(target, self.samples_small, self.axis)
return target
def sample_from_probs(self, probs, target):
if probs.shape != target.shape:
target.resize(probs.shape)
## yes: we make a loop.
pos = 0
step = MAX_ELEMS
width = probs.shape[1 - self.axis]
while pos < width:
next = min(width, pos + step)
step_size = next - pos
p = probs.slice(pos, next)
t = target.slice(pos, next)
if step_size == step:
self.set_probs(p)
self.multi_sample(t, 1)
else:
self.set_probs_helper_small(p)
self.multi_sample_helper_small(t, 1)
pos += step
return target
| 21.517428
| 169
| 0.579407
|
2338ddfa264ef1df05958d42b54fc21b95436ccf
| 44,527
|
py
|
Python
|
src/TheLanguage/Lexer/Phrases/UnitTests/DSL_UnitTest.py
|
davidbrownell/DavidBrownell_TheLanguage
|
07170b448a0ebd7fa2325c9ccd4cefdb3cf7eb98
|
[
"BSL-1.0"
] | null | null | null |
src/TheLanguage/Lexer/Phrases/UnitTests/DSL_UnitTest.py
|
davidbrownell/DavidBrownell_TheLanguage
|
07170b448a0ebd7fa2325c9ccd4cefdb3cf7eb98
|
[
"BSL-1.0"
] | null | null | null |
src/TheLanguage/Lexer/Phrases/UnitTests/DSL_UnitTest.py
|
davidbrownell/DavidBrownell_TheLanguage
|
07170b448a0ebd7fa2325c9ccd4cefdb3cf7eb98
|
[
"BSL-1.0"
] | 1
|
2021-06-18T18:58:57.000Z
|
2021-06-18T18:58:57.000Z
|
# ----------------------------------------------------------------------
# |
# | DSL_UnitTest.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2021-09-24 16:43:52
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2021
# | Distributed under the Boost Software License, Version 1.0. See
# | accompanying file LICENSE_1_0.txt or copy at
# | http://www.boost.org/LICENSE_1_0.txt.
# |
# ----------------------------------------------------------------------
"""Unit test for PhraseDSL.py"""
import os
import textwrap
import pytest
pytest.register_assert_rewrite("CommonEnvironment.AutomatedTestHelpers")
import CommonEnvironment
from CommonEnvironment.AutomatedTestHelpers import CompareResultsFromFile
from CommonEnvironmentEx.Package import InitRelativeImports
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
with InitRelativeImports():
from ..DSL import *
from ...Components.Token import (
DedentToken,
IndentToken,
NewlineToken,
PopIgnoreWhitespaceControlToken,
PushIgnoreWhitespaceControlToken,
RegexToken,
)
from ...Components.UnitTests import (
CoroutineMock,
CreateIterator,
parse_mock,
MethodCallsToString,
)
# ----------------------------------------------------------------------
_word_token = RegexToken("Word Token", re.compile(r"(?P<value>[a-z]+)"))
_number_token = RegexToken("Number Token", re.compile(r"(?P<value>\d+)"))
_upper_token = RegexToken("Upper Token", re.compile(r"(?P<value>[A-Z]+)"))
_lpar_token = RegexToken("lpar", re.compile(r"\("))
_rpar_token = RegexToken("rpar", re.compile(r"\)"))
# ----------------------------------------------------------------------
class TestLexSimple(object):
_phrase = CreatePhrase(
name="Phrase",
item=[
_word_token,
_word_token,
NewlineToken(),
],
)
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_SingleSpaceSep(self, parse_mock):
iter = CreateIterator("one two")
assert str(iter) == "[1, 1] (0)"
result = await self._phrase.LexAsync(("root", ), iter, parse_mock)
assert str(iter) == "[1, 1] (0)", "The incoming iterator should not be modified"
assert str(result.IterEnd) == "[2, 1] (8)", "The result iterator should be modified"
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
assert len(parse_mock.method_calls) == 12
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_MultipleSpaceSep(self, parse_mock):
iter = CreateIterator("one two")
assert str(iter) == "[1, 1] (0)"
result = await self._phrase.LexAsync(("root", ), iter, parse_mock)
assert str(iter) == "[1, 1] (0)", "The incoming iterator should not be modified"
assert str(result.IterEnd) == "[2, 1] (13)", "The result iterator should be modified"
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
assert len(parse_mock.method_calls) == 12
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_TabSep(self, parse_mock):
iter = CreateIterator("one\ttwo")
assert str(iter) == "[1, 1] (0)"
result = await self._phrase.LexAsync(("root", ), iter, parse_mock)
assert str(iter) == "[1, 1] (0)", "The incoming iterator should not be modified"
assert str(result.IterEnd) == "[2, 1] (8)", "The result iterator should be modified"
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
assert len(parse_mock.method_calls) == 12
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_MultiTabSep(self, parse_mock):
iter = CreateIterator("one\t\ttwo")
assert str(iter) == "[1, 1] (0)"
result = await self._phrase.LexAsync(("root", ), iter, parse_mock)
assert str(iter) == "[1, 1] (0)", "The incoming iterator should not be modified"
assert str(result.IterEnd) == "[2, 1] (9)", "The result iterator should be modified"
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
assert len(parse_mock.method_calls) == 12
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_TrailingSpace(self, parse_mock):
iter = CreateIterator("one two ")
assert str(iter) == "[1, 1] (0)"
result = await self._phrase.LexAsync(("root", ), iter, parse_mock)
assert str(iter) == "[1, 1] (0)", "The incoming iterator should not be modified"
assert str(result.IterEnd) == "[2, 1] (9)", "The result iterator should be modified"
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
assert len(parse_mock.method_calls) == 12
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_MultipleTrailingSpace(self, parse_mock):
iter = CreateIterator("one two ")
assert str(iter) == "[1, 1] (0)"
result = await self._phrase.LexAsync(("root", ), iter, parse_mock)
assert str(iter) == "[1, 1] (0)", "The incoming iterator should not be modified"
assert str(result.IterEnd) == "[2, 1] (12)", "The result iterator should be modified"
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
assert len(parse_mock.method_calls) == 12
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_TrailingTab(self, parse_mock):
iter = CreateIterator("one two\t")
assert str(iter) == "[1, 1] (0)"
result = await self._phrase.LexAsync(("root", ), iter, parse_mock)
assert str(iter) == "[1, 1] (0)", "The incoming iterator should not be modified"
assert str(result.IterEnd) == "[2, 1] (9)", "The result iterator should be modified"
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
assert len(parse_mock.method_calls) == 12
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_MultipleTrailingTab(self, parse_mock):
iter = CreateIterator("one two\t\t\t\t")
assert str(iter) == "[1, 1] (0)"
assert iter.ContentLen == 12
result = await self._phrase.LexAsync(("root", ), iter, parse_mock)
assert str(iter) == "[1, 1] (0)", "The incoming iterator should not be modified"
assert str(result.IterEnd) == "[2, 1] (12)", "The result iterator should be modified"
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
assert len(parse_mock.method_calls) == 12
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_MultipleLines(self, parse_mock):
iter = CreateIterator(
textwrap.dedent(
"""\
one two
three four
""",
),
)
# Line 1
assert str(iter) == "[1, 1] (0)"
assert iter.ContentLen == 19
result = await self._phrase.LexAsync(("root", ), iter, parse_mock)
assert str(iter) == "[1, 1] (0)", "The incoming iterator should not be modified"
assert str(result.IterEnd) == "[2, 1] (8)", "The result iterator should be modified"
CompareResultsFromFile(str(result), suffix=".line1")
assert len(parse_mock.method_calls) == 12
iter = result.IterEnd
# Line 2
result = await self._phrase.LexAsync(("root", ), iter, parse_mock)
assert str(iter) == "[2, 1] (8)", "The incoming iterator should not be modified"
assert str(result.IterEnd) == "[3, 1] (19)", "The result iterator should be modified"
CompareResultsFromFile(str(result), suffix=".line2")
assert len(parse_mock.method_calls) == 24
assert result.IterEnd.AtEnd()
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_TrailingWhitespace(self, parse_mock):
iter = CreateIterator("one two\n\n \n \n")
assert str(iter) == "[1, 1] (0)"
assert iter.ContentLen == 17
result = await self._phrase.LexAsync(("root", ), iter, parse_mock)
assert str(iter) == "[1, 1] (0)", "The incoming iterator should not be modified"
assert str(result.IterEnd) == "[5, 1] (17)", "The result iterator should be modified"
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
assert len(parse_mock.method_calls) == 12
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_NoMatch(self, parse_mock):
iter = CreateIterator("one two three")
assert str(iter) == "[1, 1] (0)"
assert iter.ContentLen == 14
result = await self._phrase.LexAsync(("root", ), iter, parse_mock)
assert str(iter) == "[1, 1] (0)", "The incoming iterator should not be modified"
assert str(result.IterEnd) == "[1, 8] (7)", "The result iterator should be modified"
CompareResultsFromFile(str(result))
assert len(parse_mock.method_calls) == 10
# ----------------------------------------------------------------------
class TestLexIndentAndDedent(object):
_phrase = CreatePhrase(
name="Phrase",
item=[
_word_token,
NewlineToken(),
IndentToken(),
_word_token,
NewlineToken(),
_word_token,
NewlineToken(),
DedentToken(),
],
)
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_Match(self, parse_mock):
result = await self._phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
one
two
three
""",
),
),
parse_mock,
single_threaded=True,
)
assert result.IterEnd.AtEnd()
CompareResultsFromFile(str(result), suffix=".results")
CompareResultsFromFile(MethodCallsToString(parse_mock), suffix=".events", file_ext=".txt")
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_NoMatch(self, parse_mock):
result = await self._phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
one
two
three
""",
),
),
parse_mock,
)
CompareResultsFromFile(str(result))
assert len(parse_mock.method_calls) == 19
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_FinishEarly(self, parse_mock):
result = await self._phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
one
""",
),
),
parse_mock,
single_threaded=True,
)
CompareResultsFromFile(str(result))
assert len(parse_mock.method_calls) == 10
# ----------------------------------------------------------------------
class TestIgnoreWhitespace(object):
_phrase = CreatePhrase(
name="Phrase",
item=[
_word_token,
_lpar_token,
PushIgnoreWhitespaceControlToken(),
_word_token,
_word_token,
_word_token,
_word_token,
PopIgnoreWhitespaceControlToken(),
_rpar_token,
_word_token,
NewlineToken(),
],
)
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_MatchNoExtra(self, parse_mock):
result = await self._phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
one (
two
three
four
five
) six
""",
),
),
parse_mock,
)
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
assert len(parse_mock.method_calls) == 30
# ----------------------------------------------------------------------
class TestEmbeddedPhrases(object):
_inner_phrase = CreatePhrase(
name="Inner",
item=[
_word_token,
_word_token,
],
)
_phrase = CreatePhrase(
name="Phrase",
item=[
_lpar_token,
_inner_phrase,
_rpar_token,
],
)
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_Match(self, parse_mock):
result = await self._phrase.LexAsync(
("root", ),
CreateIterator("( one two )"),
parse_mock,
single_threaded=True,
)
CompareResultsFromFile(str(result), suffix=".results")
CompareResultsFromFile(MethodCallsToString(parse_mock), suffix=".events", file_ext=".txt")
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_NoMatchAllInner(self, parse_mock):
result = await self._phrase.LexAsync(("root", ), CreateIterator("( one two"), parse_mock)
CompareResultsFromFile(str(result))
assert len(parse_mock.method_calls) == 16
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_NoMatchPartialInner(self, parse_mock):
result = await self._phrase.LexAsync(("root", ), CreateIterator("( one"), parse_mock)
CompareResultsFromFile(str(result))
assert len(parse_mock.method_calls) == 12
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_NoMatchFirstOnly(self, parse_mock):
result = await self._phrase.LexAsync(("root", ), CreateIterator("( "), parse_mock)
CompareResultsFromFile(str(result))
assert len(parse_mock.method_calls) == 9
# ----------------------------------------------------------------------
class TestDynamicPhrases(object):
_word_phrase = CreatePhrase(
name="Word Phrase",
item=[
_word_token,
_word_token,
NewlineToken(),
],
)
_number_phrase = CreatePhrase(
name="Number Phrase",
item=[
_number_token,
NewlineToken(),
],
)
_phrase = CreatePhrase(
name="Phrase",
item=[
DynamicPhrasesType.Statements,
DynamicPhrasesType.Statements,
DynamicPhrasesType.Expressions,
],
)
# ----------------------------------------------------------------------
@staticmethod
@pytest.fixture
def modified_parse_mock(parse_mock):
parse_mock.GetDynamicPhrases.side_effect = lambda unique_id, value: ([TestDynamicPhrases._word_phrase, TestDynamicPhrases._number_phrase] if value == DynamicPhrasesType.Statements else [TestDynamicPhrases._number_phrase], None)
return parse_mock
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_Match(self, modified_parse_mock):
result = await self._phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
worda wordb
123
456
""",
),
),
modified_parse_mock,
single_threaded=True,
)
CompareResultsFromFile(str(result), suffix=".results")
assert result.IterEnd.AtEnd()
CompareResultsFromFile(MethodCallsToString(modified_parse_mock), suffix=".events", file_ext=".txt")
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_NoMatch(self, modified_parse_mock):
result = await self._phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
worda wordb
123
wordc wordd
""",
),
),
modified_parse_mock,
)
CompareResultsFromFile(str(result))
assert len(modified_parse_mock.method_calls) == 54
# ----------------------------------------------------------------------
class TestOrPhrases(object):
_word_phrase = CreatePhrase(
name="Word Phrase",
item=[
_word_token,
NewlineToken(),
],
)
_number_phrase = CreatePhrase(
name="Number Phrase",
item=[
_number_token,
NewlineToken(),
],
)
_upper_phrase = CreatePhrase(
name="Upper Phrase",
item=[
_upper_token,
NewlineToken(),
],
)
_phrase = CreatePhrase(
item=(
_word_phrase,
_number_phrase,
_upper_phrase,
),
)
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_WordMatch(self, parse_mock):
result = await self._phrase.LexAsync(("root", ), CreateIterator("word"), parse_mock)
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
assert len(parse_mock.method_calls) == 20
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_NumberMatch(self, parse_mock):
result = await self._phrase.LexAsync(("root", ), CreateIterator("1234"), parse_mock)
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
assert len(parse_mock.method_calls) == 20
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_UpperMatch(self, parse_mock):
result = await self._phrase.LexAsync(("root", ), CreateIterator("WORD"), parse_mock)
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
assert len(parse_mock.method_calls) == 20
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_NoMatch(self, parse_mock):
result = await self._phrase.LexAsync(
("root", ),
CreateIterator("this is not a match"),
parse_mock,
single_threaded=True,
)
CompareResultsFromFile(str(result), suffix=".results")
CompareResultsFromFile(MethodCallsToString(parse_mock), suffix=".events", file_ext=".txt")
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_EarlyTermination(self, parse_mock):
parse_mock.OnInternalPhraseAsync = CoroutineMock(
side_effect=[True, False],
)
result = await self._phrase.LexAsync(("root", ), CreateIterator("word"), parse_mock)
assert result is None
assert len(parse_mock.method_calls) == 18
# ----------------------------------------------------------------------
class TestEmbeddedOrPhrases(object):
_phrase = CreatePhrase(
(
[_word_token, NewlineToken()],
[_number_token, NewlineToken()],
[_upper_token, NewlineToken()],
),
)
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_Standard(self, parse_mock):
iter = CreateIterator(
textwrap.dedent(
"""\
one
2222
THREE
""",
),
)
# Line 1
result = await self._phrase.LexAsync(("root", ), iter, parse_mock)
CompareResultsFromFile(str(result), suffix=".line1")
iter = result.IterEnd
# Line 2
result = await self._phrase.LexAsync(("root", ), iter, parse_mock)
CompareResultsFromFile(str(result), suffix=".line2")
iter = result.IterEnd
# Line 3
result = await self._phrase.LexAsync(("root", ), iter, parse_mock)
CompareResultsFromFile(str(result), suffix=".line3")
iter = result.IterEnd
# Done
assert iter.AtEnd()
# ----------------------------------------------------------------------
class TestRepeatPhrases(object):
_phrase = CreatePhrase(
[
ZeroOrMorePhraseItem([_word_token, NewlineToken()]),
OneOrMorePhraseItem([_number_token, NewlineToken()]),
OptionalPhraseItem([_upper_token, NewlineToken()]),
OneOrMorePhraseItem([_word_token, NewlineToken()]),
],
)
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_Match1(self, parse_mock):
result = await self._phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
worda
wordb
12
3456
UPPER
wordc
wordd
worde
""",
),
),
parse_mock,
)
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
assert len(parse_mock.method_calls) == 95
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_Match2(self, parse_mock):
result = await self._phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
12
3456
UPPER
wordc
wordd
worde
""",
),
),
parse_mock,
)
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
assert len(parse_mock.method_calls) == 77
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_Match3(self, parse_mock):
result = await self._phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
worda
12
3456
wordc
wordd
worde
""",
),
),
parse_mock,
)
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
assert len(parse_mock.method_calls) == 81
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_Match4(self, parse_mock):
result = await self._phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
12
3456
wordc
wordd
worde
""",
),
),
parse_mock,
single_threaded=True,
)
CompareResultsFromFile(str(result), suffix=".results")
assert result.IterEnd.AtEnd()
CompareResultsFromFile(MethodCallsToString(parse_mock), suffix=".events", file_ext=".txt")
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_NoMatch1(self, parse_mock):
result = await self._phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
worda
wordb
UPPER
wordc
wordd
worde
""",
),
),
parse_mock,
)
CompareResultsFromFile(str(result))
assert len(parse_mock.method_calls) == 33
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_NoMatch2(self, parse_mock):
result = await self._phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
12
3456
UPPER
999
""",
),
),
parse_mock,
)
CompareResultsFromFile(str(result))
assert len(parse_mock.method_calls) == 52
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_EarlyTermination(self, parse_mock):
parse_mock.OnInternalPhraseAsync = CoroutineMock(
side_effect=[True, True, True, True, False],
)
result = await self._phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
worda
wordb
12
3456
wordc
wordd
worde
""",
),
),
parse_mock,
)
assert result is None
# ----------------------------------------------------------------------
class TestRepeatSimilarPhrases(object):
# Ensure that the first phrase doesn't consume the word, which would leave it
# unavailable to the second phrase.
_phrase = CreatePhrase(
item=[
ZeroOrMorePhraseItem(
PhraseItem.Create(
name="Word & Number",
item=[_word_token, _number_token],
),
),
OptionalPhraseItem(
PhraseItem.Create(
item=_word_token,
),
),
],
)
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_LargeMatch(self, parse_mock):
result = await self._phrase.LexAsync(("root", ), CreateIterator("word 123"), parse_mock)
CompareResultsFromFile(str(result))
# assert result.IterEnd.AtEnd()
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_SmallMatch(self, parse_mock):
result = await self._phrase.LexAsync(("root", ), CreateIterator("word"), parse_mock)
CompareResultsFromFile(str(result))
# assert result.IterEnd.AtEnd()
# ----------------------------------------------------------------------
class TestNamedPhrases(object):
_word_line_phrase = CreatePhrase(
name="Word Line",
item=[_word_token, NewlineToken()],
)
_phrase = CreatePhrase(
name="__Phrase__",
item=[
PhraseItem.Create(name="__Dynamic__", item=DynamicPhrasesType.Statements),
PhraseItem.Create(
name="__Or__",
item=OrPhraseItem()
| _word_line_phrase
| PhraseItem.Create(name="Upper Line", item=[_upper_token, NewlineToken()])
,
),
CustomArityPhraseItem(
PhraseItem.Create(name="__Repeat__", item=[_number_token, NewlineToken()]),
2,
2,
),
],
)
# ----------------------------------------------------------------------
@pytest.fixture
def modified_parse_mock(self, parse_mock):
parse_mock.GetDynamicPhrases.side_effect = lambda unique_id, value: ([self._word_line_phrase], None)
return parse_mock
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_Match(self, modified_parse_mock):
result = await self._phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
worda
WORDB
123
456
""",
),
),
modified_parse_mock,
)
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_NoMatch(self, modified_parse_mock):
result = await self._phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
worda
WORDB
123
""",
),
),
modified_parse_mock,
)
CompareResultsFromFile(str(result))
# ----------------------------------------------------------------------
class TestComments(object):
_multiline_phrase = CreatePhrase(
OneOrMorePhraseItem(
PhraseItem.Create(
name="Multiline",
item=[
[_word_token, NewlineToken()],
[_upper_token, NewlineToken()],
[_number_token, NewlineToken()],
],
),
),
)
_indent_phrase = CreatePhrase(
name="Indent",
item=[
_word_token,
RegexToken("Colon", re.compile(r":")),
NewlineToken(),
IndentToken(),
[_upper_token, NewlineToken()],
[_number_token, NewlineToken()],
DedentToken(),
],
)
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_Multiline(self, parse_mock):
result = await self._multiline_phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
one # Comment 1
TWO
3
four
FIVE # Comment 5
66
seven
EIGHT
999 # Comment 9
ten # Comment 10
ELEVEN # Comment 11
12 # Comment 12
""",
),
),
parse_mock,
)
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_Indent(self, parse_mock):
iterator = CreateIterator(
textwrap.dedent(
"""\
one: # Comment 1
TWO
3
four:
FIVE # Comment 5
66
seven:
EIGHT
999 # Comment 9
ten: # Comment 10
ELEVEN # Comment 11
12 # Comment 12
""",
),
)
# 1-3
result = await self._indent_phrase.LexAsync(("root", ), iterator, parse_mock)
CompareResultsFromFile(str(result), suffix=".section1")
iterator = result.IterEnd
# 4-6
result = await self._indent_phrase.LexAsync(("root", ), iterator, parse_mock)
CompareResultsFromFile(str(result), suffix=".section2")
iterator = result.IterEnd
# 7-9
result = await self._indent_phrase.LexAsync(("root", ), iterator, parse_mock)
CompareResultsFromFile(str(result), suffix=".section3")
iterator = result.IterEnd
# 10-12
result = await self._indent_phrase.LexAsync(("root", ), iterator, parse_mock)
CompareResultsFromFile(str(result), suffix=".section4")
iterator = result.IterEnd
assert result.IterEnd.AtEnd()
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_StandAlone(self, parse_mock):
result = await self._multiline_phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
# one
one # After one
# TWO
TWO # After TWO
# 3
3 # After 3
""",
),
),
parse_mock,
)
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
# ----------------------------------------------------------------------
class TestRecursiveOrPhrases(object):
_phrase = CreatePhrase(
name="Recursive Phrase",
item=[
_lpar_token,
(_word_token, None),
_rpar_token,
],
)
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_NoRecursion(self, parse_mock):
result = await self._phrase.LexAsync(("root", ), CreateIterator("( hello )"), parse_mock)
CompareResultsFromFile(str(result))
# assert result.IterEnd.AtEnd()
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_SingleRecursion(self, parse_mock):
result = await self._phrase.LexAsync(("root", ), CreateIterator("((hello))"), parse_mock)
CompareResultsFromFile(str(result))
# assert result.IterEnd.AtEnd()
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_DoubleRecursion(self, parse_mock):
result = await self._phrase.LexAsync(("root", ), CreateIterator("( ( ( hello)))"), parse_mock)
CompareResultsFromFile(str(result))
# assert result.IterEnd.AtEnd()
# ----------------------------------------------------------------------
class TestRecursiveRepeatPhrase(object):
_phrase = CreatePhrase(
name="Recursive Phrase",
item=[
[_number_token, NewlineToken()],
(
CustomArityPhraseItem(None, 1, 2),
[_word_token, NewlineToken()]
),
],
)
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_Match(self, parse_mock):
result = await self._phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
123
456
789
helloa
""",
),
),
parse_mock,
)
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
# ----------------------------------------------------------------------
class TestRecursiveSequencePhrase(object):
_phrase = CreatePhrase(
[
[_number_token, NewlineToken()],
[_upper_token, NewlineToken()],
(
None,
[RegexToken("Delimiter", re.compile(r"----")), NewlineToken()],
),
[_word_token, NewlineToken()],
],
)
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_Match(self, parse_mock):
result = await self._phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
123
UPPERA
456
UPPERB
789
UPPERC
----
worda
wordb
wordc
""",
),
),
parse_mock,
)
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_IgnoreWhitespace(parse_mock):
phrase = CreatePhrase(
name="Phrase",
item=[
PushIgnoreWhitespaceControlToken(),
_word_token,
_word_token,
_word_token,
PopIgnoreWhitespaceControlToken(),
],
)
result = await phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
worda
wordb
wordc
""",
),
),
parse_mock,
)
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_IgnoreWhitespaceNestedPhrase(parse_mock):
phrase = CreatePhrase(
name="Phrase",
item=[
_word_token,
NewlineToken(),
CreatePhrase(
name="Nested",
item=[
PushIgnoreWhitespaceControlToken(),
_word_token,
_word_token,
PopIgnoreWhitespaceControlToken(),
],
),
_word_token,
NewlineToken(),
],
)
result = await phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
worda
wordb
wordc
wordd
""",
),
),
parse_mock,
)
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_IgnoreWhitespaceNestedPhraseWithDedents(parse_mock):
phrase = CreatePhrase(
name="Phrase",
item=[
_word_token,
RegexToken("':'", re.compile(r":")),
NewlineToken(),
IndentToken(),
CreatePhrase(
name="Nested",
item=[
PushIgnoreWhitespaceControlToken(),
_word_token,
_word_token,
PopIgnoreWhitespaceControlToken(),
],
),
_word_token,
NewlineToken(),
DedentToken(),
_word_token,
NewlineToken(),
],
)
result = await phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
newscope:
worda
wordb
wordc
wordd
""",
),
),
parse_mock,
)
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_IgnoreWhitespaceNestedPhraseEndWithDedents(parse_mock):
phrase = CreatePhrase(
name="Phrase",
item=[
_word_token,
RegexToken("':'", re.compile(r":")),
NewlineToken(),
IndentToken(),
CreatePhrase(
name="Nested",
item=[
PushIgnoreWhitespaceControlToken(),
_word_token,
_word_token,
PopIgnoreWhitespaceControlToken(),
],
),
DedentToken(),
],
)
result = await phrase.LexAsync(
("root", ),
CreateIterator(
textwrap.dedent(
"""\
newscope:
worda
wordb
""",
),
),
parse_mock,
)
CompareResultsFromFile(str(result))
assert result.IterEnd.AtEnd()
# ----------------------------------------------------------------------
@pytest.mark.asyncio
async def test_NestedPhrase(parse_mock):
phrase = CreatePhrase(
name="Phrase",
item=[TokenPhrase(_word_token),],
)
result = await phrase.LexAsync(("root", ), CreateIterator("test"), parse_mock)
CompareResultsFromFile(str(result))
# assert result.IterEnd.AtEnd()
| 32.525201
| 236
| 0.42545
|
46fd49c07aff2abf38a480232ea59a52feb9c767
| 1,326
|
py
|
Python
|
test/functional/test_framework/script_util.py
|
Perbug/perbug
|
4fa48d19be465006cdcaf9e6c09d394309aa2bf9
|
[
"MIT"
] | 1
|
2022-03-05T14:50:58.000Z
|
2022-03-05T14:50:58.000Z
|
test/functional/test_framework/script_util.py
|
Perbug/perbug
|
4fa48d19be465006cdcaf9e6c09d394309aa2bf9
|
[
"MIT"
] | 2
|
2021-12-18T03:02:54.000Z
|
2022-01-17T17:55:36.000Z
|
test/functional/test_framework/script_util.py
|
DclrCoin/dclrcoin
|
1ca3bd1f787fdead6ae84b7cda2bab6c6cb62b1d
|
[
"MIT"
] | 2
|
2021-12-25T12:39:07.000Z
|
2022-02-14T03:03:36.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Useful Script constants and utils."""
from test_framework.script import CScript
# To prevent a "tx-size-small" policy rule error, a transaction has to have a
# non-witness size of at least 82 bytes (MIN_STANDARD_TX_NONWITNESS_SIZE in
# src/policy/policy.h). Considering a Tx with the smallest possible single
# input (blank, empty scriptSig), and with an output omitting the scriptPubKey,
# we get to a minimum size of 60 bytes:
#
# Tx Skeleton: 4 [Version] + 1 [InCount] + 1 [OutCount] + 4 [LockTime] = 10 bytes
# Blank Input: 32 [PrevTxHash] + 4 [Index] + 1 [scriptSigLen] + 4 [SeqNo] = 41 bytes
# Output: 8 [Amount] + 1 [scriptPubKeyLen] = 9 bytes
#
# Hence, the scriptPubKey of the single output has to have a size of at
# least 22 bytes, which corresponds to the size of a P2WPKH scriptPubKey.
# The following script constant consists of a single push of 21 bytes of 'a':
# <PUSH_21> <21-bytes of 'a'>
# resulting in a 22-byte size. It should be used whenever (small) fake
# scriptPubKeys are needed, to guarantee that the minimum transaction size is
# met.
DUMMY_P2WPKH_SCRIPT = CScript([b'a' * 21])
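# --- Editor's illustrative sketch (not part of the original module) ----------
# Assumption: CScript subclasses bytes, so len() reports the serialized size,
# i.e. one push opcode (0x15) plus the 21 payload bytes == 22 bytes, which is
# the minimum scriptPubKey size derived in the comment above. The helper name
# below is hypothetical and added only for illustration.
def _check_dummy_script_size():
    assert len(DUMMY_P2WPKH_SCRIPT) == 1 + 21  # push opcode + 21 bytes of 'a'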
| 51
| 84
| 0.737557
|
a149c37aea7b9672b441d22aa1332574486432ce
| 3,129
|
py
|
Python
|
python/ray/tune/tests/test_warnings.py
|
jianoaix/ray
|
1701b923bc83905f8961c06a6a173e3eba46a936
|
[
"Apache-2.0"
] | null | null | null |
python/ray/tune/tests/test_warnings.py
|
jianoaix/ray
|
1701b923bc83905f8961c06a6a173e3eba46a936
|
[
"Apache-2.0"
] | 41
|
2021-09-21T01:13:48.000Z
|
2022-03-19T07:12:22.000Z
|
python/ray/tune/tests/test_warnings.py
|
jianoaix/ray
|
1701b923bc83905f8961c06a6a173e3eba46a936
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import ray
from ray import tune
from ray.data.context import DatasetContext
from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy
from ray.tune.error import TuneError
def test_nowarn_zero_cpu():
def f(*a):
@ray.remote(num_cpus=0)
def f():
pass
@ray.remote(num_cpus=0)
class Actor:
def f(self):
pass
ray.get(f.remote())
a = Actor.remote()
ray.get(a.f.remote())
tune.run(f, verbose=0)
def test_warn_cpu():
def f(*a):
@ray.remote(num_cpus=1)
def f():
pass
ray.get(f.remote())
with pytest.raises(TuneError):
tune.run(f, verbose=0)
with pytest.raises(TuneError):
tune.run(
f, resources_per_trial=tune.PlacementGroupFactory([{"CPU": 1}]), verbose=0
)
def g(*a):
@ray.remote(num_cpus=1)
class Actor:
def f(self):
pass
a = Actor.remote()
ray.get(a.f.remote())
with pytest.raises(TuneError):
tune.run(g, verbose=0)
with pytest.raises(TuneError):
tune.run(
g, resources_per_trial=tune.PlacementGroupFactory([{"CPU": 1}]), verbose=0
)
def test_pg_slots_ok():
def f(*a):
@ray.remote(num_cpus=1)
def f():
pass
@ray.remote(num_cpus=1)
class Actor:
def f(self):
pass
ray.get(f.remote())
a = Actor.remote()
ray.get(a.f.remote())
tune.run(
f, resources_per_trial=tune.PlacementGroupFactory([{"CPU": 1}] * 2), verbose=0
)
def test_bad_pg_slots():
def f(*a):
@ray.remote(num_cpus=2)
def f():
pass
ray.get(f.remote())
with pytest.raises(TuneError):
tune.run(
f,
resources_per_trial=tune.PlacementGroupFactory([{"CPU": 1}] * 2),
verbose=0,
)
def test_dataset_ok():
def f(*a):
ray.data.range(10).show()
tune.run(f, verbose=0)
def g(*a):
ctx = DatasetContext.get_current()
ctx.scheduling_strategy = PlacementGroupSchedulingStrategy(
ray.util.get_current_placement_group()
)
ray.data.range(10).show()
with pytest.raises(TuneError):
tune.run(g, verbose=0)
tune.run(
g, resources_per_trial=tune.PlacementGroupFactory([{"CPU": 1}] * 2), verbose=0
)
def test_scheduling_strategy_override():
def f(*a):
@ray.remote(num_cpus=1, scheduling_strategy="SPREAD")
def f():
pass
@ray.remote(num_cpus=1, scheduling_strategy="SPREAD")
class Actor:
def f(self):
pass
# SPREAD tasks are not captured by placement groups, so don't warn.
ray.get(f.remote())
# SPREAD actors are not captured by placement groups, so don't warn.
a = Actor.remote()
ray.get(a.f.remote())
tune.run(f, verbose=0)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| 21.729167
| 86
| 0.555129
|
d7cc7ad803aea1957fe8a0f65051bf4e4ecfc351
| 10,262
|
py
|
Python
|
exps/actor_critic.py
|
windweller/nlplab
|
aac33ff17036bbf00bd3118953db08ae98da9d36
|
[
"MIT"
] | null | null | null |
exps/actor_critic.py
|
windweller/nlplab
|
aac33ff17036bbf00bd3118953db08ae98da9d36
|
[
"MIT"
] | null | null | null |
exps/actor_critic.py
|
windweller/nlplab
|
aac33ff17036bbf00bd3118953db08ae98da9d36
|
[
"MIT"
] | null | null | null |
from sandbox.rocky.tf.algos.vpg import VPG
from rllab.misc.overrides import overrides
import numpy as np
import pickle
from sandbox.rocky.tf.misc import tensor_utils
import tensorflow as tf
from rllab.misc import ext
# TODO: 2. add code to pretrain critic
class PreTrainer(object):
"""
It seems that rllab does not readily support pretraining,
so we write extra code for it here (borrowed from elsewhere).
"""
def __init__(self, config, env, policy,
distributor, critic):
super(PreTrainer, self).__init__()
self.config = config
self.env = env
self.policy = policy
self.distributor = distributor
self.critic = critic
class ActorCritic(VPG):
"""
1. Override process_samples (use a critic network)
2. Inside we also embed a target network
3. Implement a reward baseline that doesn't use L-BFGS
Many aspects are borrowed from DDPG
"""
def __init__(self,
env, policy,
# delayed_policy, critic, target_critic,
baseline,
# soft_target_tau=0.001,
optimizer=None, optimizer_args=None, **kwargs):
# __init__ signature must line up with VPG's
# otherwise Serialization class won't work...
super(ActorCritic, self).__init__(env, policy, baseline,
optimizer, optimizer_args, **kwargs)
self.policy = policy
self.critic = kwargs["critic"]
self.soft_target_tau = kwargs["soft_target_tau"]
# First, create "target" policy and Q functions
# TODO: 1. this could be broken
# TODO: since you have to load weights into the policy, why don't you just
# TODO: load twice? Initialize two policies (also policies...are mutable)
# TODO: so the pickle would only apply
# TODO: (create policy and critic separately)
self.delayed_policy = kwargs["delayed_policy"] # might have to use tf.train.Saver()
self.target_critic = kwargs["target_critic"]
# TODO: 3. we need to pretrain critic with a fixed actor
# so that n_envs calculation will be correct
self.max_path_length = kwargs["config"]['max_seq_len']
self.batch_size = kwargs["config"]["batch_size"]
def init_opt(self):
# ======== VPG's init_opt =======
is_recurrent = int(self.policy.recurrent)
# we don't need a placeholder from the network because we propagate
# gradients through a categorical variable
obs_var = self.env.observation_space.new_tensor_variable(
'obs',
extra_dims=1 + is_recurrent,
)
action_var = self.env.action_space.new_tensor_variable(
'action',
extra_dims=1 + is_recurrent,
)
advantage_var = tensor_utils.new_tensor(
name='advantage',
ndim=1 + is_recurrent,
dtype=tf.float32,
)
dist = self.policy.distribution
old_dist_info_vars = {
k: tf.placeholder(tf.float32, shape=[None] * (1 + is_recurrent) + list(shape), name='old_%s' % k)
for k, shape in dist.dist_info_specs
}
old_dist_info_vars_list = [old_dist_info_vars[k] for k in dist.dist_info_keys]
state_info_vars = {
k: tf.placeholder(tf.float32, shape=[None] * (1 + is_recurrent) + list(shape), name=k)
for k, shape in self.policy.state_info_specs
}
state_info_vars_list = [state_info_vars[k] for k in self.policy.state_info_keys]
if is_recurrent:
valid_var = tf.placeholder(tf.float32, shape=[None, None], name="valid")
else:
valid_var = None
dist_info_vars = self.policy.dist_info_sym(obs_var, state_info_vars) # this uses network to compute probs
# and sample from probs
logli = dist.log_likelihood_sym(action_var, dist_info_vars) # got it from RecurrentCategorical
# instead of computing it from the GRU output, we are computing loss based on the sampling distribution
kl = dist.kl_sym(old_dist_info_vars, dist_info_vars)
# formulate as a minimization problem
# The gradient of the surrogate objective is the policy gradient
if is_recurrent:
surr_obj = - tf.reduce_sum(logli * advantage_var * valid_var) / tf.reduce_sum(valid_var)
mean_kl = tf.reduce_sum(kl * valid_var) / tf.reduce_sum(valid_var)
max_kl = tf.reduce_max(kl * valid_var)
else:
surr_obj = - tf.reduce_mean(logli * advantage_var)
mean_kl = tf.reduce_mean(kl)
max_kl = tf.reduce_max(kl)
input_list = [obs_var, action_var, advantage_var] + state_info_vars_list
if is_recurrent:
input_list.append(valid_var)
self.optimizer.update_opt(loss=surr_obj, target=self.policy, inputs=input_list)
f_kl = tensor_utils.compile_function(
inputs=input_list + old_dist_info_vars_list,
outputs=[mean_kl, max_kl],
)
self.opt_info = dict(
f_kl=f_kl,
)
# ======== VPG's init_opt End =======
# we are updating critic in optimize_policy, not in here
# y need to be computed first
# critic_update = self.critic.updates # qf_loss
def optimize_policy(self, itr, samples_data):
"""
This is where we optimize the actor and the critic.
It receives its input from BaseSampler's process_samples() in sampler/base.py.
By the time optimize_policy is called, the policy has already been reset()
exactly once, and things should load appropriately (make sure of this).
"""
# all_input_values = tuple(ext.extract(
# samples_data,
# "observations", "actions", "advantages"
# ))
# agent_infos = samples_data["agent_infos"]
# state_info_list = [agent_infos[k] for k in self.policy.state_info_keys]
# dist_info_list = [agent_infos[k] for k in self.policy.distribution.dist_info_keys]
# all_input_values += tuple(state_info_list) + tuple(dist_info_list)
# if self.policy.recurrent:
# all_input_values += (samples_data["valids"],)
#
# rewards = ext.extract(samples_data, "rewards")
# # q_t =
# TODO: if we pass in the same ddist, calling reset()
# TODO: should trigger the same h0/hs. Might need to TEST this!
self.delayed_policy.reset()
# ext.extract will put things in a tuple, we don't need the tuple part...
rewards = ext.extract(samples_data, "rewards")[0]
observations = ext.extract(samples_data, "observations")[0] # dimension will work??
actions = ext.extract(samples_data, "actions")[0] # dimension will work??
# so now it should be batched
# we are going through the batch
# q: (?, time_steps, )
# print "reward shape: ", rewards.shape (2, 32)
# print "policy output shape: ", self.delayed_policy.f_output(observations).shape (2, 32, 52)
# print "Q output shape: ", self.target_critic.compute_reward_sa(observations, actions).shape (2, 32)
# print "observations shape: ", observations.shape
q = rewards + np.sum(self.delayed_policy.f_output(observations) *
self.target_critic.compute_reward_sa(observations, actions), axis=2)
# sum out action_dim
# then we update critic using the computed q
self.critic.train(observations, actions, q)
# then we process the rewards and try to get the advantage
paths = samples_data["paths"] # this is the original paths
for path in paths:
X = np.column_stack((path['observations'], path['actions']))
# if env returns some ambient reward, we want to ignore these for training.
# but still keep track of it for diagnostics.
path['env_rewards'] = path['rewards']
# compute_reward returns (max_seq_len,) we squeezed
# hope this is correct. path['actions'] is one-hot encoding
rewards = np.sum(self.critic.compute_reward(X) * path['actions'], axis=1)
# it in critic model
if rewards.ndim == 0:
rewards = rewards[np.newaxis]
path['rewards'] = rewards
assert all([path['rewards'].ndim == 1 for path in paths])
# so now the output of critic is baked into rewards
# have the sampler reprocess the sample!
samples_data = self.sampler.process_samples(itr, paths)
# this optimize the policy (actor) (in the end), we update policy
super(ActorCritic, self).optimize_policy(itr, samples_data)
# so now normal policy and critic are updated, we update delayed policy, critic
# you can double check if this is correct
self.delayed_policy.set_param_values(
self.delayed_policy.get_param_values() * (1.0 - self.soft_target_tau) +
self.policy.get_param_values() * self.soft_target_tau)
self.target_critic.set_param_values(
self.target_critic.get_param_values() * (1.0 - self.soft_target_tau) +
self.critic.get_param_values() * self.soft_target_tau)
# TODO: maybe check if this works!??
@overrides
def process_samples(self, itr, paths):
"""
Now this works.
"""
# we create the raw, batched paths here
max_path_length = max([len(path["rewards"]) for path in paths])
actions = [path["actions"] for path in paths]
actions = tensor_utils.pad_tensor_n(actions, max_path_length)
rewards = [path["rewards"] for path in paths]
rewards = tensor_utils.pad_tensor_n(rewards, max_path_length)
obs = [path["observations"] for path in paths]
obs = tensor_utils.pad_tensor_n(obs, max_path_length)
samples_data = dict(
observations=obs,
actions=actions,
rewards=rewards,
paths=paths,
)
# we still batch together actions and rewards and obs
return samples_data
if __name__ == '__main__':
pass
# we test baseline
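# --- Editor's illustrative sketch (not part of the original module) ----------
# The delayed policy / target critic updates in ActorCritic.optimize_policy use
# the usual DDPG-style soft update: target <- (1 - tau) * target + tau * online.
# The helper name below is hypothetical and added only for illustration; it
# works on flat parameter vectors such as those returned by get_param_values().
def _soft_update_sketch(target_params, online_params, tau=0.001):
    return target_params * (1.0 - tau) + online_params * tau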
| 38.578947
| 114
| 0.625609
|
b1372e15f25deaa263c9376112c3964a526d2b5f
| 151,680
|
py
|
Python
|
python/helpers/pycharm_generator_utils/pyparsing_py2.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/helpers/pycharm_generator_utils/pyparsing_py2.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/helpers/pycharm_generator_utils/pyparsing_py2.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2020-04-05T14:26:13.000Z
|
2021-03-09T08:18:17.000Z
|
# module pyparsing.py
#
# Copyright (c) 2003-2011 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#from __future__ import generators
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form C{"<salutation>, <addressee>!"})::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print hello, "->", greet.parseString( hello )
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "1.5.6"
__versionTime__ = "26 June 2011 10:53"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'getTokensEndLoc', 'hexnums',
'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor',
]
"""
Detect if we are running version 3.X and make appropriate changes
Robert A. Clark
"""
_PY3K = sys.version_info[0] > 2
if _PY3K:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
_ustr = str
alphas = string.ascii_lowercase + string.ascii_uppercase
else:
_MAX_INT = sys.maxint
range = xrange
set = lambda s : dict( [(c,0) for c in s] )
alphas = string.lowercase + string.uppercase
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
then < returns the unicode object | encodes it with the default encoding | ... >.
"""
if isinstance(obj,unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# The Python docs (http://docs.python.org/ref/customization.html#l2h-182)
# state that "The return value must be a string object". However, does a
# unicode object (being a subclass of basestring) count as a "string
# object"?
# If so, then return a unicode object:
return unicode(obj)
# Else encode it... but how? There are many choices... :)
# Replace unprintables with escape codes?
#return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors')
# Replace unprintables with question marks?
#return unicode(obj).encode(sys.getdefaultencoding(), 'replace')
# ...
alphas = string.lowercase + string.uppercase
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
import __builtin__
for fname in "sum len enumerate sorted reversed list tuple set any all".split():
try:
singleArgBuiltins.append(getattr(__builtin__,fname))
except AttributeError:
continue
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ['&'+s+';' for s in "amp gt lt quot apos".split()]
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
class _Constants(object):
pass
nums = string.digits
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
printables = "".join( [ c for c in string.printable if c not in string.whitespace ] )
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, pstr, loc=0, msg=None, elem=None ):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
def __getattr__( self, aname ):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if( aname == "lineno" ):
return lineno( self.loc, self.pstr )
elif( aname in ("col", "column") ):
return col( self.loc, self.pstr )
elif( aname == "line" ):
return line( self.loc, self.pstr )
else:
raise AttributeError(aname)
def __str__( self ):
return "%s (at char %d), (line:%d, col:%d)" % \
( self.msg, self.loc, self.lineno, self.column )
def __repr__( self ):
return _ustr(self)
def markInputline( self, markerString = ">!<" ):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join( [line_str[:line_column],
markerString, line_str[line_column:]])
return line_str.strip()
def __dir__(self):
return "loc msg pstr parserElement lineno col line " \
"markInputLine __str__ __repr__".split()
class ParseException(ParseBaseException):
"""exception thrown when parse expressions don't match class;
supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
pass
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
class ParseSyntaxException(ParseFatalException):
"""just like C{ParseFatalException}, but thrown internally when an
C{ErrorStop} ('-' operator) indicates that parsing is to stop immediately because
an unbacktrackable syntax error has been found"""
def __init__(self, pe):
super(ParseSyntaxException, self).__init__(
pe.pstr, pe.loc, pe.msg, pe.parserElement)
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
"""exception thrown by C{validate()} if the grammar could be improperly recursive"""
def __init__( self, parseElementList ):
self.parseElementTrace = parseElementList
def __str__( self ):
return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup)
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
"""Structured parse results, to provide multiple means of access to the parsed data:
- as a list (C{len(results)})
- by list index (C{results[0], results[1]}, etc.)
- by attribute (C{results.<resultsName>})
"""
#~ __slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__" )
def __new__(cls, toklist, name=None, asList=True, modal=True ):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, toklist, name=None, asList=True, modal=True, isinstance=isinstance ):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
if isinstance(toklist, list):
self.__toklist = toklist[:]
else:
self.__toklist = [toklist]
self.__tokdict = dict()
if name is not None and name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name,int):
name = _ustr(name) # will always return a str, but use _ustr for consistency
self.__name = name
if not toklist in (None,'',[]):
if isinstance(toklist,basestring):
toklist = [ toklist ]
if asList:
if isinstance(toklist,ParseResults):
self[name] = _ParseResultsWithOffset(toklist.copy(),0)
else:
self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError,TypeError,IndexError):
self[name] = toklist
def __getitem__( self, i ):
if isinstance( i, (int,slice) ):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[i] ])
def __setitem__( self, k, v, isinstance=isinstance ):
if isinstance(v,_ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
sub = v[0]
elif isinstance(k,int):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
sub = v
if isinstance(sub,ParseResults):
sub.__parent = wkref(self)
def __delitem__( self, i ):
if isinstance(i,(int,slice)):
mylen = len( self.__toklist )
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i+1)
# get removed indices
removed = list(range(*i.indices(mylen)))
removed.reverse()
# fixup indices in token dictionary
for name in self.__tokdict:
occurrences = self.__tokdict[name]
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
else:
del self.__tokdict[i]
def __contains__( self, k ):
return k in self.__tokdict
def __len__( self ): return len( self.__toklist )
def __bool__(self): return len( self.__toklist ) > 0
__nonzero__ = __bool__
def __iter__( self ): return iter( self.__toklist )
def __reversed__( self ): return iter( self.__toklist[::-1] )
def keys( self ):
"""Returns all named result keys."""
return self.__tokdict.keys()
def pop( self, index=-1 ):
"""Removes and returns item at specified index (default=last).
Will work with either numeric indices or dict-key indices."""
ret = self[index]
del self[index]
return ret
def get(self, key, defaultValue=None):
"""Returns named result matching the given key, or if there is no
such name, then returns the given C{defaultValue} or C{None} if no
C{defaultValue} is specified."""
if key in self:
return self[key]
else:
return defaultValue
def insert( self, index, insStr ):
"""Inserts new element at location index in the list of parsed tokens."""
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
for name in self.__tokdict:
occurrences = self.__tokdict[name]
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
def items( self ):
"""Returns all named result keys and values as a list of tuples."""
return [(k,self[k]) for k in self.__tokdict]
def values( self ):
"""Returns all named result values."""
return [ v[-1][0] for v in self.__tokdict.values() ]
def __getattr__( self, name ):
if True: #name not in self.__slots__:
if name in self.__tokdict:
if name not in self.__accumNames:
return self.__tokdict[name][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[name] ])
else:
return ""
return None
def __add__( self, other ):
ret = self.copy()
ret += other
return ret
def __iadd__( self, other ):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = ( lambda a: (a<0 and offset) or (a+offset) )
otheritems = other.__tokdict.items()
otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
for (k,vlist) in otheritems for v in vlist]
for k,v in otherdictitems:
self[k] = v
if isinstance(v[0],ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update( other.__accumNames )
return self
def __radd__(self, other):
if isinstance(other,int) and other == 0:
return self.copy()
def __repr__( self ):
return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
def __str__( self ):
out = "["
sep = ""
for i in self.__toklist:
if isinstance(i, ParseResults):
out += sep + _ustr(i)
else:
out += sep + repr(i)
sep = ", "
out += "]"
return out
def _asStringList( self, sep='' ):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance( item, ParseResults ):
out += item._asStringList()
else:
out.append( _ustr(item) )
return out
def asList( self ):
"""Returns the parse results as a nested list of matching tokens, all converted to strings."""
out = []
for res in self.__toklist:
if isinstance(res,ParseResults):
out.append( res.asList() )
else:
out.append( res )
return out
def asDict( self ):
"""Returns the named parse results as dictionary."""
return dict( self.items() )
def copy( self ):
"""Returns a new copy of a C{ParseResults} object."""
ret = ParseResults( self.__toklist )
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update( self.__accumNames )
ret.__name = self.__name
return ret
def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
"""Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
nl = "\n"
out = []
namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items()
for v in vlist ] )
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [ nl, indent, "<", selfTag, ">" ]
worklist = self.__toklist
for i,res in enumerate(worklist):
if isinstance(res,ParseResults):
if i in namedItems:
out += [ res.asXML(namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
out += [ res.asXML(None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = _xml_escape(_ustr(res))
out += [ nl, nextLevelIndent, "<", resTag, ">",
xmlBodyText,
"</", resTag, ">" ]
out += [ nl, indent, "</", selfTag, ">" ]
return "".join(out)
def __lookup(self,sub):
for k,vlist in self.__tokdict.items():
for v,loc in vlist:
if sub is v:
return k
return None
def getName(self):
"""Returns the results name for this token expression."""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (len(self) == 1 and
len(self.__tokdict) == 1 and
self.__tokdict.values()[0][0][1] in (0,-1)):
return self.__tokdict.keys()[0]
else:
return None
def dump(self,indent='',depth=0):
"""Diagnostic method for listing out the contents of a C{ParseResults}.
Accepts an optional C{indent} argument so that this string can be embedded
in a nested display of other data."""
out = []
out.append( indent+_ustr(self.asList()) )
keys = self.items()
keys.sort()
for k,v in keys:
if out:
out.append('\n')
out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
if isinstance(v,ParseResults):
if v.keys():
out.append( v.dump(indent,depth+1) )
else:
out.append(_ustr(v))
else:
out.append(_ustr(v))
return "".join(out)
# add support for pickle protocol
def __getstate__(self):
return ( self.__toklist,
( self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name ) )
def __setstate__(self,state):
self.__toklist = state[0]
(self.__tokdict,
par,
inAccumNames,
self.__name) = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
def __dir__(self):
return dir(super(ParseResults,self)) + self.keys()
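# --- Editor's illustrative sketch (not part of the original pyparsing source) -
# ParseResults can be read as a plain list or by index, as described in the
# class docstring above. The helper name below is hypothetical and added only
# for illustration.
def _example_parse_results():
    r = ParseResults( ["Hello", ",", "World", "!"] )
    assert len(r) == 4
    assert r[0] == "Hello"
    assert r.asList() == ["Hello", ",", "World", "!"]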
def col (loc,strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return (loc<len(strg) and strg[loc] == '\n') and 1 or loc - strg.rfind("\n", 0, loc)
def lineno(loc,strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return strg.count("\n",0,loc) + 1
def line( loc, strg ):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
"""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR >= 0:
return strg[lastCR+1:nextCR]
else:
return strg[lastCR+1:]
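# --- Editor's illustrative sketch (not part of the original pyparsing source) -
# A quick check of the location helpers above on a two-line string; index 4 is
# the 'd' on the second line. The helper name below is hypothetical and added
# only for illustration.
def _example_location_helpers():
    s = "ab\ncd"
    assert col(4, s) == 2      # second column of the second line
    assert lineno(4, s) == 2   # second line
    assert line(4, s) == "cd"  # text of the line containing loc 4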
def _defaultStartDebugAction( instring, loc, expr ):
print ("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
print ("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
pass
'decorator to trim function calls to match the arity of the target'
if not _PY3K:
def _trim_arity(func, maxargs=2):
limit = [0]
def wrapper(*args):
while 1:
try:
return func(*args[limit[0]:])
except TypeError:
if limit[0] <= maxargs:
limit[0] += 1
continue
raise
return wrapper
else:
def _trim_arity(func, maxargs=2):
limit = maxargs
def wrapper(*args):
#~ nonlocal limit
while 1:
try:
return func(*args[limit:])
except TypeError:
if limit:
limit -= 1
continue
raise
return wrapper
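# --- Editor's illustrative sketch (not part of the original pyparsing source) -
# _trim_arity retries the call while dropping leading arguments until the
# callable's arity matches, which is how parse actions may accept 0-3 of the
# (s, loc, toks) arguments. The helper name below is hypothetical and added
# only for illustration.
def _example_trim_arity():
    one_arg_action = lambda toks: toks          # a parse action taking only toks
    wrapped = _trim_arity(one_arg_action)
    assert wrapped("instring", 0, ["tok"]) == ["tok"]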
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
def setDefaultWhitespaceChars( chars ):
"""Overrides the default whitespace chars
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)
def __init__( self, savelist=False ):
self.parseAction = list()
self.failAction = None
#~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = ( None, None, None ) #custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy( self ):
"""Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element."""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""Define name for this expression, for use in debugging."""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original C{ParserElement} object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
C{expr("name")} in place of C{expr.setResultsName("name")} -
see L{I{__call__}<__call__>}.
"""
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches=True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self,breakFlag = True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set C{breakFlag} to True to enable, False to
disable.
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
return _parseMethod( instring, loc, doActions, callPreParse )
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse,"_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def setParseAction( self, *fns, **kwargs ):
"""Define action to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a ParseResults object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
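        For example (an illustrative sketch, using C{Word} and C{nums} which are
        defined elsewhere in this module)::
            # convert matched digit strings into Python ints
            integer = Word(nums).setParseAction(lambda t: [int(t[0])])
            integer.parseString("42")   # -> [42]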
"""
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def addParseAction( self, *fns, **kwargs ):
"""Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."""
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
        Fail action fn is a callable function that takes the arguments
C{fn(s,loc,expr,err)} where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw C{ParseFatalException}
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
def _skipIgnorables( self, instring, loc ):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc,dummy = e._parse( instring, loc )
exprsFound = True
except ParseException:
pass
return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
return loc, []
def postParse( self, instring, loc, tokenlist ):
return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
debugging = ( self.debug ) #and doActions )
if debugging or self.failAction:
#~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
if (self.debugActions[0] ):
self.debugActions[0]( instring, loc, self )
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
try:
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
except ParseBaseException:
#~ print ("Exception raised:", err)
err = None
if self.debugActions[2]:
err = sys.exc_info()[1]
self.debugActions[2]( instring, tokensStart, self, err )
if self.failAction:
if err is None:
err = sys.exc_info()[1]
self.failAction( instring, tokensStart, self, err )
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or loc >= len(instring):
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
else:
loc,tokens = self.parseImpl( instring, preloc, doActions )
tokens = self.postParse( instring, loc, tokens )
retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
except ParseBaseException:
#~ print "Exception raised in user parse action:", err
if (self.debugActions[2] ):
err = sys.exc_info()[1]
self.debugActions[2]( instring, tokensStart, self, err )
raise
else:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
if debugging:
#~ print ("Matched",self,"->",retTokens.asList())
if (self.debugActions[1] ):
self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
return loc, retTokens
def tryParse( self, instring, loc ):
try:
return self._parse( instring, loc, doActions=False )[0]
except ParseFatalException:
raise ParseException( instring, loc, self.errmsg, self)
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
lookup = (self,instring,loc,callPreParse,doActions)
if lookup in ParserElement._exprArgCache:
value = ParserElement._exprArgCache[ lookup ]
if isinstance(value, Exception):
raise value
return (value[0],value[1].copy())
else:
try:
value = self._parseNoCache( instring, loc, doActions, callPreParse )
ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy())
return value
except ParseBaseException:
pe = sys.exc_info()[1]
ParserElement._exprArgCache[ lookup ] = pe
raise
_parse = _parseNoCache
# argument cache for optimizing repeated calls when backtracking through recursive expressions
_exprArgCache = {}
def resetCache():
ParserElement._exprArgCache.clear()
resetCache = staticmethod(resetCache)
_packratEnabled = False
def enablePackrat():
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
        instead of re-executing parsing/validating code. Memoizing is done for
both valid results and parsing exceptions.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method C{ParserElement.enablePackrat()}. If
your program uses C{psyco} to "compile as you go", you must call
C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
Python will crash. For best results, call C{enablePackrat()} immediately
after importing pyparsing.
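        A typical (illustrative) call sequence::
            ParserElement.enablePackrat()
            # ... then define and use the grammar as usual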
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
ParserElement._parse = ParserElement._parseCache
enablePackrat = staticmethod(enablePackrat)
def parseString( self, instring, parseAll=False ):
"""Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
If you want the grammar to require that the entire input string be
successfully parsed, then set C{parseAll} to True (equivalent to ending
the grammar with C{StringEnd()}).
Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the C{loc} argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling C{parseWithTabs} on your grammar before calling C{parseString}
(see L{I{parseWithTabs}<parseWithTabs>})
- define your parse action using the full C{(s,loc,toks)} signature, and
reference the input string using the parse action's C{s} argument
        - explicitly expand the tabs in your input string before calling
C{parseString}
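        For example (an illustrative sketch, using C{Word} and C{alphas} defined
        elsewhere in this module)::
            greet = Word(alphas) + "," + Word(alphas) + "!"
            greet.parseString("Hello, World!")                 # -> ['Hello', ',', 'World', '!']
            greet.parseString("Hello, World!", parseAll=True)  # also requires the full input to be consumed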
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
#~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse( instring, 0 )
if parseAll:
loc = self.preParse( instring, loc )
se = Empty() + StringEnd()
se._parse( instring, loc )
except ParseBaseException:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
exc = sys.exc_info()[1]
raise exc
else:
return tokens
def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
"""Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
C{maxMatches} argument, to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs."""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn( instring, loc )
nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
except ParseException:
loc = preloc+1
else:
if nextLoc > loc:
matches += 1
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn( instring, loc )
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc+1
except ParseBaseException:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
exc = sys.exc_info()[1]
raise exc
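    # Illustrative sketch (not part of the original module): scanString is a
    # generator yielding (tokens, start, end) triples, e.g.
    #   for tokens, start, end in Word(nums).scanString("abc 123 def 45"):
    #       print(tokens, start, end)   # -> ['123'] 4 7, then ['45'] 12 14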
def transformString( self, instring ):
"""Extension to C{scanString}, to modify matching text with modified tokens that may
be returned from a parse action. To use C{transformString}, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking C{transformString()} on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. C{transformString()} returns the resulting transformed string."""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
try:
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return "".join(map(_ustr,_flatten(out)))
except ParseBaseException:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
exc = sys.exc_info()[1]
raise exc
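    # Illustrative sketch (not part of the original module): transformString
    # rewrites matched text using a parse action, e.g.
    #   wd = Word(alphas)
    #   wd.setParseAction(lambda toks: toks[0].upper())
    #   wd.transformString("now is the winter")   # -> 'NOW IS THE WINTER'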
def searchString( self, instring, maxMatches=_MAX_INT ):
"""Another extension to C{scanString}, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
C{maxMatches} argument, to clip searching after 'n' matches are found.
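        For example (illustrative; exact result formatting may differ)::
            cap_word = Word(alphas.upper(), alphas)
            cap_word.searchString("More than Iron, more than Lead")   # -> [['More'], ['Iron'], ['Lead']]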
"""
try:
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
except ParseBaseException:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
exc = sys.exc_info()[1]
raise exc
def __add__(self, other ):
"""Implementation of + operator - returns And"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""Implementation of + operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""Implementation of - operator, returns C{And} with error stop"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, And._ErrorStop(), other ] )
def __rsub__(self, other ):
"""Implementation of - operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self
def __mul__(self,other):
"""Implementation of * operator, allows use of C{expr * 3} in place of
        C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
may also include C{None} as in:
- C{expr*(n,None)} or C{expr*(n,)} is equivalent
to C{expr*n + ZeroOrMore(expr)}
(read as "at least n instances of C{expr}")
- C{expr*(None,n)} is equivalent to C{expr*(0,n)}
(read as "0 to n instances of C{expr}")
- C{expr*(None,None)} is equivalent to C{ZeroOrMore(expr)}
- C{expr*(1,None)} is equivalent to C{OneOrMore(expr)}
Note that C{expr*(None,n)} does not raise an exception if
more than n exprs exist in the input stream; that is,
C{expr*(None,n)} does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
C{expr*(None,n) + ~expr}
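        For example (illustrative)::
            ip_field   = Word(nums)
            ip_address = ip_field + ('.' + ip_field) * 3
            ip_address.parseString("192.168.0.1")   # -> ['192', '.', '168', '.', '0', '.', '1']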
"""
if isinstance(other,int):
minElements, optElements = other,0
elif isinstance(other,tuple):
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0],int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self*other[0] + ZeroOrMore(self)
elif isinstance(other[0],int) and isinstance(other[1],int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if (optElements):
def makeOptionalList(n):
if n>1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self]*minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self]*minElements)
return ret
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""Implementation of | operator - returns C{MatchFirst}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""Implementation of | operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""Implementation of ^ operator - returns C{Or}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
def __rxor__(self, other ):
"""Implementation of ^ operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other ):
"""Implementation of & operator - returns C{Each}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
def __rand__(self, other ):
"""Implementation of & operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
def __invert__( self ):
"""Implementation of ~ operator - returns C{NotAny}"""
return NotAny( self )
def __call__(self, name):
"""Shortcut for C{setResultsName}, with C{listAllMatches=default}::
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
could be written as::
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
passed as C{True}.
"""
return self.setResultsName(name)
def suppress( self ):
"""Suppresses the output of this C{ParserElement}; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress( self )
def leaveWhitespace( self ):
"""Disables the skipping of whitespace before matching the characters in the
C{ParserElement}'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars( self, chars ):
"""Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs( self ):
"""Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
Must be called before C{parseString} when the input grammar contains elements that
match C{<TAB>} characters."""
self.keepTabs = True
return self
def ignore( self, other ):
"""Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
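        For example (an illustrative sketch, using C{OneOrMore} and C{QuotedString}
        defined elsewhere in this module)::
            patt = OneOrMore(Word(alphas))
            patt.ignore(QuotedString('"'))
            patt.parseString('ahab "the whale" ishmael')   # -> ['ahab', 'ishmael']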
"""
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append( other.copy() )
else:
self.ignoreExprs.append( Suppress( other.copy() ) )
return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""Enable display of debugging messages while doing pattern matching."""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""Enable display of debugging messages while doing pattern matching.
Set C{flag} to True to enable, False to disable."""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
def __str__( self ):
return self.name
def __repr__( self ):
return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion( self, parseElementList ):
pass
def validate( self, validateTrace=[] ):
"""Check defined expressions for valid structure, check for infinite recursive definitions."""
self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
"""Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
f = open(file_or_filename, "rb")
file_contents = f.read()
f.close()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
exc = sys.exc_info()[1]
raise exc
def getException(self):
return ParseException("",0,self.errmsg,self)
def __getattr__(self,aname):
if aname == "myException":
            self.myException = ret = self.getException()
            return ret
else:
raise AttributeError("no such attribute " + aname)
def __eq__(self,other):
if isinstance(other, ParserElement):
return self is other or self.__dict__ == other.__dict__
elif isinstance(other, basestring):
try:
self.parseString(_ustr(other), parseAll=True)
return True
except ParseBaseException:
return False
else:
return super(ParserElement,self)==other
def __ne__(self,other):
return not (self == other)
def __hash__(self):
return hash(id(self))
def __req__(self,other):
return self == other
def __rne__(self,other):
return not (self == other)
class Token(ParserElement):
"""Abstract C{ParserElement} subclass, for defining atomic matching patterns."""
def __init__( self ):
super(Token,self).__init__( savelist=False )
def setName(self, name):
s = super(Token,self).setName(name)
self.errmsg = "Expected " + self.name
return s
class Empty(Token):
"""An empty token, will always match."""
def __init__( self ):
super(Empty,self).__init__()
self.name = "Empty"
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""A token that will never match."""
def __init__( self ):
super(NoMatch,self).__init__()
self.name = "NoMatch"
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
def parseImpl( self, instring, loc, doActions=True ):
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class Literal(Token):
"""Token to exactly match a specified string."""
def __init__( self, matchString ):
super(Literal,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Literal; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.__class__ = Empty
self.name = '"%s"' % _ustr(self.match)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
# Performance tuning: this routine gets called a *lot*
# if this is a single character match string and the first character matches,
# short-circuit as quickly as possible, and avoid calling startswith
#~ @profile
def parseImpl( self, instring, loc, doActions=True ):
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) ):
return loc+self.matchLen, self.match
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
_L = Literal
class Keyword(Token):
"""Token to exactly match a specified string as a keyword, that is, it must be
immediately followed by a non-keyword character. Compare with C{Literal}::
Literal("if") will match the leading C{'if'} in C{'ifAndOnlyIf'}.
Keyword("if") will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
Accepts two optional constructor arguments in addition to the keyword string:
C{identChars} is a string of characters that would be valid identifier characters,
defaulting to all alphanumerics + "_" and "$"; C{caseless} allows case-insensitive
matching, default is C{False}.
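    For example (illustrative)::
        Keyword("start").parseString("start here")    # matches, -> ['start']
        Keyword("start").parseString("startling")     # raises ParseException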
"""
DEFAULT_KEYWORD_CHARS = alphanums+"_$"
def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
super(Keyword,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Keyword; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.name = '"%s"' % self.match
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = matchString.upper()
identChars = identChars.upper()
self.identChars = set(identChars)
def parseImpl( self, instring, loc, doActions=True ):
if self.caseless:
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
(loc == 0 or instring[loc-1].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
else:
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
(loc == 0 or instring[loc-1] not in self.identChars) ):
return loc+self.matchLen, self.match
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
def copy(self):
c = super(Keyword,self).copy()
c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
return c
def setDefaultKeywordChars( chars ):
"""Overrides the default Keyword chars
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
setDefaultKeywordChars = staticmethod(setDefaultKeywordChars)
class CaselessLiteral(Literal):
"""Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
"""
def __init__( self, matchString ):
super(CaselessLiteral,self).__init__( matchString.upper() )
# Preserve the defining literal.
self.returnString = matchString
self.name = "'%s'" % self.returnString
self.errmsg = "Expected " + self.name
def parseImpl( self, instring, loc, doActions=True ):
if instring[ loc:loc+self.matchLen ].upper() == self.match:
return loc+self.matchLen, self.returnString
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class CaselessKeyword(Keyword):
def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ):
super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
def parseImpl( self, instring, loc, doActions=True ):
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class Word(Token):
"""Token for matching words composed of allowed character sets.
Defined with string containing all allowed initial characters,
an optional string containing allowed body characters (if omitted,
defaults to the initial character set), and an optional minimum,
maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction. An optional
C{exclude} parameter can list characters that might be found in
the input C{bodyChars} string; useful to define a word of all printables
except for one or two characters, for instance.
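    For example (illustrative)::
        identifier = Word(alphas+"_", alphanums+"_")       # leading letter or '_', then letters, digits, or '_'
        csv_value  = Word(printables, excludeChars=",")    # any printable characters except the comma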
"""
def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
super(Word,self).__init__()
if excludeChars:
initChars = ''.join([c for c in initChars if c not in excludeChars])
if bodyChars:
bodyChars = ''.join([c for c in bodyChars if c not in excludeChars])
self.initCharsOrig = initChars
self.initChars = set(initChars)
if bodyChars :
self.bodyCharsOrig = bodyChars
self.bodyChars = set(bodyChars)
else:
self.bodyCharsOrig = initChars
self.bodyChars = set(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asKeyword = asKeyword
if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
if self.bodyCharsOrig == self.initCharsOrig:
self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
elif len(self.bodyCharsOrig) == 1:
self.reString = "%s[%s]*" % \
(re.escape(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
else:
self.reString = "[%s][%s]*" % \
(_escapeRegexRangeChars(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
if self.asKeyword:
self.reString = r"\b"+self.reString+r"\b"
try:
self.re = re.compile( self.reString )
except:
self.re = None
def parseImpl( self, instring, loc, doActions=True ):
if self.re:
result = self.re.match(instring,loc)
if not result:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
loc = result.end()
return loc, result.group()
if not(instring[ loc ] in self.initChars):
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min( maxloc, instrlen )
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
if self.asKeyword:
if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
throwException = True
if throwException:
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, instring[start:loc]
def __str__( self ):
try:
return super(Word,self).__str__()
except:
pass
if self.strRepr is None:
def charsAsStr(s):
if len(s)>4:
return s[:4]+"..."
else:
return s
if ( self.initCharsOrig != self.bodyCharsOrig ):
self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
else:
self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
return self.strRepr
class Regex(Token):
"""Token for matching strings that match a given regular expression.
Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
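    For example (illustrative)::
        ident = Regex("[A-Za-z_][A-Za-z0-9_]*")
        ident.parseString("total_1")   # -> ['total_1']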
"""
compiledREtype = type(re.compile("[A-Z]"))
def __init__( self, pattern, flags=0):
"""The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
super(Regex,self).__init__()
if isinstance(pattern, basestring):
if len(pattern) == 0:
warnings.warn("null string passed to Regex; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
SyntaxWarning, stacklevel=2)
raise
elif isinstance(pattern, Regex.compiledREtype):
self.re = pattern
self.pattern = \
self.reString = str(pattern)
self.flags = flags
else:
raise ValueError("Regex may only be constructed with a string or a compiled RE object")
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = self.re.match(instring,loc)
if not result:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
loc = result.end()
d = result.groupdict()
ret = ParseResults(result.group())
if d:
for k in d:
ret[k] = d[k]
return loc,ret
def __str__( self ):
try:
return super(Regex,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "Re:(%s)" % repr(self.pattern)
return self.strRepr
class QuotedString(Token):
"""Token for matching strings that are delimited by quoting characters.
"""
def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
"""
Defined with the following parameters:
- quoteChar - string of one or more characters defining the quote delimiting string
- escChar - character to escape quotes, typically backslash (default=None)
- escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
- multiline - boolean indicating whether quotes can span multiple lines (default=False)
- unquoteResults - boolean indicating whether the matched text should be unquoted (default=True)
- endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar)
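        For example (illustrative)::
            QuotedString('"').parseString('"hello world"')                   # -> ['hello world']
            QuotedString("{{", endQuoteChar="}}").parseString("{{name}}")    # -> ['name']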
"""
super(QuotedString,self).__init__()
        # remove white space from quote chars - won't work anyway
quoteChar = quoteChar.strip()
if len(quoteChar) == 0:
warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
if endQuoteChar is None:
endQuoteChar = quoteChar
else:
endQuoteChar = endQuoteChar.strip()
if len(endQuoteChar) == 0:
warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
if multiline:
self.flags = re.MULTILINE | re.DOTALL
self.pattern = r'%s(?:[^%s%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
else:
self.flags = 0
self.pattern = r'%s(?:[^%s\n\r%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
if len(self.endQuoteChar) > 1:
self.pattern += (
'|(?:' + ')|(?:'.join(["%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
_escapeRegexRangeChars(self.endQuoteChar[i]))
for i in range(len(self.endQuoteChar)-1,0,-1)]) + ')'
)
if escQuote:
self.pattern += (r'|(?:%s)' % re.escape(escQuote))
if escChar:
self.pattern += (r'|(?:%s.)' % re.escape(escChar))
charset = ''.join(set(self.quoteChar[0]+self.endQuoteChar[0])).replace('^',r'\^').replace('-',r'\-')
self.escCharReplacePattern = re.escape(self.escChar)+("([%s])" % charset)
self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
if not result:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
if isinstance(ret,basestring):
# replace escaped characters
if self.escChar:
                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
def __str__( self ):
try:
return super(QuotedString,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
return self.strRepr
class CharsNotIn(Token):
"""Token for matching words composed of characters *not* in a given set.
Defined with string containing all disallowed characters, and an optional
minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction.
"""
def __init__( self, notChars, min=1, max=0, exact=0 ):
super(CharsNotIn,self).__init__()
self.skipWhitespace = False
self.notChars = notChars
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = ( self.minLen == 0 )
self.mayIndexError = False
def parseImpl( self, instring, loc, doActions=True ):
if instring[loc] in self.notChars:
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
start = loc
loc += 1
notchars = self.notChars
maxlen = min( start+self.maxLen, len(instring) )
while loc < maxlen and \
(instring[loc] not in notchars):
loc += 1
if loc - start < self.minLen:
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, instring[start:loc]
def __str__( self ):
try:
return super(CharsNotIn, self).__str__()
except:
pass
if self.strRepr is None:
if len(self.notChars) > 4:
self.strRepr = "!W:(%s...)" % self.notChars[:4]
else:
self.strRepr = "!W:(%s)" % self.notChars
return self.strRepr
class White(Token):
"""Special matching class for matching whitespace. Normally, whitespace is ignored
by pyparsing grammars. This class is included when some whitespace structures
are significant. Define with a string containing the whitespace characters to be
matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
as defined for the C{Word} class."""
whiteStrs = {
" " : "<SPC>",
"\t": "<TAB>",
"\n": "<LF>",
"\r": "<CR>",
"\f": "<FF>",
}
def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
super(White,self).__init__()
self.matchWhite = ws
self.setWhitespaceChars( "".join([c for c in self.whiteChars if c not in self.matchWhite]) )
#~ self.leaveWhitespace()
self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite]))
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
def parseImpl( self, instring, loc, doActions=True ):
if not(instring[ loc ] in self.matchWhite):
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min( maxloc, len(instring) )
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, instring[start:loc]
class _PositionToken(Token):
def __init__( self ):
super(_PositionToken,self).__init__()
self.name=self.__class__.__name__
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(_PositionToken):
"""Token to advance to a specific column of input text; useful for tabular report scraping."""
def __init__( self, colno ):
super(GoToColumn,self).__init__()
self.col = colno
def preParse( self, instring, loc ):
if col(loc,instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
thiscol = col( loc, instring )
if thiscol > self.col:
raise ParseException( instring, loc, "Text not in expected column", self )
newloc = loc + self.col - thiscol
ret = instring[ loc: newloc ]
return newloc, ret
class LineStart(_PositionToken):
"""Matches if current position is at the beginning of a line within the parse string"""
def __init__( self ):
super(LineStart,self).__init__()
self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
self.errmsg = "Expected start of line"
def preParse( self, instring, loc ):
preloc = super(LineStart,self).preParse(instring,loc)
if instring[preloc] == "\n":
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
if not( loc==0 or
(loc == self.preParse( instring, 0 )) or
(instring[loc-1] == "\n") ): #col(loc, instring) != 1:
#~ raise ParseException( instring, loc, "Expected start of line" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
class LineEnd(_PositionToken):
"""Matches if current position is at the end of a line within the parse string"""
def __init__( self ):
super(LineEnd,self).__init__()
self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
self.errmsg = "Expected end of line"
def parseImpl( self, instring, loc, doActions=True ):
if loc<len(instring):
if instring[loc] == "\n":
return loc+1, "\n"
else:
#~ raise ParseException( instring, loc, "Expected end of line" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
elif loc == len(instring):
return loc+1, []
else:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class StringStart(_PositionToken):
"""Matches if current position is at the beginning of the parse string"""
def __init__( self ):
super(StringStart,self).__init__()
self.errmsg = "Expected start of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc != 0:
# see if entire string up to here is just whitespace and ignoreables
if loc != self.preParse( instring, 0 ):
#~ raise ParseException( instring, loc, "Expected start of text" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
class StringEnd(_PositionToken):
"""Matches if current position is at the end of the parse string"""
def __init__( self ):
super(StringEnd,self).__init__()
self.errmsg = "Expected end of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc < len(instring):
#~ raise ParseException( instring, loc, "Expected end of text" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
elif loc == len(instring):
return loc+1, []
elif loc > len(instring):
return loc, []
else:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class WordStart(_PositionToken):
"""Matches if the current position is at the beginning of a Word, and
is not preceded by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\\b} behavior of regular expressions,
use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
the string being parsed, or at the beginning of a line.
"""
def __init__(self, wordChars = printables):
super(WordStart,self).__init__()
self.wordChars = set(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True ):
if loc != 0:
if (instring[loc-1] in self.wordChars or
instring[loc] not in self.wordChars):
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
class WordEnd(_PositionToken):
"""Matches if the current position is at the end of a Word, and
is not followed by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\\b} behavior of regular expressions,
use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
the string being parsed, or at the end of a line.
"""
def __init__(self, wordChars = printables):
super(WordEnd,self).__init__()
self.wordChars = set(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True ):
instrlen = len(instring)
if instrlen>0 and loc<instrlen:
if (instring[loc] in self.wordChars or
instring[loc-1] not in self.wordChars):
#~ raise ParseException( instring, loc, "Expected end of word" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
class ParseExpression(ParserElement):
"""Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
def __init__( self, exprs, savelist = False ):
super(ParseExpression,self).__init__(savelist)
if isinstance( exprs, list ):
self.exprs = exprs
elif isinstance( exprs, basestring ):
self.exprs = [ Literal( exprs ) ]
else:
try:
self.exprs = list( exprs )
except TypeError:
self.exprs = [ exprs ]
self.callPreparse = False
def __getitem__( self, i ):
return self.exprs[i]
def append( self, other ):
self.exprs.append( other )
self.strRepr = None
return self
def leaveWhitespace( self ):
"""Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [ e.copy() for e in self.exprs ]
for e in self.exprs:
e.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
else:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
return self
def __str__( self ):
try:
return super(ParseExpression,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
return self.strRepr
def streamline( self ):
super(ParseExpression,self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for Or's and MatchFirst's)
if ( len(self.exprs) == 2 ):
other = self.exprs[0]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = other.exprs[:] + [ self.exprs[1] ]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = self.exprs[:-1] + other.exprs[:]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
return self
def setResultsName( self, name, listAllMatches=False ):
ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
return ret
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
for e in self.exprs:
e.validate(tmp)
self.checkRecursion( [] )
def copy(self):
ret = super(ParseExpression,self).copy()
ret.exprs = [e.copy() for e in self.exprs]
return ret
class And(ParseExpression):
"""Requires all given C{ParseExpression}s to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the C{'+'} operator.
"""
class _ErrorStop(Empty):
def __init__(self, *args, **kwargs):
super(Empty,self).__init__(*args, **kwargs)
self.leaveWhitespace()
def __init__( self, exprs, savelist = True ):
super(And,self).__init__(exprs, savelist)
self.mayReturnEmpty = True
for e in self.exprs:
if not e.mayReturnEmpty:
self.mayReturnEmpty = False
break
self.setWhitespaceChars( exprs[0].whiteChars )
self.skipWhitespace = exprs[0].skipWhitespace
self.callPreparse = True
def parseImpl( self, instring, loc, doActions=True ):
# pass False as last arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
errorStop = False
for e in self.exprs[1:]:
if isinstance(e, And._ErrorStop):
errorStop = True
continue
if errorStop:
try:
loc, exprtokens = e._parse( instring, loc, doActions )
except ParseSyntaxException:
raise
except ParseBaseException:
pe = sys.exc_info()[1]
raise ParseSyntaxException(pe)
except IndexError:
raise ParseSyntaxException( ParseException(instring, len(instring), self.errmsg, self) )
else:
loc, exprtokens = e._parse( instring, loc, doActions )
if exprtokens or exprtokens.keys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other ):
if isinstance( other, basestring ):
other = Literal( other )
return self.append( other ) #And( [ self, other ] )
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
if not e.mayReturnEmpty:
break
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
class Or(ParseExpression):
"""Requires that at least one C{ParseExpression} is found.
If two expressions match, the expression that matches the longest string will be used.
May be constructed using the C{'^'} operator.
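    For example (illustrative; contrast with C{MatchFirst} below)::
        expr = Literal("3") ^ Literal("3.1416")
        expr.parseString("3.1416")   # -> ['3.1416'], the longest alternative wins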
"""
def __init__( self, exprs, savelist = False ):
super(Or,self).__init__(exprs, savelist)
self.mayReturnEmpty = False
for e in self.exprs:
if e.mayReturnEmpty:
self.mayReturnEmpty = True
break
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxMatchLoc = -1
maxException = None
for e in self.exprs:
try:
loc2 = e.tryParse( instring, loc )
except ParseException:
err = sys.exc_info()[1]
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
else:
if loc2 > maxMatchLoc:
maxMatchLoc = loc2
maxMatchExp = e
if maxMatchLoc < 0:
if maxException is not None:
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
return maxMatchExp._parse( instring, loc, doActions )
def __ixor__(self, other ):
if isinstance( other, basestring ):
other = Literal( other )
return self.append( other ) #Or( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
"""Requires that at least one C{ParseExpression} is found.
If two expressions match, the first one listed is the one that will match.
May be constructed using the C{'|'} operator.
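    For example (illustrative; contrast with C{Or} above)::
        expr = Literal("3") | Literal("3.1416")
        expr.parseString("3.1416")   # -> ['3'], the first listed alternative wins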
"""
def __init__( self, exprs, savelist = False ):
super(MatchFirst,self).__init__(exprs, savelist)
if exprs:
self.mayReturnEmpty = False
for e in self.exprs:
if e.mayReturnEmpty:
self.mayReturnEmpty = True
break
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
ret = e._parse( instring, loc, doActions )
return ret
            except ParseException:
                err = sys.exc_info()[1]
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if maxException is not None:
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ior__(self, other ):
if isinstance( other, basestring ):
other = Literal( other )
return self.append( other ) #MatchFirst( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
"""Requires all given C{ParseExpression}s to be found, but in any order.
Expressions may be separated by whitespace.
May be constructed using the C{'&'} operator.
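    For example (illustrative)::
        color = Keyword("red") | Keyword("blue")
        size  = Word(nums)
        spec  = color & size
        spec.parseString("7 red")   # -> ['7', 'red']; the two parts may appear in either order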
"""
def __init__( self, exprs, savelist = True ):
super(Each,self).__init__(exprs, savelist)
self.mayReturnEmpty = True
for e in self.exprs:
if not e.mayReturnEmpty:
self.mayReturnEmpty = False
break
self.skipWhitespace = True
self.initExprGroups = True
def parseImpl( self, instring, loc, doActions=True ):
if self.initExprGroups:
opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
opt2 = [ e for e in self.exprs if e.mayReturnEmpty and e not in opt1 ]
self.optionals = opt1 + opt2
self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse( instring, tmpLoc )
except ParseException:
failed.append(e)
else:
matchOrder.append(e)
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join( [ _ustr(e) for e in tmpReqd ] )
raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
# add any unmatched Optionals, in case they have default values defined
matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
resultlist = []
for e in matchOrder:
loc,results = e._parse(instring,loc,doActions)
resultlist.append(results)
finalResults = ParseResults([])
for r in resultlist:
dups = {}
for k in r.keys():
if k in finalResults.keys():
tmp = ParseResults(finalResults[k])
tmp += ParseResults(r[k])
dups[k] = tmp
finalResults += ParseResults(r)
for k,v in dups.items():
finalResults[k] = v
return loc, finalResults
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
"""Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens."""
def __init__( self, expr, savelist=False ):
super(ParseElementEnhance,self).__init__(savelist)
if isinstance( expr, basestring ):
expr = Literal(expr)
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars( expr.whiteChars )
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr is not None:
return self.expr._parse( instring, loc, doActions, callPreParse=False )
else:
raise ParseException("",loc,self.errmsg,self)
def leaveWhitespace( self ):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
else:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
return self
def streamline( self ):
super(ParseElementEnhance,self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion( self, parseElementList ):
if self in parseElementList:
raise RecursiveGrammarException( parseElementList+[self] )
subRecCheckList = parseElementList[:] + [ self ]
if self.expr is not None:
self.expr.checkRecursion( subRecCheckList )
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion( [] )
def __str__( self ):
try:
return super(ParseElementEnhance,self).__str__()
except:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""Lookahead matching of the given parse expression. C{FollowedBy}
does *not* advance the parsing position within the input string, it only
verifies that the specified parse expression matches at the current
position. C{FollowedBy} always returns a null token list."""
def __init__( self, expr ):
super(FollowedBy,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
self.expr.tryParse( instring, loc )
return loc, []
class NotAny(ParseElementEnhance):
"""Lookahead to disallow matching with the given parse expression. C{NotAny}
does *not* advance the parsing position within the input string, it only
verifies that the specified parse expression does *not* match at the current
position. Also, C{NotAny} does *not* skip over leading whitespace. C{NotAny}
always returns a null token list. May be constructed using the '~' operator."""
def __init__( self, expr ):
super(NotAny,self).__init__(expr)
#~ self.leaveWhitespace()
self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
try:
self.expr.tryParse( instring, loc )
except (ParseException,IndexError):
pass
else:
#~ raise ParseException(instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
class ZeroOrMore(ParseElementEnhance):
"""Optional repetition of zero or more of the given expression."""
def __init__( self, expr ):
super(ZeroOrMore,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
tokens = []
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
while 1:
if hasIgnoreExprs:
preloc = self._skipIgnorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self.expr._parse( instring, preloc, doActions )
if tmptokens or tmptokens.keys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
def setResultsName( self, name, listAllMatches=False ):
ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches)
ret.saveAsList = True
return ret
class OneOrMore(ParseElementEnhance):
"""Repetition of one or more of the given expression."""
def parseImpl( self, instring, loc, doActions=True ):
# must be at least one
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
try:
hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
while 1:
if hasIgnoreExprs:
preloc = self._skipIgnorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self.expr._parse( instring, preloc, doActions )
if tmptokens or tmptokens.keys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
def setResultsName( self, name, listAllMatches=False ):
ret = super(OneOrMore,self).setResultsName(name,listAllMatches)
ret.saveAsList = True
return ret
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
"""Optional matching of the given expression.
A default return string can also be specified, if the optional expression
is not found.
"""
def __init__( self, exprs, default=_optionalNotMatched ):
super(Optional,self).__init__( exprs, savelist=False )
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
except (ParseException,IndexError):
if self.defaultValue is not _optionalNotMatched:
if self.expr.resultsName:
tokens = ParseResults([ self.defaultValue ])
tokens[self.expr.resultsName] = self.defaultValue
else:
tokens = [ self.defaultValue ]
else:
tokens = []
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
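# A minimal usage sketch of Optional with a default value; the _optional_demo name
# and the toy ZIP-code grammar are illustrative additions, not part of the original API.
def _optional_demo():
    # five-digit ZIP with an optional four-digit extension; when the extension is
    # missing, the supplied default is returned in its place
    zipCode = Word(nums, exact=5) + Optional(Suppress("-") + Word(nums, exact=4), default="0000")
    return (zipCode.parseString("02139").asList(),        # ['02139', '0000']
            zipCode.parseString("02139-4301").asList())   # ['02139', '4301']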
class SkipTo(ParseElementEnhance):
"""Token for skipping over all undefined text until the matched expression is found.
If C{include} is set to true, the matched expression is also parsed (the skipped text
and matched expression are returned as a 2-element list). The C{ignore}
argument is used to define grammars (typically quoted strings and comments) that
might contain false matches.
"""
def __init__( self, other, include=False, ignore=None, failOn=None ):
super( SkipTo, self ).__init__( other )
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.asList = False
if failOn is not None and isinstance(failOn, basestring):
self.failOn = Literal(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
startLoc = loc
instrlen = len(instring)
expr = self.expr
failParse = False
while loc <= instrlen:
try:
if self.failOn:
try:
self.failOn.tryParse(instring, loc)
except ParseBaseException:
pass
else:
failParse = True
raise ParseException(instring, loc, "Found expression " + str(self.failOn))
failParse = False
if self.ignoreExpr is not None:
while 1:
try:
loc = self.ignoreExpr.tryParse(instring,loc)
# print "found ignoreExpr, advance to", loc
except ParseBaseException:
break
expr._parse( instring, loc, doActions=False, callPreParse=False )
skipText = instring[startLoc:loc]
if self.includeMatch:
loc,mat = expr._parse(instring,loc,doActions,callPreParse=False)
if mat:
skipRes = ParseResults( skipText )
skipRes += mat
return loc, [ skipRes ]
else:
return loc, [ skipText ]
else:
return loc, [ skipText ]
except (ParseException,IndexError):
if failParse:
raise
else:
loc += 1
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class Forward(ParseElementEnhance):
"""Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
Note: take care when assigning to C{Forward} not to overlook precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the C{Forward}::
fwdExpr << (a | b | c)
"""
def __init__( self, other=None ):
super(Forward,self).__init__( other, savelist=False )
def __lshift__( self, other ):
if isinstance( other, basestring ):
other = Literal(other)
self.expr = other
self.mayReturnEmpty = other.mayReturnEmpty
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars( self.expr.whiteChars )
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return None
def leaveWhitespace( self ):
self.skipWhitespace = False
return self
def streamline( self ):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate( self, validateTrace=[] ):
if self not in validateTrace:
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__( self ):
if hasattr(self,"name"):
return self.name
self._revertClass = self.__class__
self.__class__ = _ForwardNoRecurse
try:
if self.expr is not None:
retString = _ustr(self.expr)
else:
retString = "None"
finally:
self.__class__ = self._revertClass
return self.__class__.__name__ + ": " + retString
def copy(self):
if self.expr is not None:
return super(Forward,self).copy()
else:
ret = Forward()
ret << self
return ret
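# A small sketch of the '<<' precedence note above; the _forward_demo name and the toy
# arithmetic grammar are illustrative only.  The right-hand side is parenthesized so
# that '|' binds before '<<'.
def _forward_demo():
    expr = Forward()
    atom = Word(nums) | Group(Suppress("(") + expr + Suppress(")"))
    expr << (atom + ZeroOrMore(oneOf("+ -") + atom))
    return expr.parseString("1 + (2 - 3)").asList()   # ['1', '+', ['2', '-', '3']]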
class _ForwardNoRecurse(Forward):
def __str__( self ):
return "..."
class TokenConverter(ParseElementEnhance):
"""Abstract subclass of C{ParseExpression}, for converting parsed results."""
def __init__( self, expr, savelist=False ):
super(TokenConverter,self).__init__( expr )#, savelist )
self.saveAsList = False
class Upcase(TokenConverter):
"""Converter to upper case all matching tokens."""
def __init__(self, *args):
super(Upcase,self).__init__(*args)
warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead",
DeprecationWarning,stacklevel=2)
def postParse( self, instring, loc, tokenlist ):
return list(map( string.upper, tokenlist ))
class Combine(TokenConverter):
"""Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the input string;
this can be disabled by specifying C{'adjacent=False'} in the constructor.
"""
def __init__( self, expr, joinString="", adjacent=True ):
super(Combine,self).__init__( expr )
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore( self, other ):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super( Combine, self).ignore( other )
return self
def postParse( self, instring, loc, tokenlist ):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
if self.resultsName and len(retToks.keys())>0:
return [ retToks ]
else:
return retToks
class Group(TokenConverter):
"""Converter to return the matched tokens as a list - useful for returning tokens of C{ZeroOrMore} and C{OneOrMore} expressions."""
def __init__( self, expr ):
super(Group,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
return [ tokenlist ]
class Dict(TokenConverter):
"""Converter to return a repetitive expression as a list, but also as a dictionary.
Each element can also be referenced using the first token in the expression as its key.
       Useful for tabular report scraping when the first column can be used as an item key.
"""
def __init__( self, exprs ):
super(Dict,self).__init__( exprs )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
for i,tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey,int):
ikey = _ustr(tok[0]).strip()
if len(tok)==1:
tokenlist[ikey] = _ParseResultsWithOffset("",i)
elif len(tok)==2 and not isinstance(tok[1],ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
else:
dictvalue = tok.copy() #ParseResults(i)
del dictvalue[0]
if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.keys()):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
if self.resultsName:
return [ tokenlist ]
else:
return tokenlist
class Suppress(TokenConverter):
"""Converter for ignoring the results of a parsed expression."""
def postParse( self, instring, loc, tokenlist ):
return []
def suppress( self ):
return self
class OnlyOnce(object):
"""Wrapper for parse actions, to ensure they are only called once."""
def __init__(self, methodCall):
self.callable = _trim_arity(methodCall)
self.called = False
def __call__(self,s,l,t):
if not self.called:
results = self.callable(s,l,t)
self.called = True
return results
raise ParseException(s,l,"")
def reset(self):
self.called = False
def traceParseAction(f):
"""Decorator for debugging parse actions."""
f = _trim_arity(f)
def z(*paArgs):
thisFunc = f.func_name
s,l,t = paArgs[-3:]
if len(paArgs)>3:
thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )
try:
ret = f(*paArgs)
except Exception:
exc = sys.exc_info()[1]
sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
raise
sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
"""Helper to define a delimited list of expressions - the delimiter defaults to ','.
By default, the list elements and delimiters can have intervening whitespace, and
comments, but this can be overridden by passing C{combine=True} in the constructor.
If C{combine} is set to True, the matching tokens are returned as a single token
string, with the delimiters included; otherwise, the matching tokens are returned
as a list of tokens, with the delimiters suppressed.
"""
dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
if combine:
return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
else:
return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
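# A minimal usage sketch of delimitedList; the _delimited_list_demo name and the
# sample input are illustrative only.
def _delimited_list_demo():
    # delimiters are suppressed by default, so only the list items are returned
    wordList = delimitedList(Word(alphas))
    return wordList.parseString("aaa, bbb, ccc").asList()   # ['aaa', 'bbb', 'ccc']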
def countedArray( expr, intExpr=None ):
"""Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
       The matched tokens are returned as a list of expr tokens - the leading count token is suppressed.
"""
arrayExpr = Forward()
def countFieldParseAction(s,l,t):
n = t[0]
arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
return []
if intExpr is None:
intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
else:
intExpr = intExpr.copy()
intExpr.setName("arrayLen")
intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
return ( intExpr + arrayExpr )
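# A minimal usage sketch of countedArray; the _counted_array_demo name and the
# sample input are illustrative only.
def _counted_array_demo():
    # the leading count is parsed and suppressed; the counted items come back grouped
    wordArray = countedArray(Word(alphas))
    return wordArray.parseString("3 ab cd ef").asList()   # [['ab', 'cd', 'ef']]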
def _flatten(L):
ret = []
for i in L:
if isinstance(i,list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
def matchPreviousLiteral(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches a
       previous literal, it will also match the leading C{"1:1"} in C{"1:10"}.
If this is not desired, use C{matchPreviousExpr}.
Do *not* use with packrat parsing enabled.
"""
rep = Forward()
def copyTokenToRepeater(s,l,t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And( [ Literal(tt) for tt in tflat ] )
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
return rep
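# A minimal usage sketch of matchPreviousLiteral, following the docstring example;
# the _match_previous_demo name is illustrative only.
def _match_previous_demo():
    first = Word(nums)
    second = matchPreviousLiteral(first)
    matchExpr = first + ":" + second
    # "1:1" parses; "1:2" raises ParseException because the repeat differs from the original
    return matchExpr.parseString("1:1").asList()   # ['1', ':', '1']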
def matchPreviousExpr(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches by
       expressions, it will *not* match the leading C{"1:1"} in C{"1:10"};
the expressions are evaluated first, and then compared, so
C{"1"} is compared with C{"10"}.
Do *not* use with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep << e2
def copyTokenToRepeater(s,l,t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s,l,t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException("",0,"")
rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
return rep
def _escapeRegexRangeChars(s):
#~ escape these chars: ^-]
for c in r"\^-]":
s = s.replace(c,_bslash+c)
s = s.replace("\n",r"\n")
s = s.replace("\t",r"\t")
return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
"""Helper to quickly define a set of alternative Literals, and makes sure to do
longest-first testing when there is a conflict, regardless of the input order,
but returns a C{MatchFirst} for best performance.
Parameters:
- strs - a string of space-delimited literals, or a list of string literals
- caseless - (default=False) - treat all literals as caseless
- useRegex - (default=True) - as an optimization, will generate a Regex
object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
if creating a C{Regex} raises an exception)
"""
if caseless:
isequal = ( lambda a,b: a.upper() == b.upper() )
masks = ( lambda a,b: b.upper().startswith(a.upper()) )
parseElementClass = CaselessLiteral
else:
isequal = ( lambda a,b: a == b )
masks = ( lambda a,b: b.startswith(a) )
parseElementClass = Literal
if isinstance(strs,(list,tuple)):
symbols = list(strs[:])
elif isinstance(strs,basestring):
symbols = strs.split()
else:
warnings.warn("Invalid argument to oneOf, expected string or list",
SyntaxWarning, stacklevel=2)
i = 0
while i < len(symbols)-1:
cur = symbols[i]
for j,other in enumerate(symbols[i+1:]):
if ( isequal(other, cur) ):
del symbols[i+j+1]
break
elif ( masks(cur, other) ):
del symbols[i+j+1]
symbols.insert(i,other)
cur = other
break
else:
i += 1
if not caseless and useRegex:
#~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
try:
if len(symbols)==len("".join(symbols)):
return Regex( "[%s]" % "".join( [ _escapeRegexRangeChars(sym) for sym in symbols] ) )
else:
return Regex( "|".join( [ re.escape(sym) for sym in symbols] ) )
except:
warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning, stacklevel=2)
# last resort, just use MatchFirst
return MatchFirst( [ parseElementClass(sym) for sym in symbols ] )
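# A minimal usage sketch of oneOf's longest-first matching; the _one_of_demo name and
# the operator list are illustrative only.
def _one_of_demo():
    comparisonOp = oneOf("< <= > >= = !=")
    # '<=' matches as a whole even though '<' appears earlier in the list
    return comparisonOp.parseString("<=").asList()   # ['<=']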
def dictOf( key, value ):
"""Helper to easily and clearly define a dictionary by specifying the respective patterns
for the key and value. Takes care of defining the C{Dict}, C{ZeroOrMore}, and C{Group} tokens
in the proper order. The key pattern can include delimiting markers or punctuation,
as long as they are suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the C{Dict} results can include named token
fields.
"""
return Dict( ZeroOrMore( Group ( key + value ) ) )
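# A minimal usage sketch of dictOf; the _dict_of_demo name and the sample
# "key: value" data are illustrative only.
def _dict_of_demo():
    attrKey = Word(alphas) + Suppress(":")
    attrValue = Word(nums)
    attrDict = dictOf(attrKey, attrValue)
    result = attrDict.parseString("shape: 3 size: 10")
    return result["shape"], result["size"]   # ('3', '10')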
def originalTextFor(expr, asString=True):
"""Helper to return the original, untokenized text for a given expression. Useful to
restore the parsed fields of an HTML start tag into the raw tag text itself, or to
revert separate tokens with intervening whitespace back to the original matching
input text. Simpler to use than the parse action C{L{keepOriginalText}}, and does not
require the inspect module to chase up the call stack. By default, returns a
string containing the original parsed text.
If the optional C{asString} argument is passed as C{False}, then the return value is a
C{ParseResults} containing any results names that were originally matched, and a
single token containing the original matched text from the input string. So if
the expression passed to C{L{originalTextFor}} contains expressions with defined
results names, you must set C{asString} to C{False} if you want to preserve those
results name values."""
locMarker = Empty().setParseAction(lambda s,loc,t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s,l,t: s[t._original_start:t._original_end]
else:
def extractText(s,l,t):
del t[:]
t.insert(0, s[t._original_start:t._original_end])
del t["_original_start"]
del t["_original_end"]
matchExpr.setParseAction(extractText)
return matchExpr
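# A minimal usage sketch of originalTextFor; the _original_text_demo name and the
# sample input are illustrative only.
def _original_text_demo():
    # recover the exact source slice, including the original whitespace between tokens
    fullName = originalTextFor(Word(alphas) + Word(alphas))
    return fullName.parseString("John    Smith").asList()   # ['John    Smith']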
def ungroup(expr):
"""Helper to undo pyparsing's default grouping of And expressions, even
if all but one are non-empty."""
return TokenConverter(expr).setParseAction(lambda t:t[0])
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_printables_less_backslash = "".join([ c for c in printables if c not in r"\]" ])
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],16)))
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash,exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
_expanded = lambda p: (isinstance(p,ParseResults) and ''.join([ unichr(c) for c in range(ord(p[0]),ord(p[1])+1) ]) or p)
def srange(s):
r"""Helper to easily define string ranges for use in Word construction. Borrows
syntax from regexp '[]' string range definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string is the expanded
character set joined into a single string.
The values enclosed in the []'s may be::
a single character
an escaped character with a leading backslash (such as \- or \])
an escaped hex character with a leading '\x' (\x21, which is a '!' character)
(\0x## is also supported for backwards compatibility)
an escaped octal character with a leading '\0' (\041, which is a '!' character)
a range of any of the above, separated by a dash ('a-z', etc.)
any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
"""
try:
return "".join([_expanded(part) for part in _reBracketExpr.parseString(s).body])
except:
return ""
def matchOnlyAtCol(n):
"""Helper method for defining parse actions that require matching at a specific
column in the input text.
"""
def verifyCol(strg,locn,toks):
if col(locn,strg) != n:
raise ParseException(strg,locn,"matched token not at column %d" % n)
return verifyCol
def replaceWith(replStr):
"""Helper method for common parse actions that simply return a literal value. Especially
useful when used with C{transformString()}.
"""
def _replFunc(*args):
return [replStr]
return _replFunc
def removeQuotes(s,l,t):
"""Helper parse action for removing quotation marks from parsed quoted strings.
To use, add this parse action to quoted string using::
quotedString.setParseAction( removeQuotes )
"""
return t[0][1:-1]
def upcaseTokens(s,l,t):
"""Helper parse action to convert tokens to upper case."""
return [ tt.upper() for tt in map(_ustr,t) ]
def downcaseTokens(s,l,t):
"""Helper parse action to convert tokens to lower case."""
return [ tt.lower() for tt in map(_ustr,t) ]
def keepOriginalText(s,startLoc,t):
"""DEPRECATED - use new helper method C{originalTextFor}.
Helper parse action to preserve original parsed text,
overriding any nested parse actions."""
try:
endloc = getTokensEndLoc()
except ParseException:
raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action")
del t[:]
t += ParseResults(s[startLoc:endloc])
return t
def getTokensEndLoc():
"""Method to be called from within a parse action to determine the end
location of the parsed tokens."""
import inspect
fstack = inspect.stack()
try:
# search up the stack (through intervening argument normalizers) for correct calling routine
for f in fstack[2:]:
if f[3] == "_parseNoCache":
endloc = f[0].f_locals["loc"]
return endloc
else:
raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action")
finally:
del fstack
def _makeTags(tagStr, xml):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr,basestring):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas,alphanums+"_-:")
if (xml):
tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
else:
printablesLessRAbrack = "".join( [ c for c in printables if c not in ">" ] )
tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
Optional( Suppress("=") + tagAttrValue ) ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
closeTag = Combine(_L("</") + tagStr + ">")
openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr)
closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % tagStr)
openTag.tag = resname
closeTag.tag = resname
return openTag, closeTag
def makeHTMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for HTML, given a tag name"""
return _makeTags( tagStr, False )
def makeXMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for XML, given a tag name"""
return _makeTags( tagStr, True )
def withAttribute(*args,**attrDict):
"""Helper to create a validating parse action to be used with start tags created
with C{makeXMLTags} or C{makeHTMLTags}. Use C{withAttribute} to qualify a starting tag
with a required attribute value, to avoid false matches on common tags such as
C{<TD>} or C{<DIV>}.
Call C{withAttribute} with a series of attribute names and values. Specify the list
of filter attributes names and values as:
- keyword arguments, as in C{(align="right")}, or
- as an explicit dict with C{**} operator, when an attribute name is also a Python
reserved word, as in C{**{"class":"Customer", "align":"right"}}
- a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
For attribute names with a namespace prefix, you must use the second form. Attribute
names are matched insensitive to upper/lower case.
To verify that the attribute exists, but without specifying a value, pass
C{withAttribute.ANY_VALUE} as the value.
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k,v) for k,v in attrs]
def pa(s,l,tokens):
for attrName,attrValue in attrs:
if attrName not in tokens:
raise ParseException(s,l,"no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
(attrName, tokens[attrName], attrValue))
return pa
withAttribute.ANY_VALUE = object()
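# A usage sketch of withAttribute together with makeHTMLTags (defined above); the
# _with_attribute_demo name and the html argument are illustrative only.
def _with_attribute_demo(html):
    td, tdEnd = makeHTMLTags("td")
    # keep only <td> start tags whose align attribute is exactly "right"
    td.setParseAction(withAttribute(align="right"))
    cell = td + SkipTo(tdEnd)("body") + tdEnd
    return [match.body for match in cell.searchString(html)]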
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def operatorPrecedence( baseExpr, opList ):
"""Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary or
binary, left- or right-associative. Parse actions can also be attached
to operator expressions.
Parameters:
- baseExpr - expression representing the most basic element for the nested
- opList - list of tuples, one for each operator precedence level in the
expression grammar; each tuple is of the form
(opExpr, numTerms, rightLeftAssoc, parseAction), where:
- opExpr is the pyparsing expression for the operator;
may also be a string, which will be converted to a Literal;
if numTerms is 3, opExpr is a tuple of two expressions, for the
two operators separating the 3 terms
- numTerms is the number of terms for this operator (must
be 1, 2, or 3)
- rightLeftAssoc is the indicator whether the operator is
right or left associative, using the pyparsing-defined
constants opAssoc.RIGHT and opAssoc.LEFT.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the
parse action tuple member may be omitted)
"""
ret = Forward()
lastExpr = baseExpr | ( Suppress('(') + ret + Suppress(')') )
for i,operDef in enumerate(opList):
opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
if arity == 3:
if opExpr is None or len(opExpr) != 2:
raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
opExpr1, opExpr2 = opExpr
thisExpr = Forward()#.setName("expr%d" % i)
if rightLeftAssoc == opAssoc.LEFT:
if arity == 1:
matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
else:
matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
elif rightLeftAssoc == opAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Optional):
opExpr = Optional(opExpr)
matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
else:
matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
else:
raise ValueError("operator must indicate right or left associativity")
if pa:
matchExpr.setParseAction( pa )
thisExpr << ( matchExpr | lastExpr )
lastExpr = thisExpr
ret << lastExpr
return ret
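# A minimal usage sketch of operatorPrecedence for a tiny arithmetic grammar; the
# _operator_precedence_demo name is illustrative only.
def _operator_precedence_demo():
    integer = Word(nums).setParseAction(lambda t: int(t[0]))
    arithExpr = operatorPrecedence(integer, [
        (oneOf("* /"), 2, opAssoc.LEFT),
        (oneOf("+ -"), 2, opAssoc.LEFT),
        ])
    # higher-precedence levels are listed first, so '*' binds tighter than '+'
    return arithExpr.parseString("9 + 2 * 3").asList()   # [[9, '+', [2, '*', 3]]]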
dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes")
sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes")
quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy())
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
"""Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default="("); can also be a pyparsing expression
- closer - closing character for a nested list (default=")"); can also be a pyparsing expression
- content - expression for items within the nested lists (default=None)
- ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the C{ignoreExpr} argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
The default is L{quotedString}, but if no expressions are to be ignored,
then pass C{None} for this argument.
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener,basestring) and isinstance(closer,basestring):
if len(opener) == 1 and len(closer)==1:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
).setParseAction(lambda t:t[0].strip()))
else:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
raise ValueError("opening and closing arguments must be strings if no content expression is given")
ret = Forward()
if ignoreExpr is not None:
ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
else:
ret << Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
return ret
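# A minimal usage sketch of nestedExpr with the default '(' and ')' delimiters; the
# _nested_expr_demo name and the sample input are illustrative only.
def _nested_expr_demo():
    return nestedExpr().parseString("(a (b c) d)").asList()   # [['a', ['b', 'c'], 'd']]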
def indentedBlock(blockStatementExpr, indentStack, indent=True):
"""Helper method for defining space-delimited indentation blocks, such as
those used to define block statements in Python source code.
Parameters:
- blockStatementExpr - expression defining syntax of statement that
is repeated within the indented block
- indentStack - list created by caller to manage indentation stack
(multiple statementWithIndentedBlock expressions within a single grammar
should share a common indentStack)
- indent - boolean indicating whether block must be indented beyond the
             current level; set to False for a block of left-most statements
(default=True)
A valid block must contain at least one C{blockStatement}.
"""
def checkPeerIndent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if curCol != indentStack[-1]:
if curCol > indentStack[-1]:
raise ParseFatalException(s,l,"illegal nesting")
raise ParseException(s,l,"not a peer entry")
def checkSubIndent(s,l,t):
curCol = col(l,s)
if curCol > indentStack[-1]:
indentStack.append( curCol )
else:
raise ParseException(s,l,"not a subentry")
def checkUnindent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
raise ParseException(s,l,"not an unindent")
indentStack.pop()
NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
INDENT = Empty() + Empty().setParseAction(checkSubIndent)
PEER = Empty().setParseAction(checkPeerIndent)
UNDENT = Empty().setParseAction(checkUnindent)
if indent:
smExpr = Group( Optional(NL) +
#~ FollowedBy(blockStatementExpr) +
INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
else:
smExpr = Group( Optional(NL) +
(OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
blockStatementExpr.ignore(_bslash + LineEnd())
return smExpr
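# A minimal usage sketch of indentedBlock; the _indented_block_demo name and the toy
# "name: indented items" grammar are illustrative only.
def _indented_block_demo():
    data = "colors:\n    red\n    green\n    blue\n"
    indentStack = [1]
    item = Word(alphas)
    block = Group(item + Suppress(":") + indentedBlock(item, indentStack))
    # each indented line comes back as its own group, nested under the heading token
    return block.parseString(data).asList()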
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:"))
commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";").streamline()
_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),'><& "'))
replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment")
htmlComment = Regex(r"<!--[\s\S]*?-->")
restOfLine = Regex(r".*").leaveWhitespace()
dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment")
cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))").setName("C++ style comment")
javaStyleComment = cppStyleComment
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
_noncomma = "".join( [ c for c in printables if c != "," ] )
_commasepitem = Combine(OneOrMore(Word(_noncomma) +
Optional( Word(" \t") +
~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
if __name__ == "__main__":
def test( teststring ):
try:
tokens = simpleSQL.parseString( teststring )
tokenlist = tokens.asList()
print (teststring + "->" + str(tokenlist))
print ("tokens = " + str(tokens))
print ("tokens.columns = " + str(tokens.columns))
print ("tokens.tables = " + str(tokens.tables))
print (tokens.asXML("SQL",True))
except ParseBaseException:
err = sys.exc_info()[1]
print (teststring + "->")
print (err.line)
print (" "*(err.column-1) + "^")
print (err)
print()
selectToken = CaselessLiteral( "select" )
fromToken = CaselessLiteral( "from" )
ident = Word( alphas, alphanums + "_$" )
columnName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
columnNameList = Group( delimitedList( columnName ) )#.setName("columns")
tableName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
tableNameList = Group( delimitedList( tableName ) )#.setName("tables")
simpleSQL = ( selectToken + \
( '*' | columnNameList ).setResultsName( "columns" ) + \
fromToken + \
tableNameList.setResultsName( "tables" ) )
test( "SELECT * from XYZZY, ABC" )
test( "select * from SYS.XYZZY" )
test( "Select A from Sys.dual" )
test( "Select AA,BB,CC from Sys.dual" )
test( "Select A, B, C from Sys.dual" )
test( "Select A, B, C from Sys.dual" )
test( "Xelect A, B, C from Sys.dual" )
test( "Select A, B, C frox Sys.dual" )
test( "Select" )
test( "Select ^^^ frox Sys.dual" )
test( "Select A, B, C from Sys.dual, Table2 " )
| 40.448
| 196
| 0.588014
|
21231c3f6f54724b3ebc9dcd89ad41ad7121d9d8
| 14,463
|
py
|
Python
|
userbot/modules/stickers.py
|
Furuhashii/aone-kangbot
|
2732b66faf778771cff33f48a83ee0275d92c462
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/stickers.py
|
Furuhashii/aone-kangbot
|
2732b66faf778771cff33f48a83ee0275d92c462
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/stickers.py
|
Furuhashii/aone-kangbot
|
2732b66faf778771cff33f48a83ee0275d92c462
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2020-05-06T17:14:03.000Z
|
2020-05-06T17:14:03.000Z
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module for kanging stickers or making new ones. Thanks @rupansh"""
import io
import math
import urllib.request
from os import remove
from PIL import Image
import random
from telethon.tl.types import DocumentAttributeFilename, MessageMediaPhoto
from userbot import bot, CMD_HELP
from userbot.events import register
from telethon.tl.functions.messages import GetStickerSetRequest
from telethon.tl.types import InputStickerSetID
from telethon.tl.types import DocumentAttributeSticker
KANGING_STR = [
"Using Witchery to kang this sticker...",
"Plagiarising hehe...",
"Inviting this sticker over to my pack...",
"Kanging this sticker...",
"Hey that's a nice sticker!\nMind if I kang?!..",
"hehe me stel ur stikér\nhehe.",
"Ay look over there (☉。☉)!→\nWhile I kang this...",
"Roses are red violets are blue, kanging this sticker so my pacc looks cool",
"Imprisoning this sticker...",
"Mr.Steal Your Sticker is stealing this sticker... ",
]
@register(outgoing=True, pattern="^.kang")
async def kang(args):
""" For .kang command, kangs stickers or creates new ones. """
user = await bot.get_me()
if not user.username:
user.username = user.first_name
message = await args.get_reply_message()
photo = None
emojibypass = False
is_anim = False
emoji = None
if message and message.media:
if isinstance(message.media, MessageMediaPhoto):
await args.edit(f"`{random.choice(KANGING_STR)}`")
photo = io.BytesIO()
photo = await bot.download_media(message.photo, photo)
elif "image" in message.media.document.mime_type.split('/'):
await args.edit(f"`{random.choice(KANGING_STR)}`")
photo = io.BytesIO()
await bot.download_file(message.media.document, photo)
if (DocumentAttributeFilename(file_name='sticker.webp') in
message.media.document.attributes):
emoji = message.media.document.attributes[1].alt
emojibypass = True
elif "tgsticker" in message.media.document.mime_type:
await args.edit(f"`{random.choice(KANGING_STR)}`")
await bot.download_file(message.media.document,
'AnimatedSticker.tgs')
attributes = message.media.document.attributes
for attribute in attributes:
if isinstance(attribute, DocumentAttributeSticker):
emoji = attribute.alt
emojibypass = True
is_anim = True
photo = 1
else:
return await args.edit("`Unsupported File!`")
else:
return await args.edit("`I can't kang that...`")
if photo:
splat = args.text.split()
if not emojibypass:
emoji = "🤔"
pack = 1
if len(splat) == 3:
pack = splat[2] # User sent both
emoji = splat[1]
elif len(splat) == 2:
if splat[1].isnumeric():
# User wants to push into different pack, but is okay with
# thonk as emote.
pack = int(splat[1])
else:
# User sent just custom emote, wants to push to default
# pack
emoji = splat[1]
packname = f"a{user.id}_by_{user.username}_{pack}"
packnick = f"@{user.username}'s kang pack Vol.{pack}"
cmd = '/newpack'
file = io.BytesIO()
if not is_anim:
image = await resize_photo(photo)
file.name = "sticker.png"
image.save(file, "PNG")
else:
packname += "_anim"
packnick += " (Animated)"
cmd = '/newanimated'
response = urllib.request.urlopen(
urllib.request.Request(f'http://t.me/addstickers/{packname}'))
htmlstr = response.read().decode("utf8").split('\n')
if " A <strong>Telegram</strong> user has created the <strong>Sticker Set</strong>." not in htmlstr:
async with bot.conversation('Stickers') as conv:
await conv.send_message('/addsticker')
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.send_message(packname)
x = await conv.get_response()
while "120" in x.text:
pack += 1
packname = f"a{user.id}_by_{user.username}_{pack}"
packnick = f"@{user.username}'s kang pack Vol.{pack}"
await args.edit("`Switching to Pack " + str(pack) +
" due to insufficient space`")
await conv.send_message(packname)
x = await conv.get_response()
if x.text == "Invalid pack selected.":
await conv.send_message(cmd)
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.send_message(packnick)
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
if is_anim:
await conv.send_file('AnimatedSticker.tgs')
remove('AnimatedSticker.tgs')
else:
file.seek(0)
await conv.send_file(file, force_document=True)
await conv.get_response()
await conv.send_message(emoji)
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
await conv.send_message("/publish")
if is_anim:
await conv.get_response()
await conv.send_message(f"<{packnick}>")
# Ensure user doesn't get spamming notifications
await conv.get_response()
await bot.send_read_acknowledge(conv.chat_id)
await conv.send_message("/skip")
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
await conv.send_message(packname)
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
return await args.edit(
"`Sticker added in a Different Pack !"
"\nThis Pack is Newly created!"
f"\nYour pack can be found [here](t.me/addstickers/{packname})",
parse_mode='md')
if is_anim:
await conv.send_file('AnimatedSticker.tgs')
remove('AnimatedSticker.tgs')
else:
file.seek(0)
await conv.send_file(file, force_document=True)
rsp = await conv.get_response()
if "Sorry, the file type is invalid." in rsp.text:
return await args.edit(
"`Failed to add sticker, use` @Stickers `bot to add the sticker manually.`"
)
await conv.send_message(emoji)
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
await conv.send_message('/done')
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
else:
await args.edit("`Brewing a new Pack...`")
async with bot.conversation('Stickers') as conv:
await conv.send_message(cmd)
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.send_message(packnick)
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
if is_anim:
await conv.send_file('AnimatedSticker.tgs')
remove('AnimatedSticker.tgs')
else:
file.seek(0)
await conv.send_file(file, force_document=True)
rsp = await conv.get_response()
if "Sorry, the file type is invalid." in rsp.text:
return await args.edit(
"`Failed to add sticker, use` @Stickers `bot to add the sticker manually.`"
)
await conv.send_message(emoji)
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
await conv.send_message("/publish")
if is_anim:
await conv.get_response()
await conv.send_message(f"<{packnick}>")
# Ensure user doesn't get spamming notifications
await conv.get_response()
await bot.send_read_acknowledge(conv.chat_id)
await conv.send_message("/skip")
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
await conv.send_message(packname)
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await args.edit(
"`Sticker kanged successfully!`"
f"\nPack can be found [here](t.me/addstickers/{packname})",
parse_mode='md')
async def resize_photo(photo):
""" Resize the given photo to 512x512 """
image = Image.open(photo)
    # upscale when both dimensions are below 512 so that the larger one becomes exactly 512
    if image.width < 512 and image.height < 512:
size1 = image.width
size2 = image.height
if size1 > size2:
scale = 512 / size1
size1new = 512
size2new = size2 * scale
else:
scale = 512 / size2
size1new = size1 * scale
size2new = 512
size1new = math.floor(size1new)
size2new = math.floor(size2new)
sizenew = (size1new, size2new)
image = image.resize(sizenew)
else:
maxsize = (512, 512)
image.thumbnail(maxsize)
return image
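# A small illustrative helper (not part of the original module): resize_photo is a
# coroutine, so it must be awaited; the _sticker_png_bytes name and the path argument
# are assumptions for this sketch.
async def _sticker_png_bytes(path):
    """ Resize any image PIL can open and return it as an in-memory PNG buffer """
    image = await resize_photo(path)
    buf = io.BytesIO()
    buf.name = "sticker.png"
    image.save(buf, "PNG")
    buf.seek(0)
    return buf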
@register(outgoing=True, pattern="^.stkrinfo$")
async def get_pack_info(event):
if not event.is_reply:
return await event.edit("`I can't fetch info from nothing, can I ?!`")
rep_msg = await event.get_reply_message()
if not rep_msg.document:
return await event.edit("`Reply to a sticker to get the pack details`")
try:
stickerset_attr = rep_msg.document.attributes[1]
await event.edit(
"`Fetching details of the sticker pack, please wait..`")
except BaseException:
return await event.edit("`This is not a sticker. Reply to a sticker.`")
if not isinstance(stickerset_attr, DocumentAttributeSticker):
return await event.edit("`This is not a sticker. Reply to a sticker.`")
get_stickerset = await bot(
GetStickerSetRequest(
InputStickerSetID(
id=stickerset_attr.stickerset.id,
access_hash=stickerset_attr.stickerset.access_hash)))
pack_emojis = []
for document_sticker in get_stickerset.packs:
if document_sticker.emoticon not in pack_emojis:
pack_emojis.append(document_sticker.emoticon)
OUTPUT = (
f"**Sticker Title:** `{get_stickerset.set.title}\n`"
f"**Sticker Short Name:** `{get_stickerset.set.short_name}`\n"
f"**Official:** `{get_stickerset.set.official}`\n"
f"**Archived:** `{get_stickerset.set.archived}`\n"
f"**Stickers In Pack:** `{len(get_stickerset.packs)}`\n"
f"**Emojis In Pack:**\n{' '.join(pack_emojis)}"
)
await event.edit(OUTPUT)
@register(outgoing=True, pattern="^.getsticker$")
async def sticker_to_png(sticker):
if not sticker.is_reply:
        await sticker.edit("`NULL information to fetch...`")
return False
img = await sticker.get_reply_message()
if not img.document:
await sticker.edit("`Reply to a sticker...`")
return False
await sticker.edit("`Converting...`")
image = io.BytesIO()
await sticker.client.download_media(img, image)
image.name = 'sticker.png'
image.seek(0)
await sticker.client.send_file(
sticker.chat_id, image, reply_to=img.id, force_document=True)
await sticker.delete()
return
CMD_HELP.update({
"stickers":
    ">`.kang [emoji(s)]`"
    "\nUsage: Reply with .kang to a sticker or an image to kang it to your userbot pack, "
    "\nor specify the emoji(s) you want to use."
    "\n\n>`.kang [emoji(s)] [pack number]`"
    "\nUsage: Kangs the sticker/image to the specified pack; uses 🤔 as the emoji "
    "unless you specify the emoji(s) you want."
"\n\n>`.stkrinfo`"
"\nUsage: Gets info about the sticker pack."
"\n\n>`.getsticker`"
"\nUsage: reply to a sticker to get 'PNG' file of sticker."
})
| 42.289474
| 114
| 0.566618
|
5761ac2c4a24737674df47d16022385b64e6ba53
| 6,404
|
py
|
Python
|
gtp_extensions.py
|
awesome-archive/minigo
|
188fb197fdafbe9664a32142373b1cbd1459bc67
|
[
"Apache-2.0"
] | null | null | null |
gtp_extensions.py
|
awesome-archive/minigo
|
188fb197fdafbe9664a32142373b1cbd1459bc67
|
[
"Apache-2.0"
] | null | null | null |
gtp_extensions.py
|
awesome-archive/minigo
|
188fb197fdafbe9664a32142373b1cbd1459bc67
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# extends gtp.py
import gtp
import sys
import sgf_wrapper
import itertools
import go
import coords
def parse_message(message):
message = gtp.pre_engine(message).strip()
first, rest = (message.split(" ", 1) + [None])[:2]
if first.isdigit():
message_id = int(first)
if rest is not None:
command, arguments = (rest.split(" ", 1) + [None])[:2]
else:
command, arguments = None, None
else:
message_id = None
command, arguments = first, rest
command = command.replace("-", "_") # for kgs extensions.
return message_id, command, arguments
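# A minimal sketch of what parse_message returns; the _parse_message_demo name and the
# sample GTP command are illustrative only.
def _parse_message_demo():
    # an optional numeric id, then the command, then everything else as the arguments
    return parse_message("12 play black D4")   # -> (12, 'play', 'black D4')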
class KgsExtensionsMixin(gtp.Engine):
def __init__(self, game_obj, name="gtp (python, kgs-chat extensions)", version="0.1"):
super().__init__(game_obj=game_obj, name=name, version=version)
self.known_commands += ["kgs-chat"]
def send(self, message):
message_id, command, arguments = parse_message(message)
if command in self.known_commands:
try:
retval = getattr(self, "cmd_" + command)(arguments)
response = gtp.format_success(message_id, retval)
sys.stderr.flush()
return response
except ValueError as exception:
return gtp.format_error(message_id, exception.args[0])
else:
return gtp.format_error(message_id, "unknown command: " + command)
# Nice to implement this, as KGS sends it each move.
def cmd_time_left(self, arguments):
pass
def cmd_showboard(self, arguments):
return self._game.showboard()
def cmd_kgs_chat(self, arguments):
try:
msg_type, sender, *text = arguments.split()
text = " ".join(text)
except ValueError:
return "Unparseable message, args: %r" % arguments
return self._game.chat(msg_type, sender, text)
class RegressionsMixin(gtp.Engine):
def cmd_loadsgf(self, arguments):
args = arguments.split()
if len(args) == 2:
file_, movenum = args
movenum = int(movenum)
print("movenum =", movenum, file=sys.stderr)
else:
file_ = args[0]
movenum = None
try:
with open(file_, 'r') as f:
contents = f.read()
except:
raise ValueError("Unreadable file: " + file_)
try:
# This is kinda bad, because replay_sgf is already calling
# 'play move' on its internal position objects, but we really
# want to advance the engine along with us rather than try to
# push in some finished Position object.
for idx, p in enumerate(sgf_wrapper.replay_sgf(contents)):
print("playing #", idx, p.next_move, file=sys.stderr)
self._game.play_move(p.next_move)
if movenum and idx == movenum:
break
except:
raise
# Should this class blatantly reach into the game_obj and frob its tree? Sure!
# What are private members? Python lets you do *anything!*
class GoGuiMixin(gtp.Engine):
""" GTP extensions of 'analysis commands' for gogui.
We reach into the game_obj (an instance of the players in strategies.py),
and extract stuff from its root nodes, etc. These could be extracted into
    methods on the Player object, but it's a little weird to do that on a Player,
which doesn't really care about GTP commands, etc. So instead, we just
violate encapsulation a bit... Suggestions welcome :) """
def __init__(self, game_obj, name="gtp (python, gogui extensions)", version="0.1"):
super().__init__(game_obj=game_obj, name=name, version=version)
self.known_commands += ["gogui-analyze_commands"]
def cmd_gogui_analyze_commands(self, arguments):
return "\n".join(["var/Most Read Variation/nextplay",
"var/Think a spell/spin",
"pspairs/Visit Heatmap/visit_heatmap",
"pspairs/Q Heatmap/q_heatmap"])
def cmd_nextplay(self, arguments):
return self._game.root.mvp_gg()
def cmd_visit_heatmap(self, arguments):
sort_order = list(range(self._game.size * self._game.size + 1))
sort_order.sort(key=lambda i: self._game.root.child_N[i], reverse=True)
return self.heatmap(sort_order, self._game.root, 'child_N')
def cmd_q_heatmap(self, arguments):
sort_order = list(range(self._game.size * self._game.size + 1))
reverse = True if self._game.root.position.to_play is go.BLACK else False
sort_order.sort(key=lambda i: self._game.root.child_Q[i], reverse=reverse)
return self.heatmap(sort_order, self._game.root, 'child_Q')
def heatmap(self, sort_order, node, prop):
return "\n".join(["{!s:6} {}".format(
coords.to_human_coord(coords.unflatten_coords(key)),
node.__dict__.get(prop)[key])
for key in sort_order if node.child_N[key] > 0][:20])
def cmd_spin(self, arguments):
for i in range(50):
for j in range(100):
self._game.tree_search()
moves = self.cmd_nextplay(None).lower()
moves = moves.split()
colors = "bw" if self._game.root.position.to_play is go.BLACK else "wb"
moves_cols = " ".join(['{} {}'.format(*z)
for z in zip(itertools.cycle(colors), moves)])
print("gogui-gfx: TEXT", "{:.3f} after {}".format(self._game.root.Q, self._game.root.N), file=sys.stderr, flush=True)
print("gogui-gfx: VAR", moves_cols, file=sys.stderr, flush=True)
return self.cmd_nextplay(None)
class GTPDeluxe(KgsExtensionsMixin, RegressionsMixin, GoGuiMixin):
pass
| 40.27673
| 129
| 0.62539
|
266389ce1d217230e40fdfefa2446c07d0d18a8e
| 3,086
|
py
|
Python
|
deepcell_datasets/conftest.py
|
vanvalenlab/deepcell-datasets
|
aadd709b2bc13e1de57add5226e4d2abbeb21362
|
[
"Apache-2.0"
] | null | null | null |
deepcell_datasets/conftest.py
|
vanvalenlab/deepcell-datasets
|
aadd709b2bc13e1de57add5226e4d2abbeb21362
|
[
"Apache-2.0"
] | 5
|
2021-03-19T18:43:46.000Z
|
2022-02-28T02:34:03.000Z
|
deepcell_datasets/conftest.py
|
vanvalenlab/deepcell-datasets
|
aadd709b2bc13e1de57add5226e4d2abbeb21362
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016-2020 The Van Valen Lab at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-datasets/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# vanvalenlab@gmail.com
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the General Blueprint."""
import random
from flask_security import hash_password
from mongoengine import connect, disconnect
import pytest
from deepcell_datasets.database import models
from deepcell_datasets import create_app
@pytest.fixture
def app():
"""set up and tear down a test application"""
disconnect() # TODO: why do we need to call this?
connect('mongoenginetest', host='mongomock://localhost')
mongo_settings = {
'DB': 'mongoenginetest',
'HOST': 'mongomock://localhost',
# 'PORT': 27017,
'alias': 'testdb'
}
yield create_app(
MONGODB_SETTINGS=mongo_settings,
TESTING=True,
WTF_CSRF_ENABLED=False,
ADMIN_EMAIL='admin@me.com',
ADMIN_PASSWORD='password',
)
disconnect()
@pytest.fixture()
def mongodb():
disconnect() # TODO: why do we need to call this?
db = connect('mongoenginetest', host='mongomock://localhost')
yield db
disconnect()
@pytest.fixture()
def sample(mongodb, experiment):
session = random.randint(1, 9999)
position = random.randint(1, 9999)
spatial_dim = random.choice(['2d', '3d'])
kinetics = random.choice(['static', 'dynamic'])
sample = models.Samples(session=session, position=position,
spatial_dim=spatial_dim, kinetics=kinetics,
experiment=experiment.id)
sample.save()
yield sample
sample.delete()
@pytest.fixture()
def experiment(mongodb):
doi = 'a specific DOI number'
created_by = models.Users(
first_name='first',
last_name='last',
facility='test facility'
)
created_by.save()
experiment = models.Experiments(doi=doi, created_by=created_by)
experiment.save()
yield experiment
experiment.delete()
| 29.961165
| 80
| 0.680169
|
1be6f53339dcf0a50c440909896dbeaf1f779acf
| 261
|
py
|
Python
|
World 3/Exercise 114.py
|
NikiReis/Python--Exercises
|
2f50a3cd6900cec024edcf1a812d1cd86afcdea1
|
[
"MIT"
] | null | null | null |
World 3/Exercise 114.py
|
NikiReis/Python--Exercises
|
2f50a3cd6900cec024edcf1a812d1cd86afcdea1
|
[
"MIT"
] | null | null | null |
World 3/Exercise 114.py
|
NikiReis/Python--Exercises
|
2f50a3cd6900cec024edcf1a812d1cd86afcdea1
|
[
"MIT"
] | null | null | null |
import urllib
import urllib.request
try:
site = urllib.request.urlopen('https://www.python.org/')
except urllib.error.URLError:
print('O site não está disponível para ser acessado!')
else:
print('O site está disponível para ser acessado!')
| 29
| 61
| 0.708812
|
294810aedac3ebcfb07a7fc6e47737e53d420602
| 1,378
|
py
|
Python
|
gcd/bus.py
|
bthate/genocide
|
8de7a2cccee7315ae6cf5661738ba1335e30a5ba
|
[
"DOC"
] | null | null | null |
gcd/bus.py
|
bthate/genocide
|
8de7a2cccee7315ae6cf5661738ba1335e30a5ba
|
[
"DOC"
] | null | null | null |
gcd/bus.py
|
bthate/genocide
|
8de7a2cccee7315ae6cf5661738ba1335e30a5ba
|
[
"DOC"
] | null | null | null |
# This file is placed in the Public Domain.
from .obj import Object
class BusError(Exception):
pass
class Bus(Object):
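    """Registry of handler objects: announce() broadcasts text to every registered
    object, while byorig/byfd/bytype/first look up a single handler."""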
objs = []
def __iter__(self):
return iter(Bus.objs)
@staticmethod
def add(obj):
if obj not in Bus.objs:
Bus.objs.append(obj)
@staticmethod
def announce(txt):
for h in Bus.objs:
if "announce" in dir(h):
h.announce(txt)
@staticmethod
def byorig(orig):
for o in Bus.objs:
if o.__oqn__() == orig:
return o
raise BusError(orig)
@staticmethod
def byfd(fd):
for o in Bus.objs:
if o.fd and o.fd == fd:
return o
return None
@staticmethod
def bytype(typ):
for o in Bus.objs:
if isinstance(o, typ):
return o
return None
@staticmethod
def first(otype=None):
if Bus.objs:
if not otype:
return Bus.objs[0]
for o in Bus.objs:
if otype in str(type(o)):
return o
return None
@staticmethod
def resume():
for o in Bus.objs:
o.resume()
@staticmethod
def say(orig, channel, txt):
for o in Bus.objs:
if o.__oqn__() == orig:
o.say(channel, txt)
| 19.408451
| 43
| 0.49492
|
61dd7e4005f58f895c0e3be78e928704c2d8da1e
| 7,678
|
py
|
Python
|
my_configs/fast_rcnn/fast_demo.py
|
EtokonE/mmdetection
|
1d7b0dcf3ff0cdc0d5142ea3bee1a4c0040408a0
|
[
"Apache-2.0"
] | null | null | null |
my_configs/fast_rcnn/fast_demo.py
|
EtokonE/mmdetection
|
1d7b0dcf3ff0cdc0d5142ea3bee1a4c0040408a0
|
[
"Apache-2.0"
] | null | null | null |
my_configs/fast_rcnn/fast_demo.py
|
EtokonE/mmdetection
|
1d7b0dcf3ff0cdc0d5142ea3bee1a4c0040408a0
|
[
"Apache-2.0"
] | 1
|
2021-07-16T06:47:17.000Z
|
2021-07-16T06:47:17.000Z
|
model = dict(
type='FastRCNN',
pretrained='open-mmlab://detectron2/resnet101_caffe',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
train_cfg=dict(
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
dataset_type = 'MyDataset'
data_root = '/home/user/Documents/Kalinin/Data/full_data/'
classes = ['drone']
img_norm_cfg = dict(
mean=[103.53, 116.28, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=2000),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='Normalize',
mean=[103.53, 116.28, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=None),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[103.53, 116.28, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['proposals']),
dict(
type='ToDataContainer',
fields=[dict(key='proposals', stack=False)]),
dict(type='Collect', keys=['img', 'proposals'])
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'ch02_20200605121548-part 00000.json',
img_prefix=data_root,
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=2000),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='Normalize',
mean=[103.53, 116.28, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'])
],
        # proposal_file='data/coco/proposals/rpn_r50_fpn_1x_train2017.pkl',
    ),
val=dict(
type=dataset_type,
ann_file=data_root + 'ch01_20200605121410-part 00000.json',
img_prefix=data_root,
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=None),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[103.53, 116.28, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['proposals']),
dict(
type='ToDataContainer',
fields=[dict(key='proposals', stack=False)]),
dict(type='Collect', keys=['img', 'proposals'])
])
],
        # proposal_file='data/coco/proposals/rpn_r50_fpn_1x_val2017.pkl',
    ),
test=dict(
type=dataset_type,
ann_file=data_root + 'ch02_20200605114152-part 00000.json',
img_prefix=data_root,
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=None),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[103.53, 116.28, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['proposals']),
dict(
type='ToDataContainer',
fields=[dict(key='proposals', stack=False)]),
dict(type='Collect', keys=['img', 'proposals'])
])
],
        # proposal_file='data/coco/proposals/rpn_r50_fpn_1x_val2017.pkl',
    ),
)
evaluation = dict(interval=1, metric='mAP')
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
checkpoint_config = dict(interval=1)
log_config = dict(interval=1, hooks=[dict(type='TextLoggerHook'), dict(type='TensorboardLoggerHook')])
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
work_dir = './experiment/fast_rcnn'
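
# Note: Fast R-CNN consumes precomputed region proposals, so every pipeline above has a
# LoadProposals step; the matching proposal_file entries are commented out and must be
# supplied before training or testing will run. The bbox head also keeps the 80-class
# COCO setting (num_classes=80) even though `classes` only lists 'drone'.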
| 37.091787
| 102
| 0.525527
|
9f9fc72d4466b6882677958b4101597bebcf6404
| 1,698
|
py
|
Python
|
example/utils.py
|
marknotfound/django-su
|
af989d8872a7add40eb1ec33a43a661f6429ea47
|
[
"MIT"
] | 123
|
2015-01-06T00:38:04.000Z
|
2022-02-02T10:50:12.000Z
|
example/utils.py
|
marknotfound/django-su
|
af989d8872a7add40eb1ec33a43a661f6429ea47
|
[
"MIT"
] | 42
|
2015-02-19T20:22:28.000Z
|
2022-03-30T14:11:21.000Z
|
example/utils.py
|
marknotfound/django-su
|
af989d8872a7add40eb1ec33a43a661f6429ea47
|
[
"MIT"
] | 34
|
2015-06-15T22:24:00.000Z
|
2022-02-02T10:50:13.000Z
|
# -*- coding: utf-8 -*-
from django.contrib.auth import SESSION_KEY, BACKEND_SESSION_KEY, get_user_model
try:
from django.contrib.auth import HASH_SESSION_KEY
except ImportError:
HASH_SESSION_KEY = '_auth_user_hash'
User = get_user_model()
def su_login_callback(user):
if user.is_active and user.is_staff:
return True
return user.has_perm('auth.change_user')
def _get_user_session_key(request):
# This value in the session is always serialized to a string, so we need
# to convert it back to Python whenever we access it.
return User._meta.pk.to_python(request.session[SESSION_KEY])
def custom_login(request, user):
session_auth_hash = ''
if user is None:
user = request.user
if hasattr(user, 'get_session_auth_hash'):
session_auth_hash = user.get_session_auth_hash()
if SESSION_KEY in request.session:
if _get_user_session_key(request) != user.pk or (
session_auth_hash and
request.session.get(HASH_SESSION_KEY) != session_auth_hash):
# To avoid reusing another user's session, create a new, empty
# session if the existing session corresponds to a different
# authenticated user.
request.session.flush()
else:
request.session.cycle_key()
request.session[SESSION_KEY] = user._meta.pk.value_to_string(user)
request.session[BACKEND_SESSION_KEY] = user.backend
request.session[HASH_SESSION_KEY] = session_auth_hash
if hasattr(request, 'user'):
request.user = user
try:
from django.middleware.csrf import rotate_token
rotate_token(request)
except ImportError:
pass
| 31.444444
| 80
| 0.694346
|
1df97bc96c54cddbc16ffc293c707555da259a42
| 511
|
py
|
Python
|
tools/build/test/example_libraries.py
|
lijgame/boost
|
ec2214a19cdddd1048058321a8105dd0231dac47
|
[
"BSL-1.0"
] | 85
|
2015-02-08T20:36:17.000Z
|
2021-11-14T20:38:31.000Z
|
libs/boost/tools/build/test/example_libraries.py
|
flingone/frameworks_base_cmds_remoted
|
4509d9f0468137ed7fd8d100179160d167e7d943
|
[
"Apache-2.0"
] | 9
|
2015-01-28T16:33:19.000Z
|
2020-04-12T23:03:28.000Z
|
libs/boost/tools/build/test/example_libraries.py
|
flingone/frameworks_base_cmds_remoted
|
4509d9f0468137ed7fd8d100179160d167e7d943
|
[
"Apache-2.0"
] | 27
|
2015-01-28T16:33:30.000Z
|
2021-08-12T05:04:39.000Z
|
#!/usr/bin/python
# Copyright (C) Vladimir Prus 2006.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test the 'libraries' example.
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
t.set_tree("../example/libraries")
t.run_build_system()
t.expect_addition(["app/bin/$toolset/debug/app.exe",
"util/foo/bin/$toolset/debug/bar.dll"])
t.cleanup()
| 23.227273
| 66
| 0.684932
|
abb75434a2349be8190271481222c1b3f5c00420
| 2,472
|
py
|
Python
|
genesis/length_scales/model_fitting/exponential_fit.py
|
leifdenby/uclales-extractor
|
6147533e25b3b417c744bd814d2407a6588cf995
|
[
"BSD-3-Clause"
] | null | null | null |
genesis/length_scales/model_fitting/exponential_fit.py
|
leifdenby/uclales-extractor
|
6147533e25b3b417c744bd814d2407a6588cf995
|
[
"BSD-3-Clause"
] | null | null | null |
genesis/length_scales/model_fitting/exponential_fit.py
|
leifdenby/uclales-extractor
|
6147533e25b3b417c744bd814d2407a6588cf995
|
[
"BSD-3-Clause"
] | null | null | null |
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
from .pystan_cache import StanModel_cache
from .utils import dist_plot
SM_CODE = """
data {
int<lower=1> n;
real x[n];
}
parameters {
real<lower=0> beta;
}
model {
x ~ exponential(1.0/beta);
}
"""
def _sample_exp(Ntot, vrange, beta):
vmin, vmax = vrange
# alpha = Ntot / (beta * (np.exp(-vmin / beta) - np.exp(-vmax / beta)))
x = np.random.exponential(scale=beta, size=Ntot)
return x
def _fit_exp(v_data, debug=False):
sm = StanModel_cache(model_code=SM_CODE)
fit = sm.sampling(data=dict(x=v_data, n=len(v_data)))
beta = fit["beta"]
if debug:
plt.hist(beta)
print(fit)
print(np.mean(beta), np.std(beta))
return beta
def fit(da_v, dv=None, plot_to=None, debug=False, plot_components="default"):
"""
    Fit an exponential model to da_v; returns the mean and std dev of beta, the
    scale parameter of the distribution.
"""
# remove all nans and infs
v_data = da_v
v_data = v_data[~np.logical_or(np.isnan(v_data), np.isinf(v_data))]
# only fit from the minimum limit, otherwise we'll be fitting from v=0
    # where there aren't any objects
vmin_fit = v_data.min().values
vrange_fit = (vmin_fit, np.max(v_data).values)
beta = _fit_exp(v_data[v_data > vmin_fit] - vmin_fit, debug=debug)
if plot_to is not None:
axes = None
if isinstance(plot_to, np.ndarray) and isinstance(plot_to[0], plt.Axes):
axes = plot_to
log_dists = False
fig, axes = dist_plot(
v_data,
dv_bin=dv,
axes=axes,
fit=("exp", (np.mean(beta), np.std(beta)), vrange_fit),
log_dists=log_dists,
components=plot_components,
)
v_sample = vmin_fit + _sample_exp(len(v_data), vrange_fit, np.mean(beta))
dist_plot(
v_sample,
dv_bin=dv,
axes=axes,
alpha=0.6,
log_dists=log_dists,
components=plot_components,
)
fig.legend(axes[-1].get_lines(), ["data", "model"], loc="lower left")
return xr.DataArray.from_dict(
dict(
dims="part",
coords=dict(part=dict(dims="part", data=np.array(["mean", "std"]))),
data=[np.mean(beta), np.std(beta)],
attrs=da_v.attrs,
name=da_v.name,
)
)
    # unreachable: superseded by the xr.DataArray returned above
    # return np.mean(beta), np.std(beta)
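
# Usage sketch (hypothetical data, not part of the original module): `fit` expects a
# 1-D DataArray of positive values and returns a DataArray with parts "mean" and "std".
#
#   da = xr.DataArray(np.random.exponential(scale=2.0, size=1000), dims="object", name="length")
#   beta = fit(da, dv=0.1)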
| 27.164835
| 81
| 0.591019
|
cf89b0ffd4fa78a2f8d4f00d1efe7329878be7ed
| 1,716
|
py
|
Python
|
scripts/cms_user_adder.py
|
ryanking13/cms-guide
|
d64b0dff3ca4c70abed44f23908fa660a33644d7
|
[
"MIT"
] | 3
|
2017-11-02T06:03:33.000Z
|
2019-09-10T08:56:23.000Z
|
scripts/cms_user_adder.py
|
ryanking13/cms-guide
|
d64b0dff3ca4c70abed44f23908fa660a33644d7
|
[
"MIT"
] | null | null | null |
scripts/cms_user_adder.py
|
ryanking13/cms-guide
|
d64b0dff3ca4c70abed44f23908fa660a33644d7
|
[
"MIT"
] | null | null | null |
from subprocess import call
import sys
import random
import hashlib
def parse_info(info):
# this function must return an array of dictionary.
# each element should have username, password, first_name, last_name
'''
# PARSING SAMPLE
    # YOU SHOULD IMPLEMENT THIS YOURSELF
data = []
cnt = 1
for row in info:
r = row.strip().split()
pw = hashlib.md5(str(random.random()).encode()).hexdigest()
d = {
'username': r[0],
'password': pw,
'first_name': r[1],
'last_name': r[2],
}
data.append(d)
cnt += 1
return data
'''
return [
{
'username': 'iam',
'password': 'mr',
'first_name': 'Donald',
'last_name': 'Trump',
}
]
def main():
if len(sys.argv) < 2:
print("[*] Usage : python %s <user info file>" % sys.argv[0])
exit()
user_info_file = sys.argv[1]
users = open(user_info_file, 'r').readlines()
user_info = parse_info(users)
login_info = ['Username,Password']
for info in user_info:
# $ cmsAddUser <first name> <last name> <username> -p <password>
call(['cmsAddUser', info['first_name'], info['last_name'], info['username'],
'-p', info['password']])
# user login info
login_info.append('%s,%s' % (info['username'], info['password']))
# saves user login info in csv format
login_info_file = 'login_info.csv'
with open(login_info_file, 'w') as f:
for info in login_info:
f.write(info + '\n')
print('[+] Done Adding user, file saved')
if __name__ == '__main__':
main()
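
# Expected input: one user per line; the commented sample parser assumes whitespace-
# separated fields in the order <username> <first name> <last name> and generates a
# random md5-based password per account. Adapt parse_info() to your own file format.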
| 21.721519
| 84
| 0.543124
|
082a4a362429acf4124219d990480395b61a1aae
| 9,092
|
py
|
Python
|
lambda_ecs_draining/lambda/source/index.py
|
MakeMeReach/terraform-aws-ecs
|
457b17c1f5bb5a14b903c85d2f8901c84e7c5bf0
|
[
"MIT"
] | 2
|
2018-06-06T09:44:03.000Z
|
2021-02-01T12:42:11.000Z
|
lambda_ecs_draining/lambda/source/index.py
|
MakeMeReach/terraform-aws-ecs
|
457b17c1f5bb5a14b903c85d2f8901c84e7c5bf0
|
[
"MIT"
] | null | null | null |
lambda_ecs_draining/lambda/source/index.py
|
MakeMeReach/terraform-aws-ecs
|
457b17c1f5bb5a14b903c85d2f8901c84e7c5bf0
|
[
"MIT"
] | 1
|
2021-09-24T13:21:08.000Z
|
2021-09-24T13:21:08.000Z
|
from __future__ import print_function
import boto3
from urlparse import urlparse
import base64
import json
import datetime
import time
import logging
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
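
# Overall flow: an ASG lifecycle hook publishes the terminating-instance event to SNS,
# which invokes this Lambda. The handler marks the matching ECS container instance as
# DRAINING and, while tasks are still running, re-publishes the message to the same SNS
# topic to retry later; once the instance is empty it completes the lifecycle action.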
# Establish boto3 session
session = boto3.session.Session()
logger.debug("Session is in region %s ", session.region_name)
ec2Client = session.client(service_name='ec2')
ecsClient = session.client(service_name='ecs')
asgClient = session.client('autoscaling')
snsClient = session.client('sns')
lambdaClient = session.client('lambda')
"""Publish SNS message to trigger lambda again.
:param message: To repost the complete original message received when ASG terminating event was received.
:param topicARN: SNS topic to publish the message to.
"""
def publishToSNS(message, topicARN):
logger.info("Publish to SNS topic %s",topicARN)
snsResponse = snsClient.publish(
TopicArn=topicARN,
Message=json.dumps(message),
Subject='Publishing SNS message to invoke lambda again..'
)
return "published"
"""Check task status on the ECS container instance ID.
:param Ec2InstanceId: The EC2 instance ID is used to identify the cluster, container instances in cluster
"""
def checkContainerInstanceTaskStatus(Ec2InstanceId, clusterListResp):
containerInstanceId = None
clusterName = None
tmpMsgAppend = None
# Describe instance attributes and get the Clustername from userdata section which would have set ECS_CLUSTER name
ec2Resp = ec2Client.describe_instance_attribute(InstanceId=Ec2InstanceId, Attribute='userData')
userdataEncoded = ec2Resp['UserData']
userdataDecoded = base64.b64decode(userdataEncoded['Value'])
logger.debug("Describe instance attributes response %s", ec2Resp)
tmpList = userdataDecoded.split()
for token in tmpList:
if token.find("ECS_CLUSTER") > -1:
# Split and get the cluster name
clusterName = token.split('=')[1].replace("'", "")
logger.info("Cluster name %s",clusterName)
# Get list of container instance IDs from the clusterName
paginator = ecsClient.get_paginator('list_container_instances')
clusterListPages = paginator.paginate(cluster=clusterName)
for containerListResp in clusterListPages:
containerDetResp = ecsClient.describe_container_instances(cluster=clusterName, containerInstances=clusterListResp[
'containerInstanceArns'])
logger.debug("describe container instances response %s",containerDetResp)
for containerInstances in containerDetResp['containerInstances']:
logger.debug("Container Instance ARN: %s and ec2 Instance ID %s",containerInstances['containerInstanceArn'],
containerInstances['ec2InstanceId'])
if containerInstances['ec2InstanceId'] == Ec2InstanceId:
logger.info("Container instance ID of interest : %s",containerInstances['containerInstanceArn'])
containerInstanceId = containerInstances['containerInstanceArn']
                # Check if the instance state is set to DRAINING. If not, set it so the ECS cluster de-registers the instance and drains its tasks
containerStatus = containerInstances['status']
if containerStatus == 'DRAINING':
logger.info("Container ID %s with EC2 instance-id %s is draining tasks",containerInstanceId,
Ec2InstanceId)
tmpMsgAppend = {"containerInstanceId": containerInstanceId}
else:
# Make ECS API call to set the container status to DRAINING
logger.info("Make ECS API call to set the container status to DRAINING...")
ecsResponse = ecsClient.update_container_instances_state(cluster=clusterName,containerInstances=[containerInstanceId],status='DRAINING')
# When you set instance state to draining, append the containerInstanceID to the message as well
tmpMsgAppend = {"containerInstanceId": containerInstanceId}
break
if containerInstanceId is not None:
break
    # Using the container instance ID, get the list of tasks running on that instance.
if containerInstanceId != None:
# List tasks on the container instance ID, to get task Arns
listTaskResp = ecsClient.list_tasks(cluster=clusterName, containerInstance=containerInstanceId)
logger.debug("Container instance task list %s",listTaskResp['taskArns'])
# If the chosen instance has tasks
if len(listTaskResp['taskArns']) > 0:
logger.info("Tasks are on this instance...%s",Ec2InstanceId)
return 1, tmpMsgAppend
else:
logger.info("NO tasks are on this instance...%s",Ec2InstanceId)
return 0, tmpMsgAppend
else:
logger.info("NO tasks are on this instance....%s",Ec2InstanceId)
return 0, tmpMsgAppend
def lambda_handler(event, context):
line = event['Records'][0]['Sns']['Message']
message = json.loads(line)
Ec2InstanceId = message['EC2InstanceId']
asgGroupName = message['AutoScalingGroupName']
snsArn = event['Records'][0]['EventSubscriptionArn']
TopicArn = event['Records'][0]['Sns']['TopicArn']
lifecyclehookname = None
clusterName = None
tmpMsgAppend = None
completeHook = 0
logger.info("Lambda received the event %s",event)
logger.debug("records: %s",event['Records'][0])
logger.debug("sns: %s",event['Records'][0]['Sns'])
logger.debug("Message: %s",message)
logger.debug("Ec2 Instance Id %s ,%s",Ec2InstanceId, asgGroupName)
logger.debug("SNS ARN %s",snsArn)
# Describe instance attributes and get the Clustername from userdata section which would have set ECS_CLUSTER name
ec2Resp = ec2Client.describe_instance_attribute(InstanceId=Ec2InstanceId, Attribute='userData')
logger.debug("Describe instance attributes response %s",ec2Resp)
userdataEncoded = ec2Resp['UserData']
userdataDecoded = base64.b64decode(userdataEncoded['Value'])
tmpList = userdataDecoded.split()
for token in tmpList:
if token.find("ECS_CLUSTER") > -1:
# Split and get the cluster name
clusterName = token.split('=')[1].replace("'", "")
logger.debug("Cluster name %s",clusterName)
# Get list of container instance IDs from the clusterName
clusterListResp = ecsClient.list_container_instances(cluster=clusterName)
logger.debug("list container instances response %s",clusterListResp)
# If the event received is instance terminating...
if 'LifecycleTransition' in message.keys():
logger.debug("message autoscaling %s",message['LifecycleTransition'])
if message['LifecycleTransition'].find('autoscaling:EC2_INSTANCE_TERMINATING') > -1:
# Get lifecycle hook name
lifecycleHookName = message['LifecycleHookName']
logger.debug("Setting lifecycle hook name %s ",lifecycleHookName)
# Check if there are any tasks running on the instance
tasksRunning, tmpMsgAppend = checkContainerInstanceTaskStatus(Ec2InstanceId, clusterListResp)
logger.debug("Returned values received: %s ",tasksRunning)
if tmpMsgAppend != None:
message.update(tmpMsgAppend)
# If tasks are still running...
if tasksRunning == 1:
response = snsClient.list_subscriptions()
for key in response['Subscriptions']:
logger.info("Endpoint %s AND TopicArn %s and protocol %s ",key['Endpoint'], key['TopicArn'],
key['Protocol'])
if TopicArn == key['TopicArn'] and key['Protocol'] == 'lambda':
logger.info("TopicArn match, publishToSNS function...")
msgResponse = publishToSNS(message, key['TopicArn'])
logger.debug("msgResponse %s and time is %s",msgResponse, datetime.datetime)
# If tasks are NOT running...
elif tasksRunning == 0:
completeHook = 1
logger.debug("Setting lifecycle to complete;No tasks are running on instance, completing lifecycle action....")
try:
response = asgClient.complete_lifecycle_action(
LifecycleHookName=lifecycleHookName,
AutoScalingGroupName=asgGroupName,
LifecycleActionResult='CONTINUE',
InstanceId=Ec2InstanceId)
logger.info("Response received from complete_lifecycle_action %s",response)
                    logger.info("Completed lifecycle hook action")
                except Exception as e:
                    print(str(e))
| 49.68306
| 170
| 0.655741
|
1d8747f299030c9fc55f1eaed21b8aa6d79f9040
| 1,687
|
py
|
Python
|
dataviz/cereals.py
|
Udzu/pudzu
|
5a0302830b052fc54feba891eb7bf634957a9d90
|
[
"MIT"
] | 119
|
2017-07-22T15:02:30.000Z
|
2021-08-02T10:42:59.000Z
|
dataviz/cereals.py
|
Udzu/pudzu
|
5a0302830b052fc54feba891eb7bf634957a9d90
|
[
"MIT"
] | null | null | null |
dataviz/cereals.py
|
Udzu/pudzu
|
5a0302830b052fc54feba891eb7bf634957a9d90
|
[
"MIT"
] | 28
|
2017-08-04T14:28:41.000Z
|
2019-11-27T23:46:14.000Z
|
from pudzu.charts import *
from pudzu.sandbox.bamboo import *
df = pd.read_csv("datasets/cereals.csv").assign_rows(sort=lambda d: "ZZZ" if d["cereal"] == "Special K" else d["cereal"]).sort_values("sort")
data = pd.DataFrame(list(generate_batches((dict(row) for _,row in df.iterrows()), 5)))
default_img = "https://s-media-cache-ak0.pinimg.com/736x/0d/36/e7/0d36e7a476b06333d9fe9960572b66b9.jpg"
FONT = verdana
PALETTE = {"M": VegaPalette10.BLUE, "F": VegaPalette10.PINK }
def process(d):
return Image.from_column([
Image.new("RGBA", (180,32)).place(Image.from_text_bounded(d['cereal'], (180,32), 18, partial(FONT, bold=True), padding=2)),
Image.from_url_with_cache(get_non(d, "image", default_img)).resize((160,240)),
Image.from_text(d['mascot'], FONT(16, italics=True), padding=2)
])
grid = grid_chart(data, process, lambda d: "F" if d['cereal'] == "Special K" else "M", padding=10, yalign=0, group_rounded=True, group_padding=4, group_fg_colors=PALETTE, bg="white")
title = Image.from_text("20 breakfast cereal mascots by gender".upper(), FONT(40, bold=True))
footer = Image.from_multitext(
["Gender: ", Rectangle(20, PALETTE["M"]), " male ", Rectangle(20, PALETTE["F"]), " female(?)", Rectangle((150,0)), "* currently on leave ** previously a dog"],
[arial(16, bold=True), ..., arial(16), ..., arial(16), ..., arial(16)], img_offset=-5)
img = Image.from_column([title, grid, footer], bg="white", xalign=0.5, padding=10)
img.place(Image.from_text("/u/Udzu", font("arial", 16), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.convert("RGB").save("output/cereals.jpg")
| 64.884615
| 183
| 0.668643
|
1bbf0066e99c120868ab148165752931cb10d758
| 1,542
|
py
|
Python
|
Dfs.py
|
IlohPrincess/SearchAlgorithm
|
371fd077b0d49c13387e37fd92bd81c950ffecb6
|
[
"MIT"
] | null | null | null |
Dfs.py
|
IlohPrincess/SearchAlgorithm
|
371fd077b0d49c13387e37fd92bd81c950ffecb6
|
[
"MIT"
] | null | null | null |
Dfs.py
|
IlohPrincess/SearchAlgorithm
|
371fd077b0d49c13387e37fd92bd81c950ffecb6
|
[
"MIT"
] | null | null | null |
from pyamaze import maze, agent, textLabel, COLOR
from timeit import timeit
def SEARCHDFS(m):
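    # Depth-first search: `frontier` is used as a LIFO stack, `explored` prevents
    # revisiting cells, and `dfsPath` maps each discovered cell back to its parent so
    # the forward path from the start to the goal cell (2, 13) can be reconstructed.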
start=(m.rows, m.cols)
explored=[start]
frontier=[start]
dfsPath={}
while len(frontier)>0:
currentCell=frontier.pop()
if currentCell==(2,13):
break
for direction in 'ESNW':
if m.maze_map[currentCell][direction]==True:
if direction=='E':
NextCell=(currentCell[0],currentCell[1]+1)
elif direction=='W':
NextCell=(currentCell[0],currentCell[1]-1)
elif direction=='S':
NextCell=(currentCell[0]+1, currentCell[1])
elif direction=='N':
NextCell=(currentCell[0]-1, currentCell[1])
if NextCell in explored:
continue
explored.append(NextCell)
frontier.append(NextCell)
dfsPath[NextCell]=currentCell
fwdPath={}
cell=(2,13)
while cell!=start:
fwdPath[dfsPath[cell]]=cell
cell=dfsPath[cell]
return fwdPath
if __name__=='__main__':
m=maze(25,25)
m.CreateMaze(2,13, pattern='v',theme=COLOR.light, loopPercent=100)
path=SEARCHDFS(m)
a=agent(m, footprints=True, color= COLOR.red)
m.tracePath({a:path})
l=textLabel(m,'DFS Path Length',len(path)+1)
t=timeit(stmt='SEARCHDFS(m)',number=1000, globals=globals())
textLabel(m,'DFS EXECUTION Time',t)
m.run()
| 33.521739
| 71
| 0.549287
|
551d212e2fcf6702f59cf72e5efc82581357d8e6
| 2,634
|
py
|
Python
|
itdagene/app/stands/views.py
|
itdagene-ntnu/itdagene
|
b972cd3d803debccebbc33641397a39834b8d69a
|
[
"MIT"
] | 9
|
2018-10-17T20:58:09.000Z
|
2021-12-16T16:16:45.000Z
|
itdagene/app/stands/views.py
|
itdagene-ntnu/itdagene
|
b972cd3d803debccebbc33641397a39834b8d69a
|
[
"MIT"
] | 177
|
2018-10-27T18:15:56.000Z
|
2022-03-28T04:29:06.000Z
|
itdagene/app/stands/views.py
|
itdagene-ntnu/itdagene
|
b972cd3d803debccebbc33641397a39834b8d69a
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.decorators import permission_required
from django.contrib.messages import SUCCESS, add_message
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from itdagene.app.events.models import Event
from itdagene.app.stands.forms import DigitalStandForm
from itdagene.app.stands.models import DigitalStand
from itdagene.core.decorators import staff_required
@staff_required()
def list(request):
stands = DigitalStand.objects.all()
return render(
request, "stands/list.html", {"stands": stands, "title": _("Stands")},
)
@permission_required("stands.add_stand")
def add(request):
form = DigitalStandForm()
if request.method == "POST":
form = DigitalStandForm(request.POST)
if form.is_valid():
stand = form.save()
add_message(request, SUCCESS, _("Stand saved."))
return redirect(reverse("itdagene.stands.view", args=[stand.pk]))
return render(request, "stands/form.html", {"title": _("Add stand"), "form": form})
@staff_required()
def view(request, pk):
stand = get_object_or_404(DigitalStand, pk=pk)
stand_events = Event.objects.filter(stand=stand)
return render(
request,
"stands/view.html",
{
"stand": stand,
"stand_events": stand_events,
"title": _("Stand"),
"description": str(stand),
},
)
@permission_required("stands.change_stand")
def edit(request, pk):
stand = get_object_or_404(DigitalStand, pk=pk)
form = DigitalStandForm(instance=stand)
if request.method == "POST":
form = DigitalStandForm(request.POST, request.FILES, instance=stand)
if form.is_valid():
form.save()
add_message(request, SUCCESS, _("stand saved."))
return redirect(reverse("itdagene.stands.view", args=[stand.pk]))
return render(
request,
"stands/form.html",
{
"title": _("Edit stand"),
"form": form,
"description": str(stand),
"stand": stand,
},
)
@permission_required("stands.delete_stand")
def delete(request, pk):
stand = get_object_or_404(DigitalStand, pk=pk)
if request.method == "POST":
stand.delete()
add_message(request, SUCCESS, _("stand deleted."))
return redirect(reverse("itdagene.stands.list"))
return render(
request,
"stands/delete.html",
{"stand": stand, "title": _("Delete stand"), "description": str(stand)},
)
| 30.988235
| 87
| 0.64123
|
394c1e6ad5e7fd942f24a25c26f88952575c3c2c
| 4,812
|
py
|
Python
|
Sketchbots/sw/labqueue/support/big_counter.py
|
rlugojr/ChromeWebLab
|
60f964b3f283c15704b7a04b7bb50cb15791e2e4
|
[
"Apache-2.0"
] | 306
|
2015-01-09T14:03:44.000Z
|
2017-09-16T13:03:35.000Z
|
Sketchbots/sw/labqueue/support/big_counter.py
|
rlugojr/ChromeWebLab
|
60f964b3f283c15704b7a04b7bb50cb15791e2e4
|
[
"Apache-2.0"
] | 90
|
2019-03-26T05:36:00.000Z
|
2021-07-28T05:30:16.000Z
|
Sketchbots/sw/labqueue/support/big_counter.py
|
rlugojr/ChromeWebLab
|
60f964b3f283c15704b7a04b7bb50cb15791e2e4
|
[
"Apache-2.0"
] | 119
|
2015-01-26T15:04:33.000Z
|
2017-09-13T09:30:53.000Z
|
# Copyright 2013 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is based almost entirely off the now "classic" sharding counter
technique described by Brett Slatkin (and expanded by Joe Gregorio) in 2008.
"""
from google.appengine.api import memcache
from google.appengine.ext import db
import random
MIN_NUM_SHARDS = 200
class GeneralCounterShardConfig(db.Model):
"""Tracks the number of shards for each named counter."""
name = db.StringProperty(required=True)
num_shards = db.IntegerProperty(required=True, default=MIN_NUM_SHARDS)
class GeneralCounterShard(db.Model):
"""Shards for each named counter"""
name = db.StringProperty(required=True)
count = db.IntegerProperty(required=True, default=0)
class BigCounter(object):
"""This class allows access to an internally-sharded counter
which can scale more easily than simple datastore entity properties.
"""
__counter_name = None
__counter_config = None
def __init__(self, name):
"""Constructs an object through which the counter identified by
name can be incremeneted, decremenented and read.
:name:
The name of the counter
"""
if name is None:
raise Exception('The name argument must be specified')
self.__counter_name = name
def get_value(self):
"""Retrieve the value of the counter.
"""
total = memcache.get(self.__counter_name)
if total is None:
total = 0
for counter in GeneralCounterShard.all().filter('name = ', self.__counter_name):
total += counter.count
memcache.add(self.__counter_name, total, 60)
return total
def increment(self):
"""Increment the value of the counter.
"""
if self.__counter_config is None:
self.__counter_config = GeneralCounterShardConfig.get_or_insert(self.__counter_name, name=self.__counter_name)
# maintain minimum number of shards
if self.__counter_config.num_shards < MIN_NUM_SHARDS:
self.increase_shards(MIN_NUM_SHARDS)
def txn():
index = random.randint(0, self.__counter_config.num_shards - 1)
shard_name = self.__counter_name + str(index)
counter = GeneralCounterShard.get_by_key_name(shard_name)
if counter is None:
counter = GeneralCounterShard(key_name=shard_name, name=self.__counter_name)
counter.count += 1
counter.put()
db.run_in_transaction(txn)
# does nothing if the key does not exist
memcache.incr(self.__counter_name)
def decrement(self):
"""Decrement the value of the counter.
"""
if self.__counter_config is None:
self.__counter_config = GeneralCounterShardConfig.get_or_insert(self.__counter_name, name=self.__counter_name)
def txn():
index = random.randint(0, self.__counter_config.num_shards - 1)
shard_name = self.__counter_name + str(index)
counter = GeneralCounterShard.get_by_key_name(shard_name)
if counter is None:
counter = GeneralCounterShard(key_name=shard_name, name=self.__counter_name)
counter.count -= 1
counter.put()
db.run_in_transaction(txn)
# does nothing if the key does not exist
memcache.decr(self.__counter_name)
def increase_shards(self, num):
"""Increase the number of shards over which the counter value is stored.
Do this to increase write performance at the expense of read performance.
Will never decrease the number of shards.
:num:
How many shards to use. This will be the NEW total number of
shards, this is not the number by which to increase.
"""
if self.__counter_config is None:
self.__counter_config = GeneralCounterShardConfig.get_or_insert(self.__counter_name, name=self.__counter_name)
def txn():
if self.__counter_config.num_shards < num:
self.__counter_config.num_shards = num
self.__counter_config.put()
db.run_in_transaction(txn)
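
# Usage sketch (hypothetical counter name, not part of the original module): the same
# name always resolves to the same sharded counter, so callers only pass the name.
#
#   hits = BigCounter('page-hits')
#   hits.increment()
#   total = hits.get_value()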
| 37.59375
| 122
| 0.660848
|
a82a2185df32e52a9adc7124d9f131ac14000c70
| 10,266
|
py
|
Python
|
mmdet/apis/inference.py
|
dandelin/mmdetection
|
03e1c72f1bb6222bdf9af3bfe60946cf008c7143
|
[
"Apache-2.0"
] | null | null | null |
mmdet/apis/inference.py
|
dandelin/mmdetection
|
03e1c72f1bb6222bdf9af3bfe60946cf008c7143
|
[
"Apache-2.0"
] | null | null | null |
mmdet/apis/inference.py
|
dandelin/mmdetection
|
03e1c72f1bb6222bdf9af3bfe60946cf008c7143
|
[
"Apache-2.0"
] | null | null | null |
import warnings
import os
import ipdb
from skimage.transform import resize
from tensorboardX import SummaryWriter
import torch.nn.functional as F
import mmcv
import numpy as np
import torch
from mmcv.runner import load_checkpoint
from matplotlib import pyplot as plt
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from mmdet.core import get_classes
from mmdet.datasets import to_tensor
from mmdet.datasets.transforms import ImageTransform
from mmdet.models import build_detector
def init_detector(config, checkpoint=None, device="cuda:0"):
"""Initialize a detector from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
Returns:
nn.Module: The constructed detector.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError(
"config must be a filename or Config object, "
"but got {}".format(type(config))
)
config.model.pretrained = None
model = build_detector(config.model, test_cfg=config.test_cfg)
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint)
if "CLASSES" in checkpoint["meta"]:
model.CLASSES = checkpoint["meta"]["CLASSES"]
else:
warnings.warn(
"Class names are not saved in the checkpoint's "
"meta data, use COCO classes by default."
)
model.CLASSES = get_classes("coco")
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
def inference_detector(model, imgs, features=False):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
imgs (str/ndarray or list[str/ndarray]): Either image files or loaded
images.
Returns:
If imgs is a str, a generator will be returned, otherwise return the
detection results directly.
"""
cfg = model.cfg
img_transform = ImageTransform(
size_divisor=cfg.data.test.size_divisor, **cfg.img_norm_cfg
)
device = next(model.parameters()).device # model device
if not isinstance(imgs, list):
return _inference_single(model, imgs, img_transform, device, features=features)
else:
return _inference_generator(model, imgs, img_transform, device)
def _prepare_data(img, img_transform, cfg, device):
ori_shape = img.shape
img, img_shape, pad_shape, scale_factor = img_transform(
img,
scale=cfg.data.test.img_scale,
keep_ratio=cfg.data.test.get("resize_keep_ratio", True),
)
img = to_tensor(img).to(device).unsqueeze(0)
img_meta = [
dict(
ori_shape=ori_shape,
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=scale_factor,
flip=False,
)
]
return dict(img=[img], img_meta=[img_meta])
def _inference_single(model, img, img_transform, device, features=False):
img = mmcv.imread(img)
data = _prepare_data(img, img_transform, model.cfg, device)
with torch.no_grad():
result = model(return_loss=False, rescale=True, features=features, **data)
if not features:
return result
else:
return result, data
def _inference_generator(model, imgs, img_transform, device):
for img in imgs:
yield _inference_single(model, img, img_transform, device)
# TODO: merge this method with the one in BaseDetector
def show_result(
img_path,
result,
class_names,
attr_names,
score_thr=0.3,
out_file=None,
detailed=False,
interested=[],
):
"""Visualize the detection results on the image.
Args:
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
class_names (list[str] or tuple[str]): A list of class names.
score_thr (float): The threshold to visualize the bboxes and masks.
out_file (str, optional): If specified, the visualization result will
be written to the out file instead of shown in a window.
"""
assert isinstance(class_names, (tuple, list))
img = mmcv.imread(img_path)
bbox_result, attr_result = result
bboxes = np.vstack(bbox_result)
attrs = np.vstack(attr_result)
# draw bounding boxes
labels = [
np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
if detailed:
detailed_visualization(
img.copy(),
bboxes,
labels,
attrs,
class_names=class_names,
attr_names=attr_names,
score_thr=score_thr,
out_file=out_file,
img_path=img_path,
interested=interested,
)
visualize(
img.copy(),
bboxes,
labels,
attrs,
class_names=class_names,
attr_names=attr_names,
score_thr=score_thr,
out_file=out_file,
img_path=img_path,
interested=interested,
)
def visualize(
img,
bboxes,
labels,
attrs,
class_names=None,
attr_names=None,
score_thr=0,
out_file=None,
img_path="",
interested=[],
):
fig = plt.figure(dpi=300)
ax = fig.add_subplot(1, 1, 1)
ax.imshow(img[:, :, [2, 1, 0]])
    if score_thr > 0:
        assert bboxes.shape[1] == 5
        scores = bboxes[:, -1]
        inds = scores > score_thr
        bboxes = bboxes[inds, :]
        labels = labels[inds]
        attrs = attrs[inds, :]
        scores = scores[inds]
    else:
        # fall back to the raw detector scores (or ones) so `scores` is defined below
        scores = bboxes[:, -1] if bboxes.shape[1] == 5 else np.ones(len(bboxes))
for bbox, label, attr, score in zip(bboxes, labels, attrs, scores):
if len(interested) != 0:
if class_names[label] not in interested:
continue
bbox_int = bbox.astype(np.int32)
x, y, w, h = (
bbox_int[0],
bbox_int[1],
bbox_int[2] - bbox_int[0],
bbox_int[3] - bbox_int[1],
)
ax.add_patch(
plt.Rectangle(
(x, y), w, h, facecolor="none", edgecolor="red", linewidth=0.5
)
)
desc = f'[{score:.2f} {class_names[label]}] ({" ".join([attr_names[i] for i, sc in enumerate(attr) if sc > 0.5])})'
bbox_style = {"facecolor": "white", "alpha": 0.5, "pad": 0}
if len(interested) == 0:
ax.text(x, y, desc, style="italic", bbox=bbox_style, fontsize=4)
plt.autoscale()
if out_file is None:
os.makedirs(f"visualizations/out", exist_ok=True)
plt.savefig(
f"visualizations/out/{img_path.split('/')[-1]}_bbox_{len(bboxes)}.full.png",
dpi=720,
)
else:
plt.savefig(f"{out_file}.full.png", dpi=720)
plt.close(fig)
def detailed_visualization(
img,
bboxes,
labels,
attrs,
class_names=None,
attr_names=None,
score_thr=0,
out_file=None,
img_path="",
interested=[],
):
fig = plt.figure(figsize=(10, 100))
    if score_thr > 0:
        assert bboxes.shape[1] == 5
        scores = bboxes[:, -1]
        inds = scores > score_thr
        bboxes = bboxes[inds, :]
        labels = labels[inds]
        attrs = attrs[inds, :]
        scores = scores[inds]
    else:
        # fall back to the raw detector scores (or ones) so `scores` is defined below
        scores = bboxes[:, -1] if bboxes.shape[1] == 5 else np.ones(len(bboxes))
ax = fig.add_subplot(len(bboxes) + 1, 1, 1)
# ax.imshow(resize(img[:, :, [2, 1, 0]], (224, 224), anti_aliasing=True))
ax.imshow(img[:, :, [2, 1, 0]])
for i, (bbox, label, attr, score) in enumerate(zip(bboxes, labels, attrs, scores)):
if len(interested) != 0:
if class_names[label] not in interested:
continue
ax = fig.add_subplot(len(bboxes) + 1, 1, i + 2)
bbox_int = bbox.astype(np.int32)
x, y, w, h = (
bbox_int[0],
bbox_int[1],
bbox_int[2] - bbox_int[0],
bbox_int[3] - bbox_int[1],
)
cropped = img[y : y + h, x : x + w, [2, 1, 0]]
# ax.imshow(resize(cropped, (224, 224), anti_aliasing=True))
ax.imshow(cropped)
desc = f'[{score:.2f} {class_names[label]}] ({" ".join([attr_names[i] for i, sc in enumerate(attr) if sc > 0.5])})'
bbox_style = {"facecolor": "white", "alpha": 0.5, "pad": 0}
ax.text(0, 0, desc, style="italic", bbox=bbox_style, fontsize=12)
plt.tight_layout()
# plt.autoscale()
if out_file is None:
os.makedirs(f"visualizations/out", exist_ok=True)
plt.savefig(
f"visualizations/out/{img_path.split('/')[-1]}_bbox_{len(bboxes)}.part.png"
)
else:
plt.savefig(f"{out_file}.part.png")
plt.close(fig)
def visualize_embeddings(img_path, result, class_names, feats, out_file=None):
assert isinstance(class_names, (tuple, list))
img = mmcv.imread(img_path)
bbox_result, attr_result, feat_result = result
bboxes = np.vstack(bbox_result)
attrs = np.vstack(attr_result)
# draw bounding boxes
labels = [
np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
writer = SummaryWriter(f"{out_file}")
cropped_images, nl_labels = list(), list()
for i, (bbox, label, attr) in enumerate(zip(bboxes, labels, attrs)):
bbox_int = bbox.astype(np.int32)
x, y, w, h = (
bbox_int[0],
bbox_int[1],
bbox_int[2] - bbox_int[0],
bbox_int[3] - bbox_int[1],
)
cropped = img[y : y + h, x : x + w, [2, 1, 0]]
cropped = torch.from_numpy(cropped).permute(2, 0, 1).float() / 255
cropped_images.append(F.adaptive_avg_pool2d(cropped.unsqueeze(0), 128))
nl_labels.append(class_names[label])
cropped = torch.cat(cropped_images)
writer.add_embedding(
feats,
metadata=nl_labels,
label_img=cropped,
global_step=0,
tag=f"valid/embedding",
)
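
# Usage sketch (hypothetical paths, not part of the original module):
#
#   model = init_detector('configs/my_cfg.py', 'checkpoints/my_model.pth', device='cuda:0')
#   result = inference_detector(model, 'demo/demo.jpg')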
| 30.194118
| 123
| 0.601695
|
7d98bccf220ff0e7e2f21c13da26f17fb330c3a6
| 650
|
py
|
Python
|
ami_test.py
|
Swiftaim/simpleami
|
9beaf1386d64bce525d6b3d9fa9936d627af455f
|
[
"MIT"
] | 1
|
2019-04-22T11:35:17.000Z
|
2019-04-22T11:35:17.000Z
|
ami_test.py
|
Swiftaim/simpleami
|
9beaf1386d64bce525d6b3d9fa9936d627af455f
|
[
"MIT"
] | null | null | null |
ami_test.py
|
Swiftaim/simpleami
|
9beaf1386d64bce525d6b3d9fa9936d627af455f
|
[
"MIT"
] | null | null | null |
from simpleami import *
from collections import namedtuple
def originate_action():
options = OriginateOptions(type='Local',
number='8888',
channel='trunk90-in',
timeout='10',
caller_id='simpleami',
context='auto_test_client',
extension='1234',
action_id='simpleami-1',
priority='1')
return originate_template(options)
def main():
try:
ami = AMISvcHandler('192.168.40.196', 5038)
print(ami.connect('clearit', 'clearit').decode())
print(ami.send_action(originate_action()).decode())
except Exception as exc:
print(exc)
if __name__ == '__main__':
main()
| 22.413793
| 53
| 0.64
|
ad98e9d19a7c08b6d99d34735e8f2bd1e8e242c5
| 2,362
|
py
|
Python
|
toughio/_cli/_merge.py
|
codacy-badger/toughio
|
8d4f3d8408d5507a83f65e7f393b13be08d42aca
|
[
"MIT"
] | null | null | null |
toughio/_cli/_merge.py
|
codacy-badger/toughio
|
8d4f3d8408d5507a83f65e7f393b13be08d42aca
|
[
"MIT"
] | null | null | null |
toughio/_cli/_merge.py
|
codacy-badger/toughio
|
8d4f3d8408d5507a83f65e7f393b13be08d42aca
|
[
"MIT"
] | null | null | null |
__all__ = [
"merge",
]
def merge(argv=None):
import os
parser = _get_parser()
args = parser.parse_args(argv)
# Check that input, MESH and INCON files exist
head = os.path.split(args.infile)[0]
mesh_filename = head + ("/" if head else "") + "MESH"
incon_filename = head + ("/" if head else "") + "INCON"
if not os.path.isfile(args.infile):
raise ValueError("File '{}' not found.".format(args.infile))
if not os.path.isfile(mesh_filename):
raise ValueError("MESH file not found.")
incon_exists = os.path.isfile(incon_filename)
# Buffer input file
with open(args.infile, "r") as f:
input_file = list(f)
# Check that input file has at least blocks ROCKS, PARAM, ENDFI or ENDCY
count = 0
for line in input_file:
count += int(line.upper()[:5] in {"ROCKS", "PARAM", "ENDFI", "ENDCY"})
if count < 3:
raise ValueError("Invalid input file '{}'.".format(args.infile))
# Buffer MESH
with open(mesh_filename, "r") as f:
mesh_file = list(f)
if not mesh_file[0].startswith("ELEME"):
raise ValueError("Invalid MESH file.")
# Buffer INCON if exist
if incon_exists:
with open(incon_filename, "r") as f:
incon_file = list(f)
if not incon_file[0].startswith("INCON"):
raise ValueError("Invalid INCON file.")
# Locate ENDFI or ENDCY
for i, line in enumerate(input_file):
if line.upper()[:5] in {"ENDFI", "ENDCY"}:
break
# Buffer output file
output_file = input_file[:i]
output_file += mesh_file
if incon_exists:
output_file += incon_file
output_file += input_file[i:]
# Write output file
with open(args.outfile, "w") as f:
for line in output_file:
f.write(line)
def _get_parser():
import argparse
# Initialize parser
parser = argparse.ArgumentParser(
description=(
"Merge input file, MESH and/or INCON into a single file. "
"The files must be in the same directory."
),
formatter_class=argparse.RawTextHelpFormatter,
)
# Input file
parser.add_argument(
"infile", type=str, help="TOUGH input file",
)
# Output file
parser.add_argument("outfile", type=str, help="Merged TOUGH input file")
return parser
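
# Usage sketch (hypothetical paths, not part of the original module); MESH and, if
# present, INCON are read from the same directory as the input file:
#
#   merge(["path/to/flow.inp", "path/to/flow_merged.inp"])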
| 27.465116
| 78
| 0.609653
|
e339142834d7524754044c0c6fa67a5a354b8069
| 1,063
|
py
|
Python
|
convenience/make_image_single_wavelength.py
|
smlower/powderday
|
99e7cec28bfbcba40ec1cff367fa564f51d9b62c
|
[
"BSD-3-Clause"
] | 18
|
2019-09-04T09:48:54.000Z
|
2022-02-08T20:45:19.000Z
|
convenience/make_image_single_wavelength.py
|
smlower/powderday
|
99e7cec28bfbcba40ec1cff367fa564f51d9b62c
|
[
"BSD-3-Clause"
] | 79
|
2019-09-05T15:09:02.000Z
|
2022-02-25T13:29:51.000Z
|
convenience/make_image_single_wavelength.py
|
smlower/powderday
|
99e7cec28bfbcba40ec1cff367fa564f51d9b62c
|
[
"BSD-3-Clause"
] | 14
|
2019-08-30T18:24:52.000Z
|
2021-08-05T15:33:13.000Z
|
import numpy as np
import matplotlib.pyplot as plt
from hyperion.model import ModelOutput
import astropy.units as u
# ------------------------
# modifiable header
# ------------------------
m = ModelOutput('/Users/desika/Dropbox/powderday/verification/gadget/example.200.rtout.image')
wav = 200 # micron
# ------------------------
# Get the image from the ModelOutput object
image = m.get_image(units='ergs/s')
# Open figure and create axes
fig = plt.figure()
ax = fig.add_subplot(111)
# Find the closest wavelength
iwav = np.argmin(np.abs(wav - image.wav))
# Calculate the image width in kpc
w = image.x_max * u.cm
w = w.to(u.kpc)
# plot the beast
cax = ax.imshow(np.log(image.val[0, :, :, iwav]), cmap=plt.cm.viridis,
origin='lower', extent=[-w.value, w.value, -w.value, w.value])
# Finalize the plot
ax.tick_params(axis='both', which='major', labelsize=10)
ax.set_xlabel('x (kpc)')
ax.set_ylabel('y (kpc)')
plt.colorbar(cax, label='log Luminosity (ergs/s)', format='%.0e')
fig.savefig('pd_image.png', bbox_inches='tight', dpi=150)
| 25.309524
| 94
| 0.650988
|
e57aed14fb739c4933ba40bba57fcc84f1b89192
| 239
|
py
|
Python
|
examples/rq/app.py
|
BrianHicks/em
|
19a86c2392b136c9e857000798ccaa525aa0ed84
|
[
"MIT"
] | 6
|
2015-05-10T14:09:54.000Z
|
2021-01-04T10:09:38.000Z
|
examples/rq/app.py
|
techdragon/emit
|
19a86c2392b136c9e857000798ccaa525aa0ed84
|
[
"MIT"
] | null | null | null |
examples/rq/app.py
|
techdragon/emit
|
19a86c2392b136c9e857000798ccaa525aa0ed84
|
[
"MIT"
] | 3
|
2015-04-04T15:37:55.000Z
|
2015-08-21T08:08:45.000Z
|
'simple rq app'
from redis import Redis
from emit.router.rq import RQRouter
import logging
router = RQRouter(redis_connection=Redis(), node_modules=['tasks'])
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
| 23.9
| 76
| 0.774059
|
86dd282ca9aa91dcaefe3d562e3b72c0656bd001
| 3,057
|
py
|
Python
|
app/profiles/schemas/queries.py
|
MrPeker/acikkaynak-service
|
21c3f2faaa84342d2fa95709293bc84d1e2a23ae
|
[
"Apache-2.0"
] | 5
|
2021-02-28T22:29:13.000Z
|
2021-11-29T00:24:28.000Z
|
app/profiles/schemas/queries.py
|
MrPeker/acikkaynak-service
|
21c3f2faaa84342d2fa95709293bc84d1e2a23ae
|
[
"Apache-2.0"
] | null | null | null |
app/profiles/schemas/queries.py
|
MrPeker/acikkaynak-service
|
21c3f2faaa84342d2fa95709293bc84d1e2a23ae
|
[
"Apache-2.0"
] | 3
|
2021-03-03T19:56:30.000Z
|
2021-03-06T22:10:35.000Z
|
import graphene
from graphene_django.filter import DjangoFilterConnectionField
from graphene_django.types import DjangoObjectType
from app.common.schemas.languages import LanguageNode
from app.profiles.models import Profile, ProfileAchievement, ProfileTag
# queries
class ProfileTagNode(DjangoObjectType):
# id = graphene.ID(source="pk", required=True)
class Meta:
model = ProfileTag
exclude = ("uuid",)
interfaces = (graphene.relay.Node,)
filter_fields = {
"slug": ["exact", "icontains", "istartswith"],
"name": ["exact", "icontains", "istartswith"],
}
class ProfileAchievementNode(DjangoObjectType):
# id = graphene.ID(source="pk", required=True)
class Meta:
model = ProfileAchievement
exclude = ("uuid",)
interfaces = (graphene.relay.Node,)
filter_fields = {
"type": ["exact"],
"earned_at": ["exact", "lte", "gte"],
}
class ProfileNode(DjangoObjectType):
class Meta:
model = Profile
exclude = ("uuid",)
interfaces = (graphene.relay.Node,)
filter_fields = {
"first_name": ["exact", "icontains", "istartswith"],
"last_name": ["exact", "icontains", "istartswith"],
"email": ["exact", "icontains", "istartswith"],
"phone": ["exact", "icontains", "istartswith"],
}
class Query(graphene.ObjectType):
profile = graphene.relay.Node.Field(ProfileNode)
profile_by_uuid = graphene.Field(ProfileNode, uuid=graphene.UUID(required=True))
profile_by_slug = graphene.Field(ProfileNode, slug=graphene.String(required=True))
profiles_all = DjangoFilterConnectionField(ProfileNode)
profile_tag = graphene.relay.Node.Field(ProfileTagNode)
profile_tag_by_uuid = graphene.Field(ProfileTagNode, uuid=graphene.UUID(required=True))
profile_tag_by_slug = graphene.Field(ProfileTagNode, slug=graphene.String(required=True))
profile_tags_all = DjangoFilterConnectionField(ProfileTagNode)
@staticmethod
# pylint:disable=unused-argument
def resolve_profile_by_uuid(parent, info, uuid):
try:
return Profile.objects.get(uuid=uuid)
except Profile.DoesNotExist:
return None
@staticmethod
# pylint:disable=unused-argument
def resolve_profile_by_slug(parent, info, slug):
try:
return Profile.objects.get(slug=slug)
except Profile.DoesNotExist:
return None
@staticmethod
# pylint:disable=unused-argument
def resolve_profile_tag_by_uuid(parent, info, uuid):
try:
return ProfileTag.objects.get(uuid=uuid)
except ProfileTag.DoesNotExist:
return None
@staticmethod
# pylint:disable=unused-argument
def resolve_profile_tag_by_slug(parent, info, slug):
try:
return ProfileTag.objects.get(slug=slug)
except ProfileTag.DoesNotExist:
return None
# mutations
class Mutation(graphene.ObjectType):
pass
| 31.193878
| 93
| 0.660124
|
fa413ff1fca80f851da8b26fb111de877796d24f
| 1,116
|
py
|
Python
|
Programmers/C30L43238/C30L43238.py
|
iamGreedy/CodingTest
|
30fc5d73102de2d5c9d8a4f8d7047b8197dc99b7
|
[
"MIT"
] | null | null | null |
Programmers/C30L43238/C30L43238.py
|
iamGreedy/CodingTest
|
30fc5d73102de2d5c9d8a4f8d7047b8197dc99b7
|
[
"MIT"
] | null | null | null |
Programmers/C30L43238/C30L43238.py
|
iamGreedy/CodingTest
|
30fc5d73102de2d5c9d8a4f8d7047b8197dc99b7
|
[
"MIT"
] | null | null | null |
# %%
def prev_sol(n, times):
tt = [t * (n // len(times) + (1 if i < n % len(times) else 0))
for i, t in enumerate(times)]
while (True):
cmxi, cmx = max(enumerate(tt), key=lambda a: a[1])
nmni, nmn = min(
enumerate(map(lambda ab: ab[0] + ab[1], zip(times, tt))), key=lambda a: a[1])
if cmx > nmn:
tt[cmxi] -= times[cmxi]
tt[nmni] += times[nmni]
else:
break
return max(tt)
def solution(n, times):
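    # Binary search on the answer: find the smallest time T for which the examiners can
    # collectively process at least n people, i.e. sum(T // t for t in times) >= n.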
mnt, mxt = 0, max(times) * n
while (mnt < mxt):
mdt = (mxt + mnt) // 2
nxt = sum(map(lambda t: mdt // t, times))
if nxt >= n:
mxt = mdt
else:
mnt = mdt + 1
return mnt
cases = [
((6, [7, 10]), 28),
# ((10, [6, 7, 10]), 28),
# ((10, [1]), 10),
# ((1000, [1, 10, 100]), 901),
# ((20, [1, 2, 10]), 13),
]
for (args, ret) in cases:
print(
f'{f"solution({args[0]}, {args[1]})":40} : expected `{ret}`, real `{solution(*args)}`')
# print(f'prev sol({args[0]}, {args[1]}) : expected `{ret}`, real `{prev_sol(*args)}`')
| 27.219512
| 95
| 0.450717
|
9871c0e19bc88f145558947598ade2726640b61c
| 990
|
py
|
Python
|
loggingModule.py
|
a2gs/pythonStudy
|
e790e223a05fd50a5bcaf1240ef24ff60f361cdd
|
[
"MIT"
] | null | null | null |
loggingModule.py
|
a2gs/pythonStudy
|
e790e223a05fd50a5bcaf1240ef24ff60f361cdd
|
[
"MIT"
] | null | null | null |
loggingModule.py
|
a2gs/pythonStudy
|
e790e223a05fd50a5bcaf1240ef24ff60f361cdd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
def func():
logging.debug('This is a debug message')
logging.info('This is an info message')
logging.warning('This is a warning message')
logging.error('This is an error message')
logging.critical('This is a critical message')
# Configure the root logger before any logging call: logging.exception() below would
# otherwise auto-configure a default stderr handler, and this basicConfig() call would
# then be a silent no-op (messages would never reach log.text).
logging.basicConfig(filename='log.text', filemode='a', level=logging.DEBUG, format='%(asctime)s %(msecs)d - %(process)d - %(levelname)s - %(filename)s:%(funcName)s:%(lineno)d - %(message)s', datefmt='%Y%m%d %H%M%S')

try:
    c = 1 / 0
except Exception as e:
    logging.exception("Exception occurred", exc_info=True)  # exc_info toggles the traceback in this record

func()
logging.shutdown()
# Log rotation:
#
# from logging.handlers import RotatingFileHandler
#
# logging.basicConfig(
# handlers=[RotatingFileHandler('./my_log.log', maxBytes=200000, backupCount=2)],
# level=logging.DEBUG,
# format="[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s",
# datefmt='%Y-%m-%dT%H:%M:%S')
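# A per-module logger (a minimal sketch; the handler and format choices below are
# illustrative assumptions, not part of the original script):
#
# logger = logging.getLogger(__name__)
# logger.setLevel(logging.INFO)
# handler = logging.StreamHandler()
# handler.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s'))
# logger.addHandler(handler)
# logger.info('message from a named logger')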
| 31.935484
| 215
| 0.689899
|
b9282cabb8e70f56150f81861062172041f13ba3
| 152
|
py
|
Python
|
ExpandAlphabets.py
|
hemanthsoma/Coding-Practice
|
82992453426103c5198461886cf014cff3a75813
|
[
"Unlicense"
] | 2
|
2020-09-17T14:30:06.000Z
|
2020-09-20T17:11:06.000Z
|
ExpandAlphabets.py
|
hemanthsoma/CodingSolutions
|
82992453426103c5198461886cf014cff3a75813
|
[
"Unlicense"
] | null | null | null |
ExpandAlphabets.py
|
hemanthsoma/CodingSolutions
|
82992453426103c5198461886cf014cff3a75813
|
[
"Unlicense"
] | null | null | null |
import re
m = re.split(r'(\d+)', input().strip())
for i in range(0,len(m)-1,2):
if i%2==0:
print(m[i]*int(m[i+1]),end="")
| 25.333333
| 57
| 0.434211
|
858525df545e11aa9320e80d666eb0b1d0159c11
| 11,894
|
py
|
Python
|
mmdet/core/anchor/guided_anchor_target.py
|
ar90n/ttfnet
|
99dbfa888f90c8161c2c1666b2d17cdb144dbc30
|
[
"Apache-2.0"
] | null | null | null |
mmdet/core/anchor/guided_anchor_target.py
|
ar90n/ttfnet
|
99dbfa888f90c8161c2c1666b2d17cdb144dbc30
|
[
"Apache-2.0"
] | null | null | null |
mmdet/core/anchor/guided_anchor_target.py
|
ar90n/ttfnet
|
99dbfa888f90c8161c2c1666b2d17cdb144dbc30
|
[
"Apache-2.0"
] | null | null | null |
import torch
from ..bbox import PseudoSampler, build_assigner, build_sampler
from ..utils import multi_apply, unmap
def calc_region(bbox, ratio, featmap_size=None, use_round=True):
"""Calculate a proportional bbox region.
    The bbox center is fixed and the new h' and w' are h * ratio and w * ratio.
Args:
bbox (Tensor): Bboxes to calculate regions, shape (n, 4)
ratio (float): Ratio of the output region.
featmap_size (tuple): Feature map size used for clipping the boundary.
use_round (bool): whether to round the results.
Returns:
tuple: x1, y1, x2, y2
"""
x1 = (1 - ratio) * bbox[0] + ratio * bbox[2]
y1 = (1 - ratio) * bbox[1] + ratio * bbox[3]
x2 = ratio * bbox[0] + (1 - ratio) * bbox[2]
y2 = ratio * bbox[1] + (1 - ratio) * bbox[3]
if use_round:
x1, y1, x2, y2 = [torch.round(x).long() for x in [x1, y1, x2, y2]]
if featmap_size is not None:
x1 = x1.clamp(min=0, max=featmap_size[1] - 1)
y1 = y1.clamp(min=0, max=featmap_size[0] - 1)
x2 = x2.clamp(min=0, max=featmap_size[1] - 1)
y2 = y2.clamp(min=0, max=featmap_size[0] - 1)
return (x1, y1, x2, y2)
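# Illustrative call (values assumed for the example): for bbox = (0, 0, 100, 100) and
# ratio = 0.2, the returned region is (20, 20, 80, 80), i.e. the box shrunk towards its
# center by the given ratio on each side.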
def ga_loc_target(gt_bboxes_list,
featmap_sizes,
anchor_scale,
anchor_strides,
center_ratio=0.2,
ignore_ratio=0.5):
"""Compute location targets for guided anchoring.
Each feature map is divided into positive, negative and ignore regions.
- positive regions: target 1, weight 1
- ignore regions: target 0, weight 0
- negative regions: target 0, weight 0.1
Args:
gt_bboxes_list (list[Tensor]): Gt bboxes of each image.
featmap_sizes (list[tuple]): Multi level sizes of each feature maps.
anchor_scale (int): Anchor scale.
anchor_strides ([list[int]]): Multi level anchor strides.
center_ratio (float): Ratio of center region.
ignore_ratio (float): Ratio of ignore region.
Returns:
tuple
"""
img_per_gpu = len(gt_bboxes_list)
num_lvls = len(featmap_sizes)
r1 = (1 - center_ratio) / 2
r2 = (1 - ignore_ratio) / 2
all_loc_targets = []
all_loc_weights = []
all_ignore_map = []
for lvl_id in range(num_lvls):
h, w = featmap_sizes[lvl_id]
loc_targets = torch.zeros(
img_per_gpu,
1,
h,
w,
device=gt_bboxes_list[0].device,
dtype=torch.float32)
loc_weights = torch.full_like(loc_targets, -1)
ignore_map = torch.zeros_like(loc_targets)
all_loc_targets.append(loc_targets)
all_loc_weights.append(loc_weights)
all_ignore_map.append(ignore_map)
for img_id in range(img_per_gpu):
gt_bboxes = gt_bboxes_list[img_id]
scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) *
(gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1))
min_anchor_size = scale.new_full(
(1, ), float(anchor_scale * anchor_strides[0]))
# assign gt bboxes to different feature levels w.r.t. their scales
target_lvls = torch.floor(
torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)
target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()
for gt_id in range(gt_bboxes.size(0)):
lvl = target_lvls[gt_id].item()
# rescaled to corresponding feature map
gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl]
# calculate ignore regions
ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(
gt_, r2, featmap_sizes[lvl])
# calculate positive (center) regions
ctr_x1, ctr_y1, ctr_x2, ctr_y2 = calc_region(
gt_, r1, featmap_sizes[lvl])
all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 +
1] = 1
all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 +
1, ignore_x1:ignore_x2 + 1] = 0
all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 +
1] = 1
# calculate ignore map on nearby low level feature
if lvl > 0:
d_lvl = lvl - 1
# rescaled to corresponding feature map
gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl]
ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(
gt_, r2, featmap_sizes[d_lvl])
all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 +
1, ignore_x1:ignore_x2 + 1] = 1
# calculate ignore map on nearby high level feature
if lvl < num_lvls - 1:
u_lvl = lvl + 1
# rescaled to corresponding feature map
gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl]
ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(
gt_, r2, featmap_sizes[u_lvl])
all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 +
1, ignore_x1:ignore_x2 + 1] = 1
for lvl_id in range(num_lvls):
# ignore negative regions w.r.t. ignore map
all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0)
& (all_ignore_map[lvl_id] > 0)] = 0
# set negative regions with weight 0.1
all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1
# loc average factor to balance loss
loc_avg_factor = sum(
[t.size(0) * t.size(-1) * t.size(-2) for t in all_loc_targets]) / 200
return all_loc_targets, all_loc_weights, loc_avg_factor
def ga_shape_target(approx_list,
inside_flag_list,
square_list,
gt_bboxes_list,
img_metas,
approxs_per_octave,
cfg,
gt_bboxes_ignore_list=None,
sampling=True,
unmap_outputs=True):
"""Compute guided anchoring targets.
Args:
approx_list (list[list]): Multi level approxs of each image.
inside_flag_list (list[list]): Multi level inside flags of each image.
square_list (list[list]): Multi level squares of each image.
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
img_metas (list[dict]): Meta info of each image.
approxs_per_octave (int): number of approxs per octave
cfg (dict): RPN train configs.
gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes.
sampling (bool): sampling or not.
unmap_outputs (bool): unmap outputs or not.
Returns:
tuple
"""
num_imgs = len(img_metas)
assert len(approx_list) == len(inside_flag_list) == len(
square_list) == num_imgs
# anchor number of multi levels
num_level_squares = [squares.size(0) for squares in square_list[0]]
# concat all level anchors and flags to a single tensor
inside_flag_flat_list = []
approx_flat_list = []
square_flat_list = []
for i in range(num_imgs):
assert len(square_list[i]) == len(inside_flag_list[i])
inside_flag_flat_list.append(torch.cat(inside_flag_list[i]))
approx_flat_list.append(torch.cat(approx_list[i]))
square_flat_list.append(torch.cat(square_list[i]))
# compute targets for each image
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
(all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list,
neg_inds_list) = multi_apply(
ga_shape_target_single,
approx_flat_list,
inside_flag_flat_list,
square_flat_list,
gt_bboxes_list,
gt_bboxes_ignore_list,
img_metas,
approxs_per_octave=approxs_per_octave,
cfg=cfg,
sampling=sampling,
unmap_outputs=unmap_outputs)
# no valid anchors
if any([bbox_anchors is None for bbox_anchors in all_bbox_anchors]):
return None
# sampled anchors of all images
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
# split targets to a list w.r.t. multiple levels
bbox_anchors_list = images_to_levels(all_bbox_anchors, num_level_squares)
bbox_gts_list = images_to_levels(all_bbox_gts, num_level_squares)
bbox_weights_list = images_to_levels(all_bbox_weights, num_level_squares)
return (bbox_anchors_list, bbox_gts_list, bbox_weights_list, num_total_pos,
num_total_neg)
def images_to_levels(target, num_level_anchors):
"""Convert targets by image to targets by feature level.
[target_img0, target_img1] -> [target_level0, target_level1, ...]
"""
target = torch.stack(target, 0)
level_targets = []
start = 0
for n in num_level_anchors:
end = start + n
level_targets.append(target[:, start:end].squeeze(0))
start = end
return level_targets
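# Example (shapes assumed for illustration): with two images, each carrying targets for
# 5 anchors, and num_level_anchors = [3, 2], the stacked (2, 5, ...) tensor is sliced
# into a level-0 block covering anchors 0:3 and a level-1 block covering anchors 3:5.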
def ga_shape_target_single(flat_approxs,
inside_flags,
flat_squares,
gt_bboxes,
gt_bboxes_ignore,
img_meta,
approxs_per_octave,
cfg,
sampling=True,
unmap_outputs=True):
"""Compute guided anchoring targets.
    This function returns sampled anchors and gt bboxes directly
    rather than calculating regression targets.
Args:
flat_approxs (Tensor): flat approxs of a single image,
shape (n, 4)
inside_flags (Tensor): inside flags of a single image,
shape (n, ).
flat_squares (Tensor): flat squares of a single image,
shape (approxs_per_octave * n, 4)
gt_bboxes (Tensor): Ground truth bboxes of a single image.
img_meta (dict): Meta info of a single image.
approxs_per_octave (int): number of approxs per octave
cfg (dict): RPN train configs.
sampling (bool): sampling or not.
unmap_outputs (bool): unmap outputs or not.
Returns:
tuple
"""
if not inside_flags.any():
        return (None, ) * 5
# assign gt and sample anchors
expand_inside_flags = inside_flags[:, None].expand(
-1, approxs_per_octave).reshape(-1)
approxs = flat_approxs[expand_inside_flags, :]
squares = flat_squares[inside_flags, :]
bbox_assigner = build_assigner(cfg.ga_assigner)
assign_result = bbox_assigner.assign(approxs, squares, approxs_per_octave,
gt_bboxes, gt_bboxes_ignore)
if sampling:
bbox_sampler = build_sampler(cfg.ga_sampler)
else:
bbox_sampler = PseudoSampler()
sampling_result = bbox_sampler.sample(assign_result, squares, gt_bboxes)
bbox_anchors = torch.zeros_like(squares)
bbox_gts = torch.zeros_like(squares)
bbox_weights = torch.zeros_like(squares)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
bbox_anchors[pos_inds, :] = sampling_result.pos_bboxes
bbox_gts[pos_inds, :] = sampling_result.pos_gt_bboxes
bbox_weights[pos_inds, :] = 1.0
# map up to original set of anchors
if unmap_outputs:
num_total_anchors = flat_squares.size(0)
bbox_anchors = unmap(bbox_anchors, num_total_anchors, inside_flags)
bbox_gts = unmap(bbox_gts, num_total_anchors, inside_flags)
bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
return (bbox_anchors, bbox_gts, bbox_weights, pos_inds, neg_inds)
| 40.872852
| 79
| 0.611148
|
3c02b1e86e3ba156c6d1cd2a8370a7957f7a1658
| 2,761
|
py
|
Python
|
src/third_party/wiredtiger/test/suite/test_txn11.py
|
mwhudson/mongo
|
914bbbd26a686e032fdddec964b109ea78c6e6f6
|
[
"Apache-2.0"
] | 14
|
2019-01-11T05:01:29.000Z
|
2021-11-01T00:39:46.000Z
|
src/third_party/wiredtiger/test/suite/test_txn11.py
|
mwhudson/mongo
|
914bbbd26a686e032fdddec964b109ea78c6e6f6
|
[
"Apache-2.0"
] | 1
|
2022-03-05T02:55:28.000Z
|
2022-03-05T05:28:00.000Z
|
src/third_party/wiredtiger/test/suite/test_txn11.py
|
mwhudson/mongo
|
914bbbd26a686e032fdddec964b109ea78c6e6f6
|
[
"Apache-2.0"
] | 7
|
2019-02-08T16:28:36.000Z
|
2021-05-08T14:25:47.000Z
|
#!/usr/bin/env python
#
# Public Domain 2014-2016 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# test_txn11.py
# Transactions: Empty checkpoints and log archiving
import fnmatch, os, time
from suite_subprocess import suite_subprocess
from wtdataset import SimpleDataSet
import wttest
class test_txn11(wttest.WiredTigerTestCase, suite_subprocess):
archive = 'true'
conn_config = 'verbose=[transaction]'
logmax = "100K"
nrows = 700
tablename = 'test_txn11'
source_uri = 'table:' + tablename + "_src"
uri = 'table:' + tablename
# Turn on logging for this test.
def conn_config(self):
return 'log=(archive=%s,' % self.archive + \
'enabled,file_max=%s,prealloc=false),' % self.logmax + \
'transaction_sync=(enabled=false),'
def run_checkpoints(self):
orig_logs = fnmatch.filter(os.listdir(self.home), "*Log*")
checkpoints = 0
sorig = set(orig_logs)
while checkpoints < 500:
self.session.checkpoint()
cur_logs = fnmatch.filter(os.listdir(self.home), "*Log*")
scur = set(cur_logs)
if scur.isdisjoint(sorig):
break
checkpoints += 1
return
def test_ops(self):
# Populate a table
SimpleDataSet(self, self.source_uri, self.nrows).populate()
# Run forced checkpoints
self.run_checkpoints()
self.archive = 'false'
# Close and reopen the connection
self.reopen_conn()
if __name__ == '__main__':
wttest.run()
| 35.397436
| 73
| 0.692141
|
891f3e6874d4420de073b367842e3b38f1b91e8a
| 737
|
py
|
Python
|
deploy.py
|
yehan2002/TrapsAndTrolls
|
7ca377e7f10802a5dd868ce26ce0f49a1fce81cf
|
[
"MIT"
] | 1
|
2019-04-14T06:21:50.000Z
|
2019-04-14T06:21:50.000Z
|
deploy.py
|
yehan2002/TrapsAndTrolls
|
7ca377e7f10802a5dd868ce26ce0f49a1fce81cf
|
[
"MIT"
] | null | null | null |
deploy.py
|
yehan2002/TrapsAndTrolls
|
7ca377e7f10802a5dd868ce26ce0f49a1fce81cf
|
[
"MIT"
] | 1
|
2019-01-24T14:40:59.000Z
|
2019-01-24T14:40:59.000Z
|
#!/usr/bin/python2
import sys
import os
if len(sys.argv) == 1:
quit("Usage: %s [Version]" % os.path.dirname(__file__))
version= sys.argv[1]
os.chdir(os.path.dirname(__file__))
def addVersion(path, key, data):
filedata = []
with open(path, 'rb') as f:
for l in f.readlines():
if key in l:
l = data % version
filedata.append(l)
with open(path, "wb") as f:
f.writelines(filedata)
addVersion('build.gradle', "//$VERSION$", 'version = "%s" //$VERSION$\n')
addVersion('src/plugin.yml', "#$version$", 'version: %s #$version$\n')
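# For example, running `deploy.py 1.2.3` rewrites the tagged lines to
#   version = "1.2.3" //$VERSION$   (in build.gradle)
#   version: 1.2.3 #$version$       (in src/plugin.yml)
# before the tag/commit/push commands below.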
os.system('git tag %s'% version)
os.system('git add -A')
os.system('git commit')
os.system('git push origin master --tags')
| 23.774194
| 73
| 0.588874
|
9325c38d43c1c4d8030ab8f3f98679861fc4659e
| 1,084
|
py
|
Python
|
src/python/WMQuality/Emulators/WMAgents/WMAgentEmulator.py
|
vkuznet/WMCore
|
001cc51651052405a7ecd811cde91da611b1dc57
|
[
"Apache-2.0"
] | 21
|
2015-11-19T16:18:45.000Z
|
2021-12-02T18:20:39.000Z
|
src/python/WMQuality/Emulators/WMAgents/WMAgentEmulator.py
|
vkuznet/WMCore
|
001cc51651052405a7ecd811cde91da611b1dc57
|
[
"Apache-2.0"
] | 5,671
|
2015-01-06T14:38:52.000Z
|
2022-03-31T22:11:14.000Z
|
src/python/WMQuality/Emulators/WMAgents/WMAgentEmulator.py
|
vkuznet/WMCore
|
001cc51651052405a7ecd811cde91da611b1dc57
|
[
"Apache-2.0"
] | 67
|
2015-01-21T15:55:38.000Z
|
2022-02-03T19:53:13.000Z
|
#!/usr/bin/env python
"""
Auto-generated stub; be careful with editing.
Inheritance is preferred.
"""
from __future__ import absolute_import
import logging
import threading
from WMCore.Agent.Harness import Harness
from WMQuality.Emulators.RucioClient.MockRucioApi import SITES as DUMMY_SITES
from .WMAgentTasks import WMAgentTasks
class WMAgentEmulator(Harness):
def __init__(self, config):
# call the base class
Harness.__init__(self, config)
def preInitialization(self):
resources = self.populateResources()
# Add event loop to worker manager
myThread = threading.currentThread()
pollInterval = 1
logging.info("Setting poll interval to %s seconds" % pollInterval)
myThread.workerThreadManager.addWorker(WMAgentTasks(resources), pollInterval)
return
def populateResources(self):
"""
emulating resource db which can represent
{site: job} format
"""
jobSites = {}
for site in DUMMY_SITES:
jobSites[site] = 100
return jobSites
| 26.439024
| 85
| 0.687269
|
7598a7740a2c92cdead8fce7a4355fcadc170d22
| 84,716
|
py
|
Python
|
alphapept/feature_finding.py
|
mschwoer/alphapept
|
446b3c8b2a20619a74ff872c24a01fed8b99a20a
|
[
"Apache-2.0"
] | null | null | null |
alphapept/feature_finding.py
|
mschwoer/alphapept
|
446b3c8b2a20619a74ff872c24a01fed8b99a20a
|
[
"Apache-2.0"
] | null | null | null |
alphapept/feature_finding.py
|
mschwoer/alphapept
|
446b3c8b2a20619a74ff872c24a01fed8b99a20a
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/04_feature_finding.ipynb (unless otherwise specified).
__all__ = ['connect_centroids_unidirection', 'find_centroid_connections', 'convert_connections_to_array',
'eliminate_overarching_vertex', 'connect_centroids', 'path_finder', 'find_path_start', 'find_path_length',
'fill_path_matrix', 'get_hills', 'extract_hills', 'remove_duplicate_hills', 'fast_minima', 'split',
'split_hills', 'check_large_hills', 'filter_hills', 'hill_stats', 'remove_duplicates', 'get_hill_data',
'check_isotope_pattern', 'DELTA_M', 'DELTA_S', 'maximum_offset', 'correlate', 'extract_edge',
'edge_correlation', 'get_pre_isotope_patterns', 'check_isotope_pattern_directed', 'grow', 'grow_trail',
'get_trails', 'plot_pattern', 'get_minpos', 'get_local_minima', 'is_local_minima', 'truncate',
'check_averagine', 'pattern_to_mz', 'cosine_averagine', 'int_list_to_array', 'mz_to_mass', 'M_PROTON',
'isolate_isotope_pattern', 'get_isotope_patterns', 'report_', 'feature_finder_report', 'extract_bruker',
'convert_bruker', 'map_bruker', 'get_stats', 'find_features', 'replace_infs', 'map_ms2']
# Cell
import numpy as np
import alphapept.performance
#This function is tested by being called from find_centroid_connections
@alphapept.performance.performance_function
def connect_centroids_unidirection(x:np.ndarray, row_borders:np.ndarray, connections:np.ndarray, scores:np.ndarray, centroids:np.ndarray, max_gap:int, centroid_tol:float):
"""Connect centroids.
Args:
        x (np.ndarray): Index to datapoint. Note that this uses the performance_function, so one passes an ndarray.
row_borders (np.ndarray): Row borders of the centroids array.
connections (np.ndarray): Connections matrix to store the connections
scores (np.ndarray): Score matrix to store the connections
centroids (np.ndarray): 1D Array containing the masses of the centroids data.
max_gap (int): Maximum gap when connecting centroids.
centroid_tol (float): Centroid tolerance.
"""
for gap in range(max_gap + 1):
y = x + gap + 1
if y >= row_borders.shape[0]:
return
start_index_f = 0
if x > 0:
start_index_f = row_borders[x - 1]
centroids_1 = centroids[start_index_f: row_borders[x]]
start_index_b = row_borders[y - 1]
centroids_2 = centroids[start_index_b: row_borders[y]]
i = 0
j = 0
while (i < len(centroids_1)) & (j < len(centroids_2)):
mz1, mz2 = centroids_1[i], centroids_2[j]
diff = mz1 - mz2
mz_sum = mz1 + mz2
delta = 2 * 1e6 * abs(diff) / mz_sum
if delta < centroid_tol:
if scores[x, i, gap] > delta:
scores[x, i, gap] = delta
connections[x, i, gap] = (connections.shape[1] * y) + j
if diff > 0:
j += 1
else:
i += 1
def find_centroid_connections(rowwise_peaks:np.ndarray, row_borders:np.ndarray, centroids:np.ndarray, max_gap:int, centroid_tol:float):
"""Wrapper function to call connect_centroids_unidirection
Args:
rowwise_peaks (np.ndarray): Length of centroids with respect to the row borders.
row_borders (np.ndarray): Row borders of the centroids array.
centroids (np.ndarray): Array containing the centroids data.
max_gap (int): Maximum gap when connecting centroids.
centroid_tol (float): Centroid tolerance.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
max_centroids = int(cupy.max(rowwise_peaks))
spectra_cnt = len(row_borders) - 1
connections = cupy.full((spectra_cnt, max_centroids, max_gap + 1), -1, dtype=np.int32)
score = cupy.full((spectra_cnt, max_centroids, max_gap + 1), np.inf)
connect_centroids_unidirection(range(len(row_borders)),
row_borders,
connections,
score,
centroids,
max_gap,
centroid_tol)
score = score[cupy.where(score < np.inf)]
score_median = cupy.median(score)
score_std = cupy.std(score)
del score, max_centroids, spectra_cnt
c_shape = connections.shape
from_r, from_c, from_g = cupy.where(connections >= 0)
to_r = connections[from_r, from_c, from_g] // c_shape[1]
to_c = connections[from_r, from_c, from_g] - to_r * c_shape[1]
del connections, from_g
return from_r, from_c, to_r, to_c, score_median, score_std
# Cell
#the performance functions are tested with the wrapper function connect_centroids
@alphapept.performance.performance_function
def convert_connections_to_array(x:np.ndarray, from_r:np.ndarray, from_c:np.ndarray, to_r:np.ndarray, to_c:np.ndarray, row_borders:np.ndarray, out_from_idx:np.ndarray, out_to_idx:np.ndarray):
"""Convert integer indices of a matrix to coordinates.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
from_r (np.ndarray): From array with row coordinates.
from_c (np.ndarray): From array with column coordinates.
to_r (np.ndarray): To array with row coordinates.
to_c (np.ndarray): To array with column coordinates.
row_borders (np.ndarray): Row borders (for indexing).
out_from_idx (np.ndarray): Reporting array: 1D index from.
out_to_idx (np.ndarray): Reporting array: 1D index to.
"""
row = from_r[x]
col = from_c[x]
start_index_f = 0
if row > 0:
start_index_f = row_borders[row - 1]
out_from_idx[x] = start_index_f + col
row = to_r[x]
col = to_c[x]
start_index_f = 0
if row > 0:
start_index_f = row_borders[row - 1]
out_to_idx[x] = start_index_f + col
@alphapept.performance.performance_function
def eliminate_overarching_vertex(x:np.ndarray, from_idx:np.ndarray, to_idx:np.ndarray):
"""Eliminate overacrhing vertex.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
from_idx (np.ndarray): From index.
to_idx (np.ndarray): To index.
"""
if x == 0:
return
if from_idx[x - 1] == from_idx[x]:
to_idx[x] = -1
def connect_centroids(rowwise_peaks:np.ndarray, row_borders:np.ndarray, centroids:np.ndarray, max_gap:int, centroid_tol:float)-> (np.ndarray, np.ndarray, float, float):
"""Function to connect centroids.
Args:
rowwise_peaks (np.ndarray): Indexes for centroids.
row_borders (np.ndarray): Row borders (for indexing).
centroids (np.ndarray): Centroid data.
        max_gap (int): Maximum gap.
        centroid_tol (float): Centroid tolerance for matching centroids.
Returns:
np.ndarray: From index.
np.ndarray: To index.
float: Median score.
float: Std deviation of the score.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
from_r, from_c, to_r, to_c, score_median, score_std = find_centroid_connections(rowwise_peaks,
row_borders,
centroids,
max_gap,
centroid_tol)
from_idx = cupy.zeros(len(from_r), np.int32)
to_idx = cupy.zeros(len(from_r), np.int32)
convert_connections_to_array(range(len(from_r)),
from_r,
from_c,
to_r,
to_c,
row_borders,
from_idx,
to_idx)
eliminate_overarching_vertex(range(len(from_idx)), from_idx, to_idx)
relavent_idx = cupy.where(to_idx >= 0)
from_idx = cupy.take(from_idx, relavent_idx)[0]
to_idx = cupy.take(to_idx, relavent_idx)[0]
del from_r, from_c, to_r, to_c, relavent_idx
return from_idx, to_idx, score_median, score_std
# Cell
@alphapept.performance.performance_function
def path_finder(x:np.ndarray, from_idx:np.ndarray, to_idx:np.ndarray, forward:np.ndarray, backward:np.ndarray):
"""Extracts path information and writes to path matrix.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
from_idx (np.ndarray): Array containing from indices.
to_idx (np.ndarray): Array containing to indices.
forward (np.ndarray): Array to report forward connection.
backward (np.ndarray): Array to report backward connection.
"""
fr = from_idx[x]
to = to_idx[x]
forward[fr] = to
backward[to] = fr
@alphapept.performance.performance_function
def find_path_start(x:np.ndarray, forward:np.ndarray, backward:np.ndarray, path_starts:np.ndarray):
"""Function to find the start of a path.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
forward (np.ndarray): Array to report forward connection.
backward (np.ndarray): Array to report backward connection.
path_starts (np.ndarray): Array to report path starts.
"""
if forward[x] > -1 and backward[x] == -1:
path_starts[x] = 0
@alphapept.performance.performance_function
def find_path_length(x:np.ndarray, path_starts:np.ndarray, forward:np.ndarray, path_cnt:np.ndarray):
"""Function to extract the length of a path.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
path_starts (np.ndarray): Array that stores the starts of the paths.
forward (np.ndarray): Array that stores forward information.
path_cnt (np.ndarray): Reporting array to count the paths.
"""
ctr = 1
idx = path_starts[x]
while forward[idx] > -1:
ctr += 1
idx = forward[idx]
path_cnt[x] = ctr
@alphapept.performance.performance_function
def fill_path_matrix(x:np.ndarray, path_start:np.ndarray, forwards:np.ndarray, out_hill_data:np.ndarray, out_hill_ptr:np.ndarray):
"""Function to fill the path matrix.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
        path_start (np.ndarray): Array that stores the starts of the paths.
forwards (np.ndarray): Forward array.
out_hill_data (np.ndarray): Array containing the indices to hills.
out_hill_ptr (np.ndarray): Array containing the bounds to out_hill_data.
"""
path_position = 0
idx = path_start[x]
while idx > -1:
out_hill_data[out_hill_ptr[x] + path_position] = idx
idx = forwards[idx]
path_position += 1
def get_hills(centroids:np.ndarray, from_idx:np.ndarray, to_idx:np.ndarray, hill_length_min:int=3)-> (np.ndarray, np.ndarray, int):
"""Function to get hills from centroid connections.
Args:
centroids (np.ndarray): 1D Array containing the masses of the centroids.
from_idx (np.ndarray): From index.
to_idx (np.ndarray): To index.
hill_length_min (int): Minimum hill length:
Returns:
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
path_node_cnt (int): Number of elements in this path.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
forward = cupy.full(centroids.shape[0], -1)
backward = cupy.full(centroids.shape[0], -1)
path_starts = cupy.full(centroids.shape[0], -1)
path_finder(range(len(from_idx)), from_idx, to_idx, forward, backward)
find_path_start(range(len(forward)), forward, backward, path_starts)
    # path_starts will now contain the first index of all connected centroids
path_starts = cupy.where(path_starts == 0)[0]
path_node_cnt = cupy.full(path_starts.shape[0], -1)
find_path_length(range(len(path_starts)), path_starts, forward, path_node_cnt)
relavant_path_node = cupy.where(path_node_cnt >= hill_length_min)[0]
path_starts = cupy.take(path_starts, relavant_path_node)
path_node_cnt = cupy.take(path_node_cnt, relavant_path_node)
del relavant_path_node
    # Generate the hill matrix index pointer data
hill_ptrs = cupy.empty((path_starts.shape[0] + 1), dtype=cupy.int32)
hill_ptrs[0] = 0
hill_ptrs[1:] = path_node_cnt.cumsum()
hill_data = cupy.empty((int(hill_ptrs[-1])), np.int32)
fill_path_matrix(range(len(path_starts)), path_starts, forward, hill_data, hill_ptrs)
del from_idx, to_idx, path_starts, forward, backward
return hill_ptrs, hill_data, path_node_cnt
def extract_hills(query_data:dict, max_gap:int, centroid_tol:float)-> (np.ndarray, np.ndarray, int, float, float):
"""[summary]
Args:
query_data (dict): Data structure containing the query data.
max_gap (int): Maximum gap when connecting centroids.
centroid_tol (float): Centroid tolerance.
Returns:
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
path_node_cnt (int): Number of elements in this path.
score_median (float): Median score.
score_std (float): Std deviation of the score.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
indices = cupy.array(query_data['indices_ms1'])
mass_data = cupy.array(query_data['mass_list_ms1'])
rowwise_peaks = indices[1:] - indices[:-1]
row_borders = indices[1:]
from_idx, to_idx, score_median, score_std = connect_centroids(rowwise_peaks, row_borders, mass_data, max_gap, centroid_tol)
hill_ptrs, hill_data, path_node_cnt = get_hills(mass_data, from_idx, to_idx)
del mass_data
del indices
if cupy.__name__ != 'numpy':
hill_ptrs = hill_ptrs.get()
hill_data = hill_data.get()
path_node_cnt = path_node_cnt.get()
score_median = score_median.get()
score_std = score_std.get()
return hill_ptrs, hill_data, path_node_cnt, score_median, score_std
from numba import njit
@njit
def remove_duplicate_hills(hill_ptrs, hill_data, path_node_cnt):
"""
Removes hills that share datapoints. Starts from the largest hills.
"""
taken_points = np.zeros(hill_data.max()+1)
c = 0
current_idx = 0
hill_ptrs_new = np.zeros_like(hill_ptrs)
hill_data_new = np.zeros_like(hill_data)
for i, _ in enumerate(np.argsort(path_node_cnt)[::-1]):
s, e = hill_ptrs[_], hill_ptrs[_+1]
point_idx = hill_data[s:e]
hill_pts = taken_points[point_idx]
if hill_pts.sum() == 0:
hill_data_new[current_idx:current_idx+len(hill_pts)] = point_idx
current_idx += len(hill_pts)
hill_ptrs_new[c+1] = current_idx
c +=1
taken_points[point_idx] +=1
hill_data_new = hill_data_new[:current_idx]
hill_ptrs_new = hill_ptrs_new[:c]
return hill_ptrs_new, hill_data_new
# Cell
@alphapept.performance.compile_function(compilation_mode="numba")
def fast_minima(y:np.ndarray)->np.ndarray:
"""Function to calculate the local minimas of an array.
Args:
y (np.ndarray): Input array.
Returns:
np.ndarray: Array containing minima positions.
"""
minima = np.zeros(len(y))
start = 0
end = len(y)
for i in range(start + 2, end - 2):
if ((y[i - 1] > y[i]) & (y[i + 1] > y[i])) \
or ((y[i - 1] > y[i]) & (y[i + 1] == y[i]) & (y[i + 2] > y[i])) \
or ((y[i - 2] > y[i]) & (y[i - 1] == y[i]) & (y[i + 1] > y[i])) \
or (((y[i - 2] > y[i]) & (y[i - 1] == y[i]) & (y[i + 1] == y[i]) & \
(y[i + 2] > y[i]))):
minima[i] = 1
minima = minima.nonzero()[0]
return minima
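# Illustrative call (input values assumed): fast_minima(np.array([5., 3., 1., 3., 5., 7.]))
# returns array([2]); only index 2 lies strictly below its neighbours, and the first and
# last two positions are never reported because the scan runs from start + 2 to end - 2.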
# Cell
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def split(k:np.ndarray, hill_ptrs:np.ndarray, int_data:np.ndarray, hill_data:np.ndarray, splits:np.ndarray, hill_split_level:float, window:int):
"""Function to split hills.
Args:
k (np.ndarray): Input index. Note that we are using the performance function so this is a range.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
int_data (np.ndarray): Array containing the intensity to each centroid.
hill_data (np.ndarray): Array containing the indices to hills.
splits (np.ndarray): Array containing splits.
hill_split_level (float): Split level for hills.
window (int): Smoothing window.
"""
start = hill_ptrs[k]
end = hill_ptrs[k + 1]
int_idx = hill_data[start:end] #index to hill data
int_trace = int_data[int_idx]
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_trace[i] = np.median(int_trace[min_index:max_index])
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_trace[i] = np.mean(int_trace[min_index:max_index])
#minima = (np.diff(np.sign(np.diff(int_trace))) > 0).nonzero()[0] + 1 #This works also but is slower
minima = fast_minima(int_trace)
sorted_minima = np.argsort(int_trace[minima])
minima = minima[sorted_minima]
for min_ in minima:
minval = int_trace[min_]
left_max = max(int_trace[:min_])
right_max = max(int_trace[min_:])
min_max = min(left_max, right_max)
if (minval == 0) or ((min_max / minval) > hill_split_level):
splits[k] = start+min_
break # Split only once per iteration
def split_hills(hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, hill_split_level:float, window:int)->np.ndarray:
"""Wrapper function to split hills
Args:
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
hill_split_level (float): Split level for hills.
window (int): Smoothing window.
Returns:
np.ndarray: Array containing the bounds to the hill_data with splits.
"""
splits = np.zeros(len(int_data), dtype=np.int32)
to_check = np.arange(len(hill_ptrs)-1)
while len(to_check) > 0:
split(to_check, hill_ptrs, int_data, hill_data, splits, hill_split_level, window)
splitpoints = splits.nonzero()[0]
to_check = np.zeros(len(hill_ptrs))
to_check[splitpoints] = 1
to_check = np.insert(to_check, splitpoints+1, np.ones(len(splitpoints))).nonzero()[0] #array, index, what
hill_ptrs = np.insert(hill_ptrs, splitpoints+1, splits[splitpoints]) #array, index, what
splits = np.zeros(len(hill_ptrs), dtype=np.int32) #was cupy np.int32
return hill_ptrs
# Cell
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def check_large_hills(idx:np.ndarray, large_peaks:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, to_remove:np.ndarray, large_peak:int = 40, hill_peak_factor:float = 2, window:int=1):
"""Function to check large hills and flag them for removal.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
large_peaks (np.ndarray): Array containing large peaks.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
to_remove (np.ndarray): Array with indexes to remove.
large_peak (int, optional): Length criterion when a peak is large. Defaults to 40.
hill_peak_factor (float, optional): Hill maximum criterion. Defaults to 2.
        window (int, optional): Smoothing window. Defaults to 1.
"""
k = large_peaks[idx]
start = hill_ptrs[k]
end = hill_ptrs[k + 1]
int_idx = hill_data[start:end] #index to hill data
int_smooth_ = int_data[int_idx]
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_smooth_[i] = np.median(int_smooth_[min_index:max_index])
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_smooth_[i] = np.mean(int_smooth_[min_index:max_index])
int_ = int_data[int_idx]
max_ = np.max(int_)
if (max_ / int_smooth_[0] > hill_peak_factor) & (max_ / int_smooth_[-1] > hill_peak_factor):
to_remove[idx] = 0
def filter_hills(hill_data:np.ndarray, hill_ptrs:np.ndarray, int_data:np.ndarray, hill_check_large:int =40, window:int = 1) -> (np.ndarray, np.ndarray):
"""Filters large hills.
Args:
hill_data (np.ndarray): Array containing the indices to hills.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
int_data (np.ndarray): Array containing the intensity to each centroid.
        hill_check_large (int, optional): Length criterion when a hill is considered large. Defaults to 40.
window (int, optional): Smoothing window. Defaults to 1.
Returns:
np.ndarray: Filtered hill data.
np.ndarray: Filtered hill points.
"""
large_peaks = np.where(np.diff(hill_ptrs)>=hill_check_large)[0]
to_remove = np.ones(len(large_peaks), dtype=np.int32)
    check_large_hills(range(len(large_peaks)), large_peaks, hill_ptrs, hill_data, int_data, to_remove, hill_check_large, 2, window)
idx_ = np.ones(len(hill_data), dtype = np.int32)
keep = np.ones(len(hill_ptrs)-1, dtype = np.int32)
to_remove = to_remove.nonzero()[0]
for _ in to_remove:
idx_[hill_ptrs[_]:hill_ptrs[_+1]] = 0
keep[_] = 0
hill_lens = np.diff(hill_ptrs)
keep_ = hill_lens[keep.nonzero()[0]]
hill_data_ = hill_data[idx_.nonzero()[0]]
hill_ptrs_ = np.empty((len(keep_) + 1), dtype=np.int32)
hill_ptrs_[0] = 0
hill_ptrs_[1:] = keep_.cumsum()
return hill_data_, hill_ptrs_
# Cell
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def hill_stats(idx:np.ndarray, hill_range:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, mass_data:np.ndarray, rt_:np.ndarray, rt_idx:np.ndarray, stats:np.ndarray, hill_nboot_max:int, hill_nboot:int):
"""Function to calculate hill stats.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
hill_range (np.ndarray): Hill range.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
mass_data (np.ndarray): Array containing mass data.
rt_ (np.ndarray): Array with retention time information for each scan.
rt_idx (np.ndarray): Lookup array to match centroid idx to rt.
stats (np.ndarray): Stats array that contains summary statistics of hills.
hill_nboot_max (int): Maximum number of bootstrap comparisons.
hill_nboot (int): Number of bootstrap comparisons
"""
np.random.seed(42)
start = hill_ptrs[idx]
end = hill_ptrs[idx + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
mz_ = mass_data[idx_]
ms1_int_apex = np.max(int_)
ms1_int_area = np.abs(np.trapz(int_, rt_[rt_idx[idx_]])) #Area
rt_min = rt_[rt_idx[idx_]].min()
rt_max = rt_[rt_idx[idx_]].max()
if len(idx_) > hill_nboot_max:
bootsize = hill_nboot_max
else:
bootsize = len(idx_)
averages = np.zeros(hill_nboot)
average = 0
for i in range(hill_nboot):
boot = np.random.choice(len(int_), bootsize, replace=True)
boot_mz = np.sum((mz_[boot] * int_[boot])) / np.sum(int_[boot])
averages[i] = boot_mz
average += boot_mz
average_mz = average/hill_nboot
delta = 0
for i in range(hill_nboot):
delta += (average_mz - averages[i]) ** 2 #maybe easier?
delta_m = np.sqrt(delta / (hill_nboot - 1))
stats[idx,0] = average_mz
stats[idx,1] = delta_m
stats[idx,2] = ms1_int_area
stats[idx,3] = ms1_int_apex
stats[idx,4] = rt_min
stats[idx,5] = rt_max
def remove_duplicates(stats:np.ndarray, hill_data:np.ndarray, hill_ptrs:np.ndarray)-> (np.ndarray, np.ndarray, np.ndarray):
"""Remove duplicate hills.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
hill_data (np.ndarray): Array containing the indices to hills.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
Returns:
np.ndarray: Filtered hill data.
np.ndarray: Filtered hill points.
np.ndarray: Filtered hill stats.
"""
dups = pd.DataFrame(stats).duplicated() #all duplicated hills
idx_ = np.ones(len(hill_data), dtype = np.int32) #keep all
keep = np.ones(len(hill_ptrs)-1, dtype = np.int32)
for _ in np.arange(len(stats))[dups]: #duplicates will be assigned zeros
idx_[hill_ptrs[_]:hill_ptrs[_+1]] = 0
keep[_] = 0
hill_lens = np.diff(hill_ptrs)
keep_ = hill_lens[keep.nonzero()[0]]
hill_data_ = hill_data[idx_.nonzero()[0]]
hill_ptrs_ = np.empty((len(keep_) + 1), dtype=np.int32)
hill_ptrs_[0] = 0
hill_ptrs_[1:] = keep_.cumsum()
return hill_data_, hill_ptrs_, stats[~dups]
def get_hill_data(query_data:dict, hill_ptrs:np.ndarray, hill_data:np.ndarray, hill_nboot_max:int = 300, hill_nboot:int = 150) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray):
"""Wrapper function to get the hill data.
Args:
query_data (dict): Data structure containing the query data.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
hill_nboot_max (int): Maximum number of bootstrap comparisons.
hill_nboot (int): Number of bootstrap comparisons
Returns:
np.ndarray: Hill stats.
np.ndarray: Sortindex.
np.ndarray: Upper index.
np.ndarray: Scan index.
np.ndarray: Hill data.
np.ndarray: Hill points.
"""
indices_ = np.array(query_data['indices_ms1'])
rt_ = np.array(query_data['rt_list_ms1'])
mass_data = np.array(query_data['mass_list_ms1'])
scan_idx = np.searchsorted(indices_, np.arange(len(mass_data)), side='right') - 1
int_data = np.array(query_data['int_list_ms1'])
    stats = np.zeros((len(hill_ptrs)-1, 6)) #average_mz, delta_m, int_area, int_apex, rt_min, rt_max
hill_stats(range(len(hill_ptrs)-1), np.arange(len(hill_ptrs)-1), hill_ptrs, hill_data, int_data, mass_data, rt_, scan_idx, stats, hill_nboot_max, hill_nboot)
# sort the stats
sortindex = np.argsort(stats[:,4]) #Sorted by rt_min
stats = stats[sortindex,:]
idxs_upper = stats[:,4].searchsorted(stats[:,5], side="right")
sortindex_ = np.arange(len(sortindex))[sortindex]
return stats, sortindex_, idxs_upper, scan_idx, hill_data, hill_ptrs
# Cell
from .constants import mass_dict
DELTA_M = mass_dict['delta_M']
DELTA_S = mass_dict['delta_S']
maximum_offset = DELTA_M + DELTA_S
@alphapept.performance.compile_function(compilation_mode="numba")
def check_isotope_pattern(mass1:float, mass2:float, delta_mass1:float, delta_mass2:float, charge:int, iso_mass_range:int = 5)-> bool:
"""Check if two masses could belong to the same isotope pattern.
Args:
mass1 (float): Mass of the first pattern.
mass2 (float): Mass of the second pattern.
delta_mass1 (float): Delta mass of the first pattern.
delta_mass2 (float): Delta mass of the second pattern.
charge (int): Charge.
iso_mass_range (int, optional): Mass range. Defaults to 5.
Returns:
bool: Flag to see if pattern belongs to the same pattern.
"""
delta_mass1 = delta_mass1 * iso_mass_range
delta_mass2 = delta_mass2 * iso_mass_range
delta_mass = np.abs(mass1 - mass2)
left_side = np.abs(delta_mass - DELTA_M / charge)
right_side = np.sqrt((DELTA_S / charge) ** 2 + delta_mass1 ** 2 + delta_mass2 ** 2)
return left_side <= right_side
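# In plain terms: the two hills are accepted as one isotope pair for a given charge if
# their absolute mass difference deviates from DELTA_M / charge by no more than
# sqrt((DELTA_S / charge)**2 + sigma1**2 + sigma2**2), where sigma_i = delta_mass_i * iso_mass_range.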
# Cell
@alphapept.performance.compile_function(compilation_mode="numba")
def correlate(scans_:np.ndarray, scans_2:np.ndarray, int_:np.ndarray, int_2:np.ndarray)->float:
"""Correlate two scans.
Args:
scans_ (np.ndarray): Masses of the first scan.
scans_2 (np.ndarray): Masses of the second scan.
int_ (np.ndarray): Intensity of the first scan.
int_2 (np.ndarray): Intensity of the second scan.
Returns:
float: Correlation.
"""
min_one, max_one = scans_[0], scans_[-1]
min_two, max_two = scans_2[0], scans_2[-1]
if min_one + 3 > max_two: # at least an overlap of 3 elements
corr = 0
elif min_two + 3 > max_one:
corr = 0
else:
min_s = min(min_one, min_two)
max_s = max(max_one, max_two)
int_one_scaled = np.zeros(int(max_s - min_s + 1))
int_two_scaled = np.zeros(int(max_s - min_s + 1))
int_one_scaled[scans_ - min_s] = int_
int_two_scaled[scans_2 - min_s] = int_2
corr = np.sum(int_one_scaled * int_two_scaled) / np.sqrt(
np.sum(int_one_scaled ** 2) * np.sum(int_two_scaled ** 2)
)
return corr
# Cell
@alphapept.performance.compile_function(compilation_mode="numba")
def extract_edge(stats:np.ndarray, idxs_upper:np.ndarray, runner:int, max_index:int, maximum_offset:float, iso_charge_min:int = 1, iso_charge_max:int = 6, iso_mass_range:int=5)->list:
"""Extract edges.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
idxs_upper (np.ndarray): Upper index for comparing.
runner (int): Index.
max_index (int): Unused.
maximum_offset (float): Maximum offset when comparing edges.
iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
iso_mass_range (float, optional): Mass search range. Defaults to 5.
Returns:
list: List of edges.
"""
edges = []
mass1 = stats[runner, 0]
delta_mass1 = stats[runner, 1]
for j in range(runner+1, idxs_upper[runner]):
mass2 = stats[j, 0]
if np.abs(mass2 - mass1) <= maximum_offset:
delta_mass2 = stats[j, 1]
for charge in range(iso_charge_min, iso_charge_max + 1):
if check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge, iso_mass_range):
edges.append((runner, j))
break
return edges
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def edge_correlation(idx:np.ndarray, to_keep:np.ndarray, sortindex_:np.ndarray, pre_edges:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float):
"""Correlates two edges and flag them it they should be kept.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
to_keep (np.ndarray): Array with indices which edges should be kept.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
pre_edges (np.ndarray): Array with pre edges.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
"""
edge = pre_edges[idx,:]
y = sortindex_[edge[0]]
start = hill_ptrs[y]
end = hill_ptrs[y + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
scans_ = scan_idx[idx_]
con = sortindex_[edge[1]]
start = hill_ptrs[con]
end = hill_ptrs[con + 1]
idx_2 = hill_data[start:end]
int_2 = int_data[idx_2]
scans_2 = scan_idx[idx_2]
if correlate(scans_, scans_2, int_, int_2) > cc_cutoff:
to_keep[idx] = 1
# Cell
import networkx as nx
def get_pre_isotope_patterns(stats:np.ndarray, idxs_upper:np.ndarray, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, maximum_offset:float, iso_charge_min:int=1, iso_charge_max:int=6, iso_mass_range:float=5, cc_cutoff:float=0.6)->list:
"""Function to extract pre isotope patterns.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
idxs_upper (np.ndarray): Upper index for comparison.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
maximum_offset (float): Maximum offset when matching.
iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
iso_mass_range (float, optional): Mass search range. Defaults to 5.
cc_cutoff (float, optional): Correlation cutoff. Defaults to 0.6.
Returns:
list: List of pre isotope patterns.
"""
pre_edges = []
# Step 1
for runner in range(len(stats)):
pre_edges.extend(extract_edge(stats, idxs_upper, runner, idxs_upper[runner], maximum_offset, iso_charge_min, iso_charge_max, iso_mass_range))
to_keep = np.zeros(len(pre_edges), dtype='int')
pre_edges = np.array(pre_edges)
edge_correlation(range(len(to_keep)), to_keep, sortindex_, pre_edges, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
edges = pre_edges[to_keep.nonzero()]
G2 = nx.Graph()
for i in range(len(edges)):
G2.add_edge(edges[i][0], edges[i][1])
pre_isotope_patterns = [
sorted(list(c))
for c in sorted(nx.connected_components(G2), key=len, reverse=True)
]
return pre_isotope_patterns
# Cell
from numba.typed import List
@alphapept.performance.compile_function(compilation_mode="numba")
def check_isotope_pattern_directed(mass1:float, mass2:float, delta_mass1:float, delta_mass2:float, charge:int, index:int, iso_mass_range:float)->bool:
"""Check if two masses could belong to the same isotope pattern.
Args:
mass1 (float): Mass of the first pattern.
mass2 (float): Mass of the second pattern.
delta_mass1 (float): Delta mass of the first pattern.
delta_mass2 (float): Delta mass of the second pattern.
charge (int): Charge.
index (int): Index (unused).
iso_mass_range (float): Isotope mass ranges.
Returns:
bool: Flag if two isotope patterns belong together.
"""
delta_mass1 = delta_mass1 * iso_mass_range
delta_mass2 = delta_mass2 * iso_mass_range
left_side = np.abs(mass1 - mass2 - index * DELTA_M / charge)
right_side = np.sqrt((DELTA_S / charge) ** 2 + delta_mass1 ** 2 + delta_mass2 ** 2)
return left_side <= right_side
@alphapept.performance.compile_function(compilation_mode="numba")
def grow(trail:List, seed:int, direction:int, relative_pos:int, index:int, stats:np.ndarray, pattern:np.ndarray, charge:int, iso_mass_range:float, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float)->List:
"""Grows isotope pattern based on a seed and direction.
Args:
trail (List): List of hills belonging to a pattern.
seed (int): Seed position.
direction (int): Direction in which to grow the trail
relative_pos (int): Relative position.
index (int): Index.
stats (np.ndarray): Stats array that contains summary statistics of hills.
pattern (np.ndarray): Isotope pattern.
charge (int): Charge.
iso_mass_range (float): Mass range for checking isotope patterns.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
Returns:
List: List of hills belonging to a pattern.
"""
x = pattern[seed] # This is the seed
mass1 = stats[x,0]
delta_mass1 = stats[x,1]
k = sortindex_[x]
start = hill_ptrs[k]
end = hill_ptrs[k + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
scans_ = scan_idx[idx_]
growing = True
while growing:
if direction == 1:
if seed + relative_pos == len(pattern):
growing = False
break
else:
if seed + relative_pos < 0:
growing = False
break
y = pattern[seed + relative_pos] # This is a reference peak
l = sortindex_[y]
mass2 = stats[y,0]
delta_mass2 = stats[y,1]
start = hill_ptrs[l]
end = hill_ptrs[l + 1]
idx_ = hill_data[start:end]
int_2 = int_data[idx_]
scans_2 = scan_idx[idx_]
if correlate(scans_, scans_2, int_, int_2) > cc_cutoff:
if check_isotope_pattern_directed(mass1, mass2, delta_mass1, delta_mass2, charge, -direction * index, iso_mass_range):
if direction == 1:
trail.append(y)
else:
trail.insert(0, y)
index += (
1
) # Greedy matching: Only one edge for a specific distance, will not affect the following matches
delta_mass = np.abs(mass1 - mass2)
if (delta_mass > (DELTA_M+DELTA_S) * index): # the pattern is sorted so there is a maximum to look back
break
relative_pos += direction
return trail
@alphapept.performance.compile_function(compilation_mode="numba")
def grow_trail(seed:int, pattern:np.ndarray, stats:np.ndarray, charge:int, iso_mass_range:float, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float)->List:
"""Wrapper to grow an isotope pattern to the left and right side.
Args:
seed (int): Seed position.
pattern (np.ndarray): Isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
charge (int): Charge.
iso_mass_range (float): Mass range for checking isotope patterns.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
Returns:
List: Isotope pattern.
"""
x = pattern[seed]
trail = List()
trail.append(x)
trail = grow(trail, seed, -1, -1, 1, stats, pattern, charge, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
trail = grow(trail, seed, 1, 1, 1, stats, pattern, charge, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
return trail
@alphapept.performance.compile_function(compilation_mode="numba")
def get_trails(seed:int, pattern:np.ndarray, stats:np.ndarray, charge_range:List, iso_mass_range:float, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float)->List:
"""Wrapper to extract trails for a given charge range.
Args:
seed (int): Seed index.
pattern (np.ndarray): Pre isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
charge_range (List): Charge range.
iso_mass_range (float): Mass range for checking isotope patterns.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
Returns:
List: Trail of consistent hills.
"""
trails = []
for charge in charge_range:
trail = grow_trail(seed, pattern, stats, charge, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
trails.append(trail)
return trails
# Cell
def plot_pattern(pattern:np.ndarray, sorted_hills:np.ndarray, centroids:np.ndarray, hill_data:np.ndarray):
"""Helper function to plot a pattern.
Args:
pattern (np.ndarray): Pre isotope pattern.
sorted_hills (np.ndarray): Hills, sorted.
centroids (np.ndarray): 1D Array containing the masses of the centroids.
hill_data (np.ndarray): Array containing the indices to hills.
"""
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(10,10))
centroid_dtype = [("mz", float), ("int", float), ("scan_no", int), ("rt", float)]
mzs = []
rts = []
ints = []
for entry in pattern:
hill = sorted_hills[entry]
hill_data = np.array([centroids[_[0]][_[1]] for _ in hill], dtype=centroid_dtype)
int_profile = hill_data["int"]
ax1.plot(hill_data["rt"], hill_data["int"])
ax2.scatter(hill_data["rt"], hill_data["mz"], s = hill_data["int"]/5e5 )
ax1.set_title('Pattern')
ax1.set_xlabel('RT (min)')
ax1.set_ylabel('Intensity')
ax2.set_xlabel('RT (min)')
ax2.set_ylabel('m/z')
plt.show()
# Cell
@alphapept.performance.compile_function(compilation_mode="numba")
def get_minpos(y:np.ndarray, iso_split_level:float)->List:
"""Function to get a list of minima in a trace.
    A minimum is returned if the ratio of the lower of the surrounding maxima to the minimum is larger than the splitting factor.
Args:
y (np.ndarray): Input array.
iso_split_level (float): Isotope split level.
Returns:
List: List with min positions.
"""
minima = get_local_minima(y)
minima_list = List()
for minpos in minima:
minval = y[minpos]
left_max = (y[:minpos]).max()
right_max = (y[minpos:]).max()
minimum_max = min(left_max, right_max)
if minimum_max / minval >= iso_split_level:
minima_list.append(minpos)
return minima_list
@alphapept.performance.compile_function(compilation_mode="numba")
def get_local_minima(y:np.ndarray)->List:
"""Function to return all local minima of a array
Args:
y (np.ndarray): Input array.
Returns:
List: List with indices to minima.
"""
minima = List()
for i in range(1, len(y) - 1):
if is_local_minima(y, i):
minima.append(i)
return minima
@alphapept.performance.compile_function(compilation_mode="numba")
def is_local_minima(y:np.ndarray, i:int)->bool:
"""Check if position is a local minima.
Args:
y (np.ndarray): Input array.
i (int): Position to check.
Returns:
bool: Flag if position is minima or not.
"""
return (y[i - 1] > y[i]) & (y[i + 1] > y[i])
@alphapept.performance.compile_function(compilation_mode="numba")
def truncate(array:np.ndarray, intensity_profile:np.ndarray, seedpos:int, iso_split_level:float)->np.ndarray:
"""Function to truncate an intensity profile around its seedposition.
Args:
array (np.ndarray): Input array.
intensity_profile (np.ndarray): Intensities for the input array.
seedpos (int): Seedposition.
iso_split_level (float): Split level.
Returns:
np.ndarray: Truncated array.
"""
minima = int_list_to_array(get_minpos(intensity_profile, iso_split_level))
if len(minima) > 0:
left_minima = minima[minima < seedpos]
right_minima = minima[minima > seedpos]
# If the minimum is smaller than the seed
if len(left_minima) > 0:
minpos = left_minima[-1]
else:
minpos = 0
if len(right_minima) > 0:
maxpos = right_minima[0]
else:
maxpos = len(array)
array = array[minpos:maxpos+1]
return array
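# A minimal illustrative sketch (not part of the original module): truncating a hypothetical
# profile with two peaks around a seed on the left peak keeps only that peak's part of the array.
def _sketch_truncate():
    intensity_profile = np.array([1., 5., 10., 4., 1., 6., 12., 5., 2.])
    array = np.arange(len(intensity_profile))          # stand-in for hill indices
    return truncate(array, intensity_profile, 2, 1.3)  # -> array([0, 1, 2, 3, 4])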
# Cell
from .chem import mass_to_dist
from .constants import averagine_aa, isotopes, Isotope
from numba.typed import Dict
@alphapept.performance.compile_function(compilation_mode="numba")
def check_averagine(stats:np.ndarray, pattern:np.ndarray, charge:int, averagine_aa:Dict, isotopes:Dict)->float:
"""Function to compare a pattern to an averagine model.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
pattern (np.ndarray): Isotope pattern.
charge (int): Charge.
averagine_aa (Dict): Dict containing averagine masses.
isotopes (Dict): Dict containing isotopes.
Returns:
float: Averagine correlation.
"""
masses, intensity = pattern_to_mz(stats, pattern, charge)
spec_one = np.floor(masses).astype(np.int64)
int_one = intensity
spec_two, int_two = mass_to_dist(np.min(masses), averagine_aa, isotopes) # maybe change to no rounded version
spec_two = np.floor(spec_two).astype(np.int64)
return cosine_averagine(int_one, int_two, spec_one, spec_two)
@alphapept.performance.compile_function(compilation_mode="numba")
def pattern_to_mz(stats:np.ndarray, pattern:np.ndarray, charge:int)-> (np.ndarray, np.ndarray):
"""Function to calculate masses and intensities from pattern for a given charge.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
pattern (np.ndarray): Isotope pattern.
charge (int): Charge of the pattern.
Returns:
np.ndarray: masses
np.ndarray: intensity
"""
mzs = np.zeros(len(pattern))
ints = np.zeros(len(pattern))
for i in range(len(pattern)):
entry = pattern[i]
mzs[i] = mz_to_mass(stats[entry,0], charge)
ints[i] = stats[entry,2]
sortindex = np.argsort(mzs)
masses = mzs[sortindex]
intensity = ints[sortindex]
return masses, intensity
@alphapept.performance.compile_function(compilation_mode="numba")
def cosine_averagine(int_one:np.ndarray, int_two:np.ndarray, spec_one:np.ndarray, spec_two:np.ndarray)-> float:
"""Calculate the cosine correlation of two hills.
Args:
int_one (np.ndarray): Intensity of the first hill.
int_two (np.ndarray): Intensity of the second hill.
spec_one (np.ndarray): Scan numbers of the first hill.
spec_two (np.ndarray): Scan numbers of the second hill.
Returns:
float: Cosine
"""
min_one, max_one = spec_one[0], spec_one[-1]
min_two, max_two = spec_two[0], spec_two[-1]
min_s = np.min(np.array([min_one, min_two]))
max_s = np.max(np.array([max_one, max_two]))
int_one_scaled = np.zeros(int(max_s - min_s + 1))
int_two_scaled = np.zeros(int(max_s - min_s + 1))
int_one_scaled[spec_one - min_s] = int_one
int_two_scaled[spec_two - min_s] = int_two
corr = np.sum(int_one_scaled * int_two_scaled) / np.sqrt(
np.sum(int_one_scaled ** 2) * np.sum(int_two_scaled ** 2)
)
return corr
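# A minimal illustrative sketch (not part of the original module): two hypothetical isotope
# distributions on integer mass bins are projected onto a common axis and compared; identical
# shapes give 1.0, unrelated shapes give values close to 0.
def _sketch_cosine_averagine():
    spec_one = np.array([1000, 1001, 1002], dtype=np.int64)
    int_one = np.array([1.0, 0.6, 0.2])
    spec_two = np.array([1000, 1001, 1002, 1003], dtype=np.int64)
    int_two = np.array([1.0, 0.55, 0.25, 0.08])
    return cosine_averagine(int_one, int_two, spec_one, spec_two)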
@alphapept.performance.compile_function(compilation_mode="numba")
def int_list_to_array(numba_list:List)->np.ndarray:
"""Numba compatbilte function to convert a numba list with integers to a numpy array
Args:
numba_list (List): Input numba-typed List.
Returns:
np.ndarray: Output numpy array.
"""
array = np.zeros(len(numba_list), dtype=np.int64)
for i in range(len(array)):
array[i] = numba_list[i]
return array
M_PROTON = mass_dict['Proton']
@alphapept.performance.compile_function(compilation_mode="numba")
def mz_to_mass(mz:float, charge:int)->float:
"""Function to calculate the mass from a mz value.
Args:
mz (float): M/z
charge (int): Charge.
Raises:
NotImplementedError: When a negative charge is used.
Returns:
float: mass
"""
if charge < 0:
raise NotImplementedError("Negative Charges not implemented.")
mass = mz * charge - charge * M_PROTON
return mass
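# A minimal illustrative sketch (not part of the original module): for a hypothetical doubly
# charged ion at m/z 500.5, mass = 500.5 * 2 - 2 * M_PROTON, i.e. roughly 998.99 Da.
def _sketch_mz_to_mass():
    return mz_to_mass(500.5, 2)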
# Cell
@alphapept.performance.compile_function(compilation_mode="numba")
def isolate_isotope_pattern(pre_pattern:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, iso_mass_range:float, charge_range:List, averagine_aa:Dict, isotopes:Dict, iso_n_seeds:int, cc_cutoff:float, iso_split_level:float)->(np.ndarray, int):
"""Isolate isotope patterns.
Args:
pre_pattern (np.ndarray): Pre isotope pattern.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
iso_mass_range (float): Mass range for checking isotope patterns.
charge_range (List): Charge range.
averagine_aa (Dict): Dict containing averagine masses.
isotopes (Dict): Dict containing isotopes.
iso_n_seeds (int): Number of seeds.
cc_cutoff (float): Cutoff value for what is considered correlating.
iso_split_level (float): Split level when isotopes are split.
Returns:
np.ndarray: Array with the best pattern.
int: Charge of the best pattern.
"""
longest_trace = 0
champion_trace = None
champion_charge = 0
champion_intensity = 0
# Sort patterns by mass
sortindex = np.argsort(stats[pre_pattern][:,0]) # sort by m/z (column 0 of stats)
sorted_pattern = pre_pattern[sortindex]
massindex = np.argsort(stats[sorted_pattern][:,2])[::-1][:iso_n_seeds] # most intense first (column 2 = int_sum)
# Use the most intense elements of the pre_pattern as seeds
for seed in massindex: # Loop through all seeds
seed_global = sorted_pattern[seed]
trails = get_trails(seed, sorted_pattern, stats, charge_range, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
for index, trail in enumerate(trails):
if len(trail) >= longest_trace: # Needs to be at least as long as the current champion
arr = int_list_to_array(trail)
intensity_profile = stats[arr][:,2]
seedpos = np.nonzero(arr==seed_global)[0][0]
# truncate around the seed...
arr = truncate(arr, intensity_profile, seedpos, iso_split_level)
intensity_profile = stats[arr][:,2]
# Remove lower masses:
# Take the index of the maximum and remove all masses on the left side
if charge_range[index] * stats[seed_global, 0] < 1000:
maxpos = np.argmax(intensity_profile)
arr = arr[maxpos:]
intensity_profile = stats[arr][:,2]
if (len(arr) > longest_trace) | ((len(arr) == longest_trace) & (intensity_profile.sum() > champion_intensity)):
# Averagine check
cc = check_averagine(stats, arr, charge_range[index], averagine_aa, isotopes)
if cc > 0.6:
# Update the champion
champion_trace = arr
champion_charge = charge_range[index]
longest_trace = len(arr)
champion_intensity = intensity_profile.sum()
return champion_trace, champion_charge
# Cell
from numba.typed import List
from typing import Callable, Union
def get_isotope_patterns(pre_isotope_patterns:list, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, averagine_aa:Dict, isotopes:Dict, iso_charge_min:int = 1, iso_charge_max:int = 6, iso_mass_range:float = 5, iso_n_seeds:int = 100, cc_cutoff:float=0.6, iso_split_level:float = 1.3, callback:Union[Callable, None]=None) -> (np.ndarray, np.ndarray, np.ndarray):
"""Wrapper function to iterate over pre_isotope_patterns.
Args:
pre_isotope_patterns (list): List of pre-isotope patterns.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
averagine_aa (Dict): Dict containing averagine masses.
isotopes (Dict): Dict containing isotopes.
iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
iso_mass_range (float, optional): Mass search range. Defaults to 5.
iso_n_seeds (int, optional): Number of isotope seeds. Defaults to 100.
cc_cutoff (float, optional): Cutoff for correlation. Defaults to 0.6.
iso_split_level (float, optional): Isotope split level. Defaults to 1.3.
callback (Union[Callable, None], optional): Callback function for progress. Defaults to None.
Returns:
np.ndarray: Flat array with the hill indices of all isotope patterns.
np.ndarray: Index array into the flat pattern array; pattern i spans iso_idx[i]:iso_idx[i+1].
np.ndarray: Array containing isotope charges.
"""
isotope_patterns = []
isotope_charges = []
charge_range = List()
for i in range(iso_charge_min, iso_charge_max + 1):
charge_range.append(i)
for idx, pre_pattern in enumerate(pre_isotope_patterns):
extract = True
while extract:
isotope_pattern, isotope_charge = isolate_isotope_pattern(np.array(pre_pattern), hill_ptrs, hill_data, int_data, scan_idx, stats, sortindex_, iso_mass_range, charge_range, averagine_aa, isotopes, iso_n_seeds, cc_cutoff, iso_split_level)
if isotope_pattern is None:
length = 0
else:
length = len(isotope_pattern)
if length > 1:
isotope_charges.append(isotope_charge)
isotope_patterns.append(isotope_pattern)
pre_pattern = [_ for _ in pre_pattern if _ not in isotope_pattern]
if len(pre_pattern) <= 1:
extract = False
else:
extract = False
if callback:
callback((idx+1)/len(pre_isotope_patterns))
iso_patterns = np.zeros(sum([len(_) for _ in isotope_patterns]), dtype=np.int64)
iso_idx = np.zeros(len(isotope_patterns)+1, dtype='int')
start = 0
for idx, _ in enumerate(isotope_patterns):
iso_patterns[start:start+len(_)] = _
start += len(_)
iso_idx[idx+1] = start
return iso_patterns, iso_idx, np.array(isotope_charges)
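# A minimal illustrative sketch (not part of the original module) of how the flat output of
# get_isotope_patterns is read back: pattern i spans iso_patterns[iso_idx[i]:iso_idx[i+1]].
def _sketch_read_isotope_patterns(iso_patterns, iso_idx):
    return [iso_patterns[iso_idx[i]:iso_idx[i + 1]] for i in range(len(iso_idx) - 1)]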
# Cell
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def report_(idx:np.ndarray, isotope_charges:list, isotope_patterns:list, iso_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, rt_:np.ndarray, rt_idx:np.ndarray, results:np.ndarray, lookup_idx:np.ndarray):
"""Function to extract summary statstics from a list of isotope patterns and charges.
MS1 feature intensity estimation. For each isotope envelope we interpolate the signal over the retention time
range. All isotope enevelopes are summed up together to estimate the peak sahpe
Lastly, we report three estimates for the intensity:
- ms1_int_sum_apex: The intensity at the peak of the summed signal.
- ms1_int_sum_area: The area of the summed signal
- ms1_int_max_apex: The intensity at the peak of the most intense isotope envelope
- ms1_int_max_area: The area of the the most intense isotope envelope
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
isotope_patterns (list): List containing isotope patterns (indices to hills).
isotope_charges (list): List with charges assigned to the isotope patterns.
iso_idx (np.ndarray): Index to isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
rt_ (np.ndarray): Array with retention time information for each scan.
rt_idx (np.ndarray): Lookup array to match centroid idx to rt.
results (np.ndarray): Recordarray with isotope pattern summary statistics.
lookup_idx (np.ndarray): Lookup array for each centroid.
"""
pattern = isotope_patterns[iso_idx[idx]:iso_idx[idx+1]]
isotope_data = stats[pattern]
mz = np.min(isotope_data[:, 0])
mz_std = np.mean(isotope_data[:, 1])
charge = isotope_charges[idx]
mass = mz_to_mass(mz, charge)
int_max_idx = np.argmax(isotope_data[:, 2])
mz_most_abundant = isotope_data[:, 0][int_max_idx]
int_max = isotope_data[:,2][int_max_idx]
rt_start = isotope_data[int_max_idx, 4] # This is the start of the most abundant trace
rt_end = isotope_data[int_max_idx, 5]
rt_min_ = min(isotope_data[:, 4])
rt_max_ = max(isotope_data[:, 5])
rt_range = np.linspace(rt_min_, rt_max_, 100) #TODO this is a fixed value - is there an optimum?
trace_sum = np.zeros_like(rt_range)
most_intense_pattern = -np.inf
for i, k in enumerate(pattern):
x = sortindex_[k]
start = hill_ptrs[x]
end = hill_ptrs[x + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
rts = rt_[rt_idx[idx_]]
lookup_idx[idx_, 0] = idx
lookup_idx[idx_, 1] = i
interpolation = np.interp(rt_range, rts, int_)
#Filter
interpolation[:(rt_range < rts[0]).sum()] = 0
right_cut = (rt_range > rts[-1]).sum()
if right_cut > 0:
interpolation[-right_cut:]= 0
trace_sum += interpolation
if int_.sum() > most_intense_pattern:
most_intense_pattern = int_.sum()
ms1_int_max_apex = int_.max()
ms1_int_max_area = np.trapz(int_, rts)
rt_apex_idx = trace_sum.argmax()
rt_apex = rt_range[rt_apex_idx]
trace = trace_sum
half_max = trace.max()/2
if rt_apex_idx == 0:
left_apex = 0
else:
left_apex = np.abs(trace[:rt_apex_idx]-half_max).argmin()
right_apex = np.abs(trace[rt_apex_idx:]-half_max).argmin()+rt_apex_idx
ms1_int_sum_apex = trace_sum[rt_apex_idx]
fwhm = rt_range[right_apex] - rt_range[left_apex]
n_isotopes = len(pattern)
rt_cutoff = 0.95  # peak boundaries are set where the trace falls to 5% of the apex (1 - 0.95)
if rt_apex_idx == 0:
rt_min_idx = 0
else:
rt_min_idx = np.abs(trace[:rt_apex_idx]-trace.max()*(1-rt_cutoff)).argmin()
rt_max_idx = np.abs(trace[rt_apex_idx:]-trace.max()*(1-rt_cutoff)).argmin()+rt_apex_idx
#plt.xlabel('rt')
#plt.ylabel('int')
#plt.show()
#plt.plot(rt_range, trace_sum)
#plt.plot([rt_range[left_apex], rt_range[right_apex]], [(trace[left_apex] + trace[right_apex])/2]*2, 'k:')
#plt.plot(rt_range[rt_apex_idx], trace[rt_apex_idx], 'k*')
#plt.plot(rt_range[rt_min_idx], trace[rt_min_idx], 'k*')
#plt.plot(rt_range[rt_max_idx], trace[rt_max_idx], 'k*')
#plt.show()
rt_start = rt_range[rt_min_idx]
rt_end = rt_range[rt_max_idx]
ms1_int_sum_area = np.trapz(trace_sum[rt_min_idx:rt_max_idx], rt_range[rt_min_idx:rt_max_idx])
results[idx,:] = np.array([mz, mz_std, mz_most_abundant, charge, rt_start, rt_apex, rt_end, fwhm, n_isotopes, mass, ms1_int_sum_apex, ms1_int_sum_area, ms1_int_max_apex, ms1_int_max_area])
# Cell
import pandas as pd
def feature_finder_report(query_data:dict, isotope_patterns:list, isotope_charges:list, iso_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray)->pd.DataFrame:
"""Creates a report dataframe with summary statistics of the found isotope patterns.
Args:
query_data (dict): Data structure containing the query data.
isotope_patterns (list): List containing isotope patterns (indices to hills).
isotope_charges (list): List with charges assigned to the isotope patterns.
iso_idx (np.ndarray): Index to the isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
Returns:
pd.DataFrame: DataFrame with isotope pattern summary statistics.
"""
rt_ = np.array(query_data['rt_list_ms1'])
indices_ = np.array(query_data['indices_ms1'])
mass_data = np.array(query_data['mass_list_ms1'])
rt_idx = np.searchsorted(indices_, np.arange(len(mass_data)), side='right') - 1
lookup_idx = np.zeros((len(mass_data), 2), dtype=np.int64) - 1
int_data = np.array(query_data['int_list_ms1'])
results = np.zeros((len(isotope_charges), 14))
report_(range(len(isotope_charges)), isotope_charges, isotope_patterns, iso_idx, stats, sortindex_, hill_ptrs, hill_data, int_data, rt_, rt_idx, results, lookup_idx)
df = pd.DataFrame(results, columns = ['mz','mz_std','mz_most_abundant','charge','rt_start','rt_apex','rt_end','fwhm','n_isotopes','mass', 'ms1_int_sum_apex', 'ms1_int_sum_area', 'ms1_int_max_apex', 'ms1_int_max_area'])
df = df.sort_values(['rt_start','mz'])
return df, lookup_idx
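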
# Cell
import subprocess
import os
import platform
def extract_bruker(file:str, base_dir:str = "ext/bruker/FF", config:str = "proteomics_4d.config"):
"""Call Bruker Feautre Finder via subprocess.
Args:
file (str): Filename for feature finding.
base_dir (str, optional): Base directory where the feature finder is stored. Defaults to "ext/bruker/FF".
config (str, optional): Config file for feature finder. Defaults to "proteomics_4d.config".
Raises:
NotImplementedError: Unsupported operating system.
FileNotFoundError: Feature finder not found.
FileNotFoundError: Config file not found.
FileNotFoundError: Feature file not found.
"""
feature_path = file + '/'+ os.path.split(file)[-1] + '.features'
base_dir = os.path.join(os.path.dirname(__file__), base_dir)
operating_system = platform.system()
if operating_system == 'Linux':
ff_dir = os.path.join(base_dir, 'linux64','uff-cmdline2')
logging.info('Using Linux FF')
elif operating_system == 'Windows':
ff_dir = os.path.join(base_dir, 'win64','uff-cmdline2.exe')
logging.info('Using Windows FF')
else:
raise NotImplementedError(f"System {operating_system} not supported.")
if os.path.exists(feature_path):
return feature_path
else:
if not os.path.isfile(ff_dir):
raise FileNotFoundError(f'Bruker feature finder cmd not found here {ff_dir}.')
config_path = base_dir + '/'+ config
if not os.path.isfile(config_path):
raise FileNotFoundError(f'Config file not found here {config_path}.')
if operating_system == 'Windows':
FF_parameters = [ff_dir,'--ff 4d',f'--readconfig "{config_path}"', f'--analysisDirectory "{file}"']
process = subprocess.Popen(' '.join(FF_parameters), stdout=subprocess.PIPE)
for line in iter(process.stdout.readline, b''):
logtxt = line.decode('utf8')
logging.info(logtxt[48:].rstrip()) #Remove logging info from FF
elif operating_system == 'Linux':
FF_parameters = [
ff_dir,
'--ff',
'4d',
'--readconfig',
config_path,
'--analysisDirectory',
file
]
process = subprocess.run(FF_parameters, stdout=subprocess.PIPE)
if os.path.exists(feature_path):
return feature_path
else:
raise FileNotFoundError(f"Feature file {feature_path} does not exist.")
import sqlalchemy as db
def convert_bruker(feature_path:str)->pd.DataFrame:
"""Reads feature table and converts to feature table to be used with AlphaPept.
Args:
feature_path (str): Path to the feature file from Bruker FF (.features-file).
Returns:
pd.DataFrame: DataFrame containing features information.
"""
engine_featurefile = db.create_engine('sqlite:///{}'.format(feature_path))
feature_table = pd.read_sql_table('LcTimsMsFeature', engine_featurefile)
feature_cluster_mapping = pd.read_sql_table('FeatureClusterMapping', engine_featurefile)
from .constants import mass_dict
M_PROTON = mass_dict['Proton']
feature_table['Mass'] = feature_table['MZ'].values * feature_table['Charge'].values - feature_table['Charge'].values*M_PROTON
feature_table = feature_table.rename(columns={"MZ": "mz","Mass": "mass", "RT": "rt_apex", "RT_lower":"rt_start", "RT_upper":"rt_end", "Mobility": "mobility", "Mobility_lower": "mobility_lower", "Mobility_upper": "mobility_upper", "Charge":"charge","Intensity":'ms1_int_sum_apex',"ClusterCount":'n_isotopes'})
feature_table['rt_apex'] = feature_table['rt_apex']/60
feature_table['rt_start'] = feature_table['rt_start']/60
feature_table['rt_end'] = feature_table['rt_end']/60
feature_cluster_mapping = feature_cluster_mapping.rename(columns={"FeatureId": "feature_id", "ClusterId": "cluster_id", "Monoisotopic": "monoisotopic", "Intensity": "ms1_int_sum_apex"})
return feature_table, feature_cluster_mapping
def map_bruker(feature_path:str, feature_table:pd.DataFrame, query_data:dict)->pd.DataFrame:
"""Map Ms1 to Ms2 via Table FeaturePrecursorMapping from Bruker FF.
Args:
feature_path (str): Path to the feature file from Bruker FF (.features-file).
feature_table (pd.DataFrame): Pandas DataFrame containing the features.
query_data (dict): Data structure containing the query data.
Returns:
pd.DataFrame: DataFrame containing features information.
"""
engine_featurefile = db.create_engine('sqlite:///{}'.format(feature_path))
mapping = pd.read_sql_table('FeaturePrecursorMapping', engine_featurefile)
mapping = mapping.set_index('PrecursorId')
feature_table= feature_table.set_index('Id')
query_prec_id = query_data['prec_id']
#Now look up the feature for each precursor
mass_matched = []
mz_matched = []
rt_matched = []
query_idx = []
f_idx = []
for idx, prec_id in tqdm(enumerate(query_prec_id)):
try:
f_id = mapping.loc[prec_id]['FeatureId']
all_matches = feature_table.loc[f_id]
if type(f_id) == np.int64:
match = all_matches
mz_matched.append(match['mz'])
rt_matched.append(match['rt_apex'])
mass_matched.append(match['mass'])
query_idx.append(idx)
f_idx.append(match['FeatureId'])
else:
for k in range(len(all_matches)):
match = all_matches.iloc[k]
mz_matched.append(match['mz'])
rt_matched.append(match['rt_apex'])
mass_matched.append(match['mass'])
query_idx.append(idx)
f_idx.append(match['FeatureId'])
except KeyError:
pass
features = pd.DataFrame(np.array([mass_matched, mz_matched, rt_matched, query_idx, f_idx]).T, columns = ['mass_matched', 'mz_matched', 'rt_matched', 'query_idx', 'feature_idx'])
features['query_idx'] = features['query_idx'].astype('int')
return features
# Cell
def get_stats(isotope_patterns, iso_idx, stats):
columns = ['mz_average','delta_m','int_sum','int_area','rt_min','rt_max']
stats_idx = np.zeros(iso_idx[-1], dtype=np.int64)
stats_map = np.zeros(iso_idx[-1], dtype=np.int64)
start_ = 0
end_ = 0
for idx in range(len(iso_idx)-1):
k = isotope_patterns[iso_idx[idx]:iso_idx[idx+1]]
end_ += len(k)
stats_idx[start_:end_] = k
stats_map[start_:end_] = idx
start_ = end_
k = pd.DataFrame(stats[stats_idx], columns=columns)
k['feature_id'] = stats_map
return k
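# A minimal illustrative sketch (not part of the original module): the table returned by
# get_stats has one row per hill belonging to an isotope pattern, with 'feature_id' pointing
# back to the pattern index, so per-feature hill statistics are a simple groupby away.
def _sketch_hills_per_feature(isotope_patterns, iso_idx, stats):
    hill_stats = get_stats(isotope_patterns, iso_idx, stats)
    return hill_stats.groupby('feature_id')['int_sum'].sum()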
# Cell
import numpy as np
import logging
import os
from .search import query_data_to_features
import alphapept.io
import functools
def find_features(to_process:tuple, callback:Union[Callable, None] = None, parallel:bool = False)-> Union[str, bool]:
"""Wrapper for feature finding.
Args:
to_process (tuple): to_process tuple, to be used from a process pool.
callback (Union[Callable, None], optional): Optional callback function. Defaults to None.
parallel (bool, optional): Flag to use parallel processing. Currently unused. Defaults to False.
Raises:
NotImplementedError: Error if the file extension is not understood.
Returns:
Union[str, bool]: Returns True if the function was successful, otherwise the exception as a string.
"""
try:
index, settings = to_process
file_name = settings['experiment']['file_paths'][index]
base, ext = os.path.splitext(file_name)
if ext.lower() == '.raw':
datatype='thermo'
elif ext.lower() == '.d':
datatype='bruker'
elif ext.lower() == '.mzml':
datatype='mzml'
else:
raise NotImplementedError('File extension {} not understood.'.format(ext))
out_file = f"{base}.ms_data.hdf"
skip = True
if os.path.isfile(out_file):
try:
alphapept.io.MS_Data_File(
out_file
).read(dataset_name="features")
logging.info(
'Found *.hdf with features for {}'.format(out_file)
)
except KeyError:
logging.info(
'No *.hdf file with features found for {}. Adding to feature finding list.'.format(out_file)
)
skip = False
if not skip:
ms_file = alphapept.io.MS_Data_File(out_file, is_read_only=False)
query_data = ms_file.read_DDA_query_data()
feature_cluster_mapping = pd.DataFrame()
if not settings['workflow']["find_features"]:
features = query_data_to_features(query_data)
else:
if datatype in ['thermo','mzml']:
from .constants import averagine_aa, isotopes
f_settings = settings['features']
max_gap = f_settings['max_gap']
centroid_tol = f_settings['centroid_tol']
hill_split_level = f_settings['hill_split_level']
iso_split_level = f_settings['iso_split_level']
#Cleanup if
int_data = np.array(query_data['int_list_ms1'])
window = f_settings['hill_smoothing']
hill_check_large = f_settings['hill_check_large']
iso_charge_min = f_settings['iso_charge_min']
iso_charge_max = f_settings['iso_charge_max']
iso_n_seeds = f_settings['iso_n_seeds']
hill_nboot_max = f_settings['hill_nboot_max']
hill_nboot = f_settings['hill_nboot']
iso_mass_range = f_settings['iso_mass_range']
iso_corr_min = f_settings['iso_corr_min']
logging.info('Feature finding on {}'.format(file_name))
logging.info(f'Hill extraction with centroid_tol {centroid_tol} and max_gap {max_gap}')
hill_ptrs, hill_data, path_node_cnt, score_median, score_std = extract_hills(query_data, max_gap, centroid_tol)
logging.info(f'Number of hills {len(hill_ptrs):,}, len = {np.mean(path_node_cnt):.2f}')
logging.info(f'Repeating hill extraction with centroid_tol {score_median+score_std*3:.2f}')
hill_ptrs, hill_data, path_node_cnt, score_median, score_std = extract_hills(query_data, max_gap, score_median+score_std*3)
logging.info(f'Number of hills {len(hill_ptrs):,}, len = {np.mean(path_node_cnt):.2f}')
hill_ptrs, hill_data = remove_duplicate_hills(hill_ptrs, hill_data, path_node_cnt)
logging.info(f'After duplicate removal of hills {len(hill_ptrs):,}')
hill_ptrs = split_hills(hill_ptrs, hill_data, int_data, hill_split_level=hill_split_level, window = window) # hill length is already included here
logging.info(f'After split hill_ptrs {len(hill_ptrs):,}')
hill_data, hill_ptrs = filter_hills(hill_data, hill_ptrs, int_data, hill_check_large = hill_check_large, window=window)
logging.info(f'After filter hill_ptrs {len(hill_ptrs):,}')
stats, sortindex_, idxs_upper, scan_idx, hill_data, hill_ptrs = get_hill_data(query_data, hill_ptrs, hill_data, hill_nboot_max = hill_nboot_max, hill_nboot = hill_nboot)
logging.info('Extracting hill stats complete')
pre_isotope_patterns = get_pre_isotope_patterns(stats, idxs_upper, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, maximum_offset, iso_charge_min=iso_charge_min, iso_charge_max=iso_charge_max, iso_mass_range=iso_mass_range, cc_cutoff=iso_corr_min)
logging.info('Found {:,} pre isotope patterns.'.format(len(pre_isotope_patterns)))
isotope_patterns, iso_idx, isotope_charges = get_isotope_patterns(pre_isotope_patterns, hill_ptrs, hill_data, int_data, scan_idx, stats, sortindex_, averagine_aa, isotopes, iso_charge_min = iso_charge_min, iso_charge_max = iso_charge_max, iso_mass_range = iso_mass_range, iso_n_seeds = iso_n_seeds, cc_cutoff = iso_corr_min, iso_split_level=iso_split_level, callback=None)
logging.info('Extracted {:,} isotope patterns.'.format(len(isotope_charges)))
feature_table, lookup_idx = feature_finder_report(query_data, isotope_patterns, isotope_charges, iso_idx, stats, sortindex_, hill_ptrs, hill_data)
lookup_idx_df = pd.DataFrame(lookup_idx, columns = ['isotope_pattern', 'isotope_pattern_hill'])
ms_file.write(lookup_idx_df, dataset_name="feature_table_idx")
feature_cluster_mapping = get_stats(isotope_patterns, iso_idx, stats)
logging.info('Report complete.')
elif datatype == 'bruker':
logging.info('Feature finding on {}'.format(file_name))
feature_path = extract_bruker(file_name)
feature_table, feature_cluster_mapping = convert_bruker(feature_path)
logging.info('Bruker feature finder complete. Extracted {:,} features.'.format(len(feature_table)))
# Calculate additional params
feature_table['rt_length'] = feature_table['rt_end'] - feature_table['rt_start']
feature_table['rt_right'] = feature_table['rt_end'] - feature_table['rt_apex']
feature_table['rt_left'] = feature_table['rt_apex'] - feature_table['rt_start']
feature_table['rt_tail'] = feature_table['rt_right'] / feature_table['rt_left']
logging.info('Matching features to query data.')
if 'mono_mzs2' not in query_data.keys():
logging.info('No MS2-data to match.')
features = pd.DataFrame()
else:
features = map_ms2(feature_table, query_data, **settings['features'])
ms_file.write(feature_cluster_mapping, dataset_name="feature_cluster_mapping")
logging.info('Saving feature table.')
ms_file.write(feature_table, dataset_name="feature_table")
logging.info('Feature table saved to {}'.format(out_file))
logging.info('Saving features.')
ms_file.write(features, dataset_name="features")
logging.info(f'Feature finding of file {file_name} complete.')
return True
except Exception as e:
logging.error(f'Feature finding of file {file_name} failed. Exception {e}')
return f"{e}" #Can't return exception object, cast as string
# Cell
from sklearn.neighbors import KDTree
import pandas as pd
import numpy as np
def replace_infs(array:np.ndarray)->np.ndarray:
"""Replace nans and infs with 0
Args:
array (np.ndarray): Input array.
Returns:
np.ndarray: Output array without nans and infs.
"""
array[array == -np.inf] = 0
array[array == np.inf] = 0
array[np.isnan(array)] = 0
return array
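# A minimal illustrative sketch (not part of the original module): nan and +/-inf entries are zeroed.
def _sketch_replace_infs():
    return replace_infs(np.array([1.0, np.inf, -np.inf, np.nan, 2.0]))  # -> array([1., 0., 0., 0., 2.])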
def map_ms2(feature_table:pd.DataFrame, query_data:dict, map_mz_range:float = 1, map_rt_range:float = 0.5, map_mob_range:float = 0.3, map_n_neighbors:int=5, search_unidentified:bool = False, **kwargs)->pd.DataFrame:
"""Map MS1 features to MS2 based on rt and mz.
If mobility (CCS) information is present, it is used for the mapping as well.
Args:
feature_table (pd.DataFrame): Pandas DataFrame with features.
query_data (dict): Data structure containing the query data.
map_mz_range (float, optional): Mapping range for mz (Da). Defaults to 1.
map_rt_range (float, optional): Mapping range for rt (min). Defaults to 0.5.
map_mob_range (float, optional): Mapping range for mobility (%). Defaults to 0.3.
map_n_neighbors (int, optional): Maximum number of neighbors to be extracted. Defaults to 5.
search_unidentified (bool, optional): Flag to perform search on features that have no isotope pattern. Defaults to False.
Returns:
pd.DataFrame: Table with features.
"""
feature_table['rt'] = feature_table['rt_apex']
range_dict = {}
range_dict['mz'] = ('mono_mzs2', map_mz_range)
range_dict['rt'] = ('rt_list_ms2', map_rt_range)
range_dict['mobility'] = ('mobility', map_mob_range)
query_dict = {}
query_dict['rt'] = 'rt_list_ms2'
query_dict['mass'] = 'prec_mass_list2'
query_dict['mz'] = 'mono_mzs2'
query_dict['charge'] = 'charge2'
query_dict['mobility'] = 'mobility'
if 'mobility' not in feature_table.columns:
del range_dict['mobility']
del query_dict['mobility']
use_mob = False
else:
use_mob = True
tree_points = feature_table[list(range_dict.keys())].values
for i, key in enumerate(range_dict):
tree_points[:,i] = tree_points[:,i]/range_dict[key][1]
matching_tree = KDTree(tree_points, metric="euclidean")
ref_points = np.array([query_data[range_dict[_][0]] / range_dict[_][1] for _ in range_dict]).T
ref_points = replace_infs(ref_points)
dist, idx = matching_tree.query(ref_points, k=map_n_neighbors)
ref_matched = np.zeros(ref_points.shape[0], dtype=np.bool_)
all_df = []
for neighbor in range(map_n_neighbors):
ref_df = pd.DataFrame(np.array([query_data[query_dict[_]] for _ in query_dict]).T, columns = query_dict.keys())
for _ in query_dict:
ref_df[_+'_matched'] = feature_table.iloc[idx[:,neighbor]][_].values
ref_df[_+'_offset'] = ref_df[_+'_matched'] - ref_df[_]
ref_df['query_idx'] = ref_df.index
ref_df['feature_idx'] = idx[:,neighbor]
for field in ['ms1_int_sum_area','ms1_int_sum_apex','ms1_int_max_area','ms1_int_max_apex','rt_start','rt_apex','rt_end','fwhm','mobility_lower','mobility_upper']:
if field in feature_table.keys():
ref_df[field] = feature_table.iloc[idx[:,neighbor]][field].values
rt_check = (ref_df['rt_start'] <= ref_df['rt']) & (ref_df['rt'] <= ref_df['rt_end'])
# check isolation window (win=3)
mass_check = np.abs(ref_df['mz_offset'].values) <= 3
_check = rt_check & mass_check
if use_mob:
mob_check = (ref_df['mobility_lower'] <= ref_df['mobility']) & (ref_df['mobility'] <= ref_df['mobility_upper'])
_check &= mob_check
ref_matched |= _check
ref_df['feature_dist'] = dist[:,neighbor]
ref_df = ref_df[_check]
all_df.append(ref_df)
if search_unidentified:
if use_mob:
unmatched_ref = pd.DataFrame(np.array([query_data['rt_list_ms2'], query_data['prec_mass_list2'], query_data['mono_mzs2'], query_data['charge2'], query_data['mobility']]).T, columns=['rt', 'mass', 'mz', 'charge','mobility'])
else:
unmatched_ref = pd.DataFrame(np.array([query_data['rt_list_ms2'], query_data['prec_mass_list2'], query_data['mono_mzs2'], query_data['charge2']]).T, columns=['rt', 'mass', 'mz', 'charge'])
unmatched_ref = unmatched_ref[~ref_matched]
unmatched_ref['mass_matched'] = unmatched_ref['mass']
unmatched_ref['mass_offset'] = 0
unmatched_ref['rt_matched'] = unmatched_ref['rt']
unmatched_ref['rt_offset'] = 0
unmatched_ref['mz_matched'] = unmatched_ref['mz']
unmatched_ref['mz_offset'] = 0
unmatched_ref['charge_matched'] = unmatched_ref['charge']
unmatched_ref['query_idx'] = unmatched_ref.index
unmatched_ref['feature_idx'] = np.nan
if use_mob:
ref_df['mobility_matched'] = unmatched_ref['mobility']
ref_df['mobility_offset'] = np.nan
for field in ['ms1_int_sum_area','ms1_int_sum_apex','ms1_int_max_area','ms1_int_max_apex','rt_start','rt_apex','rt_end','fwhm']:
if field in feature_table.keys():
unmatched_ref[field] = np.nan
unmatched_ref['feature_dist'] = np.nan
all_df.append(unmatched_ref)
features = pd.concat(all_df)
features = features.sort_values('mass_matched', ascending=True)
features = features.reset_index(drop=True)
return features
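# A minimal illustrative sketch (not part of the original module) of the range-scaled
# nearest-neighbour idea used in map_ms2: each dimension is divided by its tolerance so a plain
# Euclidean KDTree treats m/z and rt on a comparable scale. All values below are hypothetical.
def _sketch_scaled_kdtree():
    features = np.array([[500.25, 10.1], [600.40, 22.3]])  # columns: mz, rt
    tolerances = np.array([1.0, 0.5])                       # map_mz_range, map_rt_range
    tree = KDTree(features / tolerances, metric="euclidean")
    dist, idx = tree.query(np.array([[500.30, 10.2]]) / tolerances, k=1)
    return dist, idx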
| 39.274919 | 438 | 0.65683 |
0e8e109dc2d5c300a72b06fc16a6718619759d81 | 939 | py | Python | scraping/web.py | vmarcella/auto-python | d98b5cd56d5c04e57992ed6ec2f6f0c8630caf8d | ["MIT"] | 1 | 2019-05-04T09:26:07.000Z | 2019-05-04T09:26:07.000Z | scraping/web.py | C3NZ/auto-python | d98b5cd56d5c04e57992ed6ec2f6f0c8630caf8d | ["MIT"] | 2 | 2021-03-19T00:49:22.000Z | 2021-06-08T19:56:02.000Z | scraping/web.py | vmarcella/auto-python | d98b5cd56d5c04e57992ed6ec2f6f0c8630caf8d | ["MIT"] | null | null | null
"""
Playing around with API calls in python
"""
import requests
def main():
# fetch a bunch of posts
response = requests.get("https://jsonplaceholder.typicode.com/posts")
# Print out our status code, and the first post
print(response)
print(response.json()[0])
# Dictionary of the new post we're creating
new_post = {
"userId": 10,
"title": "OOOOOOOOoOOOOOooooF",
"body": "This is a new post",
}
# Create a new post request
response = requests.post(
"https://jsonplaceholder.typicode.com/posts", json=new_post
)
# Print out the response and our newly created post
print(response)
print(response.json())
# Get a specific post
response = requests.get("https://jsonplaceholder.typicode.com/posts/2")
# Print out the response and specific post
print(response)
print(response.json())
if __name__ == "__main__":
main()
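# A minimal, hypothetical extension of the sketch above: the same request with basic error
# handling, so non-2xx responses raise instead of being silently printed.
def fetch_post(post_id):
    response = requests.get(f"https://jsonplaceholder.typicode.com/posts/{post_id}")
    response.raise_for_status()  # raises requests.HTTPError for 4xx/5xx responses
    return response.json()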
| 22.902439 | 75 | 0.643237 |
0ea37bead89777eb5845fa6e72e443ad32cbcf25 | 81,469 | py | Python | azure-mgmt-network/azure/mgmt/network/v2018_11_01/operations/virtual_network_gateways_operations.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | ["MIT"] | null | null | null | azure-mgmt-network/azure/mgmt/network/v2018_11_01/operations/virtual_network_gateways_operations.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | ["MIT"] | null | null | null | azure-mgmt-network/azure/mgmt/network/v2018_11_01/operations/virtual_network_gateways_operations.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | ["MIT"] | null | null | null
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class VirtualNetworkGatewaysOperations(object):
"""VirtualNetworkGatewaysOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2018-11-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-11-01"
self.config = config
def _create_or_update_initial(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VirtualNetworkGateway')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates a virtual network gateway in the specified resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to create or update virtual
network gateway operation.
:type parameters:
~azure.mgmt.network.v2018_11_01.models.VirtualNetworkGateway
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns VirtualNetworkGateway
or ClientRawResponse<VirtualNetworkGateway> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_11_01.models.VirtualNetworkGateway]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_11_01.models.VirtualNetworkGateway]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
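    # A minimal usage sketch (not part of the generated client); the client object below is
    # hypothetical and assumed to be a NetworkManagementClient configured for this API version:
    #
    #   poller = network_client.virtual_network_gateways.create_or_update(
    #       'my_resource_group', 'my_gateway', gateway_parameters)
    #   gateway = poller.result()  # blocks until the long-running operation completes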
def get(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified virtual network gateway by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VirtualNetworkGateway or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2018_11_01.models.VirtualNetworkGateway
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
def _delete_initial(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified virtual network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
def _update_tags_initial(
self, resource_group_name, virtual_network_gateway_name, tags=None, custom_headers=None, raw=False, **operation_config):
parameters = models.TagsObject(tags=tags)
# Construct URL
url = self.update_tags.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TagsObject')
# Construct and send request
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update_tags(
self, resource_group_name, virtual_network_gateway_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates a virtual network gateway tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns VirtualNetworkGateway
or ClientRawResponse<VirtualNetworkGateway> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_11_01.models.VirtualNetworkGateway]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_11_01.models.VirtualNetworkGateway]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
tags=tags,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all virtual network gateways by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VirtualNetworkGateway
:rtype:
~azure.mgmt.network.v2018_11_01.models.VirtualNetworkGatewayPaged[~azure.mgmt.network.v2018_11_01.models.VirtualNetworkGateway]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.VirtualNetworkGatewayPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualNetworkGatewayPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways'}
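    # A minimal usage sketch (not part of the generated client): the paged result can be
    # iterated directly; the client object below is hypothetical.
    #
    #   for gateway in network_client.virtual_network_gateways.list('my_resource_group'):
    #       print(gateway.name)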
def list_connections(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Gets all the connections in a virtual network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of
VirtualNetworkGatewayConnectionListEntity
:rtype:
~azure.mgmt.network.v2018_11_01.models.VirtualNetworkGatewayConnectionListEntityPaged[~azure.mgmt.network.v2018_11_01.models.VirtualNetworkGatewayConnectionListEntity]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_connections.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.VirtualNetworkGatewayConnectionListEntityPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualNetworkGatewayConnectionListEntityPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_connections.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/connections'}
def _reset_initial(
self, resource_group_name, virtual_network_gateway_name, gateway_vip=None, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.reset.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if gateway_vip is not None:
query_parameters['gatewayVip'] = self._serialize.query("gateway_vip", gateway_vip, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def reset(
self, resource_group_name, virtual_network_gateway_name, gateway_vip=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Resets the primary of the virtual network gateway in the specified
resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
        :param gateway_vip: Virtual network gateway VIP address supplied to
         begin the reset of an active-active feature enabled gateway.
:type gateway_vip: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns VirtualNetworkGateway
or ClientRawResponse<VirtualNetworkGateway> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_11_01.models.VirtualNetworkGateway]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_11_01.models.VirtualNetworkGateway]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._reset_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
gateway_vip=gateway_vip,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset'}
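    # Usage sketch (illustrative only). reset is a long-running operation, so the call
    # returns an LROPoller; .result() blocks until the primary has been reset. `client`
    # and the resource names below are placeholders.
    #
    #   poller = client.virtual_network_gateways.reset('my-resource-group', 'my-gateway')
    #   gateway = poller.result()  # VirtualNetworkGateway once the operation completes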
def _reset_vpn_client_shared_key_initial(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.reset_vpn_client_shared_key.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def reset_vpn_client_shared_key(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Resets the VPN client shared key of the virtual network gateway in the
specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._reset_vpn_client_shared_key_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
reset_vpn_client_shared_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/resetvpnclientsharedkey'}
def _generatevpnclientpackage_initial(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.generatevpnclientpackage.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VpnClientParameters')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def generatevpnclientpackage(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Generates VPN client package for P2S client of the virtual network
gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to the generate virtual network
gateway VPN client package operation.
:type parameters:
~azure.mgmt.network.v2018_11_01.models.VpnClientParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns str or
ClientRawResponse<str> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[str] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[str]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._generatevpnclientpackage_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
generatevpnclientpackage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage'}
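    # Usage sketch (illustrative only). The client package is built asynchronously and
    # the poller resolves to a str. The VpnClientParameters field shown is an assumption
    # about a typical request, not something mandated by this file.
    #
    #   params = models.VpnClientParameters(processor_architecture='Amd64')
    #   package_url = client.virtual_network_gateways.generatevpnclientpackage(
    #       'my-resource-group', 'my-gateway', params).result()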
def _generate_vpn_profile_initial(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.generate_vpn_profile.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VpnClientParameters')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def generate_vpn_profile(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Generates VPN profile for P2S client of the virtual network gateway in
        the specified resource group. Used for IKEv2 and RADIUS based
authentication.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to the generate virtual network
gateway VPN client package operation.
:type parameters:
~azure.mgmt.network.v2018_11_01.models.VpnClientParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns str or
ClientRawResponse<str> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[str] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[str]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._generate_vpn_profile_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
generate_vpn_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnprofile'}
def _get_vpn_profile_package_url_initial(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_vpn_profile_package_url.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_vpn_profile_package_url(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Gets pre-generated VPN profile for P2S client of the virtual network
gateway in the specified resource group. The profile needs to be
generated first using generateVpnProfile.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns str or
ClientRawResponse<str> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[str] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[str]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_vpn_profile_package_url_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_vpn_profile_package_url.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnprofilepackageurl'}
def _get_bgp_peer_status_initial(
self, resource_group_name, virtual_network_gateway_name, peer=None, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_bgp_peer_status.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if peer is not None:
query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BgpPeerStatusListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_bgp_peer_status(
self, resource_group_name, virtual_network_gateway_name, peer=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""The GetBgpPeerStatus operation retrieves the status of all BGP peers.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param peer: The IP address of the peer to retrieve the status of.
:type peer: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns BgpPeerStatusListResult
or ClientRawResponse<BgpPeerStatusListResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_11_01.models.BgpPeerStatusListResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_11_01.models.BgpPeerStatusListResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_bgp_peer_status_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
peer=peer,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('BgpPeerStatusListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_bgp_peer_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getBgpPeerStatus'}
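    # Usage sketch (illustrative only). `peer` may be omitted to query every BGP peer;
    # the attribute names used on the result (value, neighbor, state) are assumptions
    # about BgpPeerStatusListResult, and the resource names are placeholders.
    #
    #   status = client.virtual_network_gateways.get_bgp_peer_status(
    #       'my-resource-group', 'my-gateway').result()
    #   for peer_status in status.value:
    #       print(peer_status.neighbor, peer_status.state)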
def supported_vpn_devices(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Gets a xml format representation for supported vpn devices.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: str or ClientRawResponse if raw=true
:rtype: str or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.supported_vpn_devices.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
supported_vpn_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/supportedvpndevices'}
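    # Usage sketch (illustrative only). Unlike most operations in this class, the call
    # is synchronous and simply returns the XML document as a str.
    #
    #   xml = client.virtual_network_gateways.supported_vpn_devices(
    #       'my-resource-group', 'my-gateway')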
def _get_learned_routes_initial(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_learned_routes.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_learned_routes(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""This operation retrieves a list of routes the virtual network gateway
has learned, including routes learned from BGP peers.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns GatewayRouteListResult
or ClientRawResponse<GatewayRouteListResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_11_01.models.GatewayRouteListResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_11_01.models.GatewayRouteListResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_learned_routes_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_learned_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes'}
def _get_advertised_routes_initial(
self, resource_group_name, virtual_network_gateway_name, peer, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_advertised_routes.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_advertised_routes(
self, resource_group_name, virtual_network_gateway_name, peer, custom_headers=None, raw=False, polling=True, **operation_config):
"""This operation retrieves a list of routes the virtual network gateway
is advertising to the specified peer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param peer: The IP address of the peer
:type peer: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns GatewayRouteListResult
or ClientRawResponse<GatewayRouteListResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_11_01.models.GatewayRouteListResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_11_01.models.GatewayRouteListResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_advertised_routes_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
peer=peer,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_advertised_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getAdvertisedRoutes'}
def _set_vpnclient_ipsec_parameters_initial(
self, resource_group_name, virtual_network_gateway_name, vpnclient_ipsec_params, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.set_vpnclient_ipsec_parameters.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(vpnclient_ipsec_params, 'VpnClientIPsecParameters')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnClientIPsecParameters', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def set_vpnclient_ipsec_parameters(
self, resource_group_name, virtual_network_gateway_name, vpnclient_ipsec_params, custom_headers=None, raw=False, polling=True, **operation_config):
"""The Set VpnclientIpsecParameters operation sets the vpnclient ipsec
policy for P2S client of virtual network gateway in the specified
resource group through Network resource provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param vpnclient_ipsec_params: Parameters supplied to the Begin Set
vpnclient ipsec parameters of Virtual Network Gateway P2S client
operation through Network resource provider.
:type vpnclient_ipsec_params:
~azure.mgmt.network.v2018_11_01.models.VpnClientIPsecParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
VpnClientIPsecParameters or
ClientRawResponse<VpnClientIPsecParameters> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_11_01.models.VpnClientIPsecParameters]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_11_01.models.VpnClientIPsecParameters]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._set_vpnclient_ipsec_parameters_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
vpnclient_ipsec_params=vpnclient_ipsec_params,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VpnClientIPsecParameters', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
set_vpnclient_ipsec_parameters.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/setvpnclientipsecparameters'}
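    # Usage sketch (illustrative only). The policy is passed as a VpnClientIPsecParameters
    # model; the field names and values below are assumptions about a typical policy and
    # are not taken from this file.
    #
    #   policy = models.VpnClientIPsecParameters(
    #       sa_life_time_seconds=86472, sa_data_size_kilobytes=429497,
    #       ipsec_encryption='AES256', ipsec_integrity='SHA256',
    #       ike_encryption='AES256', ike_integrity='SHA384',
    #       dh_group='DHGroup14', pfs_group='PFS14')
    #   result = client.virtual_network_gateways.set_vpnclient_ipsec_parameters(
    #       'my-resource-group', 'my-gateway', policy).result()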
def _get_vpnclient_ipsec_parameters_initial(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_vpnclient_ipsec_parameters.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnClientIPsecParameters', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_vpnclient_ipsec_parameters(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""The Get VpnclientIpsecParameters operation retrieves information about
the vpnclient ipsec policy for P2S client of virtual network gateway in
the specified resource group through Network resource provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The virtual network gateway name.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
VpnClientIPsecParameters or
ClientRawResponse<VpnClientIPsecParameters> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_11_01.models.VpnClientIPsecParameters]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_11_01.models.VpnClientIPsecParameters]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_vpnclient_ipsec_parameters_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VpnClientIPsecParameters', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_vpnclient_ipsec_parameters.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnclientipsecparameters'}
def vpn_device_configuration_script(
self, resource_group_name, virtual_network_gateway_connection_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Gets a xml format representation for vpn device configuration script.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The name of the
virtual network gateway connection for which the configuration script
is generated.
:type virtual_network_gateway_connection_name: str
:param parameters: Parameters supplied to the generate vpn device
script operation.
:type parameters:
~azure.mgmt.network.v2018_11_01.models.VpnDeviceScriptParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: str or ClientRawResponse if raw=true
:rtype: str or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.vpn_device_configuration_script.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VpnDeviceScriptParameters')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
vpn_device_configuration_script.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/vpndeviceconfigurationscript'}
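# End-to-end sketch (illustrative, not part of the generated module). These operations
# are normally reached through NetworkManagementClient rather than by instantiating the
# operations class directly; the credential handling below is an assumed typical setup
# and all '...' values are placeholders.
#
#   from azure.common.credentials import ServicePrincipalCredentials
#   from azure.mgmt.network import NetworkManagementClient
#
#   credentials = ServicePrincipalCredentials(client_id='...', secret='...', tenant='...')
#   client = NetworkManagementClient(credentials, subscription_id='...')
#   gateway = client.virtual_network_gateways.get('my-resource-group', 'my-gateway')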
| 49.615713
| 231
| 0.693307
|
3b009c6a5ac7c586c6716a94106231602a560b18
| 466
|
py
|
Python
|
challenges/spoj/ngm/ngm_v001.py
|
Joeffison/coding_challenges
|
0f0c5c2c3dad3a5aabfb66d66c5b6a89bff374ea
|
[
"MIT"
] | 1
|
2018-02-27T15:53:17.000Z
|
2018-02-27T15:53:17.000Z
|
challenges/spoj/ngm/ngm_v001.py
|
Joeffison/coding_challenges
|
0f0c5c2c3dad3a5aabfb66d66c5b6a89bff374ea
|
[
"MIT"
] | null | null | null |
challenges/spoj/ngm/ngm_v001.py
|
Joeffison/coding_challenges
|
0f0c5c2c3dad3a5aabfb66d66c5b6a89bff374ea
|
[
"MIT"
] | 1
|
2021-04-18T09:43:21.000Z
|
2021-04-18T09:43:21.000Z
|
#!/usr/bin/env python3
import fileinput
import sys
def solution(n):
# If a player faces a number n <= 9, they can play it to win
# But if n is 10, there is no chance to win
  # and any number divisible by 10 will eventually lead to the same losing scenario (when both players play optimally)
remainder = n % 10
return "1\n" + str(remainder) if remainder != 0 else "2"
if __name__ == '__main__':
f_in = fileinput.input()
print(solution(int(f_in.readline())))
| 25.888889
| 109
| 0.690987
|
ca9593ea371d5e77d93c3eec24a69f5e1c1e80b5
| 3,375
|
py
|
Python
|
src/2.7/sample/chapter_03/sample_3_2.py
|
nutti/Introduction-to-Addon-Development-in-Blender-Web
|
cae839a4ce5fe1caabae67e252912440277d42e8
|
[
"CC0-1.0"
] | 10
|
2020-02-17T05:50:17.000Z
|
2022-01-02T14:50:48.000Z
|
src/2.7/sample/chapter_03/sample_3_2.py
|
nutti/Introduction-to-Addon-Development-in-Blender-Web
|
cae839a4ce5fe1caabae67e252912440277d42e8
|
[
"CC0-1.0"
] | 1
|
2020-03-29T14:32:46.000Z
|
2020-03-30T08:56:50.000Z
|
src/2.7/sample/chapter_03/sample_3_2.py
|
nutti/Introduction-to-Addon-Development-in-Blender-Web
|
cae839a4ce5fe1caabae67e252912440277d42e8
|
[
"CC0-1.0"
] | 6
|
2019-05-15T16:59:48.000Z
|
2020-08-08T17:20:09.000Z
|
import bpy
from bpy.props import BoolProperty, PointerProperty
from mathutils import Vector
bl_info = {
"name": "サンプル3-2: キーボードのキー入力に応じてオブジェクトを並進移動させる",
"author": "Nutti",
"version": (2, 0),
"blender": (2, 75, 0),
"location": "3Dビュー > プロパティパネル > オブジェクト並進移動",
"description": "キーボードからの入力に応じてオブジェクトを並進移動させるアドオン",
"warning": "",
"support": "TESTING",
"wiki_url": "",
"tracker_url": "",
"category": "Object"
}
# Properties
class TOM_Properties(bpy.types.PropertyGroup):
running = BoolProperty(
name="オブジェクト並進移動モード中",
description="オブジェクト並進移動モード中か?",
default=False
)
# Handler for the object translation mode
class TranslateObjectMode(bpy.types.Operator):
bl_idname = "object.translate_object_mode"
bl_label = "オブジェクト並進移動モード"
bl_description = "オブジェクト並進移動モードへ移行します"
def modal(self, context, event):
props = context.scene.tom_props
        # Redraw the 3D View
if context.area:
context.area.tag_redraw()
# @include-source start [exit_modal_mode]
        # When the Q key is pressed, leave the object translation mode
if event.type == 'Q' and event.value == 'PRESS':
props.running = False
print("サンプル3-2: 通常モードへ移行しました。")
return {'FINISHED'}
# @include-source end [exit_modal_mode]
# @include-source start [check_key_state]
if event.value == 'PRESS':
value = Vector((0.0, 0.0, 0.0))
if event.type == 'X':
value.x = 1.0 if not event.shift else -1.0
if event.type == 'Y':
value.y = 1.0 if not event.shift else -1.0
if event.type == 'Z':
value.z = 1.0 if not event.shift else -1.0
# @include-source end [check_key_state]
# @include-source start [translate_object]
            # Translate the currently selected objects
bpy.ops.transform.translate(value=value)
# @include-source end [translate_object]
return {'RUNNING_MODAL'}
def invoke(self, context, event):
props = context.scene.tom_props
if context.area.type == 'VIEW_3D':
            # Handling for when the start button is pressed
if props.running is False:
props.running = True
                # Register this operator as a modal handler
context.window_manager.modal_handler_add(self)
print("サンプル3-2: オブジェクト並進移動モードへ移行しました。")
return {'RUNNING_MODAL'}
else:
return {'CANCELLED'}
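# Flow note (added; not part of the original sample): the panel button defined below calls
# invoke(), which flips props.running and registers this operator as a modal handler;
# modal() then translates the selection on X/Y/Z key presses (Shift for the negative
# direction) and Q ends the mode. Because invoke() checks context.area.type, the mode can
# only be started from a 3D View area (e.g. via the sidebar button, not the Python console).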
# UI
class OBJECT_PT_SOEM(bpy.types.Panel):
bl_label = "オブジェクト並進移動モード"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
def draw(self, context):
layout = self.layout
props = context.scene.tom_props
        # Add the start/stop button
if props.running is False:
layout.operator(
TranslateObjectMode.bl_idname, text="開始", icon="PLAY"
)
else:
layout.operator(
TranslateObjectMode.bl_idname, text="終了", icon="PAUSE"
)
def register():
bpy.utils.register_module(__name__)
sc = bpy.types.Scene
sc.tom_props = PointerProperty(
name="プロパティ",
description="本アドオンで利用するプロパティ一覧",
type=TOM_Properties
)
print("サンプル3-2: アドオン「サンプル3-2」が有効化されました。")
def unregister():
del bpy.types.Scene.tom_props
bpy.utils.unregister_module(__name__)
print("サンプル3-2: アドオン「サンプル3-2」が無効化されました。")
if __name__ == "__main__":
register()
| 27.217742
| 70
| 0.600593
|
48b6d8178b1218d6d935a8f37982daf39b81d681
| 4,918
|
py
|
Python
|
app/main.py
|
thundercomb/ingest-clnn-news
|
43b1385fee33b955ddc06fe296d78d898dbfa0fe
|
[
"MIT"
] | 1
|
2020-01-16T17:19:32.000Z
|
2020-01-16T17:19:32.000Z
|
app/main.py
|
thundercomb/ingest-clnn-news
|
43b1385fee33b955ddc06fe296d78d898dbfa0fe
|
[
"MIT"
] | 6
|
2020-01-28T23:08:27.000Z
|
2022-02-10T00:36:11.000Z
|
app/main.py
|
thundercomb/ingest-clnn-news
|
43b1385fee33b955ddc06fe296d78d898dbfa0fe
|
[
"MIT"
] | 1
|
2020-06-27T07:38:43.000Z
|
2020-06-27T07:38:43.000Z
|
from flask import Flask
import requests as rq
import logging
import os
import math
import time
import datetime
import csv
from random import randint
import feedparser
from bs4 import BeautifulSoup
from google.cloud import pubsub_v1
from google.cloud import bigquery
from google.cloud.exceptions import NotFound, Conflict
app = Flask(__name__)
@app.route('/')
def ok():
return 'ok'
@app.route('/ingest')
def ingest():
topic_name = os.getenv('TOPIC')
url = os.getenv('URL')
publisher = pubsub_v1.PublisherClient(batch_settings=pubsub_v1.types.BatchSettings(max_latency=5))
topic_path = publisher.topic_path(project_id, topic_name)
feed = feedparser.parse(url)
pages = []
today_date = f"{datetime.datetime.now():%Y-%m-%d}"
for post in feed.entries:
post_date = "%d-%02d-%02d" % (post.published_parsed.tm_year,\
post.published_parsed.tm_mon, \
post.published_parsed.tm_mday)
#if post_date == today_date:
if post_date:
print("post date: " + post_date)
print("post title: " + post.title)
print("post link: " + post.link)
page = rq.get(post.link).text
pages.append(page)
chunk_size = 50
count = 0
message_count = 0
chunk = []
print('Publishing data to {} ...'.format(topic_path))
for page in pages:
text = ""
flag = 0
soup = BeautifulSoup(page, "lxml")
for s in soup.findAll('p'):
if '<em>− Climate News Network</em>' in str(s):
text = text + s.text.encode("utf-8").decode("utf-8").replace('− Climate News Network','') + "\n"
flag = 1
elif not '<p><' in str(s) and flag == 0:
text = text + s.text.encode("utf-8").decode("utf-8") + "\n"
if count < chunk_size:
chunk.append(text)
count += 1
if count == chunk_size:
bytes_chunk = bytes("@@".join(chunk).encode('utf-8'))
publisher.publish(topic_path, data=bytes_chunk)
chunk = []
count = 0
message_count = message_count + 1
if count > 0:
bytes_chunk = bytes("@@".join(chunk).encode('utf-8'))
publisher.publish(topic_path, data=bytes_chunk)
print('Published {} rows in {} messages'.format((message_count * chunk_size) + count, message_count + math.ceil(count/chunk_size)))
subscribe()
return 'ok'
def subscribe():
future = subscriber.subscribe(subscription_path, callback=callback)
# The subscriber is non-blocking, so we must keep the main thread from
# exiting to allow it to process messages in the background.
print('Listening for messages on {} ...'.format(subscription_path))
loop = True
while loop:
response = subscriber.pull(subscription_path, 10)
if len(response.received_messages) > 0:
time.sleep(1)
else:
print('No more messages, canceling subscription...')
future.cancel()
loop = False
return
def callback(message):
errors = []
if message.data:
decoded_message = message.data.decode('utf-8')
lines = decoded_message.split('@@')
rows_to_insert = []
for line in lines:
tuple_row = tuple([line])
rows_to_insert.append(tuple_row)
try:
table = bq_client.get_table(table_ref)
except NotFound:
create_table()
table = bq_client.get_table(table_ref)
print("Inserting {} rows into BigQuery ...".format(len(rows_to_insert)))
print(rows_to_insert)
errors = bq_client.insert_rows(table, rows_to_insert)
if errors != []:
print(errors)
else:
message.ack()
assert errors == []
def create_table():
schema = [
bigquery.SchemaField("Article", "STRING", mode="NULLABLE"),
]
table = bigquery.Table(table_ref, schema=schema)
try:
bq_client.get_table(table)
except NotFound:
try:
table = bq_client.create_table(table)
print("Created table {}.{}.{}".format(table.project, table.dataset_id, table.table_id))
print("Going to sleep for 60 seconds to ensure data availability in newly created table")
time.sleep(60)
except Conflict:
pass
return
@app.errorhandler(500)
def server_error(e):
print('An internal error occurred')
return 'An internal error occurred.', 500
print("Preparing..")
project_id = os.getenv('PROJECT')
subscription_name = os.getenv('SUBSCRIPTION')
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(project_id, subscription_name)
dataset_id = os.getenv('DATASET')
table_id = os.getenv('TABLE')
bq_client = bigquery.Client()
table_ref = bq_client.dataset(dataset_id).table(table_id)
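# Configuration note (added; not part of the original app). The module reads its settings
# from environment variables: PROJECT, SUBSCRIPTION, DATASET and TABLE are required at
# import time (see above), and /ingest additionally needs TOPIC (the Pub/Sub topic that
# receives article chunks) and URL (the RSS feed to scrape). Credentials discoverable by
# the google.cloud client libraries are also assumed to be available.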
| 29.100592
| 135
| 0.617121
|
05e6ce2c50634b4a16f84e7f008cf09cd60e1eef
| 9,535
|
py
|
Python
|
tensorflow/python/tpu/tpu_sharding.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 78
|
2020-08-04T12:36:25.000Z
|
2022-03-25T04:23:40.000Z
|
tensorflow/python/tpu/tpu_sharding.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 203
|
2019-06-14T23:53:10.000Z
|
2022-02-10T02:27:23.000Z
|
tensorflow/python/tpu/tpu_sharding.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 66
|
2020-05-15T10:05:12.000Z
|
2022-02-14T07:28:18.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Helper library for sharding during TPU compilation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import tensor_shape
_DEFAULT_NUMBER_OF_SHARDS = 1
_DEFAULT_SHARD_DIMENSION = 0
# TODO(b/36777903) change other parts of tpu.py to use this class.
class ShardingPolicy(object):
"""An object use to hold the sharding policy for a Tensor.
"""
def __init__(self):
self._number_of_shards = None
self._shard_dimension = None
self._frozen = False
def __str__(self):
if self.number_of_shards is None or self.shard_dimension is None:
return "ShardingPolicy(unset)"
else:
return ("ShardingPolicy(%d shards dimension %d)" %
(self.number_of_shards, self.shard_dimension))
def _fill_default_values(self):
if self._number_of_shards is None:
self._number_of_shards = _DEFAULT_NUMBER_OF_SHARDS
if self._shard_dimension is None:
self._shard_dimension = tensor_shape.as_dimension(
_DEFAULT_SHARD_DIMENSION)
def freeze(self):
"""Prevents further modification to the sharding policy.
Any values that have not been set when freeze is called are set to
defaults. If the ShardingPolicy is already frozen, this is a NoOp.
"""
if not self._frozen:
self._fill_default_values()
self._frozen = True
@property
def number_of_shards(self):
"""Returns the number of shards in the policy or None if unspecified."""
return self._number_of_shards
def set_number_of_shards(self, number_of_shards):
"""Sets the number of shards for the current policy.
If the policy has been frozen then number_of_shards must match the
existing setting.
Args:
number_of_shards: The number of shards to use in the policy.
Raises:
ValueError: If the policy has been frozen and number_of_shards
differs from the frozen value; or number_of_shards <= 0.
"""
if self._frozen:
if self._number_of_shards != number_of_shards:
raise ValueError(
"Can't set sharding policy to use %d shards since it has been "
"frozen to use %d." % (number_of_shards, self._number_of_shards))
else:
if number_of_shards > 0:
self._number_of_shards = number_of_shards
else:
raise ValueError(
"Can't set sharding policy to use %s shards; value must be >0" %
str(number_of_shards))
@property
def shard_dimension(self):
"""Returns the shard dimension of the policy or None if unspecified."""
return self._shard_dimension
def set_shard_dimension(self, shard_dimension):
"""Sets the shard dimension for the current policy.
If the policy has been frozen then shard_dimension must match the
existing setting.
Args:
shard_dimension: The shard dimension to use in the policy.
Raises:
ValueError: If the policy has been frozen and shard_dimension
differs from the frozen value, or shard_dimension can't be
interpreted as a Dimension.
"""
if self._frozen:
if self._shard_dimension != shard_dimension:
raise ValueError(
"Can't set shard dimension to %d since it has been frozen to "
"use %d." % (shard_dimension, self._shard_dimension))
else:
self._shard_dimension = tensor_shape.as_dimension(shard_dimension)
def merge(self, other):
"""Merges the policy of another policy into the current policy.
Args:
other: The policy to merge into this one.
Raises:
ValueError: If this policy has been frozen and the merge conflicts with
the frozen policy.
"""
if other.number_of_shards is not None:
self.set_number_of_shards(other.number_of_shards)
if other.shard_dimension is not None:
self.set_shard_dimension(other.shard_dimension)
def get_sharded_shape(self, shape, shard_index=None):
"""Returns the shape of a shard of a full Tensor.
When given the shape of a 'full-size' Tensor, returns the shape of
the sub-Tensor after it has been sharded. Freezes the policy if it
has not yet been frozen.
Args:
shape: The shape of the full-size Tensor to be sharded.
shard_index: The index of the shard whose shape should be returned.
shard_index can be None for sharding policies that use the same
shape for every shard.
Returns:
The shape of the sharded version of the Tensor.
Raises:
ValueError: If shard_index is None when shards are of different
shapes; or shard_index is not None and
!(0<=shard_index<number_of_shards); or shape does not have at
least self.shard_dimension+1 dimensions; or the value of
shape's shard dimension is not a multiple of
self.number_of_shards
"""
if self._shard_dimension is None or self._number_of_shards is None:
# Don't raise an error if the config is unset.
return None
if shard_index is not None:
if shard_index < 0 or shard_index >= self.number_of_shards:
raise ValueError("shard_index %d, but must be in [0,%d)." %
(shard_index, self._number_of_shards))
shape = tensor_shape.as_shape(shape)
if self._number_of_shards == 1:
# Don't do anything when there's only one shard.
return shape
ndims = shape.ndims
if ndims is None:
raise ValueError("shape must be a specified shape not Unknown")
if ndims <= self._shard_dimension:
raise ValueError("shape %s does not contain shard_dimension %d" %
(shape.as_list(), self._shard_dimension))
dims = shape.as_list()
if dims[self._shard_dimension] is None:
raise ValueError("shape %s must have a fixed size for dimension %d "
"that is known at graph construction time." %
(shape.as_list(), self._shard_dimension))
if (dims[self._shard_dimension] % self._number_of_shards) != 0:
raise ValueError("shape %s cannot be sharded %d ways along dimension %d" %
(shape.as_list(), self._number_of_shards,
self._shard_dimension))
dims[self._shard_dimension] //= self._number_of_shards
return tensor_shape.as_shape(dims)
def _unshard_shape(self, shape):
"""Return the unsharded shape that would generate a given sharded shape.
Args:
shape: the sharded shape to unshard
Returns:
The unsharded shape.
Raises:
ValueError: if shape is unknown or does not contain
self.shard_dimension
TypeError: if shape is not convertible to a TensorShape
"""
shape = tensor_shape.as_shape(shape)
if self._number_of_shards == 1:
# Don't do anything when there's only one shard.
return shape
ndims = shape.ndims
if ndims is None:
raise ValueError("shape must be a specified shape not Unknown")
if ndims <= self._shard_dimension:
raise ValueError("shape %s does not contain shard_dimension %d" %
(shape.as_list(), self._shard_dimension))
dims = shape.as_list()
dims[self._shard_dimension] *= self._number_of_shards
return tensor_shape.as_shape(dims)
def get_unsharded_shape(self, shapes):
"""Returns the shape of an unsharded Tensor given a list of shards.
When given a list of shapes of shards, returns the shape of the
unsharded Tensor that would generate the shards. Sets defaults for the
policy if number_of_shards or shard_dimension is None.
Args:
shapes: The shapes of the Tensor shards to be combined.
Returns:
The shape of the unsharded version of the Tensor.
Raises:
ValueError: if shapes is not a list of length
self.number_of_shards; or any element of shapes is not a valid
shape consistent with the sharding policy; or the list of
shapes is not a valid sharding of a full shape.
TypeError: if an element of shapes is not convertible to a
TensorShape
"""
self._fill_default_values()
if len(shapes) != self.number_of_shards:
raise ValueError(
"shapes is %s but must be a list of length number_of_shards=%d" % (
str(shapes), self.number_of_shards))
unsharded_shapes = [self._unshard_shape(s) for s in shapes]
for i in xrange(self.number_of_shards - 1):
if not unsharded_shapes[i].is_compatible_with(
unsharded_shapes[self.number_of_shards - 1]):
raise ValueError(
"sharded shapes %s are not consistent shards of a full shape "
"sharded %d ways along dimension %d" % (
str(shapes), self.number_of_shards, self.shard_dimension))
return unsharded_shapes[0]
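# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal usage example of ShardingPolicy, assuming 2 shards along
# dimension 0; the expected values follow the docstrings above.
def _example_sharding_policy_usage():
  policy = ShardingPolicy()
  policy.set_number_of_shards(2)
  policy.set_shard_dimension(0)
  policy.freeze()
  # A [4, 8] tensor is split into two [2, 8] shards along dimension 0.
  sharded = policy.get_sharded_shape([4, 8])            # TensorShape([2, 8])
  # Recombining the two shard shapes recovers the full [4, 8] shape.
  full = policy.get_unsharded_shape([[2, 8], [2, 8]])   # TensorShape([4, 8])
  return sharded, full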
| 37.687747
| 80
| 0.684636
|
7d9d0b541ad72bbd73fba74b2ae991478421f863
| 366
|
py
|
Python
|
search_test.py
|
mochileiroDaGalaxia/Browser-Automation-With-Python-and-WebDriver
|
045c863177993c223291a9b74c05c68e11ab8c1a
|
[
"MIT"
] | null | null | null |
search_test.py
|
mochileiroDaGalaxia/Browser-Automation-With-Python-and-WebDriver
|
045c863177993c223291a9b74c05c68e11ab8c1a
|
[
"MIT"
] | null | null | null |
search_test.py
|
mochileiroDaGalaxia/Browser-Automation-With-Python-and-WebDriver
|
045c863177993c223291a9b74c05c68e11ab8c1a
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
driver = webdriver.Chrome('bin/chromedriver')
driver.get("http://www.python.org")
assert "Python" in driver.title
elem = driver.find_element_by_name("q")
elem.clear()
elem.send_keys("pycon")
elem.send_keys(Keys.RETURN)
assert "No results found." not in driver.page_source
driver.close()
| 30.5
| 52
| 0.786885
|
ccb641b419a2b184715e21c394e516cb6df87a85
| 1,288
|
py
|
Python
|
setup.py
|
WeVidIt/graphene-gis
|
238466bd02392e79b7f771375da112a8d18ea158
|
[
"MIT"
] | null | null | null |
setup.py
|
WeVidIt/graphene-gis
|
238466bd02392e79b7f771375da112a8d18ea158
|
[
"MIT"
] | null | null | null |
setup.py
|
WeVidIt/graphene-gis
|
238466bd02392e79b7f771375da112a8d18ea158
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
tests_require = [
"pytest>=5.1.2",
"pytest-cov==2.7.1",
"pytest-django>=3.5.1"
]
dev_requires = [
"black==19.3b0",
"flake8==3.7.8",
] + tests_require
with open("README.md", "r") as desc:
long_description = desc.read()
setup(
name="graphene-gis",
version="0.0.5",
description="GIS support for graphene-django",
long_description_content_type='text/markdown',
url="https://github.com/EverWinter23/graphene-gis",
long_description=long_description,
keywords="api graphql graphene geos gis",
packages=find_packages(exclude=["tests"]),
author="Rishabh Mehta",
author_email="eternal.blizzard23@gmail.com",
install_requires=[
"graphene>=2.1,<3",
"graphene-django>=2.5,<3",
"graphql-core>=2.1,<3",
"psycopg2-binary>=2.8,<3"
],
setup_requires=["pytest-runner"],
tests_require=tests_require,
extras_require={
"test": tests_require,
"dev": dev_requires,
},
classifiers=(
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
),
include_package_data=True,
zip_safe=False,
platforms="any",
)
| 24.301887
| 61
| 0.626553
|
5920eb9334e6e542b3bc371c4c9146d1a68adbdb
| 5,161
|
py
|
Python
|
etc/gdb/evawiz/evawiz_codeblock.py
|
einsxiao/evawiz
|
12c148f46c89551c281271718893a92b26da2bfa
|
[
"BSD-2-Clause"
] | 1
|
2019-06-07T03:44:39.000Z
|
2019-06-07T03:44:39.000Z
|
etc/gdb/evawiz/evawiz_codeblock.py
|
einsxiao/evawiz
|
12c148f46c89551c281271718893a92b26da2bfa
|
[
"BSD-2-Clause"
] | null | null | null |
etc/gdb/evawiz/evawiz_codeblock.py
|
einsxiao/evawiz
|
12c148f46c89551c281271718893a92b26da2bfa
|
[
"BSD-2-Clause"
] | null | null | null |
#!/opt/evawiz/python/bin/python
def dprint(*params):
print "debug_info: "
for i in params:
print i,
print
class StdStringPrinter(object):
"Print a std::string"
def __init__(self,val):
self.val = val
def to_string(self):
return self.val['_M_dataplus']['_M_p']
def display_hint(self):
return "string"
##############################################################################
class EvaObjPrinter:
"""Print a EvaObj object."""
max_child = 100
def __init__(self,val,is_detail = True):
self.is_detail = is_detail
self.val = val;
self.this = val.address
self.rtype = str(val['typ'])
self.rstate= int(val['state'])
self.rv= int(val['v'])
self.rv1= val['v1']
self.rv2= val['v2']
self.rre=self.rv1['re']
self.rim=self.rv2['im']
    #self.rkey=""  rkey is set afterwards; key may be null
self.relements=self.rv2['elements']
### for elements
def prepare_content(self,count=0):
self.elements = self.relements.referenced_value();
self.begin = self.elements['_M_impl']["_M_start"]
self.end = self.elements['_M_impl']["_M_finish"]
self.end_storage = self.elements['_M_impl']["_M_end_of_storage"]
self.count = self.end - self.begin
self.capacity = self.end_storage - self.begin
if self.count == 0:
return self.rkey
obj = EvaObjPrinter( self.begin.dereference(), False )
res = ""
if self.is_detail:
res = self.rkey+"{\n "+obj.to_string()
else:
res = "["+str(count)+"]"+self.rkey+"{"+obj.to_string()
counter = 1
self.begin+=1
while self.begin != self.end:
obj = EvaObjPrinter( self.begin.dereference(), False )
if self.is_detail:
# if counter != 1:
# res+=",\n"
# else:pass
res += " "+str(counter)+":"+obj.to_string()
#res +=",\n "+str(counter)+":"+obj.to_string(counter)
#res +=" "+obj.to_string(counter)
else:
# if counter != 1: res+=","
# else:pass
# res += obj.to_string()
res+=","+obj.to_string(counter)
counter+=1
if counter > self.max_child: break
self.begin+=1
if counter < self.count:
res+=",\n...with extra "+str(self.count - counter)+" elements"
if self.is_detail:
res+="\n}"
else:
res+="}"
return res
def to_string(self,count=0):
s="";
if ( self.rstate !=0 or self.rv != 0 ):
s=" ,{state="+str(self.rstate)+", v="+str(self.rv)+"}";
if ( self.rtype == "evawiz::NumberT" ):
return str(self.rre)
if ( self.rtype == "evawiz::ComplexT" ):
return "("+str( self.rre )+","+str( self.rim)+")"
if ( self.rtype == "evawiz::StringT" ):
if ( self.rre ): self.rkey=self.rv1['key'].string();
return '"'+self.rkey+'"'
if ( self.rtype == "evawiz::SymbolT" ):
if ( self.rre ): self.rkey=self.rv1['key'].string();
res = ""
#if self.this and self.is_detail :
# res = "EvaObj@"+str(self.this)+":";
if ( self.relements ):
res = res+" "+self.prepare_content(count)
else:
res = " "+self.rkey
res+=s;
return res
if ( self.rtype == "evawiz::ReferenceT" ):
print ">>>deal ref<<<",
self.rlref=self.rv1['lref']
self.rrref=self.rv2['rref']
refobj = EvaObjPrinter( self.rrref.referenced_value() );
res = "-->" + refobj.to_string();
return res
if ( self.rtype == "evawiz::VariableT" ):
      return "$_"+str(int(self.rre))+s
return "{ v1={re="+str(self.rre)+", key="+self.rkey+"},"+"v2={im="+ \
str(self.rim)+", ptr="+str(self.rrref)+"}}"
def display_hint(self):
return "EvaObj"
##########################################################################
##########################################################################
# def lookup_function(val):
# lookup_tag = val.type.tag
# if lookup_tag == None:
# return None
# regexs = (
# (re.compile("^std::basic_string<char,.*>$"),StdStringPrinter),
# )
# for (regex,printer) in regexs:
# print ( regex, printer)
# if ( regex.match(lookup_tag) ):
# return printer(val)
# return None
import gdb.printing
def build_pretty_printers():
pp = gdb.printing.RegexpCollectionPrettyPrinter("evawiz")
pp.add_printer('string',"^std::basic_string<char,.*>$",StdStringPrinter)
pp.add_printer('EvaObj',"^evawiz::EvaObj$",EvaObjPrinter)
return pp
gdb.printing.register_pretty_printer( gdb.current_objfile(), build_pretty_printers() )
##########################################################################
| 33.296774
| 86
| 0.484596
|
9977b6bd8f8aab027bf73f277bd14641ab33ee80
| 1,103
|
py
|
Python
|
master/teachkids-master/teachkids-master/ch04/ViralSpiral.py
|
AlexRogalskiy/DevArtifacts
|
931aabb8cbf27656151c54856eb2ea7d1153203a
|
[
"MIT"
] | 4
|
2018-09-07T15:35:24.000Z
|
2019-03-27T09:48:12.000Z
|
master/teachkids-master/teachkids-master/ch04/ViralSpiral.py
|
AlexRogalskiy/DevArtifacts
|
931aabb8cbf27656151c54856eb2ea7d1153203a
|
[
"MIT"
] | 371
|
2020-03-04T21:51:56.000Z
|
2022-03-31T20:59:11.000Z
|
master/teachkids-master/teachkids-master/ch04/ViralSpiral.py
|
AlexRogalskiy/DevArtifacts
|
931aabb8cbf27656151c54856eb2ea7d1153203a
|
[
"MIT"
] | 3
|
2019-06-18T19:57:17.000Z
|
2020-11-06T03:55:08.000Z
|
# ViralSpiral.py - a spiral of spirals!
import turtle
t = turtle.Pen()
t.penup()
turtle.bgcolor("black")
# Ask the user for the number of sides, default to 4, min 2, max 6
sides = int(turtle.numinput("Number of sides",
"How many sides in your spiral of spirals? (2-6)", 4,2,6))
colors = ["red", "yellow", "blue", "green", "purple", "orange"]
# Our outer spiral loop
for m in range(100):
t.forward(m*4)
position = t.position() # Remember this corner of the spiral
heading = t.heading() # Remember the direction we were heading
print(position, heading)
# Our "inner" spiral loop
# Draws a little spiral at each corner of the big spiral
for n in range(int(m/2)):
t.pendown()
t.pencolor(colors[n%sides])
t.forward(2*n)
t.right(360/sides - 2)
t.penup()
t.setx(position[0]) # Go back to the big spiral's x location
t.sety(position[1]) # Go back to the big spiral's y location
t.setheading(heading) # Point in the big spiral's heading
t.left(360/sides + 2) # Aim at the next point on the big spiral
| 38.034483
| 70
| 0.637353
|
6cd6dae352886c301bc9e172dc228591f66f7620
| 5,821
|
py
|
Python
|
errant/fr/merger.py
|
Maxwell1447/ERRANT-fr
|
023bb3568cb07241000829c2b0a23b7613409d45
|
[
"MIT"
] | null | null | null |
errant/fr/merger.py
|
Maxwell1447/ERRANT-fr
|
023bb3568cb07241000829c2b0a23b7613409d45
|
[
"MIT"
] | null | null | null |
errant/fr/merger.py
|
Maxwell1447/ERRANT-fr
|
023bb3568cb07241000829c2b0a23b7613409d45
|
[
"MIT"
] | null | null | null |
from itertools import combinations, groupby
from re import sub
from string import punctuation
import Levenshtein
import spacy.symbols as POS
from errant.edit import Edit
import logging
# Merger resources
open_pos = {POS.ADJ, POS.AUX, POS.ADV, POS.NOUN, POS.VERB}
# Input: An Alignment object
# Output: A list of Edit objects
def get_rule_edits(alignment):
edits = []
# Split alignment into groups of M, T and rest. (T has a number after it)
for op, group in groupby(
alignment.align_seq, lambda x: x[0][0] if x[0][0] in {"M", "T"} else False
):
group = list(group)
# Ignore M
if op == "M":
continue
# T is always split
elif op == "T":
for seq in group:
edits.append(Edit(alignment.orig, alignment.cor, seq[1:]))
# Process D, I and S subsequence
else:
processed = process_seq(group, alignment)
# Turn the processed sequence into edits
for seq in processed:
edits.append(Edit(alignment.orig, alignment.cor, seq[1:]))
return edits
# Input 1: A sequence of adjacent D, I and/or S alignments
# Input 2: An Alignment object
# Output: A sequence of merged/split alignments
def process_seq(seq, alignment):
# Return single alignments
if len(seq) <= 1:
return seq
# Get the ops for the whole sequence
ops = [op[0] for op in seq]
# Merge all D xor I ops. (95% of human multi-token edits contain S).
if set(ops) == {"D"} or set(ops) == {"I"}:
return merge_edits(seq)
content = False # True if edit includes a content word
# Get indices of all start-end combinations in the seq: 012 = 01, 02, 12
combos = list(combinations(range(0, len(seq)), 2))
# Sort them starting with largest spans first
combos.sort(key=lambda x: x[1] - x[0], reverse=True)
# Loop through combos
for start, end in combos:
# Ignore ranges that do NOT contain a substitution.
if "S" not in ops[start : end + 1]:
continue
# Get the tokens in orig and cor. They will now never be empty.
o = alignment.orig[seq[start][1] : seq[end][2]]
c = alignment.cor[seq[start][3] : seq[end][4]]
# First token possessive suffixes
if start == 0 and (o[0].tag_ == "POS" or c[0].tag_ == "POS"):
return [seq[0]] + process_seq(seq[1:], alignment)
# Case changes
if o[-1].lower == c[-1].lower:
# Merge first token I or D: [Cat -> The big cat]
if start == 0 and (
(len(o) == 1 and c[0].text[0].isupper())
or (len(c) == 1 and o[0].text[0].isupper())
):
return merge_edits(seq[start : end + 1]) + process_seq(
seq[end + 1 :], alignment
)
# Merge with previous punctuation: [, we -> . We], [we -> . We]
if (len(o) > 1 and is_punct(o[-2])) or (len(c) > 1 and is_punct(c[-2])):
return (
process_seq(seq[: end - 1], alignment)
+ merge_edits(seq[end - 1 : end + 1])
+ process_seq(seq[end + 1 :], alignment)
)
# Merge whitespace/hyphens: [acat -> a cat], [sub - way -> subway]
s_str = sub("['-]", "", "".join([tok.lower_ for tok in o]))
t_str = sub("['-]", "", "".join([tok.lower_ for tok in c]))
if s_str == t_str:
return (
process_seq(seq[:start], alignment)
+ merge_edits(seq[start : end + 1])
+ process_seq(seq[end + 1 :], alignment)
)
# Merge same POS or auxiliary/infinitive/phrasal verbs:
# [to eat -> eating], [watch -> look at]
pos_set = set([tok.pos for tok in o] + [tok.pos for tok in c])
if len(o) != len(c) and (
len(pos_set) == 1 or pos_set.issubset({POS.AUX, POS.PART, POS.VERB})
):
return (
process_seq(seq[:start], alignment)
+ merge_edits(seq[start : end + 1])
+ process_seq(seq[end + 1 :], alignment)
)
# Split rules take effect when we get to smallest chunks
if end - start < 2:
# Split adjacent substitutions
if len(o) == len(c) == 2:
return process_seq(seq[: start + 1], alignment) + process_seq(
seq[start + 1 :], alignment
)
# Split similar substitutions at sequence boundaries
if (ops[start] == "S" and char_cost(o[0], c[0]) > 0.75) or (
ops[end] == "S" and char_cost(o[-1], c[-1]) > 0.75
):
return process_seq(seq[: start + 1], alignment) + process_seq(
seq[start + 1 :], alignment
)
# Split final determiners
if end == len(seq) - 1 and (
(ops[-1] in {"D", "S"} and o[-1].pos == POS.DET)
or (ops[-1] in {"I", "S"} and c[-1].pos == POS.DET)
):
return process_seq(seq[:-1], alignment) + [seq[-1]]
# Set content word flag
if not pos_set.isdisjoint(open_pos):
content = True
# Merge sequences that contain content words
if content:
return merge_edits(seq)
else:
return seq
# Check whether token is punctuation
def is_punct(token):
return token.pos == POS.PUNCT or token.text in punctuation
# Calculate the cost of character alignment; i.e. char similarity
def char_cost(a, b):
return Levenshtein.ratio(a.text, b.text)
# Merge the input alignment sequence to a single edit span
def merge_edits(seq):
if seq:
return [("X", seq[0][1], seq[-1][2], seq[0][3], seq[-1][4])]
else:
return seq
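# --- Editor's illustrative sketch (not part of the original file) ---
# char_cost() compares the .text of two spaCy tokens with Levenshtein.ratio;
# a tiny stand-in token type makes the 0.75 split threshold used in
# process_seq() concrete without loading a spaCy model.
from collections import namedtuple
_FakeTok = namedtuple("_FakeTok", "text")
def _char_cost_example():
    # "informations" vs "information" differ by one character, so the ratio
    # is well above 0.75 and the pair is kept as a single substitution.
    return char_cost(_FakeTok("informations"), _FakeTok("information"))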
| 38.296053
| 84
| 0.540629
|
1e318bedf3b5b4822d0b7ef5c119542c2e6b6ea0
| 972
|
py
|
Python
|
bin/verification_client.py
|
Anas-Dew/copyword
|
fcd21bd972acd00028f754898e7c9158d3daafd8
|
[
"MIT"
] | null | null | null |
bin/verification_client.py
|
Anas-Dew/copyword
|
fcd21bd972acd00028f754898e7c9158d3daafd8
|
[
"MIT"
] | null | null | null |
bin/verification_client.py
|
Anas-Dew/copyword
|
fcd21bd972acd00028f754898e7c9158d3daafd8
|
[
"MIT"
] | null | null | null |
# -------------for-saving-login-data-in-local-machine
import os
def save_logs_on_system(login_data):
    # takes login data and saves it in the current app working directory
os.chdir(f"{os.getcwd()}")
with open("user_login.file","w") as f:
f.write(str(login_data))
# --------------for-validating-if-entered-mail-is-correct-or-not
import re
basic_email_structure = r'^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$'
def email_is_valid(email : str):
if(re.search(basic_email_structure,email)):
return True
else:
return False
# --------------for-checking-if-internet-is-active-machine
import requests
url = "https://www.google.com"
timeout = 10
def connection_status_on_machine():
try:
# requesting URL
request = requests.get(url, timeout=timeout)
return True
# catching exception
except (requests.ConnectionError, requests.Timeout) as exception:
return False
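# --- Editor's illustrative usage (not part of the original file) ---
# Hypothetical addresses; shows how the helpers above are meant to be called.
if __name__ == "__main__":
    print(email_is_valid("john.doe@example.com"))  # True
    print(email_is_valid("not-an-email"))          # False
    print(connection_status_on_machine())          # True when google.com is reachable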
| 24.3
| 70
| 0.621399
|
e6ecdea873931818a699d322e8e901f178045e50
| 1,475
|
py
|
Python
|
alien.py
|
AldoNunes001/Alien_Invasion
|
5c317206af1a675d724f49e8f7474e9b575c251d
|
[
"MIT"
] | null | null | null |
alien.py
|
AldoNunes001/Alien_Invasion
|
5c317206af1a675d724f49e8f7474e9b575c251d
|
[
"MIT"
] | null | null | null |
alien.py
|
AldoNunes001/Alien_Invasion
|
5c317206af1a675d724f49e8f7474e9b575c251d
|
[
"MIT"
] | null | null | null |
import pygame
from pygame.sprite import Sprite
class Alien(Sprite):
"""A class to represent a single alien in the fleet."""
def __init__(self, ai_settings, screen):
"""Initialize the alien and set its starting position."""
super().__init__()
self.screen = screen
self.ai_settings = ai_settings
        # Load the alien image and set its rect attribute.
self.image = pygame.image.load('images/alien1.png')
self.rect = self.image.get_rect()
# Start each new alien near the top left of the screen.
self.rect.x = self.rect.width
self.rect.y = self.rect.height
# Store the alien's exact position.
self.x = float(self.rect.x)
def blitme(self):
"""Draw the alien at its current location."""
self.screen.blit(self.image, self.rect)
def check_edges(self):
"""Return True if alien is at edge of screen."""
screen_rect = self.screen.get_rect()
if self.rect.right >= screen_rect.right:
return True
elif self.rect.left <= 0:
return True
def update(self, row, factor):
"""Move the alien."""
if row % 2 == 0:
self.x += (factor * self.ai_settings.alien_speed_factor * self.ai_settings.fleet_direction[row])
elif row % 2 == 1:
self.x += (factor * self.ai_settings.alien_speed_factor * self.ai_settings.fleet_direction[row])
self.rect.x = self.x
| 32.777778
| 108
| 0.612881
|
c078fbcefb76085776fc94beae03d6a4abb028bb
| 503
|
py
|
Python
|
setup.py
|
igorgad/ray_lightning
|
ab5239a9cbafbd2683714ab975d4116043409c64
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
igorgad/ray_lightning
|
ab5239a9cbafbd2683714ab975d4116043409c64
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
igorgad/ray_lightning
|
ab5239a9cbafbd2683714ab975d4116043409c64
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import find_packages, setup
setup(
name="ray_lightning",
packages=find_packages(where=".", include="ray_lightning*"),
version="0.0.1",
author="Ray Team",
description="Ray distributed plugins for Pytorch Lightning.",
long_description="Custom Pytorch Lightning distributed plugins"
"built on top of distributed computing framework Ray.",
url="https://github.com/ray-project/ray_lightning_accelerators")
# install_requires=["pytorch-lightning", "ray"])
| 38.692308
| 68
| 0.73161
|
1fef96426a64a732414f9cf8f01886d7fcd8b711
| 3,591
|
py
|
Python
|
xunit-autolabeler-v2/ast_parser/core/cli_list_source_files.py
|
GoogleCloudPlatform/repo-automation-playground
|
a4c8f104c246ede002f6c18fcebfc0496c8abb94
|
[
"Apache-2.0"
] | 5
|
2019-07-11T17:35:44.000Z
|
2021-10-09T01:49:04.000Z
|
xunit-autolabeler-v2/ast_parser/core/cli_list_source_files.py
|
GoogleCloudPlatform/repo-automation-playground
|
a4c8f104c246ede002f6c18fcebfc0496c8abb94
|
[
"Apache-2.0"
] | 36
|
2019-08-27T18:20:21.000Z
|
2022-01-12T21:29:00.000Z
|
xunit-autolabeler-v2/ast_parser/core/cli_list_source_files.py
|
GoogleCloudPlatform/repo-automation-playground
|
a4c8f104c246ede002f6c18fcebfc0496c8abb94
|
[
"Apache-2.0"
] | 13
|
2019-10-30T19:39:51.000Z
|
2021-04-04T09:31:52.000Z
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from ast_parser.core import analyze
from ast_parser.core import cli_list_source_files_datatypes as cli_datatypes
from ast_parser.core.cli_list_source_files_datatypes \
import ShowTestedFilesOption
# This file contains helper functions for the list_source_files CLI command.
def process_list_source_files(
invocation: cli_datatypes.ListSourceFilesInvocation
) -> cli_datatypes.ListSourceFilesResult:
"""Compute values displayed in list_source_files
This method is a helper method that computes the values displayed in
list_source_files. (Some of the returned values may not be displayed,
depending on CLI options.)
Args:
invocation: A CLI invocation object with the requisite user input.
Returns:
A CLI response object with the required processed data.
"""
grep_tags, source_tags, ignored_tags, source_methods = (
analyze.analyze_json(invocation.data_json, invocation.root_dir))
# Ignore methods without region tags
source_methods = [method for method in source_methods
if method['region_tags']]
any_tested_files = set(
method['source_path'] for method in source_methods
if method['test_methods']
)
any_untested_files = set(
method['source_path'] for method in source_methods
if not method['test_methods']
)
all_files = set(method['source_path'] for method in source_methods)
all_tested_files = [file for file in any_tested_files
if file not in any_untested_files]
not_tested_files = [file for file in any_untested_files
if file not in any_tested_files]
return cli_datatypes.ListSourceFilesResult(
all_files,
all_tested_files,
any_tested_files,
not_tested_files,
)
def format_list_source_files(
invocation: cli_datatypes.ListSourceFilesInvocation,
result: cli_datatypes.ListSourceFilesResult,
) -> List[str]:
"""Convert computed values for list_source_files into formatted output.
This method is a helper method that converts the results generated by the
    process_list_source_files() method into a human-readable list of filepaths
based on user-specified CLI options.
Args:
invocation: A CLI invocation object with the requisite user input.
result: A CLI response object with the required processed data.
Returns:
Human readable output as a list of lines. (This output *does* account
for annotations in .drift-data.yml files.)
"""
tested_filter = invocation.show_tested_files
if tested_filter == ShowTestedFilesOption.ALL_TESTED:
files = result.all_tested_files
elif tested_filter == ShowTestedFilesOption.ANY_TESTED:
files = result.any_tested_files
elif tested_filter == ShowTestedFilesOption.NOT_TESTED:
files = result.not_tested_files
else:
files = result.all_files
return files
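# --- Editor's illustrative flow (not part of the original file) ---
# The two helpers above are meant to be chained: compute the result first,
# then format it according to the show_tested_files option carried on the
# invocation object.
def _example_list_source_files(
        invocation: cli_datatypes.ListSourceFilesInvocation) -> List[str]:
    result = process_list_source_files(invocation)
    return format_list_source_files(invocation, result)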
| 34.528846
| 79
| 0.728766
|
c606bcc276909723ed111b5e04add80daa9fce36
| 3,133
|
py
|
Python
|
rtplot.py
|
elrobotista/serial_port
|
d77ff1901c2c7d16af5c4e821a482ed0ca92f44e
|
[
"MIT"
] | 3
|
2018-02-19T13:36:23.000Z
|
2020-03-10T23:34:15.000Z
|
rtplot.py
|
AllanStark/serial_port
|
d77ff1901c2c7d16af5c4e821a482ed0ca92f44e
|
[
"MIT"
] | null | null | null |
rtplot.py
|
AllanStark/serial_port
|
d77ff1901c2c7d16af5c4e821a482ed0ca92f44e
|
[
"MIT"
] | 3
|
2018-02-19T05:58:41.000Z
|
2019-05-17T13:11:46.000Z
|
import serial
import json
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import threading
import numpy as np
# Serial port
port = serial.Serial('/dev/ttyACM0', 115200, timeout = 1.)
# Create the axes and the figure where we will plot.
fig, ax = plt.subplots()
# This dictionary will store the sensor data.
signal = {'x': [], 'y': [], 'z': []}
# These lines will draw the data on the figure.
lines = [ax.plot([], [])[0] for _ in signal.iterkeys()]
# We will use this variable to detect autoscaling of the plot.
ylim = ()
# The stream function will be called periodically by the timer, every fixed
# number of milliseconds defined by the rate variable.
def stream():
    # This is the character string read from the serial port,
    # in JSON format.
    raw_data = port.readline()
    try:
        # The json module lets us convert a JSON-formatted string into a
        # Python dictionary. If the string does not arrive in the right
        # format, or the data gets corrupted, the program reports it in
        # the ValueError exception block.
        json_data = json.loads(raw_data)
        for k in signal.iterkeys():
            signal[k].append(json_data[k])
    except ValueError:
        print('Could not read data: %s' % raw_data)
    # If the port is still open, schedule another call to this function
    # to read the serial port again.
if port.is_open:
threading.Timer(10 / 1000., stream).start()
else:
print('Not streaming anymore!')
def animate(i):
    # The following two lines of code auto-adjust the Y axis based on the
    # contents of the plot. It took me a while to find these functions.
    # Guard them with your life and share them!
    global ylim
    ax.relim()
    ax.autoscale_view()
    if ax.get_ylim() != ylim:
        # This part of the code monitors the Y-axis limit values to detect
        # when the plot has been rescaled, so the Y-axis labels can be
        # redrawn as it rescales. Otherwise the labels stay in place while
        # the axis rescales and the values no longer match what is shown on
        # the axis. Feel free to remove it to see what I mean.
ylim = ax.get_ylim()
fig.canvas.draw()
for name, line in zip(signal.keys(), lines):
        # If there is no new data, we don't even bother trying to plot.
if len(signal[name]) > 0:
_, ly = line.get_data()
ly = np.append(ly, signal[name])
_xdata = np.arange(ly.size)
line.set_data(_xdata, ly)
            # The data has been plotted. We can now discard it.
signal[name] = []
else:
print('Signal has no data')
return lines
if __name__ == '__main__':
ani = animation.FuncAnimation(fig, animate, interval=50, blit=True)
stream()
plt.show(block = False)
while raw_input('Hit Q to exit.\n\r> ').lower() != 'q':
pass
port.close()
| 34.811111
| 78
| 0.647941
|
3f33a38d6c6a7cd2123e08b97630d637c1cc7f0c
| 1,248
|
py
|
Python
|
jenkins_jobs/modules/hudson_model.py
|
pratikmallya/jenkins-job-builder
|
83114faba6b85bb0dae066ca3ec4f23871683fb5
|
[
"Apache-2.0"
] | 3
|
2019-03-03T20:15:29.000Z
|
2020-11-17T18:48:27.000Z
|
jenkins_jobs/modules/hudson_model.py
|
pratikmallya/jenkins-job-builder
|
83114faba6b85bb0dae066ca3ec4f23871683fb5
|
[
"Apache-2.0"
] | 12
|
2015-11-30T17:13:11.000Z
|
2022-03-18T10:58:07.000Z
|
jenkins_jobs/modules/hudson_model.py
|
pratikmallya/jenkins-job-builder
|
83114faba6b85bb0dae066ca3ec4f23871683fb5
|
[
"Apache-2.0"
] | 11
|
2016-10-25T12:03:32.000Z
|
2021-04-27T12:16:51.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Representation of the hudson.model.Result class
SUCCESS = {
'name': 'SUCCESS',
'ordinal': '0',
'color': 'BLUE',
'complete': True
}
UNSTABLE = {
'name': 'UNSTABLE',
'ordinal': '1',
'color': 'YELLOW',
'complete': True
}
FAILURE = {
'name': 'FAILURE',
'ordinal': '2',
'color': 'RED',
'complete': True
}
NOTBUILD = {
'name': 'NOT_BUILD',
'ordinal': '3',
'color': 'NOTBUILD',
'complete': False
}
ABORTED = {
'name': 'ABORTED',
'ordinal': '4',
'color': 'ABORTED',
'complete': False
}
THRESHOLDS = {
'SUCCESS': SUCCESS,
'UNSTABLE': UNSTABLE,
'FAILURE': FAILURE,
'NOT_BUILD': NOTBUILD,
'ABORTED': ABORTED
}
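# Editor's note: callers can resolve a build result name through this mapping,
# e.g. THRESHOLDS['UNSTABLE']['ordinal'] == '1' and
# THRESHOLDS['FAILURE']['color'] == 'RED'.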
| 21.894737
| 75
| 0.634615
|
2bcacdb6ab5003877b366bf2fc46259996a95aa7
| 3,204
|
py
|
Python
|
app/home/migrations/0001_initial.py
|
Monxun/monxun-code
|
35ce6d4aaf3a8afa0b80dde1dae9a1e7603a1f84
|
[
"MIT"
] | null | null | null |
app/home/migrations/0001_initial.py
|
Monxun/monxun-code
|
35ce6d4aaf3a8afa0b80dde1dae9a1e7603a1f84
|
[
"MIT"
] | null | null | null |
app/home/migrations/0001_initial.py
|
Monxun/monxun-code
|
35ce6d4aaf3a8afa0b80dde1dae9a1e7603a1f84
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.6 on 2021-11-09 09:52
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='TableBacktests',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('backtest_chart', models.FileField(upload_to='')),
('backtest_json', django.contrib.postgres.fields.jsonb.JSONField()),
],
options={
'db_table': 'backtests',
'managed': False,
},
),
migrations.CreateModel(
name='TableCharts',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('biz_chart', models.FileField(upload_to='')),
('biz_json', django.contrib.postgres.fields.jsonb.JSONField()),
],
options={
'db_table': 'charts',
'managed': False,
},
),
migrations.CreateModel(
name='TableCompanyInfo',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('company_json', django.contrib.postgres.fields.jsonb.JSONField()),
],
options={
'db_table': 'company_info',
'managed': False,
},
),
migrations.CreateModel(
name='TableCurrencyuInfo',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('company_json', django.contrib.postgres.fields.jsonb.JSONField()),
],
options={
'db_table': 'currency_info',
'managed': False,
},
),
migrations.CreateModel(
name='TableForecasts',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('forecast_chart', models.FileField(upload_to='')),
('forecast_json', django.contrib.postgres.fields.jsonb.JSONField()),
],
options={
'db_table': 'forecasts',
'managed': False,
},
),
migrations.CreateModel(
name='TableSongs',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('song_name', models.CharField(max_length=255)),
('artist', models.CharField(max_length=255)),
('song_file', models.FileField(upload_to='')),
('song_analysis', django.contrib.postgres.fields.jsonb.JSONField()),
],
options={
'db_table': 'songs',
'managed': False,
},
),
]
| 36.409091
| 117
| 0.518414
|
ad77d224ca791d3e52b17337c0ede159c9c0fc50
| 6,554
|
py
|
Python
|
djforms/languages/tle/views.py
|
carthagecollege/django-djforms
|
6d05a13223646f47062f827c6d90c88bcbdcb470
|
[
"Unlicense"
] | 1
|
2019-11-20T12:30:50.000Z
|
2019-11-20T12:30:50.000Z
|
djforms/languages/tle/views.py
|
carthagecollege/django-djforms
|
6d05a13223646f47062f827c6d90c88bcbdcb470
|
[
"Unlicense"
] | 8
|
2020-03-23T20:36:06.000Z
|
2022-02-20T17:06:13.000Z
|
djforms/languages/tle/views.py
|
carthagecollege/django-djforms
|
6d05a13223646f47062f827c6d90c88bcbdcb470
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.http import Http404
from django.conf import settings
from django.utils.dates import MONTHS
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse_lazy
from djforms.languages.tle.forms import *
from djtools.utils.mail import send_mail
from djtools.utils.convert import str_to_class
import datetime
def application_form(request, stype):
form_name = '{0}Form'.format(stype.capitalize())
form = str_to_class('djforms.languages.tle.forms', form_name)()
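    # Editor's note: e.g. stype == 'masters' is expected to resolve to a
    # MastersForm class in djforms.languages.tle.forms; the `if not form`
    # check below guards against unknown application types.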
if not form:
raise Http404
education = ''
ulength = 1
if request.method == 'POST':
form = str_to_class('djforms.languages.tle.forms', form_name)(request.POST)
# check if someone is attempting something nefarious
if not form:
raise Http404
data = request.POST.copy()
if form.is_valid():
cd = form.cleaned_data
if stype == 'masters':
education = ''
# collect our university fields
university = data.getlist('university[]')
country = data.getlist('country[]')
from_month = data.getlist('from_month[]')
to_month = data.getlist('to_month[]')
from_year = data.getlist('from_year[]')
to_year = data.getlist('to_year[]')
degree = data.getlist('degree[]')
# establish the number of universities submitted
# and iterate over them to build education
for index in range(len(university)):
education += '<dl>'
education += '''
<dt>University</dt><dd>{0}</dd>
'''.format(university[index])
education += '<dt>Country</dt><dd>{0}</dd>'.format(
country[index],
)
education += '''
<dt>From</dt><dd>{0} {1}</dd>
'''.format(from_month[index], from_year[index])
education += '''
<dt>To</dt><dd>{0} {1}</dd>
'''.format(to_month[index], to_year[index])
education += '<dt>Degree</dt><dd>{0}</dd>'.format(
degree[index]
)
education += '</dl>'
cd['education'] = education
cd['type'] = stype
if settings.DEBUG:
TO_LIST = [settings.SERVER_EMAIL]
else:
TO_LIST = settings.MODERN_LANGUAGES_TLE_APPLICATIONS
TO_LIST.append(cd['email'])
subject = '[Modern Languages] TLE {0}: {1} {2}'.format(
stype.capitalize(), cd['first_name'], cd['last_name'],
)
send_mail(
request,
TO_LIST,
subject,
cd['email'],
'languages/tle/email.html',
cd,
settings.MANAGERS,
)
return HttpResponseRedirect(reverse_lazy('tle_success'))
elif stype == 'masters':
# collect our fields
university = data.getlist('university[]')
country = data.getlist('country[]')
from_month = data.getlist('from_month[]')
to_month = data.getlist('to_month[]')
from_year = data.getlist('from_year[]')
to_year = data.getlist('to_year[]')
degree = data.getlist('degree[]')
# establish the number of universities submitted
# and iterate over them to build our form parts
ulength = len(university)
for index in range(ulength):
if len(university) == 1 or index == 0:
education += '<ol id="universities">'
elif index > 0:
num = int(index) - 1
education += '<ol id="universities{}">'.format(str(num))
education += '<li class="ctrlHolder"><h3>University Name</h3><input type="text" name="university[]" value="{0}" />'.format(university[index])
education += '<li class="ctrlHolder"><h3>Country</h3><input type="text" name="country[]" value="{0}" />'.format(country[index])
education += '<li class="ctrlHolder"><h3>From</h3>'
education += '<select name="from_month[]" class="small">'
options_month = ''
for month in range(len(MONTHS)):
selected = ''
if MONTHS[month+1] == from_month[index]:
selected = ' selected="selected"'
options_month += '<option value="{0}"{1}>{2}</option>'.format(MONTHS[month+1], selected, MONTHS[month+1])
education += options_month
education += '</select>'
education += 'Year <input type="text" class="small" name="from_year[]" value="{0}" />'.format(from_year[index])
education += '</li>'
education += '<li class="ctrlHolder"><h3>To</h3>'
education += '<select name="to_month[]" class="small">'
options_month = ''
for month in range(len(MONTHS)):
selected = ''
if MONTHS[month+1] == to_month[index]:
selected = ' selected="selected"'
options_month += '<option value="{0}"{1}>{2}</option>'.format(
MONTHS[month+1], selected, MONTHS[month+1],
)
education += options_month
education += '</select>'
education += 'Year <input type="text" class="small" name="to_year[]" value="{0}" />'.format(to_year[index])
education += '</li>'
education += '<li class="ctrlHolder"><h3>Diploma/Degree</h3><input type="text" name="degree[]" value="{0}" /></li>'.format(degree[index])
education += '<li class="ctrlHolder"><hr /></li>'
education += '</ol>'
return render(
request,
'languages/tle/form.html',
{
'form': form,
'type': stype,
'months': MONTHS,
'education': education,
'length': ulength,
}
)
| 44.890411
| 158
| 0.489319
|
bcdd539602dd68dfd5e90eba2645f29db0364b64
| 97
|
py
|
Python
|
api/tests/utils.py
|
equinor/lcm
|
338bf67e6eb412446e469b4c73f7000990445ebd
|
[
"MIT"
] | 3
|
2020-12-02T11:14:31.000Z
|
2021-12-09T16:53:53.000Z
|
api/tests/utils.py
|
equinor/lcm
|
338bf67e6eb412446e469b4c73f7000990445ebd
|
[
"MIT"
] | 76
|
2020-09-29T10:59:10.000Z
|
2022-01-03T07:41:29.000Z
|
api/tests/utils.py
|
equinor/lcm
|
338bf67e6eb412446e469b4c73f7000990445ebd
|
[
"MIT"
] | 2
|
2021-01-25T14:24:57.000Z
|
2021-01-25T14:51:16.000Z
|
def read_file(path: str) -> bytes:
with open(path, "rb") as file:
return file.read()
| 24.25
| 34
| 0.597938
|
1b8ca10a0614292ef2a15e796b2a09eaacfdd4ce
| 11,196
|
py
|
Python
|
data/external/repositories_2to3/267667/kaggle-heart-master/configurations/je_os_fixedaggr_leaky.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories_2to3/267667/kaggle-heart-master/configurations/je_os_fixedaggr_leaky.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories_2to3/267667/kaggle-heart-master/configurations/je_os_fixedaggr_leaky.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | 1
|
2019-12-04T08:23:33.000Z
|
2019-12-04T08:23:33.000Z
|
"""Single slice vgg with normalised scale.
"""
import functools
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
import data_loader
import deep_learning_layers
import image_transform
import layers
import preprocess
import postprocess
import objectives
import theano_printer
import updates
import utils
# Random params
rng = np.random
take_a_dump = False # dump a lot of data in a pkl-dump file. (for debugging)
dump_network_loaded_data = False # dump the outputs from the dataloader (for debugging)
# Memory usage scheme
caching = None
# Save and validation frequency
validate_every = 20
validate_train_set = True
save_every = 20
restart_from_save = False
dump_network_loaded_data = False
# Training (schedule) parameters
# - batch sizes
batch_size = 8
sunny_batch_size = 4
batches_per_chunk = 16
num_epochs_train = 200
# - learning rate and method
base_lr = 0.0001
learning_rate_schedule = {
0: base_lr,
9*num_epochs_train/10: base_lr/10,
}
momentum = 0.9
build_updates = updates.build_adam_updates
# Preprocessing stuff
cleaning_processes = [
preprocess.set_upside_up,]
cleaning_processes_post = [
functools.partial(preprocess.normalize_contrast_zmuv, z=2)]
augmentation_params = {
"rotation": (-180, 180),
"shear": (0, 0),
"translation": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0),
}
use_hough_roi = True
preprocess_train = functools.partial( # normscale_resize_and_augment has a bug
preprocess.preprocess_normscale,
normscale_resize_and_augment_function=functools.partial(
image_transform.normscale_resize_and_augment_2,
normalised_patch_size=(64,64)))
preprocess_validation = functools.partial(preprocess_train, augment=False)
preprocess_test = preprocess_train
sunny_preprocess_train = preprocess.sunny_preprocess_with_augmentation
sunny_preprocess_validation = preprocess.sunny_preprocess_validation
sunny_preprocess_test = preprocess.sunny_preprocess_validation
# Data generators
create_train_gen = data_loader.generate_train_batch
create_eval_valid_gen = functools.partial(data_loader.generate_validation_batch, set="validation")
create_eval_train_gen = functools.partial(data_loader.generate_validation_batch, set="train")
create_test_gen = functools.partial(data_loader.generate_test_batch, set=["validation", "test"])
def filter_samples(folders):
    # don't use patients who don't have more than 6 slices
return [
folder for folder in folders
if data_loader.compute_nr_slices(folder) > 6]
# Input sizes
image_size = 64
nr_slices = 20
data_sizes = {
"sliced:data:sax": (batch_size, nr_slices, 30, image_size, image_size),
"sliced:data:sax:locations": (batch_size, nr_slices),
"sliced:data:sax:is_not_padded": (batch_size, nr_slices),
"sliced:data:randomslices": (batch_size, nr_slices, 30, image_size, image_size),
"sliced:data:singleslice:difference:middle": (batch_size, 29, image_size, image_size),
"sliced:data:singleslice:difference": (batch_size, 29, image_size, image_size),
"sliced:data:singleslice": (batch_size, 30, image_size, image_size),
"sliced:data:ax": (batch_size, 30, 15, image_size, image_size),
"sliced:data:shape": (batch_size, 2,),
"sunny": (sunny_batch_size, 1, image_size, image_size)
# TBC with the metadata
}
# Objective
l2_weight = 0.000
l2_weight_out = 0.000
def build_objective(interface_layers):
# l2 regu on certain layers
l2_penalty = nn.regularization.regularize_layer_params_weighted(
interface_layers["regularizable"], nn.regularization.l2)
# build objective
return objectives.KaggleObjective(interface_layers["outputs"], penalty=l2_penalty)
# Testing
postprocess = postprocess.postprocess
test_time_augmentations = 100 # More augmentations since a we only use single slices
tta_average_method = lambda x: np.cumsum(utils.norm_geometric_average(utils.cdf_to_pdf(x)))
# nonlinearity putting a lower bound on it's output
def lb_softplus(lb):
return lambda x: nn.nonlinearities.softplus(x) + lb
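# Editor's note: e.g. lb_softplus(.1) is used for the sigma outputs below so
# that the predicted standard deviation can never drop below 0.1.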
init = nn.init.Orthogonal()
rnn_layer = functools.partial(nn.layers.RecurrentLayer,
W_in_to_hid=init,
W_hid_to_hid=init,
b=nn.init.Constant(0.1),
nonlinearity=nn.nonlinearities.very_leaky_rectify,
hid_init=nn.init.Constant(0.),
backwards=False,
learn_init=True,
gradient_steps=-1,
grad_clipping=False,
unroll_scan=False,
precompute_input=False)
# Architecture
def build_model():
#################
# Regular model #
#################
input_size = data_sizes["sliced:data:sax"]
input_size_mask = data_sizes["sliced:data:sax:is_not_padded"]
input_size_locations = data_sizes["sliced:data:sax:locations"]
l0 = nn.layers.InputLayer(input_size)
lin_slice_mask = nn.layers.InputLayer(input_size_mask)
lin_slice_locations = nn.layers.InputLayer(input_size_locations)
# PREPROCESS SLICES SEPERATELY
# Convolutional layers and some dense layers are defined in a submodel
l0_slices = nn.layers.ReshapeLayer(l0, (-1, [2], [3], [4]))
l1a = nn.layers.dnn.Conv2DDNNLayer(l0_slices, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l1b = nn.layers.dnn.Conv2DDNNLayer(l1a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1b, pool_size=(2,2), stride=(2,2))
l2a = nn.layers.dnn.Conv2DDNNLayer(l1, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l2b = nn.layers.dnn.Conv2DDNNLayer(l2a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2b, pool_size=(2,2), stride=(2,2))
l3a = nn.layers.dnn.Conv2DDNNLayer(l2, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l3b = nn.layers.dnn.Conv2DDNNLayer(l3a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l3c = nn.layers.dnn.Conv2DDNNLayer(l3b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3c, pool_size=(2,2), stride=(2,2))
l4a = nn.layers.dnn.Conv2DDNNLayer(l3, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l4b = nn.layers.dnn.Conv2DDNNLayer(l4a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l4c = nn.layers.dnn.Conv2DDNNLayer(l4b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4c, pool_size=(2,2), stride=(2,2))
l5a = nn.layers.dnn.Conv2DDNNLayer(l4, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l5b = nn.layers.dnn.Conv2DDNNLayer(l5a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l5c = nn.layers.dnn.Conv2DDNNLayer(l5b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l5 = nn.layers.dnn.MaxPool2DDNNLayer(l5c, pool_size=(2,2), stride=(2,2))
# Systole Dense layers
ldsys1 = nn.layers.DenseLayer(l5, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.very_leaky_rectify)
ldsys1drop = nn.layers.dropout(ldsys1, p=0.5)
ldsys2 = nn.layers.DenseLayer(ldsys1drop, num_units=512, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.very_leaky_rectify)
ldsys2drop = nn.layers.dropout(ldsys2, p=0.5)
l_sys_mu = nn.layers.DenseLayer(ldsys2drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(10.0), nonlinearity=None)
l_sys_sigma = nn.layers.DenseLayer(ldsys2drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(3.), nonlinearity=lb_softplus(.1))
# Diastole Dense layers
lddia1 = nn.layers.DenseLayer(l5, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.very_leaky_rectify)
lddia1drop = nn.layers.dropout(lddia1, p=0.5)
lddia2 = nn.layers.DenseLayer(lddia1drop, num_units=512, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.very_leaky_rectify)
lddia2drop = nn.layers.dropout(lddia2, p=0.5)
l_dia_mu = nn.layers.DenseLayer(lddia2drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(10.0), nonlinearity=None)
l_dia_sigma = nn.layers.DenseLayer(lddia2drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(3.), nonlinearity=lb_softplus(.1))
# AGGREGATE SLICES PER PATIENT
l_scaled_slice_locations = layers.TrainableScaleLayer(lin_slice_locations, scale=nn.init.Constant(0.1), trainable=False)
# Systole
l_pat_sys_ss_mu = nn.layers.ReshapeLayer(l_sys_mu, (-1, nr_slices))
l_pat_sys_ss_sigma = nn.layers.ReshapeLayer(l_sys_sigma, (-1, nr_slices))
l_pat_sys_aggr_mu_sigma = layers.JeroenLayer([l_pat_sys_ss_mu, l_pat_sys_ss_sigma, lin_slice_mask, l_scaled_slice_locations], rescale_input=1.)
l_systole = layers.MuSigmaErfLayer(l_pat_sys_aggr_mu_sigma)
# Diastole
l_pat_dia_ss_mu = nn.layers.ReshapeLayer(l_dia_mu, (-1, nr_slices))
l_pat_dia_ss_sigma = nn.layers.ReshapeLayer(l_dia_sigma, (-1, nr_slices))
l_pat_dia_aggr_mu_sigma = layers.JeroenLayer([l_pat_dia_ss_mu, l_pat_dia_ss_sigma, lin_slice_mask, l_scaled_slice_locations], rescale_input=1.)
l_diastole = layers.MuSigmaErfLayer(l_pat_dia_aggr_mu_sigma)
return {
"inputs":{
"sliced:data:sax": l0,
"sliced:data:sax:is_not_padded": lin_slice_mask,
"sliced:data:sax:locations": lin_slice_locations,
},
"outputs": {
"systole": l_systole,
"diastole": l_diastole,
},
"regularizable": {
ldsys1: l2_weight,
ldsys2: l2_weight,
l_sys_mu: l2_weight_out,
l_sys_sigma: l2_weight_out,
lddia1: l2_weight,
lddia2: l2_weight,
l_dia_mu: l2_weight_out,
l_dia_sigma: l2_weight_out,
},
}
| 45.145161
| 193
| 0.7199
|
0a54e537b27cd46df3f5d33aab434dbfd5789c55
| 2,453
|
py
|
Python
|
examples/torch/dqn_cartpole.py
|
michahu/garage
|
c045a1e5e5088a18828ec48bfee0addb1943bfd4
|
[
"MIT"
] | 1
|
2021-01-11T18:40:52.000Z
|
2021-01-11T18:40:52.000Z
|
examples/torch/dqn_cartpole.py
|
michahu/garage
|
c045a1e5e5088a18828ec48bfee0addb1943bfd4
|
[
"MIT"
] | null | null | null |
examples/torch/dqn_cartpole.py
|
michahu/garage
|
c045a1e5e5088a18828ec48bfee0addb1943bfd4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""An example to train a task with DQN algorithm.
Here it creates a gym environment CartPole, and trains a DQN with 50k steps.
"""
import click
from garage import wrap_experiment
from garage.envs import GymEnv
from garage.experiment.deterministic import set_seed
from garage.np.exploration_policies import EpsilonGreedyPolicy
from garage.replay_buffer import PathBuffer
from garage.sampler import LocalSampler
from garage.torch.algos import DQN
from garage.torch.policies import DiscreteQFArgmaxPolicy
from garage.torch.q_functions import DiscreteMLPQFunction
from garage.trainer import Trainer
@click.command()
@click.option('--seed', default=24)
@wrap_experiment(snapshot_mode='none')
def dqn_cartpole(ctxt=None, seed=24):
"""Train DQN with CartPole-v0 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
runner = Trainer(ctxt)
n_epochs = 100
steps_per_epoch = 10
sampler_batch_size = 512
num_timesteps = n_epochs * steps_per_epoch * sampler_batch_size
env = GymEnv('CartPole-v0')
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))
qf = DiscreteMLPQFunction(env_spec=env.spec, hidden_sizes=(8, 5))
policy = DiscreteQFArgmaxPolicy(env_spec=env.spec, qf=qf)
exploration_policy = EpsilonGreedyPolicy(env_spec=env.spec,
policy=policy,
total_timesteps=num_timesteps,
max_epsilon=1.0,
min_epsilon=0.01,
decay_ratio=0.4)
algo = DQN(env_spec=env.spec,
policy=policy,
qf=qf,
exploration_policy=exploration_policy,
replay_buffer=replay_buffer,
steps_per_epoch=steps_per_epoch,
qf_lr=5e-5,
discount=0.9,
min_buffer_size=int(1e4),
n_train_steps=500,
target_update_freq=30,
buffer_batch_size=64)
runner.setup(algo, env, sampler_cls=LocalSampler)
runner.train(n_epochs=n_epochs, batch_size=sampler_batch_size)
env.close()
dqn_cartpole()
| 35.550725
| 76
| 0.649001
|
956c966f9886c941baa42d9ea3afe5e67939c151
| 8,025
|
py
|
Python
|
render_swisscube.py
|
Komod0D/DeepIM-PyTorch
|
ecd4788a4a7617e1cf9c309910ef024fb2c09f85
|
[
"BSD-Source-Code"
] | null | null | null |
render_swisscube.py
|
Komod0D/DeepIM-PyTorch
|
ecd4788a4a7617e1cf9c309910ef024fb2c09f85
|
[
"BSD-Source-Code"
] | null | null | null |
render_swisscube.py
|
Komod0D/DeepIM-PyTorch
|
ecd4788a4a7617e1cf9c309910ef024fb2c09f85
|
[
"BSD-Source-Code"
] | null | null | null |
import json
import os

import cv2
import numpy as np
import pyrender
import torch
import trimesh
from scipy.spatial.transform import Rotation as R
from transforms3d.quaternions import mat2quat, quat2mat, qmult

# use EGL so pyrender can render offscreen without a display
os.environ['PYOPENGL_PLATFORM'] = 'egl'
def to_homo(rotation, translation):
transform = np.eye(4)
transform[:3, :3] = rotation
transform[:3, 3] = translation
return transform
def get_corners(mesh, intrinsic, rotation, translation):
box = mesh.bounding_box.to_mesh()
vertices = np.array(box.vertices)
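    # project the eight bounding-box corners with the pinhole model: x_img ~ intrinsic @ (R X + t)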
proj = intrinsic @ (rotation @ vertices.T + translation[:, np.newaxis])
proj[0] = proj[0] / proj[2]
proj[1] = proj[1] / proj[2]
return proj[:2].T
def add_pose_contour(mesh, intrinsic, rotation, translation, color, image, img_scaling=4, thickness=1):
image = np.copy(image)
height, width, _ = image.shape
vs = get_corners(mesh, intrinsic, rotation, translation) / img_scaling
ps = [(int(vs[i, 0]), int(vs[i, 1])) for i in range(vs.shape[0])]
# z direction
for i in range(4):
cv2.line(image, ps[2 * i], ps[2 * i + 1], color, thickness=thickness)
# y direction
for j in range(2):
for i in range(2):
cv2.line(image, ps[i + 4 * j], ps[i + 2 + 4 * j], color, thickness=thickness)
# x direction
for i in range(4):
cv2.line(image, ps[i], ps[i + 4], color, thickness=thickness)
return image
def quaternion2rotation(quat):
    '''
    Convert a (w, x, y, z) quaternion to a 3x3 rotation matrix.
    Do not use the quat2dcm() function from the SPEED utils.py; it does not return a proper rotation matrix.
    '''
assert (len(quat) == 4)
# normalize first
quat = quat / np.linalg.norm(quat)
a, b, c, d = quat
a2 = a * a
b2 = b * b
c2 = c * c
d2 = d * d
ab = a * b
ac = a * c
ad = a * d
bc = b * c
bd = b * d
cd = c * d
# s = a2 + b2 + c2 + d2
m0 = a2 + b2 - c2 - d2
m1 = 2 * (bc - ad)
m2 = 2 * (bd + ac)
m3 = 2 * (bc + ad)
m4 = a2 - b2 + c2 - d2
m5 = 2 * (cd - ab)
m6 = 2 * (bd - ac)
m7 = 2 * (cd + ab)
m8 = a2 - b2 - c2 + d2
return np.array([m0, m1, m2, m3, m4, m5, m6, m7, m8]).reshape(3, 3)
class Renderer:
def __init__(self, synthetic=False):
self.synthetic = synthetic
os.environ['PYOPENGL_PLATFORM'] = 'egl'
tscene = trimesh.load('/cvlabdata2/cvlab/datasets_protopap/deepim/data/models/swisscube/swisscube.obj')
mesh = pyrender.Mesh.from_trimesh(list(tscene.geometry.values()), smooth=False)
if synthetic:
width, height = 1024, 1024
else:
width, height = 2048, 2048
self.renderer = pyrender.OffscreenRenderer(viewport_width=width, viewport_height=height, point_size=1.0)
scene = pyrender.Scene(ambient_light=[0.02, 0.02, 0.02], bg_color=[0, 0, 0])
light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=1000000.0)
if synthetic:
fx, fy, cx, cy = 607, 607, 512, 512
else:
fx, fy, cx, cy = 4000, 4000, 1024, 1024
self.intrinsic = np.array([fx, 0, cx, 0, fy, cy, 0, 0, 1]).reshape((3, 3))
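        # pinhole intrinsics [[fx, 0, cx], [0, fy, cy], [0, 0, 1]], stored so callers can
        # reproject points (e.g. via get_corners)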
cam = pyrender.IntrinsicsCamera(fx, fy, cx, cy, zfar=2000)
cam_rot = R.from_euler('y', 180, degrees=True).as_matrix()
cam_matrix = to_homo(cam_rot, np.zeros((3,)))
self.nm = pyrender.Node(mesh=mesh, matrix=np.eye(4))
nl = pyrender.Node(light=light, matrix=np.eye(4))
nc = pyrender.Node(camera=cam, matrix=cam_matrix)
scene.add_node(self.nm)
scene.add_node(nl)
scene.add_node(nc)
self.scene = scene
def set_light_pos(self, *args):
pass
def set_light_color(self, *args):
pass
def set_projection_matrix(self, width, height, fx, fy, px, py, znear, zfar):
pass
def set_poses(self, poses):
self.set_pose(poses[0])
def set_pose(self, pose):
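        # expected layout: [quaternion (4), translation (3)]; 9-element DeepIM-style poses
        # carry two leading entries that are dropped first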
pose = np.array(pose)
if pose.shape[0] == 9:
pose = pose[2:]
translation, rotation_quat = pose[4:], pose[:4]
translation = np.array(translation)
if not self.synthetic:
rotation = quaternion2rotation(rotation_quat)
else:
rotation = quat2mat(rotation_quat)
rotation = rotation @ R.from_euler('x', 90, degrees=True).as_matrix()
transform = to_homo(rotation, translation)
self.scene.set_pose(self.nm, pose=transform)
def render_(self):
color, depth = self.renderer.render(self.scene)
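        # downsample the full render to 640x640, crop vertically to 640x480, then flip both axes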
        color = cv2.resize(color, (640, 640), interpolation=cv2.INTER_AREA)
color = color[80:560]
return np.flip(color, (0, 1)).copy()
def render(self, image_tensor):
rgb = self.render_()
tensor = torch.from_numpy(np.transpose(rgb, (2, 0, 1)))
image_tensor.copy_(tensor)
def get_next(self, iteritems):
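        # real data: (image path, pose dict) pairs from data.json; synthetic data: image paths
        # whose ground-truth poses are read from the per-sequence scene_gt.json files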
if not self.synthetic:
img, pose = next(iteritems)
img = os.path.join(img.split('/')[0], 'Test', img.split('/')[1])
img = cv2.imread(img)
            img = cv2.resize(img, (640, 640), interpolation=cv2.INTER_AREA)
img = img[80:560]
rotation = pose['rotation_m2c']
translation = pose['translation_m2c']
else:
img_path = next(iteritems).strip()
full_path = os.path.join('/cvlabdata2/home/yhu/data/SwissCube_1.0', img_path)
num = str(int(os.path.splitext(os.path.basename(full_path))[0]))
img = cv2.imread(full_path)
            img = cv2.resize(img, (640, 640), interpolation=cv2.INTER_AREA)
img = img[80:560]
seq_name = os.path.dirname(os.path.dirname(full_path))
poses_name = os.path.join(seq_name, 'scene_gt.json')
with open(poses_name, 'r') as j:
poses = json.load(j)
pose = poses[num][0]
translation = np.array(pose['cam_t_m2c'])
rotation = np.array(pose['cam_R_m2c']).reshape((3, 3))
rotation = R.from_matrix(rotation).as_quat()
return img, translation, rotation
if __name__ == '__main__':
r = Renderer(True)
if r.synthetic:
os.chdir('/cvlabdata2/home/yhu/data/SwissCube_1.0')
with open('training.txt', 'r') as f:
images = f.readlines()
iteritems = iter(images)
else:
os.chdir('/cvlabdata2/cvlab/datasets_protopap/SwissCubeReal')
with open('data.json', 'r') as f:
poses = json.load(f)
iteritems = iter(poses.items())
img, translation, rotation = r.get_next(iteritems)
translation = np.array(translation)
cv2.imshow('image', img)
x, y, z = translation
dx, dy, dz = 0, 0, 0
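    # keyboard controls: w/s/a/d/e/q translate along y/x/z, key codes 81-86 adjust the
    # incremental rotation, Enter (13) loads the next sample, Esc (27) quits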
while True:
        rotation = R.from_quat(rotation).as_matrix() @ R.from_euler('xyz', [dx, dy, dz], degrees=True).as_matrix()
        rotation = R.from_matrix(rotation).as_quat()
        # clear the pending deltas so each rotation key press is applied only once
        dx, dy, dz = 0, 0, 0
        a, b, c, d = rotation
        # set_pose expects the quaternion first, then the translation
        pose = [a, b, c, d, x, y, z]
r.set_pose(pose)
color = r.render_()
cv2.imshow('render', color)
key = cv2.waitKey(0)
if key == 27:
break
elif key == 13:
img, translation, rotation = r.get_next(iteritems)
x, y, z = translation
a, b, c, d = rotation
cv2.imshow('image', img)
elif key == 119:
y += 10
elif key == 115:
y -= 10
elif key == 97:
x -= 10
elif key == 100:
x += 10
elif key == 101:
z += 10
elif key == 113:
z -= 10
elif key == 81:
dy -= 15
elif key == 83:
dy += 15
elif key == 82:
dx -= 15
elif key == 84:
dx += 15
elif key == 85:
dz += 15
elif key == 86:
dz -= 15
| 29.612546 | 114 | 0.541558 |