text stringlengths 4 1.02M | meta dict |
|---|---|
import multiprocessing
import numpy as np
import xarray as xr
import healpy as hp
from astropy.cosmology import Planck15 as Cosmo
from opstats.utils import MWA_FREQ_EOR_ALL_80KHZ as F
from opstats.utils import F21
# Simulation parameters
nf = F.size  # number of frequency channels
nside = 4096  # HEALPix resolution parameter
# NOTE(review): under Python 2 this is integer division (= 7); under Python 3
# it is true division (= 7.8125). Confirm which interpreter this targets.
cube_res = 1000 / 128  # cube cell size: box length over cells per side
cube_size = 128  # number of cells per cube side
# Figure out pixel mapping.
# Query only pixels within 35 degrees of the north pole of the HEALPix sphere.
vx0, vy0, vz0 = hp.pix2vec(nside, 0)  # North Pole vector
valid_hpx_pix = hp.query_disc(nside, (vx0, vy0, vz0), np.pi * 35 / 180)
npix_hpx = valid_hpx_pix.size
# Comoving distance to the redshift of each frequency channel. Shape (nf,).
dc = Cosmo.comoving_distance(F21 / (F * 1e6) - 1).value
# Unit vector components of all valid HEALPix pixels. Each has shape (npix_hpx,).
vx, vy, vz = hp.pix2vec(nside, valid_hpx_pix)
# Construct healpix lightcone cubes from tiled simulation cube.
# One input cube file per frequency channel.
cube_files = ['/data6/piyanat/models/21cm/cubes/interpolated/'
              'interp_delta_21cm_l128_{:.3f}MHz.npy'
              .format(f) for f in F]
def cube2hpx(i):
    """Project the i-th frequency cube onto the valid HEALPix pixels and save it."""
    cube = np.load(cube_files[i])
    cube -= cube.mean()  # zero out the mean of the brightness field
    # Map each pixel's line-of-sight point at comoving distance dc[i] to an
    # index in the periodically tiled simulation cube. Shapes: (npix_hpx,).
    xi, yi, zi = (
        np.mod(np.around(v * dc[i] / cube_res).astype(int), cube_size)
        for v in (vx, vy, vz))
    healpix_map = cube[xi, yi, zi]
    out_path = ('/data6/piyanat/projects/hera1p/lightcone_healpix_northpole_fov35d/'
                'lightcone_healpix_northpole_fov35d_{:.3f}MHz.npy'.format(F[i]))
    np.save(out_path, healpix_map)
# Run one projection task per frequency channel across 8 worker processes.
if __name__ == '__main__':
    # The __main__ guard is required for multiprocessing: with the "spawn"
    # start method (default on Windows/macOS) each worker re-imports this
    # module, and an unguarded Pool would recursively spawn workers.
    pool = multiprocessing.Pool(8)
    try:
        pool.map(cube2hpx, np.arange(nf))
    finally:
        # Always release worker processes, even if a task raises.
        pool.close()
        pool.join()
| {
"content_hash": "2e97342561f4695732ca5790bf23af48",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 80,
"avg_line_length": 29.065573770491802,
"alnum_prop": 0.6661026508742245,
"repo_name": "piyanatk/sim",
"id": "cf389eb2d2df6da1263ab80a2fbf689cb781f7d5",
"size": "1773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/lightcone/healpix_lightcone_northpole_35d.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import sys
import re
import os
# Directory containing the generated *_stemmer.py modules.
python_out_folder = sys.argv[1]
filematch = re.compile(r"(\w+)_stemmer\.py$")
imports = []
languages = []
for filename in os.listdir(python_out_folder):
    m = filematch.match(filename)
    if not m:
        continue
    langname = m.group(1)
    titlecase = langname.title()
    # One dict entry and one import line per discovered stemmer module.
    languages.append(" '%(lang)s': %(title)sStemmer," % {'lang': langname, 'title': titlecase})
    imports.append('from .%(lang)s_stemmer import %(title)sStemmer' % {'lang': langname, 'title': titlecase})
imports.sort()
languages.sort()
# Template for the generated __init__.py. The %(imports)s / %(languages)s
# slots are filled from the lists built above; %% produces a literal percent
# sign so the generated file itself contains a %-format expression.
src = '''__all__ = ('language', 'stemmer')

%(imports)s

_languages = {
%(languages)s
}

try:
    import Stemmer
    cext_available = True
except ImportError:
    cext_available = False

def algorithms():
    if cext_available:
        return Stemmer.language()
    else:
        return list(_languages.keys())

def stemmer(lang):
    if cext_available:
        return Stemmer.Stemmer(lang)
    if lang.lower() in _languages:
        return _languages[lang.lower()]()
    else:
        raise KeyError("Stemming algorithm '%%s' not found" %% lang)
''' % {'imports': '\n'.join(imports), 'languages': '\n'.join(languages)}
# Write the package entry point next to the generated stemmer modules.
with open(os.path.join(python_out_folder, '__init__.py'), 'w') as out:
    out.write(src)
| {
"content_hash": "b9ef0d1fe482761f73ff87d56641de54",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 113,
"avg_line_length": 24.5,
"alnum_prop": 0.6263736263736264,
"repo_name": "snowballstem/snowball",
"id": "2346fbef8b40d6b13e53f24f9fb9f21b880ebf70",
"size": "1297",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/create_init.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Ada",
"bytes": "35949"
},
{
"name": "C",
"bytes": "507438"
},
{
"name": "C#",
"bytes": "27400"
},
{
"name": "Go",
"bytes": "10445"
},
{
"name": "Java",
"bytes": "11950"
},
{
"name": "JavaScript",
"bytes": "11164"
},
{
"name": "Makefile",
"bytes": "27833"
},
{
"name": "Pascal",
"bytes": "11533"
},
{
"name": "Perl",
"bytes": "8392"
},
{
"name": "Python",
"bytes": "19152"
},
{
"name": "Rust",
"bytes": "18667"
}
],
"symlink_target": ""
} |
"""Restrained Amber Minimization of a structure."""
import io
import time
from typing import Collection, Optional, Sequence
from absl import logging
from alphafold.common import protein
from alphafold.common import residue_constants
from alphafold.model import folding
from alphafold.relax import cleanup
from alphafold.relax import utils
import ml_collections
import numpy as np
from simtk import openmm
from simtk import unit
from simtk.openmm import app as openmm_app
from simtk.openmm.app.internal.pdbstructure import PdbStructure
# Physical units used consistently throughout this module.
ENERGY = unit.kilocalories_per_mole
LENGTH = unit.angstroms
def will_restrain(atom: "openmm_app.Atom", rset: str) -> bool:
  """Returns True if the atom will be restrained by the given restraint set.

  Args:
    atom: The OpenMM atom to test.
    rset: Restraint-set name, either "non_hydrogen" or "c_alpha".

  Returns:
    True if the atom belongs to the restraint set.

  Raises:
    ValueError: If `rset` is not a recognized restraint set. (Previously an
      unknown set fell through and implicitly returned None.)
  """
  if rset == "non_hydrogen":
    return atom.element.name != "hydrogen"
  if rset == "c_alpha":
    return atom.name == "CA"
  raise ValueError(f"Unknown restraint set: {rset!r}")
def _add_restraints(
    system: openmm.System,
    reference_pdb: openmm_app.PDBFile,
    stiffness: unit.Unit,
    rset: str,
    exclude_residues: Sequence[int]):
  """Adds a harmonic potential that restrains the system to a structure.

  Args:
    system: OpenMM system to receive the restraint force (modified in place).
    reference_pdb: Structure supplying the per-atom reference positions
      (x0, y0, z0).
    stiffness: Spring constant k of the harmonic restraint.
    rset: Which atoms to restrain: "non_hydrogen" or "c_alpha".
    exclude_residues: Zero-indexed residues whose atoms are never restrained.
  """
  assert rset in ["non_hydrogen", "c_alpha"]
  # Harmonic well centered on each restrained atom's reference position.
  force = openmm.CustomExternalForce(
      "0.5 * k * ((x-x0)^2 + (y-y0)^2 + (z-z0)^2)")
  force.addGlobalParameter("k", stiffness)
  for p in ["x0", "y0", "z0"]:
    force.addPerParticleParameter(p)
  for i, atom in enumerate(reference_pdb.topology.atoms()):
    if atom.residue.index in exclude_residues:
      continue
    if will_restrain(atom, rset):
      # Per-particle parameters are the reference coordinates of atom i.
      force.addParticle(i, reference_pdb.positions[i])
  logging.info("Restraining %d / %d particles.",
               force.getNumParticles(), system.getNumParticles())
  system.addForce(force)
def _openmm_minimize(
    pdb_str: str,
    max_iterations: int,
    tolerance: unit.Unit,
    stiffness: unit.Unit,
    restraint_set: str,
    exclude_residues: Sequence[int],
    use_gpu: bool):
  """Minimize energy via openmm.

  Args:
    pdb_str: PDB-format text of the structure to minimize.
    max_iterations: Maximum number of L-BFGS iterations; 0 means no limit.
    tolerance: Energy tolerance at which minimization stops.
    stiffness: Restraint spring constant; restraints are added only when > 0.
    restraint_set: "non_hydrogen" or "c_alpha".
    exclude_residues: Zero-indexed residues excluded from restraints.
    use_gpu: Run on the CUDA platform if True, otherwise CPU.

  Returns:
    Dict with initial/final potential energies ("einit"/"efinal"), initial and
    minimized positions ("posinit"/"pos"), and the minimized PDB ("min_pdb").
  """
  pdb_file = io.StringIO(pdb_str)
  pdb = openmm_app.PDBFile(pdb_file)
  force_field = openmm_app.ForceField("amber99sb.xml")
  constraints = openmm_app.HBonds
  system = force_field.createSystem(
      pdb.topology, constraints=constraints)
  if stiffness > 0 * ENERGY / (LENGTH**2):
    _add_restraints(system, pdb, stiffness, restraint_set, exclude_residues)
  # The integrator is never stepped (only minimizeEnergy is called), so its
  # parameters do not matter; it exists only to construct the Simulation.
  integrator = openmm.LangevinIntegrator(0, 0.01, 0.0)
  platform = openmm.Platform.getPlatformByName("CUDA" if use_gpu else "CPU")
  simulation = openmm_app.Simulation(
      pdb.topology, system, integrator, platform)
  simulation.context.setPositions(pdb.positions)
  ret = {}
  # Record the pre-minimization state for diagnostics.
  state = simulation.context.getState(getEnergy=True, getPositions=True)
  ret["einit"] = state.getPotentialEnergy().value_in_unit(ENERGY)
  ret["posinit"] = state.getPositions(asNumpy=True).value_in_unit(LENGTH)
  simulation.minimizeEnergy(maxIterations=max_iterations,
                            tolerance=tolerance)
  state = simulation.context.getState(getEnergy=True, getPositions=True)
  ret["efinal"] = state.getPotentialEnergy().value_in_unit(ENERGY)
  ret["pos"] = state.getPositions(asNumpy=True).value_in_unit(LENGTH)
  ret["min_pdb"] = _get_pdb_string(simulation.topology, state.getPositions())
  return ret
def _get_pdb_string(topology: openmm_app.Topology, positions: unit.Quantity):
  """Returns a pdb string provided OpenMM topology and positions."""
  buffer = io.StringIO()
  openmm_app.PDBFile.writeFile(topology, positions, buffer)
  pdb_text = buffer.getvalue()
  buffer.close()
  return pdb_text
def _check_cleaned_atoms(pdb_cleaned_string: str, pdb_ref_string: str):
  """Checks that no atom positions have been altered by cleaning.

  Args:
    pdb_cleaned_string: PDB text after cleanup.
    pdb_ref_string: Original PDB text before cleanup.

  Raises:
    ValueError: If an atom present in both structures has moved.
  """
  cleaned = openmm_app.PDBFile(io.StringIO(pdb_cleaned_string))
  reference = openmm_app.PDBFile(io.StringIO(pdb_ref_string))
  cl_xyz = np.array(cleaned.getPositions().value_in_unit(LENGTH))
  ref_xyz = np.array(reference.getPositions().value_in_unit(LENGTH))
  # Residue order is assumed unchanged by cleaning; atoms are matched by name
  # within each residue pair (atoms missing on either side are skipped).
  for ref_res, cl_res in zip(reference.topology.residues(),
                             cleaned.topology.residues()):
    assert ref_res.name == cl_res.name
    for rat in ref_res.atoms():
      for cat in cl_res.atoms():
        if cat.name == rat.name:
          if not np.array_equal(cl_xyz[cat.index], ref_xyz[rat.index]):
            raise ValueError(f"Coordinates of cleaned atom {cat} do not match "
                             f"coordinates of reference atom {rat}.")
def _check_residues_are_well_defined(prot: protein.Protein):
  """Checks that all residues contain non-empty atom sets.

  Raises:
    ValueError: If any residue has an all-zero atom mask (no atoms).
  """
  if (prot.atom_mask.sum(axis=-1) == 0).any():
    raise ValueError("Amber minimization can only be performed on proteins with"
                     " well-defined residues. This protein contains at least"
                     " one residue with no atoms.")
def _check_atom_mask_is_ideal(prot):
  """Sanity-check the atom mask is ideal, up to a possible OXT."""
  atom_mask = prot.atom_mask
  # Compare against the mask an ideal (complete) residue set would have;
  # the utils helper raises if a non-terminal atom type mismatches.
  ideal_atom_mask = protein.ideal_atom_mask(prot)
  utils.assert_equal_nonterminal_atom_types(atom_mask, ideal_atom_mask)
def clean_protein(
    prot: protein.Protein,
    checks: bool = True):
  """Adds missing atoms to Protein instance.

  Args:
    prot: A `protein.Protein` instance.
    checks: A `bool` specifying whether to add additional checks to the cleaning
      process.

  Returns:
    pdb_string: A string of the cleaned protein.
  """
  _check_atom_mask_is_ideal(prot)
  # Clean pdb.
  prot_pdb_string = protein.to_pdb(prot)
  pdb_file = io.StringIO(prot_pdb_string)
  alterations_info = {}  # populated in place by the cleanup helpers below
  fixed_pdb = cleanup.fix_pdb(pdb_file, alterations_info)
  fixed_pdb_file = io.StringIO(fixed_pdb)
  pdb_structure = PdbStructure(fixed_pdb_file)
  cleanup.clean_structure(pdb_structure, alterations_info)
  logging.info("alterations info: %s", alterations_info)
  # Write pdb file of cleaned structure.
  as_file = openmm_app.PDBFile(pdb_structure)
  pdb_string = _get_pdb_string(as_file.getTopology(), as_file.getPositions())
  if checks:
    # Verify cleaning only added atoms and did not move existing ones.
    _check_cleaned_atoms(pdb_string, prot_pdb_string)
  return pdb_string
def make_atom14_positions(prot):
  """Constructs denser atom positions (14 dimensions instead of 37).

  Args:
    prot: Dict with "aatype", "all_atom_positions" and "all_atom_mask" keys
      in the atom37 representation. Modified in place.

  Returns:
    The same dict, extended with atom14 positions and masks, atom14<->atom37
    index mappings, alternative (name-swapped) ground truth for ambiguous
    atoms, and an ambiguous-atom mask.
  """
  restype_atom14_to_atom37 = []  # mapping (restype, atom14) --> atom37
  restype_atom37_to_atom14 = []  # mapping (restype, atom37) --> atom14
  restype_atom14_mask = []
  # Build per-residue-type lookup tables from the residue constants.
  for rt in residue_constants.restypes:
    atom_names = residue_constants.restype_name_to_atom14_names[
        residue_constants.restype_1to3[rt]]
    restype_atom14_to_atom37.append([
        (residue_constants.atom_order[name] if name else 0)
        for name in atom_names
    ])
    atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
    restype_atom37_to_atom14.append([
        (atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0)
        for name in residue_constants.atom_types
    ])
    # 1.0 for real atom slots, 0.0 for padding slots (empty names).
    restype_atom14_mask.append([(1. if name else 0.) for name in atom_names])
  # Add dummy mapping for restype 'UNK'.
  restype_atom14_to_atom37.append([0] * 14)
  restype_atom37_to_atom14.append([0] * 37)
  restype_atom14_mask.append([0.] * 14)
  restype_atom14_to_atom37 = np.array(restype_atom14_to_atom37, dtype=np.int32)
  restype_atom37_to_atom14 = np.array(restype_atom37_to_atom14, dtype=np.int32)
  restype_atom14_mask = np.array(restype_atom14_mask, dtype=np.float32)
  # Create the mapping for (residx, atom14) --> atom37, i.e. an array
  # with shape (num_res, 14) containing the atom37 indices for this protein.
  residx_atom14_to_atom37 = restype_atom14_to_atom37[prot["aatype"]]
  residx_atom14_mask = restype_atom14_mask[prot["aatype"]]
  # Create a mask for known ground truth positions.
  residx_atom14_gt_mask = residx_atom14_mask * np.take_along_axis(
      prot["all_atom_mask"], residx_atom14_to_atom37, axis=1).astype(np.float32)
  # Gather the ground truth positions.
  residx_atom14_gt_positions = residx_atom14_gt_mask[:, :, None] * (
      np.take_along_axis(prot["all_atom_positions"],
                         residx_atom14_to_atom37[..., None],
                         axis=1))
  prot["atom14_atom_exists"] = residx_atom14_mask
  prot["atom14_gt_exists"] = residx_atom14_gt_mask
  prot["atom14_gt_positions"] = residx_atom14_gt_positions
  prot["residx_atom14_to_atom37"] = residx_atom14_to_atom37
  # Create the gather indices for mapping back.
  residx_atom37_to_atom14 = restype_atom37_to_atom14[prot["aatype"]]
  prot["residx_atom37_to_atom14"] = residx_atom37_to_atom14
  # Create the corresponding mask.
  restype_atom37_mask = np.zeros([21, 37], dtype=np.float32)
  for restype, restype_letter in enumerate(residue_constants.restypes):
    restype_name = residue_constants.restype_1to3[restype_letter]
    atom_names = residue_constants.residue_atoms[restype_name]
    for atom_name in atom_names:
      atom_type = residue_constants.atom_order[atom_name]
      restype_atom37_mask[restype, atom_type] = 1
  residx_atom37_mask = restype_atom37_mask[prot["aatype"]]
  prot["atom37_atom_exists"] = residx_atom37_mask
  # As the atom naming is ambiguous for 7 of the 20 amino acids, provide
  # alternative ground truth coordinates where the naming is swapped
  restype_3 = [
      residue_constants.restype_1to3[res] for res in residue_constants.restypes
  ]
  restype_3 += ["UNK"]
  # Matrices for renaming ambiguous atoms. Default is identity (no swap).
  all_matrices = {res: np.eye(14, dtype=np.float32) for res in restype_3}
  for resname, swap in residue_constants.residue_atom_renaming_swaps.items():
    correspondences = np.arange(14)
    for source_atom_swap, target_atom_swap in swap.items():
      source_index = residue_constants.restype_name_to_atom14_names[
          resname].index(source_atom_swap)
      target_index = residue_constants.restype_name_to_atom14_names[
          resname].index(target_atom_swap)
      correspondences[source_index] = target_index
      correspondences[target_index] = source_index
    # Turn the index correspondences into a 14x14 permutation matrix.
    renaming_matrix = np.zeros((14, 14), dtype=np.float32)
    for index, correspondence in enumerate(correspondences):
      renaming_matrix[index, correspondence] = 1.
    all_matrices[resname] = renaming_matrix.astype(np.float32)
  renaming_matrices = np.stack([all_matrices[restype] for restype in restype_3])
  # Pick the transformation matrices for the given residue sequence
  # shape (num_res, 14, 14).
  renaming_transform = renaming_matrices[prot["aatype"]]
  # Apply it to the ground truth positions. shape (num_res, 14, 3).
  alternative_gt_positions = np.einsum("rac,rab->rbc",
                                       residx_atom14_gt_positions,
                                       renaming_transform)
  prot["atom14_alt_gt_positions"] = alternative_gt_positions
  # Create the mask for the alternative ground truth (differs from the
  # ground truth mask, if only one of the atoms in an ambiguous pair has a
  # ground truth position).
  alternative_gt_mask = np.einsum("ra,rab->rb",
                                  residx_atom14_gt_mask,
                                  renaming_transform)
  prot["atom14_alt_gt_exists"] = alternative_gt_mask
  # Create an ambiguous atoms mask. shape: (21, 14).
  restype_atom14_is_ambiguous = np.zeros((21, 14), dtype=np.float32)
  for resname, swap in residue_constants.residue_atom_renaming_swaps.items():
    for atom_name1, atom_name2 in swap.items():
      restype = residue_constants.restype_order[
          residue_constants.restype_3to1[resname]]
      atom_idx1 = residue_constants.restype_name_to_atom14_names[resname].index(
          atom_name1)
      atom_idx2 = residue_constants.restype_name_to_atom14_names[resname].index(
          atom_name2)
      restype_atom14_is_ambiguous[restype, atom_idx1] = 1
      restype_atom14_is_ambiguous[restype, atom_idx2] = 1
  # From this create an ambiguous_mask for the given sequence.
  prot["atom14_atom_is_ambiguous"] = (
      restype_atom14_is_ambiguous[prot["aatype"]])
  return prot
def find_violations(prot_np: protein.Protein):
  """Analyzes a protein and returns structural violation information.

  Args:
    prot_np: A protein.

  Returns:
    violations: A `dict` of structure components with structural violations.
    violation_metrics: A `dict` of violation metrics.
  """
  # Build the feature batch expected by the folding violation functions.
  batch = {
      "aatype": prot_np.aatype,
      "all_atom_positions": prot_np.atom_positions.astype(np.float32),
      "all_atom_mask": prot_np.atom_mask.astype(np.float32),
      "residue_index": prot_np.residue_index,
  }
  batch["seq_mask"] = np.ones_like(batch["aatype"], np.float32)
  # Adds the atom14 representation, which the violation checks operate on.
  batch = make_atom14_positions(batch)
  violations = folding.find_structural_violations(
      batch=batch,
      atom14_pred_positions=batch["atom14_gt_positions"],
      config=ml_collections.ConfigDict(
          {"violation_tolerance_factor": 12,  # Taken from model config.
           "clash_overlap_tolerance": 1.5,  # Taken from model config.
          }))
  violation_metrics = folding.compute_violation_metrics(
      batch=batch,
      atom14_pred_positions=batch["atom14_gt_positions"],
      violations=violations,
  )
  return violations, violation_metrics
def get_violation_metrics(prot: protein.Protein):
  """Computes violation and alignment metrics."""
  violations, metrics = find_violations(prot)
  # Indices of residues participating in at least one violation.
  offending_residues = np.flatnonzero(
      violations["total_per_residue_violations_mask"])
  metrics.update({
      "residue_violations": offending_residues,
      "num_residue_violations": len(offending_residues),
      "structural_violations": violations,
  })
  return metrics
def _run_one_iteration(
    *,
    pdb_string: str,
    max_iterations: int,
    tolerance: float,
    stiffness: float,
    restraint_set: str,
    max_attempts: int,
    use_gpu: bool,
    exclude_residues: Optional[Collection[int]] = None):
  """Runs the minimization pipeline.

  Args:
    pdb_string: A pdb string.
    max_iterations: An `int` specifying the maximum number of L-BFGS iterations.
      A value of 0 specifies no limit.
    tolerance: kcal/mol, the energy tolerance of L-BFGS.
    stiffness: kcal/mol A**2, spring constant of heavy atom restraining
      potential.
    restraint_set: The set of atoms to restrain.
    max_attempts: The maximum number of minimization attempts.
    use_gpu: Whether to run on GPU.
    exclude_residues: An optional list of zero-indexed residues to exclude from
      restraints.

  Returns:
    A `dict` of minimization info.
  """
  exclude_residues = exclude_residues or []
  # Assign physical dimensions.
  tolerance = tolerance * ENERGY
  stiffness = stiffness * ENERGY / (LENGTH**2)
  start = time.time()
  minimized = False
  attempts = 0
  while not minimized and attempts < max_attempts:
    attempts += 1
    try:
      logging.info("Minimizing protein, attempt %d of %d.",
                   attempts, max_attempts)
      ret = _openmm_minimize(
          pdb_string, max_iterations=max_iterations,
          tolerance=tolerance, stiffness=stiffness,
          restraint_set=restraint_set,
          exclude_residues=exclude_residues,
          use_gpu=use_gpu)
      minimized = True
    except Exception as e:  # pylint: disable=broad-except
      # Minimization can fail sporadically; log the error and retry up to
      # max_attempts times before giving up.
      logging.info(e)
  if not minimized:
    raise ValueError(f"Minimization failed after {max_attempts} attempts.")
  ret["opt_time"] = time.time() - start
  ret["min_attempts"] = attempts
  return ret
def run_pipeline(
    prot: protein.Protein,
    stiffness: float,
    use_gpu: bool,
    max_outer_iterations: int = 1,
    place_hydrogens_every_iteration: bool = True,
    max_iterations: int = 0,
    tolerance: float = 2.39,
    restraint_set: str = "non_hydrogen",
    max_attempts: int = 100,
    checks: bool = True,
    exclude_residues: Optional[Sequence[int]] = None):
  """Run iterative amber relax.

  Successive relax iterations are performed until all violations have been
  resolved. Each iteration involves a restrained Amber minimization, with
  restraint exclusions determined by violation-participating residues.

  Args:
    prot: A protein to be relaxed.
    stiffness: kcal/mol A**2, the restraint stiffness.
    use_gpu: Whether to run on GPU.
    max_outer_iterations: The maximum number of iterative minimization.
    place_hydrogens_every_iteration: Whether hydrogens are re-initialized
      prior to every minimization.
    max_iterations: An `int` specifying the maximum number of L-BFGS steps
      per relax iteration. A value of 0 specifies no limit.
    tolerance: kcal/mol, the energy tolerance of L-BFGS.
      The default value is the OpenMM default.
    restraint_set: The set of atoms to restrain.
    max_attempts: The maximum number of minimization attempts per iteration.
    checks: Whether to perform cleaning checks.
    exclude_residues: An optional list of zero-indexed residues to exclude from
      restraints.

  Returns:
    out: A dictionary of output values.
  """
  # `protein.to_pdb` will strip any poorly-defined residues so we need to
  # perform this check before `clean_protein`.
  _check_residues_are_well_defined(prot)
  pdb_string = clean_protein(prot, checks=checks)
  exclude_residues = exclude_residues or []
  exclude_residues = set(exclude_residues)
  violations = np.inf
  iteration = 0
  while violations > 0 and iteration < max_outer_iterations:
    ret = _run_one_iteration(
        pdb_string=pdb_string,
        exclude_residues=exclude_residues,
        max_iterations=max_iterations,
        tolerance=tolerance,
        stiffness=stiffness,
        restraint_set=restraint_set,
        max_attempts=max_attempts,
        use_gpu=use_gpu)
    prot = protein.from_pdb_string(ret["min_pdb"])
    if place_hydrogens_every_iteration:
      pdb_string = clean_protein(prot, checks=True)
    else:
      pdb_string = ret["min_pdb"]
    ret.update(get_violation_metrics(prot))
    ret.update({
        "num_exclusions": len(exclude_residues),
        "iteration": iteration,
    })
    violations = ret["violations_per_residue"]
    # Residues that violated this round are excluded from restraints next round.
    exclude_residues = exclude_residues.union(ret["residue_violations"])
    logging.info("Iteration completed: Einit %.2f Efinal %.2f Time %.2f s "
                 "num residue violations %d num residue exclusions %d ",
                 ret["einit"], ret["efinal"], ret["opt_time"],
                 ret["num_residue_violations"], ret["num_exclusions"])
    iteration += 1
  return ret
| {
"content_hash": "d947d9d1fd7e2fd06bf5c860c2466c60",
"timestamp": "",
"source": "github",
"line_count": 488,
"max_line_length": 80,
"avg_line_length": 37.549180327868854,
"alnum_prop": 0.6886596812922943,
"repo_name": "deepmind/alphafold",
"id": "4694f440295278724b18574d5e8e1c6592b220f3",
"size": "18919",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "alphafold/relax/amber_minimize.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3420"
},
{
"name": "Jupyter Notebook",
"bytes": "43833"
},
{
"name": "Python",
"bytes": "796703"
},
{
"name": "Shell",
"bytes": "17888"
}
],
"symlink_target": ""
} |
from pycket import values, values_parameter
from pycket.arity import Arity
from pycket.argument_parser import ArgParser, EndOfInput
from pycket.prims.expose import default, expose, expose_val
from rpython.rlib import jit
DEBUG = values.W_Symbol.make("debug")
# Default logger: no topic, no parent, no propagate level, no propagate topics.
w_default_logger = values.W_Logger(values.w_false, values.w_false, values.w_false, [])
LOG_LEVEL = ['none', 'fatal', 'error', 'warning', 'info', 'debug']
# Acceptable log-level arguments: the level symbols plus #f.
# NOTE(review): `map(...) + [...]` relies on Python 2's list-returning map
# (RPython is Python-2 based); under Python 3 this would raise TypeError.
LOG_LEVEL = tuple(map(values.W_Symbol.make, LOG_LEVEL) + [values.w_false])
@expose("make-logger", arity=Arity.geq(0))
@jit.unroll_safe
def make_logger(args):
    """Implements Racket's `make-logger`.

    Accepts optional positional arguments in order: a topic symbol, a parent
    logger, and a propagate level (defaulting to 'debug); any remaining
    arguments are treated as propagate topics.
    """
    parser = ArgParser("make-logger", args)
    topic = values.w_false
    parent = values.w_false
    propagate_level = DEBUG
    try:
        topic = parser.expect(values.W_Symbol, values.w_false)
        parent = parser.expect(values.W_Logger, values.w_false)
        propagate_level = parser.expect(*LOG_LEVEL)
    except EndOfInput:
        # Fewer arguments than expected: keep the defaults assigned above.
        pass
    # Any remaining arguments are propagate topics
    propagate_topic = parser.expect_many(values.W_Symbol, values.w_false)
    return values.W_Logger(topic, parent, propagate_level, propagate_topic)
@expose("log-level?", [values.W_Object, values.W_Object, default(values.W_Object, values.w_false)])
def log_level(logger, level, topic):
    """Stub for Racket's `log-level?`: always answers #f (no logging enabled)."""
    # TODO: Actual implementation
    return values.w_false
@expose("log-message", arity=Arity.oneof(4, 5, 6))
def log_message(args):
    """Stub for Racket's `log-message`: accepts the call but discards it."""
    # TODO: Actual implementation
    return
@expose("logger-name", [values.W_Logger])
def logger_name(logger):
    """Implements Racket's `logger-name`: returns the logger's topic symbol."""
    return logger.topic

# The current logger is a Racket parameter seeded with the default logger.
w_current_logger = values_parameter.W_Parameter(w_default_logger)
expose_val("current-logger", w_current_logger)
| {
"content_hash": "b62485592f87c01a7078ca3d16e412e1",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 99,
"avg_line_length": 33.6078431372549,
"alnum_prop": 0.6878646441073513,
"repo_name": "magnusmorton/pycket",
"id": "f1bbb1c99f9c7d86d333af563c951920673cbeff",
"size": "1715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycket/prims/logging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "654"
},
{
"name": "Eagle",
"bytes": "1986"
},
{
"name": "KiCad Layout",
"bytes": "76411"
},
{
"name": "Makefile",
"bytes": "2680"
},
{
"name": "Python",
"bytes": "1059030"
},
{
"name": "Racket",
"bytes": "702764"
},
{
"name": "Scheme",
"bytes": "215"
},
{
"name": "Shell",
"bytes": "8746"
}
],
"symlink_target": ""
} |
import os
import shlex
import six
import sys
from mock import MagicMock
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# path = os.path.abspath('../paradrop/paradrop')
# print path
sys.path.insert(0, os.path.abspath('../paradrop/daemon'))
sys.path.insert(1, os.path.abspath('../tools/pdtools'))
# -- General configuration ------------------------------------------------
add_module_names = False

import os  # NOTE(review): redundant -- os is already imported at the top of this file.
# ReadTheDocs injects its own theme, so only set sphinx_rtd_theme locally.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinxcontrib.autohttp.flask',
'sphinx-jsonschema',
'sphinx_click.ext'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'paradrop'
copyright = u'2017-2018, ParaDrop Labs'
author = u'ParaDrop Labs'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.13"
# The full version, including alpha/beta/rc tags.
release = "0.13.2"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
'_build',
'venv'
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'paradropdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'paradrop.tex', u'paradrop Documentation',
u'Paradrop Labs', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Section 1 is the man section for user commands.
man_pages = [
    (master_doc, 'paradrop', u'paradrop Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# TODO: the description below is still the sphinx-quickstart placeholder;
# replace it with a real one-line project description.
texinfo_documents = [
    (master_doc, 'paradrop', u'paradrop Documentation',
     author, 'paradrop', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
# Use the named-target form {name: (base_url, inventory)}; the bare
# {url: None} form used previously is deprecated and was removed in Sphinx 8.
# None as the inventory means "fetch objects.inv from the base URL".
intersphinx_mapping = {'python': ('https://docs.python.org/', None)}
# -- Mock out modules -------------------------------------------
# These modules use C extensions which cause trouble when building on
# readthedocs.
MOCK_MODULES = ['git', 'pulsectl', 'pycurl']
for mod_name in MOCK_MODULES:
    # Registering a MagicMock under the module name makes "import git" etc.
    # succeed during autodoc without the real dependency being installed.
    sys.modules[mod_name] = MagicMock()
#
# Monkey patch sphinxcontrib.autohttp.flask_base for documenting our Klein API.
#
# Modified version of get_routes function from sphinxcontrib.autohttp.flask_base.
#
# Additions:
# - Default to GET if the methods list is missing.
# - Search for doc_prefix attribute and insert into the path that will be
# displayed.
def get_routes(app, endpoint=None, order=None):
    """
    Yield (method, paths, endpoint) triples for the routes registered on *app*.

    Drop-in replacement for sphinxcontrib.autohttp.flask_base.get_routes with
    two additions: rules with no ``methods`` set default to GET, and a rule's
    ``doc_prefix`` attribute (when present) is prepended to the displayed path.
    """
    # Collect unique (url, endpoint) pairs, preserving registration order.
    seen_pairs = []
    for rule in app.url_map.iter_rules(endpoint):
        pair = (
            six.text_type(next(app.url_map.iter_rules(rule.endpoint))),
            rule.endpoint
        )
        if pair not in seen_pairs:
            seen_pairs.append(pair)
    if order == 'path':
        # Tuples sort by URL first, which is the ordering we want.
        seen_pairs.sort()
    for _, current_endpoint in seen_pairs:
        # Group the display paths for this endpoint by HTTP method.
        paths_by_method = {}
        for rule in app.url_map.iter_rules(current_endpoint):
            if rule.methods is None:
                methods = ['GET']  # default when the rule declares no methods
            else:
                methods = rule.methods.difference(['OPTIONS', 'HEAD'])
            prefix = getattr(rule, 'doc_prefix', '')
            display_path = prefix + sphinxcontrib.autohttp.flask_base.translate_werkzeug_rule(rule.rule)
            for method in methods:
                paths_by_method.setdefault(method, []).append(display_path)
        for method, paths in paths_by_method.items():
            yield method, paths, current_endpoint
import sphinxcontrib.autohttp.flask_base
# Replace the extension's route collector with the patched version above.
sphinxcontrib.autohttp.flask_base.get_routes = get_routes
| {
"content_hash": "6825391f53e5db6a88dbb004a8b42175",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 96,
"avg_line_length": 32.43909348441926,
"alnum_prop": 0.6872762204174307,
"repo_name": "ParadropLabs/Paradrop",
"id": "b449cd448ed9fc41ca5cec1475de606755b46dd6",
"size": "11872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "148071"
},
{
"name": "Dockerfile",
"bytes": "10449"
},
{
"name": "HTML",
"bytes": "554"
},
{
"name": "Makefile",
"bytes": "1665"
},
{
"name": "Python",
"bytes": "1049444"
},
{
"name": "Shell",
"bytes": "9897"
}
],
"symlink_target": ""
} |
from django import forms
from teacher.models import Teacher_personal_detail
class Teacher_personal_detailform(forms.ModelForm):
    """ModelForm for creating/editing a Teacher_personal_detail record."""
    # Override the model's date field so the form renders and parses dates
    # as dd/mm/YYYY instead of the locale default.
    dob = forms.DateField(widget=forms.DateInput(format = '%d/%m/%Y'), input_formats=('%d/%m/%Y',))
    class Meta:
        model = Teacher_personal_detail
        # NOTE(review): no ``fields``/``exclude`` declared, so all model
        # fields are exposed; Django >= 1.8 raises ImproperlyConfigured for
        # this form -- confirm against the Django version this project pins.
class Teacher_personal_detail_Poolform(forms.ModelForm):
    """Reduced form exposing only name, GPF number and date of birth."""
    class Meta:
        model = Teacher_personal_detail
        fields = ['name','gpf_no','dob']
"content_hash": "1879f0af1e65ed3aa4282f4c1755632b",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 99,
"avg_line_length": 27.166666666666668,
"alnum_prop": 0.6728016359918201,
"repo_name": "tnemisteam/cdf-steps",
"id": "f570b99079670ccd01ac59731e0d33d0b505630b",
"size": "489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "teacher/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "766660"
},
{
"name": "HTML",
"bytes": "1419093"
},
{
"name": "JavaScript",
"bytes": "3631641"
},
{
"name": "PHP",
"bytes": "5016"
},
{
"name": "Python",
"bytes": "921129"
}
],
"symlink_target": ""
} |
'''OpenGL extension ARB.gpu_shader_fp64
This module customises the behaviour of the
OpenGL.raw.GL.ARB.gpu_shader_fp64 to provide a more
Python-friendly API
Overview (from the spec)
This extension allows GLSL shaders to use double-precision floating-point
data types, including vectors and matrices of doubles. Doubles may be
used as inputs, outputs, and uniforms.
The shading language supports various arithmetic and comparison operators
on double-precision scalar, vector, and matrix types, and provides a set
of built-in functions including:
* square roots and inverse square roots;
* fused floating-point multiply-add operations;
* splitting a floating-point number into a significand and exponent
(frexp), or building a floating-point number from a significand and
exponent (ldexp);
* absolute value, sign tests, various functions to round to an integer
value, modulus, minimum, maximum, clamping, blending two values, step
functions, and testing for infinity and NaN values;
* packing and unpacking doubles into a pair of 32-bit unsigned integers;
* matrix component-wise multiplication, and computation of outer
products, transposes, determinants, and inverses; and
* vector relational functions.
Double-precision versions of angle, trigonometry, and exponential
functions are not supported.
Implicit conversions are supported from integer and single-precision
floating-point values to doubles, and this extension uses the relaxed
function overloading rules specified by the ARB_gpu_shader5 extension to
resolve ambiguities.
This extension provides API functions for specifying double-precision
uniforms in the default uniform block, including functions similar to the
uniform functions added by EXT_direct_state_access (if supported).
This extension provides an "LF" suffix for specifying double-precision
constants. Floating-point constants without a suffix in GLSL are treated
as single-precision values for backward compatibility with versions not
supporting doubles; similar constants are treated as double-precision
values in the "C" programming language.
This extension does not support interpolation of double-precision values;
doubles used as fragment shader inputs must be qualified as "flat".
Additionally, this extension does not allow vertex attributes with 64-bit
components. That support is added separately by EXT_vertex_attrib_64bit.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/gpu_shader_fp64.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.ARB.gpu_shader_fp64 import *
### END AUTOGENERATED SECTION | {
"content_hash": "1364f7db59bbc38b00e92d861ea5811b",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 75,
"avg_line_length": 41.88059701492537,
"alnum_prop": 0.7890235210263721,
"repo_name": "Universal-Model-Converter/UMC3.0a",
"id": "f925fdf6a3db45d05c5945f1b0fe99fcee33dbc6",
"size": "2806",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/Python/x86/Lib/site-packages/OpenGL/GL/ARB/gpu_shader_fp64.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "226"
},
{
"name": "C",
"bytes": "1082640"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "3621086"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "5943"
},
{
"name": "HTML",
"bytes": "1196266"
},
{
"name": "Java",
"bytes": "5793"
},
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "33351557"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "6931"
},
{
"name": "Tcl",
"bytes": "2084458"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
"""The tests for the notify yessssms platform."""
import unittest
import requests_mock
import homeassistant.components.yessssms.notify as yessssms
class TestNotifyYesssSMS(unittest.TestCase):
    """Unit tests for the YesssSMS notification service.

    All HTTP traffic is intercepted with requests_mock; no real YesssSMS
    account or network access is involved.
    """
    def setUp(self):  # pylint: disable=invalid-name
        """Set up things to be run when tests are started."""
        # Dummy Austrian mobile numbers and password used only as fixtures.
        login = "06641234567"
        passwd = "testpasswd"
        recipient = "06501234567"
        self.yessssms = yessssms.YesssSMSNotificationService(login, passwd, recipient)
    @requests_mock.Mocker()
    def test_login_error(self, mock):
        """Test login that fails."""
        # The provider signals a failed login inside an HTTP 200 page.
        mock.register_uri(
            requests_mock.POST,
            # pylint: disable=protected-access
            self.yessssms.yesss._login_url,
            status_code=200,
            text="BlaBlaBla<strong>Login nicht erfolgreichBlaBla",
        )
        message = "Testing YesssSMS platform :)"
        # A failed login must be logged as an error; only one request is made.
        with self.assertLogs("homeassistant.components.yessssms.notify", level="ERROR"):
            self.yessssms.send_message(message)
        self.assertTrue(mock.called)
        self.assertEqual(mock.call_count, 1)
    def test_empty_message_error(self):
        """Test for an empty SMS message error."""
        message = ""
        # Sending an empty message should be rejected before any HTTP call.
        with self.assertLogs("homeassistant.components.yessssms.notify", level="ERROR"):
            self.yessssms.send_message(message)
    @requests_mock.Mocker()
    def test_error_account_suspended(self, mock):
        """Test login that fails after multiple attempts."""
        mock.register_uri(
            "POST",
            # pylint: disable=protected-access
            self.yessssms.yesss._login_url,
            status_code=200,
            text="BlaBlaBla<strong>Login nicht erfolgreichBlaBla",
        )
        message = "Testing YesssSMS platform :)"
        with self.assertLogs("homeassistant.components.yessssms.notify", level="ERROR"):
            self.yessssms.send_message(message)
        self.assertTrue(mock.called)
        self.assertEqual(mock.call_count, 1)
        # Second attempt: the provider now reports the account as locked
        # ("suspended for one hour after 3 invalid login attempts").
        mock.register_uri(
            "POST",
            # pylint: disable=protected-access
            self.yessssms.yesss._login_url,
            status_code=200,
            text="Wegen 3 ungültigen Login-Versuchen ist Ihr Account für "
            "eine Stunde gesperrt.",
        )
        message = "Testing YesssSMS platform :)"
        with self.assertLogs("homeassistant.components.yessssms.notify", level="ERROR"):
            self.yessssms.send_message(message)
        self.assertTrue(mock.called)
        self.assertEqual(mock.call_count, 2)
    def test_error_account_suspended_2(self):
        """Test login that fails after multiple attempts."""
        message = "Testing YesssSMS platform :)"
        # Force the suspended state directly; no HTTP request should happen.
        # pylint: disable=protected-access
        self.yessssms.yesss._suspended = True
        with self.assertLogs(
            "homeassistant.components.yessssms.notify", level="ERROR"
        ) as context:
            self.yessssms.send_message(message)
        self.assertIn("Account is suspended, cannot send SMS.", context.output[0])
    @requests_mock.Mocker()
    def test_send_message(self, mock):
        """Test send message."""
        message = "Testing YesssSMS platform :)"
        # Happy path: login (302 to kontomanager), session check, send, logout.
        mock.register_uri(
            "POST",
            # pylint: disable=protected-access
            self.yessssms.yesss._login_url,
            status_code=302,
            # pylint: disable=protected-access
            headers={"location": self.yessssms.yesss._kontomanager},
        )
        # pylint: disable=protected-access
        login = self.yessssms.yesss._logindata["login_rufnummer"]
        # The kontomanager page must contain the login number to prove the
        # session is valid.
        mock.register_uri(
            "GET",
            # pylint: disable=protected-access
            self.yessssms.yesss._kontomanager,
            status_code=200,
            text="test..." + login + "</a>",
        )
        mock.register_uri(
            "POST",
            # pylint: disable=protected-access
            self.yessssms.yesss._websms_url,
            status_code=200,
            text="<h1>Ihre SMS wurde erfolgreich verschickt!</h1>",
        )
        mock.register_uri(
            "GET",
            # pylint: disable=protected-access
            self.yessssms.yesss._logout_url,
            status_code=200,
        )
        with self.assertLogs(
            "homeassistant.components.yessssms.notify", level="INFO"
        ) as context:
            self.yessssms.send_message(message)
        self.assertIn("SMS sent", context.output[0])
        self.assertTrue(mock.called)
        # login + kontomanager + websms + logout = 4 requests.
        self.assertEqual(mock.call_count, 4)
        # The final request must have been the logout URL.
        self.assertIn(
            mock.last_request.scheme
            + "://"
            + mock.last_request.hostname
            + mock.last_request.path
            + "?"
            + mock.last_request.query,
            # pylint: disable=protected-access
            self.yessssms.yesss._logout_url,
        )
    def test_no_recipient_error(self):
        """Test for missing/empty recipient."""
        message = "Testing YesssSMS platform :)"
        # pylint: disable=protected-access
        self.yessssms._recipient = ""
        with self.assertLogs(
            "homeassistant.components.yessssms.notify", level="ERROR"
        ) as context:
            self.yessssms.send_message(message)
        self.assertIn(
            "You need to provide a recipient for SMS notification", context.output[0]
        )
    @requests_mock.Mocker()
    def test_sms_sending_error(self, mock):
        """Test sms sending error."""
        # Login succeeds, but the websms endpoint returns a server error.
        mock.register_uri(
            "POST",
            # pylint: disable=protected-access
            self.yessssms.yesss._login_url,
            status_code=302,
            # pylint: disable=protected-access
            headers={"location": self.yessssms.yesss._kontomanager},
        )
        # pylint: disable=protected-access
        login = self.yessssms.yesss._logindata["login_rufnummer"]
        mock.register_uri(
            "GET",
            # pylint: disable=protected-access
            self.yessssms.yesss._kontomanager,
            status_code=200,
            text="test..." + login + "</a>",
        )
        mock.register_uri(
            "POST",
            # pylint: disable=protected-access
            self.yessssms.yesss._websms_url,
            status_code=500,
        )
        message = "Testing YesssSMS platform :)"
        with self.assertLogs(
            "homeassistant.components.yessssms.notify", level="ERROR"
        ) as context:
            self.yessssms.send_message(message)
        self.assertTrue(mock.called)
        # login + kontomanager + failing websms = 3 requests; no logout.
        self.assertEqual(mock.call_count, 3)
        self.assertIn("YesssSMS: error sending SMS", context.output[0])
    @requests_mock.Mocker()
    def test_connection_error(self, mock):
        """Test connection error."""
        # Simulate a network failure on the very first (login) request.
        mock.register_uri(
            "POST",
            # pylint: disable=protected-access
            self.yessssms.yesss._login_url,
            exc=ConnectionError,
        )
        message = "Testing YesssSMS platform :)"
        with self.assertLogs(
            "homeassistant.components.yessssms.notify", level="ERROR"
        ) as context:
            self.yessssms.send_message(message)
        self.assertTrue(mock.called)
        self.assertEqual(mock.call_count, 1)
        self.assertIn("unable to connect", context.output[0])
| {
"content_hash": "c7e9e0a21b65af82e0f5b26bc31148b4",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 88,
"avg_line_length": 34.89622641509434,
"alnum_prop": 0.5884022708840227,
"repo_name": "fbradyirl/home-assistant",
"id": "3d11cdedc67bd71bcc32e1d5b5e25e55f7d11707",
"size": "7400",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/yessssms/test_notify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16494727"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17784"
}
],
"symlink_target": ""
} |
from . import sysfs
##
# @brief Simple containter class used for pass by reference.
class Reference(object):
    """Simple container class used to emulate pass-by-reference semantics."""
    def __init__(self, value=None):
        ## Read/write attribute holding the referrent.
        self.value = value
    def __str__(self):
        # Rendered as "<Reference@<hex id> value=<repr>>" for easy debugging.
        return "<{0}@{1:x} value={2!r}>".format(
            self.__class__.__name__, id(self), self.value)
    def __repr__(self):
        return str(self)
| {
"content_hash": "daf5febc3e0a8fcd8d58bc2855850a52",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 89,
"avg_line_length": 26.533333333333335,
"alnum_prop": 0.5879396984924623,
"repo_name": "EmbeddedRPC/erpc-imx-demos",
"id": "3e5680dc1f4325aae1f7f251cdff14ded6e9a813",
"size": "1968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "middleware/rpmsg-python/rpmsg/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "55789"
},
{
"name": "BitBake",
"bytes": "710"
},
{
"name": "C",
"bytes": "7339305"
},
{
"name": "C++",
"bytes": "349781"
},
{
"name": "CMake",
"bytes": "71372"
},
{
"name": "Python",
"bytes": "30659"
},
{
"name": "Shell",
"bytes": "2600"
}
],
"symlink_target": ""
} |
"""A script for dumping experiment results to csv files.
This aims to make the data easy to consume via straight up python.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import re
import time
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
# Batch size is used only to name the default output directory.
BATCH_SIZE = 128
# Default for --root_dir; empty means the flag must be supplied at run time.
MODEL_ROOT = ''
OUTPUT_DIR = '/tmp/igt_neurips19_imagenet_bs={}'.format(BATCH_SIZE)
flags.DEFINE_string(
    'root_dir',
    default=MODEL_ROOT,
    help='Root directory containing the experiment directories.')
flags.DEFINE_string('output_dir', default=OUTPUT_DIR, help='Output directory.')
FLAGS = flags.FLAGS
def get_experiment_dirs(root_dir):
    """Return the list of sub-directories of *root_dir*.

    Plain files in *root_dir* are skipped; order follows tf.io.gfile.listdir.
    """
    return [
        os.path.join(root_dir, entry)
        for entry in tf.io.gfile.listdir(root_dir)
        if tf.io.gfile.isdir(os.path.join(root_dir, entry))
    ]
def get_metrics_from_file(file_path):
    """Returns (steps, losses, top-1 accuracies) from one TF summary file."""
    steps = []
    losses = []
    accuracies = []
    for event in tf.compat.v1.train.summary_iterator(file_path):
        step = event.step
        got_loss = False
        got_accuracy = False
        for value in event.summary.value:
            if value.tag == 'loss':
                losses.append(value.simple_value)
                got_loss = True
            elif value.tag == 'top_1_accuracy':
                accuracies.append(value.simple_value)
                got_accuracy = True
        # 'loss' and 'top_1_accuracy' are expected to be logged together in
        # the same event; an event carrying only one of them is a data error.
        assert got_loss == got_accuracy
        if got_loss:
            steps.append(step)
    return steps, losses, accuracies
def get_metrics(metrics_dir):
    """Return concatenated (steps, losses, accuracies) from a metrics dir.

    Reads every summary file in *metrics_dir*, sorts the per-file series by
    their first step, concatenates them, and asserts the combined step
    sequence is ascending.
    """
    files = []
    for name in tf.gfile.ListDirectory(metrics_dir):
        files.append(os.path.join(metrics_dir, name))
    data = []
    for file_path in files:
        steps, losses, accuracies = get_metrics_from_file(file_path)
        data.append((steps, losses, accuracies))
    data.sort()  # Sort by ascending step.
    all_steps = []
    all_losses = []
    all_accuracies = []
    for steps, losses, accuracies in data:
        all_steps.extend(steps)
        all_losses.extend(losses)
        all_accuracies.extend(accuracies)
    # Files must cover disjoint, ordered step ranges for this to hold.
    assert all_steps == sorted(all_steps)
    return all_steps, all_losses, all_accuracies
def filter_duplicates(steps, losses, accuracies):
    """Return copies of the three series with duplicate steps dropped.

    Only the first entry of each run of equal consecutive steps is kept; the
    loss/accuracy values at the same positions are kept alongside it.
    """
    assert steps
    assert len(steps) == len(losses)
    assert len(steps) == len(accuracies)
    uniq_steps = []
    uniq_losses = []
    uniq_accuracies = []
    previous = object()  # sentinel that never equals a real step
    for step, loss, accuracy in zip(steps, losses, accuracies):
        if step != previous:
            uniq_steps.append(step)
            uniq_losses.append(loss)
            uniq_accuracies.append(accuracy)
        previous = step
    return uniq_steps, uniq_losses, uniq_accuracies
def dump_metrics(experiment_dir, parameters='shift'):
    """Dump metrics from an experiment directory to a csv file.

    Writes rows of (step, train loss, train accuracy, test loss, test
    accuracy) to FLAGS.output_dir/<experiment name>[_true].csv.

    Args:
      experiment_dir: A string, the experiment directory.
      parameters: A string, the parameters for which to dump metrics (shift or
        true).
    """
    train_metrics_dir = 'eval_train_' + parameters
    train_metrics_dir = os.path.join(experiment_dir, train_metrics_dir)
    train_steps, train_losses, train_accuracies = get_metrics(train_metrics_dir)
    test_metrics_dir = 'eval_eval_' + parameters
    test_metrics_dir = os.path.join(experiment_dir, test_metrics_dir)
    test_steps, test_losses, test_accuracies = get_metrics(test_metrics_dir)
    # Observed some duplicates train / test steps.
    train_steps, train_losses, train_accuracies = filter_duplicates(
        train_steps, train_losses, train_accuracies)
    test_steps, test_losses, test_accuracies = filter_duplicates(
        test_steps, test_losses, test_accuracies)
    # Print both step sequences before failing so mismatches are debuggable.
    if train_steps != test_steps:
        print(train_steps)
        print(test_steps)
    assert train_steps == test_steps
    data = zip(train_steps, train_losses, train_accuracies, test_losses,
               test_accuracies)
    out_file = os.path.basename(experiment_dir)
    if parameters == 'true':
        out_file += '_true'
    out_file += '.csv'
    out_file = os.path.join(FLAGS.output_dir, out_file)
    tf.logging.info('Dumping results to %s', out_file)
    with tf.gfile.Open(out_file, 'w') as fd:
        spamwriter = csv.writer(fd)
        spamwriter.writerows(data)
def process_experiment(experiment_dir):
    """Dump shift-parameter metrics; IGT runs also get true-parameter metrics."""
    dump_metrics(experiment_dir)
    # IGT experiments ('opt=eigt' in the directory name) additionally log
    # metrics evaluated at the true parameters.
    if 'opt=eigt' in os.path.basename(experiment_dir):
        dump_metrics(experiment_dir, 'true')
def main(_):
    """Dump csv metrics for every experiment found under FLAGS.root_dir."""
    tf.logging.info('Using output directory: %s', FLAGS.output_dir)
    tf.io.gfile.makedirs(FLAGS.output_dir)
    experiment_dirs = get_experiment_dirs(FLAGS.root_dir)
    tf.logging.info('Found %d experiments.', len(experiment_dirs))
    for i, experiment_dir in enumerate(experiment_dirs):
        start = time.time()
        tf.logging.info('Processing %d: %s', i, experiment_dir)
        process_experiment(experiment_dir)
        tf.logging.info('Processing took %d seconds.', time.time() - start)
if __name__ == '__main__':
    app.run(main)
| {
"content_hash": "acb2e58f5b74fecf18ce4cd8d1add0a1",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 79,
"avg_line_length": 29.69942196531792,
"alnum_prop": 0.691124951342935,
"repo_name": "google-research/google-research",
"id": "3c1962a02991f94bba35156127356e83043c2837",
"size": "5746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "igt_optimizer/dump_metrics_to_csv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
# Long description shown on PyPI, taken verbatim from the README.
with open('README.rst') as readme_file:
    readme = readme_file.read()
# Runtime dependencies; 'marvinbot' itself is resolved via dependency_links.
requirements = [
    'marvinbot',
    'bs4'
]
setup(
    name='marvinbot_package_tracker_plugin',
    version='0.1.0',
    description="A plugin for marvinbot to track packages.",
    long_description=readme,
    author="Ricardo Cabral",
    author_email='ricardo.arturo.cabral@gmail.com',
    url='https://github.com/Cameri/marvinbot_package_tracker_plugin',
    packages=find_packages(),
    include_package_data=True,
    install_requires=requirements,
    license="MIT license",
    zip_safe=False,
    keywords='marvinbot_package_tracker_plugin',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    test_suite='tests',
    # Bug fix: a "git+ssh://" URL must use '/' after the host. The scp-style
    # "host:path" form (github.com:BotDevGroup) is only valid without the
    # URL scheme and makes pip fail to resolve this dependency link.
    dependency_links=[
        'git+ssh://git@github.com/BotDevGroup/marvin.git#egg=marvinbot',
    ],
)
| {
"content_hash": "2666b32019d83fafabb8df2c8dcc597d",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 72,
"avg_line_length": 31.25581395348837,
"alnum_prop": 0.6302083333333334,
"repo_name": "BotDevGroup/marvinbot_package_tracker_plugin",
"id": "14f7e57282dd5549febad5763a43d6ec7bdbe391",
"size": "1390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25608"
}
],
"symlink_target": ""
} |
"""
Collect icmp round trip times
Only valid for ipv4 hosts currently
#### Dependencies
* ping
#### Configuration
Configuration is done by:
Create a file named: PingCollector.conf in the collectors_config_path
* enabled = true
* interval = 60
* target_1 = example.org
* target_fw = 192.168.0.1
* target_localhost = localhost
Test your configuration using the following command:
diamond-setup --print -C PingCollector
You should get a reponse back that indicates 'enabled': True and see entries
for your targets in pairs like:
'target_1': 'example.org'
We extract out the key after target_ and use it in the graphite node we push.
"""
import os
import sys
import diamond.collector
from diamond.collector import str_to_bool
if os.name == 'posix' and sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
class PingCollector(diamond.collector.Collector):
    """Collects ICMP round-trip times by shelling out to the ping binary."""
    def get_default_config_help(self):
        # Help strings for the options this collector adds on top of the
        # base Collector options.
        config_help = super(PingCollector, self).get_default_config_help()
        config_help.update({
            'bin': 'The path to the ping binary',
            'use_sudo': 'Use sudo?',
            'sudo_cmd': 'Path to sudo',
            'timeout': 'Timeout on the ping command'
        })
        return config_help
    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(PingCollector, self).get_default_config()
        config.update({
            'path': 'ping',
            'bin': '/bin/ping',
            'use_sudo': False,
            'sudo_cmd': '/usr/bin/sudo',
            'timeout': None,
            # Hard limit (seconds) on the ping subprocess itself.
            'subprocess_timeout': 15
        })
        return config
    def collect(self):
        """Ping every configured target_* host and publish its RTT."""
        for key in self.config.keys():
            if key[:7] == "target_":
                host = self.config[key]
                # Graphite metric nodes cannot contain dots.
                metric_name = host.replace('.', '_')
                if not os.access(self.config['bin'], os.X_OK):
                    self.log.error("Path %s does not exist or is not executable"
                                   % self.config['bin'])
                    return
                # -n numeric output, -q quiet summary, single probe.
                command = [self.config['bin'], '-nq', '-c 1']
                timeout = self.config['timeout']
                if timeout:
                    # NOTE(review): passed as a single "-w N" argv entry;
                    # relies on ping's getopt accepting " N" as the value --
                    # confirm this works on the targeted platforms.
                    command.append('-w %s' % timeout)
                command.append(host)
                if str_to_bool(self.config['use_sudo']):
                    command.insert(0, self.config['sudo_cmd'])
                timeout = int(self.config['subprocess_timeout'])
                # Keep only the last line, i.e. the rtt summary line.
                ping = subprocess.Popen(
                    command, stdout=subprocess.PIPE).communicate(
                    timeout=timeout)[0].strip().split("\n")[-1]
                # Linux
                if ping.startswith('rtt'):
                    # "rtt min/avg/max/mdev = a/b/c/d ms" -> take min.
                    ping = ping.split()[3].split('/')[0]
                    metric_value = float(ping)
                # OS X
                elif ping.startswith('round-trip '):
                    ping = ping.split()[3].split('/')[0]
                    metric_value = float(ping)
                # Unknown
                else:
                    # Sentinel published when the host is unreachable or the
                    # output format was not recognised.
                    metric_value = 10000
                self.publish(metric_name, metric_value)
| {
"content_hash": "eeb9501f7de829e7b28342061991030e",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 80,
"avg_line_length": 30.36111111111111,
"alnum_prop": 0.5272949069838365,
"repo_name": "tellapart/Diamond",
"id": "d485711881dfec494443524376bf68832e1a4f7b",
"size": "3295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/collectors/ping/ping.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "21681"
},
{
"name": "Makefile",
"bytes": "4876"
},
{
"name": "Python",
"bytes": "1383118"
},
{
"name": "Roff",
"bytes": "17806"
},
{
"name": "Ruby",
"bytes": "230"
},
{
"name": "Shell",
"bytes": "7637"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from . import views
urlpatterns = [
    url(r'^$', views.index, name='index'),
    # Bug fix: URL patterns are matched against the remaining path with no
    # leading slash, so the old pattern r'^/post/' could never match. The
    # sibling r'^$' pattern shows this urlconf is mounted behind a
    # trailing-slash include() prefix, confirming the slash was spurious.
    url(r'^post/', views.post, name='post'),
]
"content_hash": "1e566044c5d215971fb3cf6d96556450",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 45,
"avg_line_length": 20.125,
"alnum_prop": 0.6273291925465838,
"repo_name": "deniszgonjanin/docker12factor",
"id": "6cb7136e2c0a5eb2d505e1d1a308226df7fe6104",
"size": "161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "comments/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "208"
},
{
"name": "Python",
"bytes": "5839"
}
],
"symlink_target": ""
} |
"""
Utilities for working with collections.
"""
from collections import Iterable, Mapping
import imp
from functools import partial
from revkom.data import BooleanData
from revkom.mixins import PropMetaclass
def deep_update(mapping, updated):
    """
    Recursively merge ``updated`` into ``mapping`` and return ``mapping``.

    Nested mappings are merged key-by-key instead of being replaced
    wholesale; all other values from ``updated`` overwrite the originals.
    Note that ``mapping`` is modified in place.

    Args:
        mapping: The dict-like object to update in place.
        updated: The mapping whose entries take precedence.

    Returns:
        ``mapping`` after the merge.
    """
    # Bug fix: .iteritems() is Python 2 only; .items() works on both 2 and 3.
    for key, val in updated.items():
        if isinstance(val, Mapping):
            mapping[key] = deep_update(mapping.get(key, {}), val)
        else:
            mapping[key] = val
    return mapping
def flat_list(*args):
    """
    Flatten nested iterables (excluding strings) into a list.

    Strings and non-iterable arguments are appended as-is, in the order in
    which they are encountered; every other iterable is flattened
    recursively.

    Returns:
        A flat list of the leaf items.
    """
    # Bug fix: `basestring` does not exist on Python 3 and raised NameError.
    try:
        text_types = basestring  # Python 2: covers str and unicode.
    except NameError:
        # Python 3: treat bytes like str too, matching the Python 2
        # behaviour where byte strings were never iterated into characters.
        text_types = (str, bytes)
    flattened_list = []
    for arg in args:
        if isinstance(arg, Iterable) and not isinstance(arg, text_types):
            flattened_list += flat_list(*arg)
        else:
            flattened_list.append(arg)
    return flattened_list
def unique_list(seq):
    """
    Return a list of the unique items in ``seq``, preserving order.

    The first occurrence (from left to right) of any duplicated item is
    kept; later occurrences are dropped.
    """
    # A set gives O(1) membership checks, keeping the whole pass O(n).
    emitted = set()
    result = []
    for item in seq:
        if item not in emitted:
            emitted.add(item)
            result.append(item)
    return result
class EasyList(list):
"""
A list with utility functions designed to make it easier to manipulate.
The methods present on Python's built-in ``list()`` reflect its underlying
data structure, and thus the set of operations that may be performed on
lists efficiently. The default methods are also atomic (e.g. an insert
will only take a single item to insert), probably because good Python
means composing atomic operations with functional operators. EasyList,
by contrast, is designed for clean and readable code when efficiency is
not a concern.
"""
flat = BooleanData(False)
unique = BooleanData(False)
def __init__(self, *iterables, **kwargs):
# Pre- and post-processors are functions that take and return
# iterables as arguments before and after operations on the list.
self._preprocessors = []
super(EasyList, self).__init__()
# Register observers for changes on the list's properties
self.flat.attach_observer("set_true", self.flatten)
self.unique.attach_observer("set_true", self.uniquify)
# Decorate instance methods according to the list's properties
self._setup_processors()
# Populate our self with initial data
items = self._preprocess(iterables)
map(self.extend, iterables)
def _setup_processors():
if self.flat:
self._preprocessors.append(flat_list)
if self.unique:
self._preprocessors.append(self._uniquify)
def _preprocess(*items):
for func in self._preprocessors:
for item in items:
yield func(item)
###
# Familiar methods
###
def insert(self, index, *items):
"""
Insert items into the array, at index, in the order they are passed,
and return self.
"""
insert_at_index = partial(super(EasyList, self).insert, index)
map(insert_at_index, reversed(self._preprocess(items)))
self._postprocess()
return self
def _insert_before(self, target, *items):
"""
Insert items before a specified target in the list, in the order in
which they are passed, and return self.
"""
items = self._preprocess(items)
self.insert(self.index(target), *items)
self._postprocess()
return self
def _insert_after(self, target, *items):
"""
Insert items after a specified target in the list, in the order in
which they are passed, and return self.
"""
items = self._preprocess(items)
self.insert(self.index(target) + 1, *items)
self._postprocess()
return self
def _extend(self, *iterables):
map(super(EasyList, self).extend, iterables)
return self
def _extend_left(self, *iterables):
"""
Extend list by appending elements from iterables to the left-hand side
of the list, preserving their order, and return self.
"""
[self.insert(0, item) for iterable in iterables for item in reversed(iterable)]
return self
def _prepend(self, *items):
"""
Add item to the beginning of the list, and return self.
"""
[self.insert(0, item) for item in items]
return self
###
# Helpers for special properites of the list
###
def flatten(self):
    """
    Flatten nested iterables (excluding strings) in place.

    Invoked as an observer callback when the 'flat' property is set true.
    """
    replacement = flat_list(self)
    del self[:]
    self.extend(replacement)
def uniquify(self):
    """
    Remove duplicate elements from the list in place, keeping the earliest
    (left-most) occurrence of each element.

    Invoked as an observer callback when the 'unique' property is set true.
    """
    replacement = unique_list(self)
    del self[:]
    self.extend(replacement)
###
# Python magic
###
# No dunder overrides are defined yet; 'pass' keeps this section (and the
# class body) syntactically valid.
pass
class EasyDict(dict):
    # Placeholder subclass: no dict behaviour is customised yet.
    pass
| {
"content_hash": "d5dcadbb6dce73faf115cb61e2043236",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 87,
"avg_line_length": 31.23121387283237,
"alnum_prop": 0.6277993707199704,
"repo_name": "hipikat/django-revkom",
"id": "e2f18ecfac2fd84239bd98d72edbdb65f0edf398",
"size": "5403",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "revkom/coll.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "25281"
}
],
"symlink_target": ""
} |
"""Test case implementation"""
import sys
import difflib
import pprint
import re
import unittest
import warnings
import six
from unittest2 import result
from unittest2.util import (
safe_repr, safe_str, strclass,
unorderable_list_difference
)
from unittest2.compatibility import wraps
__unittest = True
# Message appended in place of a truncated diff; %s is the full diff's length
# in characters (used by TestCase._truncateMessage, governed by maxDiff).
DIFF_OMITTED = ('\nDiff is %s characters long. '
                'Set self.maxDiff to None to see it.')
# Caught specially by TestCase.run()/runMethod(), which report a skip
# instead of an error.
class SkipTest(Exception):
    """
    Raise this exception in a test to skip it.
    Usually you can use TestResult.skip() or one of the skipping decorators
    instead of raising this directly.
    """
class _ExpectedFailure(Exception):
    """
    Raise this when a test is expected to fail.
    This is an implementation detail.
    """
    def __init__(self, exc_info, bugnumber=None):
        # can't use super because Python 2.4 exceptions are old style
        Exception.__init__(self)
        self.exc_info = exc_info  # (type, value, traceback) of the real failure
        self.bugnumber = bugnumber  # optional bug id from expectedFailure()
class _UnexpectedSuccess(Exception):
    """
    The test was supposed to fail, but it didn't!
    """
    def __init__(self, exc_info, bugnumber=None):
        # can't use super because Python 2.4 exceptions are old style
        Exception.__init__(self)
        self.exc_info = exc_info  # sys.exc_info() at the point of success
        self.bugnumber = bugnumber  # optional bug id from expectedFailure()
def _id(obj):
    """Identity function; used as a no-op decorator by skipIf/skipUnless."""
    return obj
def skip(reason):
    """
    Unconditionally skip a test.

    Plain test functions are replaced with a wrapper that raises SkipTest;
    TestCase subclasses are left callable and only get the skip markers,
    which TestCase.run() honours.
    """
    def decorator(test_item):
        is_case_class = (isinstance(test_item, type)
                         and issubclass(test_item, TestCase))
        if not is_case_class:
            @wraps(test_item)
            def skip_wrapper(*args, **kwargs):
                raise SkipTest(reason)
            test_item = skip_wrapper
        test_item.__unittest_skip__ = True
        test_item.__unittest_skip_why__ = reason
        return test_item
    return decorator
def skipIf(condition, reason):
    """
    Skip a test if the condition is true.
    """
    # Evaluated once at decoration time, not at test run time.
    return skip(reason) if condition else _id
def skipUnless(condition, reason):
    """
    Skip a test unless the condition is true.
    """
    # Evaluated once at decoration time, not at test run time.
    if condition:
        return _id
    return skip(reason)
def expectedFailure(bugnumber=None):
    """
    Mark a test as expected to fail, optionally recording a bug number.

    Usable both as @expectedFailure (bare -- bugnumber is then the decorated
    callable itself) and as @expectedFailure(bugnumber).  The wrapper raises
    _ExpectedFailure when the test fails and _UnexpectedSuccess when it
    unexpectedly passes.
    """
    if callable(bugnumber):
        # Bare usage: @expectedFailure with no arguments.
        func = bugnumber
        @wraps(func)
        def direct_wrapper(*args, **kwargs):
            try:
                func(*args, **kwargs)
            except Exception:
                raise _ExpectedFailure(sys.exc_info(), None)
            raise _UnexpectedSuccess(sys.exc_info(), None)
        return direct_wrapper
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                func(*args, **kwargs)
            except Exception:
                raise _ExpectedFailure(sys.exc_info(), bugnumber)
            raise _UnexpectedSuccess(sys.exc_info(), bugnumber)
        return wrapper
    return decorator
class _AssertRaisesContext(object):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __init__(self, expected, test_case, expected_regexp=None):
self.expected = expected
self.failureException = test_case.failureException
self.expected_regexp = expected_regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
raise self.failureException(
"%s not raised" % (exc_name,))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value # store for later retrieval
if self.expected_regexp is None:
return True
expected_regexp = self.expected_regexp
if isinstance(expected_regexp, six.string_types):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException(
'"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
return True
class _TypeEqualityDict(object):
def __init__(self, testcase):
self.testcase = testcase
self._store = {}
def __setitem__(self, key, value):
self._store[key] = value
def __getitem__(self, key):
value = self._store[key]
if isinstance(value, six.string_types):
return getattr(self.testcase, value)
return value
def get(self, key, default=None):
if key in self._store:
return self[key]
return default
class TestCase(unittest.TestCase):
"""A class whose instances are single test cases.
By default, the test code itself should be placed in a method named
'runTest'.
If the fixture may be used for many test cases, create as
many test methods as are needed. When instantiating such a TestCase
subclass, specify in the constructor arguments the name of the test method
that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction
and deconstruction of the test's environment ('fixture') can be
implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class
__init__ method must always be called. It is important that subclasses
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
"""
# This attribute determines which exception will be raised when
# the instance's assertion methods fail; test methods raising this
# exception will be deemed to have 'failed' rather than 'errored'
failureException = AssertionError
# This attribute sets the maximum length of a diff in failure messages
# by assert methods using difflib. It is looked up as an instance attribute
# so can be configured by individual tests if required.
maxDiff = 80 * 8
# This attribute determines whether long messages (including repr of
# objects used in assert methods) will be printed on failure in *addition*
# to any explicit message passed.
longMessage = True
# Attribute used by TestSuite for classSetUp
_classSetupFailed = False
def __init__(self, methodName='runTest'):
    """Create an instance of the class that will use the named test
    method when executed. Raises a ValueError if the instance does
    not have a method with the specified name.
    """
    self._testMethodName = methodName
    # Set by run(); doCleanups() reports cleanup errors against it.
    self._resultForDoCleanups = None
    try:
        testMethod = getattr(self, methodName)
    except AttributeError:
        raise ValueError("no such test method in %s: %s" %
                         (self.__class__, methodName))
    self._testMethodDoc = testMethod.__doc__
    # LIFO stack of (function, args, kwargs) registered via addCleanup().
    self._cleanups = []
    # Map types to custom assertEqual functions that will compare
    # instances of said type in more detail to generate a more useful
    # error message.
    self._type_equality_funcs = _TypeEqualityDict(self)
    self.addTypeEqualityFunc(dict, 'assertDictEqual')
    self.addTypeEqualityFunc(list, 'assertListEqual')
    self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
    self.addTypeEqualityFunc(set, 'assertSetEqual')
    self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
    # The text type that gets line-diffed differs between Python 2
    # (unicode) and Python 3 (str).
    if six.PY2:
        self.addTypeEqualityFunc(unicode, 'assertMultiLineEqual')
    else:
        self.addTypeEqualityFunc(str, 'assertMultiLineEqual')
def addTypeEqualityFunc(self, typeobj, function):
"""Add a type specific assertEqual style function to compare a type.
This method is for use by TestCase subclasses that need to register
their own type equality functions to provide nicer error messages.
Args:
typeobj: The data type to call this function on when both values
are of the same type in assertEqual().
function: The callable taking two arguments and an optional
msg= argument that raises self.failureException with a
useful error message when the two arguments are not equal.
"""
self._type_equality_funcs[typeobj] = function
def addCleanup(self, function, *args, **kwargs):
"""Add a function, with arguments, to be called when the test is
completed. Functions added are called on a LIFO basis and are
called after tearDown on test failure or success.
Cleanup items are called even if setUp fails (unlike tearDown)."""
self._cleanups.append((function, args, kwargs))
def setUp(self):
"Hook method for setting up the test fixture before exercising it."
@classmethod
def setUpClass(cls):
"Hook method for setting up class fixture before running tests in the class."
@classmethod
def tearDownClass(cls):
"Hook method for deconstructing the class fixture after running all tests in the class."
def tearDown(self):
"Hook method for deconstructing the test fixture after testing it."
def countTestCases(self):
return 1
def defaultTestResult(self):
return result.TestResult()
def shortDescription(self):
"""Returns a one-line description of the test, or None if no
description has been provided.
The default implementation of this method returns the first line of
the specified test method's docstring.
"""
doc = self._testMethodDoc
return doc and doc.split("\n")[0].strip() or None
def id(self):
return "%s.%s" % (strclass(self.__class__), self._testMethodName)
def __eq__(self, other):
if not isinstance(self, type(other)):
return NotImplemented
return self._testMethodName == other._testMethodName
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._testMethodName))
def __str__(self):
return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
def __repr__(self):
return "<%s testMethod=%s>" % \
(strclass(self.__class__), self._testMethodName)
def _addSkip(self, result, reason):
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None:
addSkip(self, reason)
else:
warnings.warn(
"Use of a TestResult without an addSkip method is deprecated",
DeprecationWarning,
2)
result.addSuccess(self)
def run(self, result=None):
orig_result = result
if result is None:
result = self.defaultTestResult()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
self._resultForDoCleanups = result
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
if (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)):
# If the class or method was skipped.
try:
skip_why = (
getattr(
self.__class__,
'__unittest_skip_why__',
'') or getattr(
testMethod,
'__unittest_skip_why__',
''))
self._addSkip(result, skip_why)
finally:
result.stopTest(self)
return
try:
success = False
try:
self.setUp()
except SkipTest as e:
self._addSkip(result, str(e))
except Exception:
result.addError(self, sys.exc_info())
else:
success = self.runMethod(testMethod, result)
try:
self.tearDown()
except Exception:
result.addCleanupError(self, sys.exc_info())
success = False
self.dumpSessionInfo()
cleanUpSuccess = self.doCleanups()
success = success and cleanUpSuccess
if success:
result.addSuccess(self)
finally:
result.stopTest(self)
if orig_result is None:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
def runMethod(self, testMethod, result):
"""Runs the test method and catches any exception that might be thrown.
This is factored out of TestCase.run() to ensure that any exception
thrown during the test goes out of scope before tearDown. Otherwise, an
exception could hold references to Python objects that are bound to
SB objects and prevent them from being deleted in time.
"""
try:
testMethod()
except self.failureException:
result.addFailure(self, sys.exc_info())
except _ExpectedFailure as e:
addExpectedFailure = getattr(result, 'addExpectedFailure', None)
if addExpectedFailure is not None:
addExpectedFailure(self, e.exc_info, e.bugnumber)
else:
warnings.warn(
"Use of a TestResult without an addExpectedFailure method is deprecated",
DeprecationWarning)
result.addSuccess(self)
except _UnexpectedSuccess as x:
addUnexpectedSuccess = getattr(
result, 'addUnexpectedSuccess', None)
if addUnexpectedSuccess is not None:
addUnexpectedSuccess(self, x.bugnumber)
else:
warnings.warn(
"Use of a TestResult without an addUnexpectedSuccess method is deprecated",
DeprecationWarning)
result.addFailure(self, sys.exc_info())
except SkipTest as e:
self._addSkip(result, str(e))
except Exception:
result.addError(self, sys.exc_info())
else:
return True
return False
def doCleanups(self):
"""Execute all cleanup functions. Normally called for you after
tearDown."""
result = self._resultForDoCleanups
ok = True
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
try:
function(*args, **kwargs)
except Exception:
ok = False
result.addError(self, sys.exc_info())
return ok
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the test without collecting errors in a TestResult"""
self.setUp()
getattr(self, self._testMethodName)()
self.tearDown()
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
function(*args, **kwargs)
def skipTest(self, reason):
"""Skip this test."""
raise SkipTest(reason)
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise self.failureException(msg)
def assertFalse(self, expr, msg=None):
"Fail the test if the expression is true."
if expr:
msg = self._formatMessage(msg, "%s is not False" % safe_repr(expr))
raise self.failureException(msg)
def assertTrue(self, expr, msg=None):
"""Fail the test unless the expression is true."""
if not expr:
msg = self._formatMessage(msg, "%s is not True" % safe_repr(expr))
raise self.failureException(msg)
def _formatMessage(self, msg, standardMsg):
"""Honour the longMessage attribute when generating failure messages.
If longMessage is False this means:
* Use only an explicit message if it is provided
* Otherwise use the standard message for the assert
If longMessage is True:
* Use the standard message
* If an explicit message is provided, plus ' : ' and the explicit message
"""
if not self.longMessage:
return msg or standardMsg
if msg is None:
return standardMsg
try:
return '%s : %s' % (standardMsg, msg)
except UnicodeDecodeError:
return '%s : %s' % (safe_str(standardMsg), safe_str(msg))
def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
"""Fail unless an exception of class excClass is thrown
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
thrown, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
If called with callableObj omitted or None, will return a
context object used like this::
with self.assertRaises(SomeException):
do_something()
The context manager keeps a reference to the exception as
the 'exception' attribute. This allows you to inspect the
exception after the assertion::
with self.assertRaises(SomeException) as cm:
do_something()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, 3)
"""
if callableObj is None:
return _AssertRaisesContext(excClass, self)
try:
callableObj(*args, **kwargs)
except excClass:
return
if hasattr(excClass, '__name__'):
excName = excClass.__name__
else:
excName = str(excClass)
raise self.failureException("%s not raised" % excName)
def _getAssertEqualityFunc(self, first, second):
"""Get a detailed comparison function for the types of the two args.
Returns: A callable accepting (first, second, msg=None) that will
raise a failure exception if first != second with a useful human
readable error message for those types.
"""
#
# NOTE(gregory.p.smith): I considered isinstance(first, type(second))
# and vice versa. I opted for the conservative approach in case
# subclasses are not intended to be compared in detail to their super
# class instances using a type equality func. This means testing
# subtypes won't automagically use the detailed comparison. Callers
# should use their type specific assertSpamEqual method to compare
# subclasses if the detailed comparison is desired and appropriate.
# See the discussion in http://bugs.python.org/issue2578.
#
if isinstance(first, type(second)):
asserter = self._type_equality_funcs.get(type(first))
if asserter is not None:
return asserter
return self._baseAssertEqual
def _baseAssertEqual(self, first, second, msg=None):
"""The default assertEqual implementation, not type specific."""
if not first == second:
standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertEqual(self, first, second, msg=None):
"""Fail if the two objects are unequal as determined by the '=='
operator.
"""
assertion_func = self._getAssertEqualityFunc(first, second)
assertion_func(first, second, msg=msg)
def assertNotEqual(self, first, second, msg=None):
"""Fail if the two objects are equal as determined by the '=='
operator.
"""
if not first != second:
msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
safe_repr(second)))
raise self.failureException(msg)
def assertAlmostEqual(
self,
first,
second,
places=None,
msg=None,
delta=None):
"""Fail if the two objects are unequal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero, or by comparing that the
between the two objects is more than the given delta.
Note that decimal places (from zero) are usually not the same
as significant digits (measured from the most signficant digit).
If the two objects compare equal then they will automatically
compare almost equal.
"""
if first == second:
# shortcut
return
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if abs(first - second) <= delta:
return
standardMsg = '%s != %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if round(abs(second - first), places) == 0:
return
standardMsg = '%s != %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertNotAlmostEqual(
self,
first,
second,
places=None,
msg=None,
delta=None):
"""Fail if the two objects are equal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero, or by comparing that the
between the two objects is less than the given delta.
Note that decimal places (from zero) are usually not the same
as significant digits (measured from the most signficant digit).
Objects that are equal automatically fail.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if not (first == second) and abs(first - second) > delta:
return
standardMsg = '%s == %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if not (first == second) and round(
abs(second - first), places) != 0:
return
standardMsg = '%s == %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
# Synonyms for assertion methods
# The plurals are undocumented. Keep them that way to discourage use.
# Do not add more. Do not remove.
# Going through a deprecation cycle on these would annoy many people.
assertEquals = assertEqual
assertNotEquals = assertNotEqual
assertAlmostEquals = assertAlmostEqual
assertNotAlmostEquals = assertNotAlmostEqual
assert_ = assertTrue
# These fail* assertion method names are pending deprecation and will
# be a DeprecationWarning in 3.2; http://bugs.python.org/issue2578
def _deprecate(original_func):
def deprecated_func(*args, **kwargs):
warnings.warn(
('Please use %s instead.' % original_func.__name__),
PendingDeprecationWarning, 2)
return original_func(*args, **kwargs)
return deprecated_func
failUnlessEqual = _deprecate(assertEqual)
failIfEqual = _deprecate(assertNotEqual)
failUnlessAlmostEqual = _deprecate(assertAlmostEqual)
failIfAlmostEqual = _deprecate(assertNotAlmostEqual)
failUnless = _deprecate(assertTrue)
failUnlessRaises = _deprecate(assertRaises)
failIf = _deprecate(assertFalse)
def assertSequenceEqual(self, seq1, seq2,
msg=None, seq_type=None, max_diff=80 * 8):
"""An equality assertion for ordered sequences (like lists and tuples).
For the purposes of this function, a valid ordered sequence type is one
which can be indexed, has a length, and has an equality operator.
Args:
seq1: The first sequence to compare.
seq2: The second sequence to compare.
seq_type: The expected datatype of the sequences, or None if no
datatype should be enforced.
msg: Optional message to use on failure instead of a list of
differences.
max_diff: Maximum size off the diff, larger diffs are not shown
"""
if seq_type is not None:
seq_type_name = seq_type.__name__
if not isinstance(seq1, seq_type):
raise self.failureException('First sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq1)))
if not isinstance(seq2, seq_type):
raise self.failureException('Second sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq2)))
else:
seq_type_name = "sequence"
differing = None
try:
len1 = len(seq1)
except (TypeError, NotImplementedError):
differing = 'First %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
try:
len2 = len(seq2)
except (TypeError, NotImplementedError):
differing = 'Second %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
if seq1 == seq2:
return
seq1_repr = repr(seq1)
seq2_repr = repr(seq2)
if len(seq1_repr) > 30:
seq1_repr = seq1_repr[:30] + '...'
if len(seq2_repr) > 30:
seq2_repr = seq2_repr[:30] + '...'
elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
differing = '%ss differ: %s != %s\n' % elements
for i in range(min(len1, len2)):
try:
item1 = seq1[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of first %s\n' %
(i, seq_type_name))
break
try:
item2 = seq2[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of second %s\n' %
(i, seq_type_name))
break
if item1 != item2:
differing += ('\nFirst differing element %d:\n%s\n%s\n' %
(i, item1, item2))
break
else:
if (len1 == len2 and seq_type is None and
not isinstance(seq1, type(seq2))):
# The sequences are the same, but have differing types.
return
if len1 > len2:
differing += ('\nFirst %s contains %d additional '
'elements.\n' % (seq_type_name, len1 - len2))
try:
differing += ('First extra element %d:\n%s\n' %
(len2, seq1[len2]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of first %s\n' % (len2, seq_type_name))
elif len1 < len2:
differing += ('\nSecond %s contains %d additional '
'elements.\n' % (seq_type_name, len2 - len1))
try:
differing += ('First extra element %d:\n%s\n' %
(len1, seq2[len1]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of second %s\n' % (len1, seq_type_name))
standardMsg = differing
diffMsg = '\n' + '\n'.join(
difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def _truncateMessage(self, message, diff):
max_diff = self.maxDiff
if max_diff is None or len(diff) <= max_diff:
return message + diff
return message + (DIFF_OMITTED % len(diff))
def assertListEqual(self, list1, list2, msg=None):
"""A list-specific equality assertion.
Args:
list1: The first list to compare.
list2: The second list to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(list1, list2, msg, seq_type=list)
def assertTupleEqual(self, tuple1, tuple2, msg=None):
"""A tuple-specific equality assertion.
Args:
tuple1: The first tuple to compare.
tuple2: The second tuple to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
def assertSetEqual(self, set1, set2, msg=None):
"""A set-specific equality assertion.
Args:
set1: The first set to compare.
set2: The second set to compare.
msg: Optional message to use on failure instead of a list of
differences.
assertSetEqual uses ducktyping to support
different types of sets, and is optimized for sets specifically
(parameters must support a difference method).
"""
try:
difference1 = set1.difference(set2)
except TypeError as e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError as e:
self.fail('first argument does not support set difference: %s' % e)
try:
difference2 = set2.difference(set1)
except TypeError as e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError as e:
self.fail(
'second argument does not support set difference: %s' %
e)
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in difference1:
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in difference2:
lines.append(repr(item))
standardMsg = '\n'.join(lines)
self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (
safe_repr(member), safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertIs(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if expr1 is not expr2:
standardMsg = '%s is not %s' % (safe_repr(expr1), safe_repr(expr2))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNot(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is not b), but with a nicer default message."""
if expr1 is expr2:
standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictEqual(self, d1, d2, msg=None):
self.assert_(
isinstance(
d1,
dict),
'First argument is not a dictionary')
self.assert_(
isinstance(
d2,
dict),
'Second argument is not a dictionary')
if d1 != d2:
standardMsg = '%s != %s' % (
safe_repr(d1, True), safe_repr(d2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(d1).splitlines(),
pprint.pformat(d2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictContainsSubset(self, expected, actual, msg=None):
"""Checks whether actual is a superset of expected."""
missing = []
mismatched = []
for key, value in expected.iteritems():
if key not in actual:
missing.append(key)
elif value != actual[key]:
mismatched.append('%s, expected: %s, actual: %s' %
(safe_repr(key), safe_repr(value),
safe_repr(actual[key])))
if not (missing or mismatched):
return
standardMsg = ''
if missing:
standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
missing)
if mismatched:
if standardMsg:
standardMsg += '; '
standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
self.fail(self._formatMessage(msg, standardMsg))
def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
"""An unordered sequence specific comparison. It asserts that
expected_seq and actual_seq contain the same elements. It is
the equivalent of::
self.assertEqual(sorted(expected_seq), sorted(actual_seq))
Raises with an error message listing which elements of expected_seq
are missing from actual_seq and vice versa if any.
Asserts that each element has the same count in both sequences.
Example:
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
"""
try:
expected = sorted(expected_seq)
actual = sorted(actual_seq)
except TypeError:
# Unsortable items (example: set(), complex(), ...)
expected = list(expected_seq)
actual = list(actual_seq)
missing, unexpected = unorderable_list_difference(
expected, actual, ignore_duplicate=False
)
else:
return self.assertSequenceEqual(expected, actual, msg=msg)
errors = []
if missing:
errors.append('Expected, but missing:\n %s' %
safe_repr(missing))
if unexpected:
errors.append('Unexpected, but present:\n %s' %
safe_repr(unexpected))
if errors:
standardMsg = '\n'.join(errors)
self.fail(self._formatMessage(msg, standardMsg))
def assertMultiLineEqual(self, first, second, msg=None):
"""Assert that two multi-line strings are equal."""
self.assert_(isinstance(first, six.string_types), (
'First argument is not a string'))
self.assert_(isinstance(second, six.string_types), (
'Second argument is not a string'))
if first != second:
standardMsg = '%s != %s' % (
safe_repr(first, True), safe_repr(second, True))
diff = '\n' + ''.join(difflib.ndiff(first.splitlines(True),
second.splitlines(True)))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertLess(self, a, b, msg=None):
"""Just like self.assertTrue(a < b), but with a nicer default message."""
if not a < b:
standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertLessEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a <= b), but with a nicer default message."""
if not a <= b:
standardMsg = '%s not less than or equal to %s' % (
safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreater(self, a, b, msg=None):
"""Just like self.assertTrue(a > b), but with a nicer default message."""
if not a > b:
standardMsg = '%s not greater than %s' % (
safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreaterEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a >= b), but with a nicer default message."""
if not a >= b:
standardMsg = '%s not greater than or equal to %s' % (
safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if obj is not None:
standardMsg = '%s is not None' % (safe_repr(obj),)
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if obj is None:
standardMsg = 'unexpectedly None'
self.fail(self._formatMessage(msg, standardMsg))
def assertIsInstance(self, obj, cls, msg=None):
"""Same as self.assertTrue(isinstance(obj, cls)), with a nicer
default message."""
if not isinstance(obj, cls):
standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Included for symmetry with assertIsInstance."""
if isinstance(obj, cls):
standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertRaisesRegexp(self, expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a raised exception matches a regexp.
Args:
expected_exception: Exception class expected to be raised.
expected_regexp: Regexp (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
args: Extra args.
kwargs: Extra kwargs.
"""
if callable_obj is None:
return _AssertRaisesContext(
expected_exception, self, expected_regexp)
try:
callable_obj(*args, **kwargs)
except expected_exception as exc_value:
if isinstance(expected_regexp, six.string_types):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException(
'"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
else:
if hasattr(expected_exception, '__name__'):
excName = expected_exception.__name__
else:
excName = str(expected_exception)
raise self.failureException("%s not raised" % excName)
def assertRegexpMatches(self, text, expected_regexp, msg=None):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regexp, six.string_types):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(text):
msg = msg or "Regexp didn't match"
msg = '%s: %r not found in %r' % (
msg, expected_regexp.pattern, text)
raise self.failureException(msg)
def assertNotRegexpMatches(self, text, unexpected_regexp, msg=None):
"""Fail the test if the text matches the regular expression."""
if isinstance(unexpected_regexp, six.string_types):
unexpected_regexp = re.compile(unexpected_regexp)
match = unexpected_regexp.search(text)
if match:
msg = msg or "Regexp matched"
msg = '%s: %r matches %r in %r' % (msg,
text[match.start():match.end()],
unexpected_regexp.pattern,
text)
raise self.failureException(msg)
class FunctionTestCase(TestCase):
    """A test case that wraps a test function.
    This is useful for slipping pre-existing test functions into the
    unittest framework. Optionally, set-up and tidy-up functions can be
    supplied. As with TestCase, the tidy-up ('tearDown') function will
    always be called if the set-up ('setUp') function ran successfully.
    """
    def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
        super(FunctionTestCase, self).__init__()
        self._testFunc = testFunc
        self._setUpFunc = setUp
        self._tearDownFunc = tearDown
        self._description = description
    def setUp(self):
        # Run the optional user-supplied fixture only when one was given.
        if self._setUpFunc is not None:
            self._setUpFunc()
    def tearDown(self):
        if self._tearDownFunc is not None:
            self._tearDownFunc()
    def runTest(self):
        self._testFunc()
    def id(self):
        return self._testFunc.__name__
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return (self._setUpFunc == other._setUpFunc
                and self._tearDownFunc == other._tearDownFunc
                and self._testFunc == other._testFunc
                and self._description == other._description)
    def __ne__(self, other):
        return not self == other
    def __hash__(self):
        # Hash mirrors __eq__: same fields, plus the concrete type.
        return hash((type(self), self._setUpFunc, self._tearDownFunc,
                     self._testFunc, self._description))
    def __str__(self):
        return "%s (%s)" % (strclass(self.__class__), self._testFunc.__name__)
    def __repr__(self):
        return "<%s testFunc=%s>" % (strclass(self.__class__), self._testFunc)
    def shortDescription(self):
        if self._description is not None:
            return self._description
        doc = self._testFunc.__doc__
        if not doc:
            return None
        # First line of the docstring; an all-whitespace line counts as none.
        first_line = doc.split("\n")[0].strip()
        return first_line or None
| {
"content_hash": "8a0d31834094761f93377a9e2b49094b",
"timestamp": "",
"source": "github",
"line_count": 1174,
"max_line_length": 96,
"avg_line_length": 38.11584327086882,
"alnum_prop": 0.5679583445070171,
"repo_name": "llvm-mirror/lldb",
"id": "c567037ea8a41cd19716f0bef2cfac50f9ebaf81",
"size": "44748",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "third_party/Python/module/unittest2/unittest2/case.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "131618"
},
{
"name": "C",
"bytes": "195293"
},
{
"name": "C++",
"bytes": "23346708"
},
{
"name": "CMake",
"bytes": "167302"
},
{
"name": "DTrace",
"bytes": "334"
},
{
"name": "LLVM",
"bytes": "6106"
},
{
"name": "Makefile",
"bytes": "50396"
},
{
"name": "Objective-C",
"bytes": "106956"
},
{
"name": "Objective-C++",
"bytes": "24806"
},
{
"name": "Perl",
"bytes": "72175"
},
{
"name": "Python",
"bytes": "3669886"
},
{
"name": "Shell",
"bytes": "6573"
},
{
"name": "Vim script",
"bytes": "8434"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, render_to_response
from django import template
register = template.Library()
def test_render_view(request):
    # Exercises the request-aware django.shortcuts.render shortcut.
    return render(request, 'test_render.html', {'content': 'data'})
def test_render_to_response_view(request):
    # Exercises the legacy render_to_response shortcut (no request argument).
    return render_to_response('test_render_to_response.html', {'content': 'data'})
# The stubs below register template filters/tags on this module's Library in
# every supported decorator form: bare decorator and decorator-with-name.
# Their bodies are intentionally empty; they exist so analysis tooling can
# discover the registrations.
# NOTE(review): the "this is my filter" docstrings look like fixture data an
# analyzer may assert on -- confirm before rewording them.
@register.filter
def test_filter(f):
    """this is my filter"""
    pass
@register.tag
def test_tag(*a, **kw):
    pass
@register.filter('test_filter_2')
def test_filter_function(f):
    """this is my filter"""
    pass
@register.tag('test_tag_2')
def test_tag_function(*a, **kw):
    pass
@register.assignment_tag('test_assignment_tag')
def test_assignment_tag(*a, **kw):
    pass
@register.simple_tag('test_simple_tag')
def test_simple_tag(*a, **kw):
    pass
| {
"content_hash": "d3dc882ade0043d99c5b81bea7ed1bed",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 82,
"avg_line_length": 22.52777777777778,
"alnum_prop": 0.6831072749691739,
"repo_name": "Microsoft/PTVS",
"id": "13b70ceb63d43ac4fb9e95483bb093d60866b455",
"size": "811",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Python/Tests/TestData/DjangoAnalysisTestApp/test_render/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "10898"
},
{
"name": "C",
"bytes": "23236"
},
{
"name": "C#",
"bytes": "12235396"
},
{
"name": "C++",
"bytes": "212001"
},
{
"name": "CSS",
"bytes": "7025"
},
{
"name": "HTML",
"bytes": "34251"
},
{
"name": "JavaScript",
"bytes": "87257"
},
{
"name": "PowerShell",
"bytes": "44322"
},
{
"name": "Python",
"bytes": "847130"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "8156"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
} |
import configparser
import datetime
import getopt
import os
import socket
import sys
import re
import packagesimportutil
# Indices into sys.version_info (NOT version numbers): element 0 is the
# major version, element 1 the minor version.
PY_MAJOR_VERSION = 0
PY_MINOR_VERSION = 1
# append worker binary source path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
packagesimportutil.add_all_packages_under_automationworker_to_sys_path()
# since we are using the worker httpclient, some configuration values are expected
import configuration3 as configuration
import httpclientfactory
import linuxutil
''''An instance of simplejson module if the installed Python version is below 2.6
An instance of json module if the installed Python version is or is above 2.6'''
if sys.version_info[PY_MAJOR_VERSION] == 2 and sys.version_info[PY_MINOR_VERSION] < 6:
    import simplejson as json
else:
    import json
# Operation selectors set from the command line in main().
REGISTER = "register"
DEREGISTER = "deregister"
def is_ipv4(hostname):
    """Return True if *hostname* is a dotted-quad IPv4 address.

    Each of the four fields must be 1-3 digits and in the range 0-255.
    """
    # Bug fix: every separator is now an escaped literal dot; the original
    # pattern left the third one unescaped ('.'), so strings such as
    # "1.2.3x4" wrongly matched.
    match = re.match(r'^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$', hostname)
    if not match:
        return False
    # The regex only admits digits, so each field is already >= 0; only the
    # upper bound needs checking.
    return all(int(match.group(i)) <= 255 for i in range(1, 5))
def get_hostname():
    """Return the host name, preferring the OMS agent's own ruby helper."""
    # Ask the same ruby utility the OMS agent uses, so the name we report
    # matches the one the agent registers.
    ruby_cmd = ['/opt/microsoft/omsagent/ruby/bin/ruby', '-r',
                '/opt/microsoft/omsagent/plugin/oms_common.rb', '-e',
                'puts OMS::Common.get_hostname']
    try:
        process, output, error = linuxutil.popen_communicate(ruby_cmd)
        if isinstance(output, bytes):
            output = output.decode()
        if isinstance(error, bytes):
            error = error.decode()
        if process.returncode == 0 and not error:
            return output.strip()
    except OSError:
        # Ruby helper unavailable; fall through to the socket fallback.
        pass
    # Fallback: the socket hostname, trimmed to its short form unless it is
    # already a bare IPv4 address.
    fallback = socket.gethostname()
    if is_ipv4(fallback):
        return fallback
    return fallback.split(".")[0]
def get_hybrid_worker_group_name(agent_id):
    """Build the hybrid worker group name as "<hostname>_<agent_id>".

    Notes:
        The format has to match the OMSAgent computer_agentid format; the
        OMSAgent does not use an FQDN for the computer name.
        See: https://github.com/Microsoft/OMS-Agent-for-Linux/blob/master/source/code/plugins/oms_common.rb#L600

    Returns:
        String, hybrid worker group name
    """
    host = get_hostname()
    return host + "_" + agent_id
def get_ip_address():
    """Return the host's IPv4 address, or "127.0.0.1" when lookup fails.

    The value is informational only (reported to AgentService), so any
    resolution failure falls back to the loopback address instead of
    propagating. (Docstring fix: the fallback really is 127.0.0.1, not
    "127.0.01" as previously documented.)
    """
    try:
        return socket.gethostbyname(socket.gethostname())
    except Exception:
        # Narrowed from a bare "except:"; typical failure is socket.gaierror
        # on hosts whose name does not resolve.
        return "127.0.0.1"
def get_metadata_from_imds(http_client):
    """Fetch Azure VM metadata from the Instance Metadata Service (IMDS).

    Args:
        http_client: client exposing get(url, headers=...) whose response
            carries the body in .raw_data.

    Returns:
        The deserialized metadata document (a dict whose "compute" section
        carries vmId, subscriptionId, resourceGroupName, name,
        platformUpdateDomain, tags, ... and whose "network" section lists
        the interfaces), or None when the endpoint is unreachable or the
        body cannot be parsed -- e.g. on non-Azure hosts.
    """
    md_url = "http://169.254.169.254/metadata/instance?api-version=2017-08-01"
    headers = {'Metadata': 'True'}
    try:
        response = http_client.get(md_url, headers=headers)
        return json.loads(response.raw_data)
    except Exception:
        # Narrowed from a bare "except:": still best-effort (callers treat
        # None as "no IMDS"), but no longer swallows SystemExit or
        # KeyboardInterrupt.
        return None
def get_headers_and_payload(worker_group_name, is_azure_vm, vm_id, azure_resource_id, certificate_path, http_client):
    """Formats the required headers and payload for the registration and deregistration requests.
    Args:
        worker_group_name: hybrid worker group name to register under.
        is_azure_vm: when truthy, enrich the payload from the SMBIOS asset
            tag and the IMDS metadata endpoint.
        vm_id: fallback VM id; overwritten by IMDS data when available.
        azure_resource_id: fallback ARM resource id; overwritten by IMDS
            data when available. Omitted from the payload when None.
        certificate_path: path of the client certificate to describe.
        http_client: client handed to get_metadata_from_imds (only used
            when is_azure_vm is truthy).
    Returns:
        A tuple containing a dictionary for the request headers and a dictionary for the payload (request body).
    """
    issuer, subject, thumbprint = linuxutil.get_cert_info(certificate_path)
    # NOTE(review): the literal "0-00:00" suffix looks like a malformed UTC
    # offset ("-00:00"); confirm the service expects this exact string before
    # changing it.
    headers = {"ProtocolVersion": "2.0",
               "x-ms-date": datetime.datetime.utcnow().isoformat() + "0-00:00",
               "Content-Type": "application/json"}
    # Defaults used when not on an Azure VM or when IMDS is unavailable.
    asset_tag = "Unknown"
    platform_update_domain = ""
    tags = {}
    if is_azure_vm:
        asset_tag = linuxutil.get_azure_vm_asset_tag()
        try:
            metadata = get_metadata_from_imds(http_client)
            if metadata is not None:
                try:
                    # IMDS values take precedence over the caller-supplied
                    # vm_id / azure_resource_id fallbacks.
                    vm_id = metadata["compute"]["vmId"]
                    sub_id = metadata["compute"]["subscriptionId"]
                    resource_group = metadata["compute"]["resourceGroupName"]
                    vm_name = metadata["compute"]["name"]
                    azure_resource_id = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/virtualMachines/{2}".format(sub_id, resource_group, vm_name)
                    platform_update_domain = metadata["compute"]["platformUpdateDomain"]
                    tags = metadata["compute"]["tags"]
                except KeyError:
                    # Incomplete metadata document: keep the fallbacks.
                    pass
        except:
            # NOTE(review): bare except keeps registration best-effort but
            # also hides programming errors; consider except Exception.
            pass
    payload = {"RunbookWorkerGroup": worker_group_name,
               "MachineName": get_hostname(),
               "IpAddress": get_ip_address(),
               "Thumbprint": thumbprint,
               "Issuer": issuer,
               "OperatingSystem": 2,
               "SMBIOSAssetTag": asset_tag,
               "VirtualMachineId": vm_id,
               "Subject": subject,
               "platformUpdateDomain": platform_update_domain,
               "tags": tags}
    if azure_resource_id is not None:
        payload["AzureResourceId"] = azure_resource_id
    return headers, payload
# TODO: add test-register code to test changes in payload, change cadence to increase polling time period
def register(registration_endpoint, worker_group_name, machine_id, cert_path, key_path, is_azure_vm, vm_id,
             azure_resource_id, test_mode):
    """Registers the worker through the automation linked account with the Agent Service.
    Returns:
        The deserialized response from the Agent Service and the payload that was sent.
    """
    client_factory = httpclientfactory.HttpClientFactory(cert_path, key_path, test_mode)
    client = client_factory.create_http_client(sys.version_info)
    # A second client is built with force_no_proxy=True; it is the one handed
    # to get_headers_and_payload for the IMDS metadata lookup.
    direct_factory = httpclientfactory.HttpClientFactory(cert_path, key_path, test_mode, force_no_proxy=True)
    direct_client = direct_factory.create_http_client(sys.version_info)
    headers, payload = get_headers_and_payload(
        worker_group_name, is_azure_vm, vm_id, azure_resource_id, cert_path, direct_client)
    url = registration_endpoint + "/HybridV2(MachineId='" + machine_id + "')"
    response = client.put(url, headers=headers, data=payload)
    if response.status_code != 200:
        raise Exception("Unable to register [status_code=" + str(response.status_code) + "]")
    return json.loads(response.raw_data), payload
def deregister(registration_endpoint, worker_group_name, machine_id, cert_path, key_path, test_mode):
    """Deregisters the worker through the automation linked account with the Agent Service.

    Note:
        This method is only present for testing purposes for now. Linked account
        deregistration is not yet implemented and deregistration needs to be made
        using the automation account information.
    """
    # Bug fix: get_headers_and_payload takes six required arguments; the old
    # call passed only worker_group_name and certificate_path and raised
    # TypeError. Deregistration needs no Azure VM identity, so the IMDS path
    # is disabled (is_azure_vm=False) and vm_id/azure_resource_id/http_client
    # are None.
    headers, payload = get_headers_and_payload(
        worker_group_name, False, None, None, cert_path, None)
    url = registration_endpoint + "/Hybrid(MachineId='" + machine_id + "')"
    http_client_factory = httpclientfactory.HttpClientFactory(cert_path, key_path, test_mode)
    http_client = http_client_factory.create_http_client(sys.version_info)
    response = http_client.delete(url, headers=headers, data=payload)
    if response.status_code != 200:
        raise Exception("Unable to deregister [status_code=" + str(response.status_code) + "]")
def create_worker_configuration_file(working_directory, jrds_uri, registration_endpoint, workspace_id,
                                     automation_account_id, worker_group_name, machine_id, oms_cert_path, oms_key_path,
                                     state_directory, gpg_keyring_path, proxy_configuration_path, test_mode,
                                     cert_info, is_azure_vm, vm_id):
    """Creates the automation hybrid worker configuration file (worker.conf).
    Note:
        The generated file has to match the latest worker.conf template.
        An existing worker.conf under state_directory is loaded first so any
        unrelated sections/options it contains are preserved.
    """
    issuer, subject, thumbprint = cert_info
    worker_conf_path = os.path.join(state_directory, "worker.conf")
    config = configparser.ConfigParser()
    if os.path.isfile(worker_conf_path):
        config.read(worker_conf_path)
    # Mandatory worker settings.
    worker_required_section = configuration.WORKER_REQUIRED_CONFIG_SECTION
    if not config.has_section(worker_required_section):
        config.add_section(worker_required_section)
    config.set(worker_required_section, configuration.CERT_PATH, oms_cert_path)
    config.set(worker_required_section, configuration.KEY_PATH, oms_key_path)
    config.set(worker_required_section, configuration.BASE_URI, jrds_uri)
    config.set(worker_required_section, configuration.ACCOUNT_ID, automation_account_id)
    config.set(worker_required_section, configuration.MACHINE_ID, machine_id)
    config.set(worker_required_section, configuration.HYBRID_WORKER_GROUP_NAME, worker_group_name)
    config.set(worker_required_section, configuration.WORKING_DIRECTORY_PATH, working_directory)
    # Optional worker settings.
    worker_optional_section = configuration.WORKER_OPTIONAL_CONFIG_SECTION
    if not config.has_section(worker_optional_section):
        config.add_section(worker_optional_section)
    config.set(worker_optional_section, configuration.GPG_PUBLIC_KEYRING_PATH, gpg_keyring_path)
    config.set(worker_optional_section, configuration.PROXY_CONFIGURATION_PATH, proxy_configuration_path)
    config.set(worker_optional_section, configuration.STATE_DIRECTORY_PATH, state_directory)
    if test_mode is True:
        # Bug fix: configparser option values must be strings; passing the
        # bool True raised TypeError on Python 3.
        config.set(worker_optional_section, configuration.BYPASS_CERTIFICATE_VERIFICATION, str(True))
    # Registration metadata.
    metadata_section = configuration.METADATA_CONFIG_SECTION
    if not config.has_section(metadata_section):
        config.add_section(metadata_section)
    config.set(metadata_section, configuration.IS_AZURE_VM, str(is_azure_vm))
    config.set(metadata_section, configuration.VM_ID, vm_id)
    config.set(metadata_section, configuration.WORKER_TYPE, "auto-registered")
    # OMS-specific metadata.
    oms_metadata_section = "oms-metadata"
    if not config.has_section(oms_metadata_section):
        config.add_section(oms_metadata_section)
    config.set(oms_metadata_section, configuration.AGENT_ID, machine_id)
    config.set(oms_metadata_section, configuration.WORKSPACE_ID, workspace_id)
    config.set(oms_metadata_section, configuration.REGISTRATION_ENDPOINT, registration_endpoint)
    config.set(oms_metadata_section, configuration.CERTIFICATE_THUMBPRINT, thumbprint)
    # Open late and via a context manager so the handle is always closed and
    # the file is not truncated before the config is fully populated.
    with open(worker_conf_path, 'w') as conf_file:
        config.write(conf_file)
def main(argv):
    """Parse CLI arguments and run the requested register/deregister step.

    Exits with status 2 on unparsable options, 1 on missing mandatory
    arguments, None (0) after printing help, and -5 when the supplied DIY
    account id does not match the linked account.
    """
    agent_id = None
    is_azure_vm = False
    vm_id = None
    oms_cert_path = None
    oms_key_path = None
    endpoint = None
    gpg_keyring_path = None
    operation = None
    proxy_configuration_path = None
    test_mode = False
    state_directory = None
    working_directory = None
    workspace_id = None
    mock_powershelldsc_test = False
    diy_account_id = None
    azure_resource_id = None
    # parse cmd line args
    try:
        opts, args = getopt.getopt(argv, "hrdw:a:c:k:e:f:s:p:g:y:i:v:zt",
                                   ["help", "register", "deregister", "workspaceid=", "agentid=", "certpath=",
                                    "keypath=", "endpoint=", "workingdirpath=", "statepath=", "proxyconfpath=",
                                    "gpgkeyringpath=", "diyaccountid=", "mock_powershelldsc_test=", "vmid=",
                                    "azureresourceid="])
    except getopt.GetoptError:
        print (__file__ + "[--register, --deregister] -w <workspaceid> -a <agentid> -c <certhpath> -k <keypath> " \
                          "-e <endpoint> -f <workingdirpath> -s <statepath> -p <proxyconfpath> -g <gpgkeyringpath>" \
                          "-y <diyaccountid> -i <vmid>")
        sys.exit(2)
    for opt, arg in opts:
        # Bug fix: this used to read `opt == ("-h", "--help")`, comparing the
        # option string against a tuple -- never true, so -h/--help fell
        # through to the "missing mandatory arguments" error path.
        if opt in ("-h", "--help"):
            print (__file__ + "[--register, --deregister] -w <workspaceid> -a <agentid> -c <certhpath> -k <keypath> " \
                              "-e <endpoint> -f <workingdirpath> -s <statepath> -p <proxyconfpath> -g <gpgkeyringpath>" \
                              "-y <diyaccountid> -i <vmid>")
            sys.exit()
        elif opt in ("-r", "--register"):
            operation = REGISTER
        elif opt in ("-d", "--deregister"):
            operation = DEREGISTER
        elif opt in ("-w", "--workspaceid"):
            workspace_id = arg.strip()
        elif opt in ("-a", "--agentid"):
            agent_id = arg.strip()
        elif opt in ("-c", "--certpath"):
            oms_cert_path = arg.strip()
        elif opt in ("-k", "--keypath"):
            oms_key_path = arg.strip()
        elif opt in ("-e", "--endpoint"):
            endpoint = arg.strip()
        elif opt in ("-f", "--workingdirpath"):
            working_directory = arg.strip()
        elif opt in ("-p", "--proxyconfpath"):
            proxy_configuration_path = arg.strip()
        elif opt in ("-s", "--statepath"):
            state_directory = arg.strip()
        elif opt in ("-g", "--gpgkeyringpath"):
            gpg_keyring_path = arg.strip()
        elif opt in ("-y", "--diyaccountid"):
            diy_account_id = arg.strip()
        elif opt in ("-z", "--azurevm"):
            is_azure_vm = True
        elif opt in ("-v", "--azureresourceid"):
            azure_resource_id = arg.strip()  # Use the Resource ID from DSC resource as a backup. Overwrite it with metadata from IMDS when available
        elif opt in ("-i", "--vmid"):
            vm_id = arg.strip()  # Use the VM ID from DSC resource as a backup. Overwrite it with metadata from IMDS when available
        elif opt in ("-t", "--test"):
            test_mode = True
        elif opt == "--mock_powershelldsc_test":
            # generate a dummy configuration file
            # does not do actual registration, just creates the resulting config file
            mock_powershelldsc_test = True
    if workspace_id is None or agent_id is None or oms_cert_path is None or oms_key_path is None \
            or endpoint is None or gpg_keyring_path is None or proxy_configuration_path is None \
            or working_directory is None or state_directory is None or vm_id is None:
        print ("Missing mandatory arguments.")
        print ("Use -h or --help for usage.")
        sys.exit(1)
    else:
        if mock_powershelldsc_test is True:
            # Don't validate paths if we want to generate a dummy config file
            pass
        else:
            # validate that the cert and key exists
            if os.path.isfile(oms_cert_path) is False or os.path.isfile(oms_key_path) is False:
                raise Exception("Certificate or key file doesn't exist. Are you using absolute path?")
    configuration.clear_config()
    configuration.set_config(
        {configuration.PROXY_CONFIGURATION_PATH: proxy_configuration_path,
         configuration.WORKER_VERSION: "LinuxAutoRegister",
         configuration.WORKING_DIRECTORY_PATH: "/var/opt/microsoft/omsagent/tmp"})
    # build registration endpoint
    # example endpoint : agentsvc.azure-automation.net
    registration_endpoint = "https://" + workspace_id + "." + endpoint + "/accounts/" + workspace_id
    if "df-agentsvc" in registration_endpoint:
        registration_endpoint = "https://oaasagentsvcdf.test.azure-automation.net/accounts/" + workspace_id
        test_mode = True
    # rename to match oms concepts to automation
    machine_id = agent_id
    worker_group_name = get_hybrid_worker_group_name(agent_id=agent_id)
    # action
    if operation == REGISTER:
        if mock_powershelldsc_test is True:
            # Don't do the actual registration in case we want only a dummy registration file
            # create a dummy response instead
            registration_response = \
                {'jobRuntimeDataServiceUri': 'https://we-jobruntimedata-prod-su1.azure-automation.net',
                 'AccountId': '23216587-8f56-428c-9006-4c2f28c036f5'}
            cert_info = ['', '', '959GG850526XC5JT35E269CZ69A55E1C7E1256JH']
            # Bug fix: the mock path never defined `payload`, so the
            # create_worker_configuration_file call below raised NameError.
            payload = {"VirtualMachineId": vm_id}
        else:
            registration_response, payload = register(registration_endpoint, worker_group_name, machine_id, oms_cert_path,
                                                      oms_key_path, is_azure_vm, vm_id, azure_resource_id, test_mode)
            cert_info = linuxutil.get_cert_info(oms_cert_path)
        account_id = registration_response["AccountId"]
        if test_mode is False and diy_account_id is not None and diy_account_id != account_id:
            sys.stderr.write("Unable to create worker configuration. DIY Automation account differs from "
                             "linked account.")
            sys.exit(-5)
        create_worker_configuration_file(working_directory, registration_response["jobRuntimeDataServiceUri"],
                                         registration_endpoint, workspace_id, account_id,
                                         worker_group_name, machine_id, oms_cert_path, oms_key_path,
                                         state_directory, gpg_keyring_path, proxy_configuration_path, test_mode,
                                         cert_info, is_azure_vm,
                                         payload["VirtualMachineId"])
    elif operation == DEREGISTER:
        deregister(registration_endpoint, worker_group_name, machine_id, oms_cert_path, oms_key_path, test_mode)
    else:
        raise Exception("No option specified, specify --register, --deregister or --help.")
# Script entry point: forward the CLI arguments (minus the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
| {
"content_hash": "dd2d496b3ee5ebe0cae14db993bfbbb2",
"timestamp": "",
"source": "github",
"line_count": 452,
"max_line_length": 167,
"avg_line_length": 43.48893805309734,
"alnum_prop": 0.6044666022282139,
"repo_name": "MSFTOSSMgmt/WPSDSCLinux",
"id": "1b80c9ac5a2308818012fdda705c3a23c5de6ffe",
"size": "19820",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Providers/nxOMSAutomationWorker/automationworker/3.x/scripts/register_oms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5870322"
},
{
"name": "C#",
"bytes": "98943"
},
{
"name": "C++",
"bytes": "670183"
},
{
"name": "CMake",
"bytes": "13826"
},
{
"name": "HTML",
"bytes": "166861"
},
{
"name": "Makefile",
"bytes": "164013"
},
{
"name": "Objective-C",
"bytes": "61644"
},
{
"name": "PowerShell",
"bytes": "40239"
},
{
"name": "Python",
"bytes": "1858427"
},
{
"name": "Shell",
"bytes": "8136"
},
{
"name": "SourcePawn",
"bytes": "60242"
},
{
"name": "Yacc",
"bytes": "35814"
}
],
"symlink_target": ""
} |
"""
This module defines filters for Container instances.
"""
from __future__ import print_function
from tunacell.filters.main import FilterGeneral
class FilterContainer(FilterGeneral):
    """General class to filter containers"""
    # Discriminator used by the FilterGeneral machinery to distinguish
    # container filters from other filter kinds.
    _type = 'CONTAINER'
class FilterContainerAny(FilterContainer):
    """Filter that accepts every container."""
    def __init__(self):
        self.label = 'True for any container'
    def func(self, container):
        # Unconditional accept.
        return True
class FilterContainerMetadataEquals(FilterContainer):
    """Test whether a given container metadata attribute equals a value.

    Parameters
    ----------
    key : str
        name of the metadata attribute to inspect
    value : str, int, or float
        expected value; metadata values are converted to type(value)
        before comparison

    Raises
    ------
    KeyError : when requested key is not present in container metadata
    TypeError : when value is not a str, int, or float
    """
    def __init__(self, key, value):
        self.key = key
        self.value = value
        self.label = '{}={}'.format(key, value)
    def func(self, container):
        if not hasattr(container.metadata, self.key):
            # Report WHICH key was missing (the original raised a bare
            # KeyError with no message).
            raise KeyError(self.key)
        svalue = getattr(container.metadata, self.key)
        # Dispatch on the exact type of the expected value; bool is therefore
        # rejected with TypeError, exactly as before. Metadata values are
        # converted to the expected numeric type before comparison.
        if type(self.value) is str:
            return svalue == self.value
        elif type(self.value) is int:
            return int(svalue) == self.value
        elif type(self.value) is float:
            return float(svalue) == self.value
        else:
            raise TypeError('type not understood')
| {
"content_hash": "3167555e62b7e0ddfc48608ca8fc0fbc",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 70,
"avg_line_length": 24.567164179104477,
"alnum_prop": 0.5643985419198055,
"repo_name": "LeBarbouze/tunacell",
"id": "2772d3bef4a9270d78ea31711b22a69f7a486574",
"size": "1693",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tunacell/filters/containers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Python",
"bytes": "541092"
}
],
"symlink_target": ""
} |
from .product import Product
from .product_properties import ProductProperties
from .operation_result import OperationResult
from .paging_get_multiple_pages_options import PagingGetMultiplePagesOptions
from .paging_get_odata_multiple_pages_options import PagingGetOdataMultiplePagesOptions
from .paging_get_multiple_pages_with_offset_options import PagingGetMultiplePagesWithOffsetOptions
from .product_paged import ProductPaged
from .product_paged1 import ProductPaged1
# Explicit public API of the models package: the model classes and their
# paged-collection wrappers re-exported from the submodules imported above.
__all__ = [
    'Product',
    'ProductProperties',
    'OperationResult',
    'PagingGetMultiplePagesOptions',
    'PagingGetOdataMultiplePagesOptions',
    'PagingGetMultiplePagesWithOffsetOptions',
    'ProductPaged',
    'ProductPaged1',
]
| {
"content_hash": "14d91b35dc703e64467cdfbe4ca2f9c4",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 98,
"avg_line_length": 37.68421052631579,
"alnum_prop": 0.8114525139664804,
"repo_name": "xingwu1/autorest",
"id": "c70a5991daebed12f776eee14a664045255e0c9f",
"size": "1190",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "AutoRest/Generators/Python/Azure.Python.Tests/Expected/AcceptanceTests/Paging/autorestpagingtestservice/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "12942"
},
{
"name": "C#",
"bytes": "11523351"
},
{
"name": "CSS",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "274"
},
{
"name": "Java",
"bytes": "4720447"
},
{
"name": "JavaScript",
"bytes": "4712361"
},
{
"name": "PowerShell",
"bytes": "29614"
},
{
"name": "Python",
"bytes": "2275107"
},
{
"name": "Ruby",
"bytes": "246219"
},
{
"name": "Shell",
"bytes": "423"
},
{
"name": "TypeScript",
"bytes": "179577"
}
],
"symlink_target": ""
} |
"""The test for the NuHeat thermostat module."""
from homeassistant.components.nuheat.const import DOMAIN
from homeassistant.setup import async_setup_component
from .mocks import (
_get_mock_nuheat,
_get_mock_thermostat_run,
_get_mock_thermostat_schedule_hold_available,
_get_mock_thermostat_schedule_hold_unavailable,
_get_mock_thermostat_schedule_temporary_hold,
_mock_get_config,
)
from tests.async_mock import patch
async def test_climate_thermostat_run(hass):
    """Test a thermostat with the schedule running."""
    thermostat = _get_mock_thermostat_run()
    api = _get_mock_nuheat(get_thermostat=thermostat)
    with patch(
        "homeassistant.components.nuheat.nuheat.NuHeat", return_value=api,
    ):
        assert await async_setup_component(hass, DOMAIN, _mock_get_config())
        await hass.async_block_till_done()
    state = hass.states.get("climate.master_bathroom")
    assert state.state == "auto"
    expected = {
        "current_temperature": 22.2,
        "friendly_name": "Master bathroom",
        "hvac_action": "heating",
        "hvac_modes": ["auto", "heat"],
        "max_temp": 69.4,
        "min_temp": 5.0,
        "preset_mode": "Run Schedule",
        "preset_modes": ["Run Schedule", "Temporary Hold", "Permanent Hold"],
        "supported_features": 17,
        "temperature": 22.2,
    }
    # Compare only a known subset so attributes HA adds later don't break us.
    assert expected.items() <= state.attributes.items()
async def test_climate_thermostat_schedule_hold_unavailable(hass):
    """Test a thermostat with the schedule hold that is offline."""
    thermostat = _get_mock_thermostat_schedule_hold_unavailable()
    api = _get_mock_nuheat(get_thermostat=thermostat)
    with patch(
        "homeassistant.components.nuheat.nuheat.NuHeat", return_value=api,
    ):
        assert await async_setup_component(hass, DOMAIN, _mock_get_config())
        await hass.async_block_till_done()
    state = hass.states.get("climate.guest_bathroom")
    assert state.state == "unavailable"
    expected = {
        "friendly_name": "Guest bathroom",
        "hvac_modes": ["auto", "heat"],
        "max_temp": 180.6,
        "min_temp": -6.1,
        "preset_modes": ["Run Schedule", "Temporary Hold", "Permanent Hold"],
        "supported_features": 17,
    }
    # Compare only a known subset so attributes HA adds later don't break us.
    assert expected.items() <= state.attributes.items()
async def test_climate_thermostat_schedule_hold_available(hass):
    """Test a thermostat with the schedule hold that is online."""
    thermostat = _get_mock_thermostat_schedule_hold_available()
    api = _get_mock_nuheat(get_thermostat=thermostat)
    with patch(
        "homeassistant.components.nuheat.nuheat.NuHeat", return_value=api,
    ):
        assert await async_setup_component(hass, DOMAIN, _mock_get_config())
        await hass.async_block_till_done()
    state = hass.states.get("climate.available_bathroom")
    assert state.state == "auto"
    expected = {
        "current_temperature": 38.9,
        "friendly_name": "Available bathroom",
        "hvac_action": "idle",
        "hvac_modes": ["auto", "heat"],
        "max_temp": 180.6,
        "min_temp": -6.1,
        "preset_mode": "Run Schedule",
        "preset_modes": ["Run Schedule", "Temporary Hold", "Permanent Hold"],
        "supported_features": 17,
        "temperature": 26.1,
    }
    # Compare only a known subset so attributes HA adds later don't break us.
    assert expected.items() <= state.attributes.items()
async def test_climate_thermostat_schedule_temporary_hold(hass):
    """Test a thermostat with the temporary schedule hold that is online."""
    thermostat = _get_mock_thermostat_schedule_temporary_hold()
    api = _get_mock_nuheat(get_thermostat=thermostat)
    with patch(
        "homeassistant.components.nuheat.nuheat.NuHeat", return_value=api,
    ):
        assert await async_setup_component(hass, DOMAIN, _mock_get_config())
        await hass.async_block_till_done()
    state = hass.states.get("climate.temp_bathroom")
    assert state.state == "auto"
    expected = {
        "current_temperature": 94.4,
        "friendly_name": "Temp bathroom",
        "hvac_action": "idle",
        "hvac_modes": ["auto", "heat"],
        "max_temp": 180.6,
        "min_temp": -0.6,
        "preset_mode": "Run Schedule",
        "preset_modes": ["Run Schedule", "Temporary Hold", "Permanent Hold"],
        "supported_features": 17,
        "temperature": 37.2,
    }
    # Compare only a known subset so attributes HA adds later don't break us.
    assert expected.items() <= state.attributes.items()
| {
"content_hash": "44a64ac166e1a16cdc675b7995bcf9ce",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 88,
"avg_line_length": 38.31578947368421,
"alnum_prop": 0.6573783359497645,
"repo_name": "titilambert/home-assistant",
"id": "b407461fa89fba0f6176d3be633dbbda87955866",
"size": "5096",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "tests/components/nuheat/test_climate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "25849092"
},
{
"name": "Shell",
"bytes": "4410"
}
],
"symlink_target": ""
} |
import time, json
from O365 import Schedule
class Client:
    """Wrapper around an O365 Schedule that finds the next upcoming event.

    NOTE: this module is Python 2 only (execfile, print statements).
    """
    def __init__(self, username=None, password=None):
        # Credentials fall back to a local "credentials.txt" script that is
        # expected to define `username` and `password` when they are not
        # passed in explicitly.
        config = {}
        execfile("credentials.txt", config)
        if username is None:
            self.username = config["username"]
        else:
            self.username = username
        if password is None:
            self.password = config["password"]
        else:
            self.password = password
        self.schedule = Schedule((self.username, self.password))
        try:
            self.result = self.schedule.getCalendars()
            print 'Fetched calendars for',self.username,'was successful:',self.result
            self.firstEvent = self.getFirstEvent()
        # NOTE(review): bare except hides programming errors as well as
        # genuine login failures; consider narrowing the exception types.
        except:
            print 'Login failed for',self.username
            self.firstEvent = None
    def getFirstEvent(self):
        """Return the event with the earliest start within the next 24h
        across all calendars, or None when no event is found."""
        first = None
        for cal in self.schedule.calendars:
            # Query window: from now until now + 24h, formatted with the
            # calendar's own time format string.
            end = time.time()
            # end += 3600*24*7 # Next 7 days
            end += 3600*24 # Next 24h
            end = time.gmtime(end)
            end = time.strftime(cal.time_string,end)
            cal.getEvents(end=end)
            # events = []
            # print u"Calendar \"{}\":".format(cal.getName())
            # for e in cal.events:
            #     print u"   [{}] {}".format(e.getStart(), e.getSubject())
            #     events.append(e.fullcalendarioJson())
            # print json.dumps(events,sort_keys=True,indent=4,ensure_ascii=False)
            if len(cal.events):
                # Track the earliest-starting event seen so far.
                if first is None:
                    first = cal.events[0]
                for e in cal.events:
                    if e.getStart() < first.getStart():
                        first = e
        # if first is not None:
        #     hour = first.getStart().tm_hour - time.altzone/(60**2) - time.daylight
        #     mins = first.getStart().tm_min
        #     if mins >0:
        #         print u"First event on the schedule: {} @ {}:{}".format(first.getSubject(), hour, mins)
        #     else:
        #         print u"First event on the schedule: {} @ {}:00".format(first.getSubject(), hour)
        if first is not None:
            return first
        else:
            return None
| {
"content_hash": "ebeb6717186645a67caec7195a7c50e4",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 104,
"avg_line_length": 34.96825396825397,
"alnum_prop": 0.5229232864275988,
"repo_name": "miek770/nanny",
"id": "5ef3ab7a7ae8f4e7caa1e59728ac90088b511c8a",
"size": "2250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "o365_client.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15360"
}
],
"symlink_target": ""
} |
"""Implementation of JSONEncoder
"""
import re
import math
try:
from _json import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
c_encode_basestring_ascii = None
__all__ = ['JSONEncoder']

# Characters that must be escaped inside a JSON string: control characters,
# the backslash and the double quote.
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
# ASCII-only mode: escape backslash/quote plus anything outside the
# printable ASCII range (space through tilde).
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
# Detects bytes >= 0x80, i.e. a byte string that likely contains UTF-8.
HAS_UTF8 = re.compile(r'[\x80-\xff]')
# Two-character escapes for the common cases; all remaining control
# characters get a generic \uXXXX escape added by the loop below.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
for i in range(0x20):
    ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))

# Hook for float formatting; repr yields a round-trippable representation.
FLOAT_REPR = repr
def floatstr(o, allow_nan=True):
    """Format a float for JSON output.

    NaN and the infinities have no JSON literal; emit the JavaScript
    names for them, or raise ValueError when allow_nan is false.
    """
    # Check for specials. Note that this type of test is processor- and/or
    # platform-specific, so do tests which don't depend on the internals.
    if math.isnan(o):
        special = 'NaN'
    elif math.isinf(o):
        special = 'Infinity' if math.copysign(1., o) == 1. else '-Infinity'
    else:
        # Ordinary finite float: delegate to the configured formatter.
        return FLOAT_REPR(o)

    if not allow_nan:
        raise ValueError(
            "Out of range float values are not JSON compliant: " + repr(o))
    return special
def encode_basestring(s):
    """Return a JSON string literal for the Python string *s*.

    Only the characters matched by ESCAPE are rewritten; everything else
    passes through unchanged.
    """
    def _escape(match):
        return ESCAPE_DCT[match.group(0)]
    escaped = ESCAPE.sub(_escape, s)
    return '"' + escaped + '"'
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON string literal for *s* (pure-Python path).

    Byte strings containing UTF-8 are decoded to unicode first so that
    multi-byte characters are escaped as code points, not raw bytes.
    (Python 2: ``str`` here is a byte string.)
    """
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    def replace(match):
        s = match.group(0)
        try:
            # Fast path: one of the fixed two-character escapes.
            return ESCAPE_DCT[s]
        except KeyError:
            n = ord(s)
            if n < 0x10000:
                # Basic Multilingual Plane: single \uXXXX escape.
                return '\\u{0:04x}'.format(n)
            else:
                # surrogate pair
                n -= 0x10000
                s1 = 0xd800 | ((n >> 10) & 0x3ff)
                s2 = 0xdc00 | (n & 0x3ff)
                return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
    # str() narrows the result back to a byte string when possible.
    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
# Prefer the C-accelerated implementation from the _json extension module
# when it imported successfully; otherwise fall back to the pure-Python one.
if c_encode_basestring_ascii is not None:
    encode_basestring_ascii = c_encode_basestring_ascii
else:
    encode_basestring_ascii = py_encode_basestring_ascii
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.

    Supports the following objects and types by default:

    +-------------------+---------------+
    | Python            | JSON          |
    +===================+===============+
    | dict              | object        |
    +-------------------+---------------+
    | list, tuple       | array         |
    +-------------------+---------------+
    | str, unicode      | string        |
    +-------------------+---------------+
    | int, long, float  | number        |
    +-------------------+---------------+
    | True              | true          |
    +-------------------+---------------+
    | False             | false         |
    +-------------------+---------------+
    | None              | null          |
    +-------------------+---------------+

    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).
    """
    # NOTE(review): a class-level __all__ has no import-time effect; it
    # only documents the intended public methods.
    __all__ = ['__init__', 'default', 'encode', 'iterencode']
    # Default separators; overridden per-instance when ``separators`` is
    # passed to the constructor.
    item_separator = ', '
    key_separator = ': '

    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, encoding='utf-8', default=None):
        """Constructor for JSONEncoder, with sensible defaults.

        If skipkeys is False, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None.  If
        skipkeys is True, such items are simply skipped.

        If ensure_ascii is True, the output is guaranteed to be str
        objects with all incoming unicode characters escaped.  If
        ensure_ascii is false, the output will be unicode object.

        If check_circular is True, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.

        If allow_nan is True, then NaN, Infinity, and -Infinity will be
        encoded as such.  This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.

        If sort_keys is True, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.

        If indent is a non-negative integer, then JSON array
        elements and object members will be pretty-printed with that
        indent level.  An indent level of 0 will only insert newlines.
        None is the most compact representation.

        If specified, separators should be a (item_separator, key_separator)
        tuple.  The default is (', ', ': ').  To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.

        If specified, default is a function that gets called for objects
        that can't otherwise be serialized.  It should return a JSON encodable
        version of the object or raise a ``TypeError``.

        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.indent = indent
        # Nesting depth used for pretty-printing; mutated during encoding.
        self.current_indent_level = 0
        if separators is not None:
            self.item_separator, self.key_separator = separators
        if default is not None:
            # Instance attribute shadows the ``default`` method below.
            self.default = default
        self.encoding = encoding

    def _newline_indent(self):
        # Newline plus indentation for the current nesting depth.
        return '\n' + (' ' * (self.indent * self.current_indent_level))

    def _iterencode_list(self, lst, markers=None):
        """Yield JSON chunks for a list or tuple.

        ``markers`` maps id() of containers currently being encoded to the
        container, for circular-reference detection.
        """
        if not lst:
            yield '[]'
            return
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        yield '['
        if self.indent is not None:
            # Pretty-printing: indent one level deeper for the elements.
            self.current_indent_level += 1
            newline_indent = self._newline_indent()
            separator = self.item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            separator = self.item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                yield separator
            for chunk in self._iterencode(value, markers):
                yield chunk
        if newline_indent is not None:
            self.current_indent_level -= 1
            yield self._newline_indent()
        yield ']'
        if markers is not None:
            # Done with this container; allow it to appear again elsewhere.
            del markers[markerid]

    def _iterencode_dict(self, dct, markers=None):
        """Yield JSON chunks for a dict, coercing keys to strings."""
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        key_separator = self.key_separator
        if self.indent is not None:
            self.current_indent_level += 1
            newline_indent = self._newline_indent()
            item_separator = self.item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = self.item_separator
        first = True
        if self.ensure_ascii:
            encoder = encode_basestring_ascii
        else:
            encoder = encode_basestring
        allow_nan = self.allow_nan
        if self.sort_keys:
            keys = dct.keys()
            keys.sort()
            items = [(k, dct[k]) for k in keys]
        else:
            items = dct.iteritems()
        _encoding = self.encoding
        # UTF-8 byte strings can be passed straight to the encoder, which
        # handles the decode itself; other encodings must decode here.
        _do_decode = (_encoding is not None
            and not (_encoding == 'utf-8'))
        for key, value in items:
            if isinstance(key, str):
                if _do_decode:
                    key = key.decode(_encoding)
            elif isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them.  Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = floatstr(key, allow_nan)
            elif isinstance(key, (int, long)):
                key = str(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif self.skipkeys:
                # Unsupported key type and skipkeys is on: drop the item.
                continue
            else:
                raise TypeError("key {0!r} is not a string".format(key))
            if first:
                first = False
            else:
                yield item_separator
            yield encoder(key)
            yield key_separator
            for chunk in self._iterencode(value, markers):
                yield chunk
        if newline_indent is not None:
            self.current_indent_level -= 1
            yield self._newline_indent()
        yield '}'
        if markers is not None:
            del markers[markerid]

    def _iterencode(self, o, markers=None):
        """Dispatch on the type of ``o`` and yield its JSON chunks."""
        if isinstance(o, basestring):
            if self.ensure_ascii:
                encoder = encode_basestring_ascii
            else:
                encoder = encode_basestring
            _encoding = self.encoding
            if (_encoding is not None and isinstance(o, str)
                    and not (_encoding == 'utf-8')):
                o = o.decode(_encoding)
            yield encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield floatstr(o, self.allow_nan)
        elif isinstance(o, (list, tuple)):
            for chunk in self._iterencode_list(o, markers):
                yield chunk
        elif isinstance(o, dict):
            for chunk in self._iterencode_dict(o, markers):
                yield chunk
        else:
            # Unknown type: let ``default`` convert it, guarding against a
            # default() that returns (directly or indirectly) its argument.
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            for chunk in self._iterencode_default(o, markers):
                yield chunk
            if markers is not None:
                del markers[markerid]

    def _iterencode_default(self, o, markers=None):
        # Convert via the user hook, then re-enter the normal dispatch.
        newobj = self.default(o)
        return self._iterencode(newobj, markers)

    def default(self, o):
        """Implement this method in a subclass such that it returns a serializable
        object for ``o``, or calls the base implementation (to raise a
        ``TypeError``).

        For example, to support arbitrary iterators, you could implement
        default like this::

            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        raise TypeError(repr(o) + " is not JSON serializable")

    def encode(self, o):
        """Return a JSON string representation of a Python data structure.

        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, basestring):
            if isinstance(o, str):
                _encoding = self.encoding
                if (_encoding is not None
                        and not (_encoding == 'utf-8')):
                    o = o.decode(_encoding)
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed.  The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = list(self.iterencode(o))
        return ''.join(chunks)

    def iterencode(self, o):
        """Encode the given object and yield each string representation as
        available.

        For example::

            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)
        """
        if self.check_circular:
            markers = {}
        else:
            markers = None
        return self._iterencode(o, markers)
| {
"content_hash": "6b4b0fed9498e6fc6b86546a4ec3eeff",
"timestamp": "",
"source": "github",
"line_count": 384,
"max_line_length": 82,
"avg_line_length": 35.994791666666664,
"alnum_prop": 0.521198090001447,
"repo_name": "babyliynfg/cross",
"id": "621826d9c99c955e26825a7b593fb5ad66306127",
"size": "13822",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tools/project-creator/Python2.6.6/Lib/json/encoder.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "36722"
},
{
"name": "C",
"bytes": "6345646"
},
{
"name": "C++",
"bytes": "15980000"
},
{
"name": "CMake",
"bytes": "1238"
},
{
"name": "GLSL",
"bytes": "64406"
},
{
"name": "HTML",
"bytes": "147661"
},
{
"name": "Java",
"bytes": "574078"
},
{
"name": "JavaScript",
"bytes": "503327"
},
{
"name": "Makefile",
"bytes": "18778"
},
{
"name": "Objective-C",
"bytes": "396703"
},
{
"name": "Objective-C++",
"bytes": "378740"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "15265548"
},
{
"name": "Roff",
"bytes": "23"
},
{
"name": "Shell",
"bytes": "61021"
},
{
"name": "Visual Basic",
"bytes": "19200"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from builtins import str as unicode
from quark_runtime import *
_lazyImport.plug("docs")
import quark.reflect
class Test(_QObject):
    """
    hey, here are some docs
    here are some more docs
    a bunch of docs in fact
    """
    # Generated by the Quark compiler; the _getClass/_getField/_setField
    # methods implement the Quark runtime's reflection protocol.
    def _init(self):
        # Backing storage for the single declared field.
        self.name = None
    def __init__(self): self._init()

    def test(self, param):
        """
        method docs
        """
        return 3

    def _getClass(self):
        # Fully qualified Quark type name used by the reflection layer.
        return u"docs.Test"

    def _getField(self, name):
        # Reflective field read; unknown field names yield None.
        if ((name) == (u"name")):
            return (self).name
        return None

    def _setField(self, name, value):
        # Reflective field write; _cast lazily enforces the declared type.
        if ((name) == (u"name")):
            (self).name = _cast(value, lambda: unicode)
# Slot for the class's reflection metadata, populated lazily by the runtime.
Test.docs_Test_ref = None

def _lazy_import_quark_ffi_signatures_md():
    # Deferred import to break circular dependencies between generated
    # modules; the imported name is promoted into this module's globals.
    import quark_ffi_signatures_md
    globals().update(locals())

_lazyImport("import quark_ffi_signatures_md", _lazy_import_quark_ffi_signatures_md)
_lazyImport.pump("docs")
| {
"content_hash": "e7a3ada054960eacc39e51a6c72444a2",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 83,
"avg_line_length": 21.326923076923077,
"alnum_prop": 0.6113615870153292,
"repo_name": "datawire/quark",
"id": "f82a81842732f71fd1f2cd6e41f18edb7d4d5b91",
"size": "1109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quarkc/test/ffi/expected/py/signatures/docs/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1802"
},
{
"name": "HTML",
"bytes": "8346"
},
{
"name": "Java",
"bytes": "381125"
},
{
"name": "JavaScript",
"bytes": "501785"
},
{
"name": "Python",
"bytes": "643417"
},
{
"name": "Ruby",
"bytes": "370423"
},
{
"name": "Shell",
"bytes": "21479"
}
],
"symlink_target": ""
} |
""" Specify the NetworkNode with its action, context-menus """
# Copyright (C) 2009-2010, Ecole Polytechnique Federale de Lausanne (EPFL) and
# University Hospital Center and University of Lausanne (UNIL-CHUV)
#
# Modified BSD License
# Standard library imports
import os
# Enthought library imports
from traits.api import Instance, Str, Any
from traitsui.api import TreeNode
from traitsui.menu import Menu, Action, Separator
# ConnectomeViewer imports
from cviewer.plugins.cff2.cnetwork import CNetwork
# Logging import
import logging
logger = logging.getLogger('root.'+__name__)
class CNetworkTreeNode(TreeNode):
    """TreeNode presenting a CNetwork in the Connectome Viewer tree view.

    Mostly declarative: icon/label trait assignments plus Action traits
    for the node's context menu.
    """
    # The object that contains the container ;^)
    parent = Any

    # the network associated with this node
    node_for=[CNetwork]

    # a default icons
    # Name of group item icon
    icon_group = Str('home.png')

    # Name of leaf item icon
    icon_item=Str('home.png')

    # Name of opened group item icon
    icon_open=Str('home.png')

    # labels: tree label comes from the object's ``dname`` trait
    label='dname'

    ###
    # Private Traits

    # activate / deactivate logic
    # if the node is activated, this means that there exists a
    # corresponding RenderManager instance

    _ShowName = Instance(Action,
                         kw={'name': 'Show name',
                             'action': 'object.show_name',
                             'tooltip': 'Shows the network name'}, )

    _ChangeParameters = Instance(Action,
                                 kw={'name': 'Edge Parameters',
                                     'action': 'object._edge_parameters',
                                     'tooltip': 'Thresholding and Change Attributes',
                                     'enabled_when' : 'object.loaded == True'}, )

    _RenderMatrixAction = Instance(Action,
                                   kw={'name': 'Connectome Matrix Viewer',
                                       'action': 'object.invoke_matrix_viewer',
                                       'tooltip':'View the connectivity matrices',
                                       'enabled_when':'object.loaded == True'}, )

    # the menu shown after right-click
    menu = Instance(Menu, transient=True)

    def get_children(self, object):
        """ Get the object's children. """
        # NOTE(review): child collation is currently disabled; the
        # commented line suggests surfaces/volumes/tracks were aggregated.
        pass
        # Collate the window's views into categories.
        #return object.surfaces + object.volumes + object.tracks

    ######################################################################
    # Non-public interface
    ######################################################################

    def _menu_default(self):
        """ Standard menus for network nodes """
        # Currently empty: none of the Action traits above are wired in.
        menu_actions = []
        return Menu( *menu_actions)
| {
"content_hash": "26d09f46d722151faec0caabc6259a0b",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 85,
"avg_line_length": 32.84705882352941,
"alnum_prop": 0.5351002865329513,
"repo_name": "LTS5/connectomeviewer",
"id": "899b4cf3b5df942f5b26b4c7db207817a2bd4e47",
"size": "2792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cviewer/plugins/cff2/ui/cnetwork_tree_node.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1788"
},
{
"name": "Python",
"bytes": "290497"
}
],
"symlink_target": ""
} |
"""All the features specific to the file scheme."""
from pathlib import Path
from functools import partial
from urlpathlib.urlpath import PureUrlPath
def _generic(self, _name, *args, **kwargs):
func = getattr(Path(self.path), _name)
return func(*args, **kwargs)
# Thin pass-throughs: each name simply forwards to the identically named
# pathlib.Path method on this URL's local path via _generic.
stat = partial(_generic, _name='stat')
chmod = partial(_generic, _name='chmod')
exists = partial(_generic, _name='exists')
group = partial(_generic, _name='group')
is_dir = partial(_generic, _name='is_dir')
is_file = partial(_generic, _name='is_file')
is_symlink = partial(_generic, _name='is_symlink')
is_socket = partial(_generic, _name='is_socket')
is_fifo = partial(_generic, _name='is_fifo')
is_block_device = partial(_generic, _name='is_block_device')
is_char_device = partial(_generic, _name='is_char_device')
lchmod = partial(_generic, _name='lchmod')
lstat = partial(_generic, _name='lstat')
mkdir = partial(_generic, _name='mkdir')
open = partial(_generic, _name='open')
owner = partial(_generic, _name='owner')
read_bytes = partial(_generic, _name='read_bytes')
read_text = partial(_generic, _name='read_text')
rmdir = partial(_generic, _name='rmdir')
touch = partial(_generic, _name='touch')
unlink = partial(_generic, _name='unlink')
write_bytes = partial(_generic, _name='write_bytes')
write_text = partial(_generic, _name='write_text')
def expanduser(self):
    """Return a new instance of the same class with "~" expanded."""
    cls = type(self)
    expanded = _generic(self, 'expanduser')
    return cls(str(expanded))
def glob(self, pattern):
    """Glob below this path, wrapping every match in this class."""
    cls = type(self)
    matches = _generic(self, 'glob', pattern)
    return [cls(str(match)) for match in matches]
def iterdir(self):
    """Yield this directory's children as instances of this class."""
    cls = type(self)
    for entry in _generic(self, 'iterdir'):
        yield cls(str(entry))
def rename(self, target):
    """Rename this path to *target* (URL path or plain path-like)."""
    if isinstance(target, PureUrlPath):
        destination = target.path
    else:
        destination = str(target)
    Path(self.path).rename(destination)
def replace(self, target):
    """Rename this path to *target*, overwriting any existing file."""
    if isinstance(target, PureUrlPath):
        destination = target.path
    else:
        destination = str(target)
    Path(self.path).replace(destination)
def resolve(self, strict=False):
    """Make the path absolute, resolving symlinks; preserve the class.

    The *strict* flag was previously accepted but silently ignored; it is
    now forwarded to pathlib.Path.resolve, so ``strict=True`` raises
    FileNotFoundError when the path does not exist (matching pathlib).
    The default (``strict=False``) behaves exactly as before.
    """
    cls = type(self)
    return cls(str(_generic(self, 'resolve', strict=strict)))
def rglob(self, pattern):
    """Recursively glob below this path, wrapping matches in this class."""
    cls = type(self)
    matches = _generic(self, 'rglob', pattern)
    return [cls(str(match)) for match in matches]
def samefile(self, other_path):
    """Return True if both paths refer to the same local file.

    Only another URL path with an empty or ``file`` netloc can match a
    local file; anything else is never the same file.

    Bug fix: the netloc check previously referenced an undefined name
    ``other`` and raised NameError whenever a PureUrlPath was passed.
    """
    if isinstance(other_path, PureUrlPath) and other_path.netloc in ('', 'file'):
        return _generic(self, 'samefile', Path(other_path.path))
    return False
def symlink_to(self, target, target_is_directory=False):
    """Create a symlink at this path pointing at *target*."""
    if isinstance(target, PureUrlPath):
        destination = target.path
    else:
        destination = str(target)
    return _generic(self, 'symlink_to', destination, target_is_directory)
| {
"content_hash": "4186bec7d0428400e2cc4de68dcfd1e5",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 76,
"avg_line_length": 30.97590361445783,
"alnum_prop": 0.6830027226760016,
"repo_name": "morgan-del/urlpathlib",
"id": "92a874645704bf2eff94d78e8af619d1eb4d4949",
"size": "2756",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "urlpathlib/schemes/file.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "37107"
}
],
"symlink_target": ""
} |
from django.db.backends.creation import BaseDatabaseCreation
from django.db.backends.util import truncate_name
class DatabaseCreation(BaseDatabaseCreation):
    """PostgreSQL-specific schema/test-database creation helpers."""
    # This dictionary maps Field objects to their associated PostgreSQL column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    data_types = {
        'AutoField':         'serial',
        'BooleanField':      'boolean',
        'CharField':         'varchar(%(max_length)s)',
        'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
        'DateField':         'date',
        'DateTimeField':     'timestamp with time zone',
        'DecimalField':      'numeric(%(max_digits)s, %(decimal_places)s)',
        'FileField':         'varchar(%(max_length)s)',
        'FilePathField':     'varchar(%(max_length)s)',
        'FloatField':        'double precision',
        'IntegerField':      'integer',
        'BigIntegerField':   'bigint',
        'IPAddressField':    'inet',
        'GenericIPAddressField': 'inet',
        'NullBooleanField':  'boolean',
        'OneToOneField':     'integer',
        'PositiveIntegerField': 'integer CHECK ("%(column)s" >= 0)',
        'PositiveSmallIntegerField': 'smallint CHECK ("%(column)s" >= 0)',
        'SlugField':         'varchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField':         'text',
        'TimeField':         'time',
    }

    def sql_table_creation_suffix(self):
        """Return the suffix appended to CREATE DATABASE for the test DB."""
        # Collation cannot be chosen per-database at creation time here.
        assert self.connection.settings_dict['TEST_COLLATION'] is None, "PostgreSQL does not support collation setting at database creation time."
        if self.connection.settings_dict['TEST_CHARSET']:
            return "WITH ENCODING '%s'" % self.connection.settings_dict['TEST_CHARSET']
        return ''

    def sql_indexes_for_field(self, model, f, style):
        """Return the CREATE INDEX statements for one model field."""
        # Unique fields already get an implicit index from their constraint,
        # so only explicitly indexed, non-unique fields are handled.
        if f.db_index and not f.unique:
            qn = self.connection.ops.quote_name
            db_table = model._meta.db_table
            tablespace = f.db_tablespace or model._meta.db_tablespace
            if tablespace:
                sql = self.connection.ops.tablespace_sql(tablespace)
                if sql:
                    tablespace_sql = ' ' + sql
                else:
                    tablespace_sql = ''
            else:
                tablespace_sql = ''

            def get_index_sql(index_name, opclass=''):
                # Index names are truncated to the backend's limit.
                return (style.SQL_KEYWORD('CREATE INDEX') + ' ' +
                        style.SQL_TABLE(qn(truncate_name(index_name,self.connection.ops.max_name_length()))) + ' ' +
                        style.SQL_KEYWORD('ON') + ' ' +
                        style.SQL_TABLE(qn(db_table)) + ' ' +
                        "(%s%s)" % (style.SQL_FIELD(qn(f.column)), opclass) +
                        "%s;" % tablespace_sql)

            output = [get_index_sql('%s_%s' % (db_table, f.column))]
            # Fields with database column types of `varchar` and `text` need
            # a second index that specifies their operator class, which is
            # needed when performing correct LIKE queries outside the
            # C locale. See #12234.
            db_type = f.db_type(connection=self.connection)
            if db_type.startswith('varchar'):
                output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
                                            ' varchar_pattern_ops'))
            elif db_type.startswith('text'):
                output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
                                            ' text_pattern_ops'))
        else:
            output = []
        return output
| {
"content_hash": "631671f791fa7dbd1585b68e8a247a4f",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 146,
"avg_line_length": 48.65384615384615,
"alnum_prop": 0.5486166007905139,
"repo_name": "mitsuhiko/django",
"id": "bdd817db4c9b22dfd585374de262f3b067646819",
"size": "3795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/db/backends/postgresql_psycopg2/creation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "85678"
},
{
"name": "Python",
"bytes": "7282847"
},
{
"name": "Shell",
"bytes": "4559"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User, Permission, Group
from .models import Key
class KeyAuthBackend(object):
    """
    Token-based authentication backend: a request authenticates by
    presenting a Key token that exists and has not expired.
    """

    def authenticate(self, token=None):
        """Return the Key matching *token* when valid, else None."""
        matches = Key.objects.filter(token=token).select_related('user')
        if not matches:
            return None
        candidate = matches[0]
        if candidate.has_expired():
            return None
        return candidate

    def get_user(self, user_id):
        """Return the User with primary key *user_id*, or None."""
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
| {
"content_hash": "fcef98ceb303e695a07fc8bf084f02d3",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 69,
"avg_line_length": 28.166666666666668,
"alnum_prop": 0.6173570019723866,
"repo_name": "laginha/django-key-auth",
"id": "d8fa155554b1f0457102a0367166aad0ecc0d1a8",
"size": "547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/keyauth/backends.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "61"
},
{
"name": "Python",
"bytes": "38966"
}
],
"symlink_target": ""
} |
from blazar.tests import api
class TestRoot(api.APITest):
    """API tests for the unversioned root and version-discovery endpoints."""

    def setUp(self):
        super(TestRoot, self).setUp()
        # Expected body of the 300 Multiple Choices discovery response.
        self.versions = {
            "versions":
                [{"status": "DEPRECATED",
                  "id": "v2.0",
                  "links": [{"href": "http://localhost/v2", "rel": "self"}]}]}

    def _get(self, path):
        # Every request in this suite is unversioned and may return an error.
        return self.get_json(path, expect_errors=True, path_prefix='')

    def test_version_discovery_root(self):
        response = self._get('/')
        self.assertEqual(300, response.status_int)
        self.assertEqual("application/json", response.content_type)
        self.assertEqual(self.versions, response.json)

    def test_version_discovery_versions(self):
        response = self._get('/versions')
        self.assertEqual(300, response.status_int)
        self.assertEqual("application/json", response.content_type)
        self.assertEqual(self.versions, response.json)

    def test_bad_uri(self):
        response = self._get('/bad/path')
        self.assertEqual(404, response.status_int)
        self.assertEqual("text/plain", response.content_type)
| {
"content_hash": "111be56895e025c362543260e9d1ca09",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 74,
"avg_line_length": 39.05882352941177,
"alnum_prop": 0.5346385542168675,
"repo_name": "stackforge/blazar",
"id": "a5e1aee61940e0d90cf665dafc685f2be8f7d4f7",
"size": "1903",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "blazar/tests/api/test_root.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1014"
},
{
"name": "Python",
"bytes": "905154"
},
{
"name": "Shell",
"bytes": "8897"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from spacy.en import English
import pytest
@pytest.fixture
def tagged(EN):
    """A tagged (but unparsed) Doc for a fixed sentence."""
    text = u'Bananas in pyjamas are geese.'
    return EN(text, tag=True, parse=False)
@pytest.fixture
def lemmas(tagged):
    """Lemma strings for every token of the tagged fixture."""
    return [token.lemma_ for token in tagged]
@pytest.mark.models
def test_lemmas(lemmas, tagged):
    # Check the unambiguous tokens first.
    for position, expected in enumerate(['banana', 'in', 'pyjama', 'be']):
        assert lemmas[position] == expected
    # 'geese' is only lemmatized to 'goose' when it got the same tag as
    # 'pyjamas'; skip the assertion otherwise.
    if tagged[2].tag == tagged[4].tag:
        assert lemmas[4] == 'goose'
def test_didnt(EN):
    # The contraction piece "n't" must still receive a non-empty lemma.
    doc = EN(u"I didn't do it")
    assert doc[1].lemma_ != u""
| {
"content_hash": "23c42ed338d65768375ea80851e1244a",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 46,
"avg_line_length": 20.375,
"alnum_prop": 0.6411042944785276,
"repo_name": "rebeling/spaCy",
"id": "1895310cff93cfc1687b28cc2924d73ac476823e",
"size": "652",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/tagger/test_add_lemmas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "315320"
},
{
"name": "CSS",
"bytes": "39174"
},
{
"name": "Groff",
"bytes": "188349"
},
{
"name": "HTML",
"bytes": "548974"
},
{
"name": "JavaScript",
"bytes": "9925"
},
{
"name": "Makefile",
"bytes": "91661"
},
{
"name": "PostScript",
"bytes": "460967"
},
{
"name": "Python",
"bytes": "433006"
},
{
"name": "Shell",
"bytes": "96067"
}
],
"symlink_target": ""
} |
"""
Given array of integers, find the maximal possible sum of some of its k consecutive elements.
Example
For inputArray = [2, 3, 5, 1, 6] and k = 2, the output should be
arrayMaxConsecutiveSum(inputArray, k) = 8.
All possible sums of 2 consecutive elements are:
2 + 3 = 5;
3 + 5 = 8;
5 + 1 = 6;
1 + 6 = 7.
Thus, the answer is 8.
Input/Output
[time limit] 4000ms (py)
[input] array.integer inputArray
Array of positive integers.
Guaranteed constraints:
3 <= inputArray.length <= 10^5,
1 <= inputArray[i] <= 1000.
[input] integer k
An integer (not greater than the length of inputArray).
Guaranteed constraints:
1 <= k <= inputArray.length.
[output] integer
The maximal possible sum.
"""
"""
This is the original code I used to solve for this problem but it runs at O(n^2) which is too slow once you start
to utilize larger arrays and larger k values. The re-work runs at O(n) which is very fast in comparison.
"""
def arrayMaxConsecutiveSum_old(inputArray, k):
    """Brute-force O(n*k) reference: recompute each window's sum."""
    best = 0
    for start in range(0, len(inputArray) - k + 1):
        window_total = sum(inputArray[start:start + k])
        if window_total > best:
            best = window_total
    return best
def arrayMaxConsecutiveSum(inputArray, k):
    """Return the maximal sum of any k consecutive elements, in O(n).

    Sliding window: start from the sum of the first k elements, then for
    each shift add the element entering the window and subtract the one
    leaving it.  (The original carried a redundant if/else in which both
    branches assigned ``current = new_total``; that duplication is gone.)
    """
    window = sum(inputArray[0:k])
    best = window
    for left in range(0, len(inputArray) - k):
        window += inputArray[left + k] - inputArray[left]
        if window > best:
            best = window
    return best
if __name__ == '__main__':
    # print() with a single argument is valid under both Python 2 and 3,
    # unlike the original Python-2-only print statement.
    print(arrayMaxConsecutiveSum([2, 3, 5, 4, 6], 2))
| {
"content_hash": "640a7f4974af15fe38a855448702d36f",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 113,
"avg_line_length": 23.25,
"alnum_prop": 0.6451612903225806,
"repo_name": "coingraham/codefights",
"id": "ce01ecdf2bc89f74a3ae40d074ef52376f66613c",
"size": "1674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/arrayMaxConsecutiveSum/arrayMaxConsecutiveSum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8742"
},
{
"name": "Python",
"bytes": "90132"
},
{
"name": "Scala",
"bytes": "574"
},
{
"name": "XSLT",
"bytes": "41986"
}
],
"symlink_target": ""
} |
'''
Animations tests
================
'''
import unittest
from time import time, sleep
from kivy.animation import Animation, AnimationTransition
from kivy.uix.widget import Widget
from kivy.clock import Clock
from kivy.graphics import Scale
class AnimationTestCase(unittest.TestCase):
    """Exercise Animation on a Widget and on a canvas instruction."""

    def sleep(self, t):
        # Busy-wait while pumping the kivy Clock so scheduled animation
        # callbacks actually run during the pause.
        deadline = time() + t
        while time() < deadline:
            sleep(.01)
            Clock.tick()

    def setUp(self):
        self.anim = Animation(x=100, d=1, t='out_bounce')
        self.widget = Widget()

    def test_start_animation(self):
        self.anim.start(self.widget)
        self.sleep(1.5)
        self.assertAlmostEqual(self.widget.x, 100)

    def test_animation_duration_0(self):
        instant = Animation(x=100, d=0)
        instant.start(self.widget)
        self.sleep(.5)

    def test_stop_animation(self):
        self.anim.start(self.widget)
        self.sleep(.5)
        self.anim.stop(self.widget)
        # Stopped mid-flight: neither at the target nor back at the origin.
        self.assertNotAlmostEqual(self.widget.x, 100)
        self.assertNotAlmostEqual(self.widget.x, 0)

    def test_stop_all(self):
        self.anim.start(self.widget)
        self.sleep(.5)
        Animation.stop_all(self.widget)

    def test_stop_all_2(self):
        self.anim.start(self.widget)
        self.sleep(.5)
        Animation.stop_all(self.widget, 'x')

    def test_duration(self):
        self.assertEqual(self.anim.duration, 1)

    def test_transition(self):
        self.assertEqual(self.anim.transition, AnimationTransition.out_bounce)

    def test_animated_properties(self):
        self.assertEqual(self.anim.animated_properties['x'], 100)

    def test_animated_instruction(self):
        # Animations also work on canvas instructions, not just widgets.
        instruction = Scale(3)
        self.anim.start(instruction)
        self.assertEqual(self.anim.animated_properties['x'], 100)
        self.assertAlmostEqual(instruction.x, 3)
        self.sleep(1.5)
        self.assertAlmostEqual(instruction.x, 100)

    def test_weakref(self):
        # Animating a proxy ref of a deleted widget must not crash;
        # a ReferenceError during the wait is acceptable.
        widget = Widget()
        anim = Animation(x=100)
        anim.start(widget.proxy_ref)
        del widget
        try:
            self.sleep(1.)
        except ReferenceError:
            pass
class SequentialAnimationTestCase(unittest.TestCase):
    """Exercise a two-step (sequential) animation and its events."""

    def sleep(self, t):
        # Busy-wait while pumping the kivy Clock so the animation advances.
        deadline = time() + t
        while time() < deadline:
            sleep(.01)
            Clock.tick()

    def setUp(self):
        # ``+=`` chains two animations into a sequence: out to 100, back to 0.
        self.anim = Animation(x=100, d=1, t='out_bounce')
        self.anim += Animation(x=0, d=1, t='out_bounce')
        self.widget = Widget()

    def test_stop_all(self):
        self.anim.start(self.widget)
        self.sleep(.5)
        Animation.stop_all(self.widget)

    def test_stop_all_2(self):
        self.anim.start(self.widget)
        self.sleep(.5)
        Animation.stop_all(self.widget, 'x')

    def _test_on_progress(self, anim, widget, progress):
        self._on_progress_called = True

    def _test_on_complete(self, anim, widget):
        self._on_complete_called = True

    def test_events(self):
        self._on_progress_called = False
        self._on_complete_called = False
        self.anim.bind(on_progress=self._test_on_progress,
                       on_complete=self._test_on_complete)
        self.anim.start(self.widget)
        # Mid-flight: progress fired, completion not yet.
        self.sleep(.5)
        self.assertTrue(self._on_progress_called)
        # After the full 2-second sequence both events must have fired.
        self.sleep(2)
        self.assertTrue(self._on_progress_called)
        self.assertTrue(self._on_complete_called)
| {
"content_hash": "ac768ce4651b22f4e8c6453cf4b47ab8",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 75,
"avg_line_length": 27.394957983193276,
"alnum_prop": 0.593558282208589,
"repo_name": "cbenhagen/kivy",
"id": "c71c0a464b85ab648bec7dc901a3b148074d514a",
"size": "3260",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "kivy/tests/test_animations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "340566"
},
{
"name": "Emacs Lisp",
"bytes": "9695"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "19384"
},
{
"name": "Makefile",
"bytes": "4201"
},
{
"name": "Objective-C",
"bytes": "14779"
},
{
"name": "Python",
"bytes": "3605461"
},
{
"name": "VimL",
"bytes": "1123"
}
],
"symlink_target": ""
} |
import crc16
import struct
class PacketWriter(object):
    """Incrementally builds a wire packet for the Dexcom protocol.

    Layout: SOF byte, length (low byte at OFFSET_LENGTH, high byte left as
    "\x00"), command byte, optional payload, then a 2-byte CRC16.  While under
    construction the packet is a list of one-character strings; PacketString()
    serializes it.
    """
    MAX_PAYLOAD = 1584
    MIN_LEN = 6
    MAX_LEN = 1590
    SOF = 0x01
    OFFSET_SOF = 0
    OFFSET_LENGTH = 1
    OFFSET_CMD = 3
    OFFSET_PAYLOAD = 4
    def __init__(self):
        self._packet = None
    def Clear(self):
        """Discard any packet under construction."""
        self._packet = None
    def NewSOF(self, v):
        """Overwrite the start-of-frame byte with chr(v)."""
        self._packet[0] = chr(v)
    def PacketString(self):
        """Serialize the packet element list into a single string."""
        return ''.join(self._packet)
    def AppendCrc(self):
        """Finalize the packet: fix up the length field, then append the CRC16."""
        self.SetLength()
        ps = self.PacketString()
        crc = crc16.crc16(ps, 0, len(ps))
        for x in struct.pack('H', crc):
            self._packet.append(x)
    def SetLength(self):
        # +2 accounts for the CRC bytes that AppendCrc() is about to append.
        self._packet[1] = chr(len(self._packet) + 2)
    def _Add(self, x):
        """Append ``x`` to the packet, flattening nested sequences.

        Strings contribute one list element per character; other iterables are
        flattened recursively; scalars are appended as-is.

        Bug fix: the original iterated strings recursively with a bare
        ``except`` — iterating a 1-char string yields that same string, so
        every character triggered recursion to the interpreter's limit, and
        the resulting RecursionError (plus any other error) was silently
        swallowed.  Strings are now handled explicitly, and only TypeError
        (object without len()) falls back to a plain append.
        """
        if isinstance(x, str):
            # extend() appends one single-character string per character,
            # matching the layout the recursive version produced.
            self._packet.extend(x)
            return
        try:
            len(x)
        except TypeError:
            # Not a sized sequence: append the scalar unchanged.
            self._packet.append(x)
        else:
            for y in x:
                self._Add(y)
    def ComposePacket(self, command, payload=None):
        """Build a fresh packet for ``command`` with an optional payload.

        Requires that no packet is currently under construction (call Clear()
        first if reusing the writer).
        """
        assert self._packet is None
        # SOF, length placeholder, length high byte, command.
        self._packet = ["\x01", None, "\x00", chr(command)]
        if payload:
            self._Add(payload)
        self.AppendCrc()
| {
"content_hash": "666067e29db9b28eac2aa8deaa8fb93b",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 55,
"avg_line_length": 19.918367346938776,
"alnum_prop": 0.5952868852459017,
"repo_name": "compbrain/dexcom_reader",
"id": "6e033422ce1c43561a24328d13f1ff3d6efbe033",
"size": "976",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "dexcom_reader/packetwriter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34925"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# uncommented following line and changed to v1.4
# If your documentation needs a minimal Sphinx version, state it here.
# Abort the build on Sphinx versions older than this pin.
needs_sphinx = '1.4.9'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# intersphinx allows cross-referencing objects documented in other manuals.
extensions = [
    'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates', '../common/_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'OSIsoft Qi'
copyright = '2017, OSIsoft LLC'
author = 'OSIsoft'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '0.0.1'
# The full version, including alpha/beta/rc tags.
# release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build','sample/*','samples/*']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# Appended to the end of every source file; pulls in shared substitutions.
rst_epilog = """
.. include:: /epilog.rst
"""
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# This allows sphinx_rtd_theme to work locally
# Read the Docs injects READTHEDOCS='True' into the build environment; the
# theme is only imported locally because RTD applies it automatically.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
html_context = {
    'on_rtd' : on_rtd
}
if not on_rtd:
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
    # Feed settings used only on Read the Docs builds.
    #extensions.append('yasfb')
    feed_num_items = 15
    feed_skip_regex = '(.)*index'
    feed_base_url = 'http://qi-docs.osisoft.com/en/latest/'
    feed_description = 'Qi Documentation'
    feed_author = 'OSIsoft'
def setup(app):
app.add_stylesheet('docs.css')
#if on_rtd:
# app.add_javascript('wedc.js?v=4')
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'qi-docs-favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../common/_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
#htmlhelp_basename = 'qidocs'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'qidocs.tex', 'OSIsoft Qi Documentation',
'OSIsoft', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'Qi', 'OSIsoft Qi Documentation',
['OSIsoft'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'qi', 'OSIsoft Qi Documentation',
'OSIsoft', 'Qi', 'OSIsoft Qi Documentation',
'Qi Development'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {
# 'api': ('http://docs.asp.net/projects/api/en/latest', '../common/api.inv')
#}
| {
"content_hash": "c5e97c5c99dbd653fbf9f71defffa62f",
"timestamp": "",
"source": "github",
"line_count": 350,
"max_line_length": 80,
"avg_line_length": 30.68857142857143,
"alnum_prop": 0.6998417279582907,
"repo_name": "osisoft/Qi-Docs",
"id": "8bbda306dea648eb2077b2ab6dfab09cb77287dd",
"size": "11072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2353"
}
],
"symlink_target": ""
} |
def omp_single_copyprivate():
    """Pythran/OpenMP conformance test for ``single copyprivate``.

    The bare string literals below are not dead code: Pythran translates them
    into OpenMP pragmas, so their exact placement in the statement sequence is
    significant and must not be changed.  Under plain CPython the strings are
    simply truthy expressions, the loop runs sequentially, and the function
    returns True.
    """
    result = 0
    nr_iterations = 0
    LOOPCOUNT = 1000
    j = 0
    if 'omp parallel private(j)':
        for i in range(LOOPCOUNT):
            if 'omp single copyprivate(j)':
                # Exactly one thread executes this; copyprivate broadcasts j.
                nr_iterations += 1
                j = i
            if 'omp critical':
                # If the broadcast worked, j == i in every thread, so the sum
                # of (j - i) stays zero.
                result += j - i
            'omp barrier'
    return result == 0 and nr_iterations == LOOPCOUNT
| {
"content_hash": "9e23bb93b505946f776aeb0bb73d4246",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 53,
"avg_line_length": 25.8125,
"alnum_prop": 0.4963680387409201,
"repo_name": "serge-sans-paille/pythran",
"id": "09c94f57f9e4ff0c7c833ce5c468bc0ae68d0902",
"size": "413",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pythran/tests/openmp.legacy/omp_single_copyprivate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "2074873"
},
{
"name": "Cython",
"bytes": "1701"
},
{
"name": "Jupyter Notebook",
"bytes": "27461"
},
{
"name": "Makefile",
"bytes": "1162"
},
{
"name": "Python",
"bytes": "2025760"
}
],
"symlink_target": ""
} |
"""Spellcorrection module, for custom word sets.
Forked from https://github.com/phatpiglet/autocorrect and modified so it uses
a fixed set of words.
"""
from itertools import chain
ALPHABET = "abcdefghijklmnopqrstuvwxyz',"


class SpellCorrection(object):
    """Corrects a word against a fixed, caller-supplied set of known words."""

    def __init__(self, words):
        """Remember the set of valid (lowercase) words."""
        self.words = words

    def spell(self, word):
        """Return a lowercase correction of ``word``, or False if none exists.

        Candidates are tried in order of increasing edit distance: the word
        itself, then single-typo variants, then double-typo variants.  The
        more expensive candidate sets are only generated when needed.
        """
        container = Word(word)
        lookups = (
            lambda: self.known([word]),
            lambda: self.known(container.typos()),
            lambda: self.known(container.double_typos()),
        )
        for lookup in lookups:
            matches = lookup()
            if matches:
                # Sets are unordered; any surviving candidate is acceptable.
                return matches.pop()
        return False

    def known(self, words):
        """{'Gazpacho', 'gazzpacho'} => {'gazpacho'}."""
        lowered = {candidate.lower() for candidate in words}
        return lowered.intersection(self.words)


class Word(object):
    """Generates the one-typo neighbourhood of a word."""

    def __init__(self, word):
        """Precompute every (prefix, suffix) split of the lowercased word.

        'the' => (('', 'the'), ('t', 'he'),
                  ('th', 'e'), ('the', ''))
        """
        lowered = word.lower()
        pairs = [(lowered[:cut], lowered[cut:])
                 for cut in range(len(lowered) + 1)]
        self.slices = tuple(pairs)
        self.word = word

    def _deletes(self):
        """'the' -> 'he', 'te', 'th' (one character removed)."""
        return {head + tail[1:] for head, tail in self.slices[:-1]}

    def _transposes(self):
        """'the' -> 'hte', 'teh' (adjacent characters swapped)."""
        return {head + tail[1] + tail[0] + tail[2:]
                for head, tail in self.slices[:-2]}

    def _replaces(self):
        """'the' -> 'tge', ... (one character substituted)."""
        return {head + letter + tail[1:]
                for head, tail in self.slices[:-1]
                for letter in ALPHABET}

    def _inserts(self):
        """'the' -> 'thwe', ... (one character inserted)."""
        return {head + letter + tail
                for head, tail in self.slices
                for letter in ALPHABET}

    def typos(self):
        """Every letter combination exactly one edit away from the word."""
        variants = set()
        for group in (self._deletes(), self._transposes(),
                      self._replaces(), self._inserts()):
            variants.update(group)
        return variants

    def double_typos(self):
        """Every letter combination two edits away from the word."""
        twice_removed = set()
        for once in self.typos():
            twice_removed.update(Word(once).typos())
        return twice_removed


def concat(*args):
    """reversed('th'), 'e' => 'hte'.

    Joins string arguments directly and flattens iterable arguments.
    """
    pieces = (part if isinstance(part, str) else "".join(part)
              for part in args)
    return "".join(pieces)
| {
"content_hash": "4fcf6b58ea51e275a0009a8bcf8d6768",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 88,
"avg_line_length": 28.290697674418606,
"alnum_prop": 0.5511713933415536,
"repo_name": "NMisko/monkalot",
"id": "141787dfe3b4146720a899ce9b42333aa801486c",
"size": "2433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot/utilities/spellcorrection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "161"
},
{
"name": "Python",
"bytes": "194297"
}
],
"symlink_target": ""
} |
import unittest2 as unittest
from keystone.middleware.url import NormalizingFilter
class MockWsgiApp(object):
    """Minimal WSGI-application stand-in: accepts any invocation, does nothing."""

    def __init__(self):
        """The mock keeps no state."""
        pass

    def __call__(self, env, start_response):
        """Swallow the WSGI call; returns None like the original no-op body."""
        return None
def _start_response():
pass
class NormalizingFilterTest(unittest.TestCase):
    """Behavioural checks for NormalizingFilter's path and extension handling."""

    def setUp(self):
        self.filter = NormalizingFilter(MockWsgiApp(), {})

    def _normalize(self, environ):
        # The filter mutates ``environ`` in place; return it for assertions.
        self.filter(environ, _start_response)
        return environ

    def test_trailing_slash(self):
        environ = self._normalize({'PATH_INFO': '/v2.0/'})
        self.assertEqual('/', environ['PATH_INFO'])

    def test_remove_trailing_slash_from_empty_path(self):
        """Empty paths should still equate to a slash"""
        environ = self._normalize({'PATH_INFO': '/'})
        self.assertEqual('/', environ['PATH_INFO'])

    def test_no_extension(self):
        # No extension defaults the accepted content type to JSON.
        environ = self._normalize({'PATH_INFO': '/v2.0/someresource'})
        self.assertEqual('/someresource', environ['PATH_INFO'])
        self.assertEqual('application/json', environ['HTTP_ACCEPT'])

    def test_xml_extension(self):
        environ = self._normalize({'PATH_INFO': '/v2.0/someresource.xml'})
        self.assertEqual('/someresource', environ['PATH_INFO'])
        self.assertEqual('application/xml', environ['HTTP_ACCEPT'])

    def test_json_extension(self):
        environ = self._normalize({'PATH_INFO': '/v2.0/someresource.json'})
        self.assertEqual('/someresource', environ['PATH_INFO'])
        self.assertEqual('application/json', environ['HTTP_ACCEPT'])

    def test_extension_overrides_header(self):
        # An explicit extension wins over a conflicting Accept header.
        environ = self._normalize({
            'PATH_INFO': '/v2.0/someresource.json',
            'HTTP_ACCEPT': 'application/xml'})
        self.assertEqual('/someresource', environ['PATH_INFO'])
        self.assertEqual('application/json', environ['HTTP_ACCEPT'])
if __name__ == '__main__':
    # Allow running this module directly as a test script.
    unittest.main()
| {
"content_hash": "e8e8fc9b9feee989477d7e21575a4811",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 64,
"avg_line_length": 30.951612903225808,
"alnum_prop": 0.6128191766545076,
"repo_name": "genius1611/Keystone",
"id": "e83af7628510205df553651a4ccfd3e3f0af47d8",
"size": "2554",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "keystone/test/unit/test_normalizingfilter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "67937"
},
{
"name": "Python",
"bytes": "703106"
},
{
"name": "Shell",
"bytes": "4288"
}
],
"symlink_target": ""
} |
"""
Filename: plot_optical_depth.py
Author: Damien Irving, irving.damien@gmail.com
Description:
"""
# Import general Python modules
import sys, os, pdb
import argparse
import matplotlib.pyplot as plt
import iris
import iris.plot as iplt
import seaborn
seaborn.set_context('talk')
# Import my modules
# Locate the 'ocean-analysis' repository root by walking up the components of
# the current working directory, then put its 'modules' directory on sys.path
# so the repo's shared helpers can be imported from any sub-directory.
cwd = os.getcwd()
repo_dir = '/'
for directory in cwd.split('/')[1:]:
    repo_dir = os.path.join(repo_dir, directory)
    if directory == 'ocean-analysis':
        break
modules_dir = os.path.join(repo_dir, 'modules')
sys.path.append(modules_dir)
try:
    import general_io as gio
except ImportError:
    # The import only succeeds when the script runs inside the repo tree.
    raise ImportError('Must run this script from anywhere within the ocean-analysis git repo')
# Define functions
# Line colour used for each CMIP experiment in the plot legend.
experiment_colors = {
    'historical': 'green',
    'historicalAA': 'blue',
    'historicalGHG': 'red',
    'historicalnoAA': 'orange',
}

# Line style used for each spatial aggregation region.
region_styles = {
    'sh': 'dashed',
    'nh': 'dashdot',
    'global': 'solid',
}


def get_file_info(infile):
    """Strip experiment, model, region and mip information from a file name.

    Expects basenames of the form
    ``<var>-<region>-<agg>_<realm>_<model>_<experiment>_<mip>_<time>``.
    ``historicalMisc`` is reported as ``historicalAA``.

    Raises AssertionError for unexpected experiments or regions (note that
    asserts are stripped under ``python -O``).
    """
    fname = infile.split('/')[-1]
    metric, realm, model, experiment, mip, timespan = fname.split('_')
    assert 'historical' in experiment
    var, region, aggregator = metric.split('-')
    # Membership test directly on the dict; .keys() is redundant.
    assert region in region_styles
    if experiment == 'historicalMisc':
        # historicalMisc is the anthropogenic-aerosol-only run in this analysis.
        experiment = 'historicalAA'
    return experiment, model, region, mip
def main(inargs):
    """Run the program."""
    fig = plt.figure(figsize=[10, 10])
    for fname in inargs.infiles:
        cube = iris.load_cube(fname)
        experiment, model, region, mip = get_file_info(fname)
        iplt.plot(cube,
                  color=experiment_colors[experiment],
                  linestyle=region_styles[region],
                  label=experiment + ', ' + region)
    # NOTE: model, mip, fname and cube deliberately leak out of the loop —
    # the title and the metadata record describe the last file plotted.
    plt.title(model + ', ' + mip[0:2])
    plt.legend(loc=2)
    plt.ylabel('Aerosol optical depth at 550nm')
    plt.xlabel('Year')
    plt.savefig(inargs.outfile, bbox_inches='tight')
    gio.write_metadata(inargs.outfile,
                       file_info={fname: cube.attributes['history']})
if __name__ == '__main__':
    # Help epilog shown by argparse (RawDescriptionHelpFormatter keeps layout).
    extra_info ="""
author:
    Damien Irving, irving.damien@gmail.com
note:
"""
    description=''
    parser = argparse.ArgumentParser(description=description,
                                     epilog=extra_info,
                                     argument_default=argparse.SUPPRESS,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("infiles", type=str, nargs='*', help="Input file names")
    parser.add_argument("outfile", type=str, help="Output file name")
    args = parser.parse_args()
    main(args)
| {
"content_hash": "b197523721b2302a38be25444f9d8079",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 94,
"avg_line_length": 26.735849056603772,
"alnum_prop": 0.6446718419195483,
"repo_name": "DamienIrving/ocean-analysis",
"id": "b3dd21c044f39420896dad0c56288c1455cb103a",
"size": "2834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "visualisation/plot_optical_depth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "39906464"
},
{
"name": "Makefile",
"bytes": "171683"
},
{
"name": "Python",
"bytes": "887747"
},
{
"name": "Shell",
"bytes": "114403"
}
],
"symlink_target": ""
} |
from oslo_log import log as logging
from webob import exc
from nova.api.openstack.api_version_request \
import MAX_PROXY_API_SUPPORT_VERSION
from nova.api.openstack import wsgi
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.network import neutron
from nova.policies import tenant_networks as tn_policies
from nova import quota
# Global nova configuration object.
CONF = nova.conf.CONF
# Quota engine handle.
# NOTE(review): QUOTAS appears unused in this module — confirm before removing.
QUOTAS = quota.QUOTAS
# Module-level logger named after this module.
LOG = logging.getLogger(__name__)
def network_dict(network):
    """Convert a neutron network response into the legacy nova-network shape."""
    mapped = {'id': network.get('id')}
    # Yes, this is bananas: the API historically returned the string 'None'
    # for cidr when neutron backed it, so that exact value is preserved.
    mapped['cidr'] = str(None)
    mapped['label'] = network.get('name')
    return mapped
class TenantNetworkController(wsgi.Controller):
    """Proxy API controller that lists and shows tenant networks via neutron."""

    def __init__(self):
        super(TenantNetworkController, self).__init__()
        self.network_api = neutron.API()
        self._default_networks = []

    def _refresh_default_networks(self):
        """Re-fetch the configured default networks, best-effort."""
        self._default_networks = []
        if not CONF.api.use_neutron_default_nets:
            return
        try:
            self._default_networks = self._get_default_networks()
        except Exception:
            # Deliberately broad: listing must not fail just because the
            # default-network lookup did.
            LOG.exception("Failed to get default networks")

    def _get_default_networks(self):
        """Return every network owned by the configured default tenant."""
        ctx = nova_context.RequestContext(
            user_id=None, project_id=CONF.api.neutron_default_tenant_id)
        return self.network_api.get_all(ctx)

    @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
    @wsgi.expected_errors(())
    def index(self, req):
        """List the caller's networks plus the (lazily cached) defaults."""
        context = req.environ['nova.context']
        context.can(tn_policies.BASE_POLICY_NAME)
        networks = list(self.network_api.get_all(context))
        if not self._default_networks:
            self._refresh_default_networks()
        networks.extend(self._default_networks)
        return {'networks': [network_dict(entry) for entry in networks]}

    @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
    @wsgi.expected_errors(404)
    def show(self, req, id):
        """Show one network, translating NetworkNotFound to HTTP 404."""
        context = req.environ['nova.context']
        context.can(tn_policies.BASE_POLICY_NAME)
        try:
            network = self.network_api.get(context, id)
        except exception.NetworkNotFound:
            raise exc.HTTPNotFound(explanation=_("Network not found"))
        return {'network': network_dict(network)}

    @wsgi.expected_errors(410)
    def delete(self, req, id):
        """Network deletion moved to neutron; always 410 Gone."""
        raise exc.HTTPGone()

    @wsgi.expected_errors(410)
    def create(self, req, body):
        """Network creation moved to neutron; always 410 Gone."""
        raise exc.HTTPGone()
| {
"content_hash": "8c7303d1191a3dce7d2caa76ba51cf14",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 78,
"avg_line_length": 33.726190476190474,
"alnum_prop": 0.644546417225556,
"repo_name": "rahulunair/nova",
"id": "dee7212b50963ab4aeab5d8a7bb9a0a65d2b8faf",
"size": "3469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/tenant_networks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "3325"
},
{
"name": "Python",
"bytes": "22804450"
},
{
"name": "Shell",
"bytes": "41649"
},
{
"name": "Smarty",
"bytes": "472764"
}
],
"symlink_target": ""
} |
from robot.utils import Utf8Reader
NBSP = u'\xA0'


class TsvReader(object):
    """Parses tab-separated test data and feeds its rows to a populator."""

    def read(self, tsvfile, populator):
        """Stream ``tsvfile`` row by row into ``populator``.

        Rows are ignored until a header row (first cell starting with '*')
        opens a table that the populator accepts.
        """
        in_table = False
        for raw_line in Utf8Reader(tsvfile).readlines():
            line = self._process_row(raw_line)
            cells = [self._process_cell(cell) for cell in self.split_row(line)]
            is_header = cells and cells[0].strip().startswith('*')
            if is_header and populator.start_table(
                    [cell.replace('*', '') for cell in cells]):
                in_table = True
            elif in_table:
                populator.add(cells)
        populator.eof()

    def _process_row(self, row):
        # Non-breaking spaces count as plain spaces; trailing whitespace is
        # insignificant in TSV data.
        return row.replace(NBSP, ' ').rstrip()

    @classmethod
    def split_row(cls, row):
        """Split one physical row into its raw cell strings."""
        return row.split('\t')

    def _process_cell(self, cell):
        # Unwrap Excel-style quoting: "a ""b""" -> a "b".  A lone '"' is left
        # untouched (len must exceed 1).
        quoted = len(cell) > 1 and cell.startswith('"') and cell.endswith('"')
        if quoted:
            return cell[1:-1].replace('""', '"')
        return cell
| {
"content_hash": "2760b2a7f55039ace0702805b35ed88d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 79,
"avg_line_length": 28.636363636363637,
"alnum_prop": 0.5291005291005291,
"repo_name": "joongh/robotframework",
"id": "5f3217e18ecb951620903966a311dd74008478b5",
"size": "1589",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/robot/parsing/tsvreader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "245"
},
{
"name": "CSS",
"bytes": "23490"
},
{
"name": "HTML",
"bytes": "140926"
},
{
"name": "Java",
"bytes": "57497"
},
{
"name": "JavaScript",
"bytes": "160797"
},
{
"name": "Python",
"bytes": "2209566"
},
{
"name": "RobotFramework",
"bytes": "2048926"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Build
@admin.register(Build)
class BuildAdmin(admin.ModelAdmin):
    """Default admin configuration; registers the Build model with the admin site."""
    pass
"content_hash": "fffb4b6ca073ddaebefd42b5bdfcacb8",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 35,
"avg_line_length": 20.666666666666668,
"alnum_prop": 0.8145161290322581,
"repo_name": "ajaybhatia/ypg-odm-project",
"id": "2e74793c8764a9033ee2b1c2860b5707a58ae87f",
"size": "124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/odm_builder/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3409"
},
{
"name": "HTML",
"bytes": "26413"
},
{
"name": "JavaScript",
"bytes": "363"
},
{
"name": "Python",
"bytes": "248932"
},
{
"name": "Shell",
"bytes": "1939"
}
],
"symlink_target": ""
} |
"""
Format localization for English (British) language
"""
DATE_FORMAT = "j F Y"
DATETIME_FORMAT = "j F Y, g:i a"
SHORT_DATE_FORMAT = "d/m/Y"
SHORT_DATETIME_FORMAT = "d/m/Y, H:i"
TIME_FORMAT = "g:i a"
SHORT_TIME_FORMAT = "H:i"
| {
"content_hash": "433fba2534c88d0b00f849559b9a171c",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 50,
"avg_line_length": 22.8,
"alnum_prop": 0.6491228070175439,
"repo_name": "tovmeod/anaf",
"id": "b9f8af16d0ca5cd61c6cd82c9453ed30c1238b09",
"size": "228",
"binary": false,
"copies": "1",
"ref": "refs/heads/drf",
"path": "formats/en/formats.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "400736"
},
{
"name": "HTML",
"bytes": "1512873"
},
{
"name": "JavaScript",
"bytes": "2136807"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "2045934"
},
{
"name": "Shell",
"bytes": "18005"
},
{
"name": "TSQL",
"bytes": "147855"
}
],
"symlink_target": ""
} |
import end
# NOTE: negative test fixture for the ``end`` checker — the module-level
# ``end`` closing the class block is deliberately absent, so the checker
# must reject this file.  Do not "fix" the missing marker.
class Class(object):
    def __init__(self):
        pass
end
# end is missing
| {
"content_hash": "a92eba59ede788ac236ce14cf59a140a",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 23,
"avg_line_length": 13.571428571428571,
"alnum_prop": 0.5789473684210527,
"repo_name": "nya3jp/end",
"id": "b0fbc95e79129b7b4ff26ad91f4e5a5e1be98723",
"size": "95",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/cases/class_ng.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14100"
}
],
"symlink_target": ""
} |
import networkx as nx
import random
from examples.sollini.pm_bigram_sorted_042521 import freqsort_data as tamil_bigram
from examples.sollini.pm_unigram_sorted_050621 import freqsort_data as tamil_unigram
from tamil.utf8 import get_letters
# Plan
# 1. Build bi-gram trellis
# 2. Run Viterbi algorithm
def build_network():
    """Build a directed letter graph from the Tamil bigram frequency table.

    Each record in ``tamil_bigram`` carries a two-letter bigram string and its
    frequency; the bigram's letters become the edge endpoints and the
    frequency becomes the edge weight.  Node/edge counts are printed as a
    progress report and the graph is returned.
    """
    graph = nx.DiGraph()
    for record in tamil_bigram:
        bigram, weight = record[0], record[1]
        # get_letters() splits the string into Tamil letters; a bigram record
        # is assumed to contain exactly two of them.
        first, second = get_letters(bigram)
        graph.add_edge(first, second, weight=weight)
    # Removed the commented-out pydot/graphviz plotting experiments that were
    # left here; re-add deliberately if DOT export is ever needed.
    print(graph.number_of_nodes(), graph.number_of_edges())
    return graph
def print_word(network,length):
    """Sample a pseudo-word of ``length`` letters and print it '|'-separated.

    The first letter is drawn from the unigram frequency table; each following
    letter is drawn from the bigrams that start with the previous letter,
    weighted by bigram frequency.

    NOTE(review): the ``network`` parameter is never used in this body —
    confirm whether the graph was meant to drive the sampling.
    NOTE(review): the unigram/bigram lookup tables are rebuilt on every call;
    hoisting them to module level would avoid the repeated work.
    """
    word = []
    uni_letters = []
    uni_prob = []
    big_letters = []
    big_prob = []
    big = {}
    # Flatten the unigram table into parallel population/weight lists.
    for l,p in tamil_unigram:
        uni_prob.append(p)
        uni_letters.append(l)
    # Same for bigrams, plus a bigram -> frequency map for weight lookups.
    for l,p in tamil_bigram:
        big_letters.append(l)
        big_prob.append(p)
        big[l] = p
    # Seed the word with one frequency-weighted unigram draw.
    word.append( random.choices(population=uni_letters,weights=uni_prob,k=1)[0] )
    length -= 1
    while length > 0:
        # Restrict to bigrams continuing from the last letter.
        # NOTE(review): if no bigram starts with that letter, random.choices
        # gets an empty population and raises — confirm the data rules this out.
        startswith_letters = list(filter( lambda x: x.startswith( word[-1]) ,big_letters))
        startswith_prob = [big[l] for l in startswith_letters]
        big_choice = random.choices(population=startswith_letters,weights=startswith_prob,k=1)[0]
        # Keep only the second letter of the chosen bigram.
        word.append( get_letters(big_choice)[-1] )
        length -= 1
    w = "|".join(word)
    print(w)
    return w
# Demo driver: build the bigram graph once, then print 100 random
# pseudo-words of 1-10 letters each.
network = build_network()
for i in range(100):
    print_word(network,random.randint(1,10))
| {
"content_hash": "03dce6a645dd9f534140921c7725a823",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 97,
"avg_line_length": 32.41509433962264,
"alnum_prop": 0.639697322467986,
"repo_name": "Ezhil-Language-Foundation/open-tamil",
"id": "f80c8e28fa761d2912a26b14ef5f864ff03c7af4",
"size": "1855",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "examples/sollini/example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "14505"
},
{
"name": "CSS",
"bytes": "44888"
},
{
"name": "Dockerfile",
"bytes": "104"
},
{
"name": "HTML",
"bytes": "231235"
},
{
"name": "Java",
"bytes": "169963"
},
{
"name": "JavaScript",
"bytes": "5380069"
},
{
"name": "Less",
"bytes": "177531"
},
{
"name": "MATLAB",
"bytes": "1120"
},
{
"name": "Makefile",
"bytes": "952"
},
{
"name": "Python",
"bytes": "1869433"
},
{
"name": "Ruby",
"bytes": "1959"
},
{
"name": "Rust",
"bytes": "27119"
},
{
"name": "Shell",
"bytes": "2443"
}
],
"symlink_target": ""
} |
import sphinx_rtd_theme
from recommonmark.parser import CommonMarkParser
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
# Register PHP lexers for ``php`` / ``php-annotations`` code blocks:
# startinline=True highlights snippets without an opening <?php tag,
# linenos=1 turns on line numbering.
lexers['php'] = PhpLexer(startinline=True, linenos=1)
lexers['php-annotations'] = PhpLexer(startinline=True, linenos=1)
# Unprefixed directives and roles default to the PHP domain.
primary_domain = 'php'
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []  # no Sphinx extensions needed for this project

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# Markdown sources are parsed with recommonmark's CommonMark parser;
# reST files use the builtin parser.
source_parsers = {
    '.md': CommonMarkParser,
}
source_suffix = ['.rst','.md']
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'Instagram PHP'
copyright = '2016, Marvin Osswald'
author = 'Marvin Osswald'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# 'sphinx' is the classic Sphinx-docs colour scheme.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'Instagram PHP v1.0.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'InstagramPHPdoc'
# -- Options for LaTeX output ---------------------------------------------
# All LaTeX-output tuning is left at Sphinx defaults; the commented keys
# below show the knobs available.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'InstagramPHP.tex', 'Instagram PHP Documentation',
     'Marvin Osswald', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'instagramphp', 'Instagram PHP Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category).
# NOTE(review): the description below is still the Sphinx template
# placeholder text; consider replacing it with a real one-liner.
texinfo_documents = [
    (master_doc, 'InstagramPHP', 'Instagram PHP Documentation',
     author, 'InstagramPHP', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| {
"content_hash": "a9375c478282462f18862799f67e9df8",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 80,
"avg_line_length": 29.092307692307692,
"alnum_prop": 0.6925436277102063,
"repo_name": "marvinosswald/instagram-php",
"id": "f11ab56939214a2f1dc4032cce7c911a9647abf3",
"size": "10212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "29993"
}
],
"symlink_target": ""
} |
""" An ipython profile for zope and plone.
Some ideas stolen from http://www.tomster.org.
Authors
-------
- Stefan Eletzhofer <stefan.eletzhofer@inquant.de>
"""
# File: ipy_profile_zope.py
#
# Copyright (c) InQuant GmbH
#
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
from IPython import ipapi
from IPython import Release
from types import StringType
import sys
import os
import textwrap
# The import below effectively obsoletes your old-style ipythonrc[.ini],
# so consider yourself warned!
# import ipy_defaults
# Unique sentinel so getattr can distinguish "missing" from a stored None.
_marker = []


def shasattr(obj, attr, acquire=False):
    """Archetypes-style hasattr: True if *attr* exists on *obj*.

    Unless *acquire* is set, the check runs against ``obj.aq_base`` so
    that Zope acquisition cannot make attributes appear present.
    (See Archetypes/utils.py.)
    """
    target = obj if acquire else obj.aq_base
    return getattr(target, attr, _marker) is not _marker
class ZopeDebug(object):
    """Bootstrap a debug session against a running Zope/Plone instance.

    Reads INSTANCE_HOME / CONFIG_FILE from the environment, configures
    Zope, wraps the app object in a fake request, installs a permissive
    security policy, and exposes shell helpers (cd/ls/pwd/su/commit/...).
    """

    def __init__(self):
        self.instancehome = os.environ.get( "INSTANCE_HOME" )

        # CONFIG_FILE wins; otherwise derive etc/zope.conf from INSTANCE_HOME.
        configfile = os.environ.get( "CONFIG_FILE" )
        if configfile is None and self.instancehome is not None:
            configfile = os.path.join( self.instancehome, "etc", "zope.conf" )
        if configfile is None:
            raise RuntimeError( "CONFIG_FILE env not set" )
        print "CONFIG_FILE=", configfile
        print "INSTANCE_HOME=", self.instancehome
        self.configfile = configfile

        # Newer Zope exposes Zope2; fall back to the legacy top-level module.
        try:
            from Zope2 import configure
        except ImportError:
            from Zope import configure

        configure( configfile )

        try:
            import Zope2
            app = Zope2.app()
        except ImportError:
            import Zope
            app = Zope.app()

        # Wrap in a fake REQUEST so request-dependent code works in the shell.
        from Testing.makerequest import makerequest
        self.app = makerequest( app )

        try:
            self._make_permissive()
            print "Permissive security installed"
        except:
            print "Permissive security NOT installed"

        # "Current directory" for the cd/ls/pwd helpers.
        # NOTE(review): self.portal raises KeyError when no Plone site
        # exists, so this line would raise rather than fall back to app.
        self._pwd = self.portal or self.app

        # Register the portal as the active local site so local component
        # lookups resolve against it instead of the global registry.
        try:
            from zope.component import getSiteManager
            from zope.component import getGlobalSiteManager
            from zope.app.component.hooks import setSite

            if self.portal is not None:
                setSite( self.portal )

                gsm = getGlobalSiteManager()
                sm = getSiteManager()
                if sm is gsm:
                    print "ERROR SETTING SITE!"
        except:
            pass

    @property
    def utils(self):
        """Ad-hoc namespace object bundling the interactive helpers."""
        class Utils(object):
            commit = self.commit
            sync = self.sync
            objectInfo = self.objectInfo
            ls = self.ls
            pwd = self.pwd
            cd = self.cd
            su = self.su
            getCatalogInfo = self.getCatalogInfo

            @property
            def cwd(self):
                # Convenience alias for the current "directory" object.
                return self.pwd()

        return Utils()

    @property
    def namespace(self):
        """Names injected into the IPython user namespace by main()."""
        return dict( utils=self.utils, app=self.app, portal=self.portal )

    @property
    def portal(self):
        """First Plone Site in the Zope root; raises KeyError if none."""
        portals = self.app.objectValues( "Plone Site" )
        if len(portals):
            return portals[0]
        else:
            raise KeyError( "No Plone Site found.")

    def pwd(self):
        # Current object for the cd/ls helpers.
        return self._pwd

    def _make_permissive(self):
        """
        Make a permissive security manager with all rights. Hell,
        we're developers, aren't we? Security is for whimps. :)
        """
        from Products.CMFCore.tests.base.security import PermissiveSecurityPolicy
        import AccessControl
        from AccessControl.SecurityManagement import newSecurityManager
        from AccessControl.SecurityManager import setSecurityPolicy

        _policy = PermissiveSecurityPolicy()
        self.oldpolicy = setSecurityPolicy(_policy)
        newSecurityManager(None, AccessControl.User.system)

    def su(self, username):
        """ Change to named user.
        """
        # TODO Make it easy to change back to permissive security.
        user = self.portal.acl_users.getUser(username)
        if not user:
            print "Can't find %s in %s" % (username, self.portal.acl_users)
            return

        from AccessControl import ZopeSecurityPolicy
        import AccessControl
        from AccessControl.SecurityManagement import newSecurityManager, getSecurityManager
        from AccessControl.SecurityManager import setSecurityPolicy

        _policy = ZopeSecurityPolicy
        self.oldpolicy = setSecurityPolicy(_policy)
        # NOTE(review): wrapped_user is computed but newSecurityManager is
        # called with the *unwrapped* user -- confirm whether acquisition
        # wrapping was intended here.
        wrapped_user = user.__of__(self.portal.acl_users)
        newSecurityManager(None, user)
        print 'User changed.'
        return getSecurityManager().getUser()

    def getCatalogInfo(self, obj=None, catalog='portal_catalog', query=None, sort_on='created', sort_order='reverse' ):
        """ Inspect portal_catalog. Pass an object or object id for a
        default query on that object, or pass an explicit query.
        """
        if obj and query:
            print "Ignoring %s, using query." % obj

        catalog = self.portal.get(catalog)
        if not catalog:
            return 'No catalog'
        indexes = catalog._catalog.indexes

        if not query:
            # Build a default query from whatever indexes the catalog has
            # for this object (path, getID, UID).
            if type(obj) is StringType:
                cwd = self.pwd()
                obj = cwd.unrestrictedTraverse( obj )
            # If the default in the signature is mutable, its value will
            # persist across invocations.
            query = {}
            if indexes.get('path'):
                from string import join
                path = join(obj.getPhysicalPath(), '/')
                query.update({'path': path})
            if indexes.get('getID'):
                query.update({'getID': obj.id, })
            if indexes.get('UID') and shasattr(obj, 'UID'):
                query.update({'UID': obj.UID(), })
        if indexes.get(sort_on):
            query.update({'sort_on': sort_on, 'sort_order': sort_order})
        if not query:
            return 'Empty query'
        results = catalog(**query)

        # Expand each brain into its path, metadata and index data.
        result_info = []
        for r in results:
            rid = r.getRID()
            if rid:
                result_info.append(
                    {'path': catalog.getpath(rid),
                     'metadata': catalog.getMetadataForRID(rid),
                     'indexes': catalog.getIndexDataForRID(rid), }
                )
            else:
                result_info.append({'missing': rid})

        if len(result_info) == 1:
            return result_info[0]
        return result_info

    def commit(self):
        """
        Commit the transaction.
        """
        # Newer Zope ships the transaction module; old Zope used the
        # get_transaction() builtin.
        try:
            import transaction
            transaction.get().commit()
        except ImportError:
            get_transaction().commit()

    def sync(self):
        """
        Sync the app's view of the zodb.
        """
        self.app._p_jar.sync()

    def objectInfo( self, o ):
        """
        Return a descriptive string of an object
        """
        Title = ""
        t = getattr( o, 'Title', None )
        if t:
            Title = t()

        return {'id': o.getId(),
                'Title': Title,
                'portal_type': getattr( o, 'portal_type', o.meta_type),
                'folderish': o.isPrincipiaFolderish
                }

    def cd( self, path ):
        """
        Change current dir to a specific folder.

        cd( ".." )
        cd( "/plone/Members/admin" )
        cd( portal.Members.admin )
        etc.
        """
        # Accept an object as well as a path string.
        if type(path) is not StringType:
            path = '/'.join(path.getPhysicalPath())
        cwd = self.pwd()
        x = cwd.unrestrictedTraverse( path )
        if x is None:
            raise KeyError( "Can't cd to %s" % path )

        print "%s -> %s" % ( self.pwd().getId(), x.getId() )
        self._pwd = x

    def ls( self, x=None ):
        """
        List object(s)
        """
        # Accept a path string, an object, or nothing (= current dir).
        if type(x) is StringType:
            cwd = self.pwd()
            x = cwd.unrestrictedTraverse( x )
        if x is None:
            x = self.pwd()

        if x.isPrincipiaFolderish:
            return [self.objectInfo(o) for id, o in x.objectItems()]
        else:
            return self.objectInfo( x )
# Populated by main(); holds the ZopeDebug helper for this shell session.
zope_debug = None

def ipy_set_trace():
    # Replacement for pdb.set_trace that drops into IPython's debugger.
    import IPython; IPython.Debugger.Pdb().set_trace()
def main():
    """Profile entry point: boot Zope, then wire helpers into IPython."""
    global zope_debug
    ip = ipapi.get()
    o = ip.options
    # autocall to "full" mode (smart mode is default, I like full mode)
    SOFTWARE_HOME = os.environ.get( "SOFTWARE_HOME" )
    sys.path.append( SOFTWARE_HOME )
    print "SOFTWARE_HOME=%s\n" % SOFTWARE_HOME

    zope_debug = ZopeDebug()

    # <HACK ALERT>
    # Globally redirect pdb.set_trace to the IPython debugger.
    import pdb;
    pdb.set_trace = ipy_set_trace
    # </HACK ALERT>

    # I like my banner minimal.
    o.banner = "ZOPE Py %s IPy %s\n" % (sys.version.split('\n')[0],Release.version)

    # Greeting lists the public helper names available under `utils`.
    print textwrap.dedent("""\
ZOPE mode iPython shell.

Bound names:
 app
 portal
 utils.{ %s }

Uses the $SOFTWARE_HOME and $CONFIG_FILE environment
variables.
""" % ( ",".join([ x for x in dir(zope_debug.utils) if not x.startswith("_") ] ) ) )

    ip.user_ns.update( zope_debug.namespace )

# Run at import time: this module is loaded as an IPython profile.
main()
# vim: set ft=python ts=4 sw=4 expandtab :
| {
"content_hash": "b54798b5ada42db1665eecc5752900b0",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 119,
"avg_line_length": 28.90282131661442,
"alnum_prop": 0.5594360086767896,
"repo_name": "yongshengwang/hue",
"id": "172853c677298f0644f7729ddac38d4e2b43cff6",
"size": "9244",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "build/env/lib/python2.7/site-packages/ipython-0.10-py2.7.egg/IPython/Extensions/ipy_profile_zope.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13685"
},
{
"name": "C",
"bytes": "2479183"
},
{
"name": "C++",
"bytes": "177090"
},
{
"name": "CSS",
"bytes": "1133541"
},
{
"name": "Emacs Lisp",
"bytes": "12145"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Groff",
"bytes": "28547"
},
{
"name": "HTML",
"bytes": "26230478"
},
{
"name": "Java",
"bytes": "133906"
},
{
"name": "JavaScript",
"bytes": "9757355"
},
{
"name": "Makefile",
"bytes": "94066"
},
{
"name": "Mako",
"bytes": "2185828"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "Perl",
"bytes": "138710"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "88056623"
},
{
"name": "Scala",
"bytes": "191428"
},
{
"name": "Shell",
"bytes": "59514"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TeX",
"bytes": "126420"
},
{
"name": "Thrift",
"bytes": "101931"
},
{
"name": "VimL",
"bytes": "1530"
},
{
"name": "XSLT",
"bytes": "357625"
}
],
"symlink_target": ""
} |
from PyDictionary import PyDictionary
import random
def print_options():
    """Show the menu of commands available while adding words."""
    menu = (
        'a -> add word',
        'l -> list all the words',
        's -> stop adding words and test me!',
    )
    for entry in menu:
        print(entry)
def get_defintions(d, word):
    """Return a list of "<part-of-speech>: <meaning>" strings for *word*.

    d: dictionary backend exposing ``meaning(word)`` that returns either
       None or a mapping of part-of-speech -> list of meanings
       (PyDictionary-compatible).
    Returns an empty list when the word has no meaning.

    (Name kept as-is, typo included, for interface compatibility.)
    """
    print("getting definition")
    meaning = d.meaning(word)
    if meaning is None:  # idiomatic identity check instead of `== None`
        return []
    # Flatten {pos: [m1, m2, ...]} into ["pos: m1", "pos: m2", ...].
    return [pos + ": " + mean
            for pos, means in meaning.items()
            for mean in means]
def initialize_flashcard():
    """Create an empty word -> definition store."""
    return {}
def check_word_in_vocabs(vocab_dict, word):
    """Return True when *word* already has an entry in *vocab_dict*."""
    is_known = word in vocab_dict
    return is_known
def add_word_definition(vocab_dict, word, definition):
    """Insert or overwrite the entry for *word* in *vocab_dict* (in place)."""
    vocab_dict.update({word: definition})
def beautify_vocabs_list(vocab_dict):
    """Render each entry as "word - meaning", preserving insertion order."""
    return [word + " - " + meaning for word, meaning in vocab_dict.items()]
def test_option():
    """Print the quiz-mode menu and return the user's raw choice."""
    for line in ('Here are the options:',
                 'r -> randomized',
                 'a -> alphabetized',
                 'q -> quit'):
        print(line)
    return input("Pick an option: ")
def test_helper(o_list, vocab_dict):
    """Quiz the user on every word in *o_list* until all are answered.

    Words the user misses are re-queued at the back; *o_list* is
    consumed (emptied) in place.
    """
    while o_list:
        word = o_list.pop(0)
        print('Define this: ' + word + '\n\n')
        input('Press enter when ready for the definition...')
        print('Here is the definition:\n' + vocab_dict[word])
        ans = input('If you were correct, type y. Otherwise, type n: ')
        if ans == 'n':
            # Missed -- put it back at the end for another round.
            o_list.append(word)
def test_user(vocab_dict):
    """Run the interactive quiz loop until the user quits.

    Repeatedly asks for an ordering (alphabetized / randomized / quit)
    and quizzes the user over every word via test_helper.

    Fixes: removed the unused `copy_dict` local, the redundant
    `list(words)` re-copy, and replaced `while (1)` with `while True`.
    """
    while True:
        opt = test_option()
        # Fresh copy of the keys each round -- test_helper consumes its list.
        words = list(vocab_dict)
        if opt == 'a':
            words.sort()
            test_helper(words, vocab_dict)
        elif opt == 'r':
            random.shuffle(words)
            test_helper(words, vocab_dict)
        elif opt == 'q':
            print("Thank you for using the flashcard app!")
            break
| {
"content_hash": "2bcb3d3a36169f650ca68d5ffb858b82",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 71,
"avg_line_length": 28.794117647058822,
"alnum_prop": 0.5827374872318692,
"repo_name": "kevink97/Teaching-Materials",
"id": "689ee6a7587ec56776830204b3b45cee65c817b8",
"size": "2092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutor_hs/les5/flash_lib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3217"
},
{
"name": "JavaScript",
"bytes": "9100"
},
{
"name": "Python",
"bytes": "41042"
},
{
"name": "TeX",
"bytes": "47359"
}
],
"symlink_target": ""
} |
from unittest import TestCase, mock
from data_vault import get_synced_params, VaultEnvironment
from keepercommander.commands import register
from keepercommander.proto import APIRequest_pb2, record_pb2
from keepercommander import utils
vault_env = VaultEnvironment()
class TestRegister(TestCase):
    """Tests for the share-record / share-folder commands.

    The API transport is mocked: each test pushes the REST endpoint names
    it expects onto ``expected_commands`` and the mocked transport pops
    them off as calls arrive, raising on anything unexpected.
    """

    # Class-level queue of REST endpoint names the current test expects.
    expected_commands = []

    def setUp(self):
        # NOTE(review): the second patch overwrites self.communicate_mock,
        # dropping the reference to the run_command mock (both patches stay
        # active and are undone by stopall in tearDown).
        self.communicate_mock = mock.patch('keepercommander.api.run_command').start()
        self.communicate_mock.side_effect = TestRegister.communicate_success
        self.communicate_mock = mock.patch('keepercommander.api.communicate_rest').start()
        self.communicate_mock.side_effect = TestRegister.communicate_rest_success
        TestRegister.expected_commands.clear()

    def tearDown(self):
        mock.patch.stopall()

    def test_share_record(self):
        params = get_synced_params()
        # Pick any record the current user is allowed to re-share.
        record_uid = next(iter([x['record_uid'] for x in params.meta_data_cache.values() if x['can_share']]))

        cmd = register.ShareRecordCommand()

        self.record_share_mock = mock.patch('keepercommander.api.get_record_shares').start()

        # Case 1+2: record has no existing shares.
        def not_shared(params, record_uids, is_share_admin):
            pass
        self.record_share_mock.side_effect = not_shared

        # Granting to a new user issues one records_share_update call.
        TestRegister.expected_commands.extend(['records_share_update'])
        cmd.execute(params, email=['user2@keepersecurity.com'], action='grant', can_share=False, can_edit=True, record=record_uid)
        self.assertEqual(len(TestRegister.expected_commands), 0)

        # Transferring ownership also issues records_share_update.
        TestRegister.expected_commands.extend(['records_share_update'])
        cmd.execute(params, email=['user2@keepersecurity.com'], action='owner', can_share=False, can_edit=True, record=record_uid)
        self.assertEqual(len(TestRegister.expected_commands), 0)

        # Case 3: record already shared with user2.
        def shared(params, record_uids, is_share_admin):
            return [{
                'shares': {
                    'user_permissions': [
                        {
                            'username': params.user,
                            'owner': True,
                        },
                        {
                            'username': 'user2@keepersecurity.com',
                            'owner': False,
                            'shareable': False,
                            'editable': False
                        }
                    ]
                }
            }]
        self.record_share_mock.side_effect = shared

        # Revoking an existing share issues records_share_update as well.
        TestRegister.expected_commands.extend(['records_share_update'])
        cmd.execute(params, email=['user2@keepersecurity.com'], action='revoke', record=record_uid)
        self.assertEqual(len(TestRegister.expected_commands), 0)

    def test_share_folder(self):
        params = get_synced_params()
        shared_folder_uid = next(iter([x['shared_folder_uid'] for x in params.shared_folder_cache.values()]))

        cmd = register.ShareFolderCommand()

        # Granting and revoking folder access each issue one
        # shared_folder_update_v3 call.
        TestRegister.expected_commands.extend(['shared_folder_update_v3'])
        cmd.execute(params, action='grant', user=['user2@keepersecurity.com'], manage_records=True, manage_users=False, folder=shared_folder_uid)
        self.assertEqual(len(TestRegister.expected_commands), 0)

        TestRegister.expected_commands.extend(['shared_folder_update_v3'])
        cmd.execute(params, action='revoke', user=['user2@keepersecurity.com'], folder=shared_folder_uid)
        self.assertEqual(len(TestRegister.expected_commands), 0)

    @staticmethod
    def record_share_rq_rs(rq):
        # Build a per-record 'success' status echoing the request's uid/user.
        status = record_pb2.SharedRecordStatus()
        status.recordUid = rq.recordUid
        status.status = 'success'
        status.username = rq.toUsername
        return status

    @staticmethod
    def communicate_rest_success(params, request, endpoint, **kwargs):
        # Mocked REST transport: answer key-lookup and share-update
        # endpoints, and fail any call not queued in expected_commands.
        if 'rs_type' in kwargs:
            rs = kwargs['rs_type']()
        else:
            rs = None
        _, _, command = endpoint.rpartition('/')
        if command == 'get_public_keys':
            # Hand every requested user the test vault's public key.
            for x in request.usernames:
                key_response = APIRequest_pb2.PublicKeyResponse()
                key_response.username = x
                key_response.publicKey = utils.base64_url_decode(vault_env.encoded_public_key)
                rs.keyResponses.append(key_response)
            return rs
        if command == 'records_share_update':
            rs.addSharedRecordStatus.extend((TestRegister.record_share_rq_rs(x) for x in request.addSharedRecord))
            rs.addSharedRecordStatus.extend((TestRegister.record_share_rq_rs(x) for x in request.updateSharedRecord))
            rs.removeSharedRecordStatus.extend((TestRegister.record_share_rq_rs(x) for x in request.removeSharedRecord))
        cmd = TestRegister.expected_commands.pop(0)
        if cmd == command:
            return rs
        raise Exception()

    @staticmethod
    def communicate_success(params, request):
        # Stub for legacy (v1) run_command calls: none are expected in
        # these tests, so any call fails.  NOTE(review): `rs` is built but
        # never returned -- the unconditional raise makes it dead code.
        rs = {
            'result': 'success',
            'result_code': '',
            'message': ''
        }
        raise Exception()
| {
"content_hash": "e225bcd6943d2547a0ebefa1d21a1455",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 145,
"avg_line_length": 41.122950819672134,
"alnum_prop": 0.6169025313932629,
"repo_name": "Keeper-Security/Commander",
"id": "2e3817762fb5720969a1971140fe2a2045725d0a",
"size": "5017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unit-tests/test_command_register.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2274231"
},
{
"name": "Shell",
"bytes": "3388"
}
],
"symlink_target": ""
} |
from f5_cccl.resource.ltm.pool import IcrPool
from f5_cccl.resource.ltm.virtual import VirtualServer
from f5_cccl.resource.ltm.node import Node
from f5_cccl.resource.ltm.app_service import IcrApplicationService
def test_bigip_refresh(bigip_proxy):
    """Test BIG-IP refresh function.

    Builds the expected 'test'-partition resources from the mock device's
    fixture data, refreshes the proxy, and checks the proxy caches match
    (and stop matching after a local mutation).
    # assumes bigip_proxy is a fixture whose mgmt_root() exposes
    # bigip_data and mocked tm.* collections -- see conftest.
    """
    big_ip = bigip_proxy.mgmt_root()

    # Expected state: fixture resources restricted to the 'test' partition.
    test_pools = [
        IcrPool(**p) for p in big_ip.bigip_data['pools']
        if p['partition'] == 'test'
    ]
    test_virtuals = [
        VirtualServer(**v) for v in big_ip.bigip_data['virtuals']
        if v['partition'] == 'test'
    ]
    test_iapps = [
        IcrApplicationService(**i) for i in big_ip.bigip_data['iapps']
        if i['partition'] == 'test'
    ]
    test_nodes = [
        Node(**n) for n in big_ip.bigip_data['nodes']
        if n['partition'] == 'test'
    ]

    # refresh the BIG-IP state
    bigip_proxy.refresh()

    # verify pools and pool members
    assert big_ip.tm.ltm.pools.get_collection.called
    assert len(bigip_proxy._pools) == 1
    assert len(bigip_proxy._pools) == len(test_pools)
    for pool in test_pools:
        assert bigip_proxy._pools[pool.name] == pool
        # Make a change, pools will not be equal
        pool._data['loadBalancingMode'] = 'Not a valid LB mode'
        assert bigip_proxy._pools[pool.name] != pool

    # verify virtual servers
    assert big_ip.tm.ltm.virtuals.get_collection.called
    assert len(bigip_proxy._virtuals) == 1
    assert len(bigip_proxy._virtuals) == len(test_virtuals)
    for v in test_virtuals:
        assert bigip_proxy._virtuals[v.name] == v
        # Make a change, virtuals will not be equal
        v._data['partition'] = 'NoPartition'
        assert bigip_proxy._virtuals[v.name] != v

    # verify application services
    assert big_ip.tm.sys.application.services.get_collection.called
    assert len(bigip_proxy._iapps) == 2
    assert len(bigip_proxy._iapps) == len(test_iapps)
    for i in test_iapps:
        assert bigip_proxy._iapps[i.name] == i
        # Make a change, iapps will not be equal
        i._data['template'] = '/Common/NoTemplate'
        assert bigip_proxy._iapps[i.name] != i

    # verify nodes
    assert big_ip.tm.ltm.nodes.get_collection.called
    assert len(bigip_proxy._nodes) == 4
    assert len(bigip_proxy._nodes) == len(test_nodes)
    for n in test_nodes:
        assert bigip_proxy._nodes[n.name] == n
def test_bigip_properties(bigip_proxy):
    """Test BIG-IP properties function.

    Checks the get_pools/get_virtuals accessors against fixture data for
    the 'test' partition, then smoke-calls the monitor accessors.
    """
    big_ip = bigip_proxy

    test_pools = [
        IcrPool(**p) for p in big_ip.mgmt_root().bigip_data['pools']
        if p['partition'] == 'test'
    ]
    test_virtuals = [
        VirtualServer(**v) for v in big_ip.mgmt_root().bigip_data['virtuals']
        if v['partition'] == 'test'
    ]

    # refresh the BIG-IP state
    big_ip.refresh()

    assert len(big_ip.get_pools()) == len(test_pools)
    for p in test_pools:
        assert big_ip._pools[p.name] == p

    assert len(big_ip.get_virtuals()) == len(test_virtuals)
    for v in test_virtuals:
        assert big_ip._virtuals[v.name] == v

    # NOTE(review): results below are unused -- these calls only verify the
    # monitor accessors do not raise.  Consider asserting on their contents.
    http_hc = big_ip.get_http_monitors()
    https_hc = big_ip.get_https_monitors()
    tcp_hc = big_ip.get_tcp_monitors()
    icmp_hc = big_ip.get_icmp_monitors()
| {
"content_hash": "af5a1b17ec37b0dd3f05b6c6811590f8",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 77,
"avg_line_length": 32.43,
"alnum_prop": 0.6244218316373727,
"repo_name": "richbrowne/f5-cccl",
"id": "82c6a0c4194245551ec10e580cdf39eb0d09f377",
"size": "3846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "f5_cccl/test/test_bigip.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "342330"
},
{
"name": "Shell",
"bytes": "2369"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~ Copyright (C) 2002-2004 TechGame Networks, LLC.
##~
##~ This library is free software; you can redistribute it and/or
##~ modify it under the terms of the BSD style License as found in the
##~ LICENSE file included with this distribution.
##
## Modified by Dirk Holtwick <holtwick@web.de>, 2007-2008
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""CSS-2.1 parser.
The CSS 2.1 Specification this parser was derived from can be found at http://www.w3.org/TR/CSS21/
Primary Classes:
* CSSParser
Parses CSS source forms into results using a Builder Pattern. Must
provide concrete implemenation of CSSBuilderAbstract.
* CSSBuilderAbstract
Outlines the interface between CSSParser and it's rule-builder.
Compose CSSParser with a concrete implementation of the builder to get
usable results from the CSS parser.
Dependencies:
python 2.3 (or greater)
re
"""
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Imports
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import re
from . import cssSpecial
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Definitions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def isAtRuleIdent(src, ident):
    """Match *src* against '@<ident>' plus optional trailing whitespace.

    Returns the re.Match object (truthy) or None, like re.match.
    """
    pattern = r'^@' + ident + r'\s*'
    return re.match(pattern, src)
def stripAtRuleIdent(src):
    """Remove a leading '@rule-name' (and trailing whitespace) from *src*."""
    leading_at_ident = r'^@[a-z\-]+\s*'
    return re.sub(leading_at_ident, '', src)
class CSSSelectorAbstract(object):
    """Outlines the interface between CSSParser and it's rule-builder for selectors.

    CSSBuilderAbstract.selector and CSSBuilderAbstract.combineSelectors must
    return concrete implementations of this abstract.

    See css.CSSMutableSelector for an example implementation.
    """

    def addHashId(self, hashId):
        # '#id' component of a selector.
        raise NotImplementedError('Subclass responsibility')

    def addClass(self, class_):
        # '.class' component of a selector.
        raise NotImplementedError('Subclass responsibility')

    def addAttribute(self, attrName):
        # '[attr]' attribute-presence test.
        raise NotImplementedError('Subclass responsibility')

    def addAttributeOperation(self, attrName, op, attrValue):
        # '[attr <op> value]' attribute comparison.
        raise NotImplementedError('Subclass responsibility')

    def addPseudo(self, name):
        # ':name' pseudo-class.
        raise NotImplementedError('Subclass responsibility')

    def addPseudoFunction(self, name, value):
        # ':name(value)' functional pseudo-class.
        raise NotImplementedError('Subclass responsibility')
class CSSBuilderAbstract(object):
    """Outlines the interface between CSSParser and it's rule-builder. Compose
    CSSParser with a concrete implementation of the builder to get usable
    results from the CSS parser.

    See css.CSSBuilder for an example implementation
    """

    def setCharset(self, charset):
        raise NotImplementedError('Subclass responsibility')

    #~ css results ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def beginStylesheet(self):
        raise NotImplementedError('Subclass responsibility')

    def stylesheet(self, elements):
        raise NotImplementedError('Subclass responsibility')

    def endStylesheet(self):
        raise NotImplementedError('Subclass responsibility')

    def beginInline(self):
        raise NotImplementedError('Subclass responsibility')

    def inline(self, declarations):
        raise NotImplementedError('Subclass responsibility')

    def endInline(self):
        raise NotImplementedError('Subclass responsibility')

    def ruleset(self, selectors, declarations):
        raise NotImplementedError('Subclass responsibility')

    #~ css namespaces ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def resolveNamespacePrefix(self, nsPrefix, name):
        raise NotImplementedError('Subclass responsibility')

    #~ css @ directives ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def atCharset(self, charset):
        raise NotImplementedError('Subclass responsibility')

    def atImport(self, import_, mediums, cssParser):
        raise NotImplementedError('Subclass responsibility')

    def atNamespace(self, nsPrefix, uri):
        raise NotImplementedError('Subclass responsibility')

    def atMedia(self, mediums, ruleset):
        raise NotImplementedError('Subclass responsibility')

    def atPage(self, page, pseudopage, declarations):
        raise NotImplementedError('Subclass responsibility')

    def atFontFace(self, declarations):
        raise NotImplementedError('Subclass responsibility')

    def atIdent(self, atIdent, cssParser, src):
        # Hook for unknown @-rules: the default consumes nothing and signals
        # "unhandled" by returning NotImplemented as the result.
        return src, NotImplemented

    #~ css selectors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def combineSelectors(self, selectorA, combiner, selectorB):
        """Return value must implement CSSSelectorAbstract"""
        raise NotImplementedError('Subclass responsibility')

    def selector(self, name):
        """Return value must implement CSSSelectorAbstract"""
        raise NotImplementedError('Subclass responsibility')

    #~ css declarations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def property(self, name, value, important=False):
        raise NotImplementedError('Subclass responsibility')

    def combineTerms(self, termA, combiner, termB):
        raise NotImplementedError('Subclass responsibility')

    def termIdent(self, value):
        raise NotImplementedError('Subclass responsibility')

    def termNumber(self, value, units=None):
        raise NotImplementedError('Subclass responsibility')

    def termRGB(self, value):
        raise NotImplementedError('Subclass responsibility')

    def termURI(self, value):
        raise NotImplementedError('Subclass responsibility')

    def termString(self, value):
        raise NotImplementedError('Subclass responsibility')

    def termUnicodeRange(self, value):
        raise NotImplementedError('Subclass responsibility')

    def termFunction(self, name, value):
        raise NotImplementedError('Subclass responsibility')

    def termUnknown(self, src):
        raise NotImplementedError('Subclass responsibility')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ CSS Parser
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSParseError(Exception):
    """Raised when CSS source cannot be parsed.

    Tracks the failing fragment (``src``), its surrounding context
    (``ctxsrc``) and, once known, the full stylesheet source so the error
    message can point at the offending position.
    """

    src = None
    ctxsrc = None
    fullsrc = None
    inline = False
    srcCtxIdx = None
    srcFullIdx = None
    ctxsrcFullIdx = None

    def __init__(self, msg, src, ctxsrc=None):
        Exception.__init__(self, msg)
        self.src = src
        self.ctxsrc = ctxsrc or src
        if self.ctxsrc:
            self.srcCtxIdx = self.ctxsrc.find(self.src)
            if self.srcCtxIdx < 0:
                # fall back to the class-level None so __str__ knows the
                # offset is unknown
                del self.srcCtxIdx

    def __str__(self):
        # BUGFIX: guard srcCtxIdx being None (src not found in ctxsrc);
        # the original computed ``None + 20`` here and raised TypeError.
        if self.ctxsrc and self.srcCtxIdx is not None:
            return Exception.__str__(self) + ':: (' + repr(self.ctxsrc[:self.srcCtxIdx]) + ', ' + repr(
                self.ctxsrc[self.srcCtxIdx:self.srcCtxIdx + 20]) + ')'
        else:
            return Exception.__str__(self) + ':: ' + repr(self.src[:40])

    def setFullCSSSource(self, fullsrc, inline=False):
        """Attach the complete stylesheet source (and inline flag)."""
        self.fullsrc = fullsrc
        if inline:
            self.inline = inline
        if self.fullsrc:
            self.srcFullIdx = self.fullsrc.find(self.src)
            if self.srcFullIdx < 0:
                del self.srcFullIdx
            self.ctxsrcFullIdx = self.fullsrc.find(self.ctxsrc)
            if self.ctxsrcFullIdx < 0:
                del self.ctxsrcFullIdx
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSParser(object):
"""CSS-2.1 parser dependent only upon the re module.
Implemented directly from http://www.w3.org/TR/CSS21/grammar.html
Tested with some existing CSS stylesheets for portability.
CSS Parsing API:
* setCSSBuilder()
To set your concrete implementation of CSSBuilderAbstract
* parseFile()
Use to parse external stylesheets using a file-like object
>>> cssFile = open('test.css', 'r')
>>> stylesheets = myCSSParser.parseFile(cssFile)
* parse()
Use to parse embedded stylesheets using source string
>>> cssSrc = '''
body,body.body {
font: 110%, "Times New Roman", Arial, Verdana, Helvetica, serif;
background: White;
color: Black;
}
a {text-decoration: underline;}
'''
>>> stylesheets = myCSSParser.parse(cssSrc)
* parseInline()
Use to parse inline stylesheets using attribute source string
>>> style = 'font: 110%, "Times New Roman", Arial, Verdana, Helvetica, serif; background: White; color: Black'
>>> stylesheets = myCSSParser.parseInline(style)
* parseAttributes()
Use to parse attribute string values into inline stylesheets
>>> stylesheets = myCSSParser.parseAttributes(
font='110%, "Times New Roman", Arial, Verdana, Helvetica, serif',
background='White',
color='Black')
* parseSingleAttr()
Use to parse a single string value into a CSS expression
>>> fontValue = myCSSParser.parseSingleAttr('110%, "Times New Roman", Arial, Verdana, Helvetica, serif')
"""
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Constants / Variables / Etc.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ParseError = CSSParseError
AttributeOperators = ['=', '~=', '|=', '&=', '^=', '!=', '<>']
SelectorQualifiers = ('#', '.', '[', ':')
SelectorCombiners = ['+', '>']
ExpressionOperators = ('/', '+', ',')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Regular expressions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if True: # makes the following code foldable
_orRule = lambda *args: '|'.join(args)
_reflags = re.I | re.M | re.U
i_hex = '[0-9a-fA-F]'
i_nonascii = u'[\200-\377]'
i_unicode = '\\\\(?:%s){1,6}\s?' % i_hex
i_escape = _orRule(i_unicode, u'\\\\[ -~\200-\377]')
# i_nmstart = _orRule('[A-Za-z_]', i_nonascii, i_escape)
i_nmstart = _orRule('\-[^0-9]|[A-Za-z_]', i_nonascii,
i_escape) # XXX Added hyphen, http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier
i_nmchar = _orRule('[-0-9A-Za-z_]', i_nonascii, i_escape)
i_ident = '((?:%s)(?:%s)*)' % (i_nmstart, i_nmchar)
re_ident = re.compile(i_ident, _reflags)
# Caution: treats all characters above 0x7f as legal for an identifier.
i_unicodeid = r'([^\u0000-\u007f]+)'
re_unicodeid = re.compile(i_unicodeid, _reflags)
i_element_name = '((?:%s)|\*)' % (i_ident[1:-1],)
re_element_name = re.compile(i_element_name, _reflags)
i_namespace_selector = '((?:%s)|\*|)\|(?!=)' % (i_ident[1:-1],)
re_namespace_selector = re.compile(i_namespace_selector, _reflags)
i_class = '\\.' + i_ident
re_class = re.compile(i_class, _reflags)
i_hash = '#((?:%s)+)' % i_nmchar
re_hash = re.compile(i_hash, _reflags)
i_rgbcolor = '(#%s{6}|#%s{3})' % (i_hex, i_hex)
re_rgbcolor = re.compile(i_rgbcolor, _reflags)
i_nl = u'\n|\r\n|\r|\f'
i_escape_nl = u'\\\\(?:%s)' % i_nl
i_string_content = _orRule(u'[\t !#$%&(-~]', i_escape_nl, i_nonascii, i_escape)
i_string1 = u'\"((?:%s|\')*)\"' % i_string_content
i_string2 = u'\'((?:%s|\")*)\'' % i_string_content
i_string = _orRule(i_string1, i_string2)
re_string = re.compile(i_string, _reflags)
i_uri = (u'url\\(\s*(?:(?:%s)|((?:%s)+))\s*\\)'
% (i_string, _orRule('[!#$%&*-~]', i_nonascii, i_escape)))
# XXX For now
# i_uri = u'(url\\(.*?\\))'
re_uri = re.compile(i_uri, _reflags)
i_num = u'(([-+]?[0-9]+(?:\\.[0-9]+)?)|([-+]?\\.[0-9]+))' # XXX Added out paranthesis, because e.g. .5em was not parsed correctly
re_num = re.compile(i_num, _reflags)
i_unit = '(%%|%s)?' % i_ident
re_unit = re.compile(i_unit, _reflags)
i_function = i_ident + '\\('
re_function = re.compile(i_function, _reflags)
i_functionterm = u'[-+]?' + i_function
re_functionterm = re.compile(i_functionterm, _reflags)
i_unicoderange1 = "(?:U\\+%s{1,6}-%s{1,6})" % (i_hex, i_hex)
i_unicoderange2 = "(?:U\\+\?{1,6}|{h}(\?{0,5}|{h}(\?{0,4}|{h}(\?{0,3}|{h}(\?{0,2}|{h}(\??|{h}))))))"
i_unicoderange = i_unicoderange1 # u'(%s|%s)' % (i_unicoderange1, i_unicoderange2)
re_unicoderange = re.compile(i_unicoderange, _reflags)
# i_comment = u'(?:\/\*[^*]*\*+([^/*][^*]*\*+)*\/)|(?://.*)'
# gabriel: only C convention for comments is allowed in CSS
i_comment = u'(?:\/\*[^*]*\*+([^/*][^*]*\*+)*\/)'
re_comment = re.compile(i_comment, _reflags)
i_important = u'!\s*(important)'
re_important = re.compile(i_important, _reflags)
del _orRule
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Public
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, cssBuilder=None):
    """Create a parser, optionally bound to a CSSBuilderAbstract instance."""
    self.setCSSBuilder(cssBuilder)
#~ CSS Builder to delegate to ~~~~~~~~~~~~~~~~~~~~~~~~
def getCSSBuilder(self):
    """Return the concrete CSSBuilderAbstract instance in use."""
    return self._cssBuilder
def setCSSBuilder(self, cssBuilder):
    """Set the concrete CSSBuilderAbstract instance to delegate to."""
    self._cssBuilder = cssBuilder
cssBuilder = property(getCSSBuilder, setCSSBuilder)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Public CSS Parsing API
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def parseFile(self, srcFile, closeFile=False):
"""Parses CSS file-like objects using the current cssBuilder.
Use for external stylesheets."""
try:
result = self.parse(srcFile.read())
finally:
if closeFile:
srcFile.close()
return result
def parse(self, src):
    """Parses CSS string source using the current cssBuilder.
    Use for embedded stylesheets."""
    self.cssBuilder.beginStylesheet()
    try:
        # XXX Some simple preprocessing
        src = cssSpecial.cleanupCSS(src)

        try:
            src, stylesheet = self._parseStylesheet(src)
        except self.ParseError as err:
            # attach the full source so the error can report a position
            err.setFullCSSSource(src)
            raise
    finally:
        self.cssBuilder.endStylesheet()
    return stylesheet
def parseInline(self, src):
    """Parses CSS inline source string using the current cssBuilder.
    Use to parse a tag's 'style'-like attribute."""
    self.cssBuilder.beginInline()
    try:
        try:
            # braces=False: inline declarations come without '{...}'
            src, properties = self._parseDeclarationGroup(src.strip(), braces=False)
        except self.ParseError as err:
            err.setFullCSSSource(src, inline=True)
            raise

        result = self.cssBuilder.inline(properties)
    finally:
        self.cssBuilder.endInline()
    return result
def parseAttributes(self, attributes={}, **kwAttributes):
"""Parses CSS attribute source strings, and return as an inline stylesheet.
Use to parse a tag's highly CSS-based attributes like 'font'.
See also: parseSingleAttr
"""
if attributes:
kwAttributes.update(attributes)
self.cssBuilder.beginInline()
try:
properties = []
try:
for propertyName, src in kwAttributes.iteritems():
src, property = self._parseDeclarationProperty(src.strip(), propertyName)
properties.append(property)
except self.ParseError as err:
err.setFullCSSSource(src, inline=True)
raise
result = self.cssBuilder.inline(properties)
finally:
self.cssBuilder.endInline()
return result
def parseSingleAttr(self, attrValue):
    """Parse a single CSS attribute source string, and returns the built CSS expression.
    Use to parse a tag's highly CSS-based attributes like 'font'.
    See also: parseAttributes
    """
    results = self.parseAttributes(temp=attrValue)
    # the builder's inline() result may be a pair of mappings; pick the
    # parsed 'temp' property from whichever half contains it
    if 'temp' in results[1]:
        return results[1]['temp']
    else:
        return results[0]['temp']
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Internal _parse methods
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _parseStylesheet(self, src):
    """stylesheet
    : [ CHARSET_SYM S* STRING S* ';' ]?
        [S|CDO|CDC]* [ import [S|CDO|CDC]* ]*
        [ [ ruleset | media | page | font_face ] [S|CDO|CDC]* ]*
    ;
    """
    # Get rid of the comments
    src = self.re_comment.sub(u'', src)

    # [ CHARSET_SYM S* STRING S* ';' ]?
    src = self._parseAtCharset(src)

    # [S|CDO|CDC]*
    src = self._parseSCDOCDC(src)

    #  [ import [S|CDO|CDC]* ]*
    src, stylesheetImports = self._parseAtImports(src)

    # [ namespace [S|CDO|CDC]* ]*
    src = self._parseAtNamespace(src)

    stylesheetElements = []

    # [ [ ruleset | atkeywords ] [S|CDO|CDC]* ]*
    while src:  # due to ending with ]*
        if src.startswith('@'):
            # @media, @page, @font-face
            src, atResults = self._parseAtKeyword(src)
            if atResults is not None:
                stylesheetElements.extend(atResults)
        else:
            # ruleset
            src, ruleset = self._parseRuleset(src)
            stylesheetElements.append(ruleset)

        # [S|CDO|CDC]*
        src = self._parseSCDOCDC(src)

    stylesheet = self.cssBuilder.stylesheet(stylesheetElements, stylesheetImports)
    return src, stylesheet
def _parseSCDOCDC(self, src):
"""[S|CDO|CDC]*"""
while 1:
src = src.lstrip()
if src.startswith('<!--'):
src = src[4:]
elif src.startswith('-->'):
src = src[3:]
else:
break
return src
#~ CSS @ directives ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _parseAtCharset(self, src):
    """[ CHARSET_SYM S* STRING S* ';' ]?"""
    if isAtRuleIdent(src, 'charset'):
        # BUGFIX: ctxsrc was never assigned, so the error path below
        # raised NameError instead of ParseError.
        ctxsrc = src
        src = stripAtRuleIdent(src)
        charset, src = self._getString(src)
        src = src.lstrip()
        if src[:1] != ';':
            raise self.ParseError('@charset expected a terminating \';\'', src, ctxsrc)
        src = src[1:].lstrip()

        self.cssBuilder.atCharset(charset)
    return src
def _parseAtImports(self, src):
    """[ import [S|CDO|CDC]* ]*"""
    result = []
    while isAtRuleIdent(src, 'import'):
        ctxsrc = src
        src = stripAtRuleIdent(src)

        import_, src = self._getStringOrURI(src)
        if import_ is None:
            raise self.ParseError('Import expecting string or url', src, ctxsrc)

        # optional comma-separated medium list
        mediums = []
        medium, src = self._getIdent(src.lstrip())
        while medium is not None:
            mediums.append(medium)
            if src[:1] == ',':
                src = src[1:].lstrip()
                medium, src = self._getIdent(src)
            else:
                break

        # XXX No medium inherits and then "all" is appropriate
        if not mediums:
            mediums = ["all"]

        if src[:1] != ';':
            raise self.ParseError('@import expected a terminating \';\'', src, ctxsrc)
        src = src[1:].lstrip()

        stylesheet = self.cssBuilder.atImport(import_, mediums, self)
        if stylesheet is not None:
            result.append(stylesheet)

        src = self._parseSCDOCDC(src)
    return src, result
def _parseAtNamespace(self, src):
    """namespace :

    @namespace S* [IDENT S*]? [STRING|URI] S* ';' S*
    """
    src = self._parseSCDOCDC(src)
    while isAtRuleIdent(src, 'namespace'):
        ctxsrc = src
        src = stripAtRuleIdent(src)

        namespace, src = self._getStringOrURI(src)
        if namespace is None:
            # '@namespace prefix "uri";' form: prefix first, then the URI
            nsPrefix, src = self._getIdent(src)
            if nsPrefix is None:
                raise self.ParseError('@namespace expected an identifier or a URI', src, ctxsrc)
            namespace, src = self._getStringOrURI(src.lstrip())
            if namespace is None:
                raise self.ParseError('@namespace expected a URI', src, ctxsrc)
        else:
            nsPrefix = None

        src = src.lstrip()
        if src[:1] != ';':
            raise self.ParseError('@namespace expected a terminating \';\'', src, ctxsrc)
        src = src[1:].lstrip()

        self.cssBuilder.atNamespace(nsPrefix, namespace)

        src = self._parseSCDOCDC(src)
    return src
def _parseAtKeyword(self, src):
    """[media | page | font_face | unknown_keyword]"""
    ctxsrc = src
    if isAtRuleIdent(src, 'media'):
        src, result = self._parseAtMedia(src)
    elif isAtRuleIdent(src, 'page'):
        src, result = self._parseAtPage(src)
    elif isAtRuleIdent(src, 'font-face'):
        src, result = self._parseAtFontFace(src)
    # XXX added @import, was missing!
    elif isAtRuleIdent(src, 'import'):
        src, result = self._parseAtImports(src)
    elif isAtRuleIdent(src, 'frame'):
        src, result = self._parseAtFrame(src)
    elif src.startswith('@'):
        # unknown at-rule: let the builder decide (default is to skip it)
        src, result = self._parseAtIdent(src)
    else:
        raise self.ParseError('Unknown state in atKeyword', src, ctxsrc)
    return src, result
def _parseAtMedia(self, src):
    """media
    : MEDIA_SYM S* medium [ ',' S* medium ]* '{' S* ruleset* '}' S*
    ;
    """
    ctxsrc = src
    src = src[len('@media '):].lstrip()
    mediums = []
    while src and src[0] != '{':
        medium, src = self._getIdent(src)
        if medium is None:
            raise self.ParseError('@media rule expected media identifier', src, ctxsrc)
        # make "and ... {" work: media-query conditions are not parsed,
        # everything up to the opening brace is skipped
        if medium == u'and':
            # strip up to curly bracket
            pattern = re.compile('.*({.*)')
            match = re.match(pattern, src)
            src = src[match.end() - 1:]
            break
        mediums.append(medium)
        if src[0] == ',':
            src = src[1:].lstrip()
        else:
            src = src.lstrip()

    if not src.startswith('{'):
        raise self.ParseError('Ruleset opening \'{\' not found', src, ctxsrc)
    src = src[1:].lstrip()

    stylesheetElements = []
    # Containing @ where not found and parsed
    while src and not src.startswith('}'):
        if src.startswith('@'):
            # @media, @page, @font-face
            src, atResults = self._parseAtKeyword(src)
            if atResults is not None:
                stylesheetElements.extend(atResults)
        else:
            # ruleset
            src, ruleset = self._parseRuleset(src)
            stylesheetElements.append(ruleset)
        src = src.lstrip()

    if not src.startswith('}'):
        raise self.ParseError('Ruleset closing \'}\' not found', src, ctxsrc)
    else:
        src = src[1:].lstrip()

    result = self.cssBuilder.atMedia(mediums, stylesheetElements)
    return src, result
def _parseAtPage(self, src):
    """page
    : PAGE_SYM S* IDENT? pseudo_page? S*
    '{' S* declaration [ ';' S* declaration ]* '}' S*
    ;
    """
    ctxsrc = src
    src = src[len('@page '):].lstrip()
    page, src = self._getIdent(src)
    if src[:1] == ':':
        # pseudo page (e.g. ':first') is folded into the page name
        pseudopage, src = self._getIdent(src[1:])
        page = page + '_' + pseudopage
    else:
        pseudopage = None

    # Containing @ where not found and parsed
    stylesheetElements = []
    src = src.lstrip()
    properties = []

    # XXX Extended for PDF use: nested at-rules are allowed inside @page
    if not src.startswith('{'):
        raise self.ParseError('Ruleset opening \'{\' not found', src, ctxsrc)
    else:
        src = src[1:].lstrip()

    while src and not src.startswith('}'):
        if src.startswith('@'):
            # @media, @page, @font-face
            src, atResults = self._parseAtKeyword(src)
            if atResults is not None:
                stylesheetElements.extend(atResults)
        else:
            src, nproperties = self._parseDeclarationGroup(src.lstrip(), braces=False)
            properties += nproperties
        src = src.lstrip()

    result = [self.cssBuilder.atPage(page, pseudopage, properties)]

    return src[1:].lstrip(), result
def _parseAtFrame(self, src):
"""
XXX Proprietary for PDF
"""
ctxsrc = src
src = src[len('@frame '):].lstrip()
box, src = self._getIdent(src)
src, properties = self._parseDeclarationGroup(src.lstrip())
result = [self.cssBuilder.atFrame(box, properties)]
return src.lstrip(), result
def _parseAtFontFace(self, src):
ctxsrc = src
src = src[len('@font-face '):].lstrip()
src, properties = self._parseDeclarationGroup(src)
result = [self.cssBuilder.atFontFace(properties)]
return src, result
def _parseAtIdent(self, src):
ctxsrc = src
atIdent, src = self._getIdent(src[1:])
if atIdent is None:
raise self.ParseError('At-rule expected an identifier for the rule', src, ctxsrc)
src, result = self.cssBuilder.atIdent(atIdent, self, src)
if result is NotImplemented:
# An at-rule consists of everything up to and including the next semicolon (;) or the next block, whichever comes first
semiIdx = src.find(';')
if semiIdx < 0:
semiIdx = None
blockIdx = src[:semiIdx].find('{')
if blockIdx < 0:
blockIdx = None
if semiIdx is not None and semiIdx < blockIdx:
src = src[semiIdx + 1:].lstrip()
elif blockIdx is None:
# consume the rest of the content since we didn't find a block or a semicolon
src = src[-1:-1]
elif blockIdx is not None:
# expecing a block...
src = src[blockIdx:]
try:
# try to parse it as a declarations block
src, declarations = self._parseDeclarationGroup(src)
except self.ParseError:
# try to parse it as a stylesheet block
src, stylesheet = self._parseStylesheet(src)
else:
raise self.ParserError('Unable to ignore @-rule block', src, ctxsrc)
return src.lstrip(), result
#~ ruleset - see selector and declaration groups ~~~~
def _parseRuleset(self, src):
    """ruleset
    : selector [ ',' S* selector ]*
    '{' S* declaration [ ';' S* declaration ]* '}' S*
    ;
    """
    src, selectors = self._parseSelectorGroup(src)
    src, properties = self._parseDeclarationGroup(src.lstrip())
    result = self.cssBuilder.ruleset(selectors, properties)
    return src, result
#~ selector parsing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _parseSelectorGroup(self, src):
    """Parse a comma-separated selector list, up to the ruleset body."""
    selectors = []
    while src[:1] not in ('{', '}', ']', '(', ')', ';', ''):
        src, selector = self._parseSelector(src)
        if selector is None:
            break
        selectors.append(selector)
        if src.startswith(','):
            src = src[1:].lstrip()
    return src, selectors
def _parseSelector(self, src):
    """selector
    : simple_selector [ combinator simple_selector ]*
    ;
    """
    src, selector = self._parseSimpleSelector(src)
    srcLen = len(src)  # XXX used below to detect a stuck parse
    while src[:1] not in ('', ',', ';', '{', '}', '[', ']', '(', ')'):
        for combiner in self.SelectorCombiners:
            if src.startswith(combiner):
                src = src[len(combiner):].lstrip()
                break
        else:
            combiner = ' '
        src, selectorB = self._parseSimpleSelector(src)

        # XXX Fix a bug that occured here e.g. : .1 {...}
        # If no progress was made, skip to the next selector boundary and
        # drop this (unparseable) selector.
        if len(src) >= srcLen:
            src = src[1:]
            while src and (src[:1] not in ('', ',', ';', '{', '}', '[', ']', '(', ')')):
                src = src[1:]
            return src.lstrip(), None

        selector = self.cssBuilder.combineSelectors(selector, combiner, selectorB)

    return src.lstrip(), selector
def _parseSimpleSelector(self, src):
    """simple_selector
    : [ namespace_selector ]? element_name? [ HASH | class | attrib | pseudo ]* S*
    ;
    """
    ctxsrc = src.lstrip()
    nsPrefix, src = self._getMatchResult(self.re_namespace_selector, src)
    name, src = self._getMatchResult(self.re_element_name, src)
    if name:
        pass  # already *successfully* assigned
    elif src[:1] in self.SelectorQualifiers:
        # no element name, but a qualifier follows: implicit universal '*'
        name = '*'
    else:
        raise self.ParseError('Selector name or qualifier expected', src, ctxsrc)

    name = self.cssBuilder.resolveNamespacePrefix(nsPrefix, name)
    selector = self.cssBuilder.selector(name)
    while src and src[:1] in self.SelectorQualifiers:
        hash_, src = self._getMatchResult(self.re_hash, src)
        if hash_ is not None:
            selector.addHashId(hash_)
            continue

        class_, src = self._getMatchResult(self.re_class, src)
        if class_ is not None:
            selector.addClass(class_)
            continue

        if src.startswith('['):
            src, selector = self._parseSelectorAttribute(src, selector)
        elif src.startswith(':'):
            src, selector = self._parseSelectorPseudo(src, selector)
        else:
            break

    return src.lstrip(), selector
def _parseSelectorAttribute(self, src, selector):
    """attrib
    : '[' S* [ namespace_selector ]? IDENT S* [ [ '=' | INCLUDES | DASHMATCH ] S*
    [ IDENT | STRING ] S* ]? ']'
    ;
    """
    ctxsrc = src
    if not src.startswith('['):
        raise self.ParseError('Selector Attribute opening \'[\' not found', src, ctxsrc)
    src = src[1:].lstrip()

    nsPrefix, src = self._getMatchResult(self.re_namespace_selector, src)
    attrName, src = self._getIdent(src)

    src = src.lstrip()

    if attrName is None:
        raise self.ParseError('Expected a selector attribute name', src, ctxsrc)
    if nsPrefix is not None:
        attrName = self.cssBuilder.resolveNamespacePrefix(nsPrefix, attrName)

    # find the (optional) attribute operator, e.g. '=', '~=', '|='
    for op in self.AttributeOperators:
        if src.startswith(op):
            break
    else:
        op = ''
    src = src[len(op):].lstrip()

    if op:
        attrValue, src = self._getIdent(src)
        if attrValue is None:
            attrValue, src = self._getString(src)
            if attrValue is None:
                raise self.ParseError('Expected a selector attribute value', src, ctxsrc)
    else:
        attrValue = None

    if not src.startswith(']'):
        raise self.ParseError('Selector Attribute closing \']\' not found', src, ctxsrc)
    else:
        src = src[1:]

    if op:
        selector.addAttributeOperation(attrName, op, attrValue)
    else:
        selector.addAttribute(attrName)
    return src, selector
def _parseSelectorPseudo(self, src, selector):
    """pseudo
    : ':' [ IDENT | function ]
    ;
    """
    ctxsrc = src
    if not src.startswith(':'):
        raise self.ParseError('Selector Pseudo \':\' not found', src, ctxsrc)
    # accept both ':' and '::' (CSS3 pseudo-element syntax)
    src = re.search('^:{1,2}(.*)', src, re.M | re.S).group(1)

    name, src = self._getIdent(src)
    if not name:
        raise self.ParseError('Selector Pseudo identifier not found', src, ctxsrc)

    if src.startswith('('):
        # function
        src = src[1:].lstrip()
        src, term = self._parseExpression(src, True)
        if not src.startswith(')'):
            raise self.ParseError('Selector Pseudo Function closing \')\' not found', src, ctxsrc)
        src = src[1:]
        selector.addPseudoFunction(name, term)
    else:
        selector.addPseudo(name)

    return src, selector
#~ declaration and expression parsing ~~~~~~~~~~~~~~~
def _parseDeclarationGroup(self, src, braces=True):
    """Parse ``{ declaration; ... }``; with braces=False a brace-less
    declaration list (as in inline styles) is also accepted."""
    ctxsrc = src
    if src.startswith('{'):
        src, braces = src[1:], True
    elif braces:
        raise self.ParseError('Declaration group opening \'{\' not found', src, ctxsrc)

    properties = []
    src = src.lstrip()
    while src[:1] not in ('', ',', '{', '}', '[', ']', '(', ')', '@'):  # XXX @?
        src, property = self._parseDeclaration(src)

        # XXX Workaround for styles like "*font: smaller"
        if src.startswith("*"):
            src = "-nothing-" + src[1:]
            continue

        if property is None:
            break
        properties.append(property)
        if src.startswith(';'):
            src = src[1:].lstrip()
        else:
            break

    if braces:
        if not src.startswith('}'):
            raise self.ParseError('Declaration group closing \'}\' not found', src, ctxsrc)
        src = src[1:]

    return src.lstrip(), properties
def _parseDeclaration(self, src):
    """declaration
    : ident S* ':' S* expr prio?
    | /* empty */
    ;
    """
    # property
    propertyName, src = self._getIdent(src)

    if propertyName is not None:
        src = src.lstrip()
        # S* : S*
        if src[:1] in (':', '='):
            # Note: we are being fairly flexible here...  technically, the
            # ":" is *required*, but in the name of flexibility we
            # support a null transition, as well as an "=" transition
            src = src[1:].lstrip()

        src, property = self._parseDeclarationProperty(src, propertyName)
    else:
        property = None

    return src, property
def _parseDeclarationProperty(self, src, propertyName):
# expr
src, expr = self._parseExpression(src)
# prio?
important, src = self._getMatchResult(self.re_important, src)
src = src.lstrip()
property = self.cssBuilder.property(propertyName, expr, important)
return src, property
def _parseExpression(self, src, returnList=False):
    """
    expr
    : term [ operator term ]*
    ;
    """
    src, term = self._parseExpressionTerm(src)
    operator = None
    while src[:1] not in ('', ';', '{', '}', '[', ']', ')'):
        for operator in self.ExpressionOperators:
            if src.startswith(operator):
                src = src[len(operator):]
                break
        else:
            # adjacency (whitespace) acts as the implicit operator
            operator = ' '
        src, term2 = self._parseExpressionTerm(src.lstrip())
        if term2 is NotImplemented:
            break
        else:
            term = self.cssBuilder.combineTerms(term, operator, term2)

    if operator is None and returnList:
        # single term, but the caller wants a combined/list-like result
        term = self.cssBuilder.combineTerms(term, None, None)
        return src, term
    else:
        return src, term
def _parseExpressionTerm(self, src):
    """term
    : unary_operator?
    [ NUMBER S* | PERCENTAGE S* | LENGTH S* | EMS S* | EXS S* | ANGLE S* |
    TIME S* | FREQ S* | function ]
    | STRING S* | IDENT S* | URI S* | RGB S* | UNICODERANGE S* | hexcolor
    ;
    """
    ctxsrc = src

    # alternatives are tried in a fixed order; first match wins
    result, src = self._getMatchResult(self.re_num, src)
    if result is not None:
        units, src = self._getMatchResult(self.re_unit, src)
        term = self.cssBuilder.termNumber(result, units)
        return src.lstrip(), term

    result, src = self._getString(src, self.re_uri)
    if result is not None:
        # XXX URL!!!!
        term = self.cssBuilder.termURI(result)
        return src.lstrip(), term

    result, src = self._getString(src)
    if result is not None:
        term = self.cssBuilder.termString(result)
        return src.lstrip(), term

    result, src = self._getMatchResult(self.re_functionterm, src)
    if result is not None:
        src, params = self._parseExpression(src, True)
        if src[0] != ')':
            raise self.ParseError('Terminal function expression expected closing \')\'', src, ctxsrc)
        src = src[1:].lstrip()
        term = self.cssBuilder.termFunction(result, params)
        return src, term

    result, src = self._getMatchResult(self.re_rgbcolor, src)
    if result is not None:
        term = self.cssBuilder.termRGB(result)
        return src.lstrip(), term

    result, src = self._getMatchResult(self.re_unicoderange, src)
    if result is not None:
        term = self.cssBuilder.termUnicodeRange(result)
        return src.lstrip(), term

    nsPrefix, src = self._getMatchResult(self.re_namespace_selector, src)
    result, src = self._getIdent(src)
    if result is not None:
        if nsPrefix is not None:
            result = self.cssBuilder.resolveNamespacePrefix(nsPrefix, result)
        term = self.cssBuilder.termIdent(result)
        return src.lstrip(), term

    result, src = self._getMatchResult(self.re_unicodeid, src)
    if result is not None:
        term = self.cssBuilder.termIdent(result)
        return src.lstrip(), term

    # the builder decides how to handle unparseable terms (typically it
    # returns (src, NotImplemented))
    return self.cssBuilder.termUnknown(src)
#~ utility methods ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _getIdent(self, src, default=None):
    """Match a leading CSS identifier; return ``(ident or default, remainder)``."""
    return self._getMatchResult(self.re_ident, src, default)
def _getString(self, src, rexpression=None, default=None):
if rexpression is None:
rexpression = self.re_string
result = rexpression.match(src)
if result:
strres = [x for x in result.groups()]
if strres:
strres = strres[0]
else:
strres = ''
return strres, src[result.end():]
else:
return default, src
def _getStringOrURI(self, src):
result, src = self._getString(src, self.re_uri)
if result is None:
result, src = self._getString(src)
return result, src
def _getMatchResult(self, rexpression, src, default=None, group=1):
result = rexpression.match(src)
if result:
return result.group(group), src[result.end():]
else:
return default, src
| {
"content_hash": "ae9da4b875f541f6e230a54c0f0e1ffa",
"timestamp": "",
"source": "github",
"line_count": 1194,
"max_line_length": 137,
"avg_line_length": 33.46566164154104,
"alnum_prop": 0.5341858951899494,
"repo_name": "zenx/xhtml2pdf",
"id": "a3ff5bb91d1ceaff83b4a9f641d55d97bbe57946",
"size": "39958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xhtml2pdf/w3c/cssParser.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "479010"
}
],
"symlink_target": ""
} |
from suplemon.suplemon_module import Module
class RStrip(Module):
    """Strip trailing whitespace from lines."""

    def run(self, app, editor, args):
        """Remove trailing whitespace on every line that currently has a cursor."""
        for line_no in editor.get_lines_with_cursors():
            current = editor.lines[line_no]
            current.set_data(current.data.rstrip())
# Module registration table consumed by the suplemon module loader.
module = {
    "class": RStrip,
    "name": "rstrip",
}
| {
"content_hash": "11e650c76c358a1bb41d8ea3dd4c0a8e",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 51,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.6,
"repo_name": "severin31/suplemon",
"id": "efef191f787ac7f2e30dc9eb995876f1f3dfcf59",
"size": "393",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "suplemon/modules/rstrip.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "155931"
}
],
"symlink_target": ""
} |
from collections import Counter, OrderedDict
from collections.abc import Mapping
import contextlib
from copy import deepcopy
import datetime
from io import BytesIO
import operator
from textwrap import shorten
import string
import numpy as np
from .pick import (channel_type, _get_channel_types,
get_channel_type_constants, pick_types, _contains_ch_type)
from .constants import FIFF, _coord_frame_named
from .open import fiff_open
from .tree import dir_tree_find
from .tag import (read_tag, find_tag, _ch_coord_dict, _update_ch_info_named,
_rename_list)
from .proj import (_read_proj, _write_proj, _uniquify_projs, _normalize_proj,
_proj_equal, Projection)
from .ctf_comp import _read_ctf_comp, write_ctf_comp
from .write import (start_and_end_file, start_block, end_block,
write_string, write_dig_points, write_float, write_int,
write_coord_trans, write_ch_info, write_name_list,
write_julian, write_float_matrix, write_id, DATE_NONE)
from .proc_history import _read_proc_history, _write_proc_history
from ..transforms import (invert_transform, Transform, _coord_frame_name,
_ensure_trans, _frame_to_str)
from ..utils import (logger, verbose, warn, object_diff, _validate_type,
_stamp_to_dt, _dt_to_stamp, _pl, _is_numeric,
_check_option, _on_missing, _check_on_missing, fill_doc,
_check_fname, repr_html)
from ._digitization import (_format_dig_points, _dig_kind_proper, DigPoint,
_dig_kind_rev, _dig_kind_ints, _read_dig_fif)
from ._digitization import write_dig, _get_data_as_dict_from_dig
from .compensator import get_current_comp
from ..defaults import _handle_default
b = bytes  # alias

# Keys of a channel info dict that hold scalar values.
_SCALAR_CH_KEYS = ('scanno', 'logno', 'kind', 'range', 'cal', 'coil_type',
                   'unit', 'unit_mul', 'coord_frame')
# All keys a channel dict may contain ('loc' and 'ch_name' are non-scalar).
_ALL_CH_KEYS_SET = set(_SCALAR_CH_KEYS + ('loc', 'ch_name'))
# XXX we need to require these except when doing simplify_info
_MIN_CH_KEYS_SET = set(('kind', 'cal', 'unit', 'loc', 'ch_name'))
def _get_valid_units():
"""Get valid units according to the International System of Units (SI).
The International System of Units (SI, :footcite:`WikipediaSI`) is the
default system for describing units in the Brain Imaging Data Structure
(BIDS). For more information, see the BIDS specification
:footcite:`BIDSdocs` and the appendix "Units" therein.
References
----------
.. footbibliography::
"""
valid_prefix_names = ['yocto', 'zepto', 'atto', 'femto', 'pico', 'nano',
'micro', 'milli', 'centi', 'deci', 'deca', 'hecto',
'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
'zetta', 'yotta']
valid_prefix_symbols = ['y', 'z', 'a', 'f', 'p', 'n', u'µ', 'm', 'c', 'd',
'da', 'h', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
valid_unit_names = ['metre', 'kilogram', 'second', 'ampere', 'kelvin',
'mole', 'candela', 'radian', 'steradian', 'hertz',
'newton', 'pascal', 'joule', 'watt', 'coulomb', 'volt',
'farad', 'ohm', 'siemens', 'weber', 'tesla', 'henry',
'degree Celsius', 'lumen', 'lux', 'becquerel', 'gray',
'sievert', 'katal']
valid_unit_symbols = ['m', 'kg', 's', 'A', 'K', 'mol', 'cd', 'rad', 'sr',
'Hz', 'N', 'Pa', 'J', 'W', 'C', 'V', 'F', u'Ω', 'S',
'Wb', 'T', 'H', u'°C', 'lm', 'lx', 'Bq', 'Gy', 'Sv',
'kat']
# Valid units are all possible combinations of either prefix name or prefix
# symbol together with either unit name or unit symbol. E.g., nV for
# nanovolt
valid_units = []
valid_units += ([''.join([prefix, unit]) for prefix in valid_prefix_names
for unit in valid_unit_names])
valid_units += ([''.join([prefix, unit]) for prefix in valid_prefix_names
for unit in valid_unit_symbols])
valid_units += ([''.join([prefix, unit]) for prefix in valid_prefix_symbols
for unit in valid_unit_names])
valid_units += ([''.join([prefix, unit]) for prefix in valid_prefix_symbols
for unit in valid_unit_symbols])
# units are also valid without a prefix
valid_units += valid_unit_names
valid_units += valid_unit_symbols
# we also accept "n/a" as a unit, which is the default missing value in
# BIDS
valid_units += ["n/a"]
return tuple(valid_units)
@verbose
def _unique_channel_names(ch_names, max_length=None, verbose=None):
    """Ensure unique channel names.

    Modifies ``ch_names`` in place (and also returns it). Duplicated names
    get a ``-<suffix>`` appended, where the suffix is the duplicate's
    running number or, if that would still collide, a lowercase letter.

    Parameters
    ----------
    ch_names : list of str
        The channel names; modified in place.
    max_length : int | None
        If not None, truncate every name to at most ``max_length``
        characters before de-duplication (duplicates are truncated further
        to leave room for the appended suffix).
    verbose : bool | str | int | None
        Verbosity level (handled by the ``@verbose`` decorator).

    Returns
    -------
    ch_names : list of str
        The de-duplicated names (the same list object as the input).
    """
    # Fallback suffixes 'a'..'z', tried when the numeric suffix collides.
    suffixes = tuple(string.ascii_lowercase)
    if max_length is not None:
        ch_names[:] = [name[:max_length] for name in ch_names]
    # Indices of the first occurrence of each distinct name.
    unique_ids = np.unique(ch_names, return_index=True)[1]
    if len(unique_ids) != len(ch_names):
        # Names whose indices were dropped by np.unique are duplicates.
        dups = {ch_names[x]
                for x in np.setdiff1d(range(len(ch_names)), unique_ids)}
        warn('Channel names are not unique, found duplicates for: '
             '%s. Applying running numbers for duplicates.' % dups)
        for ch_stem in dups:
            overlaps = np.where(np.array(ch_names) == ch_stem)[0]
            # We need an extra character since we append '-'.
            # np.ceil(...) is the maximum number of appended digits.
            if max_length is not None:
                n_keep = (
                    max_length - 1 - int(np.ceil(np.log10(len(overlaps)))))
            else:
                n_keep = np.inf
            n_keep = min(len(ch_stem), n_keep)
            ch_stem = ch_stem[:n_keep]
            for idx, ch_idx in enumerate(overlaps):
                # try idx first, then loop through lower case chars
                for suffix in (idx,) + suffixes:
                    ch_name = ch_stem + '-%s' % suffix
                    if ch_name not in ch_names:
                        break
                if ch_name not in ch_names:
                    ch_names[ch_idx] = ch_name
                else:
                    # a/b/... exhausted too -- give up rather than emit
                    # yet another duplicate
                    raise ValueError('Adding a single alphanumeric for a '
                                     'duplicate resulted in another '
                                     'duplicate name %s' % ch_name)
    return ch_names
class MontageMixin(object):
    """Mixin for Montage getting and setting."""
    @fill_doc
    def get_montage(self):
        """Get a DigMontage from instance.
        Returns
        -------
        %(montage)s
        """
        from ..channels.montage import make_dig_montage
        # works both when called on an Info directly and when called on an
        # object (Raw/Epochs/Evoked) that carries an .info attribute
        info = self if isinstance(self, Info) else self.info
        if info['dig'] is None:
            return None
        # obtain coord_frame, and landmark coords
        # (nasion, lpa, rpa, hsp, hpi) from DigPoints
        montage_bunch = _get_data_as_dict_from_dig(info['dig'])
        coord_frame = _frame_to_str.get(montage_bunch.coord_frame)
        # get the channel names and chs data structure
        ch_names, chs = info['ch_names'], info['chs']
        picks = pick_types(info, meg=False, eeg=True, seeg=True,
                           ecog=True, dbs=True, fnirs=True, exclude=[])
        # channel positions from dig do not match ch_names one to one,
        # so use loc[:3] instead
        ch_pos = {ch_names[ii]: chs[ii]['loc'][:3] for ii in picks}
        # fNIRS uses multiple channels for the same sensors, we use
        # a private function to format these for dig montage.
        fnirs_picks = pick_types(info, fnirs=True, exclude=[])
        if len(ch_pos) == len(fnirs_picks):
            # all picked channels are fNIRS: rebuild positions per-sensor
            ch_pos = _get_fnirs_ch_pos(info)
        elif len(fnirs_picks) > 0:
            raise ValueError("MNE does not support getting the montage "
                             "for a mix of fNIRS and other data types. "
                             "Please raise a GitHub issue if you "
                             "require this feature.")
        # create montage
        montage = make_dig_montage(
            ch_pos=ch_pos,
            coord_frame=coord_frame,
            nasion=montage_bunch.nasion,
            lpa=montage_bunch.lpa,
            rpa=montage_bunch.rpa,
            hsp=montage_bunch.hsp,
            hpi=montage_bunch.hpi,
        )
        return montage
    @verbose
    def set_montage(self, montage, match_case=True, match_alias=False,
                    on_missing='raise', verbose=None):
        """Set %(montage_types)s channel positions and digitization points.
        Parameters
        ----------
        %(montage)s
        %(match_case)s
        %(match_alias)s
        %(on_missing_montage)s
        %(verbose)s
        Returns
        -------
        inst : instance of Raw | Epochs | Evoked
            The instance, modified in-place.
        See Also
        --------
        mne.channels.make_standard_montage
        mne.channels.make_dig_montage
        mne.channels.read_custom_montage
        Notes
        -----
        .. warning::
            Only %(montage_types)s channels can have their positions set using
            a montage. Other channel types (e.g., MEG channels) should have
            their positions defined properly using their data reading
            functions.
        """
        # How to set up a montage to old named fif file (walk through example)
        # https://gist.github.com/massich/f6a9f4799f1fbeb8f5e8f8bc7b07d3df
        from ..channels.montage import _set_montage
        info = self if isinstance(self, Info) else self.info
        # validation and application are delegated to the montage module
        _set_montage(info, montage, match_case, match_alias, on_missing)
        return self
class ContainsMixin(object):
    """Mixin class for Raw, Evoked, Epochs and Info."""
    def __contains__(self, ch_type):
        """Check channel type membership.
        Parameters
        ----------
        ch_type : str
            Channel type to check for. Can be e.g. 'meg', 'eeg', 'stim', etc.
        Returns
        -------
        in : bool
            Whether or not the instance contains the given channel type.
        Examples
        --------
        Channel type membership can be tested as::
            >>> 'meg' in inst  # doctest: +SKIP
            True
            >>> 'seeg' in inst  # doctest: +SKIP
            False
        """
        info = self if isinstance(self, Info) else self.info
        if ch_type == 'meg':
            # 'meg' is a virtual type: satisfied by either magnetometers
            # or gradiometers
            has_ch_type = (_contains_ch_type(info, 'mag') or
                           _contains_ch_type(info, 'grad'))
        else:
            has_ch_type = _contains_ch_type(info, ch_type)
        return has_ch_type
    @property
    def compensation_grade(self):
        """The current gradient compensation grade."""
        info = self if isinstance(self, Info) else self.info
        return get_current_comp(info)
    @fill_doc
    def get_channel_types(self, picks=None, unique=False, only_data_chs=False):
        """Get a list of channel type for each channel.
        Parameters
        ----------
        %(picks_all)s
        unique : bool
            Whether to return only unique channel types. Default is ``False``.
        only_data_chs : bool
            Whether to ignore non-data channels. Default is ``False``.
        Returns
        -------
        channel_types : list
            The channel types.
        """
        info = self if isinstance(self, Info) else self.info
        # the heavy lifting lives in the module-level helper
        return _get_channel_types(info, picks=picks, unique=unique,
                                  only_data_chs=only_data_chs)
def _format_trans(obj, key):
try:
t = obj[key]
except KeyError:
pass
else:
if t is not None:
obj[key] = Transform(t['from'], t['to'], t['trans'])
def _check_ch_keys(ch, ci, name='info["chs"]', check_min=True):
    """Validate the keys of one channel dict.

    Raises KeyError if ``ch`` contains keys outside the known channel-key
    set, or (when ``check_min``) lacks any of the required minimal keys.
    ``ci`` and ``name`` are only used to build the error message.
    """
    present = set(ch)
    extra = sorted(present.difference(_ALL_CH_KEYS_SET))
    if extra:
        raise KeyError(
            f'key{_pl(extra)} errantly present for {name}[{ci}]: {extra}')
    if not check_min:
        return
    missing = sorted(_MIN_CH_KEYS_SET.difference(present))
    if missing:
        raise KeyError(
            f'key{_pl(missing)} missing for {name}[{ci}]: {missing}')
# As options are added here, test_meas_info.py:test_info_bad should be updated
def _check_bads(bads):
    """Validate the user-settable ``info['bads']`` value (must be a list)."""
    _validate_type(bads, list, 'bads')
    return bads
def _check_description(description):
    """Validate ``info['description']`` (str or None)."""
    _validate_type(description, (None, str), "info['description']")
    return description
def _check_dev_head_t(dev_head_t):
    """Validate ``info['dev_head_t']`` and coerce it to a meg->head Transform."""
    _validate_type(dev_head_t, (Transform, None), "info['dev_head_t']")
    if dev_head_t is None:
        return None
    return _ensure_trans(dev_head_t, 'meg', 'head')
def _check_experimenter(experimenter):
    """Validate ``info['experimenter']`` (str or None)."""
    _validate_type(experimenter, (None, str), 'experimenter')
    return experimenter
def _check_line_freq(line_freq):
    """Validate ``info['line_freq']`` and coerce a numeric value to float."""
    _validate_type(line_freq, (None, 'numeric'), 'line_freq')
    if line_freq is None:
        return None
    return float(line_freq)
def _check_subject_info(subject_info):
    """Validate ``info['subject_info']`` (dict or None)."""
    _validate_type(subject_info, (None, dict), 'subject_info')
    return subject_info
def _check_device_info(device_info):
    """Validate ``info['device_info']`` (dict or None)."""
    _validate_type(device_info, (None, dict), 'device_info')
    return device_info
def _check_helium_info(helium_info):
    """Validate ``info['helium_info']`` (dict or None)."""
    _validate_type(helium_info, (None, dict), 'helium_info')
    return helium_info
class Info(dict, MontageMixin, ContainsMixin):
    """Measurement information.
    This data structure behaves like a dictionary. It contains all metadata
    that is available for a recording. However, its keys are restricted to
    those provided by the
    `FIF format specification <https://github.com/mne-tools/fiff-constants>`__,
    so new entries should not be manually added.
    .. note::
        This class should not be instantiated directly via
        ``mne.Info(...)``. Instead, use :func:`mne.create_info` to create
        measurement information from scratch.
    .. warning::
        The only entries that should be manually changed by the user are:
        ``info['bads']``, ``info['description']``, ``info['device_info']``
        ``info['dev_head_t']``, ``info['experimenter']``,
        ``info['helium_info']``, ``info['line_freq']``, ``info['temp']``,
        and ``info['subject_info']``.
        All other entries should be considered read-only, though they can be
        modified by various MNE-Python functions or methods (which have
        safeguards to ensure all fields remain in sync).
    Parameters
    ----------
    *args : list
        Arguments.
    **kwargs : dict
        Keyword arguments.
    Attributes
    ----------
    acq_pars : str | None
        MEG system acquisition parameters.
        See :class:`mne.AcqParserFIF` for details.
    acq_stim : str | None
        MEG system stimulus parameters.
    bads : list of str
        List of bad (noisy/broken) channels, by name. These channels will by
        default be ignored by many processing steps.
    ch_names : list of str
        The names of the channels.
    chs : list of dict
        A list of channel information dictionaries, one per channel.
        See Notes for more information.
    command_line : str
        Contains the command and arguments used to create the source space
        (used for source estimation).
    comps : list of dict
        CTF software gradient compensation data.
        See Notes for more information.
    ctf_head_t : Transform | None
        The transformation from 4D/CTF head coordinates to Neuromag head
        coordinates. This is only present in 4D/CTF data.
    custom_ref_applied : int
        Whether a custom (=other than average) reference has been applied to
        the EEG data. This flag is checked by some algorithms that require an
        average reference to be set.
    description : str | None
        String description of the recording.
    dev_ctf_t : Transform | None
        The transformation from device coordinates to 4D/CTF head coordinates.
        This is only present in 4D/CTF data.
    dev_head_t : Transform | None
        The device to head transformation.
    device_info : dict | None
        Information about the acquisition device. See Notes for details.
        .. versionadded:: 0.19
    dig : list of dict | None
        The Polhemus digitization data in head coordinates.
        See Notes for more information.
    events : list of dict
        Event list, sometimes extracted from the stim channels by Neuromag
        systems. In general this should not be used and
        :func:`mne.find_events` should be used for event processing.
        See Notes for more information.
    experimenter : str | None
        Name of the person that ran the experiment.
    file_id : dict | None
        The FIF globally unique ID. See Notes for more information.
    gantry_angle : float | None
        Tilt angle of the gantry in degrees.
    helium_info : dict | None
        Information about the device helium. See Notes for details.
        .. versionadded:: 0.19
    highpass : float
        Highpass corner frequency in Hertz. Zero indicates a DC recording.
    hpi_meas : list of dict
        HPI measurements that were taken at the start of the recording
        (e.g. coil frequencies).
        See Notes for details.
    hpi_results : list of dict
        Head position indicator (HPI) digitization points and fit information
        (e.g., the resulting transform).
        See Notes for details.
    hpi_subsystem : dict | None
        Information about the HPI subsystem that was used (e.g., event
        channel used for cHPI measurements).
        See Notes for details.
    kit_system_id : int
        Identifies the KIT system.
    line_freq : float | None
        Frequency of the power line in Hertz.
    lowpass : float
        Lowpass corner frequency in Hertz.
        It is automatically set to half the sampling rate if there is
        otherwise no low-pass applied to the data.
    maxshield : bool
        True if active shielding (IAS) was active during recording.
    meas_date : datetime
        The time (UTC) of the recording.
        .. versionchanged:: 0.20
           This is stored as a :class:`~python:datetime.datetime` object
           instead of a tuple of seconds/microseconds.
    meas_file : str | None
        Raw measurement file (used for source estimation).
    meas_id : dict | None
        The ID assigned to this measurement by the acquisition system or
        during file conversion. Follows the same format as ``file_id``.
    mri_file : str | None
        File containing the MRI to head transformation (used for source
        estimation).
    mri_head_t : dict | None
        Transformation from MRI to head coordinates (used for source
        estimation).
    mri_id : dict | None
        MRI unique ID (used for source estimation).
    nchan : int
        Number of channels.
    proc_history : list of dict
        The MaxFilter processing history.
        See Notes for details.
    proj_id : int | None
        ID number of the project the experiment belongs to.
    proj_name : str | None
        Name of the project the experiment belongs to.
    projs : list of Projection
        List of SSP operators that operate on the data.
        See :class:`mne.Projection` for details.
    sfreq : float
        Sampling frequency in Hertz.
    subject_info : dict | None
        Information about the subject.
        See Notes for details.
    temp : object | None
        Can be used to store temporary objects in an Info instance. It will not
        survive an I/O roundtrip.
        .. versionadded:: 0.24
    utc_offset : str
        UTC offset of related meas_date (sHH:MM).
        .. versionadded:: 0.19
    working_dir : str
        Working directory used when the source space was created (used for
        source estimation).
    xplotter_layout : str
        Layout of the Xplotter (Neuromag system only).
    See Also
    --------
    mne.create_info
    Notes
    -----
    The following parameters have a nested structure.
    * ``chs`` list of dict:
        cal : float
            The calibration factor to bring the channels to physical
            units. Used in product with ``range`` to scale the data read
            from disk.
        ch_name : str
            The channel name.
        coil_type : int
            Coil type, e.g. ``FIFFV_COIL_MEG``.
        coord_frame : int
            The coordinate frame used, e.g. ``FIFFV_COORD_HEAD``.
        kind : int
            The kind of channel, e.g. ``FIFFV_EEG_CH``.
        loc : array, shape (12,)
            Channel location. For MEG this is the position plus the
            normal given by a 3x3 rotation matrix. For EEG this is the
            position followed by reference position (with 6 unused).
            The values are specified in device coordinates for MEG and in
            head coordinates for EEG channels, respectively.
        logno : int
            Logical channel number, conventions in the usage of this
            number vary.
        range : float
            The hardware-oriented part of the calibration factor.
            This should be only applied to the continuous raw data.
            Used in product with ``cal`` to scale data read from disk.
        scanno : int
            Scanning order number, starting from 1.
        unit : int
            The unit to use, e.g. ``FIFF_UNIT_T_M``.
        unit_mul : int
            Unit multipliers, most commonly ``FIFF_UNITM_NONE``.
    * ``comps`` list of dict:
        ctfkind : int
            CTF compensation grade.
        colcals : ndarray
            Column calibrations.
        mat : dict
            A named matrix dictionary (with entries "data", "col_names", etc.)
            containing the compensation matrix.
        rowcals : ndarray
            Row calibrations.
        save_calibrated : bool
            Were the compensation data saved in calibrated form.
    * ``device_info`` dict:
        type : str
            Device type.
        model : str
            Device model.
        serial : str
            Device serial.
        site : str
            Device site.
    * ``dig`` list of dict:
        kind : int
            The kind of channel,
            e.g. ``FIFFV_POINT_EEG``, ``FIFFV_POINT_CARDINAL``.
        r : array, shape (3,)
            3D position in m. and coord_frame.
        ident : int
            Number specifying the identity of the point.
            e.g. ``FIFFV_POINT_NASION`` if kind is ``FIFFV_POINT_CARDINAL``, or
            42 if kind is ``FIFFV_POINT_EEG``.
        coord_frame : int
            The coordinate frame used, e.g. ``FIFFV_COORD_HEAD``.
    * ``events`` list of dict:
        channels : list of int
            Channel indices for the events.
        list : ndarray, shape (n_events * 3,)
            Events in triplets as number of samples, before, after.
    * ``file_id`` dict:
        version : int
            FIF format version, i.e. ``FIFFC_VERSION``.
        machid : ndarray, shape (2,)
            Unique machine ID, usually derived from the MAC address.
        secs : int
            Time in seconds.
        usecs : int
            Time in microseconds.
    * ``helium_info`` dict:
        he_level_raw : float
            Helium level (%) before position correction.
        helium_level : float
            Helium level (%) after position correction.
        orig_file_guid : str
            Original file GUID.
        meas_date : tuple of int
            The helium level meas date.
    * ``hpi_meas`` list of dict:
        creator : str
            Program that did the measurement.
        sfreq : float
            Sample rate.
        nchan : int
            Number of channels used.
        nave : int
            Number of averages used.
        ncoil : int
            Number of coils used.
        first_samp : int
            First sample used.
        last_samp : int
            Last sample used.
        hpi_coils : list of dict
            Coils, containing:
                number: int
                    Coil number
                epoch : ndarray
                    Buffer containing one epoch and channel.
                slopes : ndarray, shape (n_channels,)
                    HPI data.
                corr_coeff : ndarray, shape (n_channels,)
                    HPI curve fit correlations.
                coil_freq : float
                    HPI coil excitation frequency
    * ``hpi_results`` list of dict:
        dig_points : list
            Digitization points (see ``dig`` definition) for the HPI coils.
        order : ndarray, shape (ncoil,)
            The determined digitization order.
        used : ndarray, shape (nused,)
            The indices of the used coils.
        moments : ndarray, shape (ncoil, 3)
            The coil moments.
        goodness : ndarray, shape (ncoil,)
            The goodness of fits.
        good_limit : float
            The goodness of fit limit.
        dist_limit : float
            The distance limit.
        accept : int
            Whether or not the fit was accepted.
        coord_trans : instance of Transformation
            The resulting MEG<->head transformation.
    * ``hpi_subsystem`` dict:
        ncoil : int
            The number of coils.
        event_channel : str
            The event channel used to encode cHPI status (e.g., STI201).
        hpi_coils : list of ndarray
            List of length ``ncoil``, each 4-element ndarray contains the
            event bits used on the event channel to indicate cHPI status
            (using the first element of these arrays is typically
            sufficient).
    * ``mri_id`` dict:
        version : int
            FIF format version, i.e. ``FIFFC_VERSION``.
        machid : ndarray, shape (2,)
            Unique machine ID, usually derived from the MAC address.
        secs : int
            Time in seconds.
        usecs : int
            Time in microseconds.
    * ``proc_history`` list of dict:
        block_id : dict
            See ``id`` above.
        date : ndarray, shape (2,)
            2-element tuple of seconds and microseconds.
        experimenter : str
            Name of the person who ran the program.
        creator : str
            Program that did the processing.
        max_info : dict
            Maxwel filtering info, can contain:
                sss_info : dict
                    SSS processing information.
                max_st
                    tSSS processing information.
                sss_ctc : dict
                    Cross-talk processing information.
                sss_cal : dict
                    Fine-calibration information.
        smartshield : dict
            MaxShield information. This dictionary is (always?) empty,
            but its presence implies that MaxShield was used during
            acquisition.
    * ``subject_info`` dict:
        id : int
            Integer subject identifier.
        his_id : str
            String subject identifier.
        last_name : str
            Last name.
        first_name : str
            First name.
        middle_name : str
            Middle name.
        birthday : tuple of int
            Birthday in (year, month, day) format.
        sex : int
            Subject sex (0=unknown, 1=male, 2=female).
        hand : int
            Handedness (1=right, 2=left, 3=ambidextrous).
        weight : float
            Weight in kilograms.
        height : float
            Height in meters.
    """
    # Each entry maps an Info key either to a validator/coercion callable
    # (for the few user-settable keys) or to an error-message string (for
    # read-only keys). __setitem__ dispatches on this distinction.
    _attributes = {
        'acq_pars': 'acq_pars cannot be set directly. '
                    'See mne.AcqParserFIF() for details.',
        'acq_stim': 'acq_stim cannot be set directly.',
        'bads': _check_bads,
        'ch_names': 'ch_names cannot be set directly. '
                    'Please use methods inst.add_channels(), '
                    'inst.drop_channels(), inst.pick_channels(), '
                    'inst.rename_channels(), inst.reorder_channels() '
                    'and inst.set_channel_types() instead.',
        'chs': 'chs cannot be set directly. '
               'Please use methods inst.add_channels(), '
               'inst.drop_channels(), inst.pick_channels(), '
               'inst.rename_channels(), inst.reorder_channels() '
               'and inst.set_channel_types() instead.',
        'command_line': 'command_line cannot be set directly.',
        'comps': 'comps cannot be set directly. '
                 'Please use method Raw.apply_gradient_compensation() '
                 'instead.',
        'ctf_head_t': 'ctf_head_t cannot be set directly.',
        'custom_ref_applied': 'custom_ref_applied cannot be set directly. '
                              'Please use method inst.set_eeg_reference() '
                              'instead.',
        'description': _check_description,
        'dev_ctf_t': 'dev_ctf_t cannot be set directly.',
        'dev_head_t': _check_dev_head_t,
        'device_info': _check_device_info,
        'dig': 'dig cannot be set directly. '
               'Please use method inst.set_montage() instead.',
        'events': 'events cannot be set directly.',
        'experimenter': _check_experimenter,
        'file_id': 'file_id cannot be set directly.',
        'gantry_angle': 'gantry_angle cannot be set directly.',
        'helium_info': _check_helium_info,
        'highpass': 'highpass cannot be set directly. '
                    'Please use method inst.filter() instead.',
        'hpi_meas': 'hpi_meas can not be set directly.',
        'hpi_results': 'hpi_results cannot be set directly.',
        'hpi_subsystem': 'hpi_subsystem cannot be set directly.',
        'kit_system_id': 'kit_system_id cannot be set directly.',
        'line_freq': _check_line_freq,
        'lowpass': 'lowpass cannot be set directly. '
                   'Please use method inst.filter() instead.',
        'maxshield': 'maxshield cannot be set directly.',
        'meas_date': 'meas_date cannot be set directly. '
                     'Please use method inst.set_meas_date() instead.',
        'meas_file': 'meas_file cannot be set directly.',
        'meas_id': 'meas_id cannot be set directly.',
        'mri_file': 'mri_file cannot be set directly.',
        'mri_head_t': 'mri_head_t cannot be set directly.',
        'mri_id': 'mri_id cannot be set directly.',
        'nchan': 'nchan cannot be set directly. '
                 'Please use methods inst.add_channels(), '
                 'inst.drop_channels(), and inst.pick_channels() instead.',
        'proc_history': 'proc_history cannot be set directly.',
        'proj_id': 'proj_id cannot be set directly.',
        'proj_name': 'proj_name cannot be set directly.',
        'projs': 'projs cannot be set directly. '
                 'Please use methods inst.add_proj() and inst.del_proj() '
                 'instead.',
        'sfreq': 'sfreq cannot be set directly. '
                 'Please use method inst.resample() instead.',
        'subject_info': _check_subject_info,
        'temp': lambda x: x,
        'utc_offset': 'utc_offset cannot be set directly.',
        'working_dir': 'working_dir cannot be set directly.',
        'xplotter_layout': 'xplotter_layout cannot be set directly.'
    }
    def __init__(self, *args, **kwargs):
        # start unlocked so restricted keys can be populated during init
        self._unlocked = True
        super().__init__(*args, **kwargs)
        # Deal with h5io writing things as dict
        for key in ('dev_head_t', 'ctf_head_t', 'dev_ctf_t'):
            _format_trans(self, key)
        for res in self.get('hpi_results', []):
            _format_trans(res, 'coord_trans')
        if self.get('dig', None) is not None and len(self['dig']):
            if isinstance(self['dig'], dict):  # needs to be unpacked
                self['dig'] = _dict_unpack(self['dig'], _DIG_CAST)
            if not isinstance(self['dig'][0], DigPoint):
                self['dig'] = _format_dig_points(self['dig'])
        if isinstance(self.get('chs', None), dict):
            # chs stored column-wise (h5io): decode names, then unpack rows
            self['chs']['ch_name'] = [str(x) for x in np.char.decode(
                self['chs']['ch_name'], encoding='utf8')]
            self['chs'] = _dict_unpack(self['chs'], _CH_CAST)
        for pi, proj in enumerate(self.get('projs', [])):
            if not isinstance(proj, Projection):
                self['projs'][pi] = Projection(**proj)
        # Old files could have meas_date as tuple instead of datetime
        try:
            meas_date = self['meas_date']
        except KeyError:
            pass
        else:
            self['meas_date'] = _ensure_meas_date_none_or_dt(meas_date)
        self._unlocked = False
    def __getstate__(self):
        """Get state (for pickling)."""
        # dict contents are pickled by the dict machinery; only the lock
        # flag needs to be carried over explicitly.
        return {'_unlocked': self._unlocked}
    def __setstate__(self, state):
        """Set state (for pickling)."""
        self._unlocked = state['_unlocked']
    def __setitem__(self, key, val):
        """Attribute setter."""
        # During unpickling, the _unlocked attribute has not been set, so
        # let __setstate__ do it later and act unlocked now
        unlocked = getattr(self, '_unlocked', True)
        if key in self._attributes:
            # str entry -> read-only key (the str is the error message);
            # callable entry -> validator/coercer for a settable key
            if isinstance(self._attributes[key], str):
                if not unlocked:
                    raise RuntimeError(self._attributes[key])
            else:
                val = self._attributes[key](val)  # attribute checker function
        else:
            raise RuntimeError(
                f"Info does not support directly setting the key {repr(key)}. "
                "You can set info['temp'] to store temporary objects in an "
                "Info instance, but these will not survive an I/O round-trip.")
        super().__setitem__(key, val)
    def update(self, other=None, **kwargs):
        """Update method using __setitem__()."""
        # route every assignment through __setitem__ so that locking and
        # validation still apply (plain dict.update would bypass them)
        iterable = other.items() if isinstance(other, Mapping) else other
        if other is not None:
            for key, val in iterable:
                self[key] = val
        for key, val in kwargs.items():
            self[key] = val
    @contextlib.contextmanager
    def _unlock(self, *, update_redundant=False, check_after=False):
        """Context manager unlocking access to attributes."""
        # needed for nested _unlock()
        state = self._unlocked if hasattr(self, '_unlocked') else False
        self._unlocked = True
        try:
            yield
        except Exception:
            raise
        else:
            # only refresh derived entries / run checks on clean exit
            if update_redundant:
                self._update_redundant()
            if check_after:
                self._check_consistency()
        finally:
            self._unlocked = state
    def copy(self):
        """Copy the instance.
        Returns
        -------
        info : instance of Info
            The copied info.
        """
        return deepcopy(self)
    def normalize_proj(self):
        """(Re-)Normalize projection vectors after subselection.
        Applying projection after sub-selecting a set of channels that
        were originally used to compute the original projection vectors
        can be dangerous (e.g., if few channels remain, most power was
        in channels that are no longer picked, etc.). By default, mne
        will emit a warning when this is done.
        This function will re-normalize projectors to use only the
        remaining channels, thus avoiding that warning. Only use this
        function if you're confident that the projection vectors still
        adequately capture the original signal of interest.
        """
        _normalize_proj(self)
    def __repr__(self):
        """Summarize info instead of printing all."""
        MAX_WIDTH = 68
        # '%s' placeholder is filled with the non_empty count at the end
        strs = ['<Info | %s non-empty values']
        non_empty = 0
        titles = _handle_default('titles')
        for k, v in self.items():
            if k == 'ch_names':
                if v:
                    entr = shorten(', '.join(v), MAX_WIDTH, placeholder=' ...')
                else:
                    entr = '[]'  # always show
                    non_empty -= 1  # don't count as non-empty
            elif k == 'bads':
                if v:
                    entr = '{} items ('.format(len(v))
                    entr += ', '.join(v)
                    entr = shorten(entr, MAX_WIDTH, placeholder=' ...') + ')'
                else:
                    entr = '[]'  # always show
                    non_empty -= 1  # don't count as non-empty
            elif k == 'projs':
                if v:
                    entr = ', '.join(p['desc'] + ': o%s' %
                                     {0: 'ff', 1: 'n'}[p['active']] for p in v)
                    entr = shorten(entr, MAX_WIDTH, placeholder=' ...')
                else:
                    entr = '[]'  # always show projs
                    non_empty -= 1  # don't count as non-empty
            elif k == 'meas_date':
                if v is None:
                    entr = 'unspecified'
                else:
                    entr = v.strftime('%Y-%m-%d %H:%M:%S %Z')
            elif k == 'kit_system_id' and v is not None:
                from .kit.constants import KIT_SYSNAMES
                entr = '%i (%s)' % (v, KIT_SYSNAMES.get(v, 'unknown'))
            elif k == 'dig' and v is not None:
                counts = Counter(d['kind'] for d in v)
                counts = ['%d %s' % (counts[ii],
                                     _dig_kind_proper[_dig_kind_rev[ii]])
                          for ii in _dig_kind_ints if ii in counts]
                counts = (' (%s)' % (', '.join(counts))) if len(counts) else ''
                entr = '%d item%s%s' % (len(v), _pl(len(v)), counts)
            elif isinstance(v, Transform):
                # show entry only for non-identity transform
                if not np.allclose(v["trans"], np.eye(v["trans"].shape[0])):
                    frame1 = _coord_frame_name(v['from'])
                    frame2 = _coord_frame_name(v['to'])
                    entr = '%s -> %s transform' % (frame1, frame2)
                else:
                    entr = ''
            elif k in ['sfreq', 'lowpass', 'highpass']:
                entr = '{:.1f} Hz'.format(v)
            elif isinstance(v, str):
                entr = shorten(v, MAX_WIDTH, placeholder=' ...')
            elif k == 'chs':
                # TODO someday we should refactor with _repr_html_ with
                # bad vs good
                ch_types = [channel_type(self, idx) for idx in range(len(v))]
                ch_counts = Counter(ch_types)
                entr = ', '.join(
                    f'{count} {titles.get(ch_type, ch_type.upper())}'
                    for ch_type, count in ch_counts.items())
            elif k == 'custom_ref_applied':
                entr = str(bool(v))
                if not v:
                    non_empty -= 1  # don't count if 0
            else:
                try:
                    this_len = len(v)
                except TypeError:
                    entr = '{}'.format(v) if v is not None else ''
                else:
                    if this_len > 0:
                        entr = ('%d item%s (%s)' % (this_len, _pl(this_len),
                                                    type(v).__name__))
                    else:
                        entr = ''
            if entr != '':
                non_empty += 1
                strs.append('%s: %s' % (k, entr))
        st = '\n '.join(sorted(strs))
        st += '\n>'
        st %= non_empty
        return st
    def __deepcopy__(self, memodict):
        """Make a deepcopy."""
        result = Info.__new__(Info)
        result._unlocked = True
        for k, v in self.items():
            # chs is roughly half the time but most are immutable
            if k == 'chs':
                # dict shallow copy is fast, so use it then overwrite
                result[k] = list()
                for ch in v:
                    ch = ch.copy()  # shallow
                    ch['loc'] = ch['loc'].copy()
                    result[k].append(ch)
            elif k == 'ch_names':
                # we know it's list of str, shallow okay and saves ~100 µs
                result[k] = v.copy()
            elif k == 'hpi_meas':
                hms = list()
                for hm in v:
                    hm = hm.copy()
                    # the only mutable thing here is some entries in coils
                    hm['hpi_coils'] = [coil.copy() for coil in hm['hpi_coils']]
                    # There is a *tiny* risk here that someone could write
                    # raw.info['hpi_meas'][0]['hpi_coils'][1]['epoch'] = ...
                    # and assume that info.copy() will make an actual copy,
                    # but copying these entries has a 2x slowdown penalty so
                    # probably not worth it for such a deep corner case:
                    # for coil in hpi_coils:
                    #     for key in ('epoch', 'slopes', 'corr_coeff'):
                    #         coil[key] = coil[key].copy()
                    hms.append(hm)
                result[k] = hms
            else:
                result[k] = deepcopy(v, memodict)
        result._unlocked = False
        return result
    def _check_consistency(self, prepend_error=''):
        """Do some self-consistency checks and datatype tweaks."""
        # every bad channel must exist among the channel names
        missing = [bad for bad in self['bads'] if bad not in self['ch_names']]
        if len(missing) > 0:
            msg = '%sbad channel(s) %s marked do not exist in info'
            raise RuntimeError(msg % (prepend_error, missing,))
        meas_date = self.get('meas_date')
        if meas_date is not None:
            # NOTE(review): identity check against datetime.timezone.utc
            # rejects equal-but-distinct UTC tzinfo objects -- presumably
            # intentional to enforce the canonical tz instance
            if (not isinstance(self['meas_date'], datetime.datetime) or
                    self['meas_date'].tzinfo is None or
                    self['meas_date'].tzinfo is not datetime.timezone.utc):
                raise RuntimeError('%sinfo["meas_date"] must be a datetime '
                                   'object in UTC or None, got %r'
                                   % (prepend_error, repr(self['meas_date']),))
        chs = [ch['ch_name'] for ch in self['chs']]
        if len(self['ch_names']) != len(chs) or any(
                ch_1 != ch_2 for ch_1, ch_2 in zip(self['ch_names'], chs)) or \
                self['nchan'] != len(chs):
            raise RuntimeError('%sinfo channel name inconsistency detected, '
                               'please notify mne-python developers'
                               % (prepend_error,))
        # make sure we have the proper datatypes
        with self._unlock():
            for key in ('sfreq', 'highpass', 'lowpass'):
                if self.get(key) is not None:
                    self[key] = float(self[key])
        for pi, proj in enumerate(self.get('projs', [])):
            _validate_type(proj, Projection, f'info["projs"][{pi}]')
            for key in ('kind', 'active', 'desc', 'data', 'explained_var'):
                if key not in proj:
                    raise RuntimeError(f'Projection incomplete, missing {key}')
        # Ensure info['chs'] has immutable entries (copies much faster)
        for ci, ch in enumerate(self['chs']):
            _check_ch_keys(ch, ci)
            ch_name = ch['ch_name']
            if not isinstance(ch_name, str):
                raise TypeError(
                    'Bad info: info["chs"][%d]["ch_name"] is not a string, '
                    'got type %s' % (ci, type(ch_name)))
            for key in _SCALAR_CH_KEYS:
                val = ch.get(key, 1)
                if not _is_numeric(val):
                    raise TypeError(
                        'Bad info: info["chs"][%d][%r] = %s is type %s, must '
                        'be float or int' % (ci, key, val, type(val)))
            loc = ch['loc']
            if not (isinstance(loc, np.ndarray) and loc.shape == (12,)):
                raise TypeError(
                    'Bad info: info["chs"][%d]["loc"] must be ndarray with '
                    '12 elements, got %r' % (ci, loc))
        # make sure channel names are unique
        with self._unlock():
            self['ch_names'] = _unique_channel_names(self['ch_names'])
            for idx, ch_name in enumerate(self['ch_names']):
                self['chs'][idx]['ch_name'] = ch_name
    def _update_redundant(self):
        """Update the redundant entries."""
        # ch_names and nchan are derived from chs and must be kept in sync
        with self._unlock():
            self['ch_names'] = [ch['ch_name'] for ch in self['chs']]
            self['nchan'] = len(self['chs'])
    @property
    def ch_names(self):
        """The channel names (read-only view of ``info['ch_names']``)."""
        return self['ch_names']
    def _get_chs_for_repr(self):
        """Summarize good/bad channel counts and EOG/ECG names for reprs."""
        titles = _handle_default('titles')
        # good channels
        channels = {}
        ch_types = [channel_type(self, idx) for idx in range(len(self['chs']))]
        ch_counts = Counter(ch_types)
        for ch_type, count in ch_counts.items():
            if ch_type == 'meg':
                channels['mag'] = len(pick_types(self, meg='mag'))
                channels['grad'] = len(pick_types(self, meg='grad'))
            elif ch_type == 'eog':
                pick_eog = pick_types(self, eog=True)
                eog = ', '.join(
                    np.array(self['ch_names'])[pick_eog])
            elif ch_type == 'ecg':
                pick_ecg = pick_types(self, ecg=True)
                ecg = ', '.join(
                    np.array(self['ch_names'])[pick_ecg])
            channels[ch_type] = count
        good_channels = ', '.join(
            [f'{v} {titles.get(k, k.upper())}' for k, v in channels.items()])
        # fall back when no ECG/EOG channels were found above
        if 'ecg' not in channels.keys():
            ecg = 'Not available'
        if 'eog' not in channels.keys():
            eog = 'Not available'
        # bad channels
        if len(self['bads']) > 0:
            bad_channels = ', '.join(self['bads'])
        else:
            bad_channels = 'None'
        return good_channels, bad_channels, ecg, eog
    @repr_html
    def _repr_html_(self, caption=None):
        """Summarize info for HTML representation."""
        from ..html_templates import repr_templates_env
        if isinstance(caption, str):
            html = f'<h4>{caption}</h4>'
        else:
            html = ''
        good_channels, bad_channels, ecg, eog = self._get_chs_for_repr()
        # TODO
        # Most of the following checks are to ensure that we get a proper repr
        # for Forward['info'] (and probably others like
        # InverseOperator['info']??), which doesn't seem to follow our standard
        # Info structure used elsewhere.
        # Proposed solution for a future refactoring:
        # Forward['info'] should get its own Info subclass (with respective
        # repr).
        # meas date
        meas_date = self.get('meas_date')
        if meas_date is not None:
            meas_date = meas_date.strftime("%B %d, %Y  %H:%M:%S") + ' GMT'
        projs = self.get('projs')
        if projs:
            projs = [
                f'{p["desc"]} : {"on" if p["active"] else "off"}'
                for p in self['projs']
            ]
        else:
            projs = None
        info_template = repr_templates_env.get_template('info.html.jinja')
        return html + info_template.render(
            caption=caption,
            meas_date=meas_date,
            projs=projs,
            ecg=ecg,
            eog=eog,
            good_channels=good_channels,
            bad_channels=bad_channels,
            dig=self.get('dig'),
            subject_info=self.get('subject_info'),
            lowpass=self.get('lowpass'),
            highpass=self.get('highpass'),
            sfreq=self.get('sfreq'),
            experimenter=self.get('experimenter'),
        )
def _simplify_info(info):
    """Return a simplified info structure to speed up picking."""
    # Only the fields that picking logic actually inspects are copied.
    keep = ('ch_name', 'kind', 'unit', 'coil_type', 'loc', 'cal')
    slim_chs = []
    for ch in info['chs']:
        slim_chs.append({key: ch[key] for key in keep})
    sub_info = Info(chs=slim_chs, bads=info['bads'], comps=info['comps'],
                    projs=info['projs'],
                    custom_ref_applied=info['custom_ref_applied'])
    sub_info._update_redundant()
    return sub_info
@verbose
def read_fiducials(fname, verbose=None):
    """Read fiducials from a fiff file.

    Parameters
    ----------
    fname : path-like
        The filename to read.
    %(verbose)s

    Returns
    -------
    pts : list of dict
        List of digitizer points (each point in a dict).
    coord_frame : int
        The coordinate frame of the points (one of
        ``mne.io.constants.FIFF.FIFFV_COORD_...``).
    """
    fname = _check_fname(fname=fname, overwrite='read', must_exist=True)
    fid, tree, _ = fiff_open(fname)
    with fid:
        isotrak = dir_tree_find(tree, FIFF.FIFFB_ISOTRAK)[0]
        pts = []
        coord_frame = FIFF.FIFFV_COORD_HEAD
        for k in range(isotrak['nent']):
            ent = isotrak['directory'][k]
            if ent.kind == FIFF.FIFF_DIG_POINT:
                pts.append(DigPoint(read_tag(fid, ent.pos).data))
            elif ent.kind == FIFF.FIFF_MNE_COORD_FRAME:
                coord_frame = read_tag(fid, ent.pos).data[0]
        # Map the raw integer constant to its named version when known
        # (also applies to the FIFFV_COORD_HEAD default above).
        coord_frame = _coord_frame_named.get(coord_frame, coord_frame)
        # coord_frame is not stored per point in the file, so attach it here.
        for pt in pts:
            pt['coord_frame'] = coord_frame
    return pts, coord_frame
@verbose
def write_fiducials(fname, pts, coord_frame='unknown', *, overwrite=False,
                    verbose=None):
    """Write fiducials to a fiff file.

    Parameters
    ----------
    fname : path-like
        Destination file name.
    pts : iterator of dict
        Iterator through digitizer points. Each point is a dictionary with
        the keys 'kind', 'ident' and 'r'.
    coord_frame : str | int
        The coordinate frame of the points. If a string, must be one of
        ``'meg'``, ``'mri'``, ``'mri_voxel'``, ``'head'``,
        ``'mri_tal'``, ``'ras'``, ``'fs_tal'``, ``'ctf_head'``,
        ``'ctf_meg'``, and ``'unknown'``
        If an integer, must be one of the constants defined as
        ``mne.io.constants.FIFF.FIFFV_COORD_...``.
    %(overwrite)s

        .. versionadded:: 1.0
    %(verbose)s
    """
    # Thin wrapper: validation and actual writing are delegated to write_dig.
    write_dig(fname, pts, coord_frame, overwrite=overwrite)
@verbose
def read_info(fname, verbose=None):
    """Read measurement info from a file.

    Parameters
    ----------
    fname : str
        File name.
    %(verbose)s

    Returns
    -------
    %(info_not_none)s
    """
    # Open the FIF file, read the measurement-info node, and discard the
    # ``meas`` tree node that read_meas_info also returns.
    fobj, tree, _ = fiff_open(fname)
    with fobj as fid:
        info, _ = read_meas_info(fid, tree)
    return info
def read_bad_channels(fid, node):
    """Read bad channels.

    Parameters
    ----------
    fid : file
        The file descriptor.
    node : dict
        The node of the FIF tree that contains info on the bad channels.

    Returns
    -------
    bads : list
        A list of bad channel's names.
    """
    # Bug fix: _read_bad_channels requires a ch_names_mapping argument, so
    # the previous two-argument call raised TypeError.  Pass an explicit
    # None (no renaming) for this public, mapping-free entry point.
    return _read_bad_channels(fid, node, ch_names_mapping=None)
def _read_bad_channels(fid, node, ch_names_mapping=None):
    """Read bad channel names from ``node``, applying an optional rename map.

    Parameters
    ----------
    fid : file
        The file descriptor.
    node : dict
        The node of the FIF tree that contains info on the bad channels.
    ch_names_mapping : dict | None
        Mapping of original to renamed channel names; ``None`` means no
        renaming.  Defaulted to ``None`` so two-argument callers (e.g. the
        public ``read_bad_channels``) keep working.

    Returns
    -------
    bads : list
        A list of bad channel names.
    """
    ch_names_mapping = {} if ch_names_mapping is None else ch_names_mapping
    nodes = dir_tree_find(node, FIFF.FIFFB_MNE_BAD_CHANNELS)
    bads = []
    if len(nodes) > 0:
        # If multiple bad-channel blocks exist, the last one wins.
        for node in nodes:
            tag = find_tag(fid, node, FIFF.FIFF_MNE_CH_NAME_LIST)
            if tag is not None and tag.data is not None:
                bads = tag.data.split(':')
    # In-place rename so the same list object is returned.
    bads[:] = _rename_list(bads, ch_names_mapping)
    return bads
@verbose
def read_meas_info(fid, tree, clean_bads=False, verbose=None):
    """Read the measurement info.

    Parameters
    ----------
    fid : file
        Open file descriptor.
    tree : tree
        FIF tree structure.
    clean_bads : bool
        If True, clean info['bads'] before running consistency check.
        Should only be needed for old files where we did not check bads
        before saving.
    %(verbose)s

    Returns
    -------
    %(info_not_none)s
    meas : dict
        Node in tree that contains the info.
    """
    # Find the desired blocks
    meas = dir_tree_find(tree, FIFF.FIFFB_MEAS)
    if len(meas) == 0:
        raise ValueError('Could not find measurement data')
    if len(meas) > 1:
        raise ValueError('Cannot read more that 1 measurement data')
    meas = meas[0]
    meas_info = dir_tree_find(meas, FIFF.FIFFB_MEAS_INFO)
    if len(meas_info) == 0:
        raise ValueError('Could not find measurement info')
    if len(meas_info) > 1:
        raise ValueError('Cannot read more that 1 measurement info')
    meas_info = meas_info[0]
    # Read measurement info
    # Initialize everything we may (or may not) find in the directory scan
    # below; values left at their defaults were simply absent from the file.
    dev_head_t = None
    ctf_head_t = None
    dev_ctf_t = None
    meas_date = None
    utc_offset = None
    highpass = None
    lowpass = None
    nchan = None
    sfreq = None
    chs = []
    experimenter = None
    description = None
    proj_id = None
    proj_name = None
    line_freq = None
    gantry_angle = None
    custom_ref_applied = FIFF.FIFFV_MNE_CUSTOM_REF_OFF
    xplotter_layout = None
    kit_system_id = None
    # Single pass over the directory entries of the measurement-info block.
    for k in range(meas_info['nent']):
        kind = meas_info['directory'][k].kind
        pos = meas_info['directory'][k].pos
        if kind == FIFF.FIFF_NCHAN:
            tag = read_tag(fid, pos)
            nchan = int(tag.data)
        elif kind == FIFF.FIFF_SFREQ:
            tag = read_tag(fid, pos)
            sfreq = float(tag.data)
        elif kind == FIFF.FIFF_CH_INFO:
            tag = read_tag(fid, pos)
            chs.append(tag.data)
        elif kind == FIFF.FIFF_LOWPASS:
            tag = read_tag(fid, pos)
            # NaN means "not set"; keep the None default in that case
            if not np.isnan(tag.data):
                lowpass = float(tag.data)
        elif kind == FIFF.FIFF_HIGHPASS:
            tag = read_tag(fid, pos)
            if not np.isnan(tag.data):
                highpass = float(tag.data)
        elif kind == FIFF.FIFF_MEAS_DATE:
            tag = read_tag(fid, pos)
            meas_date = tuple(tag.data)
            if len(meas_date) == 1:  # can happen from old C conversions
                meas_date = (meas_date[0], 0)
        elif kind == FIFF.FIFF_UTC_OFFSET:
            tag = read_tag(fid, pos)
            utc_offset = str(tag.data)
        elif kind == FIFF.FIFF_COORD_TRANS:
            # Classify the transform by its endpoint coordinate frames.
            tag = read_tag(fid, pos)
            cand = tag.data
            if cand['from'] == FIFF.FIFFV_COORD_DEVICE and \
                    cand['to'] == FIFF.FIFFV_COORD_HEAD:
                dev_head_t = cand
            elif cand['from'] == FIFF.FIFFV_COORD_HEAD and \
                    cand['to'] == FIFF.FIFFV_COORD_DEVICE:
                # this reversal can happen with BabyMEG data
                dev_head_t = invert_transform(cand)
            elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \
                    cand['to'] == FIFF.FIFFV_COORD_HEAD:
                ctf_head_t = cand
            elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE and \
                    cand['to'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD:
                dev_ctf_t = cand
        elif kind == FIFF.FIFF_EXPERIMENTER:
            tag = read_tag(fid, pos)
            experimenter = tag.data
        elif kind == FIFF.FIFF_DESCRIPTION:
            tag = read_tag(fid, pos)
            description = tag.data
        elif kind == FIFF.FIFF_PROJ_ID:
            tag = read_tag(fid, pos)
            proj_id = tag.data
        elif kind == FIFF.FIFF_PROJ_NAME:
            tag = read_tag(fid, pos)
            proj_name = tag.data
        elif kind == FIFF.FIFF_LINE_FREQ:
            tag = read_tag(fid, pos)
            line_freq = float(tag.data)
        elif kind == FIFF.FIFF_GANTRY_ANGLE:
            tag = read_tag(fid, pos)
            gantry_angle = float(tag.data)
        elif kind in [FIFF.FIFF_MNE_CUSTOM_REF, 236]:  # 236 used before v0.11
            tag = read_tag(fid, pos)
            custom_ref_applied = int(tag.data)
        elif kind == FIFF.FIFF_XPLOTTER_LAYOUT:
            tag = read_tag(fid, pos)
            xplotter_layout = str(tag.data)
        elif kind == FIFF.FIFF_MNE_KIT_SYSTEM_ID:
            tag = read_tag(fid, pos)
            kit_system_id = int(tag.data)
    # Apply extended channel info (may rename channels); the returned mapping
    # is needed later to rename projs/comps/bads consistently.
    ch_names_mapping = _read_extended_ch_info(chs, meas_info, fid)
    # Check that we have everything we need
    if nchan is None:
        raise ValueError('Number of channels is not defined')
    if sfreq is None:
        raise ValueError('Sampling frequency is not defined')
    if len(chs) == 0:
        raise ValueError('Channel information not defined')
    if len(chs) != nchan:
        raise ValueError('Incorrect number of channel definitions found')
    # Fall back to transforms stored in the HPI result block when the
    # measurement-info block did not provide them.
    if dev_head_t is None or ctf_head_t is None:
        hpi_result = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT)
        if len(hpi_result) == 1:
            hpi_result = hpi_result[0]
            for k in range(hpi_result['nent']):
                kind = hpi_result['directory'][k].kind
                pos = hpi_result['directory'][k].pos
                if kind == FIFF.FIFF_COORD_TRANS:
                    tag = read_tag(fid, pos)
                    cand = tag.data
                    if (cand['from'] == FIFF.FIFFV_COORD_DEVICE and
                            cand['to'] == FIFF.FIFFV_COORD_HEAD and
                            dev_head_t is None):
                        dev_head_t = cand
                    elif (cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and
                            cand['to'] == FIFF.FIFFV_COORD_HEAD and
                            ctf_head_t is None):
                        ctf_head_t = cand
    # Locate the Polhemus data
    dig = _read_dig_fif(fid, meas_info)
    # Locate the acquisition information
    acqpars = dir_tree_find(meas_info, FIFF.FIFFB_DACQ_PARS)
    acq_pars = None
    acq_stim = None
    if len(acqpars) == 1:
        acqpars = acqpars[0]
        for k in range(acqpars['nent']):
            kind = acqpars['directory'][k].kind
            pos = acqpars['directory'][k].pos
            if kind == FIFF.FIFF_DACQ_PARS:
                tag = read_tag(fid, pos)
                acq_pars = tag.data
            elif kind == FIFF.FIFF_DACQ_STIM:
                tag = read_tag(fid, pos)
                acq_stim = tag.data
    # Load the SSP data
    projs = _read_proj(
        fid, meas_info, ch_names_mapping=ch_names_mapping)
    # Load the CTF compensation data
    comps = _read_ctf_comp(
        fid, meas_info, chs, ch_names_mapping=ch_names_mapping)
    # Load the bad channel list
    bads = _read_bad_channels(
        fid, meas_info, ch_names_mapping=ch_names_mapping)
    #
    # Put the data together
    #
    info = Info(file_id=tree['id'])
    info._unlocked = True
    # Locate events list
    events = dir_tree_find(meas_info, FIFF.FIFFB_EVENTS)
    evs = list()
    for event in events:
        ev = dict()
        for k in range(event['nent']):
            kind = event['directory'][k].kind
            pos = event['directory'][k].pos
            if kind == FIFF.FIFF_EVENT_CHANNELS:
                ev['channels'] = read_tag(fid, pos).data
            elif kind == FIFF.FIFF_EVENT_LIST:
                ev['list'] = read_tag(fid, pos).data
        evs.append(ev)
    info['events'] = evs
    # Locate HPI result
    hpi_results = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT)
    hrs = list()
    for hpi_result in hpi_results:
        hr = dict()
        hr['dig_points'] = []
        for k in range(hpi_result['nent']):
            kind = hpi_result['directory'][k].kind
            pos = hpi_result['directory'][k].pos
            if kind == FIFF.FIFF_DIG_POINT:
                hr['dig_points'].append(read_tag(fid, pos).data)
            elif kind == FIFF.FIFF_HPI_DIGITIZATION_ORDER:
                hr['order'] = read_tag(fid, pos).data
            elif kind == FIFF.FIFF_HPI_COILS_USED:
                hr['used'] = read_tag(fid, pos).data
            elif kind == FIFF.FIFF_HPI_COIL_MOMENTS:
                hr['moments'] = read_tag(fid, pos).data
            elif kind == FIFF.FIFF_HPI_FIT_GOODNESS:
                hr['goodness'] = read_tag(fid, pos).data
            elif kind == FIFF.FIFF_HPI_FIT_GOOD_LIMIT:
                hr['good_limit'] = float(read_tag(fid, pos).data)
            elif kind == FIFF.FIFF_HPI_FIT_DIST_LIMIT:
                hr['dist_limit'] = float(read_tag(fid, pos).data)
            elif kind == FIFF.FIFF_HPI_FIT_ACCEPT:
                hr['accept'] = int(read_tag(fid, pos).data)
            elif kind == FIFF.FIFF_COORD_TRANS:
                hr['coord_trans'] = read_tag(fid, pos).data
        hrs.append(hr)
    info['hpi_results'] = hrs
    # Locate HPI Measurement
    hpi_meass = dir_tree_find(meas_info, FIFF.FIFFB_HPI_MEAS)
    hms = list()
    for hpi_meas in hpi_meass:
        hm = dict()
        for k in range(hpi_meas['nent']):
            kind = hpi_meas['directory'][k].kind
            pos = hpi_meas['directory'][k].pos
            if kind == FIFF.FIFF_CREATOR:
                hm['creator'] = str(read_tag(fid, pos).data)
            elif kind == FIFF.FIFF_SFREQ:
                hm['sfreq'] = float(read_tag(fid, pos).data)
            elif kind == FIFF.FIFF_NCHAN:
                hm['nchan'] = int(read_tag(fid, pos).data)
            elif kind == FIFF.FIFF_NAVE:
                hm['nave'] = int(read_tag(fid, pos).data)
            elif kind == FIFF.FIFF_HPI_NCOIL:
                hm['ncoil'] = int(read_tag(fid, pos).data)
            elif kind == FIFF.FIFF_FIRST_SAMPLE:
                hm['first_samp'] = int(read_tag(fid, pos).data)
            elif kind == FIFF.FIFF_LAST_SAMPLE:
                hm['last_samp'] = int(read_tag(fid, pos).data)
        hpi_coils = dir_tree_find(hpi_meas, FIFF.FIFFB_HPI_COIL)
        hcs = []
        for hpi_coil in hpi_coils:
            hc = dict()
            for k in range(hpi_coil['nent']):
                kind = hpi_coil['directory'][k].kind
                pos = hpi_coil['directory'][k].pos
                if kind == FIFF.FIFF_HPI_COIL_NO:
                    hc['number'] = int(read_tag(fid, pos).data)
                elif kind == FIFF.FIFF_EPOCH:
                    hc['epoch'] = read_tag(fid, pos).data
                    # mark read-only so downstream code cannot mutate
                    hc['epoch'].flags.writeable = False
                elif kind == FIFF.FIFF_HPI_SLOPES:
                    hc['slopes'] = read_tag(fid, pos).data
                    hc['slopes'].flags.writeable = False
                elif kind == FIFF.FIFF_HPI_CORR_COEFF:
                    hc['corr_coeff'] = read_tag(fid, pos).data
                    hc['corr_coeff'].flags.writeable = False
                elif kind == FIFF.FIFF_HPI_COIL_FREQ:
                    hc['coil_freq'] = float(read_tag(fid, pos).data)
            hcs.append(hc)
        hm['hpi_coils'] = hcs
        hms.append(hm)
    info['hpi_meas'] = hms
    del hms
    # Subject information block (optional)
    subject_info = dir_tree_find(meas_info, FIFF.FIFFB_SUBJECT)
    si = None
    if len(subject_info) == 1:
        subject_info = subject_info[0]
        si = dict()
        for k in range(subject_info['nent']):
            kind = subject_info['directory'][k].kind
            pos = subject_info['directory'][k].pos
            if kind == FIFF.FIFF_SUBJ_ID:
                tag = read_tag(fid, pos)
                si['id'] = int(tag.data)
            elif kind == FIFF.FIFF_SUBJ_HIS_ID:
                tag = read_tag(fid, pos)
                si['his_id'] = str(tag.data)
            elif kind == FIFF.FIFF_SUBJ_LAST_NAME:
                tag = read_tag(fid, pos)
                si['last_name'] = str(tag.data)
            elif kind == FIFF.FIFF_SUBJ_FIRST_NAME:
                tag = read_tag(fid, pos)
                si['first_name'] = str(tag.data)
            elif kind == FIFF.FIFF_SUBJ_MIDDLE_NAME:
                tag = read_tag(fid, pos)
                si['middle_name'] = str(tag.data)
            elif kind == FIFF.FIFF_SUBJ_BIRTH_DAY:
                # Some files contain out-of-range Julian dates; skip rather
                # than fail the whole read.
                try:
                    tag = read_tag(fid, pos)
                except OverflowError:
                    warn('Encountered an error while trying to read the '
                         'birthday from the input data. No birthday will be '
                         'set. Please check the integrity of the birthday '
                         'information in the input data.')
                    continue
                si['birthday'] = tag.data
            elif kind == FIFF.FIFF_SUBJ_SEX:
                tag = read_tag(fid, pos)
                si['sex'] = int(tag.data)
            elif kind == FIFF.FIFF_SUBJ_HAND:
                tag = read_tag(fid, pos)
                si['hand'] = int(tag.data)
            elif kind == FIFF.FIFF_SUBJ_WEIGHT:
                tag = read_tag(fid, pos)
                si['weight'] = tag.data
            elif kind == FIFF.FIFF_SUBJ_HEIGHT:
                tag = read_tag(fid, pos)
                si['height'] = tag.data
    info['subject_info'] = si
    del si
    # Device information block (optional)
    device_info = dir_tree_find(meas_info, FIFF.FIFFB_DEVICE)
    di = None
    if len(device_info) == 1:
        device_info = device_info[0]
        di = dict()
        for k in range(device_info['nent']):
            kind = device_info['directory'][k].kind
            pos = device_info['directory'][k].pos
            if kind == FIFF.FIFF_DEVICE_TYPE:
                tag = read_tag(fid, pos)
                di['type'] = str(tag.data)
            elif kind == FIFF.FIFF_DEVICE_MODEL:
                tag = read_tag(fid, pos)
                di['model'] = str(tag.data)
            elif kind == FIFF.FIFF_DEVICE_SERIAL:
                tag = read_tag(fid, pos)
                di['serial'] = str(tag.data)
            elif kind == FIFF.FIFF_DEVICE_SITE:
                tag = read_tag(fid, pos)
                di['site'] = str(tag.data)
    info['device_info'] = di
    del di
    # Helium level block (optional)
    helium_info = dir_tree_find(meas_info, FIFF.FIFFB_HELIUM)
    hi = None
    if len(helium_info) == 1:
        helium_info = helium_info[0]
        hi = dict()
        for k in range(helium_info['nent']):
            kind = helium_info['directory'][k].kind
            pos = helium_info['directory'][k].pos
            if kind == FIFF.FIFF_HE_LEVEL_RAW:
                tag = read_tag(fid, pos)
                hi['he_level_raw'] = float(tag.data)
            elif kind == FIFF.FIFF_HELIUM_LEVEL:
                tag = read_tag(fid, pos)
                hi['helium_level'] = float(tag.data)
            elif kind == FIFF.FIFF_ORIG_FILE_GUID:
                tag = read_tag(fid, pos)
                hi['orig_file_guid'] = str(tag.data)
            elif kind == FIFF.FIFF_MEAS_DATE:
                tag = read_tag(fid, pos)
                hi['meas_date'] = tuple(int(t) for t in tag.data)
    info['helium_info'] = hi
    del hi
    # HPI subsystem block (optional)
    hpi_subsystem = dir_tree_find(meas_info, FIFF.FIFFB_HPI_SUBSYSTEM)
    hs = None
    if len(hpi_subsystem) == 1:
        hpi_subsystem = hpi_subsystem[0]
        hs = dict()
        for k in range(hpi_subsystem['nent']):
            kind = hpi_subsystem['directory'][k].kind
            pos = hpi_subsystem['directory'][k].pos
            if kind == FIFF.FIFF_HPI_NCOIL:
                tag = read_tag(fid, pos)
                hs['ncoil'] = int(tag.data)
            elif kind == FIFF.FIFF_EVENT_CHANNEL:
                tag = read_tag(fid, pos)
                hs['event_channel'] = str(tag.data)
            # NOTE(review): this coil scan runs on every directory entry of
            # the loop (recomputing the same result); harmless but redundant.
            hpi_coils = dir_tree_find(hpi_subsystem, FIFF.FIFFB_HPI_COIL)
            hc = []
            for coil in hpi_coils:
                this_coil = dict()
                for j in range(coil['nent']):
                    kind = coil['directory'][j].kind
                    pos = coil['directory'][j].pos
                    if kind == FIFF.FIFF_EVENT_BITS:
                        tag = read_tag(fid, pos)
                        this_coil['event_bits'] = np.array(tag.data)
                hc.append(this_coil)
            hs['hpi_coils'] = hc
    info['hpi_subsystem'] = hs
    # Read processing history
    info['proc_history'] = _read_proc_history(fid, tree)
    # Make the most appropriate selection for the measurement id
    if meas_info['parent_id'] is None:
        if meas_info['id'] is None:
            if meas['id'] is None:
                if meas['parent_id'] is None:
                    info['meas_id'] = info['file_id']
                else:
                    info['meas_id'] = meas['parent_id']
            else:
                info['meas_id'] = meas['id']
        else:
            info['meas_id'] = meas_info['id']
    else:
        info['meas_id'] = meas_info['parent_id']
    info['experimenter'] = experimenter
    info['description'] = description
    info['proj_id'] = proj_id
    info['proj_name'] = proj_name
    # Fall back to the measurement id timestamp when no date tag was found
    if meas_date is None:
        meas_date = (info['meas_id']['secs'], info['meas_id']['usecs'])
    info['meas_date'] = _ensure_meas_date_none_or_dt(meas_date)
    info['utc_offset'] = utc_offset
    info['sfreq'] = sfreq
    info['highpass'] = highpass if highpass is not None else 0.
    info['lowpass'] = lowpass if lowpass is not None else info['sfreq'] / 2.0
    info['line_freq'] = line_freq
    info['gantry_angle'] = gantry_angle
    # Add the channel information and make a list of channel names
    # for convenience
    info['chs'] = chs
    #
    # Add the coordinate transformations
    #
    info['dev_head_t'] = dev_head_t
    info['ctf_head_t'] = ctf_head_t
    info['dev_ctf_t'] = dev_ctf_t
    # Derive dev->ctf_head by composing the two known transforms when missing
    if dev_head_t is not None and ctf_head_t is not None and dev_ctf_t is None:
        from ..transforms import Transform
        head_ctf_trans = np.linalg.inv(ctf_head_t['trans'])
        dev_ctf_trans = np.dot(head_ctf_trans, info['dev_head_t']['trans'])
        info['dev_ctf_t'] = Transform('meg', 'ctf_head', dev_ctf_trans)
    # All kinds of auxliary stuff
    info['dig'] = _format_dig_points(dig)
    info['bads'] = bads
    info._update_redundant()
    if clean_bads:
        info['bads'] = [b for b in bads if b in info['ch_names']]
    info['projs'] = projs
    info['comps'] = comps
    info['acq_pars'] = acq_pars
    info['acq_stim'] = acq_stim
    info['custom_ref_applied'] = custom_ref_applied
    info['xplotter_layout'] = xplotter_layout
    info['kit_system_id'] = kit_system_id
    info._check_consistency()
    info._unlocked = False
    return info, meas
def _read_extended_ch_info(chs, parent, fid):
    """Apply extended channel info in place; return the rename mapping.

    Returns None when the file contains no extended channel info blocks.
    """
    ch_infos = dir_tree_find(parent, FIFF.FIFFB_CH_INFO)
    if not ch_infos:
        return
    _check_option('length of channel infos', len(ch_infos), [len(chs)])
    logger.info(' Reading extended channel information')
    # Here we assume that ``remap`` is in the same order as the channels
    # themselves, which is hopefully safe enough.
    ch_names_mapping = dict()
    for extra, ch in zip(ch_infos, chs):
        for idx in range(extra['nent']):
            ent = extra['directory'][idx]
            if ent.kind not in _CH_READ_MAP:
                # This shouldn't happen if we're up to date with the FIFF
                # spec
                warn(f'Discarding extra channel information kind {ent.kind}')
                continue
            key, cast = _CH_READ_MAP[ent.kind]
            assert key in ch
            data = read_tag(fid, ent.pos).data
            if data is not None:
                data = cast(data)
                if key == 'ch_name':
                    ch_names_mapping[ch[key]] = data
                ch[key] = data
        _update_ch_info_named(ch)
    # we need to return ch_names_mapping so that we can also rename the
    # bad channels
    return ch_names_mapping
def _rename_comps(comps, ch_names_mapping):
if not (comps and ch_names_mapping):
return
for comp in comps:
data = comp['data']
for key in ('row_names', 'col_names'):
data[key][:] = _rename_list(data[key], ch_names_mapping)
def _ensure_meas_date_none_or_dt(meas_date):
if meas_date is None or np.array_equal(meas_date, DATE_NONE):
meas_date = None
elif not isinstance(meas_date, datetime.datetime):
meas_date = _stamp_to_dt(meas_date)
return meas_date
def _check_dates(info, prepend_error=''):
"""Check dates before writing as fif files.
It's needed because of the limited integer precision
of the fix standard.
"""
for key in ('file_id', 'meas_id'):
value = info.get(key)
if value is not None:
assert 'msecs' not in value
for key_2 in ('secs', 'usecs'):
if (value[key_2] < np.iinfo('>i4').min or
value[key_2] > np.iinfo('>i4').max):
raise RuntimeError('%sinfo[%s][%s] must be between '
'"%r" and "%r", got "%r"'
% (prepend_error, key, key_2,
np.iinfo('>i4').min,
np.iinfo('>i4').max,
value[key_2]),)
meas_date = info.get('meas_date')
if meas_date is None:
return
meas_date_stamp = _dt_to_stamp(meas_date)
if (meas_date_stamp[0] < np.iinfo('>i4').min or
meas_date_stamp[0] > np.iinfo('>i4').max):
raise RuntimeError(
'%sinfo["meas_date"] seconds must be between "%r" '
'and "%r", got "%r"'
% (prepend_error, (np.iinfo('>i4').min, 0),
(np.iinfo('>i4').max, 0), meas_date_stamp[0],))
@fill_doc
def write_meas_info(fid, info, data_type=None, reset_range=True):
    """Write measurement info into a file id (from a fif file).

    Parameters
    ----------
    fid : file
        Open file descriptor.
    %(info_not_none)s
    data_type : int
        The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),
        5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for
        raw data.
    reset_range : bool
        If True, info['chs'][k]['range'] will be set to unity.

    Notes
    -----
    Tags are written in a particular order for compatibility with maxfilter.
    """
    # Validate before writing anything so we don't leave a partial block.
    info._check_consistency()
    _check_dates(info)
    # Measurement info
    start_block(fid, FIFF.FIFFB_MEAS_INFO)
    # Add measurement id
    if info['meas_id'] is not None:
        write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
    # Event lists: one FIFFB_EVENTS block per event
    for event in info['events']:
        start_block(fid, FIFF.FIFFB_EVENTS)
        if event.get('channels') is not None:
            write_int(fid, FIFF.FIFF_EVENT_CHANNELS, event['channels'])
        if event.get('list') is not None:
            write_int(fid, FIFF.FIFF_EVENT_LIST, event['list'])
        end_block(fid, FIFF.FIFFB_EVENTS)
    # HPI Result
    for hpi_result in info['hpi_results']:
        start_block(fid, FIFF.FIFFB_HPI_RESULT)
        write_dig_points(fid, hpi_result['dig_points'])
        if 'order' in hpi_result:
            write_int(fid, FIFF.FIFF_HPI_DIGITIZATION_ORDER,
                      hpi_result['order'])
        if 'used' in hpi_result:
            write_int(fid, FIFF.FIFF_HPI_COILS_USED, hpi_result['used'])
        if 'moments' in hpi_result:
            write_float_matrix(fid, FIFF.FIFF_HPI_COIL_MOMENTS,
                               hpi_result['moments'])
        if 'goodness' in hpi_result:
            write_float(fid, FIFF.FIFF_HPI_FIT_GOODNESS,
                        hpi_result['goodness'])
        if 'good_limit' in hpi_result:
            write_float(fid, FIFF.FIFF_HPI_FIT_GOOD_LIMIT,
                        hpi_result['good_limit'])
        if 'dist_limit' in hpi_result:
            write_float(fid, FIFF.FIFF_HPI_FIT_DIST_LIMIT,
                        hpi_result['dist_limit'])
        if 'accept' in hpi_result:
            write_int(fid, FIFF.FIFF_HPI_FIT_ACCEPT, hpi_result['accept'])
        if 'coord_trans' in hpi_result:
            write_coord_trans(fid, hpi_result['coord_trans'])
        end_block(fid, FIFF.FIFFB_HPI_RESULT)
    # HPI Measurement
    for hpi_meas in info['hpi_meas']:
        start_block(fid, FIFF.FIFFB_HPI_MEAS)
        if hpi_meas.get('creator') is not None:
            write_string(fid, FIFF.FIFF_CREATOR, hpi_meas['creator'])
        if hpi_meas.get('sfreq') is not None:
            write_float(fid, FIFF.FIFF_SFREQ, hpi_meas['sfreq'])
        if hpi_meas.get('nchan') is not None:
            write_int(fid, FIFF.FIFF_NCHAN, hpi_meas['nchan'])
        if hpi_meas.get('nave') is not None:
            write_int(fid, FIFF.FIFF_NAVE, hpi_meas['nave'])
        if hpi_meas.get('ncoil') is not None:
            write_int(fid, FIFF.FIFF_HPI_NCOIL, hpi_meas['ncoil'])
        if hpi_meas.get('first_samp') is not None:
            write_int(fid, FIFF.FIFF_FIRST_SAMPLE, hpi_meas['first_samp'])
        if hpi_meas.get('last_samp') is not None:
            write_int(fid, FIFF.FIFF_LAST_SAMPLE, hpi_meas['last_samp'])
        for hpi_coil in hpi_meas['hpi_coils']:
            start_block(fid, FIFF.FIFFB_HPI_COIL)
            if hpi_coil.get('number') is not None:
                write_int(fid, FIFF.FIFF_HPI_COIL_NO, hpi_coil['number'])
            if hpi_coil.get('epoch') is not None:
                write_float_matrix(fid, FIFF.FIFF_EPOCH, hpi_coil['epoch'])
            if hpi_coil.get('slopes') is not None:
                write_float(fid, FIFF.FIFF_HPI_SLOPES, hpi_coil['slopes'])
            if hpi_coil.get('corr_coeff') is not None:
                write_float(fid, FIFF.FIFF_HPI_CORR_COEFF,
                            hpi_coil['corr_coeff'])
            if hpi_coil.get('coil_freq') is not None:
                write_float(fid, FIFF.FIFF_HPI_COIL_FREQ,
                            hpi_coil['coil_freq'])
            end_block(fid, FIFF.FIFFB_HPI_COIL)
        end_block(fid, FIFF.FIFFB_HPI_MEAS)
    # Polhemus data
    write_dig_points(fid, info['dig'], block=True)
    # megacq parameters
    if info['acq_pars'] is not None or info['acq_stim'] is not None:
        start_block(fid, FIFF.FIFFB_DACQ_PARS)
        if info['acq_pars'] is not None:
            write_string(fid, FIFF.FIFF_DACQ_PARS, info['acq_pars'])
        if info['acq_stim'] is not None:
            write_string(fid, FIFF.FIFF_DACQ_STIM, info['acq_stim'])
        end_block(fid, FIFF.FIFFB_DACQ_PARS)
    # Coordinate transformations if the HPI result block was not there
    if info['dev_head_t'] is not None:
        write_coord_trans(fid, info['dev_head_t'])
    if info['ctf_head_t'] is not None:
        write_coord_trans(fid, info['ctf_head_t'])
    if info['dev_ctf_t'] is not None:
        write_coord_trans(fid, info['dev_ctf_t'])
    # Projectors
    # The mapping handles channel names longer than the FIF limit; it is
    # reused below for bads, channel info, and comps so all stay consistent.
    ch_names_mapping = _make_ch_names_mapping(info['chs'])
    _write_proj(fid, info['projs'], ch_names_mapping=ch_names_mapping)
    # Bad channels
    if len(info['bads']) > 0:
        bads = _rename_list(info['bads'], ch_names_mapping)
        start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
        write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, bads)
        end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
    # General
    if info.get('experimenter') is not None:
        write_string(fid, FIFF.FIFF_EXPERIMENTER, info['experimenter'])
    if info.get('description') is not None:
        write_string(fid, FIFF.FIFF_DESCRIPTION, info['description'])
    if info.get('proj_id') is not None:
        write_int(fid, FIFF.FIFF_PROJ_ID, info['proj_id'])
    if info.get('proj_name') is not None:
        write_string(fid, FIFF.FIFF_PROJ_NAME, info['proj_name'])
    if info.get('meas_date') is not None:
        write_int(fid, FIFF.FIFF_MEAS_DATE, _dt_to_stamp(info['meas_date']))
    if info.get('utc_offset') is not None:
        write_string(fid, FIFF.FIFF_UTC_OFFSET, info['utc_offset'])
    write_int(fid, FIFF.FIFF_NCHAN, info['nchan'])
    write_float(fid, FIFF.FIFF_SFREQ, info['sfreq'])
    if info['lowpass'] is not None:
        write_float(fid, FIFF.FIFF_LOWPASS, info['lowpass'])
    if info['highpass'] is not None:
        write_float(fid, FIFF.FIFF_HIGHPASS, info['highpass'])
    if info.get('line_freq') is not None:
        write_float(fid, FIFF.FIFF_LINE_FREQ, info['line_freq'])
    if info.get('gantry_angle') is not None:
        write_float(fid, FIFF.FIFF_GANTRY_ANGLE, info['gantry_angle'])
    if data_type is not None:
        write_int(fid, FIFF.FIFF_DATA_PACK, data_type)
    if info.get('custom_ref_applied'):
        write_int(fid, FIFF.FIFF_MNE_CUSTOM_REF, info['custom_ref_applied'])
    if info.get('xplotter_layout'):
        write_string(fid, FIFF.FIFF_XPLOTTER_LAYOUT, info['xplotter_layout'])
    # Channel information
    _write_ch_infos(fid, info['chs'], reset_range, ch_names_mapping)
    # Subject information
    if info.get('subject_info') is not None:
        start_block(fid, FIFF.FIFFB_SUBJECT)
        si = info['subject_info']
        if si.get('id') is not None:
            write_int(fid, FIFF.FIFF_SUBJ_ID, si['id'])
        if si.get('his_id') is not None:
            write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, si['his_id'])
        if si.get('last_name') is not None:
            write_string(fid, FIFF.FIFF_SUBJ_LAST_NAME, si['last_name'])
        if si.get('first_name') is not None:
            write_string(fid, FIFF.FIFF_SUBJ_FIRST_NAME, si['first_name'])
        if si.get('middle_name') is not None:
            write_string(fid, FIFF.FIFF_SUBJ_MIDDLE_NAME, si['middle_name'])
        if si.get('birthday') is not None:
            write_julian(fid, FIFF.FIFF_SUBJ_BIRTH_DAY, si['birthday'])
        if si.get('sex') is not None:
            write_int(fid, FIFF.FIFF_SUBJ_SEX, si['sex'])
        if si.get('hand') is not None:
            write_int(fid, FIFF.FIFF_SUBJ_HAND, si['hand'])
        if si.get('weight') is not None:
            write_float(fid, FIFF.FIFF_SUBJ_WEIGHT, si['weight'])
        if si.get('height') is not None:
            write_float(fid, FIFF.FIFF_SUBJ_HEIGHT, si['height'])
        end_block(fid, FIFF.FIFFB_SUBJECT)
        del si
    if info.get('device_info') is not None:
        start_block(fid, FIFF.FIFFB_DEVICE)
        di = info['device_info']
        # NOTE(review): 'type' is written unconditionally (unlike the other
        # keys) and would raise KeyError if absent — confirm it is mandatory.
        write_string(fid, FIFF.FIFF_DEVICE_TYPE, di['type'])
        for key in ('model', 'serial', 'site'):
            if di.get(key) is not None:
                write_string(fid, getattr(FIFF, 'FIFF_DEVICE_' + key.upper()),
                             di[key])
        end_block(fid, FIFF.FIFFB_DEVICE)
        del di
    if info.get('helium_info') is not None:
        start_block(fid, FIFF.FIFFB_HELIUM)
        hi = info['helium_info']
        if hi.get('he_level_raw') is not None:
            write_float(fid, FIFF.FIFF_HE_LEVEL_RAW, hi['he_level_raw'])
        if hi.get('helium_level') is not None:
            write_float(fid, FIFF.FIFF_HELIUM_LEVEL, hi['helium_level'])
        if hi.get('orig_file_guid') is not None:
            write_string(fid, FIFF.FIFF_ORIG_FILE_GUID, hi['orig_file_guid'])
        # NOTE(review): 'meas_date' is written unconditionally here — confirm
        # helium_info always carries it.
        write_int(fid, FIFF.FIFF_MEAS_DATE, hi['meas_date'])
        end_block(fid, FIFF.FIFFB_HELIUM)
        del hi
    if info.get('hpi_subsystem') is not None:
        hs = info['hpi_subsystem']
        start_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM)
        if hs.get('ncoil') is not None:
            write_int(fid, FIFF.FIFF_HPI_NCOIL, hs['ncoil'])
        if hs.get('event_channel') is not None:
            write_string(fid, FIFF.FIFF_EVENT_CHANNEL, hs['event_channel'])
        if hs.get('hpi_coils') is not None:
            for coil in hs['hpi_coils']:
                start_block(fid, FIFF.FIFFB_HPI_COIL)
                if coil.get('event_bits') is not None:
                    write_int(fid, FIFF.FIFF_EVENT_BITS,
                              coil['event_bits'])
                end_block(fid, FIFF.FIFFB_HPI_COIL)
        end_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM)
        del hs
    # CTF compensation info
    comps = info['comps']
    if ch_names_mapping:
        # Copy before renaming so the caller's info is left untouched.
        comps = deepcopy(comps)
        _rename_comps(comps, ch_names_mapping)
    write_ctf_comp(fid, comps)
    # KIT system ID
    if info.get('kit_system_id') is not None:
        write_int(fid, FIFF.FIFF_MNE_KIT_SYSTEM_ID, info['kit_system_id'])
    end_block(fid, FIFF.FIFFB_MEAS_INFO)
    # Processing history
    _write_proc_history(fid, info)
@fill_doc
def write_info(fname, info, data_type=None, reset_range=True):
    """Write measurement info in fif file.

    Parameters
    ----------
    fname : str
        The name of the file. Should end by -info.fif.
    %(info_not_none)s
    data_type : int
        The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),
        5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for
        raw data.
    reset_range : bool
        If True, info['chs'][k]['range'] will be set to unity.
    """
    # All the real work is delegated to write_meas_info, wrapped in a
    # FIFFB_MEAS block inside a fully managed FIF file.
    with start_and_end_file(fname) as fid:
        start_block(fid, FIFF.FIFFB_MEAS)
        write_meas_info(fid, info, data_type=data_type,
                        reset_range=reset_range)
        end_block(fid, FIFF.FIFFB_MEAS)
@verbose
def _merge_info_values(infos, key, verbose=None):
    """Merge things together.

    Fork for {'dict', 'list', 'array', 'other'}
    and consider cases where one or all are of the same type.

    Does special things for "projs", "bads", and "meas_date".
    """
    values = [d[key] for d in infos]
    msg = ("Don't know how to merge '%s'. Make sure values are "
           "compatible, got types:\n %s"
           % (key, [type(v) for v in values]))
    def _flatten(lists):
        """Concatenate a sequence of lists into one flat list."""
        return [item for sublist in lists for item in sublist]
    def _check_isinstance(values, kind, func):
        """Apply ``func`` (any/all) over per-value isinstance checks."""
        return func([isinstance(v, kind) for v in values])
    def _where_isinstance(values, kind):
        """Get indices of instances."""
        # Bug fix: this previously tested ``isinstance(v, type)`` (i.e.
        # "is v a class?"), which is always False for data values, so the
        # mixed-type branches below always saw an empty index array and
        # silently returned None.
        return np.where([isinstance(v, kind) for v in values])[0]
    # list
    if _check_isinstance(values, list, all):
        lists = (d[key] for d in infos)
        if key == 'projs':
            return _uniquify_projs(_flatten(lists))
        elif key == 'bads':
            return sorted(set(_flatten(lists)))
        else:
            return _flatten(lists)
    elif _check_isinstance(values, list, any):
        idx = _where_isinstance(values, list)
        if len(idx) == 1:
            return values[int(idx)]
        elif len(idx) > 1:
            lists = (d[key] for d in infos if isinstance(d[key], list))
            return _flatten(lists)
    # dict
    elif _check_isinstance(values, dict, all):
        is_qual = all(object_diff(values[0], v) == '' for v in values[1:])
        if is_qual:
            return values[0]
        else:
            # Bug fix: the exception was constructed but never raised, so
            # conflicting dicts silently fell through and returned None.
            raise RuntimeError(msg)
    elif _check_isinstance(values, dict, any):
        idx = _where_isinstance(values, dict)
        if len(idx) == 1:
            return values[int(idx)]
        elif len(idx) > 1:
            raise RuntimeError(msg)
    # ndarray
    elif _check_isinstance(values, np.ndarray, all) or \
            _check_isinstance(values, tuple, all):
        is_qual = all(np.array_equal(values[0], x) for x in values[1:])
        if is_qual:
            return values[0]
        elif key == 'meas_date':
            logger.info('Found multiple entries for %s. '
                        'Setting value to `None`' % key)
            return None
        else:
            raise RuntimeError(msg)
    elif _check_isinstance(values, (np.ndarray, tuple), any):
        idx = _where_isinstance(values, np.ndarray)
        if len(idx) == 1:
            return values[int(idx)]
        elif len(idx) > 1:
            raise RuntimeError(msg)
    # other
    else:
        unique_values = set(values)
        if len(unique_values) == 1:
            return list(values)[0]
        elif isinstance(list(unique_values)[0], BytesIO):
            # NOTE(review): the message says StringIO but the check is for
            # BytesIO — left as-is to avoid changing user-visible log output.
            logger.info('Found multiple StringIO instances. '
                        'Setting value to `None`')
            return None
        elif isinstance(list(unique_values)[0], str):
            logger.info('Found multiple filenames. '
                        'Setting value to `None`')
            return None
        else:
            raise RuntimeError(msg)
@verbose
def _merge_info(infos, force_update_to_first=False, verbose=None):
    """Merge multiple measurement info dictionaries.

    - Fields that are present in only one info object will be used in the
      merged info.
    - Fields that are present in multiple info objects and are the same
      will be used in the merged info.
    - Fields that are present in multiple info objects and are different
      will result in a None value in the merged info.
    - Channels will be concatenated. If multiple info objects contain
      channels with the same name, an exception is raised.

    Parameters
    ----------
    infos : list of instance of Info
        Info objects to merge into one info object.
    force_update_to_first : bool
        If True, force the fields for objects in `info` will be updated
        to match those in the first item. Use at your own risk, as this
        may overwrite important metadata.
    %(verbose)s

    Returns
    -------
    info : instance of Info
        The merged info object.
    """
    for info in infos:
        info._check_consistency()
    if force_update_to_first is True:
        # Work on deep copies so the caller's objects are not mutated.
        infos = deepcopy(infos)
        _force_update_info(infos[0], infos[1:])
    info = Info()
    info._unlocked = True
    # Concatenate channels from all inputs; duplicate names are rejected below.
    info['chs'] = []
    for this_info in infos:
        info['chs'].extend(this_info['chs'])
    info._update_redundant()
    duplicates = {ch for ch in info['ch_names']
                  if info['ch_names'].count(ch) > 1}
    if len(duplicates) > 0:
        msg = ("The following channels are present in more than one input "
               "measurement info objects: %s" % list(duplicates))
        raise ValueError(msg)
    # Coordinate transforms must be absent, unique, or identical across inputs.
    transforms = ['ctf_head_t', 'dev_head_t', 'dev_ctf_t']
    for trans_name in transforms:
        trans = [i[trans_name] for i in infos if i[trans_name]]
        if len(trans) == 0:
            info[trans_name] = None
        elif len(trans) == 1:
            info[trans_name] = trans[0]
        elif all(np.all(trans[0]['trans'] == x['trans']) and
                 trans[0]['from'] == x['from'] and
                 trans[0]['to'] == x['to']
                 for x in trans[1:]):
            info[trans_name] = trans[0]
        else:
            msg = ("Measurement infos provide mutually inconsistent %s" %
                   trans_name)
            raise ValueError(msg)
    # KIT system-IDs
    kit_sys_ids = [i['kit_system_id'] for i in infos if i['kit_system_id']]
    if len(kit_sys_ids) == 0:
        info['kit_system_id'] = None
    elif len(set(kit_sys_ids)) == 1:
        info['kit_system_id'] = kit_sys_ids[0]
    else:
        raise ValueError("Trying to merge channels from different KIT systems")
    # hpi infos and digitization data: keep if absent/unique/identical,
    # otherwise the inputs are inconsistent and we raise.
    fields = ['hpi_results', 'hpi_meas', 'dig']
    for k in fields:
        values = [i[k] for i in infos if i[k]]
        if len(values) == 0:
            info[k] = []
        elif len(values) == 1:
            info[k] = values[0]
        elif all(object_diff(values[0], v) == '' for v in values[1:]):
            info[k] = values[0]
        else:
            msg = ("Measurement infos are inconsistent for %s" % k)
            raise ValueError(msg)
    # other fields: merged via _merge_info_values (equal -> keep, differing
    # -> None, missing in some -> take where present).
    # NOTE: 'sfreq' appears twice in this list; harmless but redundant.
    other_fields = ['acq_pars', 'acq_stim', 'bads',
                    'comps', 'custom_ref_applied', 'description',
                    'experimenter', 'file_id', 'highpass', 'utc_offset',
                    'hpi_subsystem', 'events', 'device_info', 'helium_info',
                    'line_freq', 'lowpass', 'meas_id',
                    'proj_id', 'proj_name', 'projs', 'sfreq', 'gantry_angle',
                    'subject_info', 'sfreq', 'xplotter_layout', 'proc_history']
    for k in other_fields:
        info[k] = _merge_info_values(infos, k)
    # The first input's measurement date wins.
    info['meas_date'] = infos[0]['meas_date']
    info._unlocked = False
    return info
@verbose
def create_info(ch_names, sfreq, ch_types='misc', verbose=None):
    """Create a basic Info instance suitable for use with create_raw.

    Parameters
    ----------
    ch_names : list of str | int
        Channel names. If an int, a list of channel names will be created
        from ``range(ch_names)``.
    sfreq : float
        Sample rate of the data.
    ch_types : list of str | str
        Channel types, default is ``'misc'`` which is not a
        :term:`data channel <data channels>`.
        Currently supported fields are 'ecg', 'bio', 'stim', 'eog', 'misc',
        'seeg', 'dbs', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'emg', 'hbr'
        or 'hbo'. If str, then all channels are assumed to be of the same type.
    %(verbose)s

    Returns
    -------
    %(info_not_none)s

    Notes
    -----
    The info dictionary will be sparsely populated to enable functionality
    within the rest of the package. Advanced functionality such as source
    localization can only be obtained through substantial, proper
    modifications of the info structure (not recommended).

    Note that the MEG device-to-head transform ``info['dev_head_t']`` will
    be initialized to the identity transform.

    Proper units of measure:
    * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog
    * T: mag
    * T/m: grad
    * M: hbo, hbr
    * Am: dipole
    * AU: misc
    """
    try:
        ch_names = operator.index(ch_names)  # int-like
    except TypeError:
        pass
    else:
        # An int was given: auto-generate string names '0', '1', ...
        ch_names = list(np.arange(ch_names).astype(str))
    _validate_type(ch_names, (list, tuple), "ch_names",
                   ("list, tuple, or int"))
    sfreq = float(sfreq)
    if sfreq <= 0:
        raise ValueError('sfreq must be positive')
    nchan = len(ch_names)
    if isinstance(ch_types, str):
        # A single string applies the same type to every channel.
        ch_types = [ch_types] * nchan
    ch_types = np.atleast_1d(np.array(ch_types, np.str_))
    if ch_types.ndim != 1 or len(ch_types) != nchan:
        raise ValueError('ch_types and ch_names must be the same length '
                         '(%s != %s) for ch_types=%s'
                         % (len(ch_types), nchan, ch_types))
    info = _empty_info(sfreq)
    ch_types_dict = get_channel_type_constants(include_defaults=True)
    for ci, (ch_name, ch_type) in enumerate(zip(ch_names, ch_types)):
        _validate_type(ch_name, 'str', "each entry in ch_names")
        _validate_type(ch_type, 'str', "each entry in ch_types")
        if ch_type not in ch_types_dict:
            raise KeyError(f'kind must be one of {list(ch_types_dict)}, '
                           f'not {ch_type}')
        this_ch_dict = ch_types_dict[ch_type]
        kind = this_ch_dict['kind']
        # handle chpi, where kind is a *list* of FIFF constants:
        kind = kind[0] if isinstance(kind, (list, tuple)) else kind
        # mirror what tag.py does here
        coord_frame = _ch_coord_dict.get(kind, FIFF.FIFFV_COORD_UNKNOWN)
        coil_type = this_ch_dict.get('coil_type', FIFF.FIFFV_COIL_NONE)
        unit = this_ch_dict.get('unit', FIFF.FIFF_UNIT_NONE)
        # loc is a 12-element location/orientation vector; unknown -> NaN.
        chan_info = dict(loc=np.full(12, np.nan),
                         unit_mul=FIFF.FIFF_UNITM_NONE, range=1., cal=1.,
                         kind=kind, coil_type=coil_type, unit=unit,
                         coord_frame=coord_frame, ch_name=str(ch_name),
                         scanno=ci + 1, logno=ci + 1)
        info['chs'].append(chan_info)
    info._update_redundant()
    info._check_consistency()
    info._unlocked = False
    return info
# NOTE(review): presumably the complete set of keys a raw measurement Info is
# expected to carry (used for validation elsewhere in the package) — confirm.
RAW_INFO_FIELDS = (
    'acq_pars', 'acq_stim', 'bads', 'ch_names', 'chs',
    'comps', 'ctf_head_t', 'custom_ref_applied', 'description', 'dev_ctf_t',
    'dev_head_t', 'dig', 'experimenter', 'events', 'utc_offset', 'device_info',
    'file_id', 'highpass', 'hpi_meas', 'hpi_results', 'helium_info',
    'hpi_subsystem', 'kit_system_id', 'line_freq', 'lowpass', 'meas_date',
    'meas_id', 'nchan', 'proj_id', 'proj_name', 'projs', 'sfreq',
    'subject_info', 'xplotter_layout', 'proc_history', 'gantry_angle',
)
def _empty_info(sfreq):
    """Create an empty info dictionary for the given sampling frequency."""
    keys_defaulting_to_none = (
        'acq_pars', 'acq_stim', 'ctf_head_t', 'description',
        'dev_ctf_t', 'dig', 'experimenter', 'utc_offset', 'device_info',
        'file_id', 'highpass', 'hpi_subsystem', 'kit_system_id', 'helium_info',
        'line_freq', 'lowpass', 'meas_date', 'meas_id', 'proj_id', 'proj_name',
        'subject_info', 'xplotter_layout', 'gantry_angle',
    )
    keys_defaulting_to_list = (
        'bads', 'chs', 'comps', 'events', 'hpi_meas', 'hpi_results',
        'projs', 'proc_history',
    )
    info = Info()
    info._unlocked = True
    for key in keys_defaulting_to_none:
        info[key] = None
    for key in keys_defaulting_to_list:
        info[key] = []
    # Sensible defaults: no custom reference, full passband, identity
    # device-to-head transform.
    info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_OFF
    info['sfreq'] = float(sfreq)
    info['highpass'] = 0.
    info['lowpass'] = info['sfreq'] / 2.
    info['dev_head_t'] = Transform('meg', 'head')
    info._update_redundant()
    info._check_consistency()
    return info
def _force_update_info(info_base, info_target):
    """Update target info objects with values from info base.

    Note that values in info_target will be overwritten by those in info_base.
    This will overwrite all fields except for: 'chs', 'ch_names', 'nchan'.

    Parameters
    ----------
    info_base : mne.Info
        The Info object you want to use for overwriting values
        in target Info objects.
    info_target : mne.Info | list of mne.Info
        The Info object(s) you wish to overwrite using info_base. These objects
        will be modified in-place.
    """
    protected_keys = ('chs', 'ch_names', 'nchan')
    info_target = np.atleast_1d(info_target).ravel()
    # Validate every participating object before touching anything.
    for candidate in np.hstack([info_base, info_target]):
        if not isinstance(candidate, Info):
            raise ValueError('Inputs must be of type Info. '
                             'Found type %s' % type(candidate))
    for key, val in info_base.items():
        if key in protected_keys:
            continue
        for target in info_target:
            with target._unlock():
                target[key] = val
def _add_timedelta_to_stamp(meas_date_stamp, delta_t):
    """Shift a (secs, usecs) measurement-date stamp by a timedelta.

    A ``None`` stamp is passed through unchanged.
    """
    if meas_date_stamp is None:
        return None
    return _dt_to_stamp(_stamp_to_dt(meas_date_stamp) + delta_t)
@verbose
def anonymize_info(info, daysback=None, keep_his=False, verbose=None):
    """Anonymize measurement information in place.

    .. warning:: If ``info`` is part of an object like
                 :class:`raw.info <mne.io.Raw>`, you should directly use
                 the method :meth:`raw.anonymize() <mne.io.Raw.anonymize>`
                 to ensure that all parts of the data are anonymized and
                 stay synchronized (e.g.,
                 :class:`raw.annotations <mne.Annotations>`).

    Parameters
    ----------
    %(info_not_none)s
    %(daysback_anonymize_info)s
    %(keep_his_anonymize_info)s
    %(verbose)s

    Returns
    -------
    info : instance of Info
        The anonymized measurement information.

    Notes
    -----
    %(anonymize_info_notes)s
    """
    _validate_type(info, 'info', "self")
    # Replacement values used for all personally-identifying fields.
    default_anon_dos = datetime.datetime(2000, 1, 1, 0, 0, 0,
                                         tzinfo=datetime.timezone.utc)
    default_str = "mne_anonymize"
    default_subject_id = 0
    default_sex = 0
    default_desc = ("Anonymized using a time shift"
                    " to preserve age at acquisition")
    none_meas_date = info['meas_date'] is None
    if none_meas_date:
        if daysback is not None:
            warn('Input info has "meas_date" set to None. '
                 'Removing all information from time/date structures, '
                 '*NOT* performing any time shifts!')
    else:
        # compute timeshift delta
        if daysback is None:
            delta_t = info['meas_date'] - default_anon_dos
        else:
            delta_t = datetime.timedelta(days=daysback)
        with info._unlock():
            info['meas_date'] = info['meas_date'] - delta_t
    # file_id and meas_id
    for key in ('file_id', 'meas_id'):
        value = info.get(key)
        if value is not None:
            assert 'msecs' not in value
            if (none_meas_date or
                    ((value['secs'], value['usecs']) == DATE_NONE)):
                # Don't try to shift backwards in time when no measurement
                # date is available or when file_id is already a place holder
                tmp = DATE_NONE
            else:
                # delta_t is guaranteed to be bound here because
                # none_meas_date is False.
                tmp = _add_timedelta_to_stamp(
                    (value['secs'], value['usecs']), -delta_t)
            value['secs'] = tmp[0]
            value['usecs'] = tmp[1]
            # The following copy is needed for a test CTF dataset
            # otherwise value['machid'][:] = 0 would suffice
            _tmp = value['machid'].copy()
            _tmp[:] = 0
            value['machid'] = _tmp
    # subject info
    subject_info = info.get('subject_info')
    if subject_info is not None:
        if subject_info.get('id') is not None:
            subject_info['id'] = default_subject_id
        if keep_his:
            logger.info('Not fully anonymizing info - keeping '
                        'his_id, sex, and hand info')
        else:
            if subject_info.get('his_id') is not None:
                subject_info['his_id'] = str(default_subject_id)
            if subject_info.get('sex') is not None:
                subject_info['sex'] = default_sex
            if subject_info.get('hand') is not None:
                del subject_info['hand']  # there's no "unknown" setting
        for key in ('last_name', 'first_name', 'middle_name'):
            if subject_info.get(key) is not None:
                subject_info[key] = default_str
        # anonymize the subject birthday
        if none_meas_date:
            subject_info.pop('birthday', None)
        elif subject_info.get('birthday') is not None:
            # Shift the birthday by the same delta so age at acquisition
            # is preserved.
            dob = datetime.datetime(subject_info['birthday'][0],
                                    subject_info['birthday'][1],
                                    subject_info['birthday'][2])
            dob -= delta_t
            subject_info['birthday'] = dob.year, dob.month, dob.day
        for key in ('weight', 'height'):
            if subject_info.get(key) is not None:
                subject_info[key] = 0
    info['experimenter'] = default_str
    info['description'] = default_desc
    with info._unlock():
        if info['proj_id'] is not None:
            info['proj_id'] = np.zeros_like(info['proj_id'])
        if info['proj_name'] is not None:
            info['proj_name'] = default_str
        if info['utc_offset'] is not None:
            info['utc_offset'] = None
    # Processing history records carry their own ids/dates/experimenter.
    proc_hist = info.get('proc_history')
    if proc_hist is not None:
        for record in proc_hist:
            record['block_id']['machid'][:] = 0
            record['experimenter'] = default_str
            if none_meas_date:
                record['block_id']['secs'] = DATE_NONE[0]
                record['block_id']['usecs'] = DATE_NONE[1]
                record['date'] = DATE_NONE
            else:
                this_t0 = (record['block_id']['secs'],
                           record['block_id']['usecs'])
                this_t1 = _add_timedelta_to_stamp(
                    this_t0, -delta_t)
                record['block_id']['secs'] = this_t1[0]
                record['block_id']['usecs'] = this_t1[1]
                record['date'] = _add_timedelta_to_stamp(
                    record['date'], -delta_t)
    # Helium (maintenance) info may carry a GUID and its own date.
    hi = info.get('helium_info')
    if hi is not None:
        if hi.get('orig_file_guid') is not None:
            hi['orig_file_guid'] = default_str
        if none_meas_date and hi.get('meas_date') is not None:
            hi['meas_date'] = DATE_NONE
        elif hi.get('meas_date') is not None:
            hi['meas_date'] = _add_timedelta_to_stamp(
                hi['meas_date'], -delta_t)
    # Device serial number and site are identifying.
    di = info.get('device_info')
    if di is not None:
        for k in ('serial', 'site'):
            if di.get(k) is not None:
                di[k] = default_str
    err_mesg = ('anonymize_info generated an inconsistent info object. '
                'Underlying Error:\n')
    info._check_consistency(prepend_error=err_mesg)
    err_mesg = ('anonymize_info generated an inconsistent info object. '
                'daysback parameter was too large. '
                'Underlying Error:\n')
    _check_dates(info, prepend_error=err_mesg)
    return info
@fill_doc
def _bad_chans_comp(info, ch_names):
    """Check if channel names are consistent with current compensation status.

    Parameters
    ----------
    %(info_not_none)s
    ch_names : list of str
        The channel names to check.

    Returns
    -------
    status : bool
        True if compensation is *currently* in use but some compensation
        channels are not included in picks
        False if compensation is *currently* not being used
        or if compensation is being used and all compensation channels
        in info and included in picks.
    missing_ch_names: array-like of str, shape (n_missing,)
        The names of compensation channels not included in picks.
        Returns [] if no channels are missing.
    """
    if 'comps' not in info:
        # should this be thought of as a bug?
        return False, []
    # Gather the compensation (column) channels required by any compensator
    # whose rows touch one of the selected channels.
    selected = set(ch_names)
    required = set()
    for comp in info['comps']:
        if selected.intersection(comp['data']['row_names']):
            required.update(comp['data']['col_names'])
    missing_ch_names = sorted(required.difference(ch_names))
    status = get_current_comp(info) != 0 and len(missing_ch_names) > 0
    return status, missing_ch_names
# Cast functions used when packing/unpacking digitization points; 'r' (the
# position array) is passed through unchanged.
_DIG_CAST = dict(
    kind=int, ident=int, r=lambda x: x, coord_frame=int)
# key -> const, cast, write
_CH_INFO_MAP = OrderedDict(
    scanno=(FIFF.FIFF_CH_SCAN_NO, int, write_int),
    logno=(FIFF.FIFF_CH_LOGICAL_NO, int, write_int),
    kind=(FIFF.FIFF_CH_KIND, int, write_int),
    range=(FIFF.FIFF_CH_RANGE, float, write_float),
    cal=(FIFF.FIFF_CH_CAL, float, write_float),
    coil_type=(FIFF.FIFF_CH_COIL_TYPE, int, write_int),
    loc=(FIFF.FIFF_CH_LOC, lambda x: x, write_float),
    unit=(FIFF.FIFF_CH_UNIT, int, write_int),
    unit_mul=(FIFF.FIFF_CH_UNIT_MUL, int, write_int),
    ch_name=(FIFF.FIFF_CH_DACQ_NAME, str, write_string),
    coord_frame=(FIFF.FIFF_CH_COORD_FRAME, int, write_int),
)
# key -> cast (derived view of _CH_INFO_MAP, used for packing)
_CH_CAST = OrderedDict((key, val[1]) for key, val in _CH_INFO_MAP.items())
# const -> key, cast (derived view of _CH_INFO_MAP, used for reading)
_CH_READ_MAP = OrderedDict((val[0], (key, val[1]))
                           for key, val in _CH_INFO_MAP.items())
@contextlib.contextmanager
def _writing_info_hdf5(info):
    # Make info writing faster by packing chs and dig into numpy arrays
    orig_dig = info.get('dig', None)
    orig_chs = info['chs']
    with info._unlock():
        try:
            if orig_dig is not None and len(orig_dig) > 0:
                info['dig'] = _dict_pack(info['dig'], _DIG_CAST)
            info['chs'] = _dict_pack(info['chs'], _CH_CAST)
            # Encode names to fixed-width bytes — presumably so the HDF5
            # writer can store them directly; TODO confirm.
            info['chs']['ch_name'] = np.char.encode(
                info['chs']['ch_name'], encoding='utf8')
            yield
        finally:
            # Always restore the original list-of-dict representation,
            # even if writing raised.
            if orig_dig is not None:
                info['dig'] = orig_dig
            info['chs'] = orig_chs
def _dict_pack(obj, casts):
    """Pack a list of dicts into a dict of arrays, one array per cast key."""
    packed = {}
    for key in casts:
        packed[key] = np.array([entry[key] for entry in obj])
    return packed
def _dict_unpack(obj, casts):
    """Unpack a dict of arrays back into a list of dicts, casting each value."""
    n_items = len(obj[next(iter(casts))])
    out = []
    for ii in range(n_items):
        out.append({key: cast(obj[key][ii]) for key, cast in casts.items()})
    return out
def _make_ch_names_mapping(chs):
    """Map original channel names to unique, length-limited replacements.

    Returns an empty dict when no renaming was necessary.
    """
    orig_ch_names = [c['ch_name'] for c in chs]
    ch_names = orig_ch_names.copy()
    _unique_channel_names(ch_names, max_length=15, verbose='error')
    if orig_ch_names == ch_names:
        return dict()
    return dict(zip(orig_ch_names, ch_names))
def _write_ch_infos(fid, chs, reset_range, ch_names_mapping):
    """Write channel info records to an open FIF file.

    Names longer than 15 characters are written truncated (per
    ``ch_names_mapping``); when any name was remapped, a second, new-style
    block carrying the full channel info is also written.
    """
    ch_names_mapping = dict() if ch_names_mapping is None else ch_names_mapping
    for k, c in enumerate(chs):
        #   Scan numbers may have been messed up
        c = c.copy()
        c['ch_name'] = ch_names_mapping.get(c['ch_name'], c['ch_name'])
        assert len(c['ch_name']) <= 15
        c['scanno'] = k + 1
        # for float/double, the "range" param is unnecessary
        if reset_range:
            c['range'] = 1.0
        write_ch_info(fid, c)
    # only write new-style channel information if necessary
    if len(ch_names_mapping):
        logger.info(
            ' Writing channel names to FIF truncated to 15 characters '
            'with remapping')
        for ch in chs:
            start_block(fid, FIFF.FIFFB_CH_INFO)
            assert set(ch) == set(_CH_INFO_MAP)
            for (key, (const, _, write)) in _CH_INFO_MAP.items():
                write(fid, const, ch[key])
            end_block(fid, FIFF.FIFFB_CH_INFO)
def _ensure_infos_match(info1, info2, name, *, on_mismatch='raise'):
    """Check if infos match.

    Parameters
    ----------
    info1, info2 : instance of Info
        The infos to compare.
    name : str
        The name of the object appearing in the error message if the
        comparison fails.
    on_mismatch : 'raise' | 'warn' | 'ignore'
        What to do in case of a mismatch of ``dev_head_t`` between ``info1``
        and ``info2``.
    """
    _check_on_missing(on_missing=on_mismatch, name='on_mismatch')
    info1._check_consistency()
    info2._check_consistency()
    # Hard requirements: channel count, bads, sampling rate, channel names,
    # and SSP projectors must agree — any mismatch raises unconditionally.
    if info1['nchan'] != info2['nchan']:
        raise ValueError(f'{name}.info[\'nchan\'] must match')
    if set(info1['bads']) != set(info2['bads']):
        raise ValueError(f'{name}.info[\'bads\'] must match')
    if info1['sfreq'] != info2['sfreq']:
        raise ValueError(f'{name}.info[\'sfreq\'] must match')
    if set(info1['ch_names']) != set(info2['ch_names']):
        raise ValueError(f'{name}.info[\'ch_names\'] must match')
    if len(info2['projs']) != len(info1['projs']):
        raise ValueError(f'SSP projectors in {name} must be the same')
    if any(not _proj_equal(p1, p2) for p1, p2 in
           zip(info2['projs'], info1['projs'])):
        raise ValueError(f'SSP projectors in {name} must be the same')
    # Soft requirement: a dev_head_t mismatch is handled according to
    # ``on_mismatch`` (raise, warn, or ignore).
    if (info1['dev_head_t'] is None) != (info2['dev_head_t'] is None) or \
            (info1['dev_head_t'] is not None and not
             np.allclose(info1['dev_head_t']['trans'],
                         info2['dev_head_t']['trans'], rtol=1e-6)):
        msg = (f"{name}.info['dev_head_t'] differs. The "
               f"instances probably come from different runs, and "
               f"are therefore associated with different head "
               f"positions. Manually change info['dev_head_t'] to "
               f"avoid this message but beware that this means the "
               f"MEG sensors will not be properly spatially aligned. "
               f"See mne.preprocessing.maxwell_filter to realign the "
               f"runs to a common head position.")
        _on_missing(on_missing=on_mismatch, msg=msg,
                    name='on_mismatch')
def _get_fnirs_ch_pos(info):
    """Return positions of each fNIRS optode.

    fNIRS uses two types of optodes, sources and detectors.
    There can be multiple connections between each source
    and detector at different wavelengths. This function
    returns the location of each source and detector.
    """
    from ..preprocessing.nirs import _fnirs_optode_names, _optode_position

    sources, detectors = _fnirs_optode_names(info)
    return {name: _optode_position(info, name)
            for name in (*sources, *detectors)}
| {
"content_hash": "ecb33b2717c49b5918fbb14aa9217cb3",
"timestamp": "",
"source": "github",
"line_count": 2916,
"max_line_length": 79,
"avg_line_length": 38.45644718792867,
"alnum_prop": 0.5488902166061763,
"repo_name": "wmvanvliet/mne-python",
"id": "62b83f6d80897ddc34f4aaf8c3384efd3d178940",
"size": "112418",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "mne/io/meas_info.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "24999"
},
{
"name": "JavaScript",
"bytes": "8008"
},
{
"name": "Jinja",
"bytes": "14962"
},
{
"name": "Makefile",
"bytes": "4612"
},
{
"name": "Python",
"bytes": "10372316"
},
{
"name": "Sass",
"bytes": "257"
},
{
"name": "Shell",
"bytes": "19970"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.rdbms import MySQLManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-rdbms
# USAGE
python recommended_action_session_result.py
Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    # Credentials are resolved from the environment (see the module
    # docstring for the required variables).
    mysql_client = MySQLManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="ffffffff-ffff-ffff-ffff-ffffffffffff",
    )

    # List the recommended-action session results for the given operation
    # and print every returned item.
    session_results = mysql_client.location_based_recommended_action_sessions_result.list(
        location_name="WestUS",
        operation_id="aaaabbbb-cccc-dddd-0000-111122223333",
    )
    for session_result in session_results:
        print(session_result)


# x-ms-original-file: specification/mysql/resource-manager/Microsoft.DBforMySQL/stable/2018-06-01/examples/RecommendedActionSessionResult.json
if __name__ == "__main__":
    main()
| {
"content_hash": "87dc40423675199396d0cbf4ba518c96",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 142,
"avg_line_length": 34.705882352941174,
"alnum_prop": 0.7364406779661017,
"repo_name": "Azure/azure-sdk-for-python",
"id": "e81360ba827f79b69485a268b0e8bdec27edd100",
"size": "1648",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/rdbms/azure-mgmt-rdbms/generated_samples/recommended_action_session_result.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from trust.models import TrustItem
@admin.register(TrustItem)
class TrustItemAdmin(admin.ModelAdmin):
    """Admin options for TrustItem."""

    # Render the user foreign key as a raw-ID input instead of a select box.
    raw_id_fields = ["user"]
| {
"content_hash": "5dc43b9324198f7c70a1a74c8fccfa76",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 46,
"avg_line_length": 18.9,
"alnum_prop": 0.7777777777777778,
"repo_name": "eldarion/django-trust",
"id": "4429759abad1e442ad68f8d89e9006425406a099",
"size": "189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trust/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "33287"
}
],
"symlink_target": ""
} |
"""
Copyright (C) 2013-2018 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
time.py
~~~~~~~
Functionality to add and process time varying parameters
"""
import warnings
import xarray as xr
import numpy as np
import pandas as pd
from calliope import exceptions
from calliope.core.attrdict import AttrDict
from calliope.core.util.tools import plugin_load
from calliope.core.preprocess import checks
from calliope.core.util.dataset import reorganise_dataset_dimensions
def apply_time_clustering(model_data, model_run):
    """
    Take a Calliope model_data post time dimension addition, prior to any time
    clustering, and apply relevant time clustering/masking techniques.
    See doi: 10.1016/j.apenergy.2017.03.051 for applications.

    Techniques include:
    - Clustering timeseries into a selected number of 'representative' days.
      Days with similar profiles and daily magnitude are grouped together and
      represented by one 'representative' day with a greater weight per time
      step.
    - Masking timeseries, leading to variable timestep length
      Only certain parts of the input are shown at full resolution, with other
      periods being clustered together into a single timestep.
      E.g. Keep high resolution in the week with greatest wind power variability,
      smooth all other timesteps to 12H
    - Timestep resampling
      Used to reduce problem size by reducing resolution of all timeseries data.
      E.g. resample from 1H to 6H timesteps

    Parameters
    ----------
    model_data : xarray Dataset
        Preprocessed Calliope model_data, as produced using
        `calliope.core.preprocess_data.build_model_data`
        and found in model._model_data_original
    model_run : AttrDict
        preprocessed model_run dictionary, as produced by
        Calliope.core.preprocess_model

    Returns
    -------
    data : xarray Dataset
        Dataset with optimisation parameters as variables, optimisation sets as
        coordinates, and other information in attributes. Time dimension has
        been updated as per user-defined clustering techniques (from model_run)
    """
    time_config = model_run.model['time']
    # Deep copy so the caller's (original) dataset is left untouched.
    data = model_data.copy(deep=True)
    ##
    # Process masking and get list of timesteps to keep at high res
    ##
    if 'masks' in time_config:
        masks = {}
        # time.masks is a list of {'function': .., 'options': ..} dicts
        for entry in time_config.masks:
            entry = AttrDict(entry)
            # Mask functions are loaded dynamically (user plugins or the
            # built-in calliope.core.time.masks module).
            mask_func = plugin_load(entry.function, builtin_module='calliope.core.time.masks')
            mask_kwargs = entry.get_key('options', default=AttrDict()).as_dict()
            masks[entry.to_yaml()] = mask_func(data, **mask_kwargs)
        data.attrs['masks'] = masks
        # Concatenate the DatetimeIndexes by using dummy Series
        chosen_timesteps = pd.concat([pd.Series(0, index=m)
                                      for m in masks.values()]).index
        # timesteps: a list of timesteps NOT picked by masks
        timesteps = pd.Index(data.timesteps.values).difference(chosen_timesteps)
    else:
        timesteps = None
    ##
    # Process function, apply resolution adjustments
    ##
    if 'function' in time_config:
        func = plugin_load(
            time_config.function, builtin_module='calliope.core.time.funcs'
        )
        func_kwargs = time_config.get('function_options', AttrDict()).as_dict()
        # File-based clustering needs access to the full model_run dict.
        if 'file=' in func_kwargs.get('clustering_func', ''):
            func_kwargs.update({'model_run': model_run})
        data = func(data=data, timesteps=timesteps, **func_kwargs)
    return data
def add_time_dimension(data, model_run):
    """
    Once all constraints and costs have been loaded into the model dataset, any
    timeseries data is loaded from file and substituted into the model dataset

    Parameters:
    -----------
    data : xarray Dataset
        A data structure which has already gone through `constraints_to_dataset`,
        `costs_to_dataset`, and `add_attributes`
    model_run : AttrDict
        Calliope model_run dictionary

    Returns:
    --------
    data : xarray Dataset
        A data structure with an additional time dimension to the input dataset,
        with all relevant `file=` entries replaced with data from file.
    """
    data['timesteps'] = pd.to_datetime(data.timesteps)

    # Search through every constraint/cost for use of '='
    for variable in data.data_vars:
        # 1) If '=' in variable, it will give the variable a string data type
        if data[variable].dtype.kind != 'U':
            continue

        # 2) convert to a Pandas Series to do 'string contains' search
        data_series = data[variable].to_series()

        # 3) get a Series of all the uses of 'file=' for this variable
        filenames = data_series[data_series.str.contains('file=')]

        # 4) If no use of 'file=' then we can be on our way
        if filenames.empty:
            continue

        # 5) remove all before '=' and split filename and location column
        # (n= keyword avoids the deprecated positional form of rsplit)
        filenames = filenames.str.split('=').str[1].str.rsplit(':', n=1)
        if isinstance(filenames.index, pd.MultiIndex):
            filenames.index = filenames.index.remove_unused_levels()

        # 6) Get all timeseries data from dataframes stored in model_run
        timeseries_data = [model_run.timeseries_data[file].loc[:, column].values
                           for (file, column) in filenames.values]

        timeseries_data_series = pd.DataFrame(index=filenames.index,
                                              columns=data.timesteps.values,
                                              data=timeseries_data).stack()
        timeseries_data_series.index.rename('timesteps', -1, inplace=True)

        # 7) Add time dimension to the relevent DataArray and update the '='
        # dimensions with the time varying data (static data is just duplicated
        # at each timestep)
        timeseries_data_array = xr.broadcast(data[variable], data.timesteps)[0].copy()
        timeseries_data_array.loc[
            xr.DataArray.from_series(timeseries_data_series).coords
        ] = xr.DataArray.from_series(timeseries_data_series).values

        # 8) assign correct dtype (might be string/object accidentally)
        # string 'nan' to NaN:
        array_to_check = timeseries_data_array.where(timeseries_data_array != 'nan', drop=True)
        timeseries_data_array = timeseries_data_array.where(timeseries_data_array != 'nan')
        if ((array_to_check == 'True') | (array_to_check == '1') | (array_to_check == 'False') | (array_to_check == '0')).all().item():
            # Turn to bool
            timeseries_data_array = ((timeseries_data_array == 'True') | (timeseries_data_array == '1')).copy()
        else:
            try:
                # `float` replaces the removed `np.float` alias
                timeseries_data_array = timeseries_data_array.astype(float, copy=False)
            except ValueError:
                pass  # leave non-numeric data as strings
        data[variable] = timeseries_data_array

    # Add timestep_resolution by looking at the time difference between timestep n
    # and timestep n + 1 for all timesteps
    time_delta = (data.timesteps.shift(timesteps=-1) - data.timesteps).to_series()

    # Last timestep has no n + 1, so will be NaT (not a time),
    # we duplicate the penultimate time_delta instead
    time_delta.iloc[-1] = time_delta.iloc[-2]
    time_delta.name = 'timestep_resolution'
    # Time resolution is saved in hours (i.e. seconds / 3600)
    data['timestep_resolution'] = (
        xr.DataArray.from_series(time_delta.dt.total_seconds() / 3600)
    )

    data['timestep_weights'] = xr.DataArray(
        np.ones(len(data.timesteps)),
        dims=['timesteps']
    )

    # BUG FIX: this previously returned None, contradicting the documented
    # return value and discarding the dataset for any caller using the result.
    return data
def add_max_demand_timesteps(model_data):
    """Add, per carrier, the timestep with the largest total demand.

    Stores the result as a ``max_demand_timesteps`` DataArray over the
    ``carriers`` dimension of ``model_data``.
    """
    max_demand_timesteps = []
    # Get all loc_techs with a demand resource
    loc_techs_with_demand_resource = list(
        set(model_data.coords['loc_techs_finite_resource'].values)
        .intersection(model_data.coords['loc_techs_demand'].values)
    )
    for carrier in list(model_data.carriers.data):
        # Filter demand loc_techs for this carrier
        loc_techs = [
            i for i in loc_techs_with_demand_resource
            if '{}::{}'.format(i, carrier) in model_data.coords['loc_tech_carriers_con'].values
        ]
        carrier_demand = model_data.resource.loc[
            dict(loc_techs_finite_resource=loc_techs)
        ].sum(dim='loc_techs_finite_resource').copy()
        # Only keep negative (=demand) values
        carrier_demand[carrier_demand.values > 0] = 0
        # idxmin of the (negative) summed demand gives the timestep with the
        # greatest demand magnitude for this carrier.
        max_demand_timesteps.append(carrier_demand.to_series().idxmin())
    model_data['max_demand_timesteps'] = xr.DataArray(
        max_demand_timesteps,
        dims=['carriers']
    )
    return model_data
def final_timedimension_processing(model_data):
    """Validate and finalise the dataset after time-dimension processing."""
    # Run the full model-data checks; print warnings and raise on errors.
    checked = checks.check_model_data(model_data)
    model_data, _comments, warns, errors = checked
    exceptions.print_warnings_and_raise_errors(warnings=warns, errors=errors)

    # Tidy dimension ordering, then record per-carrier peak-demand timesteps.
    return add_max_demand_timesteps(reorganise_dataset_dimensions(model_data))
| {
"content_hash": "edfdb46bed4daeec5745da1a6f679f7a",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 135,
"avg_line_length": 38.385892116182575,
"alnum_prop": 0.6495513998486651,
"repo_name": "brynpickering/calliope",
"id": "aec86f8625afebf5019cb05fd4ab89319b57c03c",
"size": "9251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calliope/core/preprocess/time.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1474"
},
{
"name": "HTML",
"bytes": "1361"
},
{
"name": "Makefile",
"bytes": "1226"
},
{
"name": "Python",
"bytes": "679198"
},
{
"name": "TeX",
"bytes": "2666"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class ExtendedErrorInfo(Model):
    """Extended error information returned by the service.

    :param code: this is the error response code. Possible values include:
     'NotFound', 'Conflict', 'BadRequest'
    :type code: str or ~azure.mgmt.managementpartner.models.enum
    :param message: this is the extended error info message
    :type message: str
    """

    # Maps Python attribute names to wire-format keys for msrest serialization.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, code=None, message=None):
        super(ExtendedErrorInfo, self).__init__()
        # Both fields are optional; None means the service omitted them.
        self.code = code
        self.message = message
| {
"content_hash": "7067eec3645a894197988bab773d03a7",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 74,
"avg_line_length": 30.681818181818183,
"alnum_prop": 0.6237037037037036,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "25cb3debb5525a8d273d114ac9eee54a0e28d776",
"size": "1149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-managementpartner/azure/mgmt/managementpartner/models/extended_error_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
import sys
def printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):
    """
    Render a single-line terminal progress bar; call once per loop iteration.
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        barLength   - Optional  : character length of bar (Int)
    """
    pct_fmt = "{0:." + str(decimals) + "f}"
    pct_text = pct_fmt.format(100 * (iteration / float(total)))
    n_filled = int(round(barLength * iteration / float(total)))
    bar_text = '█' * n_filled + '-' * (barLength - n_filled)
    # '\r' rewinds to the line start so successive calls redraw in place.
    sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar_text, pct_text, '%', suffix))
    sys.stdout.flush()
    # Terminate the line once the final iteration has been reported.
    if iteration == total:
        sys.stdout.write('\n')
        sys.stdout.flush()
| {
"content_hash": "83e967168b37976899ccb2c91e4489c9",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 94,
"avg_line_length": 46.54545454545455,
"alnum_prop": 0.580078125,
"repo_name": "mirca/fsopy",
"id": "8b3bb3b5bafb6297d3da1efb23000842d5b369fd",
"size": "1026",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fsopy/utils/progressbar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32136"
}
],
"symlink_target": ""
} |
"""Tests for MirroredStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import sys
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import strategy_test_lib
from tensorflow.python.distribute import values
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras.engine import training as keras_training
from tensorflow.python.keras.layers import core as keras_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import server_lib
# True when this test binary was invoked under a name containing "test_gpu".
GPU_TEST = "test_gpu" in sys.argv[0]
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.mirrored_strategy_with_two_gpus,
        ],
        mode=["graph", "eager"]))
class MirroredTwoDeviceDistributionTest(
    strategy_test_lib.DistributionTestBase,
    strategy_test_lib.TwoDeviceDistributionTestBase,
    parameterized.TestCase):
  """MirroredStrategy tests parameterized over two-device configurations.

  Each test receives a two-replica `distribution` (GPU+CPU or two GPUs) and
  runs in both graph and eager mode. Most tests delegate to helpers defined
  on the strategy_test_lib base classes.
  """

  def testMinimizeLoss(self, distribution):
    # Graph and eager mode use different minimize-loss helper paths.
    if context.executing_eagerly():
      self._test_minimize_loss_eager(distribution)
    else:
      self._test_minimize_loss_graph(distribution)

  def testReplicaId(self, distribution):
    self._test_replica_id(distribution)

  def testNumReplicasInSync(self, distribution):
    # Both parameterized strategies place replicas on exactly two devices.
    self.assertEqual(2, distribution.num_replicas_in_sync)

  def testCallAndMergeExceptions(self, distribution):
    self._test_call_and_merge_exceptions(distribution)

  def testRunRegroupError(self, distribution):
    def run_fn():
      replica_id = int(self.evaluate(_replica_id()))
      # Generates a list with different lengths on different devices.
      # Will fail in _regroup() (if more than one device).
      return list(range(replica_id))

    with distribution.scope(), self.assertRaises(AssertionError):
      distribution.extended.call_for_each_replica(run_fn)

  def testReduceToCpu(self, distribution):
    with distribution.scope():
      result = distribution.extended.call_for_each_replica(_replica_id)
      # SUM of per-replica ids 0..n-1 should equal sum(range(n)).
      reduced = distribution.reduce(reduce_util.ReduceOp.SUM, result, axis=None)
      expected = sum(range(distribution.num_replicas_in_sync))
      self.assertEqual(expected, self.evaluate(reduced))

  def reduce_axis_helper(self, distribution, replica_squared_fn):
    # Shared helper: reduces per-replica results along axis 0 with SUM and
    # MEAN and checks against the closed-form expectations for
    # replica_squared_fn returning [replica_id] * (replica_id + 1).
    with distribution.scope():
      num_replicas = distribution.num_replicas_in_sync
      result = distribution.extended.call_for_each_replica(replica_squared_fn)

      # sum
      reduced = distribution.reduce(reduce_util.ReduceOp.SUM, result, axis=0)
      expected = sum(x * (x + 1) for x in range(num_replicas))
      self.assertNear(expected, self.evaluate(reduced), 0.00001)

      # mean
      reduced = distribution.reduce(reduce_util.ReduceOp.MEAN, result, axis=0)
      expected /= sum(x + 1 for x in range(num_replicas))
      self.assertNear(expected, self.evaluate(reduced), 0.00001)

  def testReduceAxisToCpu(self, distribution):
    for dtype in (dtypes.float32, dtypes.int32):
      def replica_squared_fn(dtype=dtype):
        # Lists with different lengths on different replicas.
        replica_id = _replica_id_as_int()
        return math_ops.cast([replica_id] * (replica_id + 1), dtype)

      self.reduce_axis_helper(distribution, replica_squared_fn)

  def set_v2_tensorshape(self, v2):
    # Toggle the global TensorShape v2 behavior flag.
    if v2:
      tensor_shape.enable_v2_tensorshape()
    else:
      tensor_shape.disable_v2_tensorshape()

  def testReduceAxisToCpuUnknownShape(self, distribution):
    original_v2 = tensor_shape._TENSORSHAPE_V2_OVERRIDE  # pylint: disable=protected-access
    try:
      for v2 in (False, True):
        self.set_v2_tensorshape(v2)
        for dtype in (dtypes.float32, dtypes.int32):
          for shape in ((None,), None):  # Test both unknown size and rank.
            def replica_squared_fn(dtype=dtype, shape=shape):
              # Lists with different lengths on different replicas.
              replica_id = _replica_id_as_int()
              tensor = math_ops.cast([replica_id] * (replica_id + 1), dtype)
              # Erase shape information
              return array_ops.placeholder_with_default(tensor, shape=shape)

            self.reduce_axis_helper(distribution, replica_squared_fn)
    finally:
      # Restore the flag so other tests are unaffected.
      self.set_v2_tensorshape(original_v2)

  def testReplicateDataset(self, distribution):
    dataset_fn = lambda: dataset_ops.Dataset.range(10)
    # With 2 replicas, consecutive elements are split across replicas.
    expected_values = [[i, i+1] for i in range(0, 10, 2)]
    input_fn = self._input_fn_to_test_input_context(
        dataset_fn,
        expected_num_replicas_in_sync=2,
        expected_num_input_pipelines=1,
        expected_input_pipeline_id=0)
    self._test_input_fn_iterable(distribution, input_fn, expected_values)

  def testMakeInputFnIteratorWithDataset(self, distribution):
    dataset_fn = lambda: dataset_ops.Dataset.range(10)
    expected_values = [[i, i+1] for i in range(0, 10, 2)]
    input_fn = self._input_fn_to_test_input_context(
        dataset_fn,
        expected_num_replicas_in_sync=2,
        expected_num_input_pipelines=1,
        expected_input_pipeline_id=0)
    iterator = distribution.make_input_fn_iterator(input_fn)
    self._test_input_fn_iterator(iterator, distribution.extended.worker_devices,
                                 expected_values)

  def testMakeInputFnIteratorWithCallable(self, distribution):
    def fn():
      # Interleaving two copies of range(10) duplicates each element.
      dataset = dataset_ops.Dataset.range(2).interleave(
          (lambda _: dataset_ops.Dataset.range(10)), cycle_length=2)
      it = dataset_ops.make_one_shot_iterator(dataset)
      return it.get_next

    expected_values = [[i, i] for i in range(0, 10)]
    input_fn = self._input_fn_to_test_input_context(
        fn,
        expected_num_replicas_in_sync=2,
        expected_num_input_pipelines=1,
        expected_input_pipeline_id=0)
    iterator = distribution.make_input_fn_iterator(input_fn)
    self._test_input_fn_iterator(iterator, distribution.extended.worker_devices,
                                 expected_values, test_reinitialize=False,
                                 ignore_order=True)

  def testNumpyDataset(self, distribution):
    self._test_numpy_dataset(distribution)

  def testGlobalStepUpdate(self, distribution):
    self._test_global_step_update(distribution)

  def testRun(self, distribution):
    self._test_run(distribution)

  def testAllReduceSum(self, distribution):
    self._test_all_reduce_sum(distribution)

  def testAllReduceSumGradients(self, distribution):
    self._test_all_reduce_sum_gradients(distribution)

  def testAllReduceSumGradientTape(self, distribution):
    self._test_all_reduce_sum_gradient_tape(distribution)

  def testAllReduceMean(self, distribution):
    self._test_all_reduce_mean(distribution)

  def testAllReduceMeanGradients(self, distribution):
    self._test_all_reduce_mean_gradients(distribution)

  def testAllReduceMeanGradientTape(self, distribution):
    self._test_all_reduce_mean_gradient_tape(distribution)

  def testSummaryForReplicaZeroOnly(self, distribution):
    self._test_summary_for_replica_zero_only(distribution)

  def testTrainableVariables(self, distribution):
    self._test_trainable_variable(distribution)
def one_device_combinations():
  """Test combinations for single-device MirroredStrategy (CPU or one GPU)."""
  single_device_strategies = [
      strategy_combinations.mirrored_strategy_with_one_cpu,
      strategy_combinations.mirrored_strategy_with_one_gpu,
  ]
  return combinations.combine(
      distribution=single_device_strategies, mode=["graph", "eager"])
@combinations.generate(one_device_combinations())
class MirroredOneDeviceDistributionTest(
    strategy_test_lib.DistributionTestBase,
    strategy_test_lib.OneDeviceDistributionTestBase,
    parameterized.TestCase):
  """MirroredStrategy tests for single-device (one CPU or one GPU) setups.

  All tests delegate to helpers on the strategy_test_lib base classes.
  """

  def testMinimizeLoss(self, distribution):
    # Graph and eager mode use different minimize-loss helper paths.
    if context.executing_eagerly():
      self._test_minimize_loss_eager(distribution)
    else:
      self._test_minimize_loss_graph(distribution)

  def testReplicaId(self, distribution):
    self._test_replica_id(distribution)

  def testCallAndMergeExceptions(self, distribution):
    self._test_call_and_merge_exceptions(distribution)

  def testRun(self, distribution):
    self._test_run(distribution)

  def testAllReduceSum(self, distribution):
    self._test_all_reduce_sum(distribution)

  def testAllReduceSumGradients(self, distribution):
    self._test_all_reduce_sum_gradients(distribution)

  def testAllReduceSumGradientTape(self, distribution):
    self._test_all_reduce_sum_gradient_tape(distribution)

  def testAllReduceMean(self, distribution):
    self._test_all_reduce_mean(distribution)

  def testAllReduceMeanGradients(self, distribution):
    self._test_all_reduce_mean_gradients(distribution)

  def testAllReduceMeanGradientTape(self, distribution):
    self._test_all_reduce_mean_gradient_tape(distribution)
class MirroredStrategyVariableCreatorStackTest(
    test.TestCase, parameterized.TestCase):
  """Checks that variable-creator stacks are per replica thread."""

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
          ],
          mode=["graph"]))
  def testCreatorStacksAreThreadLocal(self, distribution):
    def model_fn():
      replica_id_str = str(self.evaluate(_replica_id()))

      # Per-thread creator: tags the created value with this replica's id.
      def thread_creator_fn(next_creator, **kwargs):
        return next_creator(**kwargs) + ":thread_" + replica_id_str

      with variable_scope.variable_creator_scope(thread_creator_fn):
        # Create a variable in this scope.
        v = variable_scope.variable(1.0)

        # This will pause the current thread, and execute the other thread.
        ds_context.get_replica_context().merge_call(lambda _: _)
      return v

    def main_thread_creator(next_creator, **kwargs):
      # We are not using the underlying next_creator for test purposes.
      del next_creator, kwargs
      return "main_thread"

    with context.graph_mode(), \
        distribution.scope(), \
        variable_scope.variable_creator_scope(main_thread_creator):
      result = distribution.extended.call_for_each_replica(model_fn)
      result = distribution.experimental_local_results(result)
      # Each replica sees the main-thread creator plus its own thread tag.
      expected = ("main_thread:thread_0", "main_thread:thread_1")
      self.assertEqual(expected, result)
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph", "eager"]))
class MirroredStrategyCallForEachReplicaTest(test.TestCase):
  """Tests for `extended.call_for_each_replica`, especially with tf.function."""

  def testExecutingEagerlyOutsideFunction(self, distribution):
    """Verify we preserve the value of executing_eagerly_outside_functions()."""
    def model_fn():
      return ops.executing_eagerly_outside_functions()

    originally = ops.executing_eagerly_outside_functions()
    with distribution.scope():
      in_scope = ops.executing_eagerly_outside_functions()
      in_model_fn = distribution.extended.call_for_each_replica(model_fn)
      unwrapped = distribution.experimental_local_results(in_model_fn)
      self.assertEqual(in_scope, unwrapped[0])
      self.assertEqual(in_scope, originally)

    # Verify this all again, but this time in a FuncGraph.
    with func_graph.FuncGraph("fg").as_default(), distribution.scope():
      in_scope = ops.executing_eagerly_outside_functions()
      in_model_fn = distribution.extended.call_for_each_replica(model_fn)
      unwrapped = distribution.experimental_local_results(in_model_fn)
      self.assertEqual(in_scope, unwrapped[0])
      self.assertEqual(in_scope, originally)

  def testFunctionInCallForEachReplica(self, distribution):
    # `traces` counts how many times model_fn's Python body was traced.
    traces = []

    @def_function.function
    def model_fn():
      traces.append(1)
      return ds_context.get_replica_context().replica_id_in_sync_group

    with distribution.scope():
      result = distribution.extended.call_for_each_replica(model_fn)
      self.assertEqual((0, 1), self.evaluate(result.values))
      # Expect one trace per replica.
      self.assertLen(traces, distribution.num_replicas_in_sync)

  def testFunctionInCallForEachReplicaInsideAnotherFunction(self, distribution):
    traces = []

    @def_function.function
    def model_fn():
      traces.append(1)
      return ds_context.get_replica_context().replica_id_in_sync_group

    @def_function.function
    def step():
      return distribution.extended.call_for_each_replica(model_fn)

    with distribution.scope():
      result = step()
      self.assertEqual((0, 1), self.evaluate(result.values))
      self.assertLen(traces, distribution.num_replicas_in_sync)

  def testNestedFunctionInCallForEachReplicaWithMergeCall(self, distribution):
    def merge_fn(_):
      pass

    @def_function.function
    def model_fn():
      # merge_call inside a while_loop body (a nested graph) must raise.
      def body_fn(i):
        ds_context.get_replica_context().merge_call(merge_fn)
        return i + 1
      return control_flow_ops.while_loop_v2(lambda i: i < 2, body_fn, [0])

    with distribution.scope():
      with self.assertRaisesRegexp(
          RuntimeError, "`merge_call` called while defining a new graph."):
        distribution.extended.call_for_each_replica(model_fn)

  def testFunctionInCallForEachReplicaWithMergeCall(self, distribution):
    def merge_fn(_):
      pass

    @def_function.function
    def model_fn():
      # merge_call at the top level of a tf.function is allowed.
      ds_context.get_replica_context().merge_call(merge_fn)
      return 0.

    with distribution.scope():
      self.assertEqual(
          self.evaluate(distribution.extended.call_for_each_replica(model_fn)),
          0.)

  def testFunctionInCallForEachReplicaCached(self, distribution):
    traces = []

    @def_function.function
    def model_fn():
      traces.append(None)

    self.assertEmpty(traces)

    for i in range(10):
      distribution.extended.call_for_each_replica(model_fn)

      if i == 0:
        num_devices = len(traces)
        self.assertGreater(num_devices, 0)
      else:
        # model_fn should not have been re-evaluated so the length should remain
        # the same.
        self.assertLen(traces, num_devices)
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph"]))
class MirroredStrategyNameScopeTest(test.TestCase):
  """Verifies op/variable names produced per replica under name scopes.

  Replica 0 keeps the plain scope; replica 1's names gain a "replica_1/"
  component, as asserted below.
  """
  # NOTE(priyag): Names and name scopes are ignored in eager, hence we are not
  # testing this in eager mode.

  def testNameScope(self, distribution):
    def model_fn():
      with ops.name_scope("foo"):
        a = constant_op.constant(1.0, name="a")
        ds_context.get_replica_context().merge_call(lambda _: _)
        b = constant_op.constant(1.0, name="b")
        return a, b

    with context.graph_mode(), distribution.scope():
      with ops.name_scope("main"):
        result = distribution.extended.call_for_each_replica(model_fn)
        self.assertEqual(2, len(result))
        for v, name in zip(result, ["a", "b"]):
          self.assertIsInstance(v, values.DistributedValues)
          v0, v1 = distribution.experimental_local_results(v)
          self.assertEqual("main/foo/" + name + ":0", v0.name)
          self.assertEqual("main/replica_1/foo/" + name + ":0", v1.name)

  def testWithDefaultName(self, distribution):
    def model_fn():
      # name_scope(None, "foo") uses "foo" as the default scope name.
      with ops.name_scope(None, "foo"):
        a = constant_op.constant(1.0, name="a")
        ds_context.get_replica_context().merge_call(lambda _: _)
        b = constant_op.constant(2.0, name="b")
        return a, b

    with context.graph_mode(), distribution.scope():
      result = distribution.extended.call_for_each_replica(model_fn)
      self.assertEqual(2, len(result))
      for v, name in zip(result, ["a", "b"]):
        self.assertIsInstance(v, values.DistributedValues)
        v0, v1 = distribution.experimental_local_results(v)
        self.assertEqual("foo/" + name + ":0", v0.name)
        self.assertEqual("replica_1/foo/" + name + ":0", v1.name)

  # variable_scope.variable() respects name scopes when creating
  # variables. On the other hand variable_scope.get_variable() ignores name
  # scopes but respects variable scope when creating variables. We test both
  # methods of creating variables to make sure that we have the same
  # variable names in both cases.
  def testNameScopeWithVariable(self, distribution):
    def in_cross_replica(_):
      c = variable_scope.variable(1.0, name="c")
      return c

    def model_fn():
      b = variable_scope.variable(1.0, name="b")
      with ops.name_scope("foo"):
        c = ds_context.get_replica_context().merge_call(in_cross_replica)
      return b, c

    with context.graph_mode(), distribution.scope():
      with ops.name_scope("main"):
        a = variable_scope.variable(1.0, name="a")
        result = distribution.extended.call_for_each_replica(model_fn)
      result_b = result[0]
      result_c = result[1]
      self.assertIsInstance(result_b, values.DistributedValues)
      self.assertIsInstance(result_c, values.DistributedValues)
      a0, a1 = distribution.experimental_local_results(a)
      b0, b1 = distribution.experimental_local_results(result_b)
      c0, c1 = distribution.experimental_local_results(result_c)
      # variable() respects the enclosing name scope ("main", "main/foo").
      self.assertEqual("main/a:0", a0.name)
      self.assertEqual("main/a/replica_1:0", a1.name)
      self.assertEqual("main/b:0", b0.name)
      self.assertEqual("main/b/replica_1:0", b1.name)
      self.assertEqual("main/foo/c:0", c0.name)
      self.assertEqual("main/foo/c/replica_1:0", c1.name)

  def testNameScopeWithGetVariable(self, distribution):
    def in_cross_replica(_):
      c = variable_scope.get_variable("c", [1])
      return c

    def model_fn():
      b = variable_scope.get_variable("b", [1])
      with ops.name_scope("foo"):
        c = ds_context.get_replica_context().merge_call(in_cross_replica)
      return b, c

    with context.graph_mode(), distribution.scope():
      with ops.name_scope("main"):
        a = variable_scope.get_variable("a", [1])
        result = distribution.extended.call_for_each_replica(model_fn)
      result_b = result[0]
      result_c = result[1]
      self.assertIsInstance(result_b, values.DistributedValues)
      self.assertIsInstance(result_c, values.DistributedValues)
      a0, a1 = distribution.experimental_local_results(a)
      b0, b1 = distribution.experimental_local_results(result_b)
      c0, c1 = distribution.experimental_local_results(result_c)
      # get_variable() ignores name scopes, so no "main/" or "foo/" prefix.
      self.assertEqual("a:0", a0.name)
      self.assertEqual("a/replica_1:0", a1.name)
      self.assertEqual("b:0", b0.name)
      self.assertEqual("b/replica_1:0", b1.name)
      self.assertEqual("c:0", c0.name)
      self.assertEqual("c/replica_1:0", c1.name)

  def testVariableScopeWithGetVariable(self, distribution):
    def in_cross_replica(_):
      c = variable_scope.get_variable("c", [1])
      return c

    def model_fn():
      b = variable_scope.get_variable("b", [1])
      with variable_scope.variable_scope("foo"):
        c = ds_context.get_replica_context().merge_call(in_cross_replica)
      return b, c

    with context.graph_mode(), distribution.scope():
      with variable_scope.variable_scope("main"):
        a = variable_scope.get_variable("a", [1])
        result = distribution.extended.call_for_each_replica(model_fn)
      result_b = result[0]
      result_c = result[1]
      self.assertIsInstance(result_b, values.DistributedValues)
      self.assertIsInstance(result_c, values.DistributedValues)
      a0, a1 = distribution.experimental_local_results(a)
      b0, b1 = distribution.experimental_local_results(result_b)
      c0, c1 = distribution.experimental_local_results(result_c)
      # get_variable() respects variable scopes, so prefixes reappear.
      self.assertEqual("main/a:0", a0.name)
      self.assertEqual("main/a/replica_1:0", a1.name)
      self.assertEqual("main/b:0", b0.name)
      self.assertEqual("main/b/replica_1:0", b1.name)
      self.assertEqual("main/foo/c:0", c0.name)
      self.assertEqual("main/foo/c/replica_1:0", c1.name)
@combinations.generate(
    combinations.combine(
        distribution=[
            combinations.NamedDistribution(
                "Mirrored3Devices",
                # pylint: disable=g-long-lambda
                lambda: mirrored_strategy.MirroredStrategy(
                    ["/device:GPU:0", "/device:GPU:1", "/device:CPU:0"]),
                required_gpus=2)
        ],
        mode=["graph", "eager"]))
class MirroredThreeDeviceDistributionTest(
    strategy_test_lib.DistributionTestBase,
    parameterized.TestCase):
  """MirroredStrategy test over three devices (two GPUs plus the CPU)."""

  def testThreeDevices(self, distribution):
    def model_fn():
      v = variable_scope.variable(1.0, name="foo")
      ds_context.get_replica_context().merge_call(lambda _: _)
      return v

    with distribution.scope():
      result = distribution.extended.call_for_each_replica(model_fn)
      # The per-replica variables are regrouped into a single MirroredVariable
      # that reports the primary replica's name.
      self.assertIsInstance(result, values.MirroredVariable)
      self.assertEqual("foo:0", result.name)
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph", "eager"]))
class MirroredVariableUpdateTest(test.TestCase):
  """Tests assign/assign_add/assign_sub on MirroredVariables."""
  # The following tests check assign, assign_add and assign_sub on Mirrored
  # variables in replica and cross replica context.

  def testAssignMirroredVarReplicaContextWithoutAggregationType(self,
                                                                distribution):
    # Test that we always have an aggregation type set on the mirrored variable
    # if we assign to it in replica mode.
    def var_fn():
      v = variable_scope.variable(1.0, name="foo")
      return v

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())

      def model_fn():
        return mirrored_var.assign(5.0)

      with self.assertRaisesRegexp(
          ValueError, "You must specify an aggregation method to update a "
          "MirroredVariable in Replica Context. You can do so by"):
        self.evaluate(distribution.experimental_local_results(
            distribution.extended.call_for_each_replica(model_fn)))

  def testAssignMirroredVarReplicaContextWithSum(self, distribution):
    # Test that we don't reduce a non-per-replica value with the "sum"
    # aggregation type.
    def var_fn():
      v = variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.SUM)
      return v

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())

      def model_fn():
        return mirrored_var.assign(5.0)

      with self.assertRaisesRegexp(
          ValueError, "A non-DistributedValues value 5.0 cannot be reduced "
          "with the given reduce op ReduceOp.SUM."):
        self.evaluate(distribution.experimental_local_results(
            distribution.extended.call_for_each_replica(model_fn)))

  def testAssignMirroredVarCrossDeviceContext(self, distribution):
    # Cross-replica assign needs no aggregation: the same value is written
    # to every replica copy.
    def var_fn():
      return variable_scope.variable(1.0, name="foo")

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))
      mirrored_var_result = self.evaluate(mirrored_var.assign(6.0))
      self.assertEqual(6.0, mirrored_var_result)

  def testAssignMirroredVarReplicaContext(self, distribution):
    def var_fn():
      return variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))

      def model_fn():
        value = math_ops.cast(
            ds_context.get_replica_context().replica_id_in_sync_group,
            mirrored_var.dtype)
        return mirrored_var.assign(value)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      # MEAN aggregation of per-replica values 0.0 and 1.0 -> 0.5.
      self.assertEqual(0.5, self.evaluate(mirrored_var))

  def testAssignMirroredVarReplicaContextWithSingleValue(self, distribution):
    def var_fn():
      return variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))

      def model_fn():
        return mirrored_var.assign(5.0)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      self.assertEqual(5.0, self.evaluate(mirrored_var))

  def testAssignAddMirroredVarCrossDeviceContext(self, distribution):
    def var_fn():
      return variable_scope.variable(1.0, name="foo")

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))

      # read_value == True
      mirrored_var_result = self.evaluate(
          mirrored_var.assign_add(6.0, read_value=True))
      self.assertEqual(7.0, mirrored_var_result)
      self.assertEqual(7.0, self.evaluate(mirrored_var.values[0]))
      self.assertEqual(7.0, self.evaluate(mirrored_var.values[1]))
      self.assertEqual(
          distribution.extended.worker_devices[0], mirrored_var.devices[0])
      self.assertEqual(
          distribution.extended.worker_devices[1], mirrored_var.devices[1])

      # read_value == False
      self.evaluate(mirrored_var.assign_add(2.0, read_value=False))
      self.assertEqual(9.0, self.evaluate(mirrored_var.values[0]))
      self.assertEqual(9.0, self.evaluate(mirrored_var.values[1]))
      self.assertEqual(
          distribution.extended.worker_devices[0], mirrored_var.devices[0])
      self.assertEqual(
          distribution.extended.worker_devices[1], mirrored_var.devices[1])

  def testAssignAddMirroredVarReplicaContext(self, distribution):
    def var_fn():
      return variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))

      def model_fn():
        value = math_ops.cast(
            ds_context.get_replica_context().replica_id_in_sync_group,
            mirrored_var.dtype)
        return mirrored_var.assign_add(value)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      # 1.0 + MEAN(0.0, 1.0) = 1.5.
      self.assertEqual(1.5, self.evaluate(mirrored_var))

  def testAssignAddMirroredVarReplicaContextWithSingleValue(self, distribution):
    def var_fn():
      return variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))

      def model_fn():
        return mirrored_var.assign_add(5.0)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      self.assertEqual(6.0, self.evaluate(mirrored_var))

  def testAssignSubMirroredVarCrossDeviceContext(self, distribution):
    def var_fn():
      return variable_scope.variable(5.0, name="foo")

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(5.0, self.evaluate(mirrored_var))
      mirrored_var_result = self.evaluate(mirrored_var.assign_sub(2.0))
      self.assertEqual(3.0, mirrored_var_result)
      self.assertEqual(3.0, self.evaluate(mirrored_var.values[0]))
      self.assertEqual(3.0, self.evaluate(mirrored_var.values[1]))
      self.assertEqual(
          distribution.extended.worker_devices[0], mirrored_var.devices[0])
      self.assertEqual(
          distribution.extended.worker_devices[1], mirrored_var.devices[1])

  def testAssignSubMirroredVarReplicaContext(self, distribution):
    def var_fn():
      return variable_scope.variable(
          5.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(5.0, self.evaluate(mirrored_var))

      def model_fn():
        value = math_ops.cast(
            ds_context.get_replica_context().replica_id_in_sync_group,
            mirrored_var.dtype)
        return mirrored_var.assign_sub(value)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      # 5.0 - MEAN(0.0, 1.0) = 4.5.
      self.assertEqual(4.5, self.evaluate(mirrored_var))

  def testAssignSubMirroredVarReplicaContextWithSingleValue(self, distribution):
    def var_fn():
      return variable_scope.variable(
          5.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertIsInstance(mirrored_var, values.MirroredVariable)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(5.0, self.evaluate(mirrored_var))

      def model_fn():
        return mirrored_var.assign_sub(1.0)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      self.assertEqual(4.0, self.evaluate(mirrored_var))
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph", "eager"]))
class MirroredAndSyncOnReadVariableInitializerTest(test.TestCase):
  """Tests initializer ops for mirrored and sync-on-read variables."""

  def testAssignMirroredVarInitializer(self, distribution):
    # This test is not eager compatible since in eager variables are initialized
    # upon construction instead of once the initialization op is run.
    with context.graph_mode():
      def var_fn():
        v = variable_scope.variable(1.0, name="foo")
        return v

      with distribution.scope():
        mirrored_var = distribution.extended.call_for_each_replica(var_fn)
        self.assertIsInstance(mirrored_var, values.MirroredVariable)
        # Uninitialized until the initializer op is explicitly run.
        self.assertFalse(self.evaluate(mirrored_var.is_initialized()))
        self.evaluate(mirrored_var.initializer)
        self.assertTrue(self.evaluate(mirrored_var.is_initialized()))

  def testAssignReplicaLocalVarInitializer(self, distribution):
    # This test is not eager compatible since in eager variables are initialized
    # upon construction instead of once the initialization op is run.
    with context.graph_mode():
      def model_fn():
        v_sum = variable_scope.variable(
            1.0,
            synchronization=variable_scope.VariableSynchronization.ON_READ,
            aggregation=variable_scope.VariableAggregation.SUM)
        self.assertIsInstance(v_sum, values.SyncOnReadVariable)
        return v_sum

      with distribution.scope():
        sync_on_read_var = distribution.extended.call_for_each_replica(
            model_fn)
        self.assertIsInstance(sync_on_read_var, values.SyncOnReadVariable)
        self.assertFalse(self.evaluate(sync_on_read_var.is_initialized()))
        self.evaluate(sync_on_read_var.initializer)
        self.assertTrue(self.evaluate(sync_on_read_var.is_initialized()))
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph", "eager"]))
class SyncOnReadVariableAssignTest(test.TestCase):
  """Cross-replica assignment semantics for SyncOnReadVariable."""

  def testAssignReplicaLocalVarSumAggregation(self, distribution):
    def model_fn():
      v_sum = variable_scope.variable(
          1.0,
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=variable_scope.VariableAggregation.SUM)
      return v_sum

    with distribution.scope():
      sync_on_read_var = distribution.extended.call_for_each_replica(model_fn)
      self.assertIsInstance(sync_on_read_var, values.SyncOnReadVariable)
      self.evaluate(variables.global_variables_initializer())
      # Each replica has a value of 1.0 assigned to it in replica context.
      # When we read the value using `read_var` we should see the SUM of each of
      # values on each of the replicas.
      self.assertEqual(2.0, self.evaluate(
          distribution.extended.read_var(sync_on_read_var)))
      # Assigning 6.0 in cross replica context will assign a value of
      # 6.0/num_replicas to each replica.
      tlv_ops = sync_on_read_var.assign(6.0)
      self.evaluate(tlv_ops)
      # On reading the sync on read var we should get the assigned value back.
      # The value on all the replicas are added before being returned by
      # `read_var`.
      self.assertEqual(6.0, self.evaluate(
          distribution.extended.read_var(sync_on_read_var)))

  def testAssignReplicaLocalVarMeanAggregation(self, distribution):
    def model_fn():
      v_sum = variable_scope.variable(
          1.0,
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=variable_scope.VariableAggregation.MEAN)
      return v_sum

    with distribution.scope():
      sync_on_read_var = distribution.extended.call_for_each_replica(model_fn)
      self.assertIsInstance(sync_on_read_var, values.SyncOnReadVariable)
      self.evaluate(variables.global_variables_initializer())
      # Each replica has a value of 1.0 assigned to it in replica context.
      # When we read the value using `read_var` we should see the MEAN of values
      # on all replicas which is the value assigned in replica context.
      self.assertEqual(1.0, self.evaluate(
          distribution.extended.read_var(sync_on_read_var)))
      # With MEAN aggregation a cross-replica assign writes the same value to
      # every replica.
      tlv_ops = sync_on_read_var.assign(6.0)
      self.evaluate(tlv_ops)
      # On reading the sync on read var we should get the MEAN of all values
      # which is equal to the value assigned.
      self.assertEqual(6.0, self.evaluate(
          distribution.extended.read_var(sync_on_read_var)))
class MockModel(object):
  """Tiny stand-in model holding one or, optionally, two variables.

  Calling the instance computes ``factor * var1`` and, when a second
  variable exists, adds it to the result.
  """

  def __init__(self, two_variables=False):
    self.variables = [variable_scope.variable(1.25, name="dummy_var1")]
    if two_variables:
      self.variables.append(variable_scope.variable(2.0, name="dummy_var2"))

  def __call__(self, factor=2):
    result = factor * self.variables[0]
    if len(self.variables) > 1:
      result = result + self.variables[1]
    return result
class MiniModel(keras_training.Model):
  """Minimal model for mnist.

  Useful for testing and debugging on slow TPU simulators.
  """

  def __init__(self):
    super(MiniModel, self).__init__(name="")
    self.fc = keras_core.Dense(
        1, name="fc", kernel_initializer="ones", bias_initializer="ones")

  def call(self, inputs, training=True):
    # The incoming `inputs` are ignored; a fixed all-ones batch is fed to the
    # dense layer instead.
    del inputs
    return self.fc(array_ops.ones([1, 10]))
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph", "eager"]))
class MirroredStrategyDefunTest(test.TestCase):
  """Defun (tf.function) tracing and execution under MirroredStrategy."""

  def _call_and_check(self, distribution, model_fn, inputs, expected_result,
                      defuns, two_variables=False):
    """Runs `model_fn` per replica and checks per-device results.

    Also re-traces each defun in `defuns` per replica so variable capture can
    be inspected (assertion currently disabled, see TODO below).
    """
    cpu_dev = device_util.canonicalize("CPU:0")
    gpu_dev = device_util.canonicalize("GPU:0")
    devices = [cpu_dev, gpu_dev]

    with distribution.scope():
      mock_model = MockModel(two_variables)
      self.evaluate(variables.global_variables_initializer())

      result = distribution.extended.call_for_each_replica(
          model_fn, args=[mock_model] + inputs)
      for r in range(len(devices)):
        device_result = values.select_replica(r, result)
        device_expected_result = values.select_replica(r, expected_result)
        self.assertAllClose(device_expected_result,
                            self.evaluate(device_result))

      for defun in defuns:
        # `Function`s are specialized to the current device stack, so
        # call_for_each has one trace per device. To check that the expected set
        # of variables was accessed on each trace, we first retrieve each
        # device-specific graph function.
        per_replica_graph_functions = (
            distribution.extended.call_for_each_replica(
                defun.get_concrete_function, args=[mock_model] + inputs))
        for i in range(len(devices)):
          graph_function = per_replica_graph_functions.values[i]
          # TODO(b/129555712): re-enable an assertion here that the two sets of
          # variables are the same.
          # self.assertEqual(set(graph_function.graph.variables),
          #  set(mock_model.variables))
          del graph_function

  def testVariableInDefun(self, distribution):
    @function.defun
    def times_two(mock_model):
      return mock_model()

    def model_fn(mock_model):
      return times_two(mock_model)

    # MockModel's single variable is 1.25, so the result is 2 * 1.25 = 2.5.
    self._call_and_check(distribution, model_fn, [], 2.5, [times_two])

  def testVariableInNestedDefun(self, distribution):
    @function.defun
    def times_two(mock_model):
      return mock_model()

    @function.defun
    def two_x_plus_one(mock_model):
      return times_two(mock_model) + 1

    def model_fn(mock_model):
      return two_x_plus_one(mock_model)

    # 2 * 1.25 + 1 = 3.5
    self._call_and_check(distribution, model_fn, [], 3.5,
                         [times_two, two_x_plus_one])

  def testTwoVariablesInNestedDefun(self, distribution):
    @function.defun
    def fn1(mock_model):
      return mock_model()

    @function.defun
    def fn2(mock_model):
      return fn1(mock_model) + 1

    def model_fn(mock_model):
      return fn2(mock_model)

    # With two variables: 2 * 1.25 + 2.0 + 1 = 5.5
    self._call_and_check(distribution, model_fn, [], 5.5, [fn1, fn2],
                         two_variables=True)

  def testGradientTapeOverNestedDefuns(self, distribution):
    @function.defun
    def fn1(mock_model):
      return mock_model()

    @function.defun
    def fn2(mock_model):
      return fn1(mock_model) + 1

    def model_fn(mock_model):
      with backprop.GradientTape(persistent=True) as gtape:
        result = fn2(mock_model)
      grads = gtape.gradient(result,
                             [v._get() for v in mock_model.variables])
      return grads

    # d(2*v1 + v2 + 1)/dv1 = 2, d(...)/dv2 = 1.
    self._call_and_check(distribution, model_fn, [], [2.0, 1.0], [fn1, fn2],
                         two_variables=True)

  def testPassPerReplica(self, distribution):
    @function.defun
    def fn1(mock_model, factor):
      return mock_model(factor)

    # Each replica gets a different factor; result is factor * 1.25.
    factors = values.PerReplica((5.0, 3.0))
    expected_result = values.PerReplica((5.0 * 1.25, 3.0 * 1.25))
    self._call_and_check(distribution, fn1, [factors], expected_result, [fn1])

  def testTrain(self, distribution):
    with distribution.scope():
      mock_model = MiniModel()
      mock_model.call = function.defun(mock_model.call)

      def loss_fn(ctx):
        del ctx
        return mock_model(array_ops.ones([1, 10]))

      gradients_fn = backprop.implicit_grad(loss_fn)
      gradients_fn = optimizer_lib.get_filtered_grad_fn(gradients_fn)
      grads_and_vars = distribution.extended.call_for_each_replica(
          gradients_fn, args=(None,))

      optimizer = gradient_descent.GradientDescentOptimizer(0.25)
      update_ops = optimizer._distributed_apply(distribution, grads_and_vars)  # pylint: disable=protected-access

      if not context.executing_eagerly():
        self.evaluate(variables.global_variables_initializer())
        self.evaluate(update_ops)

      updated_var_values = self.evaluate(mock_model.variables)
      # All variables start at 1.0 and get two updates of 0.25.
      self.assertAllEqual(0.5 * np.ones([10, 1]), updated_var_values[0])
      self.assertAllEqual([0.5], updated_var_values[1])
@combinations.generate(
    combinations.combine(
        distribution=[
            combinations.NamedDistribution(
                "Mirrored",
                # pylint: disable=g-long-lambda
                lambda: mirrored_strategy.MirroredStrategy(
                    devices=mirrored_strategy.all_local_devices(),
                    cross_device_ops=cross_device_ops_lib.MultiWorkerAllReduce([
                        "/job:worker/task:0", "/job:worker/task:1"
                    ], context.num_gpus())),
                required_gpus=1)
        ],
        mode=["graph"]))
class MultiWorkerMirroredStrategyTest(
    multi_worker_test_base.MultiWorkerTestBase,
    strategy_test_lib.DistributionTestBase):
  """MirroredStrategy tests running across two in-process workers."""

  def _configure_distribution_strategy(self, distribution):
    # Attach a two-worker cluster spec to the strategy under test.
    cluster_spec = server_lib.ClusterSpec({
        "worker": ["/job:worker/task:0", "/job:worker/task:1"]
    })
    distribution.configure(cluster_spec=cluster_spec)

  def test_num_replicas_in_sync(self, distribution):
    self._configure_distribution_strategy(distribution)
    # We calculate the total number of gpus across the workers(2) specified in
    # the cluster spec.
    self.assertEqual(context.num_gpus() * 2, distribution.num_replicas_in_sync)

  def testMinimizeLossGraph(self, distribution):
    self._configure_distribution_strategy(distribution)
    self._test_minimize_loss_graph(distribution, learning_rate=0.05)

  def testDeviceScope(self, distribution):
    """Test the device scope of multi-worker MirroredStrategy."""
    self._configure_distribution_strategy(distribution)
    with distribution.scope():
      a = constant_op.constant(1.)
      with ops.device("/cpu:0"):
        b = constant_op.constant(1.)
      # Ops created inside the scope are placed on worker 0 by default.
      self.assertEqual(a.device, "/job:worker/task:0")
      self.assertEqual(b.device, "/job:worker/task:0/device:CPU:0")

  def testMakeInputFnIteratorWithDataset(self, distribution):
    self._configure_distribution_strategy(distribution)
    dataset_fn = lambda: dataset_ops.Dataset.range(100)
    num_gpus = context.num_gpus()
    num_workers = 2
    # Every worker runs the same input pipeline, so each per-step batch is
    # repeated once per worker.
    expected_values = [[i+j for j in range(num_gpus)] * num_workers
                       for i in range(0, 100, num_gpus)]

    with context.graph_mode(), self.cached_session() as sess:
      # `expected_input_pipeline_id` is None because the input_fn will be called
      # multiple times, each with a different input_pipeline_id.
      input_fn = self._input_fn_to_test_input_context(
          dataset_fn,
          expected_num_replicas_in_sync=num_workers*num_gpus,
          expected_num_input_pipelines=num_workers,
          expected_input_pipeline_id=None)
      iterator = distribution.make_input_fn_iterator(input_fn)
      self._test_input_fn_iterator(
          iterator, distribution.extended.worker_devices, expected_values, sess)

  def testMakeInputFnIteratorWithCallable(self, distribution):
    self._configure_distribution_strategy(distribution)
    def fn():
      # Callable-style input: returns a get_next callable, not a dataset.
      dataset = dataset_ops.Dataset.range(100)
      it = dataset_ops.make_one_shot_iterator(dataset)
      return it.get_next
    num_gpus = context.num_gpus()
    num_workers = 2
    expected_values = []
    for i in range(0, 100, num_gpus):
      expected_values.append([i+j for j in range(num_gpus)] * num_workers)

    with context.graph_mode(), self.cached_session() as sess:
      # `expected_input_pipeline_id` is None because the input_fn will be called
      # multiple times, each with a different input_pipeline_id.
      input_fn = self._input_fn_to_test_input_context(
          fn,
          expected_num_replicas_in_sync=num_workers*num_gpus,
          expected_num_input_pipelines=num_workers,
          expected_input_pipeline_id=None)
      iterator = distribution.make_input_fn_iterator(input_fn)
      self._test_input_fn_iterator(
          iterator, distribution.extended.worker_devices, expected_values, sess,
          test_reinitialize=False, ignore_order=True)

  def testUpdateConfigProto(self, distribution):
    distribution.configure(cluster_spec={"worker": ["fake1", "fake2"]})

    config_proto = config_pb2.ConfigProto()
    new_config = distribution.update_config_proto(config_proto)

    # Verify isolate_session_state
    self.assertTrue(new_config.isolate_session_state)
@combinations.generate(
    combinations.combine(
        distribution=[
            combinations.NamedDistribution(
                "Mirrored",
                # pylint: disable=g-long-lambda
                lambda: mirrored_strategy.MirroredStrategy(
                    devices=["/job:worker/task:0/gpu:{}".format(
                        i) for i in range(context.num_gpus())]),
                required_gpus=1)
        ],
        mode=["graph"]))
class RemoteSingleWorkerMirroredStrategyGraph(
    multi_worker_test_base.SingleWorkerTestBaseGraph,
    strategy_test_lib.RemoteSingleWorkerMirroredStrategyBase):
  """Graph-mode MirroredStrategy tests against a single remote worker.

  The test methods are thin wrappers delegating to the
  RemoteSingleWorkerMirroredStrategyBase implementations.
  """

  def _get_num_gpus(self):
    # Hook used by the base class to size the strategy.
    return context.num_gpus()

  def testNumReplicasInSync(self, distribution):
    self._testNumReplicasInSync(distribution)

  def testMinimizeLoss(self, distribution):
    self._testMinimizeLoss(distribution)

  def testDeviceScope(self, distribution):
    self._testDeviceScope(distribution)

  def testMakeInputFnIteratorWithDataset(self, distribution):
    self._testMakeInputFnIteratorWithDataset(distribution)

  def testMakeInputFnIteratorWithCallable(self, distribution):
    self._testMakeInputFnIteratorWithCallable(distribution)
class MultiWorkerMirroredStrategyTestWithChief(
    multi_worker_test_base.MultiWorkerTestBase,
    strategy_test_lib.DistributionTestBase):
  """MirroredStrategy tests on a cluster that includes a chief node."""

  @classmethod
  def setUpClass(cls):
    """Create a local cluster with 2 workers and 1 chief."""
    cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
        num_workers=2, num_ps=0, has_chief=True)
    cls._default_target = "grpc://" + cls._cluster_spec["chief"][0]

  def _make_cross_device_ops(self):
    # All-reduce spanning the chief plus both workers.
    return cross_device_ops_lib.MultiWorkerAllReduce(
        ["/job:chief/task:0", "/job:worker/task:0", "/job:worker/task:1"],
        context.num_gpus())

  def testMinimizeLossGraph(self):
    with context.graph_mode():
      strategy = mirrored_strategy.MirroredStrategy(
          cross_device_ops=self._make_cross_device_ops())
      strategy.configure(cluster_spec=self._cluster_spec)
      self._test_minimize_loss_graph(strategy, learning_rate=0.05)

  def testMinimizeLossGraphMirroredStrategy(self):
    # Same as above, but with the device list given explicitly.
    with context.graph_mode():
      strategy = mirrored_strategy.MirroredStrategy(
          mirrored_strategy.all_local_devices(),
          cross_device_ops=self._make_cross_device_ops())
      strategy.configure(cluster_spec=self._cluster_spec)
      self._test_minimize_loss_graph(strategy, learning_rate=0.05)

  def testMinimizeLossGraphMirroredStrategyWithOneNode(self):
    with context.graph_mode():
      cluster_spec = {}
      cluster_spec["chief"] = self._cluster_spec["chief"]
      tf_config = {"cluster": cluster_spec}
      with test.mock.patch.dict("os.environ",
                                {"TF_CONFIG": json.dumps(tf_config)}):
        strategy = mirrored_strategy.MirroredStrategy()
        # With GPUs present NCCL all-reduce is inferred; otherwise the
        # strategy falls back to reduce-to-one-device.
        if context.num_gpus() > 0:
          self.assertIsInstance(strategy.extended._inferred_cross_device_ops,
                                cross_device_ops_lib.NcclAllReduce)
        else:
          self.assertIsInstance(strategy.extended._inferred_cross_device_ops,
                                cross_device_ops_lib.ReductionToOneDevice)
      self.skipTest("b/130551176, run the following once fixed.")
      self._test_minimize_loss_graph(strategy, learning_rate=0.05)

  def testInitializeFromTFConfig(self):
    with context.graph_mode():
      tf_config = {"cluster": self._cluster_spec}
      with test.mock.patch.dict("os.environ",
                                {"TF_CONFIG": json.dumps(tf_config)}):
        strategy = mirrored_strategy.MirroredStrategy(
            cross_device_ops=self._make_cross_device_ops())
        # Three nodes (chief + 2 workers), at least one replica each.
        self.assertEqual(
            max(context.num_gpus(), 1) * 3, strategy.num_replicas_in_sync)

  def testSummaryForReplicaZeroOnly(self):
    with context.graph_mode():
      strategy = mirrored_strategy.MirroredStrategy(
          mirrored_strategy.all_local_devices(),
          cross_device_ops=self._make_cross_device_ops())
      strategy.configure(cluster_spec=self._cluster_spec)
      self._test_summary_for_replica_zero_only(strategy)
class MirroredVariableStopGradientTest(test.TestCase, parameterized.TestCase):
  """Checks a mirrored variable can be used as a `stop_gradients` target."""

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_one_cpu,
              strategy_combinations.mirrored_strategy_with_one_gpu,
          ],
          mode=["graph"]))
  def testMirroredVariableAsStopGradient(self, distribution):
    with distribution.scope():
      inp = constant_op.constant(1.0)
      x = variables.Variable(1.0)
      y = inp*x
      # Stopping gradients at x should leave no gradient path to y.
      grads = gradients.gradients(x, y, stop_gradients=x)
      self.assertIsNone(grads[0])
class FunctionTest(test.TestCase):
  """Interactions between tf.function and MirroredStrategy."""

  def testBackwardFuctionDevicePlacement(self):
    # Verifies the backward function of a concrete function runs on every
    # mirrored device, not only the one it was traced on.
    if context.num_gpus() < 1:
      self.skipTest("At least one GPU is required.")
    devices = [device_util.resolve("/device:GPU:0"),
               device_util.resolve("/device:CPU:0")]

    ms = mirrored_strategy.MirroredStrategy(devices)
    with ms.scope():
      w = variable_scope.variable([1.5], name="w")
      b = variable_scope.variable([0.5], name="b")

    @def_function.function
    def forward(x, w, b):
      return x * w + b
    x = constant_op.constant([1.0], name="x_useless")
    # Trace once against the primary components of the mirrored variables.
    concrete_forward = forward.get_concrete_function(x, w.primary, b.primary)

    with ms.scope():
      def replica_fn():
        with backprop.GradientTape() as t:
          x = constant_op.constant([1.0], name="x")
          loss = concrete_forward(x, w._get(), b._get()) - [1.0]
          return t.gradient(loss, [w, b])

      def step_fn():
        return ms.experimental_run_v2(replica_fn)

      # Capture run metadata so partition graphs can be inspected below.
      context.enable_run_metadata()
      g1, g2 = step_fn()
      run_metadata = context.export_run_metadata()
      context.disable_run_metadata()
      self.assertEqual(self.evaluate(g1.primary), 1.0)
      self.assertEqual(self.evaluate(g2.primary), 1.0)

      # Verify that this node runs on both devices.
      node_name = "gradients_mul_grad_mul_1_x"
      devices_for_this_node = set()
      for partition_graph in run_metadata.partition_graphs:
        for node in partition_graph.node:
          if node.name == node_name:
            devices_for_this_node.add(node.device)
      self.assertSetEqual(devices_for_this_node, set(devices))

  def testFuctionPreservesAutoGraph(self):
    # AutoGraph conversion must still apply to code run via the strategy.
    config.set_logical_device_configuration(
        config.list_physical_devices("CPU")[0],
        [context.LogicalDeviceConfiguration()] * 2)
    ms = mirrored_strategy.MirroredStrategy()

    def f():
      self.assertTrue(converter_testing.is_inside_generated_code())
      return 1

    with ms.scope():

      @def_function.function
      def replica_fn():
        return f()

      ms.experimental_run_v2(replica_fn)
def _replica_id():
  """Return the current replica id, always wrapped as a Tensor."""
  rid = ds_context.get_replica_context().replica_id_in_sync_group
  if isinstance(rid, ops.Tensor):
    return rid
  return constant_op.constant(rid)
def _replica_id_as_int():
  """Return the current replica id as a Python value (unwraps Tensors)."""
  rid = ds_context.get_replica_context().replica_id_in_sync_group
  return tensor_util.constant_value(rid) if isinstance(rid, ops.Tensor) else rid
if __name__ == "__main__":
  # Standard TensorFlow test entry point.
  test.main()
| {
"content_hash": "2ddc20c1fa2a4468f710fe50437c04c4",
"timestamp": "",
"source": "github",
"line_count": 1384,
"max_line_length": 113,
"avg_line_length": 39.164739884393065,
"alnum_prop": 0.6841008043686813,
"repo_name": "jhseu/tensorflow",
"id": "f31a7d17386531baa5db378d386b993fb404cff8",
"size": "54893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/distribute/mirrored_strategy_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "27480"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "875455"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "80051513"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112748"
},
{
"name": "Go",
"bytes": "1853641"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1729057"
},
{
"name": "Makefile",
"bytes": "62498"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "304661"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "19515"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "36791185"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "56741"
},
{
"name": "Shell",
"bytes": "685877"
},
{
"name": "Smarty",
"bytes": "35147"
},
{
"name": "Starlark",
"bytes": "3504187"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from typing import Dict, Type
from .base import RecommenderTransport
from .grpc import RecommenderGrpcTransport
from .grpc_asyncio import RecommenderGrpcAsyncIOTransport
# Compile a registry of transports, keyed by transport name.
_transport_registry = OrderedDict(
    (
        ("grpc", RecommenderGrpcTransport),
        ("grpc_asyncio", RecommenderGrpcAsyncIOTransport),
    )
)  # type: Dict[str, Type[RecommenderTransport]]

# Public API of this module.
__all__ = (
    "RecommenderTransport",
    "RecommenderGrpcTransport",
    "RecommenderGrpcAsyncIOTransport",
)
| {
"content_hash": "8928f5bc3f2a308e80846ab3e3c72fd0",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 82,
"avg_line_length": 33.294117647058826,
"alnum_prop": 0.8003533568904594,
"repo_name": "googleapis/python-recommender",
"id": "d13734288c03f133c04ebd44bd001d4da736dd3e",
"size": "1166",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "google/cloud/recommender_v1/services/recommender/transports/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1010344"
},
{
"name": "Shell",
"bytes": "30675"
}
],
"symlink_target": ""
} |
from http.server import BaseHTTPRequestHandler, HTTPServer
import threading, pickle, re
class MyHandler(BaseHTTPRequestHandler):
    """Static-file handler mapping request paths onto a local folder.

    Paths without a known extension (e.g. "/") fall through to index.html
    via the KeyError handler in do_GET.
    """

    # Extension -> MIME type; built once at class-creation time instead of
    # on every request.
    F_TYPE_MAP = {'.html': 'text/html', '.css': 'text/css',
                  '.ico': 'image/x-icon', '.jpg': 'image/jpeg',
                  '.png': 'image/png', '.gif': 'image/gif',
                  '.js': 'text/javascript'}
    # Matches "/" or an extension like ".css"; findall yields the captured
    # group, so the last element is the requested extension ("" when there is
    # none).  Raw string fixes the previously-invalid '\/' escape.
    T_TYPE = re.compile(r'\/|(\.\w*)')
    # Raw string: the original literal relied on '\j', '\O', '\L', '\G' not
    # being escape sequences, which raises SyntaxWarning on modern Python and
    # will eventually become an error.  The byte value is unchanged.
    DOC_ROOT = r'C:\Users\jackt.JACK-IS-AWESOME\OneDrive\LaptopProjects\Google test signin\Google'

    def do_GET(self):
        """Serve the file named by the request path (query string stripped)."""
        r_file = self.path.split('?')
        print(r_file)
        requested_type = self.T_TYPE.findall(self.path)
        print(requested_type)
        ex = requested_type[-1]
        try:
            self.send_response(200)
            self.send_header('Content-type', self.F_TYPE_MAP[ex])
            self.send_header('Content-Encoding', 'utf-8')
            self.end_headers()
            try:
                # Try as text first; binary assets (images, icons) raise
                # UnicodeDecodeError and are re-read in binary mode.
                with open(self.DOC_ROOT + '%s' % r_file[0]) as file:
                    f = file.read()
                    self.wfile.write(bytes(f, 'utf8'))
            except UnicodeDecodeError:
                with open(self.DOC_ROOT + '%s' % r_file[0], 'rb') as f:
                    file = f.read()
                    self.wfile.write(file)
            except IOError:
                self.send_response(404, 'File Not Found')
                self.wfile.write(bytes('404 file not found', 'utf8'))
        except KeyError:
            # Unknown/missing extension (e.g. bare "/"): serve the landing page.
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            with open(self.DOC_ROOT + r'\index.html') as file:
                f = file.read()
                self.wfile.write(bytes(f, 'utf8'))
        return
# (host, port) pair the HTTP server binds to.
server_address = ('localhost', 8989)
def run():
    """Create the HTTP server and block forever serving requests."""
    print('starting server ...')
    server = HTTPServer(server_address, MyHandler)
    server.serve_forever()
# Run the server on a background thread so the interactive session stays free.
bg_server = threading.Thread(target=run)
### Uncomment the next line if you want to have the server start when the file is run ###
# NOTE(review): the comment above is stale -- the start() call below is active.
bg_server.start()
print('\nserver started at %s:%s' % server_address)
| {
"content_hash": "f6c0f83c6ddde55089ac0863545a9739",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 177,
"avg_line_length": 43.673469387755105,
"alnum_prop": 0.5780373831775701,
"repo_name": "jath03/projects",
"id": "a91bd1fd431900de78cad8b364138a15b7a46188",
"size": "2140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "local/Google test signin/Google/MyServer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "2001"
},
{
"name": "C++",
"bytes": "381"
},
{
"name": "CSS",
"bytes": "784"
},
{
"name": "HTML",
"bytes": "18090"
},
{
"name": "JavaScript",
"bytes": "261"
},
{
"name": "Python",
"bytes": "1665081"
}
],
"symlink_target": ""
} |
import argparse
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import sqlite3, json, datetime
import time, math, os, re
def chat_renames():
    """Return chat topic changes as (datetime, title, author), newest first."""
    print("Getting topics...")
    rows = c.execute("""SELECT id, timestamp, json FROM messages WHERE event="service" ORDER BY timestamp;""")
    renames = []
    for _sid, ts, raw in rows:
        event = json.loads(raw)
        if event["action"]["type"] != "chat_rename":
            continue
        renames.append((
            datetime.datetime.fromtimestamp(ts),
            event["action"]["title"],
            event["from"]["print_name"].replace("_", " "),
        ))
    renames.sort(key=lambda item: item[0], reverse=True)
    return renames
def talker_stats(span=None, max_talkers=10):
    """Return per-talker stats as dict items: name -> [msgs, words, docs, photos].

    Args:
        span: if given, only count messages from the last `span` days.
        max_talkers: currently unused -- callers sort/limit the result
            themselves.  TODO(review): confirm, then drop it or apply it here.
    """
    print("Getting top talkers...")
    # Compile the word pattern once instead of on every message.
    word_re = re.compile('[a-zäöå]{2,}', flags=re.IGNORECASE)
    talkers = {}
    before = int(time.time() - span*24*60*60) if span else 0
    for sid, timestamp, message in c.execute("""SELECT id, timestamp, json FROM messages WHERE event="message" AND timestamp >= ?;""", (before, )):
        message = json.loads(message)
        name = message["from"]["print_name"]
        if name not in talkers:
            # [text messages, words, documents, photos]
            talkers[name] = [0, 0, 0, 0]
        if "text" in message:
            talkers[name][0] += 1
            talkers[name][1] += len(word_re.findall(message["text"]))
        elif "media" in message:
            media_type = message["media"]["type"]
            if media_type == "photo":
                talkers[name][3] += 1
            elif media_type == "document":
                talkers[name][2] += 1
            elif media_type == "geo":
                pass
            elif media_type == "contact":
                pass
    return talkers.items()
def bot_spammers(max_talkers=10):
    """Return (top_commands, top_bots).

    top_commands: the 6 most-used "/cmd" commands, each paired with its 6
        heaviest users as (name, count) tuples.
    top_bots: the 5 chattiest "*bot" accounts as (name, count) tuples.

    NOTE(review): `max_talkers` is unused -- the 6/6/5 limits below are
    hard-coded.  Confirm whether it should replace them.
    """
    print("Getting top bot spammers...")
    cmds = {}  # command -> {user name -> use count}
    bots = {}  # bot name -> message count
    for sid, timestamp, message in c.execute("""SELECT id, timestamp, json FROM messages WHERE event="message";"""):
        message = json.loads(message)
        name = message["from"]["print_name"]
        if "text" in message and message["text"].startswith("/"):
            # Normalize "/cmd@SomeBot arg..." down to "/cmd".
            cmd = message["text"].strip().split(" ")[0].split("@")[0]
            #print(cmd, "\t", message["text"])
            if cmd in cmds:
                if name in cmds[cmd]:
                    cmds[cmd][name] += 1
                else:
                    cmds[cmd][name] = 1
            else:
                cmds[cmd] = { name: 1 }
        elif name.lower()[-3:] == "bot":
            # Increase bot's popularity
            if name in bots:
                bots[name] += 1
            else:
                bots[name] = 1
    # Filter Top-6 commands (ranked by total uses across all users)
    cmds = sorted(cmds.items(), key=lambda x: sum(x[1].values()), reverse=True)[:6]
    # Filter Top-6 users for each command
    cmds = [(c[0], sorted(c[1].items(), key=lambda x: x[1], reverse=True)[:6]) for c in cmds]
    # Filter Top-5 Bots
    bots = sorted(bots.items(), key=lambda x: x[1], reverse=True)[:5]
    return cmds, bots
def most_commonly_used_words():
    """Return (word, count) pairs sorted by count, descending."""
    print("Getting most commonly used words...")
    # Compile the word pattern once instead of on every message.
    word_re = re.compile('[a-zäöå]{2,}', flags=re.IGNORECASE)
    words = {}
    for sid, timestamp, message in c.execute("""SELECT id, timestamp, json FROM messages WHERE event="message";"""):
        message = json.loads(message)
        if "text" not in message:
            continue
        for mword in word_re.findall(message["text"]):
            mword = mword.lower()
            words[mword] = words.get(mword, 0) + 1
    # NOTE: the original also tried to tally "@user" mentions here, but the
    # word regex can never match a leading "@", so that branch was dead code
    # (and its result was discarded anyway); it has been removed.
    return sorted(words.items(), key=lambda x: x[1], reverse=True)
def population_graph(filepath="aski_population.png", show=False):
    """Plot group membership over time: total as a line, joins/leaves as bars."""
    print("Creating population graph...")
    population = {}  # date -> [running total, joins that day, leaves that day]
    total = 0
    prev_date = None
    for sid, timestamp, service in c.execute("""SELECT id, timestamp, json FROM messages WHERE event="service" ORDER BY timestamp;"""):
        service = json.loads(service)
        action_type = service["action"]["type"]
        if action_type not in ["chat_add_user", "chat_add_user_link", "chat_del_user"]:
            continue
        timestamp = datetime.datetime.fromtimestamp(timestamp)
        date = datetime.date(timestamp.year, timestamp.month, timestamp.day)
        # Init table for the date
        if date != prev_date:
            population[date] = [total, 0, 0]
            prev_date = date
        if action_type == "chat_add_user" or action_type == "chat_add_user_link":
            total += 1
            population[date][0] = total
            population[date][1] += 1
        elif action_type == "chat_del_user":
            total -= 1
            population[date][0] = total
            population[date][2] -= 1  # stored negative so the bar points down
    # TODO: Add today to the list if doesn't exist
    #if population[-1] != today:
    #    population[today] = [total, 0, 0]
    dates = []
    members = []
    income = []
    outcome = []
    for date, vals in sorted(population.items(), key=lambda x: x[0]):
        dates.append(date)
        members.append(vals[0])
        income.append(vals[1])
        outcome.append(vals[2])
    fig, ax = plt.subplots()
    fig.set_size_inches(14, 6)
    # One x tick per month, formatted mm/yy.
    ax.xaxis.set_major_locator(mdates.MonthLocator())
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%m/%y'))
    ax.set_xlim(datetime.date(dates[0].year, dates[0].month, 1), datetime.date(dates[-1].year, dates[-1].month, dates[-1].day))
    # Round the y limits out to tens/twenties so bars and line fit comfortably.
    ax.set_ylim(10 * math.floor(min(outcome) / 10.), 20 * math.ceil((1 + total) / 20.))
    ax.plot(dates, members)
    ax.bar(dates, income, color="green", edgecolor = "none")
    ax.bar(dates, outcome, color="red", edgecolor = "none")
    plt.xlabel('Date')
    plt.ylabel('Members')
    plt.title('Population')
    plt.grid(True)
    plt.savefig(filepath, dpi=250)
    if show:
        plt.show()
def hourly_rate(timespan=3600):
    """Find the busiest window: the most messages inside `timespan` seconds.

    Returns:
        (count, window_start, window_end) -- the size of the densest run of
        messages and the datetimes of its first and last message.
    """
    from collections import deque  # local import: only needed here

    print("Calculating message rates...")
    buff = deque()  # timestamps of messages inside the current window
    top_date, top_rate = (0, 0), 0
    for sid, timestamp, message in c.execute("""SELECT id, timestamp, json FROM messages WHERE event="message" ORDER BY timestamp;"""):
        message = json.loads(message)
        # Append new message to the buffer
        if "text" in message:
            buff.append(timestamp)
        # Rows arrive ordered by timestamp, so expired entries are always at
        # the left end -- popping them is O(1) instead of rebuilding the whole
        # list on every row as the original did.
        while buff and buff[0] + timespan <= timestamp:
            buff.popleft()
        if len(buff) > top_rate:
            top_rate = len(buff)
            top_date = (buff[0], buff[-1])
            #print(top_date, top_rate, message["text"])
    return top_rate, datetime.datetime.fromtimestamp(top_date[0]), \
        datetime.datetime.fromtimestamp(top_date[1])
def messages_graph(filepath="messages.png", show=True):
    """Plot the number of messages per day as a line chart and save it."""
    print("Creating messages graphs...")
    messages = {}  # date -> message count
    prev_date = None
    for sid, timestamp, message in c.execute("""SELECT id, timestamp, json FROM messages WHERE event="message" ORDER BY timestamp;"""):
        message = json.loads(message)
        timestamp = datetime.datetime.fromtimestamp(timestamp)
        date = datetime.date(timestamp.year, timestamp.month, timestamp.day)
        # Init table for the date
        if date != prev_date:
            messages[date] = 0
            prev_date = date
        messages[date] += 1
    dates = []
    mgs = []
    for date, vals in sorted(messages.items(), key=lambda x: x[0]):
        dates.append(date)
        mgs.append(vals)
    fig, ax = plt.subplots()
    fig.set_size_inches(14, 6)
    # One x tick per month, formatted mm/yy.
    ax.xaxis.set_major_locator(mdates.MonthLocator())
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%m/%y'))
    ax.set_xlim(datetime.date(dates[0].year, dates[0].month, 1), datetime.date(dates[-1].year, dates[-1].month, dates[-1].day))
    #ax.set_ylim(10 * math.floor(min(outcome) / 10.), 20 * math.ceil((1 + total) / 20.))
    ax.plot(dates, mgs)
    plt.xlabel('Date')
    plt.ylabel('Messages')
    plt.title('Messages per day')
    plt.grid(True)
    plt.savefig(filepath, dpi=250)
    if show:
        plt.show()
def popular_emojis():
    """Return the 20 most frequent emoji codepoints as (ord, count) pairs."""
    print("Searching emojis...")
    # Emoji blocks: symbols & pictographs, emoticons, transport, misc symbols.
    highpoints = re.compile(u'['
        u'\U0001F300-\U0001F5FF'
        u'\U0001F600-\U0001F64F'
        u'\U0001F680-\U0001F6FF'
        u'\u2600-\u26FF\u2700-\u27BF]',
        re.UNICODE)
    counts = {}
    for mid, raw in c.execute("""SELECT id, json FROM messages WHERE event="message";"""):
        msg = json.loads(raw)
        if "text" not in msg:
            continue
        for codepoint in (ord(ch) for ch in highpoints.findall(msg["text"])):
            counts[codepoint] = counts.get(codepoint, 0) + 1
    ranked = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
    return ranked[:20]
def activity_graph(filepath="activity.png", show=True):
    """Plot the number of messages per hour of day and save it as a PNG."""
    print("Creating activity graph...")
    per_hour = [0] * 24
    for row_id, ts, raw in c.execute("""SELECT id, timestamp, json FROM messages WHERE event="message" ORDER BY id;"""):
        payload = json.loads(raw)  # parsed for parity with the query; only the timestamp is used
        hour = datetime.datetime.fromtimestamp(ts).hour
        per_hour[hour] += 1
    fig, ax = plt.subplots()
    fig.set_size_inches(14, 4)
    ax.set_xlim(0, 23)
    ax.bar(list(range(0, 24)), per_hour)
    plt.xlabel('Hours')
    plt.ylabel('Messages')
    plt.title('Activity')
    plt.grid(True)
    plt.savefig(filepath, dpi=250)
    if show:
        plt.show()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('name', type=str, help="Will be used as database name")
    parser.add_argument('--no-population', action='store_true', help="Disable population graph")
    parser.add_argument('--no-messages', action='store_true', help="Disable messages graph")
    parser.add_argument('--no-activity', action='store_true', help="Disable activity graph")
    parser.add_argument('--no-general', action='store_true', help="Disable general stats")
    parser.add_argument('--no-talkers', action='store_true', help="Disable top talkers")
    parser.add_argument('--no-topics', action='store_true', help="Disable topic list")
    parser.add_argument('--no-words', action='store_true', help="Disable most commonly used words list")
    parser.add_argument('--no-bots', action='store_true', help="Disable most commonly used bots/commands list")
    parser.add_argument('--no-emojis', action='store_true', help="Disable most commonly used emojis list")
    args = parser.parse_args()

    if len(args.name) < 3:
        # FIX: abort on an invalid name; previously execution continued anyway.
        print("Invalid name!")
        raise SystemExit(1)

    conn = sqlite3.connect("%s.db" % args.name)
    c = conn.cursor()

    # Try to create the output folder; it may already exist.
    try:
        os.mkdir(args.name)
    except OSError:
        pass

    # Render the requested graphs into the output folder.
    if not args.no_population:
        population_graph("%s/population.png" % args.name, show=False)
    if not args.no_messages:
        messages_graph("%s/messages.png" % args.name, show=False)
    if not args.no_activity:
        activity_graph("%s/activity.png" % args.name, show=False)

    out = open("%s/index.html" % args.name, "w")
    out.write("""<!DOCTYPE html><html lang="en"><head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>%s Telegram Statistics</title>
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css" crossorigin="anonymous">
<script src="https://code.jquery.com/jquery-2.2.4.min.js" crossorigin="anonymous"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/js/bootstrap.min.js" crossorigin="anonymous"></script>
</head><body>
<div class="container">
""" % args.name)
    out.write("<h1>%s Telegram Statistics</h1>" % args.name)

    # talker_stats() feeds both the general numbers and the top-talkers table.
    if not args.no_general or not args.no_talkers:
        talkers = talker_stats()

    if not args.no_population:
        out.write("<h2>Members</h2>\n")
        out.write("<img src='population.png' class='img-responsive' alt='Population over time'/>\n")
    if not args.no_messages:
        out.write("<h2>Messages per day</h2>\n")
        out.write("<img src='messages.png' class='img-responsive' alt='Messages per day'/>\n")
    if not args.no_activity:
        out.write("<h2>Activity</h2>\n")
        out.write("<img src='activity.png' class='img-responsive' alt=''/>\n")

    if not args.no_general:
        out.write("<h2>General numbers</h2>\n<table class='table tabler-striped'>\n")
        top_rate, top_start, top_end = hourly_rate()
        # Totals over all talkers; stats tuples are (messages, words, stickers, photos).
        messages = 0
        stickers = 0
        photos = 0
        for talker, stats in talkers:
            messages += stats[0]
            stickers += stats[2]
            photos += stats[3]
        out.write("<tr><td>Messages</td><td>%d</td></tr>\n" % messages)
        out.write("<tr><td>Top speed</td><td>%d messages/hour (%s-%s)</td></tr>\n" % (top_rate, top_start.strftime("%d. %B %Y %I:%M"), top_end.strftime("%I:%M")))
        out.write("<tr><td>Stickers</td><td>%d (%.1f%% of messages)</td></tr>\n" % (stickers, (100.0 * stickers) / messages))
        out.write("<tr><td>Media</td><td>%d (%.1f%% of messages)</td></tr>\n" % (photos, (100.0 * photos) / messages))
        out.write("</table>\n")

    if not args.no_talkers:
        out.write("<h2>Top 15 Talkers</h2>\n")
        out.write("<ul class=\"nav nav-tabs\">" \
                  "<li class=\"active\"><a data-toggle=\"tab\" href=\"#all\">All-time</a></li>"\
                  "<li><a data-toggle=\"tab\" href=\"#week\">Last week</a></li>" \
                  "<li><a data-toggle=\"tab\" href=\"#month\">Last month</a></li>"\
                  "<li><a data-toggle=\"tab\" href=\"#year\">Last year</a></li></ul>" \
                  "<div class=\"tab-content\">\n")
        # (tab id, css class marking the initially active tab, span in days).
        timeranges = [
            ("all", "active", 3600),
            ("week", "", 7),
            ("month", "", 31),
            ("year", "", 365)
        ]
        for trange, active, span in timeranges:
            talks = talkers if trange == "all" else talker_stats(span)
            top_talkers = sorted(talks, key=lambda x: x[1][0], reverse=True)[:15]
            out.write("<div id=\"%s\" class=\"tab-pane %s\"><table class='table tabler-striped'>\n" % (trange, active))
            out.write("\t<tr><th>#</th><th>Talker</th><th>Messages</th><th>Words</th><th>WPM</th><th>Stickers</th><th>Media</th></tr>\n")
            pos = 1
            for talker, (messages, words, stickers, photos) in top_talkers:
                out.write("\t<tr><td>%d</td><td>%s</td><td>%d</td><td>%d</td><td>%.1f</td><td>%d</td><td>%d</td></tr>\n" % \
                          (pos, talker.replace("_", " "), messages, words, words / messages, stickers, photos))
                pos += 1
            out.write("</table></div>\n")

    if not args.no_bots:
        cmds, bots = bot_spammers()
        out.write("<h2>Bot spammers</h2>\n<b>Most used bots:</b> ")
        for bot, count in bots:
            out.write("%s (%d), " % (bot, count))
        out.write("\n<table class='table'><tr>\n")
        for cmd, users in cmds:
            out.write("<td><b>%s</b><br/>" % cmd)
            for user, count in users:
                out.write("%s (%d), <br/>" % (user.replace("_", " "), count))
            out.write("</td>\n")
        out.write("</tr></table>\n")

    if not args.no_emojis:
        out.write("<h2>Most popular emojis</h2>\n")
        for emoji, count in popular_emojis():
            out.write("<img width=\"32px\" src=\"http://emojione.com/wp-content/uploads/assets/emojis/%x.svg\" title=\"%d uses\"/>" % (emoji, count))

    if not args.no_words:
        out.write("<h2>100 most commonly used words</h2>\n<p>\n")
        # FIX: the comprehension variable used to shadow the db cursor name `c`.
        out.write(", ".join(["%s (%d)" % pair for pair in most_commonly_used_words()[:100]]))
        out.write("</p>\n")

    if not args.no_topics:
        out.write("<h2>Latest topics</h2>\n<table class='table tabler-striped'>\n")
        for timestamp, title, changer in chat_renames()[:10]:
            out.write("\t<tr><td>%s</td><td>Changed by %s (%s)</td></tr>\n" % (title, changer, timestamp.strftime("%d. %B %Y %I:%M")))
        # TODO: Add deltatime
        out.write("</table>\n")

    out.write("<p>Generated %s with <a href='https://github.com/petrinm/tgstats'>tgstats</a></p>\n" % datetime.datetime.now().strftime("%d. %B %Y %H:%M"))
    out.write("\n</div>\n</body></html>")
    # FIX: flush and close the report file; it was previously left open.
    out.close()
| {
"content_hash": "c188277ba1ec667581effc7f19aa1d0a",
"timestamp": "",
"source": "github",
"line_count": 511,
"max_line_length": 162,
"avg_line_length": 33.19569471624266,
"alnum_prop": 0.5672935211931852,
"repo_name": "petrinm/tgstats",
"id": "bb66d1809606a51d4b0420a79ed8e01bc9edaa90",
"size": "16993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generate.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20676"
}
],
"symlink_target": ""
} |
import traceback
import cattr
import attr
import flask_transmute
from flask import Flask, Blueprint
from flask_transmute import (
route, annotate, Response, APIException
)
from typing import (
Optional, Sequence, Callable, TypeVar, Mapping, Union, Dict, List
)
# from falcon import HTTPInvalidParam, HTTPMissingParam
from survey_stats import log
from survey_stats import state as st
from survey_stats.types import T
# Module-level logger for this microservice.
logger = log.getLogger(__name__)
# NOTE(review): these module globals are not referenced below -- setup_app()
# stores configuration on the Flask app instead; confirm before removing.
db_cfg = None
cache_dir = None
# WSGI application object; the /stats route below is registered against it.
app = Flask(__name__)
@attr.s
class SvySlice(object):
    """Request body for POST /stats: selects a slice of a survey dataset."""
    # Dataset key; used by compute() to look up the survey in st.dset.
    d: str = attr.ib()
    # Passed as the 1st arg to fetch_stats_for_slice -- presumably the
    # question id; TODO confirm against that API.
    q: str = attr.ib()
    # Passed as the 2nd arg to fetch_stats_for_slice -- presumably the
    # response variable; TODO confirm.
    r: str = attr.ib()
    # Variable names (3rd arg to fetch_stats_for_slice).
    vs: Sequence[str] = attr.ib()
    # Filter mapping: variable name -> selected values (4th arg).
    f: Mapping[str, Sequence[str]] = attr.ib()
@attr.s
class SvyStats(dict):
    """Response record type for /stats; behaves like a plain dict.

    NOTE(review): decorated with @attr.s but declares no attributes --
    confirm the decorator is intentional before changing it.
    """
    pass
@route(app, body_parameters="s", paths='/stats', methods=['POST'])
def compute(s: SvySlice) -> [SvyStats]:
    """POST /stats: compute statistics for the requested survey slice.

    The request body is deserialized into a SvySlice by flask-transmute.
    Returns a list of per-record dicts (one per row of the result frame).
    Raises APIException on any failure in the underlying fetch.

    NOTE(review): the list-literal return annotation appears to be the
    flask-transmute convention for "list of SvyStats"; confirm before
    changing it to typing.List[SvyStats].
    """
    try:
        # Look up the dataset by key, fetch the slice, and flatten the
        # resulting frame to a list of row dicts.
        svy = st.dset[s.d]
        result = (svy.fetch_stats_for_slice(s.q, s.r, s.vs, s.f)
                  .to_dict(orient='records'))
        logger.info('got the results!', res=result)
    except Exception as ex:
        # Surface any worker failure as a transmute API error response.
        raise APIException('worker failure!' + str(ex))
    return result
def setup_app(dbc, cdir, use_feather):
    """Record runtime configuration, initialize shared state, return the app."""
    # Keep the raw settings on the Flask config for later inspection.
    app.config['dbc'] = dbc
    app.config['cache_dir'] = cdir
    # Bring up shared state with only init_des enabled; survey and soc
    # initialization are explicitly skipped for this service.
    st.initialize(dbc, cdir,
                  init_des=True,
                  use_feather=use_feather,
                  init_svy=False,
                  init_soc=False)
    return app
| {
"content_hash": "e5580448e28fd2472e1cd7d512d9bd8e",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 69,
"avg_line_length": 24.857142857142858,
"alnum_prop": 0.6479885057471264,
"repo_name": "semanticbits/survey_stats",
"id": "722f9c527df557ecdefc12ffd139ba760cc755ed",
"size": "1392",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/survey_stats/microservice.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1361"
},
{
"name": "Python",
"bytes": "101408"
},
{
"name": "Shell",
"bytes": "3258"
}
],
"symlink_target": ""
} |
import unittest
from pyrfc import Connection
from tests.config import PARAMS as params
class TestTT:
    """Tests covering RFC table-type parameters (variable and structured)."""

    def setup_method(self, test_method):
        # A fresh, live connection is opened before every test.
        self.conn = Connection(**params)
        assert self.conn.alive

    def teardown_method(self, test_method):
        # Close the connection and verify it really went down.
        self.conn.close()
        assert not self.conn.alive

    def test_TABLE_TYPE(self):
        result = self.conn.call(
            "/COE/RBP_PAM_SERVICE_ORD_CHANG",
            IV_ORDERID="4711",
            IT_NOTICE_NOTIFICATION=[{"": "ABCD"}, {"": "XYZ"}],
        )
        returns = result["ET_RETURN"]
        assert len(returns) > 0
        # The first return entry is the expected error message for order 4711.
        first = returns[0]
        assert first["TYPE"] == "E"
        assert first["ID"] == "IWO_BAPI"
        assert first["NUMBER"] == "121"
        assert first["MESSAGE_V1"] == "4711"
if __name__ == "__main__":
    # Allow running this pytest-style module directly via unittest's CLI.
    unittest.main()
| {
"content_hash": "792b54bcac19384917113e3b974abbf3",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 69,
"avg_line_length": 26,
"alnum_prop": 0.5659340659340659,
"repo_name": "SAP/PyRFC",
"id": "9346a9bcfe831383d63f3b365e91bc5075612bf2",
"size": "1079",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_table_type.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "419"
},
{
"name": "Cython",
"bytes": "129697"
},
{
"name": "Python",
"bytes": "94422"
},
{
"name": "Shell",
"bytes": "2541"
}
],
"symlink_target": ""
} |
import pytest
@pytest.mark.xfail(reason="Blows up with a 500 no matter what")
def test_200_success(petstore):
    """findPetsByTags should return only sold Pet objects for the tag."""
    found = petstore.pet.findPetsByTags(tags=['string']).result()
    assert found
    for animal in found:
        assert type(animal).__name__ == 'Pet'
        assert animal.status == 'sold'
@pytest.mark.xfail(reason="Don't know how to cause a 400")
def test_400_invalid_tag_value(petstore):
    # Placeholder: intentionally failing until a 400 response can be provoked.
    assert False
| {
"content_hash": "dbf4ad4aefdfc299f261e8526b848b73",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 64,
"avg_line_length": 27.533333333333335,
"alnum_prop": 0.6731234866828087,
"repo_name": "vi4m/bravado",
"id": "3d03971004dd9817b93a38315f8c625f3992942c",
"size": "413",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/petstore/pet/findPetsByTags_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "281"
},
{
"name": "Python",
"bytes": "99546"
}
],
"symlink_target": ""
} |
from CsDatabag import CsDataBag
from CsRedundant import *
class CsStaticRoutes(CsDataBag):
    """Apply static-route entries from the data bag to the kernel routing table."""

    def process(self):
        # Walk every entry in the bag; "id" is bag metadata, not a route.
        logging.debug("Processing CsStaticRoutes file ==> %s" % self.dbag)
        for item in self.dbag:
            if item == "id":
                continue
            self.__update(self.dbag[item])

    def __update(self, route):
        # route is a dict with 'network', 'gateway' and a 'revoke' flag.
        if route['revoke']:
            # Revoked routes are removed unconditionally.
            command = "ip route del %s via %s" % (route['network'], route['gateway'])
            CsHelper.execute(command)
        else:
            # Only add the route when it is not already present in the table.
            command = "ip route show | grep %s | awk '{print $1, $3}'" % route['network']
            result = CsHelper.execute(command)
            if not result:
                route_command = "ip route add %s via %s" % (route['network'], route['gateway'])
                CsHelper.execute(route_command)
"content_hash": "f6313428829e561ccd865f11896c3f0d",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 95,
"avg_line_length": 35.869565217391305,
"alnum_prop": 0.5575757575757576,
"repo_name": "resmo/cloudstack",
"id": "57b259aabc4ec2dbed0aa9e63c25fc2847b9f160",
"size": "1649",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "systemvm/patches/debian/config/opt/cloud/bin/cs/CsStaticRoutes.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1451"
},
{
"name": "Batchfile",
"bytes": "11926"
},
{
"name": "C#",
"bytes": "2356211"
},
{
"name": "CSS",
"bytes": "335738"
},
{
"name": "FreeMarker",
"bytes": "4917"
},
{
"name": "Groovy",
"bytes": "153137"
},
{
"name": "HTML",
"bytes": "151164"
},
{
"name": "Java",
"bytes": "33712712"
},
{
"name": "JavaScript",
"bytes": "7719277"
},
{
"name": "Python",
"bytes": "11019815"
},
{
"name": "Ruby",
"bytes": "896"
},
{
"name": "Shell",
"bytes": "770039"
}
],
"symlink_target": ""
} |
from dask.compatibility import entry_points
def test_entry_points():
assert "pytest" in [ep.name for ep in entry_points(group="console_scripts")]
| {
"content_hash": "4b3c88c7f9b9cc75012b0ad4f84d0296",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 80,
"avg_line_length": 30.4,
"alnum_prop": 0.743421052631579,
"repo_name": "dask/dask",
"id": "175a94e71079df51a1649b4f6c67658bf90859c5",
"size": "152",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "dask/tests/test_compatibility.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jinja",
"bytes": "6086"
},
{
"name": "Python",
"bytes": "4591450"
},
{
"name": "Shell",
"bytes": "5098"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
import os
import subprocess
import sys
from json import dumps, loads
DEFAULT_DATA_TABLE_NAME = "kallisto_indexes"
def get_id_name( params, dbkey, fasta_description=None):
# TODO: ensure sequence_id is unique and does not already appear in location file
sequence_id = params['param_dict']['sequence_id']
if not sequence_id:
sequence_id = dbkey
sequence_name = params['param_dict']['sequence_name']
if not sequence_name:
sequence_name = fasta_description
if not sequence_name:
sequence_name = dbkey
return sequence_id, sequence_name
def build_kallisto_index( data_manager_dict, options, params, sequence_id, sequence_name ):
data_table_name = options.data_table_name or DEFAULT_DATA_TABLE_NAME
target_directory = params[ 'output_data' ][0]['extra_files_path']
if not os.path.exists( target_directory ):
os.mkdir( target_directory )
fasta_base_name = os.path.split( options.fasta_filename )[-1]
sym_linked_fasta_filename = os.path.join( target_directory, fasta_base_name )
os.symlink( options.fasta_filename, sym_linked_fasta_filename )
args = [ 'kallisto', 'index' ]
args.extend( [ sym_linked_fasta_filename, '-i', sequence_id ] )
proc = subprocess.Popen( args=args, shell=False, cwd=target_directory )
return_code = proc.wait()
if return_code:
print("Error building index.", file=sys.stderr)
sys.exit( return_code )
data_table_entry = dict( value=sequence_id, dbkey=options.fasta_dbkey, name=sequence_name, path=sequence_id )
_add_data_table_entry( data_manager_dict, data_table_name, data_table_entry )
def _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry ):
data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
data_manager_dict['data_tables'][ data_table_name ] = data_manager_dict['data_tables'].get( data_table_name, [] )
data_manager_dict['data_tables'][ data_table_name ].append( data_table_entry )
return data_manager_dict
def main():
# Parse Command Line
parser = argparse.ArgumentParser()
parser.add_argument( '--output', dest='output', action='store', type=str, default=None )
parser.add_argument( '--fasta_filename', dest='fasta_filename', action='store', type=str, default=None )
parser.add_argument( '--fasta_dbkey', dest='fasta_dbkey', action='store', type=str, default=None )
parser.add_argument( '--fasta_description', dest='fasta_description', action='store', type=str, default=None )
parser.add_argument( '--data_table_name', dest='data_table_name', action='store', type=str, default='kallisto_indexes' )
options = parser.parse_args()
filename = options.output
params = loads( open( filename ).read() )
data_manager_dict = {}
if options.fasta_dbkey in [ None, '', '?' ]:
raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( options.fasta_dbkey ) )
sequence_id, sequence_name = get_id_name( params, dbkey=options.fasta_dbkey, fasta_description=options.fasta_description )
# build the index
build_kallisto_index( data_manager_dict, options, params, sequence_id, sequence_name )
# save info to json file
open( filename, 'w' ).write( dumps( data_manager_dict ) )
if __name__ == "__main__":
main()
| {
"content_hash": "4ba450524beb652fb1c58aabd23919f7",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 126,
"avg_line_length": 42.4625,
"alnum_prop": 0.6803061524874889,
"repo_name": "dpryan79/tools-iuc",
"id": "50fd5482ea3014b1302fb9b90ee1bffc26ba71be",
"size": "3498",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "data_managers/data_manager_kallisto_index_builder/data_manager/kallisto_index_builder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "4220"
},
{
"name": "HTML",
"bytes": "11172600"
},
{
"name": "Mako",
"bytes": "2116"
},
{
"name": "Max",
"bytes": "140358"
},
{
"name": "OpenEdge ABL",
"bytes": "1960016"
},
{
"name": "Pep8",
"bytes": "87474"
},
{
"name": "Perl",
"bytes": "59018"
},
{
"name": "Python",
"bytes": "640901"
},
{
"name": "R",
"bytes": "244557"
},
{
"name": "Rebol",
"bytes": "1225"
},
{
"name": "Roff",
"bytes": "3011"
},
{
"name": "Shell",
"bytes": "79414"
},
{
"name": "UnrealScript",
"bytes": "660637"
},
{
"name": "eC",
"bytes": "24"
}
],
"symlink_target": ""
} |
import os
import time
import xml.etree.ElementTree as ET
if __name__ == "__main__":
baseURL = os.path.abspath(os.path.join(os.getcwd(),os.pardir)) + '\\MyProgram\\ExperimentConFile\\'
tree = ET.parse(baseURL + 'Experiments_File.xml')
root = tree.getroot()
ListOfExperimentFiles = []
for child in root:
if(child.tag == 'Experiment'):
ListOfExperimentFiles.append(child.text)
for exp in ListOfExperimentFiles:
print 'Starting: ' + exp
os.system("python runner2.py \"" +baseURL + exp + "\"")
print 'Ending: ' + exp | {
"content_hash": "c5ece98b51e87b3dfc9b54b8368cd952",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 100,
"avg_line_length": 25.428571428571427,
"alnum_prop": 0.6797752808988764,
"repo_name": "ucbtrans/sumo-project",
"id": "d10b63f2af94f189cece75d157e8f53d03078bfd",
"size": "534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/xavier/code/ProgramLauncher.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "246259"
},
{
"name": "Makefile",
"bytes": "42239"
},
{
"name": "Python",
"bytes": "139969"
},
{
"name": "Ruby",
"bytes": "44482"
}
],
"symlink_target": ""
} |
"""Host action implementations"""
from osc_lib.command import command
from osc_lib import utils
from openstackclient.i18n import _
class ListHost(command.Lister):
_description = _("List hosts")
def get_parser(self, prog_name):
parser = super(ListHost, self).get_parser(prog_name)
parser.add_argument(
"--zone",
metavar="<zone>",
help=_("Only return hosts in the availability zone")
)
return parser
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
columns = (
"Host Name",
"Service",
"Zone"
)
data = compute_client.api.host_list(parsed_args.zone)
return (columns,
(utils.get_dict_properties(
s, columns,
) for s in data))
class SetHost(command.Command):
_description = _("Set host properties")
def get_parser(self, prog_name):
parser = super(SetHost, self).get_parser(prog_name)
parser.add_argument(
"host",
metavar="<host>",
help=_("Host to modify (name only)")
)
status = parser.add_mutually_exclusive_group()
status.add_argument(
'--enable',
action='store_true',
help=_("Enable the host")
)
status.add_argument(
'--disable',
action='store_true',
help=_("Disable the host")
)
maintenance = parser.add_mutually_exclusive_group()
maintenance.add_argument(
'--enable-maintenance',
action='store_true',
help=_("Enable maintenance mode for the host")
)
maintenance.add_argument(
'--disable-maintenance',
action='store_true',
help=_("Disable maintenance mode for the host")
)
return parser
def take_action(self, parsed_args):
kwargs = {}
if parsed_args.enable:
kwargs['status'] = 'enable'
if parsed_args.disable:
kwargs['status'] = 'disable'
if parsed_args.enable_maintenance:
kwargs['maintenance_mode'] = 'enable'
if parsed_args.disable_maintenance:
kwargs['maintenance_mode'] = 'disable'
compute_client = self.app.client_manager.compute
compute_client.api.host_set(
parsed_args.host,
**kwargs
)
class ShowHost(command.Lister):
_description = _("Display host details")
def get_parser(self, prog_name):
parser = super(ShowHost, self).get_parser(prog_name)
parser.add_argument(
"host",
metavar="<host>",
help=_("Name of host")
)
return parser
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
columns = (
"Host",
"Project",
"CPU",
"Memory MB",
"Disk GB"
)
data = compute_client.api.host_show(parsed_args.host)
return (columns,
(utils.get_dict_properties(
s, columns,
) for s in data))
| {
"content_hash": "52d4bd7882ae6d8913c2269e9a3f5065",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 64,
"avg_line_length": 28.00862068965517,
"alnum_prop": 0.5297014465989536,
"repo_name": "dtroyer/python-openstackclient",
"id": "07c92a8c16644d53f047569b6633cf7c05e99e2c",
"size": "3862",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstackclient/compute/v2/host.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4040230"
},
{
"name": "Shell",
"bytes": "299"
}
],
"symlink_target": ""
} |
from lacuna.building import MyBuilding
class beach10(MyBuilding):
    """Beach-section building number 10; a thin wrapper over MyBuilding."""
    # Endpoint name -- presumably used by MyBuilding to route API calls;
    # confirm against the MyBuilding implementation.
    path = 'beach10'

    def __init__( self, client, body_id:int = 0, building_id:int = 0 ):
        super().__init__( client, body_id, building_id )
| {
"content_hash": "0b9e40ff43b47a6905b715903d9354e2",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 71,
"avg_line_length": 31.142857142857142,
"alnum_prop": 0.6376146788990825,
"repo_name": "tmtowtdi/MontyLacuna",
"id": "8945895f2a5b1ab939d46bd4bd4d1b2dc192db9c",
"size": "219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/lacuna/buildings/beach/beach10.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36255146"
},
{
"name": "Shell",
"bytes": "2766"
}
],
"symlink_target": ""
} |
"""Tests for SpeedTest integration."""
import speedtest
from homeassistant import config_entries
from homeassistant.components import speedtestdotnet
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_setup_with_config(hass):
    """Test that we import the config and setup the integration."""
    # YAML-style configuration exercising the import path of the integration.
    config = {
        speedtestdotnet.DOMAIN: {
            speedtestdotnet.CONF_SERVER_ID: "1",
            speedtestdotnet.CONF_MANUAL: True,
            speedtestdotnet.CONF_SCAN_INTERVAL: "00:01:00",
        }
    }
    # Patch out the real speedtest client; setup should still succeed.
    with patch("speedtest.Speedtest"):
        assert await async_setup_component(hass, speedtestdotnet.DOMAIN, config)
async def test_successful_config_entry(hass):
    """Test that SpeedTestDotNet is configured successfully."""
    entry = MockConfigEntry(domain=speedtestdotnet.DOMAIN, data={},)
    entry.add_to_hass(hass)

    # Patch the speedtest client and the platform forward so no network/IO runs.
    with patch("speedtest.Speedtest"), patch(
        "homeassistant.config_entries.ConfigEntries.async_forward_entry_setup",
        return_value=True,
    ) as forward_entry_setup:
        await hass.config_entries.async_setup(entry.entry_id)

    assert entry.state == config_entries.ENTRY_STATE_LOADED
    # The sensor platform must have been forwarded exactly for this entry.
    assert forward_entry_setup.mock_calls[0][1] == (entry, "sensor",)
async def test_setup_failed(hass):
    """Test SpeedTestDotNet failed due to an error."""
    entry = MockConfigEntry(domain=speedtestdotnet.DOMAIN, data={},)
    entry.add_to_hass(hass)

    # Simulate the speedtest client failing to fetch its configuration;
    # the entry should then be scheduled for a setup retry.
    with patch("speedtest.Speedtest", side_effect=speedtest.ConfigRetrievalError):
        await hass.config_entries.async_setup(entry.entry_id)
        assert entry.state == config_entries.ENTRY_STATE_SETUP_RETRY
async def test_unload_entry(hass):
    """Test removing SpeedTestDotNet."""
    entry = MockConfigEntry(domain=speedtestdotnet.DOMAIN, data={},)
    entry.add_to_hass(hass)

    with patch("speedtest.Speedtest"):
        await hass.config_entries.async_setup(entry.entry_id)

    # Unloading must succeed and fully remove the integration's state.
    assert await hass.config_entries.async_unload(entry.entry_id)
    await hass.async_block_till_done()
    assert entry.state == config_entries.ENTRY_STATE_NOT_LOADED
    assert speedtestdotnet.DOMAIN not in hass.data
| {
"content_hash": "e66cc5b1c0023cb2feba78758f675f84",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 82,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.7164716471647165,
"repo_name": "pschmitt/home-assistant",
"id": "7b7eed67c0c94cbb7ff42bdbf2032d8339df1f89",
"size": "2222",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "tests/components/speedtestdotnet/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1522"
},
{
"name": "Python",
"bytes": "24807200"
},
{
"name": "Shell",
"bytes": "4342"
}
],
"symlink_target": ""
} |
from builtins import str as text
from future.utils import iterkeys
from future.utils import itervalues
from recipe_engine import recipe_test_api
class RawIOTestApi(recipe_test_api.RecipeTestApi):  # pragma: no cover
  """Simulation API for the raw_io module's output placeholders in tests."""

  @recipe_test_api.placeholder_step_data
  @staticmethod
  def output(data, retcode=None, name=None):
    """Returns an output Placeholder for the provided data.

    The data is expected to be bytes (i.e. str in python2). However, to help
    with the Python3 migration, the unicode (str in python3) data will also be
    accepted and will be encoded to bytes internally because in Python3,
    `raw_io.output('foo')` pass will actually 'foo' as type 'str' instead of
    'bytes' to this method and we have lots of usages like this. Please
    make 'foo' a byte literal like `b'foo'`. We may drop the unicode support in
    the future.
    """
    if isinstance(data, text):
      # TODO(yiwzhang): implicitly encode the data to bytes to avoid excessive
      # errors during migration to python3. After we drop python2 support,
      # consider raise ValueError instead for non-bytes data.
      data = data.encode('utf-8')
    if not isinstance(data, (type(None), bytes)):
      raise ValueError(
          'expected bytes, got %s: %r' % (type(data), data))
    return data, retcode, name

  @recipe_test_api.placeholder_step_data
  @staticmethod
  def output_text(data, retcode=None, name=None):
    """Returns an output Placeholder for the provided text data.

    data must be either str (unicode in py2) or bytes (str in py2) that has
    valid encoded utf-8 text in it.
    """
    # Bytes input is decoded so the placeholder always carries text.
    if isinstance(data, bytes):
      data = data.decode('utf-8')
    if not isinstance(data, (type(None), text)):
      raise ValueError(
          'expected None or utf-8 text, got %s: %r' % (type(data), data))
    return data, retcode, name

  @recipe_test_api.placeholder_step_data
  @staticmethod
  def output_dir(files_dict, retcode=None, name=None):
    """Use to mock an `output_dir` placeholder.

    Note that slashes should match the platform that this test is targeting.
    i.e. if this test is targeting Windows, you need to use backslashes.

    Example:
      yield api.test('tname') + api.step_data('sname', api.raw_io.output_dir({
        "some/file": "contents of some/file",
      }))
    """
    # Keys are relative paths (str); values are raw file contents (bytes).
    assert type(files_dict) is dict
    assert all(isinstance(key, str) for key in iterkeys(files_dict))
    assert all(isinstance(value, bytes) for value in itervalues(files_dict))
    return files_dict, retcode, name

  def stream_output(self, data, stream='stdout', retcode=None, name=None):
    """Mocks step stdout/stderr with the given bytes data."""
    return self._stream_output(data, self.output,
                               stream=stream,
                               retcode=retcode,
                               name=name)

  def stream_output_text(self, data, stream='stdout', retcode=None, name=None):
    """Mocks step stdout/stderr with the given text data."""
    return self._stream_output(data, self.output_text,
                               stream=stream,
                               retcode=retcode,
                               name=name)

  def _stream_output(self, data, to_step_data_fn,
                     stream='stdout', retcode=None, name=None):
    """Builds StepTestData with the placeholder attached to stdout or stderr."""
    ret = recipe_test_api.StepTestData()
    assert stream in ('stdout', 'stderr')
    step_data = to_step_data_fn(data, retcode=retcode, name=name)
    # Attach the unwrapped placeholder under the requested stream attribute.
    setattr(ret, stream, step_data.unwrap_placeholder())
    if retcode:
      ret.retcode = retcode
    return ret

  @recipe_test_api.placeholder_step_data('output')
  @staticmethod
  def backing_file_missing(retcode=None, name=None):
    """Simulates a missing backing file.

    Only valid if the corresponding placeholder has `leak_to` specified.
    """
    # Passing None as the data of a placeholder causes the placeholder to
    # behave during testing as if its backing file was missing.
    return None, retcode, name
| {
"content_hash": "188d65cf3a11ecb5c43fb0fb4418128c",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 79,
"avg_line_length": 40.270833333333336,
"alnum_prop": 0.6580444904293844,
"repo_name": "luci/recipes-py",
"id": "c62edff1866237a2608ffd8e7bfd77ddfa7af5e7",
"size": "4040",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "recipe_modules/raw_io/test_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "26"
},
{
"name": "Python",
"bytes": "900422"
},
{
"name": "Shell",
"bytes": "5746"
}
],
"symlink_target": ""
} |
__author__ = 'yfauser'
from tests.config import *
from nsxramlclient.client import NsxClient
def get_vdnscopes(session):
    """List all transport zones, dump the response, and return its objectId."""
    response = session.read('vdnScopes')
    session.view_response(response)
    return response['objectId']
def get_vdnscope(session, vdn_scope):
    """Read a single transport zone, dump the response, and return its objectId."""
    response = session.read('vdnScope', uri_parameters={'scopeId': vdn_scope})
    session.view_response(response)
    return response['objectId']
def create_vdnscope(session, name, cluster_moid_list, description='created by nsxramlclient',
                    control_plane_mode='UNICAST_MODE'):
    """Create a transport zone spanning the given clusters.

    The zone is created with the first cluster; any additional clusters are
    added afterwards via the 'expand' action. Returns the new zone's objectId.
    """
    # Start from the RAML body example and fill in the first cluster.
    vdn_create_spec = session.extract_resource_body_example('vdnScopes', 'create')
    vdn_create_spec['vdnScope']['clusters']['cluster']['cluster']['objectId'] = cluster_moid_list[0]
    vdn_create_spec['vdnScope']['name'] = name
    vdn_create_spec['vdnScope']['description'] = description
    vdn_create_spec['vdnScope']['controlPlaneMode'] = control_plane_mode
    vdn_scope = session.create('vdnScopes', request_body_dict=vdn_create_spec)['objectId']
    # Expand the new zone one cluster at a time for the remaining clusters.
    if len(cluster_moid_list) > 1:
        for cluster in cluster_moid_list[1:]:
            vdn_edit_spec = session.extract_resource_body_example('vdnScope', 'create')
            vdn_edit_spec['vdnScope']['objectId'] = vdn_scope
            vdn_edit_spec['vdnScope']['clusters']['cluster']['cluster']['objectId'] = cluster
            session.create('vdnScope', uri_parameters={'scopeId': vdn_scope},
                           query_parameters_dict={'action': 'expand'},
                           request_body_dict=vdn_edit_spec)
    return vdn_scope
def shrink_vdn_scope(session, vdn_scope, cluster_moid_list):
    """Remove the given clusters from a transport zone, one 'shrink' call each."""
    for cluster in cluster_moid_list:
        # Each removal reuses the RAML body example with the target cluster set.
        vdn_edit_spec = session.extract_resource_body_example('vdnScope', 'create')
        vdn_edit_spec['vdnScope']['objectId'] = vdn_scope
        vdn_edit_spec['vdnScope']['clusters']['cluster']['cluster']['objectId'] = cluster
        session.create('vdnScope', uri_parameters={'scopeId': vdn_scope}, query_parameters_dict={'action': 'shrink'},
                       request_body_dict=vdn_edit_spec)
def update_vdnscope_attributes(session, vdn_scope, new_name='Updated the name', new_desc='And updated description',
                               control_plane_mode='HYBRID_MODE'):
    """Update a transport zone's name, description and control-plane mode."""
    vdn_update_spec = session.extract_resource_body_example('vdnScopeAttribUpdate', 'update')
    vdn_update_spec['vdnScope']['name'] = new_name
    vdn_update_spec['vdnScope']['description'] = new_desc
    vdn_update_spec['vdnScope']['objectId'] = vdn_scope
    vdn_update_spec['vdnScope']['controlPlaneMode'] = control_plane_mode
    scope_update_resp = session.update('vdnScopeAttribUpdate', uri_parameters={'scopeId': vdn_scope},
                                       request_body_dict=vdn_update_spec)
    session.view_response(scope_update_resp)
def delete_vdn_scope(session, vdn_scope):
    """Delete the transport zone identified by ``vdn_scope`` and echo the API response."""
    session.view_response(session.delete('vdnScope', uri_parameters={'scopeId': vdn_scope}))
def main():
    """Exercise the transport-zone CRUD helpers against a live NSX Manager."""
    session = NsxClient(nsxraml_file, nsxmanager, nsx_username, nsx_password, debug=True)
    # Create a zone spanning two clusters, then read it back.
    scope_id = create_vdnscope(session, 'TZ1', ['domain-c26', 'domain-c28'])
    get_vdnscopes(session)
    get_vdnscope(session, scope_id)
    # Drop one cluster, change the attributes, and verify the result.
    shrink_vdn_scope(session, scope_id, ['domain-c28'])
    update_vdnscope_attributes(session, scope_id)
    get_vdnscope(session, scope_id)
    #delete_vdn_scope(session, scope_id)
# Run the integration demo only when executed directly, not on import.
if __name__ == "__main__":
    main()
| {
"content_hash": "50e47504f1747659a72937ecda2cf0a7",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 117,
"avg_line_length": 41.10588235294118,
"alnum_prop": 0.6608471665712651,
"repo_name": "vmware/nsxramlclient",
"id": "1560171ca72deac023c97b02c72171228d8b759b",
"size": "4615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/scopes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "181224"
}
],
"symlink_target": ""
} |
#imports
from gopigo import *
import time
__author__ = 'Robert Hayek'
# Move the range-finder servo to its starting angle (80) before any scanning begins.
servo(80)
class Pigo:
    """Obstacle-avoidance helper for the GoPiGo robot.

    Sweeps the ultrasonic range finder across the servo arc, records the
    distance at each angle, and looks for a wide-enough clear window to
    drive through.
    """

    # NOTE(review): these are class-level (shared) attributes; fine for a
    # single robot, but per-instance state would belong in __init__.
    sweep = [None] * 160  # the list to hold scanning data, indexed by servo angle
    MIN_DIST = 50  # distance used when plotting a clear direction... longer so we're planning farther ahead
    status = {'isMoving': False, 'servo': 90, 'leftSpeed': 175, 'rightSpeed': 175, "distance": 100}

    def __init__(self):
        print("NOW RUNNING OBSTACLE AVOIDER")

    def servoSweep(self):
        """Sweep the servo from 15 to 145 degrees in 5-degree steps, storing one reading per angle."""
        for angle in range(15, 150, 5):
            servo(angle)
            time.sleep(.10)  # give the servo time to settle before reading
            self.sweep[angle] = us_dist(15)
            print(str(self.sweep[angle]) + " MM AWAY FROM ORIGIN")

    def findPath(self):
        """Return True if the last sweep holds 20 consecutive readings beyond MIN_DIST.

        Fixes relative to the original: the run counter is no longer reset on
        every loop iteration, the readings themselves are iterated (instead of
        using a reading as a list index), unscanned slots (None) break the run,
        and MIN_DIST is resolved on the instance rather than as an undefined
        global name.
        """
        counter = 0
        for dist in self.sweep:
            if dist is not None and dist > self.MIN_DIST:
                counter += 1
                if counter == 20:
                    return True
            else:
                counter = 0
        return False

    def turnAround(self):
        """Rotate in place (to the right) for one second."""
        right_rot()
        time.sleep(1)
butler = Pigo()
''' while True:
if butler.servoScan():
butler.safeDrive()
if butler.findPath():
butler.turnTo(butler.findAngle())
else:
butler.turnAround()
butler.stop()
'''
butler.turnAround()
| {
"content_hash": "937b241de70501f6776e9cdf7a3c7e01",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 107,
"avg_line_length": 24.96153846153846,
"alnum_prop": 0.536979969183359,
"repo_name": "hayekr/GoPiGoButler",
"id": "74b2a6eac824e5a9a01a8b6cbde0d8c7c2efffaa",
"size": "1410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "obst_avoidance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8733"
},
{
"name": "HTML",
"bytes": "22238"
},
{
"name": "JavaScript",
"bytes": "7655"
},
{
"name": "Python",
"bytes": "40175"
},
{
"name": "Shell",
"bytes": "2793"
}
],
"symlink_target": ""
} |
"""Build and install the windspharm package."""
# Copyright (c) 2012-2018 Andrew Dawson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os.path
from setuptools import setup
import versioneer
# Packages and ancillary data files shipped with the distribution.
packages = [
    'windspharm',
    'windspharm.examples',
    'windspharm.tests',
]

package_data = {
    'windspharm.examples': ['example_data/*'],
    'windspharm.tests': ['data/regular/*.npy', 'data/gaussian/*.npy'],
}

# The PyPI long description is the project README, rendered as Markdown.
readme_path = os.path.join(os.path.dirname(__file__), 'README.md')
with open(readme_path, 'r') as readme:
    long_description = readme.read()

setup(
    name='windspharm',
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description='vector wind analysis in spherical coordinates',
    author='Andrew Dawson',
    author_email='dawson@atm.ox.ac.uk',
    url='http://ajdawson.github.com/windspharm/',
    long_description=long_description,
    long_description_content_type='text/markdown',
    packages=packages,
    package_data=package_data,
    install_requires=['numpy', 'pyspharm >= 1.0.8'],
)
| {
"content_hash": "a9ede363b5445342c2288e9f3acfd1d3",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 79,
"avg_line_length": 42.770833333333336,
"alnum_prop": 0.7243058938139308,
"repo_name": "ajdawson/windspharm",
"id": "51fdec6c7fd3e9df98e6f690c7c83975a8515df7",
"size": "2053",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "247467"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the tangible template for the Black Sun light ship weapon (variant s04)."""
    weapon = Tangible()
    weapon.template = "object/tangible/ship/attachment/weapon/shared_blacksun_light_weapon_s04.iff"
    weapon.attribute_template_id = 8
    weapon.stfName("item_n","ship_attachment")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return weapon
"content_hash": "9e06dbf9bdc3bce82229fcf0fd004dce",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 96,
"avg_line_length": 25.23076923076923,
"alnum_prop": 0.7103658536585366,
"repo_name": "anhstudios/swganh",
"id": "30b9ec8c723037f1730aa56ae2ad135e230a766a",
"size": "473",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/ship/attachment/weapon/shared_blacksun_light_weapon_s04.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
from functools import wraps
"""
Helper decorators for handling context from skills.
"""
def adds_context(context, words=''):
    """Decorator adding context to the Adapt context manager.

    The wrapped handler runs first; its first positional argument (the skill
    instance) then receives ``set_context(context, words)`` before the
    handler's return value is passed through.

    Args:
        context (str): context keyword to insert
        words (str): optional string content of the keyword
    """
    def decorator(handler):
        @wraps(handler)
        def wrapper(*call_args, **call_kwargs):
            result = handler(*call_args, **call_kwargs)
            call_args[0].set_context(context, words)
            return result
        return wrapper
    return decorator
def removes_context(context):
    """Decorator removing context from the Adapt context manager.

    The wrapped handler runs first; its first positional argument (the skill
    instance) then receives ``remove_context(context)`` before the handler's
    return value is passed through.

    Args:
        context (str): context keyword to remove
    """
    def decorator(handler):
        @wraps(handler)
        def wrapper(*call_args, **call_kwargs):
            result = handler(*call_args, **call_kwargs)
            call_args[0].remove_context(context)
            return result
        return wrapper
    return decorator
| {
"content_hash": "ffbb7472ffa401fc3b14bd87534ad197",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 65,
"avg_line_length": 27.105263157894736,
"alnum_prop": 0.6194174757281553,
"repo_name": "MycroftAI/mycroft-core",
"id": "1b795c171b906d16e472f0dbcca85ada3fbc6848",
"size": "1610",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "mycroft/skills/context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3791"
},
{
"name": "Python",
"bytes": "1370285"
},
{
"name": "QML",
"bytes": "18805"
},
{
"name": "Shell",
"bytes": "85326"
}
],
"symlink_target": ""
} |
"""
Tests for models
"""
from django.core.exceptions import ValidationError
from courses.factories import ProgramFactory
from micromasters.factories import UserFactory
from roles import roles
from roles.models import Role
from search.base import MockedESTestCase
class MicroMastersRoleTest(MockedESTestCase):
    """
    Tests for the MicroMastersRole model
    """

    @classmethod
    def setUpTestData(cls):
        # Zero-argument super() for consistency with tearDown below.
        super().setUpTestData()

        cls.user = UserFactory.create()
        cls.program1 = ProgramFactory.create()
        cls.program2 = ProgramFactory.create()

    def tearDown(self):
        super().tearDown()
        # Roles are created per-test; wipe them so tests stay independent.
        Role.objects.all().delete()

    def test_role_available(self):
        """
        Simple test for all the roles available
        """
        for role_key in Role.ASSIGNABLE_ROLES:
            assert role_key in (roles.Staff.ROLE_ID, roles.Instructor.ROLE_ID, )

    def test_one_role_in_program(self):
        """
        The same user cannot have different roles in the same program
        """
        Role.objects.create(
            user=self.user,
            program=self.program1,
            role=roles.Staff.ROLE_ID
        )
        with self.assertRaises(ValidationError):
            Role.objects.create(
                user=self.user,
                program=self.program1,
                role=roles.Instructor.ROLE_ID
            )

    def test_one_role_in_multiple_program(self):
        """
        The same user cannot have different roles even in different programs
        """
        Role.objects.create(
            user=self.user,
            program=self.program1,
            role=roles.Staff.ROLE_ID
        )
        with self.assertRaises(ValidationError):
            Role.objects.create(
                user=self.user,
                program=self.program2,
                role=roles.Instructor.ROLE_ID
            )

    def test_role_modification(self):
        """
        The role for a user can be modified if there is not another same role for another program
        """
        role = Role.objects.create(
            user=self.user,
            program=self.program1,
            role=roles.Staff.ROLE_ID
        )
        # role can be modified
        role.role = roles.Instructor.ROLE_ID
        role.save()
        # create a second role for the user in another program
        Role.objects.create(
            user=self.user,
            program=self.program2,
            role=roles.Instructor.ROLE_ID
        )
        # the role cannot be modified any more
        with self.assertRaises(ValidationError):
            role.role = roles.Staff.ROLE_ID
            role.save()
| {
"content_hash": "6417628bad7593639c0e9d5888c2f3e4",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 97,
"avg_line_length": 30.258426966292134,
"alnum_prop": 0.5941329372447085,
"repo_name": "mitodl/micromasters",
"id": "48362e19ab1f7bc33446ed3d81e945808c5530fe",
"size": "2693",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roles/models_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9764"
},
{
"name": "Dockerfile",
"bytes": "958"
},
{
"name": "HTML",
"bytes": "84519"
},
{
"name": "JavaScript",
"bytes": "1462849"
},
{
"name": "Procfile",
"bytes": "407"
},
{
"name": "Python",
"bytes": "2098424"
},
{
"name": "SCSS",
"bytes": "135082"
},
{
"name": "Shell",
"bytes": "10764"
}
],
"symlink_target": ""
} |
import unittest
from brew.constants import IMPERIAL_UNITS
from fixtures import recipe
from fixtures import recipe_dme
from fixtures import recipe_lme
class TestRecipeExtract(unittest.TestCase):
    """Verify that the LME and DME extract recipes serialize to the same data
    as the reference recipe.

    Improvements over the original: the deprecated ``assertEquals`` alias is
    replaced by ``assertEqual``, and the duplicated key-by-key comparison loop
    is factored into a single private helper.
    """

    def setUp(self):
        # Define Recipes
        self.recipe = recipe
        self.recipe_lme = recipe_lme
        self.recipe_dme = recipe_dme

        self.assertEqual(self.recipe.units, IMPERIAL_UNITS)
        self.assertEqual(self.recipe_lme.units, IMPERIAL_UNITS)
        self.assertEqual(self.recipe_dme.units, IMPERIAL_UNITS)

    def _assert_recipe_data_equal(self, other_recipe):
        """Compare self.recipe's serialized data against other_recipe's, key by key."""
        recipe_data = self.recipe.to_dict()[u"data"]
        other_data = other_recipe.to_dict()[u"data"]
        for key in recipe_data.keys():
            # TODO: The colors are withing 0.1 of each other
            # but its hard to test in this way. Write another test.
            if key == u"total_wort_color_map":
                continue
            self.assertEqual(recipe_data[key], other_data[key], msg=key)

    def test_recipe_is_recipe_lme(self):
        self._assert_recipe_data_equal(self.recipe_lme)

    def test_recipe_is_recipe_dme(self):
        self._assert_recipe_data_equal(self.recipe_dme)
| {
"content_hash": "10640124bd242d01345444a798eb38e4",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 78,
"avg_line_length": 40.16216216216216,
"alnum_prop": 0.6339165545087483,
"repo_name": "chrisgilmerproj/brewday",
"id": "53b61833f375e7cad3a448c619a9da0acc94e276",
"size": "1510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_recipes_extract.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1528"
},
{
"name": "Python",
"bytes": "313918"
},
{
"name": "Shell",
"bytes": "213"
}
],
"symlink_target": ""
} |
def main(request, response):
    """WPT handler: serve a script exposing the request's Referer header.

    The returned JavaScript assigns the raw ``Referer`` value (empty string
    when absent) to ``window.referrer``.
    """
    referrer = request.headers.get(b"referer", b"")
    body = b"window.referrer = '" + referrer + b"'"
    return (200, [(b"Content-Type", b"text/javascript")], body)
| {
"content_hash": "7f32c49d6d4e882bbb2467094bdff745",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 76,
"avg_line_length": 55.25,
"alnum_prop": 0.6606334841628959,
"repo_name": "chromium/chromium",
"id": "e36631479e60fab893de7e02d55a3f2ec2a32a15",
"size": "221",
"binary": false,
"copies": "23",
"ref": "refs/heads/main",
"path": "third_party/blink/web_tests/external/wpt/common/security-features/subresource/referrer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from pyasn1.compat.octets import null
from pyasn1.error import PyAsn1Error
from pysnmp import debug
from pysnmp import nextid
from pysnmp.error import PySnmpError
from pysnmp.proto import cache
from pysnmp.proto import errind
from pysnmp.proto import error
from pysnmp.proto.api import verdec # XXX
from pysnmp.smi import builder
from pysnmp.smi import instrum
class MsgAndPduDispatcher(object):
    """SNMP engine PDU & message dispatcher. Exchanges SNMP PDU's with
    applications and serialized messages with transport level.

    Implements the dispatcher described in RFC 3412: section numbers in the
    comments below refer to that document.
    """

    def __init__(self, mibInstrumController=None):
        # Fall back to a fresh MIB instrumentation controller when the caller
        # does not supply one.
        if mibInstrumController is None:
            self.mibInstrumController = instrum.MibInstrumController(
                builder.MibBuilder()
            )
        else:
            self.mibInstrumController = mibInstrumController

        self.mibInstrumController.mibBuilder.loadModules(
            'SNMPv2-MIB', 'SNMP-MPD-MIB', 'SNMP-COMMUNITY-MIB',
            'SNMP-TARGET-MIB', 'SNMP-USER-BASED-SM-MIB'
        )

        # Requests cache
        self._cache = cache.Cache()

        # Registered context engine IDs
        self._appsRegistration = {}

        # Source of sendPduHandle and cache of requesting apps
        self._sendPduHandle = nextid.Integer(0xffffff)

        # To pass transport info to app (legacy)
        self._transportInfo = {}

    # legacy
    def getTransportInfo(self, stateReference):
        """Return (transportDomain, transportAddress) recorded for a pending request."""
        if stateReference in self._transportInfo:
            return self._transportInfo[stateReference]

        else:
            raise error.ProtocolError(
                'No data for stateReference %s' % stateReference)

    # Application registration with dispatcher

    # 4.3.1
    def registerContextEngineId(self, contextEngineId, pduTypes, processPdu):
        """Register application with dispatcher"""
        # 4.3.2 -> no-op

        # 4.3.3
        for pduType in pduTypes:
            k = contextEngineId, pduType
            if k in self._appsRegistration:
                raise error.ProtocolError(
                    'Duplicate registration %r/%s' % (contextEngineId, pduType))

            # 4.3.4
            self._appsRegistration[k] = processPdu

        debug.logger & debug.FLAG_DSP and debug.logger(
            'registerContextEngineId: contextEngineId %r pduTypes '
            '%s' % (contextEngineId, pduTypes))

    # 4.4.1
    def unregisterContextEngineId(self, contextEngineId, pduTypes):
        """Unregister application with dispatcher"""
        # 4.3.4
        if contextEngineId is None:
            # Default to local snmpEngineId
            contextEngineId, = self.mibInstrumController.mibBuilder.importSymbols(
                '__SNMP-FRAMEWORK-MIB', 'snmpEngineID')

        for pduType in pduTypes:
            k = contextEngineId, pduType
            if k in self._appsRegistration:
                del self._appsRegistration[k]

        debug.logger & debug.FLAG_DSP and debug.logger(
            'unregisterContextEngineId: contextEngineId %r pduTypes '
            '%s' % (contextEngineId, pduTypes))

    def getRegisteredApp(self, contextEngineId, pduType):
        """Return the processPdu callback registered for (contextEngineId, pduType).

        Falls back to a wildcard (empty contextEngineId) registration;
        implicitly returns None when no application claims this PDU type.
        """
        k = contextEngineId, pduType
        if k in self._appsRegistration:
            return self._appsRegistration[k]

        k = null, pduType
        if k in self._appsRegistration:
            return self._appsRegistration[k]  # wildcard

    # Dispatcher <-> application API

    # 4.1.1
    def sendPdu(self, snmpEngine, transportDomain, transportAddress,
                messageProcessingModel, securityModel, securityName,
                securityLevel, contextEngineId, contextName,
                pduVersion, PDU, expectResponse, timeout=0,
                cbFun=None, cbCtx=None):
        """PDU dispatcher -- prepare and serialize a request or notification"""
        # 4.1.1.2
        k = int(messageProcessingModel)
        if k in snmpEngine.messageProcessingSubsystems:
            mpHandler = snmpEngine.messageProcessingSubsystems[k]

        else:
            raise error.StatusInformation(
                errorIndication=errind.unsupportedMsgProcessingModel)

        debug.logger & debug.FLAG_DSP and debug.logger(
            'sendPdu: securityName %s, PDU\n'
            '%s' % (securityName, PDU.prettyPrint()))

        # 4.1.1.3
        sendPduHandle = self._sendPduHandle()

        if expectResponse:
            # Remember enough to match and time out the response later.
            self._cache.add(
                sendPduHandle,
                messageProcessingModel=messageProcessingModel,
                sendPduHandle=sendPduHandle,
                timeout=timeout + snmpEngine.transportDispatcher.getTimerTicks(),
                cbFun=cbFun,
                cbCtx=cbCtx
            )

            debug.logger & debug.FLAG_DSP and debug.logger(
                'sendPdu: current time %d ticks, one tick is %s '
                'seconds' % (snmpEngine.transportDispatcher.getTimerTicks(),
                             snmpEngine.transportDispatcher.getTimerResolution()))

        debug.logger & debug.FLAG_DSP and debug.logger(
            'sendPdu: new sendPduHandle %s, timeout %s ticks, cbFun '
            '%s' % (sendPduHandle, timeout, cbFun))

        origTransportDomain = transportDomain
        origTransportAddress = transportAddress

        # 4.1.1.4 & 4.1.1.5
        try:
            (transportDomain,
             transportAddress,
             outgoingMessage) = mpHandler.prepareOutgoingMessage(
                snmpEngine, origTransportDomain, origTransportAddress,
                messageProcessingModel, securityModel, securityName,
                securityLevel, contextEngineId, contextName,
                pduVersion, PDU, expectResponse, sendPduHandle
            )

            debug.logger & debug.FLAG_DSP and debug.logger(
                'sendPdu: MP succeeded')

        except PySnmpError:
            if expectResponse:
                self._cache.pop(sendPduHandle)

                self.releaseStateInformation(snmpEngine, sendPduHandle, messageProcessingModel)

            raise

        # 4.1.1.6
        if snmpEngine.transportDispatcher is None:
            if expectResponse:
                self._cache.pop(sendPduHandle)

            raise error.PySnmpError('Transport dispatcher not set')

        snmpEngine.observer.storeExecutionContext(
            snmpEngine, 'rfc3412.sendPdu',
            dict(transportDomain=transportDomain,
                 transportAddress=transportAddress,
                 outgoingMessage=outgoingMessage,
                 messageProcessingModel=messageProcessingModel,
                 securityModel=securityModel,
                 securityName=securityName,
                 securityLevel=securityLevel,
                 contextEngineId=contextEngineId,
                 contextName=contextName,
                 pdu=PDU)
        )

        try:
            snmpEngine.transportDispatcher.sendMessage(
                outgoingMessage, transportDomain, transportAddress)

        except PySnmpError:
            if expectResponse:
                self._cache.pop(sendPduHandle)
            raise

        snmpEngine.observer.clearExecutionContext(
            snmpEngine, 'rfc3412.sendPdu')

        # Update cache with orignal req params (used for retrying)
        if expectResponse:
            self._cache.update(
                sendPduHandle,
                transportDomain=origTransportDomain,
                transportAddress=origTransportAddress,
                securityModel=securityModel,
                securityName=securityName,
                securityLevel=securityLevel,
                contextEngineId=contextEngineId,
                contextName=contextName,
                pduVersion=pduVersion,
                PDU=PDU
            )

        return sendPduHandle

    # 4.1.2.1
    def returnResponsePdu(self, snmpEngine, messageProcessingModel,
                          securityModel, securityName, securityLevel,
                          contextEngineId, contextName, pduVersion,
                          PDU, maxSizeResponseScopedPDU, stateReference,
                          statusInformation):
        """Serialize and transmit a response PDU for a previously received request."""
        # Extract input values and initialize defaults
        k = int(messageProcessingModel)
        if k in snmpEngine.messageProcessingSubsystems:
            mpHandler = snmpEngine.messageProcessingSubsystems[k]

        else:
            raise error.StatusInformation(
                errorIndication=errind.unsupportedMsgProcessingModel)

        debug.logger & debug.FLAG_DSP and debug.logger(
            'returnResponsePdu: PDU '
            '%s' % (PDU and PDU.prettyPrint() or "<empty>",))

        # 4.1.2.2
        try:
            (transportDomain,
             transportAddress,
             outgoingMessage) = mpHandler.prepareResponseMessage(
                snmpEngine, messageProcessingModel, securityModel,
                securityName, securityLevel, contextEngineId, contextName,
                pduVersion, PDU, maxSizeResponseScopedPDU, stateReference,
                statusInformation
            )

            debug.logger & debug.FLAG_DSP and debug.logger(
                'returnResponsePdu: MP suceeded')

        except error.StatusInformation:
            # 4.1.2.3
            raise

        mibBuilder = self.mibInstrumController.mibBuilder

        # Handle oversized messages XXX transport constrains?
        snmpEngineMaxMessageSize, = mibBuilder.importSymbols(
            '__SNMP-FRAMEWORK-MIB', 'snmpEngineMaxMessageSize')

        if (snmpEngineMaxMessageSize.syntax and
                len(outgoingMessage) > snmpEngineMaxMessageSize.syntax):
            snmpSilentDrops, = mibBuilder.importSymbols(
                '__SNMPv2-MIB', 'snmpSilentDrops')

            snmpSilentDrops.syntax += 1

            raise error.StatusInformation(errorIndication=errind.tooBig)

        snmpEngine.observer.storeExecutionContext(
            snmpEngine,
            'rfc3412.returnResponsePdu',
            dict(transportDomain=transportDomain,
                 transportAddress=transportAddress,
                 outgoingMessage=outgoingMessage,
                 messageProcessingModel=messageProcessingModel,
                 securityModel=securityModel,
                 securityName=securityName,
                 securityLevel=securityLevel,
                 contextEngineId=contextEngineId,
                 contextName=contextName,
                 pdu=PDU)
        )

        # 4.1.2.4
        snmpEngine.transportDispatcher.sendMessage(
            outgoingMessage, transportDomain, transportAddress)

        snmpEngine.observer.clearExecutionContext(
            snmpEngine, 'rfc3412.returnResponsePdu')

    # 4.2.1
    def receiveMessage(self, snmpEngine, transportDomain,
                       transportAddress, wholeMsg):
        """Message dispatcher -- de-serialize message into PDU"""
        mibBuilder = self.mibInstrumController.mibBuilder

        # 4.2.1.1
        snmpInPkts, = mibBuilder.importSymbols(
            '__SNMPv2-MIB', 'snmpInPkts')
        snmpInPkts.syntax += 1

        restOfWholeMsg = null  # XXX fix decoder non-recursive return

        # 4.2.1.2
        try:
            msgVersion = verdec.decodeMessageVersion(wholeMsg)

        except error.ProtocolError:
            snmpInASNParseErrs, = mibBuilder.importSymbols(
                '__SNMPv2-MIB', 'snmpInASNParseErrs')
            snmpInASNParseErrs.syntax += 1
            return null  # n.b the whole buffer gets dropped

        debug.logger & debug.FLAG_DSP and debug.logger(
            'receiveMessage: msgVersion %s, msg decoded' % msgVersion)

        messageProcessingModel = msgVersion

        try:
            mpHandler = snmpEngine.messageProcessingSubsystems[
                int(messageProcessingModel)]

        except KeyError:
            snmpInBadVersions, = mibBuilder.importSymbols(
                '__SNMPv2-MIB', 'snmpInBadVersions')
            snmpInBadVersions.syntax += 1
            return restOfWholeMsg

        # 4.2.1.3 -- no-op

        # 4.2.1.4
        try:
            (messageProcessingModel,
             securityModel,
             securityName,
             securityLevel,
             contextEngineId,
             contextName,
             pduVersion,
             PDU,
             pduType,
             sendPduHandle,
             maxSizeResponseScopedPDU,
             statusInformation,
             stateReference) = mpHandler.prepareDataElements(
                snmpEngine, transportDomain, transportAddress, wholeMsg)

            debug.logger & debug.FLAG_DSP and debug.logger(
                'receiveMessage: MP succeded')

        except error.StatusInformation as exc:
            statusInformation = exc
            if 'sendPduHandle' in statusInformation:
                # Dropped REPORT -- re-run pending reqs queue as some
                # of them may be waiting for this REPORT
                debug.logger & debug.FLAG_DSP and debug.logger(
                    'receiveMessage: MP failed, statusInformation %s, '
                    'forcing retry' % statusInformation)
                self.__expireRequest(
                    statusInformation['sendPduHandle'],
                    self._cache.pop(statusInformation['sendPduHandle']),
                    snmpEngine,
                    statusInformation)

                return restOfWholeMsg

        except PyAsn1Error as exc:
            debug.logger & debug.FLAG_MP and debug.logger(
                'receiveMessage: %s' % exc)

            snmpInASNParseErrs, = mibBuilder.importSymbols(
                '__SNMPv2-MIB', 'snmpInASNParseErrs')

            snmpInASNParseErrs.syntax += 1

            return restOfWholeMsg

        debug.logger & debug.FLAG_DSP and debug.logger(
            'receiveMessage: PDU %s' % PDU.prettyPrint())

        # 4.2.2
        if sendPduHandle is None:
            # 4.2.2.1 (request or notification)

            debug.logger & debug.FLAG_DSP and debug.logger(
                'receiveMessage: pduType %s' % pduType)

            # 4.2.2.1.1
            processPdu = self.getRegisteredApp(contextEngineId, pduType)

            # 4.2.2.1.2
            if processPdu is None:
                # 4.2.2.1.2.a
                # BUGFIX: the original called bare importSymbols() here (a
                # NameError at runtime); resolve it on the local mibBuilder
                # as every other call site in this class does.
                snmpUnknownPDUHandlers, = mibBuilder.importSymbols(
                    '__SNMP-MPD-MIB', 'snmpUnknownPDUHandlers')

                snmpUnknownPDUHandlers.syntax += 1

                # 4.2.2.1.2.b
                statusInformation = {
                    'errorIndication': errind.unknownPDUHandler,
                    'oid': snmpUnknownPDUHandlers.name,
                    'val': snmpUnknownPDUHandlers.syntax
                }

                debug.logger & debug.FLAG_DSP and debug.logger(
                    'receiveMessage: unhandled PDU type')

                # 4.2.2.1.2.c
                try:
                    (destTransportDomain,
                     destTransportAddress,
                     outgoingMessage) = mpHandler.prepareResponseMessage(
                        snmpEngine, messageProcessingModel,
                        securityModel, securityName, securityLevel,
                        contextEngineId, contextName, pduVersion,
                        PDU, maxSizeResponseScopedPDU, stateReference,
                        statusInformation)

                    snmpEngine.transportDispatcher.sendMessage(
                        outgoingMessage, destTransportDomain,
                        destTransportAddress)

                except PySnmpError as exc:
                    debug.logger & debug.FLAG_DSP and debug.logger(
                        'receiveMessage: report failed, statusInformation %s' % exc)

                else:
                    debug.logger & debug.FLAG_DSP and debug.logger(
                        'receiveMessage: reporting succeeded')

                # 4.2.2.1.2.d
                return restOfWholeMsg

            else:
                snmpEngine.observer.storeExecutionContext(
                    snmpEngine, 'rfc3412.receiveMessage:request',
                    dict(transportDomain=transportDomain,
                         transportAddress=transportAddress,
                         wholeMsg=wholeMsg,
                         messageProcessingModel=messageProcessingModel,
                         securityModel=securityModel,
                         securityName=securityName,
                         securityLevel=securityLevel,
                         contextEngineId=contextEngineId,
                         contextName=contextName,
                         pdu=PDU))

                # pass transport info to app (legacy)
                if stateReference is not None:
                    self._transportInfo[stateReference] = (
                        transportDomain, transportAddress)

                # 4.2.2.1.3 (asynchronous function)
                processPdu(
                    snmpEngine, messageProcessingModel, securityModel,
                    securityName, securityLevel, contextEngineId,
                    contextName, pduVersion, PDU, maxSizeResponseScopedPDU,
                    stateReference)

                snmpEngine.observer.clearExecutionContext(
                    snmpEngine, 'rfc3412.receiveMessage:request')

                # legacy
                if stateReference is not None:
                    del self._transportInfo[stateReference]

                debug.logger & debug.FLAG_DSP and debug.logger(
                    'receiveMessage: processPdu initiated')

                return restOfWholeMsg

        else:
            # 4.2.2.2 (response)

            # 4.2.2.2.1
            cachedParams = self._cache.pop(sendPduHandle)

            # 4.2.2.2.2
            if cachedParams is None:
                snmpUnknownPDUHandlers, = mibBuilder.importSymbols(
                    '__SNMP-MPD-MIB', 'snmpUnknownPDUHandlers')

                snmpUnknownPDUHandlers.syntax += 1
                return restOfWholeMsg

            debug.logger & debug.FLAG_DSP and debug.logger(
                'receiveMessage: cache read by sendPduHandle %s' % sendPduHandle)

            # 4.2.2.2.3
            # no-op ? XXX

            snmpEngine.observer.storeExecutionContext(
                snmpEngine, 'rfc3412.receiveMessage:response',
                dict(transportDomain=transportDomain,
                     transportAddress=transportAddress,
                     wholeMsg=wholeMsg,
                     messageProcessingModel=messageProcessingModel,
                     securityModel=securityModel,
                     securityName=securityName,
                     securityLevel=securityLevel,
                     contextEngineId=contextEngineId,
                     contextName=contextName,
                     pdu=PDU))

            # 4.2.2.2.4
            processResponsePdu = cachedParams['cbFun']

            processResponsePdu(
                snmpEngine, messageProcessingModel, securityModel,
                securityName, securityLevel, contextEngineId, contextName,
                pduVersion, PDU, statusInformation,
                cachedParams['sendPduHandle'],
                cachedParams['cbCtx'])

            snmpEngine.observer.clearExecutionContext(
                snmpEngine, 'rfc3412.receiveMessage:response')

            debug.logger & debug.FLAG_DSP and debug.logger(
                'receiveMessage: processResponsePdu succeeded')

            return restOfWholeMsg

    def releaseStateInformation(self, snmpEngine, sendPduHandle,
                                messageProcessingModel):
        """Drop MP-subsystem and local cache state kept for a request handle."""
        k = int(messageProcessingModel)
        if k in snmpEngine.messageProcessingSubsystems:
            mpHandler = snmpEngine.messageProcessingSubsystems[k]
            mpHandler.releaseStateInformation(sendPduHandle)

        self._cache.pop(sendPduHandle)

    # Cache expiration stuff

    # noinspection PyUnusedLocal
    def __expireRequest(self, cacheKey, cachedParams, snmpEngine,
                        statusInformation=None):
        """Fail a pending request, either on timeout or on a forced error.

        Invoked by the cache's expire() sweep (statusInformation=None) or
        directly when a dropped REPORT forces an early retry.
        """
        timeNow = snmpEngine.transportDispatcher.getTimerTicks()
        timeoutAt = cachedParams['timeout']

        if statusInformation is None and timeNow < timeoutAt:
            return

        processResponsePdu = cachedParams['cbFun']

        debug.logger & debug.FLAG_DSP and debug.logger(
            '__expireRequest: req cachedParams %s' % cachedParams)

        # Fail timed-out requests
        if not statusInformation:
            statusInformation = error.StatusInformation(
                errorIndication=errind.requestTimedOut)

        self.releaseStateInformation(
            snmpEngine, cachedParams['sendPduHandle'],
            cachedParams['messageProcessingModel'])

        processResponsePdu(
            snmpEngine, None, None, None, None, None, None, None, None,
            statusInformation,
            cachedParams['sendPduHandle'], cachedParams['cbCtx'])

        return True

    # noinspection PyUnusedLocal
    def receiveTimerTick(self, snmpEngine, timeNow):
        """Periodic callback: time out pending requests that overran their deadline."""
        self._cache.expire(self.__expireRequest, snmpEngine)
| {
"content_hash": "3bc86ab0462d97a20fef370e0799621b",
"timestamp": "",
"source": "github",
"line_count": 580,
"max_line_length": 95,
"avg_line_length": 36.31896551724138,
"alnum_prop": 0.5856159506290055,
"repo_name": "etingof/pysnmp",
"id": "4e0f401cf35ff5af69c8ebae892e72c505070e96",
"size": "21222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysnmp/proto/rfc3412.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1453555"
},
{
"name": "Shell",
"bytes": "1312"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TickwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``cone.colorbar.tickwidth`` property (a number >= 0)."""

    def __init__(self, plotly_name="tickwidth", parent_name="cone.colorbar", **kwargs):
        # Supply the property defaults unless the caller overrides them.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("min", 0)
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
| {
"content_hash": "51dfd8ba5ffb1ec621ad1dac1eb799eb",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 87,
"avg_line_length": 37.666666666666664,
"alnum_prop": 0.6172566371681416,
"repo_name": "plotly/plotly.py",
"id": "01973200951e40754e944a2038f3aa48ad61d291",
"size": "452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/cone/colorbar/_tickwidth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""Generate application_credentials data."""
from __future__ import annotations
import json
from .model import Config, Integration
# Template for the generated module; {} is filled with the JSON-encoded list
# of integration domains that ship an application_credentials.py.
BASE = """
\"\"\"Automatically generated by hassfest.
To update, run python3 -m script.hassfest
\"\"\"
# fmt: off
APPLICATION_CREDENTIALS = {}
""".strip()
def generate_and_validate(integrations: dict[str, Integration], config: Config) -> str:
    """Validate and generate application_credentials data.

    Returns the rendered module text listing, in sorted domain order, every
    integration that ships an ``application_credentials.py``.
    """
    domains = [
        domain
        for domain, integration in sorted(integrations.items())
        if (integration.path / "application_credentials.py").is_file()
    ]
    return BASE.format(json.dumps(domains, indent=4))
def validate(integrations: dict[str, Integration], config: Config) -> None:
    """Validate application_credentials data.

    Renders the expected generated module, caches it on ``config``, and
    reports an error when the checked-in file is out of date.
    """
    generated_path = config.root / "homeassistant/generated/application_credentials.py"

    content = generate_and_validate(integrations, config)
    config.cache["application_credentials"] = content

    # Partial runs cannot judge the full generated file.
    if config.specific_integrations:
        return

    if generated_path.read_text(encoding="utf-8").strip() != content:
        config.add_error(
            "application_credentials",
            "File application_credentials.py is not up to date. Run python3 -m script.hassfest",
            fixable=True,
        )
def generate(integrations: dict[str, Integration], config: Config):
    """Generate application_credentials data.

    Writes the module text cached by validate() (plus a trailing newline)
    to homeassistant/generated/application_credentials.py.
    """
    output_path = (
        config.root / "homeassistant/generated/application_credentials.py"
    )
    content = config.cache["application_credentials"]
    output_path.write_text(content + "\n", encoding="utf-8")
| {
"content_hash": "6b1b7e394eb66bb69b979dfef86b35e5",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 96,
"avg_line_length": 29.53968253968254,
"alnum_prop": 0.6792047286405158,
"repo_name": "toddeye/home-assistant",
"id": "48d812dba02940ec3dc821a38ae2900da73876cb",
"size": "1861",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "script/hassfest/application_credentials.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from random import random
from time import sleep
from base10 import MetricHelper, MetricHandler
from base10.dialects import InfluxDBDialect
from base10.transports import UDPWriter
if __name__ == '__main__':
    # Metric schema: a single 'value' field plus a 'hostname' metadata tag.
    class MyMetric(MetricHelper):
        _name = 'metric'
        _fields = [
            'value',
        ]
        _metadata = [
            'hostname',
        ]

    # Handler that serializes metrics with the InfluxDB line-protocol
    # dialect and sends them over UDP to a local listener on port 10000.
    class InfluxDB(MetricHandler):
        _dialect = InfluxDBDialect()
        _writer = UDPWriter(host='127.0.0.1', port=10000)

    influxdb = InfluxDB()
    # Emit one random sample per second, forever (stop with Ctrl-C).
    while True:
        influxdb.write(MyMetric(value=random(), hostname='test'))
        sleep(1)
| {
"content_hash": "7e824f413d0dfacc05800196bb9e366a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 65,
"avg_line_length": 21.93103448275862,
"alnum_prop": 0.6053459119496856,
"repo_name": "mattdavis90/base10",
"id": "39eac653f934eaf4b07e035cda584d908ee676aa",
"size": "658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/influx_sender.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24213"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
import unittest
from tempfile import TemporaryDirectory
from pandas_datareader.data import DataReader
from pandas import to_datetime
from numpy import dtype
from lmk.utils import env
from lmk.cache import Cache
def date(s):
    """Parse the date string *s* and return it as a datetime.date."""
    parsed = to_datetime(s)
    return parsed.date()
DS = "google"
class CacheTestCase(unittest.TestCase):
    """Tests for `lmk.cache`."""
    def setUp(self):
        # env.logger.setLevel(logging.WARN)
        self.symbol = "TSLA"
        self.start = "2015-04-01"
        self.end = "2015-06-30"
        # NOTE(review): fetches live data via pandas_datareader; the
        # "google" data source has been retired upstream — confirm this
        # still works or stub the reader.
        self.h = DataReader(self.symbol, DS, self.start, self.end)
    def test_cache(self):
        # Exercises Cache.put/get across the disjoint/overlapping/hit
        # cases; the cache keeps only one contiguous date range per symbol.
        with TemporaryDirectory(prefix="lmk.") as tmpdir:
            cache = Cache(tmpdir)
            self.assertTrue(list(cache.range.columns) == ["start", "end"])
            self.assertEqual(cache.range.dtypes.loc["start"], dtype("<M8[ns]"))
            def cache_range():
                # Helper: the (start, end) dates currently cached for the symbol.
                r = cache.range.loc[self.symbol]
                return r["start"].date(), r["end"].date()
            # initial put
            cache.put(self.symbol, date(self.start), date(self.end), self.h)
            self.assertEqual(cache.range.dtypes.loc["end"], dtype("<M8[ns]"))
            self.assertEqual(cache_range(), (date(self.start), date(self.end)))
            # no data cached for the symbol.
            start, end = "2015-01-01", "2015-01-31"
            h = cache.get("NONEXIST", date(start), date(end))
            self.assertTrue(h is None)
            # on the left, no overlap
            start, end = "2015-01-01", "2015-01-31"
            h = cache.get(self.symbol, date(start), date(end))
            self.assertTrue(h is None)
            self.assertEqual(cache_range(), (date(self.start), date(self.end)))
            h1 = DataReader(self.symbol, DS, start, end)
            cache.put(self.symbol, date(start), date(end), h1)
            self.assertEqual(cache_range(), (date(self.start), date(self.end)))
            h = cache.get(self.symbol, date(start), date(end))
            self.assertTrue(h is None) # only the most recent range is saved.
            # on the right, no overlap
            start, end = "2016-01-01", "2016-05-31"
            h = cache.get(self.symbol, date(start), date(end))
            self.assertTrue(h is None)
            h1 = DataReader(self.symbol, DS, start, end)
            cache.put(self.symbol, date(start), date(end), h1)
            self.assertEqual(cache_range(), (date(start), date(end)))
            h = cache.get(self.symbol, date(start), date(end))
            self.assertTrue(h is not None) # only the most recent range is saved.
            # overlap on the left
            start, end = "2015-12-01", "2016-03-31"
            h = cache.get(self.symbol, date(start), date(end))
            self.assertTrue(h is None)
            h1 = DataReader(self.symbol, DS, start, end)
            cache.put(self.symbol, date(start), date(end), h1)
            self.assertEqual(cache_range(), (date(start), date("2016-05-31")))
            h = cache.get(self.symbol, date(start), date(end))
            self.assertTrue(h is not None) # cache extended
            # overlap on the right
            start, end = "2016-04-01", "2016-06-30"
            h = cache.get(self.symbol, date(start), date(end))
            self.assertTrue(h is None)
            h1 = DataReader(self.symbol, DS, start, end)
            cache.put(self.symbol, date(start), date(end), h1)
            self.assertEqual(cache_range(), (date("2015-12-01"), date("2016-06-30")))
            h = cache.get(self.symbol, date(start), date(end))
            self.assertTrue(h is not None) # cache extended
            # hit - part
            start, end = "2016-01-01", "2016-05-31"
            h = cache.get(self.symbol, date(start), date(end))
            self.assertTrue(h is not None)
            # hit - full
            start, end = "2015-12-01", "2016-06-30"
            h = cache.get(self.symbol, date(start), date(end))
            self.assertTrue(h is not None)
# Allow running the tests directly: `python test_cache.py`.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "1e18f2fc969f5f0548fee71f810f3033",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 85,
"avg_line_length": 38.58095238095238,
"alnum_prop": 0.564798815107381,
"repo_name": "dyno/LMK",
"id": "62091a4975bcc1edde1c680a0400b4cefc1e2049",
"size": "4051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lmk/test/test_cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "738482"
},
{
"name": "Python",
"bytes": "72466"
},
{
"name": "Shell",
"bytes": "1442"
}
],
"symlink_target": ""
} |
"""pyroven.tests
Contains tests for the pyroven application
"""
from datetime import datetime, timedelta
from base64 import b64encode
from string import maketrans
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from OpenSSL.crypto import FILETYPE_PEM, load_privatekey, sign
from pyroven import InvalidResponseError, MalformedResponseError
RAVEN_TEST_USER = 'test0001'
RAVEN_TEST_PWD = 'test'
RAVEN_NEW_USER = 'test0002'
GOOD_PRIV_KEY_PEM = """-----BEGIN RSA PRIVATE KEY-----
MIICWwIBAAKBgQC4RYvbSGb42EEEXzsz93Mubo0fdWZ7UJ0HoZXQch5XIR0Zl8AN
aLf3tVpRz4CI2JBUVpUjXEgzOa+wZBbuvczOuiB3BfNDSKKQaftxWKouboJRA5ac
xa3fr2JZc8O5Qc1J6Qq8E8cjuSQWlpxTGa0JEnbKV7/PVUFDuFeEI11e/wIDAQAB
AoGACr2jBUkXF3IjeAnE/aZyxEYVW7wQGSf9vzAf92Jvekyn0ZIS07VC4+FiPlqF
93QIFaJmVwVOAA5guztaStgtU9YX37wRPkFwrtKgjZcqV8ReQeC67bjo5v3Odht9
750F7mKWXctZrm0MD1PoDlkLvVZ2hDolHm5tpfP52jPvQ6ECQQDgtI4K3IuEVOIg
75xUG3Z86DMmwPmme7vsFgf2goWV+p4471Ang9oN7l+l+Jj2VISdz7GE7ZQwW6a1
IQev3+h7AkEA0e9oC+lCcYsMsI9vtXvB8s6Bpl0c1U19HUUWHdJIpluwvxF6SIL3
ug4EJPP+sDT5LvdV5cNy7nmO9uUd+Se2TQJAdxI2UrsbkzwHt7xA8rC60OWadWa8
4+OdaTUjcxUnBJqRTUpDBy1vVwKB3MknBSE0RQvR3canSBjI9iJSmHfmEQJAKJlF
49fOU6ryX0q97bjrPwuUoxmqs81yfrCXoFjEV/evbKPypAc/5SlEv+i3vlfgQKbw
Y6iyl0/GyBRzAXYemQJAVeChw15Lj2/uE7HIDtkqd8POzXjumOxKPfESSHKxRGnP
3EruVQ6+SY9CDA1xGfgDSkoFiGhxeo1lGRkWmz09Yw==
-----END RSA PRIVATE KEY-----
"""
BAD_PRIV_KEY_PEM = """-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQD5mkLpi7q6ROdu7khB3S9aanA0Zls7vvfGOmB80/yeylhGpsjA
jWen0VtSQke/NlEPGtO38tsV7CsuFnSmschvAnGrcJl76b0UOOHUgDTIoRxC6QDU
3claegwsrBA+sJEBbqx5RdXbIRGicPG/8qQ4Zm1SKOgotcbwiaor2yxZ2wIDAQAB
AoGBAPCgMpmLxzwDaUmcFbTJUvlLW1hoxNNYSu2jIZm1k/hRAcE60JYwvBkgz3UB
yMEh0AtLxYe0bFk6EHah11tMUPgscbCq73snJ++8koUw+csk22G65hOs51bVb7Aa
6JBe67oLzdtvgCUFAA2qfrKzWRZzAdhUirQUZgySZk+Xq1pBAkEA/kZG0A6roTSM
BVnx7LnPfsycKUsTumorpXiylZJjTi9XtmzxhrYN6wgZlDOOwOLgSQhszGpxVoMD
u3gByT1b2QJBAPtL3mSKdvwRu/+40zaZLwvSJRxaj0mcE4BJOS6Oqs/hS1xRlrNk
PpQ7WJ4yM6ZOLnXzm2mKyxm50Mv64109FtMCQQDOqS2KkjHaLowTGVxwC0DijMfr
I9Lf8sSQk32J5VWCySWf5gGTfEnpmUa41gKTMJIbqZZLucNuDcOtzUaeWZlZAkA8
ttXigLnCqR486JDPTi9ZscoZkZ+w7y6e/hH8t6d5Vjt48JVyfjPIaJY+km58LcN3
6AWSeGAdtRFHVzR7oHjVAkB4hutvxiOeiIVQNBhM6RSI9aBPMI21DoX2JRoxvNW2
cbvAhow217X9V0dVerEOKxnNYspXRrh36h7k4mQA+sDq
-----END RSA PRIVATE KEY-----
"""
EXAMPLE_RAVEN_RETURN_URL = 'http%3A%2F%2Fwww.example.org%2Fraven_return%2F'
def create_wls_response(raven_ver='2', raven_status='200', raven_msg='',
                        raven_issue=None,
                        raven_id='1347296083-8278-2',
                        raven_url=EXAMPLE_RAVEN_RETURN_URL,
                        raven_principal=RAVEN_TEST_USER, raven_auth='pwd',
                        raven_sso='', raven_life='36000', raven_params='',
                        raven_kid='901',
                        raven_key_pem=GOOD_PRIV_KEY_PEM):
    """Creates a valid WLS Response as the Raven test server would
    using keys from https://raven.cam.ac.uk/project/keys/demo_server/

    raven_issue defaults to the current UTC time at *call* time. (It
    previously defaulted to ``datetime.utcnow().strftime(...)`` in the
    signature, which Python evaluates once at import time, so every
    response issued later in a long test run carried a stale timestamp —
    enough to make the timeout-related tests unreliable.)
    """
    if raven_issue is None:
        # Generate the timestamp per call; default arguments are only
        # evaluated once, at function definition time.
        raven_issue = datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
    raven_pkey = load_privatekey(FILETYPE_PEM, raven_key_pem)
    trans_table = maketrans("+/=", "-._")
    # This is the data which is signed by Raven with their private key
    # Note data consists of full payload with exception of kid and sig
    # source: http://raven.cam.ac.uk/project/waa2wls-protocol.txt
    wls_response_data = [raven_ver, raven_status, raven_msg,
                         raven_issue, raven_id, raven_url,
                         raven_principal, raven_auth, raven_sso,
                         raven_life, raven_params]
    data = '!'.join(wls_response_data)
    raven_sig = b64encode(sign(raven_pkey, data, 'sha1'))
    # Full WLS-Response also includes the Raven-variant b64encoded sig
    # and the requisite Key ID which has been used for the signing
    # process
    wls_response_data.append(raven_kid)
    wls_response_data.append(str(raven_sig).translate(trans_table))
    return '!'.join(wls_response_data)
class RavenTestCase(TestCase):
    """RavenTestCase
    Authentication tests for the Raven service
    """
    fixtures = ['users.json']
    def __init__(self, *args, **kwargs):
        # NOTE(review): overriding TestCase.__init__ is unusual; the
        # client could be created in setUp() instead — confirm intent.
        self.client = Client()
        super(RavenTestCase, self).__init__(*args, **kwargs)
    def test_login_raven_not_local(self):
        """Tests login of user via raven, not in database"""
        self.client.get(
            reverse('raven_return'),
            {
                'WLS-Response': create_wls_response(
                    raven_principal=RAVEN_NEW_USER
                ),
            }
        )
        # Unknown principal must not end up authenticated.
        self.assertNotIn('_auth_user_id', self.client.session)
    def test_login_raven_local(self):
        """Tests login of user who exists in database"""
        self.client.get(
            reverse('raven_return'),
            {
                'WLS-Response': create_wls_response(),
            }
        )
        self.assertIn('_auth_user_id', self.client.session)
    def test_login_invalid_version_fails(self):
        # Protocol version other than the configured one must be rejected.
        with self.assertRaises(MalformedResponseError) as excep:
            self.client.get(
                reverse('raven_return'),
                {
                    'WLS-Response': create_wls_response(
                        raven_ver='3'
                    ),
                }
            )
        self.assertEqual(excep.exception.message,
                         'Version number does not match that in the '
                         'configuration')
        self.assertNotIn('_auth_user_id', self.client.session)
    def test_login_issue_future_fails(self):
        """Tests that Raven responses issued in the future fail validation"""
        with self.assertRaises(InvalidResponseError) as excep:
            raven_issue = datetime.utcnow() + timedelta(hours=1)
            raven_issue = raven_issue.strftime('%Y%m%dT%H%M%SZ')
            self.client.get(
                reverse('raven_return'),
                {
                    'WLS-Response': create_wls_response(
                        raven_issue=raven_issue
                    ),
                }
            )
        self.assertEqual(excep.exception.message,
                         'The timestamp on the response is in the future')
        self.assertNotIn('_auth_user_id', self.client.session)
    def test_login_issue_too_old_fails(self):
        """Tests that Raven responses which are older than PYROVEN_TIMEOUT +
        PYROVEN_MAX_CLOCK_SKEW are rejected"""
        with self.assertRaises(InvalidResponseError) as excep:
            raven_issue = datetime.utcnow() - timedelta(hours=1)
            raven_issue = raven_issue.strftime('%Y%m%dT%H%M%SZ')
            self.client.get(
                reverse('raven_return'),
                {
                    'WLS-Response': create_wls_response(
                        raven_issue=raven_issue
                    ),
                }
            )
        self.assertEqual(excep.exception.message,
                         'The response has timed out')
        self.assertNotIn('_auth_user_id', self.client.session)
    def test_login_wrong_private_key_fails(self):
        """Tests that Raven responses with invalid key fail"""
        with self.assertRaises(InvalidResponseError) as excep:
            self.client.get(
                reverse('raven_return'),
                {
                    'WLS-Response': create_wls_response(
                        raven_key_pem=BAD_PRIV_KEY_PEM
                    )
                }
            )
        self.assertEqual(excep.exception.message,
                         'The signature for this response is not valid.')
        self.assertNotIn('_auth_user_id', self.client.session)
    def test_create_raven_not_local_create_false(self):
        """When valid raven user authenticates, and PYROVEN_CREATE_USER is
        false, user is not created in database"""
        with self.settings(PYROVEN_CREATE_USER=False):
            self.client.get(
                reverse('raven_return'),
                {
                    'WLS-Response': create_wls_response(
                        raven_principal=RAVEN_NEW_USER
                    ),
                }
            )
            with self.assertRaises(User.DoesNotExist):
                User.objects.get(username=RAVEN_NEW_USER)
    def test_raven_user_not_local_create_true(self):
        """When valid raven user authenticates, and PYROVEN_CREATE_USER is true
        creates valid user in database"""
        with self.settings(PYROVEN_CREATE_USER=True):
            self.client.get(
                reverse('raven_return'),
                {
                    'WLS-Response': create_wls_response(
                        raven_principal=RAVEN_NEW_USER
                    )
                }
            )
            user = User.objects.get(username=RAVEN_NEW_USER)
            # Auto-created users must not get a usable local password.
            self.assertFalse(user.has_usable_password())
| {
"content_hash": "f385018198dca74d091159af3c33905c",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 79,
"avg_line_length": 38.44255319148936,
"alnum_prop": 0.6321673677219394,
"repo_name": "pyroven/django-pyroven",
"id": "82b005bb293d9482c6da6ca1d5c44e8a7eb7564c",
"size": "9034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyroven/tests/test_raven.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26455"
}
],
"symlink_target": ""
} |
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.api_schema.response.compute.v2_1 import hosts as schema
from tempest.common import service_client
class HostsClient(service_client.ServiceClient):
    """JSON client for the Nova os-hosts API."""

    def _host_action(self, hostname, action, resp_schema):
        # Shared GET helper for the startup/shutdown/reboot endpoints,
        # which differ only in URL suffix and response schema.
        resp, body = self.get("os-hosts/%s/%s" % (hostname, action))
        body = json.loads(body)
        self.validate_response(resp_schema, resp, body)
        return service_client.ResponseBody(resp, body['host'])

    def list_hosts(self, **params):
        """Lists all hosts, optionally filtered by query parameters."""
        url = 'os-hosts'
        if params:
            url = '%s?%s' % (url, urllib.urlencode(params))
        resp, body = self.get(url)
        body = json.loads(body)
        self.validate_response(schema.list_hosts, resp, body)
        return service_client.ResponseBodyList(resp, body['hosts'])

    def show_host(self, hostname):
        """Show detail information for the host."""
        resp, body = self.get("os-hosts/%s" % hostname)
        body = json.loads(body)
        self.validate_response(schema.get_host_detail, resp, body)
        return service_client.ResponseBodyList(resp, body['host'])

    def update_host(self, hostname, **kwargs):
        """Update a host's status and/or maintenance mode."""
        # Unspecified keys are sent explicitly as None.
        payload = {
            'status': None,
            'maintenance_mode': None,
        }
        payload.update(**kwargs)
        resp, body = self.put("os-hosts/%s" % hostname, json.dumps(payload))
        body = json.loads(body)
        self.validate_response(schema.update_host, resp, body)
        return service_client.ResponseBody(resp, body)

    def startup_host(self, hostname):
        """Startup a host."""
        return self._host_action(hostname, 'startup', schema.startup_host)

    def shutdown_host(self, hostname):
        """Shutdown a host."""
        return self._host_action(hostname, 'shutdown', schema.shutdown_host)

    def reboot_host(self, hostname):
        """Reboot a host."""
        return self._host_action(hostname, 'reboot', schema.reboot_host)
| {
"content_hash": "35d44ab3338a7d2adabefe0fd68453ab",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 69,
"avg_line_length": 34.26865671641791,
"alnum_prop": 0.625,
"repo_name": "varunarya10/tempest",
"id": "752af68178810226f453533921a8ca33f3554b49",
"size": "2898",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tempest/services/compute/json/hosts_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2752004"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
} |
from mangrove.errors.MangroveException import MangroveException
class EditSurveyResponseForm(object):
    """Binds submitted answers to a form model, validates them, and
    updates an existing survey response on save()."""
    def __init__(self, dbm, survey_response, form_model, form_answers):
        assert dbm is not None
        assert survey_response is not None
        assert form_model is not None
        self.saved = False
        self.dbm = dbm
        self.form_model = form_model
        # Bind the raw answers first so validation runs on bound values.
        self.form_model.bind(form_answers)
        self._cleaned_data, self.errors = form_model.validate_submission(values=form_model.bound_values())
        self.is_valid = (self.errors is None or len(self.errors) == 0)
        self.entity_type = form_model.entity_type
        self.survey_response = survey_response
        self.survey_response.set_form(form_model)
        self.survey_response.set_answers(form_model.bound_values())
    @property
    def unique_id_question_code(self):
        # Code of the first entity question, or None if the form has none.
        return self.form_model.entity_questions[0].code if self.form_model.entity_questions else None
    @property
    def data_record_id(self):
        return self.survey_response.data_record_id if self.survey_response is not None else None
    @property
    def is_registration(self):
        return self.form_model.is_entity_registration_form()
    def save(self):
        """Persist the validated answers onto the survey response.

        Must only be called when is_valid is True. On MangroveException
        the status is still recorded before the exception is re-raised.
        """
        assert self.is_valid
        try:
            self.survey_response.set_status(self.errors)
            self.survey_response.update(self.form_model, self.data())
            #self.entity.update_latest_data(data=self.data())
        except MangroveException as exception:
            self.survey_response.set_status(self.errors)
            self.errors = exception.message
            raise
        self.saved = True
        return self.survey_response
    def data(self):
        # Pairs of (field name, cleaned value) for every validated answer.
        return [(self.form_model.get_field_by_code(code).name, value)
                for (code, value) in
                (self._cleaned_data.items())]
| {
"content_hash": "988cd6b085fb5c9fb8f7c3ce9b0645a5",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 106,
"avg_line_length": 34.127272727272725,
"alnum_prop": 0.6483750665956314,
"repo_name": "ICT4H/dcs-mangrove",
"id": "d35052c5f0a90bf4bd8791298ceec4c9ba72eb31",
"size": "1877",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "mangrove/form_model/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "700265"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from api.models import Answer, Question, User
from django import forms
# Plain ModelAdmin registrations with no customizations.
# NOTE(review): the class-level `model` attribute appears informational;
# admin.site.register() supplies the model — confirm it can be dropped.
class AnswerAdmin(admin.ModelAdmin):
    model = Answer
class QuestionAdmin(admin.ModelAdmin):
    model = Question
# class UserForm(forms.ModelForm):
#     password = forms.CharField(widget=forms.PasswordInput)
#
#     def __init__(self, *args, **kwargs):
#         super(UserForm, self).__init__(*args, **kwargs)
#
#     class Meta:
#         model = User
class UserAdmin(admin.ModelAdmin):
    model = User
admin.site.register(Answer, AnswerAdmin)
admin.site.register(Question, QuestionAdmin)
admin.site.register(User, UserAdmin)
| {
"content_hash": "e922ed8cb8f6055c4cb7f5a79d0f052e",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 60,
"avg_line_length": 21.566666666666666,
"alnum_prop": 0.7063369397217929,
"repo_name": "Cookie-Monsters/uQu-Backend",
"id": "334ae316e44a87d9de4896827430dd6a339557e7",
"size": "647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9030"
}
],
"symlink_target": ""
} |
"""Group (Model) query functions.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from soc.logic.models import base
import soc.models.group
class Logic(base.Logic):
    """Logic methods for the Group model."""

    def __init__(self, model=soc.models.group.Group,
                 base_model=None, scope_logic=None):
        """Set up the model (and optional base model / scope logic)
        handled by this Logic instance.
        """
        super(Logic, self).__init__(model, base_model=base_model,
                                    scope_logic=scope_logic)

    def getKeyValuesFromEntity(self, entity):
        """Return the key values extracted from *entity*.

        Groups are assumed site-wide (no scope), so link_id alone
        identifies the entity; per-Program or per-Year Groups must
        override this method.

        Args:
          entity: the entity from which to extract the key values
        """
        return [entity.link_id]

    def getKeyValuesFromFields(self, fields):
        """Return the key values extracted from the *fields* dict.

        Same site-wide assumption as getKeyValuesFromEntity(); scoped
        Group subclasses must override.

        Args:
          fields: the dict from which to extract the key values
        """
        return [fields['link_id']]

    def getKeyFieldNames(self):
        """Return the key field names; just 'link_id' for a site-wide
        Group. Scoped Group subclasses must override.
        """
        return ['link_id']

    def isDeletable(self, entity):
        """Return whether the given Group entity can be deleted.

        A generic Group is always deletable; subclasses add their own
        deletion prerequisites.

        Args:
          entity: an existing Group entity in the Datastore
        """
        return True


logic = Logic()
| {
"content_hash": "4abbe9c4f0f261cf94cbacab56047e19",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 77,
"avg_line_length": 27.037974683544302,
"alnum_prop": 0.6793071161048689,
"repo_name": "SRabbelier/Melange",
"id": "2308498c4c123f9d6fe973a9221d821321d3fd5d",
"size": "2746",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/soc/logic/models/group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400472"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "Java",
"bytes": "1496"
},
{
"name": "JavaScript",
"bytes": "1623582"
},
{
"name": "PHP",
"bytes": "1032"
},
{
"name": "Perl",
"bytes": "177565"
},
{
"name": "Python",
"bytes": "15317793"
},
{
"name": "Ruby",
"bytes": "59"
},
{
"name": "Shell",
"bytes": "15303"
}
],
"symlink_target": ""
} |
"""HTML utilities suitable for global use."""
import html
import json
import re
from html.parser import HTMLParser
from urllib.parse import (
parse_qsl, quote, unquote, urlencode, urlsplit, urlunsplit,
)
from django.utils.encoding import punycode
from django.utils.functional import Promise, keep_lazy, keep_lazy_text
from django.utils.http import RFC3986_GENDELIMS, RFC3986_SUBDELIMS
from django.utils.safestring import SafeData, SafeString, mark_safe
from django.utils.text import normalize_newlines
# Configuration for urlize() function.
TRAILING_PUNCTUATION_CHARS = '.,:;!'
WRAPPING_PUNCTUATION = [('(', ')'), ('[', ']')]
# List of possible strings used for bullets in bulleted lists.
DOTS = ['·', '*', '\u2022', '•', '•', '•']
# Matches an '&' that is not already part of an entity or char reference.
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
# urlize() split pattern: runs of whitespace, quotes or angle brackets
# delimit candidate "words".
word_split_re = re.compile(r'''([\s<>"']+)''')
# URLs that already carry an http(s) scheme.
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
# Scheme-less URLs: 'www.'-prefixed, or bare domains ending in one of the
# original seven gTLDs.
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)($|/.*)$', re.IGNORECASE)
@keep_lazy(str, SafeString)
def escape(text):
    """
    HTML-encode the ampersands, quotes and angle brackets in *text* and
    return the result marked safe.
    Escaping is applied unconditionally — input that is already escaped
    and marked safe is escaped again, so double-escaping is possible.
    Use conditional_escape() when that is a concern.
    """
    encoded = html.escape(str(text))
    return mark_safe(encoded)
# Translation table used by escapejs(): maps characters that could break
# out of a JavaScript string literal (or its surrounding HTML context)
# to their \uXXXX escape sequences.
_js_escapes = {
    ord('\\'): '\\u005C',
    ord('\''): '\\u0027',
    ord('"'): '\\u0022',
    ord('>'): '\\u003E',
    ord('<'): '\\u003C',
    ord('&'): '\\u0026',
    ord('='): '\\u003D',
    ord('-'): '\\u002D',
    ord(';'): '\\u003B',
    ord('`'): '\\u0060',
    ord('\u2028'): '\\u2028',
    ord('\u2029'): '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
@keep_lazy(str, SafeString)
def escapejs(value):
    """Hex-encode the characters of *value* that are unsafe inside a
    JavaScript string literal, and mark the result safe."""
    translated = str(value).translate(_js_escapes)
    return mark_safe(translated)
# HTML/XML-special characters that must not appear literally inside a
# <script> element; json_script() maps them to unicode escapes.
_json_script_escapes = {
    ord('>'): '\\u003E',
    ord('<'): '\\u003C',
    ord('&'): '\\u0026',
}
def json_script(value, element_id):
    """
    Serialize *value* to JSON with HTML/XML-special characters replaced
    by their unicode escapes, and wrap the result in a <script> tag with
    the given id, so it can be embedded safely anywhere except inside a
    tag attribute.
    """
    # Local import; presumably avoids an import cycle — confirm before moving.
    from django.core.serializers.json import DjangoJSONEncoder
    escaped_json = json.dumps(value, cls=DjangoJSONEncoder).translate(_json_script_escapes)
    return format_html(
        '<script id="{}" type="application/json">{}</script>',
        element_id, mark_safe(escaped_json),
    )
def conditional_escape(text):
    """
    Like escape(), but leave values that already declare themselves as
    HTML untouched — the __html__ convention shared by Django's SafeData
    and third-party libraries such as markupsafe.
    """
    if isinstance(text, Promise):
        text = str(text)
    if hasattr(text, '__html__'):
        return text.__html__()
    return escape(text)
def format_html(format_string, *args, **kwargs):
    """
    str.format() variant that passes every argument through
    conditional_escape() and marks the result safe. Use this instead of
    str.format or %-interpolation when building small HTML fragments.
    """
    escaped_args = [conditional_escape(arg) for arg in args]
    escaped_kwargs = {key: conditional_escape(val) for key, val in kwargs.items()}
    return mark_safe(format_string.format(*escaped_args, **escaped_kwargs))
def format_html_join(sep, format_string, args_generator):
    """
    Apply format_html() with *format_string* to every args tuple yielded
    by *args_generator*, then join the fragments with *sep* (which is
    itself conditionally escaped).
    Example:
        format_html_join('\n', "<li>{} {}</li>", ((u.first_name, u.last_name)
                                                  for u in users))
    """
    fragments = (format_html(format_string, *args) for args in args_generator)
    return mark_safe(conditional_escape(sep).join(fragments))
@keep_lazy_text
def linebreaks(value, autoescape=False):
    """Convert newlines into <p> and <br>s.

    Two or more consecutive newlines delimit paragraphs; a single
    newline becomes <br> within a paragraph.
    """
    text = str(normalize_newlines(value))
    if autoescape:
        render = lambda p: '<p>%s</p>' % escape(p).replace('\n', '<br>')
    else:
        render = lambda p: '<p>%s</p>' % p.replace('\n', '<br>')
    return '\n\n'.join(render(p) for p in re.split('\n{2,}', text))
class MLStripper(HTMLParser):
    """HTML parser that drops tags and keeps everything else verbatim."""

    def __init__(self):
        # convert_charrefs=False so entity/char references reach the
        # handlers below and can be re-emitted untouched.
        super().__init__(convert_charrefs=False)
        self.reset()
        self.fed = []

    def handle_data(self, data):
        self.fed.append(data)

    def handle_entityref(self, name):
        self.fed.append('&' + name + ';')

    def handle_charref(self, name):
        self.fed.append('&#' + name + ';')

    def get_data(self):
        """Return everything collected so far as a single string."""
        return ''.join(self.fed)
def _strip_once(value):
    """
    Internal tag stripping utility used by strip_tags.
    Feeds *value* through MLStripper a single time and returns the
    collected non-tag data.
    """
    stripper = MLStripper()
    stripper.feed(value)
    stripper.close()
    return stripper.get_data()
@keep_lazy_text
def strip_tags(value):
    """Return the given HTML with all tags stripped."""
    value = str(value)
    # A single _strip_once() pass handles the typical case; keep looping
    # only while candidate tag characters remain and progress is made.
    while '<' in value and '>' in value:
        result = _strip_once(value)
        if result.count('<') == value.count('<'):
            # _strip_once wasn't able to detect more tags.
            break
        value = result
    return value
@keep_lazy_text
def strip_spaces_between_tags(value):
    """Collapse any whitespace run occurring between adjacent tags."""
    collapsed = re.sub(r'>\s+<', '><', str(value))
    return collapsed
def smart_urlquote(url):
    """Quote a URL if it isn't already quoted."""
    def unquote_quote(segment):
        # Unquote first so already-quoted input isn't double-escaped.
        segment = unquote(segment)
        # Tilde is part of RFC3986 Unreserved Characters
        # https://tools.ietf.org/html/rfc3986#section-2.3
        # See also https://bugs.python.org/issue16285
        return quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + '~')
    # Handle IDN before quoting.
    try:
        scheme, netloc, path, query, fragment = urlsplit(url)
    except ValueError:
        # invalid IPv6 URL (normally square brackets in hostname part).
        return unquote_quote(url)
    try:
        netloc = punycode(netloc)  # IDN -> ACE
    except UnicodeError:  # invalid domain part
        return unquote_quote(url)
    if query:
        # Separately unquoting key/value, so as to not mix querystring separators
        # included in query values. See #22267.
        query_parts = [(unquote(q[0]), unquote(q[1]))
                       for q in parse_qsl(query, keep_blank_values=True)]
        # urlencode will take care of quoting
        query = urlencode(query_parts)
    path = unquote_quote(path)
    fragment = unquote_quote(fragment)
    return urlunsplit((scheme, netloc, path, query, fragment))
@keep_lazy_text
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
    """
    Convert any URLs in text into clickable links.
    Works on http://, https://, www. links, and also on links ending in one of
    the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
    Links can have trailing punctuation (periods, commas, close-parens) and
    leading punctuation (opening parens) and it'll still do the right thing.
    If trim_url_limit is not None, truncate the URLs in the link text longer
    than this limit to trim_url_limit - 1 characters and append an ellipsis.
    If nofollow is True, give the links a rel="nofollow" attribute.
    If autoescape is True, autoescape the link text and URLs.
    """
    safe_input = isinstance(text, SafeData)
    def trim_url(x, limit=trim_url_limit):
        # Shortens only the *display* text of a link; the href keeps the
        # full URL.
        if limit is None or len(x) <= limit:
            return x
        return '%s…' % x[:max(0, limit - 1)]
    def trim_punctuation(lead, middle, trail):
        """
        Trim trailing and wrapping punctuation from `middle`. Return the items
        of the new state.
        """
        # Continue trimming until middle remains unchanged.
        trimmed_something = True
        while trimmed_something:
            trimmed_something = False
            # Trim wrapping punctuation.
            for opening, closing in WRAPPING_PUNCTUATION:
                if middle.startswith(opening):
                    middle = middle[len(opening):]
                    lead += opening
                    trimmed_something = True
                # Keep parentheses at the end only if they're balanced.
                if (middle.endswith(closing) and
                        middle.count(closing) == middle.count(opening) + 1):
                    middle = middle[:-len(closing)]
                    trail = closing + trail
                    trimmed_something = True
            # Trim trailing punctuation (after trimming wrapping punctuation,
            # as encoded entities contain ';'). Unescape entities to avoid
            # breaking them by removing ';'.
            middle_unescaped = html.unescape(middle)
            stripped = middle_unescaped.rstrip(TRAILING_PUNCTUATION_CHARS)
            if middle_unescaped != stripped:
                trail = middle[len(stripped):] + trail
                middle = middle[:len(stripped) - len(middle_unescaped)]
                trimmed_something = True
        return lead, middle, trail
    def is_email_simple(value):
        """Return True if value looks like an email address."""
        # An @ must be in the middle of the value.
        if '@' not in value or value.startswith('@') or value.endswith('@'):
            return False
        try:
            p1, p2 = value.split('@')
        except ValueError:
            # value contains more than one @.
            return False
        # Dot must be in p2 (e.g. example.com)
        if '.' not in p2 or p2.startswith('.'):
            return False
        return True
    # Only words containing '.', '@' or ':' can possibly be links; the
    # split pattern keeps the separators so ''.join() restores the text.
    words = word_split_re.split(str(text))
    for i, word in enumerate(words):
        if '.' in word or '@' in word or ':' in word:
            # lead: Current punctuation trimmed from the beginning of the word.
            # middle: Current state of the word.
            # trail: Current punctuation trimmed from the end of the word.
            lead, middle, trail = '', word, ''
            # Deal with punctuation.
            lead, middle, trail = trim_punctuation(lead, middle, trail)
            # Make URL we want to point to.
            url = None
            nofollow_attr = ' rel="nofollow"' if nofollow else ''
            if simple_url_re.match(middle):
                url = smart_urlquote(html.unescape(middle))
            elif simple_url_2_re.match(middle):
                url = smart_urlquote('http://%s' % html.unescape(middle))
            elif ':' not in middle and is_email_simple(middle):
                local, domain = middle.rsplit('@', 1)
                try:
                    domain = punycode(domain)
                except UnicodeError:
                    continue
                url = 'mailto:%s@%s' % (local, domain)
                nofollow_attr = ''
            # Make link.
            if url:
                trimmed = trim_url(middle)
                if autoescape and not safe_input:
                    lead, trail = escape(lead), escape(trail)
                    trimmed = escape(trimmed)
                middle = '<a href="%s"%s>%s</a>' % (escape(url), nofollow_attr, trimmed)
                words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
            else:
                if safe_input:
                    words[i] = mark_safe(word)
                elif autoescape:
                    words[i] = escape(word)
        elif safe_input:
            words[i] = mark_safe(word)
        elif autoescape:
            words[i] = escape(word)
    return ''.join(words)
def avoid_wrapping(value):
    """
    Keep a phrase on one line by joining its words with non-breaking
    spaces ("\xa0") instead of ordinary spaces.
    """
    return "\xa0".join(value.split(" "))
def html_safe(klass):
    """
    Class decorator that adds an ``__html__`` method mirroring ``__str__``.

    Non-Django template engines look for ``__html__`` to decide whether a
    class's string form is already safe markup. The class must define its
    own ``__str__`` and must not already define ``__html__``.
    """
    klass_dict = vars(klass)
    if '__html__' in klass_dict:
        raise ValueError(
            "can't apply @html_safe to %s because it defines "
            "__html__()." % klass.__name__
        )
    if '__str__' not in klass_dict:
        raise ValueError(
            "can't apply @html_safe to %s because it doesn't "
            "define __str__()." % klass.__name__
        )
    # Wrap the original __str__ so its output is marked safe; __html__
    # simply defers to the (now safe) string form.
    original_str = klass.__str__
    klass.__str__ = lambda self: mark_safe(original_str(self))
    klass.__html__ = lambda self: str(self)
    return klass
| {
"content_hash": "7dc636446cf67cb2da248e66038d69be",
"timestamp": "",
"source": "github",
"line_count": 375,
"max_line_length": 110,
"avg_line_length": 34.99733333333333,
"alnum_prop": 0.5929594635781774,
"repo_name": "mdworks2016/work_development",
"id": "94aa0ff35e9929f40bdfe88931277d90346364d3",
"size": "13126",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Python/20_Third_Certification/venv/lib/python3.7/site-packages/django/utils/html.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "142"
},
{
"name": "Kotlin",
"bytes": "68744"
},
{
"name": "Python",
"bytes": "1080"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import djorm_pgfulltext.fields
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Box (physical card box)
    # and Card (scanned catalogue card) tables. Field help_text/verbose_name
    # values are Swedish UI strings and are runtime data — do not translate.
    # If this migration has been applied anywhere, schema changes belong in
    # a new migration, not in edits to this file.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Box',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('folder_name', models.CharField(help_text='Filkatalog på disk där denna lådas filer ligger', max_length=255, unique=True, verbose_name='Katalognamn')),
                ('sequence_number', models.IntegerField(db_index=True)),
                ('label', models.CharField(db_index=True, max_length=255, verbose_name='Etikett')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': 'Låda',
                'verbose_name_plural': 'Lådor',
                'ordering': ['sequence_number'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Card',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_index=True, help_text='Rubriken som visas överst på en kortsida', max_length=255, verbose_name='Kortnamn')),
                ('filename', models.CharField(db_index=True, help_text='Filnamnet för bildfilen', max_length=255, verbose_name='Filnamn')),
                ('filename_back', models.CharField(db_index=True, help_text='Filnamnet för bildfilen av baksidan', max_length=255, verbose_name='Filnamn baksida')),
                ('ocr_text', models.TextField(blank=True, help_text='Automatiskt OCR-tolkad text från kortet.')),
                ('ocr_text_back', models.TextField(blank=True, help_text='Automatiskt OCR-tolkad text från kortets baksida.')),
                ('letter', models.CharField(blank=True, db_index=True, help_text='Anges för första kortet för att dela upp katalogen alfabetiskt.', max_length=1, null=True, verbose_name='Indexbokstav')),
                ('sequence_number', models.IntegerField(db_index=True, verbose_name='Sekvensnummer i låda')),
                ('catalog_sequence_number', models.IntegerField(blank=True, help_text='Globalt katalognummer som anger kortets plats i katalogen. Används även som identifierare.', null=True, verbose_name='Kortnummer')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('comment', models.TextField(blank=True, help_text='Visas ej för besökare.', null=True, verbose_name='Intern kommentar')),
                # Postgres full-text search vector maintained by djorm_pgfulltext.
                ('search_index', djorm_pgfulltext.fields.VectorField(db_index=True, default='', editable=False, null=True, serialize=False)),
                # NOTE(review): verbose_name='kort' ("card") on the FK to Box
                # looks mismatched — presumably it should describe the box;
                # confirm before changing (would require a new migration).
                ('box', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cards', to='rogge.Box', verbose_name='kort')),
            ],
            options={
                'verbose_name': 'Kort',
                'verbose_name_plural': 'Kort',
                'ordering': ['catalog_sequence_number'],
                'abstract': False,
            },
        ),
    ]
| {
"content_hash": "02da67f6f1bd02f823077e2a4b44e813",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 219,
"avg_line_length": 58.8448275862069,
"alnum_prop": 0.608555523000293,
"repo_name": "Kungbib/CIPAC",
"id": "a7827245120d3199d3055cb25b3097d3ba37f663",
"size": "3505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp/kortkatalogen/rogge/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3932"
},
{
"name": "HTML",
"bytes": "171920"
},
{
"name": "JavaScript",
"bytes": "564"
},
{
"name": "Perl",
"bytes": "4929"
},
{
"name": "Python",
"bytes": "158144"
},
{
"name": "Shell",
"bytes": "5787"
}
],
"symlink_target": ""
} |
'''Functions for use by BUILD scripts'''
__author__ = 'Jim Olsen (jim.olsen@tanium.com)'
__version__ = '2.1.4'
import sys
sys.dont_write_bytecode = True
import os
import glob
import string
import json
import StringIO
import platform
from random import randint
def json_read(f):
    """Parse and return the JSON document stored in file *f*."""
    contents = read_file(f)
    return json.loads(contents)
def read_file(f):
    """Return the entire contents of file *f* as a string."""
    with open(f) as handle:
        return handle.read()
def write_file(f, c):
d = os.path.dirname(f)
if not os.path.exists(d):
print "Creating directory: {}".format(d)
os.makedirs(d)
with open(f, 'w') as fh:
fh.write(c)
print "Wrote file: {}".format(f)
def get_name_title(t):
    """Convert an underscored identifier into a human-readable title.

    Underscores become spaces, the result is stripped and title-cased, and
    known acronyms/brand spellings (XML, JSON, CSV, PyTan, API, ResultSet,
    ResultInfo) are re-cased afterwards.
    """
    fixes = {
        'Xml': 'XML',
        'Json': 'JSON',
        'Csv': 'CSV',
        'Pytan': 'PyTan',
        'Api': 'API',
        'Resultset': 'ResultSet',
        'Resultinfo': 'ResultInfo',
    }
    ret = t.replace('_', ' ').strip().title()
    # .items() instead of the Python 2-only .iteritems(): identical results
    # on Python 2 (the table is tiny) and keeps the helper usable on Python 3.
    for k, v in fixes.items():
        ret = ret.replace(k, v)
    return ret
def clean_it(f):
if os.path.exists(f):
os.unlink(f)
print "Removed {}".format(f)
def clean_up(p, pattern):
    """Remove every file under directory *p* whose name matches *pattern*."""
    for match in get_files(p, pattern):
        clean_it(match)
def get_files(p, pattern='*'):
    """Return the paths in directory *p* matching the glob *pattern*."""
    full_pattern = os.path.join(p, pattern)
    return glob.glob(full_pattern)
class ExecWrap(object):
    """Runs a code block via exec() while capturing its stdout/stderr."""

    def main(self, code_block, name='', verbose=True):
        """Execute *code_block* and return (stdout, stderr, response_objects).

        stdout/stderr are whatever text the executed block wrote while the
        process-wide streams were redirected; response_objects is a copy of
        the block's ``handler.session.ALL_REQUESTS_RESPONSES`` (empty list
        when no ``handler`` local was produced).
        """
        print "executing code block for: {}".format(name)
        if verbose:
            print "Code block:\n{}".format(code_block)
        exec_globals = {}
        exec_locals = {}
        # Redirect the process-wide streams into in-memory buffers so the
        # executed block's output can be captured and returned.
        code_stdout = StringIO.StringIO()
        code_stderr = StringIO.StringIO()
        sys.stdout = code_stdout
        sys.stderr = code_stderr
        try:
            exec(code_block, exec_globals, exec_locals)
        except:
            # Restore the real streams *before* printing diagnostics,
            # then re-raise the original exception.
            sys.stdout = sys.__stdout__
            sys.stderr = sys.__stderr__
            code_stdout_val = code_stdout.getvalue()
            code_stderr_val = code_stderr.getvalue()
            print "Exception occurred!!"
            print "Code block:\n{}".format(code_block)
            print "Code stdout:\n{}".format(code_stdout_val)
            print "Code stderr:\n{}".format(code_stderr_val)
            raise
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
        code_stdout_val = code_stdout.getvalue()
        code_stderr_val = code_stderr.getvalue()
        # Best effort: the executed block may not define `handler`, so any
        # failure here just yields an empty list.
        try:
            response_objects = list(exec_locals['handler'].session.ALL_REQUESTS_RESPONSES)
        except:
            response_objects = []
            print "unable to fetch response objects from session!"
        if verbose:
            print "{}".format("_" * 80)
            print "Code stdout:\n{}".format(code_stdout_val)
            print "Code stderr:\n{}".format(code_stderr_val)
            print "# of response_objects passed back: {}".format(len(response_objects))
            print "{}".format("_" * 80)
        return code_stdout_val, code_stderr_val, response_objects
def templatize(val, subs):
    """Expand ``${}`` tokens in *val* using *subs*.

    A fresh random ``RANDINT`` key is added to *subs* (mutating the caller's
    dict) before substitution; unknown tokens are left untouched.
    """
    subs["RANDINT"] = randint(1, 9999)
    return string.Template(val).safe_substitute(subs)
def template_dict(d, all_subs):
    """Recursively expand ``${}`` tokens in the values of dict *d* in place.

    Strings are templatized directly, lists/tuples element-wise (both become
    lists), nested dicts recursively; any other value is left unchanged.
    Returns *d* itself (mutated). Python 2 only: relies on ``iteritems`` and
    the ``unicode`` builtin. Reassigning existing keys while iterating is
    safe because the key set never changes.
    """
    # replace any ${} vars with matching key/values from all_subs
    for k, v in d.iteritems():
        if type(v) in [str, unicode]:
            new_v = templatize(v, all_subs)
        elif type(v) in [list, tuple]:
            new_v = [templatize(x, all_subs) for x in v]
        elif type(v) in [dict]:
            new_v = template_dict(v, all_subs)
        else:
            new_v = v
        d[k] = new_v
    return d
def create_script(d, template_key, output_dir, filename_template):
if d.get(template_key):
script_out = string.Template(d[template_key]).safe_substitute(**d)
script_out = string.Template(script_out).safe_substitute(**d)
script_file = string.Template(filename_template).safe_substitute(**d)
script_path = os.path.join(output_dir, script_file)
write_file(script_path, script_out)
os.chmod(script_path, 0755)
def process_example(example_idx, example_dict, sub_dict):
    """Normalize one example definition dict in place and return it.

    Adjusts the command line for interactive scripts, strips the 'noerror'
    test marker, defaults depth to 1, expands ${} tokens via template_dict,
    and derives the 'others_out'/'notes_out' display strings.
    Note: example_idx is currently only referenced by the commented-out
    debug prints. Python 2 only (uses iteritems).
    """
    # print "Before parse: {script_name}: {name}".format(script_name=script_def['script_name'], **example_def)
    # prepend command line with stdin re-direct for interactive console scripts
    if '-i' in sub_dict.get('pyopts', ''):
        example_dict['cmd'] = 'echo "" | ' + example_dict['cmd']
    # Drop the 'noerror' marker from the comma-separated tests list.
    if 'noerror' in example_dict['tests']:
        fixed = example_dict['tests'].split(',')
        fixed = [x.strip() for x in fixed if x.strip() != 'noerror']
        example_dict['tests'] = ', '.join(fixed)
    # set depth to 1 if no depth specified
    example_dict['depth'] = example_dict.get('depth', 1)
    # expand all variables in the example dict with key/value pairs from all_subs
    example_dict = template_dict(example_dict, sub_dict)
    # print "After parse: {script_name}: {name}".format(script_name=script_def['script_name'], **example_def)
    # for k, v in example_def.iteritems():
    # debug_out = " example #{} for {} {}: {}".format
    # print debug_out(example_idx, script_def['script_name'], k, v)
    # Everything not in `skips` is folded into a display string.
    skips = ['name', 'cmd', 'depth', 'notes', 'tests']
    others = ["{}: {}".format(k, v) for k, v in example_dict.iteritems() if k not in skips]
    example_dict['others_out'] = '\n'.join(others)
    # Non-empty note lines become numbered "notesN:" entries.
    notes = example_dict.get('notes', '').strip().splitlines()
    notes = [x for x in notes if x]
    notes = ['notes{}: {}'.format(idx, n) for idx, n in enumerate(notes)]
    example_dict['notes_out'] = '\n'.join(notes)
    return example_dict
def spew(t, verbose=False):
if verbose:
print t
def determine_os_ver():
    """Return a human-readable name/version string for the host OS.

    Raises Exception for platforms other than Darwin, Windows, or Linux.
    """
    os_system = platform.system()
    if os_system.lower() == 'darwin':
        os_name = 'OS X'
        os_version = platform.mac_ver()[0]
        os_version = "{} {}".format(os_name, os_version)
    elif os_system.lower() == 'windows':
        os_name = os_system
        os_version = platform.release()
        # win32_ver()[2] is the service-pack / patch level.
        os_patch = platform.win32_ver()[2]
        os_version = "{} {} {}".format(os_name, os_version, os_patch)
    elif os_system.lower() == 'linux':
        # NOTE(review): platform.linux_distribution() was removed in
        # Python 3.8; fine for the Python 2 runtime this module targets,
        # but would need the `distro` package on modern Python 3.
        os_version = ' '.join(platform.linux_distribution())
    else:
        raise Exception("OS System not coded for: {}".format(os_system))
    return os_version
| {
"content_hash": "001c426d59bb3e0d63d06a6f94710058",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 110,
"avg_line_length": 30.451923076923077,
"alnum_prop": 0.580991474581623,
"repo_name": "tanium/pytan",
"id": "eda4dcaf6cff51d70d0cfc26b88e0ef72df5c8db",
"size": "6501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BUILD/lib/buildsupport.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13251"
},
{
"name": "CSS",
"bytes": "32442"
},
{
"name": "HTML",
"bytes": "1232764"
},
{
"name": "JavaScript",
"bytes": "375167"
},
{
"name": "Makefile",
"bytes": "4287"
},
{
"name": "Python",
"bytes": "2541262"
},
{
"name": "Shell",
"bytes": "3194"
}
],
"symlink_target": ""
} |
class Solution:
    # @param {Interval[]} intervals
    # @return {Interval[]}
    def merge(self, intervals):
        """Merge overlapping intervals and return the merged list.

        Intervals are objects with mutable ``start``/``end`` attributes;
        overlapping ones are merged in place by widening the earlier
        interval's ``end``. The input list's order is changed (sorted).

        Fix: the original carried a ``pre.start > interval.end`` branch
        that is unreachable once the list is sorted by start (and which,
        had it ever fired, would have appended intervals out of order);
        it is removed, along with the redundant ``min`` on ``start``.
        """
        if not intervals:
            return intervals
        from operator import attrgetter
        intervals.sort(key=attrgetter('start', 'end'))
        merged = [intervals[0]]
        for interval in intervals[1:]:
            last = merged[-1]
            if interval.start <= last.end:
                # Overlapping (or touching) interval: widen the last one.
                last.end = max(last.end, interval.end)
            else:
                merged.append(interval)
        return merged
"content_hash": "85d4a7c0ee3ec6c4c249d2caadc6ed5c",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 64,
"avg_line_length": 31.653846153846153,
"alnum_prop": 0.543134872417983,
"repo_name": "abawchen/leetcode",
"id": "4f8ab21beadf2576b96a0ff4b61aeff478af90bf",
"size": "1110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solutions/056_merge_intervals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1991403"
}
],
"symlink_target": ""
} |
# Copyright 2010 The JsonCpp Authors
# Distributed under MIT license, or public domain if desired and
# recognized in your jurisdiction.
# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
from __future__ import print_function
import os.path
import sys
def fix_source_eol(path, is_dry_run = True, verbose = True, eol = '\n'):
    """Makes sure that all sources have the specified eol sequence (default: unix).

    Args:
        path: path of the file to normalize; must be an existing file.
        is_dry_run: when True, report what would change but do not rewrite.
        verbose: when True, print a NEED FIX/FIXED status for changed files.
        eol: desired line ending, as str or bytes.

    Returns:
        True when the file content needed fixing, False otherwise
        (including on I/O error, which is reported to stderr).

    Raises:
        ValueError: if *path* is not an existing file.
    """
    if not os.path.isfile(path):
        raise ValueError('Path "%s" is not a file' % path)
    try:
        # Binary mode keeps the original line endings visible; text mode
        # would translate them before we could inspect them.
        f = open(path, 'rb')
    except IOError as msg:
        # Bug fix: the original formatted the builtin `file` (a NameError
        # on Python 3) instead of `path`.
        print("%s: I/O Error: %s" % (path, str(msg)), file=sys.stderr)
        return False
    try:
        raw_lines = f.readlines()
    finally:
        f.close()
    # Bug fix: readlines() on a binary file yields bytes, and the original
    # mixed them with str ('\r\n'), a TypeError on Python 3. Work entirely
    # in bytes instead.
    eol_bytes = eol if isinstance(eol, bytes) else eol.encode('ascii')
    fixed_lines = [line.rstrip(b'\r\n') + eol_bytes for line in raw_lines]
    if raw_lines == fixed_lines:
        return False
    print('%s =>' % path, end=' ')
    if not is_dry_run:
        f = open(path, "wb")
        try:
            f.writelines(fixed_lines)
        finally:
            f.close()
    if verbose:
        print(' NEED FIX' if is_dry_run else ' FIXED')
    return True
##
##
##
##def _do_fix(is_dry_run = True):
## from waftools import antglob
## python_sources = antglob.glob('.',
## includes = '**/*.py **/wscript **/wscript_build',
## excludes = antglob.default_excludes + './waf.py',
## prune_dirs = antglob.prune_dirs + 'waf-* ./build')
## for path in python_sources:
## _fix_python_source(path, is_dry_run)
##
## cpp_sources = antglob.glob('.',
## includes = '**/*.cpp **/*.h **/*.inl',
## prune_dirs = antglob.prune_dirs + 'waf-* ./build')
## for path in cpp_sources:
## _fix_source_eol(path, is_dry_run)
##
##
##def dry_fix(context):
## _do_fix(is_dry_run = True)
##
##def fix(context):
## _do_fix(is_dry_run = False)
##
##def shutdown():
## pass
##
##def check(context):
## # Unit tests are run when "check" target is used
## ut = UnitTest.unit_test()
## ut.change_to_testfile_dir = True
## ut.want_to_see_test_output = True
## ut.want_to_see_test_error = True
## ut.run()
## ut.print_results()
| {
"content_hash": "63ff86fb3c726986fa7fe272f6239374",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 86,
"avg_line_length": 31.47142857142857,
"alnum_prop": 0.5683159328188834,
"repo_name": "fanqiang001/HMI_SDK_LIB",
"id": "e089f9f93e62c42272a23f141a6e4654ffe62b5e",
"size": "2203",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/jsoncpp/devtools/fixeol.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "64199"
},
{
"name": "C++",
"bytes": "366368"
},
{
"name": "CMake",
"bytes": "6156"
},
{
"name": "Java",
"bytes": "68007"
},
{
"name": "QMake",
"bytes": "4463"
}
],
"symlink_target": ""
} |
from kafkatest.services.trogdor.task_spec import TaskSpec
class FilesUnreadableFaultSpec(TaskSpec):
    """
    The specification for a fault which makes files unreadable.
    """

    def __init__(self, start_ms, duration_ms, node_names, mount_path,
                 prefix, error_code):
        """
        Create a new FilesUnreadableFaultSpec.

        :param start_ms:    The start time, as described in task_spec.py
        :param duration_ms: The duration in milliseconds.
        :param node_names:  The names of the node(s) to create the fault on.
        :param mount_path:  The mount path.
        :param prefix:      The prefix within the mount point to make unreadable.
        :param error_code:  The error code to use.
        """
        super(FilesUnreadableFaultSpec, self).__init__(start_ms, duration_ms)
        self.node_names = node_names
        self.mount_path = mount_path
        self.prefix = prefix
        self.error_code = error_code

    def message(self):
        """Serialize this spec for the Trogdor coordinator."""
        spec = {"class": "org.apache.kafka.trogdor.fault.FilesUnreadableFaultSpec"}
        spec["startMs"] = self.start_ms
        spec["durationMs"] = self.duration_ms
        spec["nodeNames"] = self.node_names
        spec["mountPath"] = self.mount_path
        spec["prefix"] = self.prefix
        spec["errorCode"] = self.error_code
        return spec

    def kibosh_message(self):
        """Serialize this spec in the form the Kibosh filesystem expects."""
        spec = {"type": "unreadable"}
        spec["prefix"] = self.prefix
        spec["code"] = self.error_code
        return spec
| {
"content_hash": "53575805646546ecada5ec01ba4bc47e",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 85,
"avg_line_length": 35.53488372093023,
"alnum_prop": 0.5818062827225131,
"repo_name": "themarkypantz/kafka",
"id": "4f0540a632b68c69b855b7236d7bf5efba019a8c",
"size": "2309",
"binary": false,
"copies": "1",
"ref": "refs/heads/mcastillo-dev",
"path": "tests/kafkatest/services/trogdor/files_unreadable_fault_spec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "27427"
},
{
"name": "HTML",
"bytes": "5443"
},
{
"name": "Java",
"bytes": "10384396"
},
{
"name": "Python",
"bytes": "600233"
},
{
"name": "Scala",
"bytes": "4763309"
},
{
"name": "Shell",
"bytes": "84077"
},
{
"name": "XSLT",
"bytes": "7116"
}
],
"symlink_target": ""
} |
"""setup.py file for a GRR API client library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import shutil
import subprocess
import sys
from distutils.command.build_py import build_py
from setuptools import find_packages
from setuptools import setup
from setuptools.command.develop import develop
from setuptools.command.sdist import sdist
# TODO: Fix this import once support for Python 2 is dropped.
# pylint: disable=g-import-not-at-top
if sys.version_info.major == 2:
import ConfigParser as configparser
else:
import configparser
# pylint: enable=g-import-not-at-top
# Resolve the directory containing this setup.py and make it the working
# directory so relative paths (version.ini, makefile.py) resolve correctly.
THIS_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
os.chdir(THIS_DIRECTORY)

# Pinned build-time dependency versions used by compile_protos() and setup().
GRPCIO_TOOLS = "grpcio-tools==1.17.1"
PROTOBUF = "protobuf==3.8.0"
def get_config():
  """Return an INI parser loaded with version.ini data."""
  # Prefer version.ini next to this file (prebuilt sdist layout); fall back
  # to the repository root, two levels up (development checkout).
  ini_path = os.path.join(THIS_DIRECTORY, "version.ini")
  if not os.path.exists(ini_path):
    ini_path = os.path.join(THIS_DIRECTORY, "../../version.ini")
    if not os.path.exists(ini_path):
      raise RuntimeError("Couldn't find version.ini")
  parser = configparser.SafeConfigParser()
  parser.read(ini_path)
  return parser
def compile_protos():
  """Builds necessary assets from sources.

  Installs grpcio-tools/protobuf if the protoc module is missing, then —
  only when run from a GRR source tree (makefile.py present) — regenerates
  the compiled protos via makefile.py.
  """
  # Using Popen to effectively suppress the output of the command below - no
  # need to fill in the logs with protoc's help.
  p = subprocess.Popen([sys.executable, "-m", "grpc_tools.protoc", "--help"],
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE)
  p.communicate()
  # If protoc is not installed, install it. This seems to be the only reliable
  # way to make sure that grpcio-tools gets intalled, no matter which Python
  # setup mechanism is used: pip install, pip install -e,
  # python setup.py install, etc.
  if p.returncode != 0:
    # Specifying protobuf dependency right away pins it to the correct
    # version. Otherwise latest protobuf library will be installed with
    # grpcio-tools and then uninstalled when grr-response-proto's setup.py runs
    # and reinstalled to the version required by grr-response-proto.
    subprocess.check_call(
        [sys.executable, "-m", "pip", "install", GRPCIO_TOOLS, PROTOBUF])
  # If there's no makefile, we're likely installing from an sdist,
  # so there's no need to compile the protos (they should be already
  # compiled).
  if not os.path.exists(os.path.join(THIS_DIRECTORY, "makefile.py")):
    return
  # Only compile protobufs if we're inside GRR source tree.
  subprocess.check_call(
      ["python", "makefile.py", "--clean"], cwd=THIS_DIRECTORY)
class Build(build_py):
  """build_py command that re-discovers packages before building."""

  def find_all_modules(self):
    # Refresh the package list so packages generated after setup() was
    # configured are included — presumably the compiled proto packages;
    # confirm against compile_protos()'s output layout.
    self.packages = find_packages()
    return build_py.find_all_modules(self)
class Develop(develop):
  """develop command that compiles protos before the editable install."""

  def run(self):
    compile_protos()
    develop.run(self)
class Sdist(sdist):
  """Build sdist."""

  def make_release_tree(self, base_dir, files):
    # Copy the repository-level version.ini into the sdist root so that
    # get_config() can find it when installing from the archive.
    sdist.make_release_tree(self, base_dir, files)
    sdist_version_ini = os.path.join(base_dir, "version.ini")
    if os.path.exists(sdist_version_ini):
      os.unlink(sdist_version_ini)
    shutil.copy(
        os.path.join(THIS_DIRECTORY, "../../version.ini"), sdist_version_ini)

  def run(self):
    # Protos must be compiled before the source archive is assembled.
    compile_protos()
    sdist.run(self)
# Read the package version from version.ini once at import time.
VERSION = get_config()

setup_args = dict(
    name="grr-response-proto",
    version=VERSION.get("Version", "packageversion"),
    description="GRR API client library",
    license="Apache License, Version 2.0",
    maintainer="GRR Development Team",
    maintainer_email="grr-dev@googlegroups.com",
    url="https://github.com/google/grr/tree/master/proto",
    cmdclass={
        "build_py": Build,
        "develop": Develop,
        "sdist": Sdist,
    },
    packages=find_packages(),
    install_requires=[
        PROTOBUF,
    ],
    setup_requires=[
        GRPCIO_TOOLS,
    ],
    # NOTE(review): setup() has no "data" keyword, so this argument is
    # ignored by setuptools — presumably data_files or package_data was
    # intended; confirm before changing packaging behavior.
    data=["version.ini"])

setup(**setup_args)
| {
"content_hash": "066145cf0efb676d165c3f129b899fc4",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 79,
"avg_line_length": 30.573529411764707,
"alnum_prop": 0.68999518999519,
"repo_name": "demonchild2112/travis-test",
"id": "09fb93af5693abc8083318aa04ff66144681392a",
"size": "4180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/proto/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "3446"
},
{
"name": "C",
"bytes": "11321"
},
{
"name": "C++",
"bytes": "54535"
},
{
"name": "CSS",
"bytes": "35549"
},
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HCL",
"bytes": "7208"
},
{
"name": "HTML",
"bytes": "190212"
},
{
"name": "JavaScript",
"bytes": "11691"
},
{
"name": "Jupyter Notebook",
"bytes": "199190"
},
{
"name": "Makefile",
"bytes": "3139"
},
{
"name": "PowerShell",
"bytes": "1984"
},
{
"name": "Python",
"bytes": "7213255"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "48882"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TSQL",
"bytes": "51"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import sys
import time
from django.conf import settings
from django.db import transaction, connection
from django.db.utils import ConnectionHandler, DEFAULT_DB_ALIAS, DatabaseError
from django.test import (TransactionTestCase, skipIfDBFeature,
skipUnlessDBFeature)
from django.utils import unittest
from .models import Person
# Some tests require threading, which might not be available. So create a
# skip-test decorator for those test functions.
try:
import threading
except ImportError:
threading = None
requires_threading = unittest.skipUnless(threading, 'requires threading')
class SelectForUpdateTests(TransactionTestCase):
    """Tests for QuerySet.select_for_update() row-locking behavior.

    A second database connection (and, where needed, a second thread)
    holds SELECT ... FOR UPDATE locks so we can verify that concurrent
    locking either blocks or raises, per backend capabilities.
    Python 2 era code: note the old-style raise and failUnless/failIf.
    """

    def setUp(self):
        """Enter txn management, create test data, open a second connection."""
        transaction.enter_transaction_management(True)
        transaction.managed(True)
        self.person = Person.objects.create(name='Reinhardt')

        # We have to commit here so that code in run_select_for_update can
        # see this data.
        transaction.commit()

        # We need another database connection to test that one connection
        # issuing a SELECT ... FOR UPDATE will block.
        new_connections = ConnectionHandler(settings.DATABASES)
        self.new_connection = new_connections[DEFAULT_DB_ALIAS]

        # We need to set settings.DEBUG to True so we can capture
        # the output SQL to examine.
        self._old_debug = settings.DEBUG
        settings.DEBUG = True

    def tearDown(self):
        """Undo txn management, close the extra connection, restore DEBUG."""
        try:
            # We don't really care if this fails - some of the tests will set
            # this in the course of their run.
            transaction.managed(False)
            transaction.leave_transaction_management()
        except transaction.TransactionManagementError:
            pass
        self.new_connection.close()
        settings.DEBUG = self._old_debug
        try:
            self.end_blocking_transaction()
        except (DatabaseError, AttributeError):
            pass

    def start_blocking_transaction(self):
        """Lock all Person rows via the second connection."""
        # Start a blocking transaction. At some point,
        # end_blocking_transaction() should be called.
        self.cursor = self.new_connection.cursor()
        sql = 'SELECT * FROM %(db_table)s %(for_update)s;' % {
            'db_table': Person._meta.db_table,
            'for_update': self.new_connection.ops.for_update_sql(),
        }
        self.cursor.execute(sql, ())
        result = self.cursor.fetchone()

    def end_blocking_transaction(self):
        # Roll back the blocking transaction.
        self.new_connection._rollback()

    def has_for_update_sql(self, tested_connection, nowait=False):
        # Examine the SQL that was executed to determine whether it
        # contains the 'SELECT..FOR UPDATE' stanza.
        for_update_sql = tested_connection.ops.for_update_sql(nowait)
        sql = tested_connection.queries[-1]['sql']
        return bool(sql.find(for_update_sql) > -1)

    def check_exc(self, exc):
        """Assert that exc is a DatabaseError instance."""
        self.failUnless(isinstance(exc, DatabaseError))

    @skipUnlessDBFeature('has_select_for_update')
    def test_for_update_sql_generated(self):
        """
        Test that the backend's FOR UPDATE variant appears in
        generated SQL when select_for_update is invoked.
        """
        list(Person.objects.all().select_for_update())
        self.assertTrue(self.has_for_update_sql(connection))

    @skipUnlessDBFeature('has_select_for_update_nowait')
    def test_for_update_sql_generated_nowait(self):
        """
        Test that the backend's FOR UPDATE NOWAIT variant appears in
        generated SQL when select_for_update is invoked.
        """
        list(Person.objects.all().select_for_update(nowait=True))
        self.assertTrue(self.has_for_update_sql(connection, nowait=True))

    # In Python 2.6 beta and some final releases, exceptions raised in __len__
    # are swallowed (Python issue 1242657), so these cases return an empty
    # list, rather than raising an exception. Not a lot we can do about that,
    # unfortunately, due to the way Python handles list() calls internally.
    # Python 2.6.1 is the "in the wild" version affected by this, so we skip
    # the test for that version.
    @requires_threading
    @skipUnlessDBFeature('has_select_for_update_nowait')
    @unittest.skipIf(sys.version_info[:3] == (2, 6, 1), "Python version is 2.6.1")
    def test_nowait_raises_error_on_block(self):
        """
        If nowait is specified, we expect an error to be raised rather
        than blocking.
        """
        self.start_blocking_transaction()
        status = []
        thread = threading.Thread(
            target=self.run_select_for_update,
            args=(status,),
            kwargs={'nowait': True},
        )

        thread.start()
        time.sleep(1)
        thread.join()
        self.end_blocking_transaction()
        self.check_exc(status[-1])

    # In Python 2.6 beta and some final releases, exceptions raised in __len__
    # are swallowed (Python issue 1242657), so these cases return an empty
    # list, rather than raising an exception. Not a lot we can do about that,
    # unfortunately, due to the way Python handles list() calls internally.
    # Python 2.6.1 is the "in the wild" version affected by this, so we skip
    # the test for that version.
    @skipIfDBFeature('has_select_for_update_nowait')
    @skipUnlessDBFeature('has_select_for_update')
    @unittest.skipIf(sys.version_info[:3] == (2, 6, 1), "Python version is 2.6.1")
    def test_unsupported_nowait_raises_error(self):
        """
        If a SELECT...FOR UPDATE NOWAIT is run on a database backend
        that supports FOR UPDATE but not NOWAIT, then we should find
        that a DatabaseError is raised.
        """
        self.assertRaises(
            DatabaseError,
            list,
            Person.objects.all().select_for_update(nowait=True)
        )

    def run_select_for_update(self, status, nowait=False):
        """
        Utility method that runs a SELECT FOR UPDATE against all
        Person instances. After the select_for_update, it attempts
        to update the name of the only record, save, and commit.

        This function expects to run in a separate thread.
        """
        status.append('started')
        try:
            # We need to enter transaction management again, as this is done on
            # per-thread basis
            transaction.enter_transaction_management(True)
            transaction.managed(True)
            people = list(
                Person.objects.all().select_for_update(nowait=nowait)
            )
            people[0].name = 'Fred'
            people[0].save()
            transaction.commit()
        except DatabaseError as e:
            status.append(e)
        finally:
            # This method is run in a separate thread. It uses its own
            # database connection. Close it without waiting for the GC.
            connection.close()

    @requires_threading
    @skipUnlessDBFeature('has_select_for_update')
    @skipUnlessDBFeature('supports_transactions')
    def test_block(self):
        """
        Check that a thread running a select_for_update that
        accesses rows being touched by a similar operation
        on another connection blocks correctly.
        """
        # First, let's start the transaction in our thread.
        self.start_blocking_transaction()

        # Now, try it again using the ORM's select_for_update
        # facility. Do this in a separate thread.
        status = []
        thread = threading.Thread(
            target=self.run_select_for_update, args=(status,)
        )

        # The thread should immediately block, but we'll sleep
        # for a bit to make sure.
        thread.start()
        sanity_count = 0
        while len(status) != 1 and sanity_count < 10:
            sanity_count += 1
            time.sleep(1)
        if sanity_count >= 10:
            raise ValueError, 'Thread did not run and block'

        # Check the person hasn't been updated. Since this isn't
        # using FOR UPDATE, it won't block.
        p = Person.objects.get(pk=self.person.pk)
        self.assertEqual('Reinhardt', p.name)

        # When we end our blocking transaction, our thread should
        # be able to continue.
        self.end_blocking_transaction()
        thread.join(5.0)

        # Check the thread has finished. Assuming it has, we should
        # find that it has updated the person's name.
        self.failIf(thread.isAlive())

        # We must commit the transaction to ensure that MySQL gets a fresh read,
        # since by default it runs in REPEATABLE READ mode
        transaction.commit()

        p = Person.objects.get(pk=self.person.pk)
        self.assertEqual('Fred', p.name)

    @requires_threading
    @skipUnlessDBFeature('has_select_for_update')
    def test_raw_lock_not_available(self):
        """
        Check that running a raw query which can't obtain a FOR UPDATE lock
        raises the correct exception
        """
        self.start_blocking_transaction()

        def raw(status):
            try:
                list(
                    Person.objects.raw(
                        'SELECT * FROM %s %s' % (
                            Person._meta.db_table,
                            connection.ops.for_update_sql(nowait=True)
                        )
                    )
                )
            except DatabaseError as e:
                status.append(e)
            finally:
                # This method is run in a separate thread. It uses its own
                # database connection. Close it without waiting for the GC.
                connection.close()

        status = []
        thread = threading.Thread(target=raw, kwargs={'status': status})
        thread.start()
        time.sleep(1)
        thread.join()
        self.end_blocking_transaction()
        self.check_exc(status[-1])

    @skipUnlessDBFeature('has_select_for_update')
    def test_transaction_dirty_managed(self):
        """ Check that a select_for_update sets the transaction to be
        dirty when executed under txn management. Setting the txn dirty
        means that it will be either committed or rolled back by Django,
        which will release any locks held by the SELECT FOR UPDATE.
        """
        people = list(Person.objects.select_for_update())
        self.assertTrue(transaction.is_dirty())

    @skipUnlessDBFeature('has_select_for_update')
    def test_transaction_not_dirty_unmanaged(self):
        """ If we're not under txn management, the txn will never be
        marked as dirty.
        """
        transaction.managed(False)
        transaction.leave_transaction_management()
        people = list(Person.objects.select_for_update())
        self.assertFalse(transaction.is_dirty())
| {
"content_hash": "9e9d6a7c2862325654961ed2d15bc092",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 82,
"avg_line_length": 38.59642857142857,
"alnum_prop": 0.6252428981215878,
"repo_name": "chrishas35/django-travis-ci",
"id": "0587e11a3a1c9e4482336c85fc138daedd159a73",
"size": "10807",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/modeltests/select_for_update/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "89027"
},
{
"name": "Python",
"bytes": "8037393"
},
{
"name": "Shell",
"bytes": "4241"
}
],
"symlink_target": ""
} |
from scrapy import Spider
from scrapy.http import Request
from firmware.items import FirmwareImage
from firmware.loader import FirmwareLoader
import urllib.request, urllib.parse, urllib.error
import json
class A360Spider(Spider):
    """Spider that scrapes 360 router firmware metadata from a JS blob."""

    name = "360"
    #allowed_domains = ["luyou.360.cn"]
    json_url = "http://s7.qhres.com/static/ef5bacdd3d93fa90/common_info.js"
    start_urls = ["http://luyou.360.cn/download_center.html?from=nav"]

    def parse(self, response):
        # Request the JS file holding the firmware catalogue, keeping the
        # download-center page as the referer.
        yield Request(
            url=self.json_url,
            headers={"Referer": response.url},
            callback=self.parse_product)

    def parse_product(self, response):
        js = response.text
        if js.startswith("var commonInfo"):
            print(response.url)
            print(js)
        import re
        # Scrape the parallel field lists straight out of the JS source.
        products = re.findall("id:\"(?P<product>.*?)\"", js)
        descriptions = re.findall("title:\"(?P<description>.*?)\"", js)
        versions = re.findall("romVersions:\"(?P<version>.*?)\"", js)
        urls = re.findall("romUrl:\"(?P<url>.*?)\"", js)
        dates = re.findall("updateDate:\"(?P<date>.*?)\"", js)
        for idx, product in enumerate(products):
            loader = FirmwareLoader(
                item=FirmwareImage(), response=response)
            loader.add_value("url", urls[idx])
            loader.add_value("version", versions[idx])
            loader.add_value("product", product)
            loader.add_value("description", descriptions[idx])
            loader.add_value("date", dates[idx])
            loader.add_value("vendor", self.name)
            yield loader.load_item()
| {
"content_hash": "8e0059f0b1c3773423a6978e6f8d3130",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 75,
"avg_line_length": 35.464285714285715,
"alnum_prop": 0.540785498489426,
"repo_name": "firmadyne/scraper",
"id": "b78dc18a6807590b471854a729af7612531c2058",
"size": "2000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "firmware/spiders/360.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "113811"
},
{
"name": "Shell",
"bytes": "760"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``sunburst.marker.line.color`` property."""

    def __init__(
        self, plotly_name="color", parent_name="sunburst.marker.line", **kwargs
    ):
        # Fill in the property defaults unless the caller overrode them,
        # then hand everything to the generic color validator.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "style")
        kwargs.setdefault("role", "style")
        super().__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| {
"content_hash": "94ac3667ad5827a8a26d05c6e2cd8fed",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 79,
"avg_line_length": 34.266666666666666,
"alnum_prop": 0.585603112840467,
"repo_name": "plotly/python-api",
"id": "d53ccd9a7411ee80dae06479124f48ec2fb2d904",
"size": "514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/sunburst/marker/line/_color.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from devlib.utils.misc import list_to_mask, mask_to_list
from lisa.analysis.base import TraceAnalysisBase
from lisa.utils import memoized
from lisa.trace import requires_events, CPU
from lisa.datautils import df_refit_index, series_refit_index
from lisa.notebook import plot_signal
class ThermalAnalysis(TraceAnalysisBase):
    """
    Support for plotting Thermal Analysis data

    :param trace: input Trace object
    :type trace: :class:`trace.Trace`
    """

    name = 'thermal'

    @requires_events("thermal_temperature")
    def df_thermal_zones_temperature(self):
        """
        Get the temperature of the thermal zones

        :returns: a :class:`pandas.DataFrame` with:

          * An ``id`` column (The thermal zone ID)
          * A ``thermal_zone`` column (The thermal zone name)
          * A ``temp`` column (The reported temperature)
        """
        df = self.trace.df_event("thermal_temperature")
        # Keep only the columns documented above.
        df = df[['id', 'thermal_zone', 'temp']]
        return df

    @TraceAnalysisBase.cache
    @requires_events("thermal_power_cpu_limit")
    def df_cpufreq_cooling_state(self, cpus=None):
        """
        Get cpufreq cooling device states

        :param cpus: The CPUs to consider (all by default)
        :type cpus: list(int)

        :returns: a :class:`pandas.DataFrame` with:

          * A ``cpus`` column (bitmask of the CPUs affected by the cooling
            device)
          * A ``freq`` column (The frequency limit)
          * A ``cdev_state`` column (The cooling device state index)
        """
        df = self.trace.df_event("thermal_power_cpu_limit")
        df = df[['cpus', 'freq', 'cdev_state']]

        if cpus is not None:
            # Find masks that match the requested CPUs
            # This can include other CPUs, since a cooling device may
            # cover a whole cluster rather than a single CPU.
            masks = self._matching_masks(cpus)
            df = df[df.cpus.isin(masks)]

        return df

    @TraceAnalysisBase.cache
    @requires_events("thermal_power_devfreq_limit")
    def df_devfreq_cooling_state(self, devices=None):
        """
        Get devfreq cooling device states

        :param devices: The devfreq devices to consider (all by default)
        :type device: list(str)

        :returns: a :class:`pandas.DataFrame` with:

          * A ``type`` column (The devfreq device name)
          * A ``freq`` column (The frequency limit)
          * A ``cdev_state`` column (The cooling device state index)
        """
        # NOTE(review): the decorator requires "thermal_power_devfreq_limit"
        # but the event actually read is "devfreq_out_power" — confirm
        # which event is intended.
        df = self.trace.df_event("devfreq_out_power")
        df = df[['type', 'freq', 'cdev_state']]

        if devices is not None:
            df = df[df.type.isin(devices)]

        return df

    @property
    @memoized
    @df_thermal_zones_temperature.used_events
    def thermal_zones(self):
        """
        Get the names of the thermal zones that appear in the trace
        """
        df = self.df_thermal_zones_temperature()
        return df["thermal_zone"].unique().tolist()

    @property
    @memoized
    @df_cpufreq_cooling_state.used_events
    def cpufreq_cdevs(self):
        """
        Get the cpufreq cooling devices that appear in the trace, each one
        expressed as the list of CPUs it covers
        """
        df = self.df_cpufreq_cooling_state()
        res = df['cpus'].unique().tolist()
        # Each device is identified by a CPU bitmask; expand to CPU lists.
        return [mask_to_list(mask) for mask in res]

    @property
    @memoized
    @df_devfreq_cooling_state.used_events
    def devfreq_cdevs(self):
        """
        Get the devfreq cooling device names that appear in the trace
        """
        df = self.df_devfreq_cooling_state()
        return df['type'].unique().tolist()

###############################################################################
# Plotting Methods
###############################################################################

    @TraceAnalysisBase.plot_method
    @df_thermal_zones_temperature.used_events
    def plot_thermal_zone_temperature(self, thermal_zone_id: int):
        """
        Plot the temperature evolution of the given thermal zone

        :param thermal_zone_id: ID of the zone
        :type thermal_zone_id: int
        """
        window = self.trace.window

        df = self.df_thermal_zones_temperature()
        df = df[df['id'] == thermal_zone_id]
        # Clip/extend the index so the plot spans the trace window.
        df = df_refit_index(df, window=window)

        # All rows of a given zone id carry the same zone name.
        tz_name = df.thermal_zone.unique()[0]

        return plot_signal(
            series_refit_index(df['temp'], window=window),
            name=f'Thermal zone "{tz_name}"',
        ).options(
            title='Temperature evolution',
            ylabel='Temperature (°C.10e3)'
        )

    @TraceAnalysisBase.plot_method
    @df_cpufreq_cooling_state.used_events
    def plot_cpu_cooling_states(self, cpu: CPU):
        """
        Plot the state evolution of a cpufreq cooling device

        :param cpu: The CPU. Whole clusters can be controlled as
          a single cooling device, they will be plotted as long this CPU
          belongs to the cluster.
        :type cpu: int
        """
        window = self.trace.window

        df = self.df_cpufreq_cooling_state([cpu])
        df = df_refit_index(df, window=window)
        series = series_refit_index(df['cdev_state'], window=window)

        # Label the plot with the full CPU list of the cooling device,
        # which may be wider than the single requested CPU.
        cdev_name = f"CPUs {mask_to_list(df.cpus.unique()[0])}"

        return plot_signal(
            series,
            name=cdev_name,
        ).options(
            title='cpufreq cooling devices status'
        )

    # NOTE(review): unlike the other plot methods, this one is not
    # decorated with @df_devfreq_cooling_state.used_events — confirm
    # whether that omission is intentional.
    @TraceAnalysisBase.plot_method
    def plot_dev_freq_cooling_states(self, device: str):
        """
        Plot the state evolution of a devfreq cooling device

        :param device: The devfreq devices to consider
        :type device: str
        """
        df = self.df_devfreq_cooling_state([device])
        df = df_refit_index(df, window=self.trace.window)

        return plot_signal(
            df['cdev_state'],
            name=f'Device "{device}"',
        ).options(
            title='devfreq cooling devices status'
        )

###############################################################################
# Utility Methods
###############################################################################

    def _matching_masks(self, cpus):
        """
        Return the CPU bitmasks seen in the trace that intersect *cpus*.

        :param cpus: The CPUs of interest
        :type cpus: list(int)
        """
        df = self.trace.df_event('thermal_power_cpu_limit')

        global_mask = list_to_mask(cpus)
        cpumasks = df['cpus'].unique().tolist()
        # Keep any mask sharing at least one CPU with the requested set.
        return [m for m in cpumasks if m & global_mask]
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
| {
"content_hash": "f969265654cd5af9391e57525da89f8b",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 79,
"avg_line_length": 31.839195979899497,
"alnum_prop": 0.5694444444444444,
"repo_name": "ARM-software/lisa",
"id": "beaa818cf7296e471a0c0348f2721ed9e02f2b92",
"size": "6975",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lisa/analysis/thermal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "68635"
},
{
"name": "Jupyter Notebook",
"bytes": "60193313"
},
{
"name": "Makefile",
"bytes": "6176"
},
{
"name": "Perl",
"bytes": "6106"
},
{
"name": "Python",
"bytes": "2337042"
},
{
"name": "Shell",
"bytes": "108802"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.