Dataset schema (column name, type, and value range; nullable columns are marked "nullable"):

| column | type / range |
|---|---|
| hexsha | stringlengths 40..40 |
| size | int64, 4 .. 1.02M |
| ext | stringclasses, 8 values |
| lang | stringclasses, 1 value |
| max_stars_repo_path | stringlengths 4..209 |
| max_stars_repo_name | stringlengths 5..121 |
| max_stars_repo_head_hexsha | stringlengths 40..40 |
| max_stars_repo_licenses | listlengths 1..10 |
| max_stars_count | int64, 1 .. 191k, nullable |
| max_stars_repo_stars_event_min_datetime | stringlengths 24..24, nullable |
| max_stars_repo_stars_event_max_datetime | stringlengths 24..24, nullable |
| max_issues_repo_path | stringlengths 4..209 |
| max_issues_repo_name | stringlengths 5..121 |
| max_issues_repo_head_hexsha | stringlengths 40..40 |
| max_issues_repo_licenses | listlengths 1..10 |
| max_issues_count | int64, 1 .. 67k, nullable |
| max_issues_repo_issues_event_min_datetime | stringlengths 24..24, nullable |
| max_issues_repo_issues_event_max_datetime | stringlengths 24..24, nullable |
| max_forks_repo_path | stringlengths 4..209 |
| max_forks_repo_name | stringlengths 5..121 |
| max_forks_repo_head_hexsha | stringlengths 40..40 |
| max_forks_repo_licenses | listlengths 1..10 |
| max_forks_count | int64, 1 .. 105k, nullable |
| max_forks_repo_forks_event_min_datetime | stringlengths 24..24, nullable |
| max_forks_repo_forks_event_max_datetime | stringlengths 24..24, nullable |
| content | stringlengths 4 .. 1.02M |
| avg_line_length | float64, 1.07 .. 66.1k |
| max_line_length | int64, 4 .. 266k |
| alphanum_fraction | float64, 0.01 .. 1 |
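Each record below lists these columns, followed by the file content and the per-file statistics. As a minimal, hypothetical sketch of working with this schema (the file name "code_rows.jsonl" is an assumption, not given here), one way to iterate such records in Python:

import json

# Hypothetical: iterate a JSONL export of this table and print a few columns.
with open("code_rows.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        print(row["hexsha"], row["max_stars_repo_path"],
              row["avg_line_length"], row["alphanum_fraction"])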
hexsha: 6a762fb2592438b205199d3906152892bcd08d42 | size: 6,617 | ext: py | lang: Python
max_stars: cortex/rois.py in pulkitag/pycortex @ f329dccbe3f04dc4556b930f284bcddd760fc76b, licenses ["BSD-2-Clause"], stars: 1 (2021-03-08T22:52:52.000Z to 2021-03-08T22:52:52.000Z)
max_issues: cortex/rois.py in candleinwindsteve/pycortex @ 988dc996a33f16208537a2e8ffe14e2ae3ea6647, licenses ["BSD-2-Clause"], issues: null
max_forks: cortex/rois.py in candleinwindsteve/pycortex @ 988dc996a33f16208537a2e8ffe14e2ae3ea6647, licenses ["BSD-2-Clause"], forks: null
content:
import os
import tempfile
import binascii
import cStringIO
import numpy as np
import networkx as nx
from db import surfs
from svgroi import get_roipack, _make_layer, _find_layer, parser
from lxml import etree
from dataset import VertexData
from polyutils import Surface, boundary_edges
from utils import get_curvature, add_roi
import quickflat
class ROIpack(object):
def __init__(self, subject, roifile):
self.subject = subject
self.roifile = roifile
self.rois = {}
self.load_roifile()
def load_roifile(self):
"""Load ROI definitions from self.roifile.
"""
# Check if file exists
if not os.path.exists(self.roifile):
print("ROI file %s doesn't exist.." % self.roifile)
return
# Create basic VertexData to avoid expensive initialization..
empty = VertexData(None, self.subject)
# Load ROIs from file
if self.roifile.endswith("npz"):
roidata = np.load(self.roifile)
for roi in roidata.keys():
self.rois[roi] = empty.copy(roidata[roi])
roidata.close()
elif self.roifile.endswith("svg"):
pts, polys = surfs.getSurf(self.subject, "flat", merge=True, nudge=True)
npts = len(pts)
svgroipack = get_roipack(self.roifile, pts, polys)
for name in svgroipack.names:
roimask = np.zeros((npts,))
roimask[svgroipack.get_roi(name)] = 1
self.rois[name] = empty.copy(roimask)
elif self.roifile.endswith("hf5"):
raise NotImplementedError
else:
raise ValueError("Don't understand ROI filetype: %s" % self.roifile)
def to_npz(self, filename):
"""Saves npz file containing ROI masks.
"""
roidata = dict([(name,vd.data) for name,vd in self.rois.iteritems()])
np.savez(filename, **roidata)
def to_svg(self, open_inkscape=False, filename=None):
"""Generate SVG file from vertex ROI masks.
"""
# Generate temp filename if not provided
if filename is None:
filename = tempfile.mktemp(suffix=".svg", prefix=self.subject+"-rois-")
mpts, mpolys = surfs.getSurf(self.subject, "flat", merge=True, nudge=True)
svgmpts = mpts[:,:2].copy()
svgmpts -= svgmpts.min(0)
svgmpts *= 1024 / svgmpts.max(0)[1]
svgmpts[:,1] = 1024 - svgmpts[:,1]
npts = len(mpts)
svgroipack = get_roipack(filename, mpts, mpolys)
# Add default layers
# Add curvature
from matplotlib import cm
curv = VertexData(np.hstack(get_curvature(self.subject)), self.subject)
fp = cStringIO.StringIO()
curvim = quickflat.make_png(fp, curv, height=1024, with_rois=False, with_labels=False,
with_colorbar=False, cmap=cm.gray,recache=True)
fp.seek(0)
svgroipack.add_roi("curvature", binascii.b2a_base64(fp.read()), add_path=False)
# Add thickness
# Add ROI boundaries
svg = etree.parse(svgroipack.svgfile, parser=parser)
# Find boundary vertices for each ROI
lsurf, rsurf = [Surface(*pp) for pp in surfs.getSurf(self.subject, "fiducial")]
flsurf, frsurf = [Surface(*pp) for pp in surfs.getSurf(self.subject, "flat")]
valids = [set(np.unique(flsurf.polys)), set(np.unique(frsurf.polys))]
# Construct polygon adjacency graph for each surface
polygraphs = [lsurf.poly_graph, rsurf.poly_graph]
for roi in self.rois.keys():
print "Adding %s.." % roi
masks = self.rois[roi].left, self.rois[roi].right
mmpts = svgmpts[:len(masks[0])], svgmpts[len(masks[0]):]
roilayer = _make_layer(_find_layer(svg, "rois"), roi)
for valid, pgraph, surf, mask, mmp in zip(valids, polygraphs,
[lsurf, rsurf], masks, mmpts):
if mask.sum() == 0:
continue
# Find bounds
inbound, exbound = surf.get_boundary(np.nonzero(mask)[0])
# Find polys
allbpolys = np.unique(surf.connected[inbound+exbound].indices)
selbpolys = surf.polys[allbpolys]
inpolys = np.in1d(selbpolys, inbound).reshape(selbpolys.shape)
expolys = np.in1d(selbpolys, exbound).reshape(selbpolys.shape)
badpolys = np.logical_or(inpolys.all(1), expolys.all(1))
boundpolys = np.logical_and(np.logical_or(inpolys, expolys).all(1), ~badpolys)
# Walk around boundary
boundpolyinds = set(allbpolys[np.nonzero(boundpolys)[0]])
bgraph = nx.Graph()
pos = dict()
for pa in boundpolyinds:
for pb in set(pgraph[pa]) & boundpolyinds:
edge = pgraph[pa][pb]["verts"]
validverts = list(valid & edge)
pos[edge] = mmp[validverts].mean(0)
bgraph.add_edge(*edge)
cc = nx.cycles.cycle_basis(bgraph)
if len(cc) > 1:
edges = reduce(set.symmetric_difference,
[set(map(lambda l:tuple(sorted(l)), zip(c, c[1:]+[c[0]]))) for c in cc])
eg = nx.from_edgelist(edges)
cycles = nx.cycles.cycle_basis(eg)
longest = np.argmax(map(len, cycles))
path_order = cycles[longest]
else:
path_order = cc[0]
path_points = [tuple(pos[frozenset(p)]) for p in zip(path_order[:-1],
path_order[1:])]
# Store poly
path = "M %f %f L" % tuple(path_points[0])
path += ", ".join(["%f %f"%p for p in path_points[1:]])
path += "Z "
# Insert into SVG
svgpath = etree.SubElement(roilayer, "path")
svgpath.attrib["style"] = "fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opactiy:1"
svgpath.attrib["d"] = path
#svgpath.attrib["sodipodi:nodetypes"] = "c" * len(pts)
with open(svgroipack.svgfile, "w") as xml:
xml.write(etree.tostring(svg, pretty_print=True))
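As a usage note (not part of the original file), a minimal sketch of how the ROIpack class above might be used; the subject name and file paths are placeholders:

# Hedged usage sketch (Python 2, matching the module above); "S1" and the
# paths below are placeholders, not values taken from the source.
rois = ROIpack("S1", "/path/to/S1-rois.npz")    # load per-vertex ROI masks
rois.to_npz("/tmp/S1-rois-copy.npz")            # write the masks back out as npz
rois.to_svg(filename="/tmp/S1-rois.svg")        # trace ROI boundaries into an SVG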
avg_line_length: 41.099379 | max_line_length: 144 | alphanum_fraction: 0.548889

hexsha: cdb5d1399d4f941bb01e2bf44f49534692f79af4 | size: 1,676 | ext: py | lang: Python
max_stars: scripts/view_shield_images.py in charlesblakemore/sem_processing @ 1ec8ac4b3eff4886f7a86529c590eed7cc15cbba, licenses ["MIT"], stars: null
max_issues: scripts/view_shield_images.py in charlesblakemore/sem_processing @ 1ec8ac4b3eff4886f7a86529c590eed7cc15cbba, licenses ["MIT"], issues: null
max_forks: scripts/view_shield_images.py in charlesblakemore/sem_processing @ 1ec8ac4b3eff4886f7a86529c590eed7cc15cbba, licenses ["MIT"], forks: null
content:
import sys, os, time, re
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as opti
from bead_util import find_all_fnames
import sem_util as su
img_dir = '/Users/manifestation/Stanford/beads/photos/sem/20191014_shield_v2_pc1_second-HF-clean/'
max_file = 1000
substr = '2000x_40deg.tif'
devlist = [#'/dev4/', \
#'/dev7/', \
#'/dev9/', \
#'/dev23/', \
#'/dev29/', \
#'/dev37/', \
#'/dev47/', \
#'/dev49/', \
#'/dev55/', \
'/dev57/', \
#'/dev64/', \
#'/dev65/', \
#'/dev67/', \
#'/dev76/', \
]
with_info = True
show_after = True
filenames, _ = find_all_fnames(img_dir, ext='.tif', substr=substr)
filenames.sort(key = su.get_devnum)
if len(devlist):
bad_inds = []
for fileind, filename in enumerate(filenames):
found = False
for dev in devlist:
if dev in filename:
found = True
break
if not found:
bad_inds.append(fileind)
for ind in bad_inds[::-1]:
filenames.pop(ind)
axes = []
for fileind, filename in enumerate(filenames[:max_file]):
devind = su.get_devnum(filename)
imgobj = su.SEMImage()
imgobj.load(filename)
#imgobj.calibrate(plot=False)
fig, ax = plt.subplots(1,1,figsize=(8,8))
if with_info:
ax.imshow(imgobj.full_img_arr, cmap='gray')
else:
ax.imshow(imgobj.img_arr, cmap='gray')
ax.set_title('Device {:d}'.format(devind))
fig.tight_layout()
if not show_after:
plt.show()
if show_after:
plt.show()
avg_line_length: 20.691358 | max_line_length: 98 | alphanum_fraction: 0.554893

hexsha: d90bfcd24eba882c1801d443caa84c56dab64b75 | size: 1,379 | ext: py | lang: Python
max_stars: 2021/d5p1.py in zmerlynn/advent-of-code @ 20daa2df1ff57fff89fc88ea778cae59755424e3, licenses ["BSD-3-Clause"], stars: null
max_issues: 2021/d5p1.py in zmerlynn/advent-of-code @ 20daa2df1ff57fff89fc88ea778cae59755424e3, licenses ["BSD-3-Clause"], issues: null
max_forks: 2021/d5p1.py in zmerlynn/advent-of-code @ 20daa2df1ff57fff89fc88ea778cae59755424e3, licenses ["BSD-3-Clause"], forks: 1 (2021-12-25T00:12:44.000Z to 2021-12-25T00:12:44.000Z)
content:
#!/usr/bin/python3
import collections
import fileinput
import numpy as np
import re
import sys
def solve(inp):
lines = []
max_x, max_y = 0, 0
for line in inp:
p1, p2 = re.split('\s+->\s', line.strip())
p1, p2 = point(p1), point(p2)
if p1[0] != p2[0] and p1[1] != p2[1]:
continue
max_x = int(max(max_x, p1[0], p2[0]))
max_y = int(max(max_y, p1[1], p2[1]))  # was max(max_x, ...): track the running y maximum
lines.append((p1, p2, (p2-p1)/np.linalg.norm(p2-p1)))
touching = [ [ 0 for y in range(max_y+1) ] for x in range(max_x+1) ]  # dimensions match the touching[x][y] indexing below
for p1, p2, shift in lines:
pos = p1
last_touch = np.array([-1, -1])
stop = False
while True:
if not same(pos, last_touch):
touching[int(pos[0])][int(pos[1])] += 1
last_touch = np.copy(pos)
if stop:
break
pos += shift
if same(pos, p2):
stop = True
more_than_one = sum([ sum([ 1 if touching[x][y] > 1 else 0 for x in range(max_x+1) ]) for y in range(max_y+1) ])
print(more_than_one)
def point(s):
x, y = s.split(',')
x, y = float(x), float(y)
return np.array([x, y])
def round(p):
return np.array(p, dtype=int)
def same(p1, p2):
return np.array_equal(round(p1), round(p2))
if __name__ == "__main__":
solve(fileinput.input(sys.argv[1]))
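For reference (not part of the original script), a hedged sketch of the input format this solver parses; the coordinates are made up:

# Illustrative only: each input line has the form "x1,y1 -> x2,y2"; this part
# of the puzzle keeps only horizontal and vertical segments.
#
#   0,9 -> 5,9
#   8,0 -> 0,8      (diagonal, skipped in part 1)
#   3,4 -> 1,4
#
# Hypothetical invocation: python3 d5p1.py input.txt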
avg_line_length: 25.537037 | max_line_length: 116 | alphanum_fraction: 0.524293

hexsha: de857c3819b6b326fb3d453eab5b2f6cb8ff999d | size: 3,280 | ext: py | lang: Python
max_stars: waymo_open_dataset/utils/test_utils.py in DKandrew/waymo-open-dataset @ b2b2fc8f06ed6801aec1ea2d406d559bff08b6b5, licenses ["Apache-2.0"], stars: 1,814 (2019-08-20T18:30:38.000Z to 2022-03-31T04:14:51.000Z)
max_issues: waymo_open_dataset/utils/test_utils.py in meyerjo/waymo-open-dataset @ 655c1fddb0071ab24544eb54c6690f09d7a9a1c3, licenses ["Apache-2.0"], issues: 418 (2019-08-20T22:38:02.000Z to 2022-03-31T07:51:15.000Z)
max_forks: waymo_open_dataset/utils/test_utils.py in meyerjo/waymo-open-dataset @ 655c1fddb0071ab24544eb54c6690f09d7a9a1c3, licenses ["Apache-2.0"], forks: 420 (2019-08-21T10:59:06.000Z to 2022-03-31T08:31:44.000Z)
content:
# Copyright 2019 The Waymo Open Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from waymo_open_dataset.utils import transform_utils
__all__ = ['generate_boxes', 'generate_range_image', 'generate_extrinsic']
def generate_boxes(center_x, batch=1):
"""Generates unit size boxes centered at (center_x, 0.0, 2.0).
Args:
center_x: center_x for each box
batch: batch size
Returns:
boxes: [batch, len(center_x) + 5, 7]. Each box has unit size, zero heading.
They are centered at (center_x, 0.0, 2.0).
box_classes: [batch, len(center_x) + 5]. The types are random.
num_boxes: [batch]. Number of boxes for each batch dim.
"""
center_y = np.zeros_like(center_x)
center_z = np.ones_like(center_x) * 2.0
heading = np.zeros_like(center_x)
dim = np.ones_like(center_x)
# [len(center_x), 7]
boxes = np.stack([center_x, center_y, center_z, dim, dim, dim, heading],
axis=-1)
box_padding = np.zeros([5, 7])
boxes = np.concatenate([boxes, box_padding], axis=0)
boxes = np.tile(np.expand_dims(boxes, axis=0), [batch, 1, 1])
box_classes = np.tile(
np.expand_dims(np.random.randint(1, 5, size=[len(center_x) + 5]), axis=0),
[batch, 1])
num_boxes = np.ones([batch]) * len(center_x)
return (tf.convert_to_tensor(value=boxes, dtype=tf.float32),
tf.convert_to_tensor(value=box_classes, dtype=tf.uint8),
tf.convert_to_tensor(value=num_boxes, dtype=tf.int32))
def generate_range_image(indices, values, shape, batch=1):
"""Generate range images by scattering values to indices.
Args:
indices: [N, 2] indices.
values: [N, ...] values.
shape: [3].
batch: batch indices, single integer.
Returns:
range_image: [batch, shape[...]]
"""
multiples = tf.concat(
[tf.constant([batch]),
tf.ones([len(shape)], dtype=tf.int32)], axis=-1)
return tf.tile(
tf.expand_dims(tf.scatter_nd(indices, values, shape), axis=0), multiples)
def generate_extrinsic(yaw, pitch, roll, translation, batch=1):
"""Generates extrinsic.
Args:
yaw: scalar tensor
pitch: scalar tensor
roll: scalar tensor
translation: [3] tensor
batch: integer
Returns:
[batch, 4, 4] tensor
"""
rotation_matrix = transform_utils.get_rotation_matrix(roll, pitch, yaw)
return tf.tile(
tf.expand_dims(
transform_utils.get_transform(
rotation_matrix, tf.constant(translation, dtype=tf.float32)),
axis=0), [batch, 1, 1])
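A brief usage sketch of the helpers above (not from the source; the inputs are illustrative, and scalar Python floats are assumed to be acceptable where the docstrings say "scalar tensor"):

# Hedged usage sketch; the shapes follow the docstrings above.
boxes, box_classes, num_boxes = generate_boxes(np.array([1.0, 5.0]), batch=2)
# boxes: [2, 7, 7], box_classes: [2, 7], num_boxes: [2]
extrinsic = generate_extrinsic(
    yaw=0.1, pitch=0.0, roll=0.0, translation=[1.0, 2.0, 0.5], batch=2)
# extrinsic: [2, 4, 4]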
avg_line_length: 30.943396 | max_line_length: 80 | alphanum_fraction: 0.671646

hexsha: 0b1b21b3c2c1df1dc776f62faaed23d6f2ce353f | size: 3,299 | ext: py | lang: Python
max_stars: fibo/protocols/wallet_protocol.py in Fibo-Network/fibo-blockchain @ 34471efc081a52443e874749bb8ea3dc50b59891, licenses ["Apache-2.0"], stars: null
max_issues: fibo/protocols/wallet_protocol.py in Fibo-Network/fibo-blockchain @ 34471efc081a52443e874749bb8ea3dc50b59891, licenses ["Apache-2.0"], issues: null
max_forks: fibo/protocols/wallet_protocol.py in Fibo-Network/fibo-blockchain @ 34471efc081a52443e874749bb8ea3dc50b59891, licenses ["Apache-2.0"], forks: null
content:
from dataclasses import dataclass
from typing import List, Optional, Tuple
from fibo.types.blockchain_format.coin import Coin
from fibo.types.blockchain_format.program import Program
from fibo.types.blockchain_format.sized_bytes import bytes32
from fibo.types.header_block import HeaderBlock
from fibo.types.spend_bundle import SpendBundle
from fibo.util.ints import uint8, uint32, uint128
from fibo.util.streamable import Streamable, streamable
"""
Protocol between wallet (SPV node) and full node.
Note: When changing this file, also change protocol_message_types.py, and the protocol version in shared_protocol.py
"""
@dataclass(frozen=True)
@streamable
class RequestPuzzleSolution(Streamable):
coin_name: bytes32
height: uint32
@dataclass(frozen=True)
@streamable
class PuzzleSolutionResponse(Streamable):
coin_name: bytes32
height: uint32
puzzle: Program
solution: Program
@dataclass(frozen=True)
@streamable
class RespondPuzzleSolution(Streamable):
response: PuzzleSolutionResponse
@dataclass(frozen=True)
@streamable
class RejectPuzzleSolution(Streamable):
coin_name: bytes32
height: uint32
@dataclass(frozen=True)
@streamable
class SendTransaction(Streamable):
transaction: SpendBundle
@dataclass(frozen=True)
@streamable
class TransactionAck(Streamable):
txid: bytes32
status: uint8 # MempoolInclusionStatus
error: Optional[str]
@dataclass(frozen=True)
@streamable
class NewPeakWallet(Streamable):
header_hash: bytes32
height: uint32
weight: uint128
fork_point_with_previous_peak: uint32
@dataclass(frozen=True)
@streamable
class RequestBlockHeader(Streamable):
height: uint32
@dataclass(frozen=True)
@streamable
class RespondBlockHeader(Streamable):
header_block: HeaderBlock
@dataclass(frozen=True)
@streamable
class RejectHeaderRequest(Streamable):
height: uint32
@dataclass(frozen=True)
@streamable
class RequestRemovals(Streamable):
height: uint32
header_hash: bytes32
coin_names: Optional[List[bytes32]]
@dataclass(frozen=True)
@streamable
class RespondRemovals(Streamable):
height: uint32
header_hash: bytes32
coins: List[Tuple[bytes32, Optional[Coin]]]
proofs: Optional[List[Tuple[bytes32, bytes]]]
@dataclass(frozen=True)
@streamable
class RejectRemovalsRequest(Streamable):
height: uint32
header_hash: bytes32
@dataclass(frozen=True)
@streamable
class RequestAdditions(Streamable):
height: uint32
header_hash: bytes32
puzzle_hashes: Optional[List[bytes32]]
@dataclass(frozen=True)
@streamable
class RespondAdditions(Streamable):
height: uint32
header_hash: bytes32
coins: List[Tuple[bytes32, List[Coin]]]
proofs: Optional[List[Tuple[bytes32, bytes, Optional[bytes]]]]
@dataclass(frozen=True)
@streamable
class RejectAdditionsRequest(Streamable):
height: uint32
header_hash: bytes32
@dataclass(frozen=True)
@streamable
class RequestHeaderBlocks(Streamable):
start_height: uint32
end_height: uint32
@dataclass(frozen=True)
@streamable
class RejectHeaderBlocks(Streamable):
start_height: uint32
end_height: uint32
@dataclass(frozen=True)
@streamable
class RespondHeaderBlocks(Streamable):
start_height: uint32
end_height: uint32
header_blocks: List[HeaderBlock]
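A minimal construction sketch for one of the message classes above (not from the source; the coin name and height are placeholder values):

# Hedged sketch: build one wallet-to-full-node request message.
req = RequestPuzzleSolution(coin_name=bytes32(b"\x00" * 32), height=uint32(1))
# Streamable dataclasses like this one are the messages exchanged between the
# wallet (SPV node) and the full node, per the module docstring above.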
avg_line_length: 21.147436 | max_line_length: 116 | alphanum_fraction: 0.776902

hexsha: 7ef0ba2ab96a01aef4994d5de747605120527114 | size: 1,913 | ext: py | lang: Python
max_stars: deploy/text_classification/Predictor.py in amirgholipour/mlops_project @ ddd88886c4d887b756c79973ea5524660a2c82e1, licenses ["BSD-3-Clause"], stars: null
max_issues: deploy/text_classification/Predictor.py in amirgholipour/mlops_project @ ddd88886c4d887b756c79973ea5524660a2c82e1, licenses ["BSD-3-Clause"], issues: null
max_forks: deploy/text_classification/Predictor.py in amirgholipour/mlops_project @ ddd88886c4d887b756c79973ea5524660a2c82e1, licenses ["BSD-3-Clause"], forks: null
content:
import tensorflow as tf
import joblib
import numpy as np
import json
import traceback
import sys
import os
class Predictor(object):
def __init__(self):
self.loaded = False
def load(self):
print("Loading model",os.getpid())
self.model = tf.keras.models.load_model('model.h5', compile=False)
self.labelencoder = joblib.load('labelencoder.pkl')
self.loaded = True
print("Loaded model")
def predict(self, X,features_names):
# data = request.get("data", {}).get("ndarray")
# mult_types_array = np.array(data, dtype=object)
print ('step1......')
print(X)
X = tf.constant(X)
print ('step2......')
print(X)
if not self.loaded:
self.load()
# result = self.model.predict(X)
try:
result = self.model.predict(X)
except Exception as e:
print(traceback.format_exception(*sys.exc_info()))
raise # reraises the exception
print ('step3......')
result = tf.sigmoid(result)
print ('step4......')
print(result)
result = tf.math.argmax(result,axis=1)
print ('step5......')
print(result)
print(result.shape)
print(self.labelencoder.inverse_transform(result))
print ('step6......')
return json.dumps(result.numpy(), cls=JsonSerializer)
class JsonSerializer(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (
np.int_, np.intc, np.intp, np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64)):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
return float(obj)
elif isinstance(obj, (np.ndarray,)):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
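The JsonSerializer class above exists so NumPy scalars and arrays survive json.dumps; a small standalone check (values are illustrative, not from the source):

# Illustrative check: NumPy ints, floats and arrays are converted to plain
# Python types by JsonSerializer.default before JSON encoding.
payload = {"pred": np.int64(3), "score": np.float32(0.5), "raw": np.arange(3)}
print(json.dumps(payload, cls=JsonSerializer))   # {"pred": 3, "score": 0.5, "raw": [0, 1, 2]}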
avg_line_length: 31.360656 | max_line_length: 118 | alphanum_fraction: 0.576581

hexsha: 46bda75ba178ea92567ed3a30dbcd16d090831c2 | size: 27,188 | ext: py | lang: Python
max_stars: dbisshit.py in kaplanski/DBISshit @ 60c7c3c8fd13462c24b627539332917e2a268f16, licenses ["MIT"], stars: null
max_issues: dbisshit.py in kaplanski/DBISshit @ 60c7c3c8fd13462c24b627539332917e2a268f16, licenses ["MIT"], issues: null
max_forks: dbisshit.py in kaplanski/DBISshit @ 60c7c3c8fd13462c24b627539332917e2a268f16, licenses ["MIT"], forks: null
content:
#!/usr/bin/env python -B
#DBISshit! 0.1b - the python app that mimics a database
#Copyright (c) 2018 Jan-Daniel Kaplanski
###-------imports------###
from subprocess import call
from datetime import datetime
from os import name as osname
from os import chmod
from os.path import exists
from time import sleep
from sys import argv
from sys import stdin
from io import open
from stat import *
import re
import gzip
from sys import path
###-------imports------###
###--------caches-----####
global CACHE_GC; CACHE_GC = []
global CACHE_GC_LST; CACHE_GC_LST = []
###--------caches-----####
###----helpful-stuff---###
def iprint(arg): print "[IMEM] " + str(arg)
def hprint(arg): print "[HELP] " + str(arg)
def mprint(arg): print "[MAIN] " + str(arg)
def cprint(arg): print "[CLOG] " + str(arg)
global EXT; EXT = ".dbis"
global changes; changes = []
global commited_changes; commited_changes = []
global OP_CNT; OP_CNT = 1
global HDR_FAIL; HDR_FAIL = False
global HTOW_FAIL; HTOW_FAIL = False
global EX_HTML; EX_HTML = False
global EX_XML; EX_XML = False
global DBG_FNC; DBG_FNC = True
global EX_DIS; EX_DIS = False
global CPR_EN; CPR_EN = False
global CPR_M1; CPR_M1 = False
global CPR_M2; CPR_M2 = False
if EX_DIS == False:
if exists(path[0] + "/dbistohtml.py"):
EX_HTML = True
import dbistohtml as tohtml
else: mprint("dbistohtml not found! HTML export disabled!")
if exists(path[0] + "/dbistoxml.py"):
EX_XML = True
import dbistoxml as toxml
else: mprint("dbistoxml not found! XML export disabled!")
else: raw_input("[DBUG] Export function disabled! Press return to continue..."); stdin.flush()
try:
dtn = datetime.now().strftime("%H:%M").split(":")
dtu = datetime.utcnow().strftime("%H:%M").split(":")
if datetime.now().day != datetime.utcnow().day: hrs = int(dtn[0]) + 24
else: hrs = int(dtn[0])
dif_h = hrs - int(dtu[0])
dif_m = int(dtn[1]) - int(dtu[1])
if dif_h < 0: dif_h = abs(dif_h); dif = "-"
else: dif = "+"
dif = dif + str(dif_h).zfill(2) + str(dif_m).zfill(2) + "UTC"
except: mprint("ERR - TIME_DIF_CALC_FAILED")
def gettime():
try: return str(datetime.now().replace(microsecond=0).isoformat())
except: return "ERR - GETTIME_FAILED"
commited_changes.append("Log of " + gettime() + dif)
###----helpful-stuff---###
###------commands------###
def cls():
try:
if osname == "nt": call(["cls"])
elif osname == "posix": call(["clear"])
else: mprint("Not supported by this os!")
except: mprint("ERR - CLS_FAILED")
def importdb(db):
global im_fail; im_fail = 0
global inputdb
global db_lines
try:
if exists(db):
if CPR_EN == False: inputdb = open(db, mode='rb').read()
elif CPR_EN == True: inputdb = gzip.open(db, mode='rb').read()
db_lines = inputdb.split("\n")
while db_lines.count('') > 0: db_lines.remove("")
else: mprint("ERR - FILE_NON_EXISTANT"); exit(127)
except: mprint("Failed to open input file!"); im_fail = 42
def readln(ln):
global headerln
try:
htow(db_lines[ln])
return db_lines[ln][len(workln[0])+2:-1]
except: mprint("ERR - READ_LN_FAILED")
def writeln(ln, val, silent = False):
global changes
global db_lines
try:
htow(db_lines[ln])
db_lines[ln] = "{" + workln[0] + ":" + str(val) + "}"
changes.append("Line: " + str(ln) + ", Value: " + str(val))
if silent == False: mprint(""); showln(ln)
except: False
def changelog():
global changes
global commited_changes
try:
cprint("Pending changes:")
for i in range(0, len(changes)): cprint(changes[i])
cprint("")
cprint("Commited changes:")
for j in range(1, len(commited_changes)): cprint(commited_changes[j])
except: mprint("ERR - CHANGELOG_FAILED")
def commit(silent=False):
global changes
global commited_changes
global inputdb
global db_lines
try:
outputdb=""
commited_changes.extend(changes)
changes=[]
if silent == False: changelog()
for i in range(0, len(db_lines)): outputdb = outputdb + db_lines[i] + "\n"
inputdb = outputdb
db_lines = inputdb.split("\n")
while db_lines.count('') > 0: db_lines.remove("")
except: mprint("ERR - COMMIT_FAILED")
def offscorr(ln, silent=False):
try:
if silent == False:
mprint("")
mprint("ERR - LINE_DEFECT")
mprint("ERR at line " + str(ln))
mprint("correcting...")
offs_wrln = db_lines[0].count(":") - db_lines[ln].count(":")
if silent == False: mprint("offset is " + str(offs_wrln))
to_write = db_lines[ln][len(workln[0])+2:-1]
while offs_wrln > 0:
to_write = to_write + ":"
offs_wrln = offs_wrln - 1
writeln(ln, to_write, True)
if silent == False:
mprint("Done!")
mprint("")
except: mprint("ERR - OFFS_CORR_FAILED")
def showdb():
try:
mprint("")
for i in range(1, len(db_lines)): showln(i); mprint("")
except: mprint("ERR - SHOW_DB_FAILED")
def showln(ln):
global HTOW_FAIL
global headerln
global workln
htow(db_lines[ln])
try:
if HTOW_FAIL != True:
for i in range(0, len(headerln)): iprint(headerln[i] + ": " + workln[i].replace("&dpp;", ":"))
elif HTOW_FAIL == True: mprint("Line not read due to errors!"); HTOW_FAIL = False
except: offscorr(ln)
def writeback(db, dbwb):
try:
if exists(db): chmod(db, S_IREAD|S_IWRITE)
if CPR_EN == False: open(db, mode='wb').write(dbwb)
elif CPR_EN == True: gzip.open(db, mode='wb').write(dbwb)
mprint("Writeback to " + str(db) + " sucessfull!")
except: mprint("Failed to write to " + str(db) + "!")
def new(val, cpr):
newdb = "[ID:" + str(val) + "]\n"
if not argv[2].endswith(EXT) and not argv[2].endswith(EXT + ".gz"): db = argv[2] + EXT
try:
if cpr == "n": open(db, mode='wb').write(newdb)
elif cpr == "y": db = db + ".gz"; gzip.open(db, mode='wb').write(newdb)
mprint("Database " + db + " created sucessfully!")
except: mprint("Failed to create " + db + "!")
def adfln(val):
global changes
global db_lines
try:
htow(db_lines[len(db_lines)-1])
try:
nid = int(workln[0]) + 1
newln = "{" + str(nid).zfill(len(workln[0])) + ":" + str(val) + "}"
except:
nid = 1
nid_fill = input("[MAIN] Enter fillup for IDs: ")
newln = "{" + str(1).zfill(nid_fill) + ":" + str(val) + "}"
db_lines.append(newln)
changes.append("ID: " + str(nid) + ", Value: " + str(val))
mprint("")
commit(True)
showln(nid)
except: False
def addln():
global changes
global db_lines
try:
htow(db_lines[len(db_lines)-1])
nid = int(workln[0]) + 1
newln = "{" + str(nid).zfill(len(workln[0])) + ":}"
except:
nid = 1
nid_fill = input("[MAIN] Enter fillup for IDs: ")
newln = "{" + str(1).zfill(nid_fill) + ":}"
db_lines.append(newln)
changes.append("new ID: " + str(nid))
mprint("")
commit(True)
htow(db_lines[-1])
offscorr(-1, True)
for i in range(1, len(headerln)):
stdin.flush()
val = raw_input("[MAIN] " + headerln[i] + ": ").replace(":", "&dpp;")
changeln(-1, i, val)
commit()
def rmln(ln):
global db_lines
try:
showln(ln)
del_in = raw_input("Do you really want to delete this entry? [y/n]: ")
if del_in == "y":
db_lines.pop(ln)
changes.append("Line: " + str(ln) + ", DELETED")
commit()
elif del_in == "n": mprint("Aborting...")
else: mprint("Invalid answer! Returning...")
except: mprint("ERR - REMOVE_IMPOSSIBLE")
def id2ln(id):
id = str(id)
global db_lines
try:
for ln in range(1, len(db_lines)):
htow(db_lines[ln])
if workln[0] == id: return ln; break
except: mprint("ERR - NO_ID_MATCH")
def rmid(id):
try: ln = id2ln(id); rmln(ln)
except: mprint("ERR - REMOVE_BY_ID_FAILED")
def showid(id):
try: ln = id2ln(id); showln(ln)
except: mprint("ERR - SHOW_BY_ID_FAILED")
def readid(id):
try: ln = id2ln(id); return readln(ln)
except: mprint("ERR - READ_BY_ID_FAIL")
def writeid(id, val):
try: ln = id2ln(id); writeln(ln, val)
except: mprint("ERR - WRITE_IMPOSSIBLE")
def changeln(ln, item, val):
global changes
global db_lines
try:
htow(db_lines[ln])
workln[item] = str(val)
db_lines[ln] = "{" + workln[0] + ":"
for i in range(1, len(headerln)):
db_lines[ln] = db_lines[ln] + workln[i] + ":"
db_lines[ln] = db_lines[ln][:-1] + "}"
changes.append("Line|Item: " + str(ln) + "|" + str(item) + ", Value: " + str(val))
except: False
def changeid(id, item, val):
try: ln=id2ln(id); changeln(ln, item, val)
except: mprint("ERR - CHANGE_IMPOSSIBLE")
def rebuildheader():
try:
old_hlen = len(db_lines[0])
mprint("Current header: " + db_lines[0][len(headerln[0])+2:-1])
inA = raw_input("[RBHD] Append to current header? [y/n/a]: ")
if inA == "y":
inB = raw_input("[RBHD] Enter addtitional fields, seperator=colon: ")
db_lines[0] = db_lines[0][:-1] + ":" + str(inB) + "]"
print "[RBHD] New header: " + db_lines[0][1:-1]
elif inA == "n":
inC = raw_input("[RBHD] Redefine header? [y/n/a]: ")
if inC == "y":
inD = raw_input("[RBHD] Enter new header, sepreator=colon: ")
try: db_lines[0] = "[" + headerln[0] + ":" + str(inD) + "]"
except: db_lines[0] = "[ID:" + str(inD) + "]"
print "[RBHD] New header: " + db_lines[0][1:-1]
elif inC == "n": print "[8088] Contents of register 0x00002A unknown!"
elif inC == "a": mprint("Aborting...")
else: mprint("Invalid answer! Returning...")
elif inA == "a": mprint("Aborting...")
else: mprint("Invalid answer! Returning...")
get_header(db_lines[0])
commit()
except: mprint("ERR - REBUILD_HEADER_FAILED")
def mergedb(db1, db2):
global inputdb
global db_lines
global CPR_EN
mprint("Merge two databases into one")
mprint("")
try:
if CPR_M2 == True: CPR_EN = True
importdb(db2)
if CPR_EN == True: CPR_EN = False
if exists(db2): chmod(db2, S_IREAD|S_IWRITE)
second = inputdb
second_lines = db_lines
if CPR_M1 == True: CPR_EN = True
importdb(db1)
if CPR_EN == True: CPR_EN = False
get_header(db_lines[0])
htow(db_lines[-1])
lid = workln[0]
nid = int(lid)
if second_lines[0].find(str(db_lines[0][:-1])) != -1:
if second_lines[0] != db_lines[0]:
db_lines[0] = second_lines[0]
get_header(db_lines[0])
for cnt in range(1, len(db_lines)): offscorr(cnt, True)
commit()
mprint("New header commited!")
for i in range(1, len(second_lines)):
htow(second_lines[i])
inputdb = inputdb + "{" + str(nid+1).zfill(len(lid)) + second_lines[i][len(workln[0])+1:] + "\n"
nid = nid + 1
mprint("Merged " + str(db1) + " and " + str(db2) + "!")
writeback(str(db1.split(".")[0]) + "_" + str(db2.split(".")[0]) + "_merged" + EXT, inputdb)
else:
mprint("Manual header rebuild needed for " + str(db2))
mprint("ERR - HEADER_NOT_COMPATIBLE")
except: mprint("ERR - HEADER_NOT_COMPATIBLE")
def sortdb(report=0):
try:
global rep_out; rep_out = []
mprint("Available rows:")
for i in range(1, len(headerln)): print "[SORT] " + str(i) + ". " + headerln[i]
sby = input("[SORT] Enter item nr.: ")
buffer = []
for j in range(1, len(db_lines)): htow(db_lines[j]); buffer.append(workln[sby] + ":" + str(j))
buffer.sort()
for k in range(0, len(buffer)):
mprint("")
htow(db_lines[int(buffer[k].split(":")[1])])
for l in range(0, len(headerln)):
currln = headerln[l] + ": " + workln[l].replace("&dpp;",":")
iprint(currln)
if report == 1:
rep_out.append(currln)
if l == 4: rep_out.append("\n")
if report == 1: rep_out.append('')
if report == 1:
out_rep = "Sorted by: " + str(headerln[sby]) + "\n\n"
for m in range(0, len(rep_out)-1):
if rep_out[m] != "\n": out_rep = out_rep + rep_out[m] + "\n"
writeback(str(argv[1]).replace(EXT, '') + "_report.txt", "Report of " + gettime() + dif + "\n" + out_rep)
mprint("")
except: mprint("ERR - SORT_DB_FAILED")
def export(format, cset="utf-8"):
if format == "HTML":
if EX_HTML == True:
try:
mprint("Calling external module...")
tohtml.embed()
except: mprint("ERR - EXPORT_HTML_FAILED")
else: mprint("HTML Export is disabled due to dbistohtml.py not found")
elif format == "XML":
if EX_XML == True:
try:
mprint("Calling external module...")
toxml.embed()
except: mprint("ERR - EXPORT_XML_FAILED")
else: mprint("XML Export is disabled due to dbistoxml.py not found")
else: mprint("Unrecognised format!")
def searchdb(col, val, silent=False, nowrite=False):
val = str(val)
cnt = 0
out_rep = ""
mprint('')
if nowrite == False: towrite = db_lines[0] + "\n"
for i in range(1, len(db_lines)):
htow(db_lines[i])
if re.search(val, workln[col], re.IGNORECASE):
for k in range(0, len(headerln)):
ln = headerln[k] + ": " + workln[k].replace("&dpp;", ":")
if silent == False: iprint(ln)
if nowrite == False: out_rep = out_rep + ln + "\n"
if silent == False: mprint('')
if nowrite == False:
out_rep = out_rep + "\n"
towrite = towrite + db_lines[i] + "\n"
cnt = cnt + 1
mprint("Total Items Found: " + str(cnt))
mprint('')
if nowrite == False:
writeback(str(argv[1]).replace(EXT, '') + "_search_" + headerln[col] + "_" + val + ".txt", "Report of " + gettime() + dif + "\n" + out_rep)
writeback(str(argv[1]).replace(EXT, '') + "_search_" + headerln[col] + "_" + val + ".dbis", towrite)
def getcount(col, silent=True):
global CACHE_GC
global CACHE_GC_LST
cached = False
cnt = 0
while cnt <= len(CACHE_GC):
if len(CACHE_GC) > 0:
if cnt == len(CACHE_GC): break
elif col == CACHE_GC[cnt][0]: cached = True; break
else: cnt = cnt + 1; continue
else: break
if cached == False:
CACHE_GC.append([col, cnt])
buf = ""
for i in range(1, len(db_lines)):
htow(db_lines[i])
val = str(workln[col])
if val != '':
if buf.find(val) == -1: buf = buf + val + ":"
lst = buf[:-1].split(":")
CACHE_GC_LST.append(lst)
if silent == False:
for j in range(0, len(lst)): mprint(str(headerln[col]) + ": " + str(lst[j]).replace("&dpp;", ":"))
else:
if silent == False:
try:
for k in range(0, len(CACHE_GC_LST[CACHE_GC[cnt][1]])): mprint(str(headerln[col]) + ": " + str(CACHE_GC_LST[CACHE_GC[cnt][1]][k]).replace("&dpp;", ":"))
mprint("(cached)")
except:
print "CACHE_GC_LST"; print CACHE_GC_LST
print "CACHE_GC"; print CACHE_GC
print "CACHE_GC_LST[CACHE_GC[cnt][1]]"
print CACHE_GC_LST[CACHE_GC[cnt][1]]
mprint('')
mprint("Item count: " + str(len(CACHE_GC_LST[CACHE_GC[cnt][1]])))
if cached == True: mprint("(cached)")
mprint('')
###------commands------###
###-------makros-------###
def start():
importdb(argv[1])
if im_fail == 0:
mprint("Imported database " + str(argv[1]) + ":")
get_header(db_lines[0])
if HDR_FAIL == False: chmod(argv[1], S_IREAD)
mprint("Items: " + str(len(db_lines)-1))
def stop():
global commited_changes
commit()
if len(commited_changes) > 1:
log_commited_changes = "\n"
for i in range(0, len(commited_changes)): log_commited_changes = log_commited_changes + commited_changes[i] + "\n"
try:
open(argv[1] + ".log", mode='ab').write(log_commited_changes)
mprint("Writeback to " + argv[1] + ".log sucessfull!")
except: mprint("Failed to write to " + argv[1] + ".log!")
writeback(argv[1], inputdb)
commited_changes = []
else: mprint("No changes made!")
###-------makros-------###
###------read--db------###
def get_header(header):
global HDR_FAIL
global headerln
header = str(header)
if header[0] == "[" and header[-1] == "]": headerln = header[1:-1].split(":")
else: mprint("ERR - HEADER_DEFECT - INTEGRITY"); mprint("ERR at: " + header); HDR_FAIL = True; exit(42)
def htow(linein):
global HTOW_FAIL; HTOW_FAIL = False
global workln; workln = []
linein = str(linein)
try:
if linein[0] == "{" and linein[-1] == "}": workln = linein[1:-1].split(":")
else: mprint("No readable lines detected!"); HTOW_FAIL = True
except: mprint("ERR - LINE_DEFECT - INTEGRITY"); HTOW_FAIL = True
###------read-db------###
###----pre-runtime----###
def info():
print "DBISshit! - the python app that mimics a database"
print " Copyright (c) 2018 Jan-Daniel Kaplanski"
print "-------------------------------------------------"
print ""
def usage():
mprint("Usage: " + argv[0] + " [file]")
mprint(" new: " + argv[0] + " new [file]")
mprint("merge: " + argv[0] + " merge [file1] [file2]")
def help():
hprint("available commands: sort, ls, showdb, showln, addln")
hprint(" writeln, writeid, changelog, cls")
hprint(" rmln, rmid, changeln, changeid")
hprint(" rebuildheader, commit, writeback")
hprint(" search, getcount, gc, add")
if DBG_FNC == True:
hprint(" importdb, adfln, stop")
hlpin = raw_input("[HELP] Enter command name: ")
if hlpin == "importdb" and DBG_FNC == True: hprint("imports the db into memory")
elif hlpin == "ls": hprint("alias for showdb")
elif hlpin == "sort": hprint("sorts the database then shows it (no change in structure)")
elif hlpin == "search": hprint("searches for a value in a given row, then saves it as a report and .dbis subset")
elif hlpin == "getcount": hprint("prints all different values for a col + prints their count")
elif hlpin == "gc": hprint("alias for getcount")
elif hlpin == "showdb": hprint("prints the entire database")
elif hlpin == "showln": hprint("prints a single line at position NUM from memory")
elif hlpin == "showid": hprint("prints a single line at ID NUM from memory")
elif hlpin == "writeln": hprint("writes a single line at position NUM into memory")
elif hlpin == "writeid": hprint("writes a single line at ID NUM into memory")
elif hlpin == "changeln": hprint("writes a single value at position NUM, item NUM into memory")
elif hlpin == "changeid": hprint("writes a single value at ID NUM, item NUM into memory")
elif hlpin == "commit": hprint("commits changes to memory")
elif hlpin == "changelog": hprint("prints uncommited changes")
elif hlpin == "writeback": hprint("writes the data to disk")
elif hlpin == "cls": hprint("clears the screen")
elif hlpin == "stop" and DBG_FNC == True: hprint("does commit and writeback")
elif hlpin == "adfln" and DBG_FNC == True: hprint("adds a new line")
elif hlpin == "addln": hprint("adds a new line and asks for speciffic input values")
elif hlpin == "add": hprint("alias for addln")
elif hlpin == "rmln": hprint("removes a line")
elif hlpin == "rmid": hprint("removes an id")
elif hlpin == "rebuildheader": hprint("rebuild the header of the file")
else: hprint("unknown command")
###----pre-runtime----###
###------runtime------###
def interactive():
global OP_CNT
start()
if im_fail == 0:
while True:
if OP_CNT % 15 == 0:
mprint("Autosaving...")
writeback(argv[1], inputdb)
chmod(argv[1], S_IREAD)
if OP_CNT % 30 == 0:
mprint("Backup...")
writeback(argv[1] + ".bak", inputdb)
chmod(argv[1], S_IREAD)
cmdin = raw_input("[MAIN] Enter command: ")
if cmdin == "exit":
exin = raw_input("[MAIN] Do you really want to exit? [y/n/f]: ")
if exin == "y": stop(); break
elif exin == "f": break
elif exin == "n": mprint("proceeding...")
else: mprint("Invalid answer! Returning...")
elif cmdin == "cls":
try:
cls()
info()
except: mprint("ERR - CLS_FAILED")
elif cmdin == "stop" and DBG_FNC == True:
try: stop()
except: mprint("ERR - STOP_FAILED")
elif cmdin == "changelog":
try: changelog()
except: mprint("ERR - CALL_CLOG_FAILED")
elif cmdin == "help":
try: help()
except: mprint("ERR - CALL_HELP_FAILED")
elif cmdin == "importdb":
try: importdb(argv[1])
except: mprint("ERR - IMPORT_DB_FAILED")
elif cmdin == "showdb" or cmdin == "ls":
try: showdb()
except: mprint("ERR - SHOW_DB_IMPOSSIBLE")
elif cmdin == "showln":
ln = input("[SHLN] Enter line nr.: ")
if ln != -42:
try: showln(ln)
except: mprint("ERR - SHOW_LN_IMPOSSIBLE")
else: mprint("Aborting...")
elif cmdin == "showid":
id = raw_input("[SHID] Enter ID: ")
if id != "-42":
try: showid(id)
except: mprint("ERR - SHOW_ID_IMPOSSIBLE")
else: mprint("Aborting...")
elif cmdin == "writeln":
try:
ln = input("[WRLN] Enter line nr.: ")
if ln != -42:
print "[WRLN] Old value: " + readln(ln)
val = raw_input("[WRLN] Enter value: ")
if val != "exit":
print "[WRLN] You entered: " + val
writeln(ln, val)
else: mprint("Aborting...")
else: mprint("Aborting...")
except: mprint("ERR - WRITE_IMPOSSIBLE")
elif cmdin == "writeid":
try:
id = raw_input("[WRID] Enter ID: ")
if id != "-42":
print "[WRID] Old value: " + readid(id)
val = raw_input("[WRID] Enter value: ")
if val != "exit":
print "[WRID] You entered: " + val
writeid(id, val)
else: mprint("Aborting...")
else: mprint("Aborting...")
except: mprint("ERR - WRITE_IMPOSSIBLE")
elif cmdin == "changeln":
try:
ln = input("[CHLN] Enter line nr.: ")
if ln != -42:
for i in range(1, len(headerln)): print "[CHLN] " + str(i) + ". " + headerln[i]
#print "[CHLN] Line: " + readln(ln)
it = input("[CHLN] Enter item nr.: ")
print "[CHLN] Item: " + workln[it]
if it != -42:
val = raw_input("[CHLN] Enter value: ")
if val != "exit":
print "[CHLN] You entered: " + val
changeln(ln, it, val)
showln(ln)
else: mprint("Aborting...")
else: mprint("Aborting...")
except: mprint("ERR - CHANGE_BY_LN_FAILED")
elif cmdin == "changeid":
try:
id = raw_input("[CHID] Enter ID: ")
if id != "-42":
#print "[CHID] Old value: " + readid(id)
for i in range(1, len(headerln)): print "[CHID] " + str(i) + ". " + headerln[i]
it = input("[CHID] Enter item nr.: ")
print "[CHID] Item: " + workln[it]
if it != -42:
val = raw_input("[CHID] Enter value: ")
if val != "exit":
print "[CHID] You entered: " + val
changeid(id, it, val)
showid(id)
else: mprint("Aborting...")
else: mprint("Aborting...")
except: mprint("ERR - CHANGE_BY_ID_FAILED")
elif cmdin == "adfln":
print "[ADLN] Use colons for multiple fields!"
print "[ADLN] A new entry should look like this: " + db_lines[0][len(db_lines[0].split(":")[0])+1:-1]
nval = raw_input("[ADLN] Enter value: ")
if nval != "exit":
print "[ADLN] You entered: " + nval
try: adfln(nval)
except: mprint("ERR - ADF_IMPOSSIBLE")
else: mprint("Aborting...")
elif cmdin == "addln" or cmdin == "add":
try: addln()
except: print("ERR - ADD_IMPOSSIBLE")
elif cmdin == "rmln":
r_ln = input("[RMLN] Enter line nr.: ")
if r_ln != 42:
try: rmln(r_ln)
except: mprint("ERR - REMOVE_IMPOSSIBLE")
else: mprint("Aborting...")
elif cmdin == "rmid":
r_id = raw_input("[RMID] Enter ID: ")
if r_id != 42:
try: rmid(r_id)
except: mprint("ERR - REMOVE_IMPOSSIBLE")
else: mprint("Aborting...")
elif cmdin == "rebuildheader":
try: rebuildheader()
except: mprint("ERR - REBUILD_HEADER_FAILED")
elif cmdin == "sort":
try:
set_rep = raw_input("[SORT] Generate report? [y/n/a]: ")
if set_rep == "y": sortdb(1)
elif set_rep == "n": sortdb()
elif set_rep == "a": mprint("Aborting...")
else: mprint("Invalid answer! Returning...")
except: mprint("ERR - SORTDB_FAILED")
elif cmdin == "export":
if not EX_DIS:
try:
form = ""
ex_which = raw_input("[EXPR] Export Format? [XML/HTML/a]: ")
if ex_which == "XML" or ex_which == "xml": form = "XML"
elif ex_which == "HTML" or ex_which == "html": form = "HTML"
if form != "":
ex_cset = raw_input("[EXPR] Custom charset? (leave empty for utf-8) [...]: ")
if ex_cset != "": export(form, ex_cset)
else: export(form)
elif ex_which == "a": mprint("Aborting...")
else: mprint("Invalid answer! Returning...")
except: mprint("ERR - EXPORT_FAILED")
else: mprint("Export disabled via EX_DIS flag!")
elif cmdin == "commit":
try: commit()
except: mprint("ERR - COMMIT_IMPOSSIBLE")
elif cmdin == "getcount" or cmdin == "gc":
try:
mprint("Available rows:")
for i in range(1, len(headerln)): print "[GETC] " + str(i) + ". " + headerln[i]
col = input("[GETC] Enter row nr.: ")
sil = raw_input("[GETC] Silent? [y/n]: ")
if sil == "y": getcount(col, True)
elif sil == "n": getcount(col, False)
else: mprint("Starting silent!"); getcount(col, True)
except: mprint("ERR - GET_COUNT_FAILED")
elif cmdin == "search":
try:
mprint("Available rows:")
for i in range(1, len(headerln)): print "[SEDB] " + str(i) + ". " + headerln[i]
col = input("[SEDB] Enter row nr.: ")
val = raw_input("[SEDB] Enter search value: ")
sil = raw_input("[SEDB] Silent? [y/n]: ")
wr = raw_input("[SEDB] Create Report + Subset-DB? [y/n]: ")
if sil == "y":
if wr == "y": searchdb(col, val, True)
elif wr == "n": searchdb(col, val, True, True)
elif sil == "n":
if wr == "y": searchdb(col, val)
elif wr == "n": searchdb(col, val, False, True)
else: mprint("Starting silent, nowrite!"); searchdb(col, val, True, True)
except: mprint("ERR - SEARCH_DB_FAILED")
elif cmdin == "writeback":
try: writeback(argv[1], inputdb)
except: mprint("ERR - WRITEBACK_FAILED")
else: mprint("Unknown command. Try 'help' for a list of available commands!")
OP_CNT = OP_CNT + 1
def main():
global CPR_EN
global CPR_M1
global CPR_M2
try:
if len(argv) >= 2:
if argv[1] == "help":
info()
usage()
help()
elif argv[1] == "new":
info()
if len(argv) > 2:
mprint("Create new header:")
mprint("Seperate fields with colons. Example:")
mprint("Name:Gender:E-Mail:Phone")
newdb_val = raw_input("[Main] Specify new header: ")
while True:
stdin.flush()
in_cpr = raw_input("[Main] Enable compression? [y/n]: ")
if in_cpr == "y" or in_cpr == "n": break
else: mprint("Invalid answer. Try again!")
new(newdb_val, in_cpr)
else: mprint("ERR - NO_FILE_NAME")
elif argv[1] == "merge":
info()
if not argv[2].endswith(EXT) and not argv[2].endswith(EXT + ".gz"):
mprint("File one does not apply to naming scheme! Aborting!")
if not argv[3].endswith(EXT) and not argv[3].endswith(EXT + ".gz"):
mprint("File two does not apply to naming scheme! Aborting!")
else:
if argv[2].endswith(EXT + ".gz"): CPR_M1 = True
if argv[3].endswith(EXT + ".gz"): CPR_M2 = True
if len(argv) > 3: mergedb(argv[2], argv[3])
else: mprint("ERR - NO_FILE_NAME"); exit(21)
else:
cls()
info()
if not argv[1].endswith(EXT) and not argv[1].endswith(EXT + ".gz"):
mprint("Entered file does not apply to naming scheme! Aborting!")
else:
if argv[1].endswith(EXT + ".gz"): CPR_EN = True
interactive()
if exists(argv[1]): chmod(argv[1], S_IREAD|S_IWRITE)
mprint("Good bye!")
elif len(argv) < 2:
info()
print "At least one argument required!"
print "Try: " + argv[0] + " help (or specifiy a file)!"
except:
if HDR_FAIL != True: mprint("ERR - UNKNOWN_ERROR")
###------runtime------###
###-----execution-----###
if __name__ == '__main__': main()
else: info(); mprint("This is a standalone program. Do not import!"); exit(127)
###-----execution-----###
###------comments-----###
###------comments-----###
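For reference (not part of the original program), a hedged sketch of the .dbis text format the parser above handles: a header line in square brackets and one colon-separated record per line in curly braces. The field names and values are made up:

# Illustrative only: a tiny .dbis file as read by get_header() and htow().
example_dbis = (
    "[ID:Name:Gender:E-Mail:Phone]\n"
    "{01:Alice:f:alice@example.com:555-0100}\n"
    "{02:Bob:m:bob@example.com:555-0101}\n"
)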
avg_line_length: 33.237164 | max_line_length: 156 | alphanum_fraction: 0.608872

hexsha: b3212ddc8c5b722c1101b999b81c8ed3951de013 | size: 9,982 | ext: py | lang: Python
max_stars: src/eval_meas.py in lishuai1993/FairMOT @ b224d61a24038cc7dc38d186e48e25e89c5daa6c, licenses ["MIT"], stars: null
max_issues: src/eval_meas.py in lishuai1993/FairMOT @ b224d61a24038cc7dc38d186e48e25e89c5daa6c, licenses ["MIT"], issues: null
max_forks: src/eval_meas.py in lishuai1993/FairMOT @ b224d61a24038cc7dc38d186e48e25e89c5daa6c, licenses ["MIT"], forks: null
content:
import os
import os.path as osp
import numpy as np
from tqdm import tqdm
import cv2
import shutil
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('TkAgg')
import _init_paths
from tracking_utils import visualization as vis
from tracking_utils.log import logger
from tracking_utils.timer import Timer
import motmetrics as mm  # required by measure2files (mm.metrics, mm.io)
from tracking_utils.evaluation import Evaluator
import sys
sys.path.append(r'D:\work\code\package\code\code_ls\package')
sys.path.append('/home/shuai.li/code/package')
import pathcostom as pm
def measure2files(results_root, data_root, seqs):
"""
@param results_root: root directory of the MOT tracking result files; each line has the format <frame>, <id>, <bb_left>, <bb_top>, <bb_width>, <bb_height>, <conf>, <x>, <y>, <z>
@param data_root: path to the ground-truth files, excluding the last three path levels, because the Evaluator constructor already builds the full path
@param seqs: the third-to-last path component of the ground-truth paths
@return: saves the evaluation metrics of the tracking results under every path in seqs, plus the metrics aggregated over all folders
"""
data_type = 'mot'
result_root = "/home/shuai.li/code/FairMOT/MOT15/images/results/temp/"
exp_name = "test_evalMot15"
accs = []
# eval
for seq in tqdm(seqs):
result_filename = osp.join(results_root, seq) + '.txt'
logger.info('Evaluate seq: {}'.format(seq))
evaluator = Evaluator(data_root, seq, data_type) # the constructor loads the ground-truth data automatically from data_root and seq
accs.append(evaluator.eval_file(result_filename)) # read the stored detection results and evaluate them; one acc object per frame
# if save_videos:
# output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
# cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
# os.system(cmd_str)
# get summary
metrics = mm.metrics.motchallenge_metrics # the 18 evaluation metrics
mh = mm.metrics.create() # create the metrics factory; pass in the data later to obtain the metrics
summary = Evaluator.get_summary(accs, seqs, metrics) # compute the MOT challenge metrics: accumulators, evaluated sequence names, metric list
strsummary = mm.io.render_summary( # format the eval metrics as a string for console display
summary,
formatters=mh.formatters,
namemap=mm.io.motchallenge_metric_names
)
print(strsummary) # print to the command line
Evaluator.save_summary(summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
def getbb_perid():
file_result = '/home/shuai.li/dset/MOT/cue_video/compare_index_yhw/result/results.txt' # tracking result file, results.txt
dir_img = '/home/shuai.li/dset/MOT/cue_video/compare_index_yhw/result/frame' # folder containing the video frames
dir_dst = '/home/shuai.li/dset/MOT/cue_video/compare_index_yhw/result/patch_ids' # root directory for the result crops; each subfolder is named by id, zero-padded to length 5
formater = "{:05d}"
# read the result file
result_data = np.loadtxt(file_result, delimiter=',', dtype=np.float, usecols=[0, 1, 2, 3, 4, 5])
result_data = result_data.astype(np.int)
# create all the output folders
ids = result_data[:, 1].tolist()
ids_set = set(ids)
for temp in tqdm(ids_set):
pm.mkdir_recu(osp.join(dir_dst, formater.format(int(temp))))
# save the bounding-box crops for each id
for temp in tqdm(result_data):
imgpath_src = osp.join(dir_img, formater.format(temp[0] - 1) + '.jpg') # frame index converted to image name
imgpath_dst = osp.join(dir_dst, formater.format(temp[1]), formater.format(temp[0] - 1) + '.jpg') # id as folder name + frame index as file name
top, bot, left, right = temp[3], temp[3]+temp[5], temp[2], temp[2]+temp[4]
img = cv2.imread(imgpath_src)
cv2.imwrite(imgpath_dst, img[top:bot, left:right])
def shutil_idfolder_byTspan(Fspan = 18, shutil_func=shutil.copy):
"""
@param Fspan: the time span of the id's trajectory must be at least 1 s; in this program 1 s corresponds to roughly 18 frames
@return:
"""
dir_src = r'D:\work\dset\cue_mot_Data\compare_index_yhw\patch_ids\\'
dir_dst = r'D:\work\dset\cue_mot_Data\compare_index_yhw\ge{}frame_perid_inpatch\\'.format(Fspan)
idfolders = os.listdir(dir_src)
if not osp.exists(dir_dst):
os.makedirs(dir_dst)
for folder in tqdm(idfolders):
framelist = list(map(lambda x: int(x.split('.jpg')[0]), os.listdir(osp.join(dir_src, folder))))
diff = max(framelist) - min(framelist)
if diff >= Fspan:
if shutil_func == shutil.copytree:
shutil_func(osp.join(dir_src, folder), osp.join(dir_dst, folder))
def hist_fspan(bins=5, imgnumbins = 5, range=None):
# dir_src_list = [r'D:\work\dset\cue_mot_Data\compare_index_yhw\eq1frame_perid_movefpatch_ids\\',
# r'D:\work\dset\cue_mot_Data\compare_index_yhw\lt18frame_perid_inpatch_movefpatch_ids\\',
# r'D:\work\dset\cue_mot_Data\compare_index_yhw\patch_ids\\',
# ]
dir_src_list = [r'D:\work\dset\cue_mot_Data\compare_index_yhw\gap8f_ge60frame_perid_inpatch\\']
rotation = -90
id_folderlist = pm.get_dirflistdir(dir_src_list)
print('id_folderlist:\t', len(id_folderlist))
numlist = []
img_numlist = []
for folder in tqdm(id_folderlist):
num_perid = list(map(lambda x: int(x.split('.jpg')[0]), os.listdir(folder))) # note: this is the frame span, not the number of images
imgnum_perid = len(os.listdir(folder))
try:
fspan = max(num_perid) - min(num_perid)
except:
pass
numlist.append(fspan)
img_numlist.append(imgnum_perid)
a, _ = np.histogram(numlist, bins=bins, density=True)
bins = np.array(bins)
factor =bins[1:] - bins[:-1]
f = a*factor
bar_xlabel = []
for i, j in zip(bins[:-1], bins[1:]):
bar_xlabel.append(str(i) + '-' + str(j))
fig, ax = plt.subplots(2, 2)
fig.suptitle("gap8f_ge60frame_perid_inpatch")
xticks = (np.array(bins, dtype=np.int) - 1)//15 # convert to seconds
ax[0][0].hist(numlist, bins=bins) # bins are half-open intervals [left, right)
ax[0][0].set_xticks(ticks=bins)
ax[0][0].set_xticklabels(labels=xticks, rotation=rotation)
ax[0][0].set_xlabel('value of time span in per trajectory')
ax[0][0].set_ylim((0, 400))
ax[0][0].set_ylabel('num in per bin')
ax[0][1].bar(np.arange(1, len(xticks), 1), f) # bins are half-open intervals [left, right)
ax[0][1].set_xticks(ticks=np.arange(1, len(xticks), 1))
ax[0][1].set_xticklabels(labels=xticks[1:], rotation=rotation)
ax[0][1].set_xlabel('the percentage of time span in per trajectory')
ax[0][1].set_ylim((0, 1))
ax[0][1].set_ylabel('num in per bin')
ax[0][1].set_title('time span(s) of per trajectory')
b, _ = np.histogram(img_numlist, bins=imgnumbins, density=True)
imgnumbins = np.array(imgnumbins)
factor = imgnumbins[1:] - imgnumbins[:-1]
bf = b * factor
xticks = imgnumbins
ax[1][0].hist(img_numlist, bins=imgnumbins) # bins are half-open intervals [left, right)
ax[1][0].set_xticks(ticks=xticks)
ax[1][0].set_xticklabels(labels=xticks, rotation=rotation)
ax[1][0].set_xlabel('num of img per trajectory')
ax[1][0].set_ylim((0, 400))
ax[1][0].set_ylabel('num in per bin')
bar_xlabel = []
for i, j in zip(imgnumbins[:-1], imgnumbins[1:]):
bar_xlabel.append(str(i) + '-' + str(j))
ax[1][1].bar(np.arange(1, len(xticks), 1), bf) # bins are half-open intervals [left, right)
ax[1][1].set_xticks(ticks=np.arange(1, len(xticks), 1))
ax[1][1].set_xticklabels(labels=xticks[1:], rotation=rotation)
ax[1][1].set_xlabel('The percentage of the number of images per trajectory')
ax[1][1].set_ylim((0, 1))
ax[1][1].set_ylabel('num in per bin')
# plt.title('time span(s) of per trajectory')
plt.show()
def get_frame_gap_srcvideo(gap = 3, shutil_func=shutil.copy):
dir_src = r'D:\work\dset\cue_mot_Data\compare_index_yhw\get60frame_perid_inpatch'
dir_dst = r'D:\work\dset\cue_mot_Data\compare_index_yhw\gap{}f_ge60frame_perid_inpatch'.format(gap)
frame_orderset = set(range(0, 54000, gap))
folder_list = os.listdir(dir_src)
for folder in tqdm(folder_list):
frame_set_perid = set(map(lambda x: int(x.split('.jpg')[0]), os.listdir(osp.join(dir_src, folder))))
intersection = frame_set_perid.intersection(frame_orderset)
dst_path = osp.join(dir_dst, folder)
src_path = osp.join(dir_src, folder)
pm.mkdir_recu(dst_path)
for order in intersection:
shutil_func(osp.join(src_path, '{:05d}.jpg'.format(order)), dst_path)
def get_errnum():
dir_src = r'D:\work\dset\cue_mot_Data\compare_index_yhw\gap8f_ge60frame_perid_inpatch\\'
folder_list = os.listdir(dir_src)
num = 0
for folder in tqdm(folder_list):
if '_' in folder:
num += 1
print(num)
if __name__ == "__main__":
# file_pre = "/home/shuai.li/code/FairMOT/MOT15/images/results/temp/"
# data_root = "/home/shuai.li/code/FairMOT/MOT15/images/train"
# seqs = ['KITTI-13',
# 'KITTI-17',
# 'ETH-Bahnhof',
# 'ETH-Sunnyday',
# 'PETS09-S2L1',
# 'TUD-Campus',
# 'TUD-Stadtmitte',
# 'ADL-Rundle-6',
# 'ADL-Rundle-8',
# 'ETH-Pedcross2',
# 'TUD-Stadtmitte',
# ]
# measure2files(file_pre, data_root, seqs)
# getbb_perid()
Fspan = 60
# shutil_func = shutil.copytree
# shutil_idfolder_byTspan(Fspan= Fspan, shutil_func=shutil_func)
# get_frame_gap_srcvideo(gap=8, shutil_func=shutil.copy)
bins = list(range(1, 15*8+2, 15)) # [1, 16)
tail = list(range(121, 60*10+2, 60))
tail.pop(0)
bins.extend(tail)
imgnumbins = list(range(0, 150+1, 10))
range=None
hist_fspan(bins=bins, imgnumbins=imgnumbins, range=range)
# get_errnum()
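For reference (not part of the original script), a hedged sketch of the MOT result-file format described in the measure2files docstring; all numbers are made up:

# Illustrative only: <frame>, <id>, <bb_left>, <bb_top>, <bb_width>, <bb_height>, <conf>, <x>, <y>, <z>
example_result_lines = [
    "1,1,794.2,247.5,71.2,174.8,1,-1,-1,-1",
    "2,1,796.3,248.1,71.0,174.2,1,-1,-1,-1",
]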
avg_line_length: 36.833948 | max_line_length: 137 | alphanum_fraction: 0.607794

hexsha: 86df210e442dbdab880476ac63700e71e2505061 | size: 494 | ext: py | lang: Python
max_stars: Egzersiz/bilal/regexegzersiz2.py in ibrahimediz/ornekproje @ c5ebeafc43a9c6d2aa639d0d95eedbce65991576, licenses ["Apache-2.0"], stars: null
max_issues: Egzersiz/bilal/regexegzersiz2.py in ibrahimediz/ornekproje @ c5ebeafc43a9c6d2aa639d0d95eedbce65991576, licenses ["Apache-2.0"], issues: null
max_forks: Egzersiz/bilal/regexegzersiz2.py in ibrahimediz/ornekproje @ c5ebeafc43a9c6d2aa639d0d95eedbce65991576, licenses ["Apache-2.0"], forks: null
content:
isimler = """
Merhaba
123123123123
2022-12-12
Mr. Simit
Mr. Kızılay
Mr. Kahverengi
Mr. Lacivert
Ms. Kırmızı
Ms. Yeşil
Mrs. Siyah
Mrs. Mavi
edizibrahim@patika.dev
patika@kodluyoruz.com
"""
## extracting the names
# pattern = re.compile(r"(Mr|Mrs|Ms).?\s(\w+)")
# matches = pattern.finditer(isimler)
# for match in matches:
# print(match.group(2))
# patika@kodluyoruz.com
import re
pattern = re.compile(r"(.*)+@+(.*)")
matches = pattern.finditer(isimler)
for match in matches:
print(match)
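As a follow-up to the exercise above (not part of the original file), a hedged sketch of a tighter e-mail pattern that matches only the address instead of spanning the whole line:

# Hypothetical alternative pattern: local part, "@", then a dotted domain.
email_pattern = re.compile(r"[\w.+-]+@[\w-]+\.[\w.-]+")
for match in email_pattern.finditer(isimler):
    print(match.group(0))   # edizibrahim@patika.dev, patika@kodluyoruz.com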
avg_line_length: 17.034483 | max_line_length: 47 | alphanum_fraction: 0.700405

hexsha: c6e8e81000aca4a53907836e9ef2b534c990510e | size: 10,600 | ext: py | lang: Python
max_stars: ThirdAssigment/plots.py in PaiZuZe/MAC0460-machineLearning @ 98031412d8835afad0fde318b5c57a613bfb4fc8, licenses ["Apache-2.0"], stars: null
max_issues: ThirdAssigment/plots.py in PaiZuZe/MAC0460-machineLearning @ 98031412d8835afad0fde318b5c57a613bfb4fc8, licenses ["Apache-2.0"], issues: 1 (2018-09-16T16:31:36.000Z to 2018-09-16T16:31:36.000Z)
max_forks: ThirdAssigment/plots.py in PaiZuZe/MAC0460-machineLearning @ 98031412d8835afad0fde318b5c57a613bfb4fc8, licenses ["Apache-2.0"], forks: null
content:
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def plot_points_regression(x,
y,
title,
xlabel,
ylabel,
prediction=None,
legend=False,
r_squared=None,
position=(90, 100)):
"""
Plots the data points and the prediction,
if there is one.
:param x: design matrix
:type x: np.array
:param y: regression targets
:type y: np.array
:param title: plot's title
:type title: str
:param xlabel: x axis label
:type xlabel: str
:param ylabel: y axis label
:type ylabel: str
:param prediction: model's prediction
:type prediction: np.array
:param legend: param to control print legends
:type legend: bool
:param r_squared: r^2 value
:type r_squared: float
:param position: text position
:type position: tuple
"""
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
line1, = ax.plot(x, y, 'bo', label='Real data')
if prediction is not None:
line2, = ax.plot(x, prediction, 'r', label='Predicted data')
if legend:
plt.legend(handles=[line1, line2], loc=2)
ax.set_title(title,
fontsize=20,
fontweight='bold')
if r_squared is not None:
bbox_props = dict(boxstyle="square,pad=0.3",
fc="white", ec="black", lw=0.2)
t = ax.text(position[0], position[1], "$R^2 ={:.4f}$".format(r_squared),
size=15, bbox=bbox_props)
ax.set_xlabel(xlabel, fontsize=20)
ax.set_ylabel(ylabel, fontsize=20)
plt.show()
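# Hedged usage sketch for plot_points_regression (not part of the original
# module); the data below is made up and the call is left commented out.
#
#   x = np.linspace(0, 10, 50).reshape(-1, 1)
#   y = 3 * x + np.random.randn(50, 1)
#   plot_points_regression(x, y,
#                          title="toy regression",
#                          xlabel="x", ylabel="y",
#                          prediction=3 * x,
#                          legend=True,
#                          r_squared=0.9,
#                          position=(8, 25))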
def plot_cost_function_curve(X,
y,
cost_function,
title,
weights_list=None,
cost_list=None,
position=(20, 40),
range_points=(20, 40)):
"""
Plots a cost surface.
It assumes that weight.shape == (2,).
:param X: design matrix
:type X: np.ndarray
:param y: regression targets
:type y: np.ndarray
:param cost_function: function to compute regression cost
:type cost_function: lambda: (np.ndarray, np.ndarray, np.ndarray) -> float
:param title: plot's title
:type title: str
:param weights_list: list of weights
:type weights_list: list
:param cost_list: list of costs
:type cost_list: list
    :param position: surface view rotation as (elevation, azimuth)
:type position: tuple
:param range_points: range of values for w
:type range_points: tuple
"""
w_0, w_1 = 0, 0
ms = np.linspace(w_0 - range_points[0] , w_0 + range_points[0], range_points[0])
bs = np.linspace(w_1 - range_points[1] , w_1 + range_points[1], range_points[1])
M, B = np.meshgrid(ms, bs)
MB = np.stack((np.ravel(M), np.ravel(B)), axis=1)
size = MB.shape[0]
MB = MB.reshape((size, 2, 1))
zs = np.array([cost_function(X, y, MB[i])
for i in range(size)])
Z = zs.reshape(M.shape)
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(M, B, Z, rstride=1, cstride=1, color='b', alpha=0.2)
ax.set_xlabel('w[0]', labelpad=30, fontsize=24, fontweight='bold')
ax.set_ylabel('w[1]', labelpad=30, fontsize=24, fontweight='bold')
ax.set_zlabel('J(w)', labelpad=30, fontsize=24, fontweight='bold')
if weights_list is not None and cost_list is not None:
ax.plot([weights_list[0][0]],
[weights_list[0][1]],
[cost_list[0]],
markerfacecolor=(1.0, 0.0, 0.0, 1.0),
markeredgecolor=(1.0, 0.0, 0.0, 1.0),
marker='o',
markersize=7)
ax.plot([weights_list[-1][0]],
[weights_list[-1][1]],
[cost_list[-1]],
markerfacecolor=(0.0, 0.0, 1.0, 1.0),
markeredgecolor=(0.0, 0.0, 1.0, 1.0),
marker='o',
markersize=7)
temp_red = 1.0
temp_blue = 0.0
size = len(weights_list)
oldx = 0.0
oldy = 0.0
oldz = 0.0
for w, cost in zip(weights_list, cost_list):
rgba_color = (temp_red * 1.0, 0.0, temp_blue * 1.0, 1.0)
ax.plot([w[0]],
[w[1]],
[cost],
markerfacecolor=rgba_color,
markeredgecolor=rgba_color,
marker='.',
markersize=4)
if oldx + oldy + oldz != 0.0 :
rgba_color_weak = list(rgba_color)
rgba_color_weak[-1] = 0.3
ax.plot([w[0], oldx],[w[1], oldy], [cost, oldz],color=rgba_color_weak)
temp_red += - 1 / size
temp_blue += 1 / size
oldx = w[0]
oldy = w[1]
oldz = cost
ax.view_init(elev=position[0], azim=position[1])
ax.set_title(title,
fontsize=20,
fontweight='bold')
plt.show()
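# Illustrative usage sketch (added for clarity; assumes a simple MSE cost and a
# design matrix with a bias column, neither of which is defined in this module):
#
#   def mse_cost(X, y, w):
#       r = X.dot(w).ravel() - y.ravel()
#       return float(np.mean(r ** 2))
#
#   X = np.hstack([np.ones((50, 1)), np.linspace(-1, 1, 50).reshape(-1, 1)])
#   y = 2 + 3 * X[:, 1]
#   plot_cost_function_curve(X, y, mse_cost, title="MSE surface")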
def simple_step_plot(ylist,
yname,
title,
figsize=(4, 4),
labels=None):
"""
Plots values over time.
:param ylist: list of values lists
:type ylist: list
:param yname: value name
:type yname: str
:param title: plot's title
:type title: str
:param figsize: plot's size
:type figsize: tuple
:param labels: label for each values list in ylist
    :type labels: list
"""
y0 = ylist[0]
x = np.arange(1, len(y0) + 1, 1)
fig, ax = plt.subplots(1, 1, figsize=figsize)
for y in ylist:
ax.plot(x, y)
plt.xlabel('step')
plt.ylabel(yname)
plt.title(title,
fontsize=14,
fontweight='bold')
plt.grid(True)
if labels is not None:
plt.legend(labels,
loc='upper right')
plt.show()
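# Illustrative usage sketch (added for clarity; synthetic loss curves):
#
#   train_loss = list(np.linspace(1.0, 0.1, 20))
#   valid_loss = list(np.linspace(1.1, 0.3, 20))
#   simple_step_plot([train_loss, valid_loss], "loss", "Loss per step",
#                    labels=["train", "valid"])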
def plot9images(images, cls_true, img_shape, cls_pred=None, lspace=0.3):
"""
Function to show 9 images with their respective classes.
If cls_pred is an array, you can see the image and the prediction.
:param images: images
:type images: np array
:param cls_true: true classes
:type cls_true: np array
:param img_shape: image shape
:type img_shape: tuple
:param cls_pred: model's prediction
:type cls_pred: None or np array
:param lspace: space between images
:type lspace: float
"""
assert len(images) == len(cls_true) == 9
if cls_pred is None:
title = "Some images with labels"
else:
title = "Some images with predictions and labels"
fig, axes = plt.subplots(3, 3)
fig.subplots_adjust(hspace=lspace, wspace=0.3)
st = fig.suptitle(title, fontsize=24, fontweight='bold')
for i, ax in enumerate(axes.flat):
ax.imshow(images[i].reshape(img_shape), cmap=None)
if cls_pred is None:
xlabel = "Label: {0}".format(cls_true[i])
else:
xlabel = "Label: {0}\nPred: {1}".format(cls_true[i], cls_pred[i])
ax.set_xlabel(xlabel)
ax.set_xticks([])
ax.set_yticks([])
plt.tight_layout()
st.set_y(1.05)
fig.subplots_adjust(top=0.85)
plt.show()
def plot_confusion_matrix(truth,
predictions,
classes,
normalize=False,
save=False,
cmap=plt.cm.Oranges,
path="confusion_matrix.png"):
"""
This function plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
'cmap' controls the color plot. colors:
https://matplotlib.org/1.3.1/examples/color/colormaps_reference.html
:param truth: true labels
:type truth: np array
:param predictions: model predictions
:type predictions: np array
:param classes: list of classes in order
:type classes: list
:param normalize: param to normalize cm matrix
:type normalize: bool
:param save: param to save cm plot
:type save: bool
:param cmap: plt color map
:type cmap: plt.cm
:param path: path to save image
:type path: str
"""
acc = np.array(truth) == np.array(predictions)
size = float(acc.shape[0])
acc = np.sum(acc.astype("int32")) / size
title = "Confusion matrix of {0} examples\n accuracy = {1:.6f}".format(int(size), # noqa
acc)
cm = confusion_matrix(truth, predictions)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.figure(figsize=(9, 9))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title, fontsize=24, fontweight='bold')
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label', fontweight='bold')
plt.xlabel('Predicted label', fontweight='bold')
plt.show()
if save:
plt.savefig(path)
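# Illustrative usage sketch (added for clarity; small synthetic label arrays):
#
#   truth = np.array([0, 0, 1, 1, 2, 2])
#   preds = np.array([0, 1, 1, 1, 2, 0])
#   plot_confusion_matrix(truth, preds, classes=["a", "b", "c"], normalize=True)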
def plot_histogram_from_labels(labels, labels_legend, comment):
"""
Plot dataset histogram
    :param labels: array of labels
    :type labels: np.array
    :param labels_legend: list with the name of labels
    :type labels_legend: list
    :param comment: comment about the dataset, printed in the title
    :type comment: str
"""
data_hist = plt.hist(labels,
bins=np.arange(len(labels_legend) + 1) - 0.5,
edgecolor='black')
axes = plt.gca()
axes.set_ylim([0, len(labels)])
plt.title("Histogram of {} data points ({})".format(len(labels), comment))
plt.xticks(np.arange(len(labels_legend) + 1), labels_legend)
plt.xlabel("Label")
plt.ylabel("Frequency")
for i in range(len(labels_legend)):
plt.text(data_hist[1][i] + 0.25,
data_hist[0][i] + (data_hist[0][i] * 0.01),
str(int(data_hist[0][i])))
plt.show()
plt.close()
| 34.304207
| 93
| 0.553679
|
c5721f064e98b77681161a1bb9b152b1c1ec4f9c
| 13,306
|
py
|
Python
|
colour/models/tests/test_cie_lab.py
|
aurelienpierre/colour
|
3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47
|
[
"BSD-3-Clause"
] | null | null | null |
colour/models/tests/test_cie_lab.py
|
aurelienpierre/colour
|
3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47
|
[
"BSD-3-Clause"
] | null | null | null |
colour/models/tests/test_cie_lab.py
|
aurelienpierre/colour
|
3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47
|
[
"BSD-3-Clause"
] | null | null | null |
"""Defines the unit tests for the :mod:`colour.models.cie_lab` module."""
import numpy as np
import unittest
from itertools import permutations
from colour.models import XYZ_to_Lab, Lab_to_XYZ, Lab_to_LCHab, LCHab_to_Lab
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"TestXYZ_to_Lab",
"TestLab_to_XYZ",
"TestLab_to_LCHab",
"TestLCHab_to_Lab",
]
class TestXYZ_to_Lab(unittest.TestCase):
"""
Define :func:`colour.models.cie_lab.XYZ_to_Lab` definition unit tests
methods.
"""
def test_XYZ_to_Lab(self):
"""Test :func:`colour.models.cie_lab.XYZ_to_Lab` definition."""
np.testing.assert_almost_equal(
XYZ_to_Lab(np.array([0.20654008, 0.12197225, 0.05136952])),
np.array([41.52787529, 52.63858304, 26.92317922]),
decimal=7,
)
np.testing.assert_almost_equal(
XYZ_to_Lab(np.array([0.14222010, 0.23042768, 0.10495772])),
np.array([55.11636304, -41.08791787, 30.91825778]),
decimal=7,
)
np.testing.assert_almost_equal(
XYZ_to_Lab(np.array([0.07818780, 0.06157201, 0.28099326])),
np.array([29.80565520, 20.01830466, -48.34913874]),
decimal=7,
)
np.testing.assert_almost_equal(
XYZ_to_Lab(
np.array([0.20654008, 0.12197225, 0.05136952]),
np.array([0.44757, 0.40745]),
),
np.array([41.52787529, 38.48089305, -5.73295122]),
decimal=7,
)
np.testing.assert_almost_equal(
XYZ_to_Lab(
np.array([0.20654008, 0.12197225, 0.05136952]),
np.array([0.34570, 0.35850]),
),
np.array([41.52787529, 51.19354174, 19.91843098]),
decimal=7,
)
np.testing.assert_almost_equal(
XYZ_to_Lab(
np.array([0.20654008, 0.12197225, 0.05136952]),
np.array([0.34570, 0.35850, 1.00000]),
),
np.array([41.52787529, 51.19354174, 19.91843098]),
decimal=7,
)
def test_n_dimensional_XYZ_to_Lab(self):
"""
Test :func:`colour.models.cie_lab.XYZ_to_Lab` definition n-dimensional
support.
"""
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
illuminant = np.array([0.31270, 0.32900])
Lab = XYZ_to_Lab(XYZ, illuminant)
XYZ = np.tile(XYZ, (6, 1))
Lab = np.tile(Lab, (6, 1))
np.testing.assert_almost_equal(
XYZ_to_Lab(XYZ, illuminant), Lab, decimal=7
)
illuminant = np.tile(illuminant, (6, 1))
np.testing.assert_almost_equal(
XYZ_to_Lab(XYZ, illuminant), Lab, decimal=7
)
XYZ = np.reshape(XYZ, (2, 3, 3))
illuminant = np.reshape(illuminant, (2, 3, 2))
Lab = np.reshape(Lab, (2, 3, 3))
np.testing.assert_almost_equal(
XYZ_to_Lab(XYZ, illuminant), Lab, decimal=7
)
def test_domain_range_scale_XYZ_to_Lab(self):
"""
Test :func:`colour.models.cie_lab.XYZ_to_Lab` definition
domain and range scale support.
"""
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
illuminant = np.array([0.31270, 0.32900])
Lab = XYZ_to_Lab(XYZ, illuminant)
d_r = (("reference", 1, 1), ("1", 1, 0.01), ("100", 100, 1))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
XYZ_to_Lab(XYZ * factor_a, illuminant),
Lab * factor_b,
decimal=7,
)
@ignore_numpy_errors
def test_nan_XYZ_to_Lab(self):
"""Test :func:`colour.models.cie_lab.XYZ_to_Lab` definition nan support."""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
XYZ = np.array(case)
illuminant = np.array(case[0:2])
XYZ_to_Lab(XYZ, illuminant)
class TestLab_to_XYZ(unittest.TestCase):
"""
Define :func:`colour.models.cie_lab.Lab_to_XYZ` definition unit tests
methods.
"""
def test_Lab_to_XYZ(self):
"""Test :func:`colour.models.cie_lab.Lab_to_XYZ` definition."""
np.testing.assert_almost_equal(
Lab_to_XYZ(np.array([41.52787529, 52.63858304, 26.92317922])),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7,
)
np.testing.assert_almost_equal(
Lab_to_XYZ(np.array([55.11636304, -41.08791787, 30.91825778])),
np.array([0.14222010, 0.23042768, 0.10495772]),
decimal=7,
)
np.testing.assert_almost_equal(
Lab_to_XYZ(np.array([29.80565520, 20.01830466, -48.34913874])),
np.array([0.07818780, 0.06157201, 0.28099326]),
decimal=7,
)
np.testing.assert_almost_equal(
Lab_to_XYZ(
np.array([41.52787529, 38.48089305, -5.73295122]),
np.array([0.44757, 0.40745]),
),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7,
)
np.testing.assert_almost_equal(
Lab_to_XYZ(
np.array([41.52787529, 51.19354174, 19.91843098]),
np.array([0.34570, 0.35850]),
),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7,
)
np.testing.assert_almost_equal(
Lab_to_XYZ(
np.array([41.52787529, 51.19354174, 19.91843098]),
np.array([0.34570, 0.35850, 1.00000]),
),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7,
)
def test_n_dimensional_Lab_to_XYZ(self):
"""
Test :func:`colour.models.cie_lab.Lab_to_XYZ` definition n-dimensional
support.
"""
Lab = np.array([41.52787529, 52.63858304, 26.92317922])
illuminant = np.array([0.31270, 0.32900])
XYZ = Lab_to_XYZ(Lab, illuminant)
Lab = np.tile(Lab, (6, 1))
XYZ = np.tile(XYZ, (6, 1))
np.testing.assert_almost_equal(
Lab_to_XYZ(Lab, illuminant), XYZ, decimal=7
)
illuminant = np.tile(illuminant, (6, 1))
np.testing.assert_almost_equal(
Lab_to_XYZ(Lab, illuminant), XYZ, decimal=7
)
Lab = np.reshape(Lab, (2, 3, 3))
illuminant = np.reshape(illuminant, (2, 3, 2))
XYZ = np.reshape(XYZ, (2, 3, 3))
np.testing.assert_almost_equal(
Lab_to_XYZ(Lab, illuminant), XYZ, decimal=7
)
def test_domain_range_scale_Lab_to_XYZ(self):
"""
Test :func:`colour.models.cie_lab.Lab_to_XYZ` definition
domain and range scale support.
"""
Lab = np.array([41.52787529, 52.63858304, 26.92317922])
illuminant = np.array([0.31270, 0.32900])
XYZ = Lab_to_XYZ(Lab, illuminant)
d_r = (("reference", 1, 1), ("1", 0.01, 1), ("100", 1, 100))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
Lab_to_XYZ(Lab * factor_a, illuminant),
XYZ * factor_b,
decimal=7,
)
@ignore_numpy_errors
def test_nan_Lab_to_XYZ(self):
"""Test :func:`colour.models.cie_lab.Lab_to_XYZ` definition nan support."""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
Lab = np.array(case)
illuminant = np.array(case[0:2])
Lab_to_XYZ(Lab, illuminant)
class TestLab_to_LCHab(unittest.TestCase):
"""
Define :func:`colour.models.cie_lab.Lab_to_LCHab` definition unit tests
methods.
"""
def test_Lab_to_LCHab(self):
"""Test :func:`colour.models.cie_lab.Lab_to_LCHab` definition."""
np.testing.assert_almost_equal(
Lab_to_LCHab(np.array([41.52787529, 52.63858304, 26.92317922])),
np.array([41.52787529, 59.12425901, 27.08848784]),
decimal=7,
)
np.testing.assert_almost_equal(
Lab_to_LCHab(np.array([55.11636304, -41.08791787, 30.91825778])),
np.array([55.11636304, 51.42135412, 143.03889556]),
decimal=7,
)
np.testing.assert_almost_equal(
Lab_to_LCHab(np.array([29.80565520, 20.01830466, -48.34913874])),
np.array([29.80565520, 52.32945383, 292.49133666]),
decimal=7,
)
def test_n_dimensional_Lab_to_LCHab(self):
"""
Test :func:`colour.models.cie_lab.Lab_to_LCHab` definition
n-dimensional arrays support.
"""
Lab = np.array([41.52787529, 52.63858304, 26.92317922])
LCHab = Lab_to_LCHab(Lab)
Lab = np.tile(Lab, (6, 1))
LCHab = np.tile(LCHab, (6, 1))
np.testing.assert_almost_equal(Lab_to_LCHab(Lab), LCHab, decimal=7)
Lab = np.reshape(Lab, (2, 3, 3))
LCHab = np.reshape(LCHab, (2, 3, 3))
np.testing.assert_almost_equal(Lab_to_LCHab(Lab), LCHab, decimal=7)
def test_domain_range_scale_Lab_to_LCHab(self):
"""
Test :func:`colour.models.cie_lab.Lab_to_LCHab` definition domain and
range scale support.
"""
Lab = np.array([41.52787529, 52.63858304, 26.92317922])
LCHab = Lab_to_LCHab(Lab)
d_r = (
("reference", 1, 1),
("1", 0.01, np.array([0.01, 0.01, 1 / 360])),
("100", 1, np.array([1, 1, 1 / 3.6])),
)
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
Lab_to_LCHab(Lab * factor_a), LCHab * factor_b, decimal=7
)
@ignore_numpy_errors
def test_nan_Lab_to_LCHab(self):
"""
Test :func:`colour.models.cie_lab.Lab_to_LCHab` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
Lab = np.array(case)
Lab_to_LCHab(Lab)
class TestLCHab_to_Lab(unittest.TestCase):
"""
Define :func:`colour.models.cie_lab.LCHab_to_Lab` definition unit tests
methods.
"""
def test_LCHab_to_Lab(self):
"""Test :func:`colour.models.cie_lab.LCHab_to_Lab` definition."""
np.testing.assert_almost_equal(
LCHab_to_Lab(np.array([41.52787529, 59.12425901, 27.08848784])),
np.array([41.52787529, 52.63858304, 26.92317922]),
decimal=7,
)
np.testing.assert_almost_equal(
LCHab_to_Lab(np.array([55.11636304, 51.42135412, 143.03889556])),
np.array([55.11636304, -41.08791787, 30.91825778]),
decimal=7,
)
np.testing.assert_almost_equal(
LCHab_to_Lab(np.array([29.80565520, 52.32945383, 292.49133666])),
np.array([29.80565520, 20.01830466, -48.34913874]),
decimal=7,
)
def test_n_dimensional_LCHab_to_Lab(self):
"""
Test :func:`colour.models.cie_lab.LCHab_to_Lab` definition
n-dimensional arrays support.
"""
LCHab = np.array([41.52787529, 59.12425901, 27.08848784])
Lab = LCHab_to_Lab(LCHab)
LCHab = np.tile(LCHab, (6, 1))
Lab = np.tile(Lab, (6, 1))
np.testing.assert_almost_equal(LCHab_to_Lab(LCHab), Lab, decimal=7)
LCHab = np.reshape(LCHab, (2, 3, 3))
Lab = np.reshape(Lab, (2, 3, 3))
np.testing.assert_almost_equal(LCHab_to_Lab(LCHab), Lab, decimal=7)
def test_domain_range_scale_LCHab_to_Lab(self):
"""
Test :func:`colour.models.cie_lab.LCHab_to_Lab` definition domain and
range scale support.
"""
LCHab = np.array([41.52787529, 59.12425901, 27.08848784])
Lab = LCHab_to_Lab(LCHab)
d_r = (
("reference", 1, 1),
("1", np.array([0.01, 0.01, 1 / 360]), 0.01),
("100", np.array([1, 1, 1 / 3.6]), 1),
)
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
LCHab_to_Lab(LCHab * factor_a), Lab * factor_b, decimal=7
)
@ignore_numpy_errors
def test_nan_LCHab_to_Lab(self):
"""
Test :func:`colour.models.cie_lab.LCHab_to_Lab` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
LCHab = np.array(case)
LCHab_to_Lab(LCHab)
if __name__ == "__main__":
unittest.main()
| 32.533007
| 83
| 0.56809
|
a22d4d22a892bb62ff9f39136884bd8bd64256e1
| 6,212
|
py
|
Python
|
my_args.py
|
rxys/DAIN
|
147791520d8bbb4d19e72c0222a59f5f3d7d218c
|
[
"MIT"
] | null | null | null |
my_args.py
|
rxys/DAIN
|
147791520d8bbb4d19e72c0222a59f5f3d7d218c
|
[
"MIT"
] | null | null | null |
my_args.py
|
rxys/DAIN
|
147791520d8bbb4d19e72c0222a59f5f3d7d218c
|
[
"MIT"
] | null | null | null |
import os
import datetime
import argparse
import numpy
import networks
import torch
modelnames = networks.__all__
# import datasets
datasetNames = ('Vimeo_90K_interp',)  # datasets.__all__
parser = argparse.ArgumentParser(description='DAIN')
parser.add_argument('--debug',action = 'store_true', help='Enable debug mode')
parser.add_argument('--netName', type=str, default='DAIN',
choices = modelnames,help = 'model architecture: ' +
' | '.join(modelnames) +
' (default: DAIN)')
parser.add_argument('--datasetName', default='Vimeo_90K_interp',
choices= datasetNames,nargs='+',
help='dataset type : ' +
' | '.join(datasetNames) +
' (default: Vimeo_90K_interp)')
parser.add_argument('--datasetPath',default='',help = 'the path of selected datasets')
parser.add_argument('--dataset_split', type = int, default=97, help = 'Split a dataset into trainining and validation by percentage (default: 97)')
parser.add_argument('--seed', type=int, default=1, help='random seed (default: 1)')
parser.add_argument('--numEpoch', '-e', type = int, default=100, help= 'Number of epochs to train (default: 100)')
parser.add_argument('--batch_size', '-b',type = int ,default=1, help = 'batch size (default:1)' )
parser.add_argument('--workers', '-w', type =int,default=8, help = 'parallel workers for loading training samples (default: 8)')
parser.add_argument('--channels', '-c', type=int,default=3,choices = [1,3], help ='channels of images (default:3)')
parser.add_argument('--filter_size', '-f', type=int, default=4, help = 'the size of filters used (default: 4)',
choices=[2,4,6, 5,51]
)
parser.add_argument('--lr', type =float, default= 0.002, help= 'the basic learning rate for three subnetworks (default: 0.002)')
parser.add_argument('--rectify_lr', type=float, default=0.001, help = 'the learning rate for rectify/refine subnetworks (default: 0.001)')
parser.add_argument('--save_which', '-s', type=int, default=1, choices=[0,1], help='choose which result to save: 0 ==> interpolated, 1==> rectified')
parser.add_argument('--time_step', type=float, default=0.5, help='choose the time steps')
parser.add_argument('--flow_lr_coe', type = float, default=0.01, help = 'relative learning rate w.r.t basic learning rate (default: 0.01)')
parser.add_argument('--occ_lr_coe', type = float, default=1.0, help = 'relative learning rate w.r.t basic learning rate (default: 1.0)')
parser.add_argument('--filter_lr_coe', type = float, default=1.0, help = 'relative learning rate w.r.t basic learning rate (default: 1.0)')
parser.add_argument('--ctx_lr_coe', type = float, default=1.0, help = 'relative learning rate w.r.t basic learning rate (default: 1.0)')
parser.add_argument('--depth_lr_coe', type = float, default=0.001, help = 'relative learning rate w.r.t basic learning rate (default: 0.001)')
# parser.add_argument('--deblur_lr_coe', type = float, default=0.01, help = 'relative learning rate w.r.t basic learning rate (default: 0.01)')
parser.add_argument('--alpha', type=float,nargs='+', default=[0.0, 1.0], help= 'the ration of loss for interpolated and rectified result (default: [0.0, 1.0])')
parser.add_argument('--epsilon', type = float, default=1e-6, help = 'the epsilon for charbonier loss,etc (default: 1e-6)')
parser.add_argument('--weight_decay', type = float, default=0, help = 'the weight decay for whole network ' )
parser.add_argument('--patience', type=int, default=5, help = 'the patience of reduce on plateou')
parser.add_argument('--factor', type = float, default=0.2, help = 'the factor of reduce on plateou')
#
parser.add_argument('--pretrained', dest='SAVED_MODEL', default=None, help ='path to the pretrained model weights')
parser.add_argument('--no-date', action='store_true', help='don\'t append date timestamp to folder' )
parser.add_argument('--use_cuda', default= True, type = bool, help='use cuda or not')
parser.add_argument('--use_cudnn',default=1,type=int, help = 'use cudnn or not')
parser.add_argument('--dtype', default=torch.cuda.FloatTensor, choices = [torch.cuda.FloatTensor,torch.FloatTensor],help = 'tensor data type ')
# parser.add_argument('--resume', default='', type=str, help='path to latest checkpoint (default: none)')
parser.add_argument('--uid', type=str, default= None, help='unique id for the training')
parser.add_argument('--force', action='store_true', help='force to override the given uid')
args = parser.parse_args()
import shutil
if args.uid is None:
    unique_id = str(numpy.random.randint(0, 100000))
    print("revise the unique id to a random number " + str(unique_id))
args.uid = unique_id
timestamp = datetime.datetime.now().strftime("%a-%b-%d-%H-%M")
save_path = './model_weights/'+ args.uid +'-' + timestamp
else:
save_path = './model_weights/'+ str(args.uid)
# print("no pth here : " + save_path + "/best"+".pth")
if not os.path.exists(save_path + "/best"+".pth"):
# print("no pth here : " + save_path + "/best" + ".pth")
os.makedirs(save_path,exist_ok=True)
else:
if not args.force:
raise("please use another uid ")
else:
print("override this uid" + args.uid)
for m in range(1,10):
if not os.path.exists(save_path+"/log.txt.bk" + str(m)):
shutil.copy(save_path+"/log.txt", save_path+"/log.txt.bk"+str(m))
shutil.copy(save_path+"/args.txt", save_path+"/args.txt.bk"+str(m))
break
parser.add_argument('--save_path',default=save_path,help = 'the output dir of weights')
parser.add_argument('--log', default = save_path+'/log.txt', help = 'the log file in training')
parser.add_argument('--arg', default = save_path+'/args.txt', help = 'the args used')
args = parser.parse_args()
with open(args.log, 'w') as f:
f.close()
with open(args.arg, 'w') as f:
print(args)
print(args,file=f)
f.close()
if args.use_cudnn:
print("cudnn is used")
    torch.backends.cudnn.benchmark = True  # let cudnn auto-tune convolution algorithms to speed up fixed-size inputs
else:
print("cudnn is not used")
    torch.backends.cudnn.benchmark = False
| 52.201681
| 160
| 0.673213
|
25b057e612a0ca3106ae124468875dffffb39e39
| 2,107
|
py
|
Python
|
bonus_top_interview_questions/227. Basic Calculator II.py
|
JacopoPan/leetcode-top100-liked-questions
|
03dc05f087d05805d54b7585ce740338f3128833
|
[
"MIT"
] | null | null | null |
bonus_top_interview_questions/227. Basic Calculator II.py
|
JacopoPan/leetcode-top100-liked-questions
|
03dc05f087d05805d54b7585ce740338f3128833
|
[
"MIT"
] | null | null | null |
bonus_top_interview_questions/227. Basic Calculator II.py
|
JacopoPan/leetcode-top100-liked-questions
|
03dc05f087d05805d54b7585ce740338f3128833
|
[
"MIT"
] | null | null | null |
"""
Runtime: 189 ms, faster than 16.21% of Python3 online submissions for Basic Calculator II.
Memory Usage: 20.1 MB, less than 5.55% of Python3 online submissions for Basic Calculator II.
"""
from typing import List
from typing import Optional
class Solution:
def calculate(self, s: str) -> int:
s = s.replace(' ', '')
list_eq = []
operand = ''
for idx, character in enumerate(s):
if character in ['*', '/', '+', '-']:
val = int(operand)
operand = ''
list_eq.append(val)
list_eq.append(character)
elif idx==len(s)-1:
operand += character
val = int(operand)
list_eq.append(val)
else:
operand += character
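        # After tokenizing into [operand, op, operand, ...], evaluation runs in
        # two passes: the first resolves '*' and '/', the second '+' and '-'.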
temp = []
for idx in range(len(list_eq)):
if list_eq[idx-1] == '*':
temp.pop()
temp.pop()
res = list_eq[idx-2]*list_eq[idx]
list_eq[idx] = res
temp.append(res)
elif list_eq[idx-1] == '/':
temp.pop()
temp.pop()
res = list_eq[idx-2]//list_eq[idx]
list_eq[idx] = res
temp.append(res)
else:
temp.append(list_eq[idx])
list_eq = temp
temp = []
for idx in range(len(list_eq)):
if list_eq[idx-1] == '+':
temp.pop()
temp.pop()
res = list_eq[idx-2]+list_eq[idx]
list_eq[idx] = res
temp.append(res)
elif list_eq[idx-1] == '-':
temp.pop()
temp.pop()
res = list_eq[idx-2]-list_eq[idx]
list_eq[idx] = res
temp.append(res)
else:
temp.append(list_eq[idx])
list_eq = temp
return list_eq[0]
def main():
sol = Solution()
print('Output:', sol.calculate("1+1+1"))
print('Expected:', 3)
if __name__ == "__main__":
main()
| 30.985294
| 93
| 0.452302
|
261cb36ffd4219f6f299c0db0ddefcae270fde1b
| 2,244
|
py
|
Python
|
archived/archive-WSPS/input-checker/input_checker.py
|
XiaoxiongXie/WRF-SUEWS
|
51f8d2359b394016e65121f3baab908b1ad4d89c
|
[
"MIT"
] | 2
|
2020-09-24T09:15:31.000Z
|
2020-09-28T16:16:30.000Z
|
archived/archive-WSPS/input-checker/input_checker.py
|
XiaoxiongXie/WRF-SUEWS
|
51f8d2359b394016e65121f3baab908b1ad4d89c
|
[
"MIT"
] | 3
|
2020-09-24T13:46:24.000Z
|
2020-10-01T09:54:17.000Z
|
archived/archive-WSPS/input-checker/input_checker.py
|
XiaoxiongXie/WRF-SUEWS
|
51f8d2359b394016e65121f3baab908b1ad4d89c
|
[
"MIT"
] | 2
|
2020-10-01T09:46:58.000Z
|
2022-01-09T10:38:21.000Z
|
# %%
#import yaml
#yaml.warnings({'YAMLLoadWarning': False})
import xarray as xr
import numpy as np
import pandas as pd
import json
import glob
import os
# %%
# the check list file with ranges and logics
check_file = 'check_file_suews.json'
# opening the check list file
def open_check_file(check_file):
with open(check_file) as cf:
cr = json.load(cf)
# making the keys upper case to be consistent with wrfinputs
cr_temp = {}
for key in cr.keys():
# for some reason pop() did not work here!!
cr_temp[key.upper()] = cr[key]
return cr_temp
# checking the range of each parameter
def check_range(var, values, cr):
min_v = cr[var]['param']['min']
max_v = cr[var]['param']['max']
description = ' should be between '+str(min_v)+' and '+str(max_v)
is_accepted_flag = False
for value in np.nditer(values):
if min_v <= value <= max_v:
is_accepted_flag = True
    if not is_accepted_flag:
is_accepted = 'No'
suggestion = 'change the parameter to fall into the acceptable range'
else:
is_accepted = 'Yes'
suggestion = ''
return [var, is_accepted, description, suggestion]
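# Illustrative sketch of an entry in check_file_suews.json as assumed by the
# functions above (the real file is not included here; keys are uppercased by
# open_check_file to match the wrfinput variable names):
#
#   {"lai_suews": {"logic": "range", "param": {"min": 0.0, "max": 10.0}}}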
def check_zd_zh(var, values, cr):
return 0
# checks for suews parameters
def check_var_suews(var, values, cr, df_sum):
logic = cr[var]['logic']
if logic == 'range':
out_list = check_range(var, values, cr)
elif logic == 'zd-zh':
out_list = check_zd_zh(var, values, cr)
df_sum.loc[len(df_sum)] = out_list
return df_sum
cr = open_check_file(check_file)
# getting all the wrfinput files
fl_wrfinput = glob.glob('wrfinputs/wrfinput_d0?')
for wrfinput in fl_wrfinput:
df_sum = pd.DataFrame(
columns=['Parameter', 'Is acceptable?', 'Description', 'Suggestion'])
print('Working on '+wrfinput)
print('==========================================================================')
print('==================== Check Table for ' +
wrfinput+' ========================')
ds = xr.open_dataset(wrfinput)
for var in ds.data_vars:
if 'SUEWS' in var:
df_sum = check_var_suews(var, ds[var].values, cr, df_sum)
print(df_sum)
| 23.375
| 87
| 0.60205
|
9d358430f00ecead9b8842c993c9f1dfce303c82
| 77
|
py
|
Python
|
Contest/ABC017/a/main.py
|
mpses/AtCoder
|
9c101fcc0a1394754fcf2385af54b05c30a5ae2a
|
[
"CC0-1.0"
] | null | null | null |
Contest/ABC017/a/main.py
|
mpses/AtCoder
|
9c101fcc0a1394754fcf2385af54b05c30a5ae2a
|
[
"CC0-1.0"
] | null | null | null |
Contest/ABC017/a/main.py
|
mpses/AtCoder
|
9c101fcc0a1394754fcf2385af54b05c30a5ae2a
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
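# Read three lines of the form "s e", evaluate each as s*e by turning the space
# into '*', sum the three products and integer-divide the total by 10.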
print(eval("+eval(input().replace(' ','*'))"*3) // 10)
| 38.5
| 54
| 0.571429
|
cf6d0c37bd9ca81dc3e1fff8d44fabdfd0a0e04a
| 8,525
|
py
|
Python
|
habitat_sim/agent/agent.py
|
BKAUTO/Trinocular-habitat-sim
|
7ad70669095926cc4d5438befa65b019d0f91a8c
|
[
"MIT"
] | null | null | null |
habitat_sim/agent/agent.py
|
BKAUTO/Trinocular-habitat-sim
|
7ad70669095926cc4d5438befa65b019d0f91a8c
|
[
"MIT"
] | null | null | null |
habitat_sim/agent/agent.py
|
BKAUTO/Trinocular-habitat-sim
|
7ad70669095926cc4d5438befa65b019d0f91a8c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Union
import attr
import numpy as np
import habitat_sim.bindings as hsim
import habitat_sim.errors
from habitat_sim import utils
from habitat_sim.sensors import SensorSuite
from .controls import ActuationSpec, ObjectControls
__all__ = ["ActionSpec", "SixDOFPose", "AgentState", "AgentConfiguration", "Agent"]
def _default_action_space():
return dict(
move_forward=ActionSpec("move_forward", ActuationSpec(amount=0.25)),
turn_left=ActionSpec("turn_left", ActuationSpec(amount=10.0)),
turn_right=ActionSpec("turn_right", ActuationSpec(amount=10.0)),
)
@attr.s(auto_attribs=True)
class ActionSpec(object):
r"""Defines how a specific action is implemented
Args:
name (str): Name of the function implementing the action in the move_func_map
        actuation (ActuationSpec): Arguments that will be passed to the function
"""
name: str
actuation: ActuationSpec = None
@attr.s(auto_attribs=True, slots=True)
class SixDOFPose(object):
r"""Specifies a position with 6 degrees of freedom
Args:
position (np.array): xyz position
rotation (np.quaternion): unit quaternion rotation
"""
position: np.array = np.zeros(3)
rotation: Union[np.quaternion, List] = np.quaternion(1, 0, 0, 0)
@attr.s(auto_attribs=True, slots=True)
class AgentState(object):
position: np.array = np.zeros(3)
rotation: Union[np.quaternion, List] = np.quaternion(1, 0, 0, 0)
velocity: np.array = np.zeros(3)
angular_velocity: np.array = np.zeros(3)
force: np.array = np.zeros(3)
torque: np.array = np.zeros(3)
sensor_states: Dict[str, SixDOFPose] = attr.Factory(dict)
@attr.s(auto_attribs=True, slots=True)
class AgentConfiguration(object):
height: float = 1.5
radius: float = 0.1
mass: float = 32.0
linear_acceleration: float = 20.0
angular_acceleration: float = 4 * np.pi
linear_friction: float = 0.5
angular_friction: float = 1.0
coefficient_of_restitution: float = 0.0
sensor_specifications: List[hsim.SensorSpec] = attr.Factory(
lambda: [hsim.SensorSpec()]
)
action_space: Dict[Any, ActionSpec] = attr.Factory(_default_action_space)
body_type: str = "cylinder"
@attr.s(auto_attribs=True)
class Agent(object):
r"""Implements an agent with multiple sensors
Args:
agent_config (AgentConfiguration): The configuration of the agent
Warning:
Agents are given controls over a node in the scene graph, but do **not**
own this node. This means that errors will occur if the owner of the scene graph
is deallocated. Generally the owner of the scene graph is the Simulator.
If you'd like to have an agent to control without loading up the simulator,
see unit tests for the agent in `tests/test_agent.py`. We recommend letting the
simulator create the agent and own the scene graph in almost all cases. Using the scene
graph in python is dangerous due to differences in c++ and python memory management
"""
agent_config: AgentConfiguration = attr.Factory(AgentConfiguration)
sensors: SensorSuite = attr.Factory(SensorSuite)
controls: ObjectControls = attr.Factory(ObjectControls)
body: hsim.AttachedObject = attr.Factory(hsim.AttachedObject)
def __attrs_post_init__(self):
self.body.object_type = hsim.AttachedObjectType.AGENT
self.reconfigure(self.agent_config)
def reconfigure(
self, agent_config: AgentConfiguration, reconfigure_sensors: bool = True
):
r"""Re-create the agent with a new configuration
Args:
agent_config (AgentConfiguration): New config
reconfigure_sensors (bool): Whether or not to also reconfigure the sensors, there
are specific cases where false makes sense, but most cases are covered by true
"""
self.agent_config = agent_config
if reconfigure_sensors:
self.sensors.clear()
for spec in self.agent_config.sensor_specifications:
self.sensors.add(hsim.PinholeCamera(spec))
if self.body.is_valid:
for _, v in self.sensors.items():
v.attach(self.scene_node.create_child())
def attach(self, scene_node: hsim.SceneNode):
r"""Gives the agent control over the specified scene node (but **not** ownership)
The agent will recursively call attach for the sensors
Args:
scene_node (hsim.SceneNode)
"""
self.body.attach(scene_node)
for _, v in self.sensors.items():
v.attach(self.scene_node.create_child())
def detach(self):
r"""Detaches the agent from the its current scene_node
Recursively calls detach on any sensors
"""
self.body.detach()
for _, v in self.sensors.items():
v.detach()
def act(self, action_id: Any) -> bool:
r"""Take the action specified by action_id
Args:
action_id (Any): ID of the action.
            Retrieves the action from agent_config.action_space
Returns:
bool: Whether or not the action taken resulted in a collision
"""
habitat_sim.errors.assert_obj_valid(self.body)
assert (
action_id in self.agent_config.action_space
), f"No action {action_id} in action space"
action = self.agent_config.action_space[action_id]
did_collide = False
if self.controls.is_body_action(action.name):
did_collide = self.controls.action(
self.scene_node, action.name, action.actuation, apply_filter=False
)
else:
for _, v in self.sensors.items():
habitat_sim.errors.assert_obj_valid(v)
self.controls.action(
v.get_scene_node(),
action.name,
action.actuation,
apply_filter=False,
)
return did_collide
def get_state(self) -> AgentState:
habitat_sim.errors.assert_obj_valid(self.body)
state = AgentState(
self.body.get_absolute_position(),
utils.quat_from_coeffs(self.body.get_rotation()),
)
for k, v in self.sensors.items():
habitat_sim.errors.assert_obj_valid(v)
state.sensor_states[k] = SixDOFPose(
v.get_absolute_position(),
state.rotation * utils.quat_from_coeffs(v.get_rotation()),
)
return state
def set_state(self, state: AgentState, reset_sensors: bool = True):
r"""Sets the agents state
Args:
state (AgentState): The state to set the agent to
reset_sensors (bool): Whether or not to reset the sensors to their default intrinsic/extrinsic parameters
before setting their extrinsic state
"""
habitat_sim.errors.assert_obj_valid(self.body)
if isinstance(state.rotation, list):
state.rotation = utils.quat_from_coeffs(state.rotation)
self.body.reset_transformation()
self.body.translate(state.position)
self.body.set_rotation(utils.quat_to_coeffs(state.rotation))
if reset_sensors:
for _, v in self.sensors.items():
v.set_transformation_from_spec()
for k, v in state.sensor_states.items():
assert k in self.sensors
if isinstance(v.rotation, list):
v.rotation = utils.quat_from_coeffs(v.rotation)
s = self.sensors[k]
s.reset_transformation()
s.translate(
utils.quat_rotate_vector(
state.rotation.inverse(), v.position - state.position
)
)
s.set_rotation(utils.quat_to_coeffs(state.rotation.inverse() * v.rotation))
@property
def scene_node(self):
habitat_sim.errors.assert_obj_valid(self.body)
return self.body.get_scene_node()
@property
def state(self):
return self.get_state()
@state.setter
def state(self, new_state):
self.set_state(new_state, reset_sensors=True)
def __del__(self):
self.detach()
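# Illustrative configuration sketch (added for clarity; a full Agent is normally
# created by the Simulator, which owns the scene graph, so only the config is
# shown here):
#
#   cfg = AgentConfiguration(height=1.5, radius=0.1)
#   cfg.action_space["turn_left"] = ActionSpec("turn_left", ActuationSpec(amount=15.0))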
| 33.431373
| 117
| 0.646452
|
487346778035cb20009d6ce8589677cd1a3a32c3
| 5,062
|
py
|
Python
|
tests/ti_deps/deps/prev_dagrun_dep.py
|
rubeshdcube/incubator-airflow
|
5419fbb78a2ea2388456c356d2f899ea1991b2de
|
[
"Apache-2.0"
] | 4
|
2017-06-25T14:09:31.000Z
|
2020-11-20T09:51:24.000Z
|
tests/ti_deps/deps/prev_dagrun_dep.py
|
rubeshdcube/incubator-airflow
|
5419fbb78a2ea2388456c356d2f899ea1991b2de
|
[
"Apache-2.0"
] | 1
|
2017-07-04T07:31:15.000Z
|
2017-07-06T06:01:42.000Z
|
tests/ti_deps/deps/prev_dagrun_dep.py
|
rubeshdcube/incubator-airflow
|
5419fbb78a2ea2388456c356d2f899ea1991b2de
|
[
"Apache-2.0"
] | 2
|
2018-04-09T15:13:50.000Z
|
2019-06-14T07:19:46.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datetime import datetime
from airflow.ti_deps.deps.prev_dagrun_dep import PrevDagrunDep
from airflow.utils.state import State
from fake_models import FakeContext, FakeTask, FakeTI
class PrevDagrunDepTest(unittest.TestCase):
def test_not_depends_on_past(self):
"""
If depends on past isn't set in the task then the previous dagrun should be
ignored, even though there is no previous_ti which would normally fail the dep
"""
task = FakeTask(
depends_on_past=False,
start_date=datetime(2016, 1, 1),
wait_for_downstream=False)
prev_ti = FakeTI(
task=task,
execution_date=datetime(2016, 1, 2),
state=State.SUCCESS,
dependents_done=True)
ti = FakeTI(
task=task,
previous_ti=prev_ti,
execution_date=datetime(2016, 1, 3))
dep_context = FakeContext(ignore_depends_on_past=False)
self.assertTrue(PrevDagrunDep().is_met(ti=ti, dep_context=dep_context))
def test_context_ignore_depends_on_past(self):
"""
If the context overrides depends_on_past then the dep should be met, even though
there is no previous_ti which would normally fail the dep
"""
task = FakeTask(
depends_on_past=True,
start_date=datetime(2016, 1, 1),
wait_for_downstream=False)
prev_ti = FakeTI(
task=task,
execution_date=datetime(2016, 1, 2),
state=State.SUCCESS,
dependents_done=True)
ti = FakeTI(
task=task,
previous_ti=prev_ti,
execution_date=datetime(2016, 1, 3))
dep_context = FakeContext(ignore_depends_on_past=True)
self.assertTrue(PrevDagrunDep().is_met(ti=ti, dep_context=dep_context))
def test_first_task_run(self):
"""
The first task run for a TI should pass since it has no previous dagrun.
"""
task = FakeTask(
depends_on_past=True,
start_date=datetime(2016, 1, 1),
wait_for_downstream=False)
prev_ti = None
ti = FakeTI(
task=task,
previous_ti=prev_ti,
execution_date=datetime(2016, 1, 1))
dep_context = FakeContext(ignore_depends_on_past=False)
self.assertTrue(PrevDagrunDep().is_met(ti=ti, dep_context=dep_context))
def test_prev_ti_bad_state(self):
"""
If the previous TI did not complete execution this dep should fail.
"""
task = FakeTask(
depends_on_past=True,
start_date=datetime(2016, 1, 1),
wait_for_downstream=False)
prev_ti = FakeTI(
state=State.NONE,
dependents_done=True)
ti = FakeTI(
task=task,
previous_ti=prev_ti,
execution_date=datetime(2016, 1, 2))
dep_context = FakeContext(ignore_depends_on_past=False)
self.assertFalse(PrevDagrunDep().is_met(ti=ti, dep_context=dep_context))
def test_failed_wait_for_downstream(self):
"""
If the previous TI specified to wait for the downstream tasks of the previous
dagrun then it should fail this dep if the downstream TIs of the previous TI are
not done.
"""
task = FakeTask(
depends_on_past=True,
start_date=datetime(2016, 1, 1),
wait_for_downstream=True)
prev_ti = FakeTI(
state=State.SUCCESS,
dependents_done=False)
ti = FakeTI(
task=task,
previous_ti=prev_ti,
execution_date=datetime(2016, 1, 2))
dep_context = FakeContext(ignore_depends_on_past=False)
self.assertFalse(PrevDagrunDep().is_met(ti=ti, dep_context=dep_context))
def test_all_met(self):
"""
Test to make sure all of the conditions for the dep are met
"""
task = FakeTask(
depends_on_past=True,
start_date=datetime(2016, 1, 1),
wait_for_downstream=True)
prev_ti = FakeTI(
state=State.SUCCESS,
dependents_done=True)
ti = FakeTI(
task=task,
previous_ti=prev_ti,
execution_date=datetime(2016, 1, 2))
dep_context = FakeContext(ignore_depends_on_past=False)
self.assertTrue(PrevDagrunDep().is_met(ti=ti, dep_context=dep_context))
| 35.152778
| 88
| 0.627815
|
4880c7d3d760e270416605ea2eeb3e315262a7e1
| 871
|
py
|
Python
|
plugins/execs.py
|
CPSLK/userbot
|
7d3ddd03e6a780493732f83e8015cae6145008f9
|
[
"MIT"
] | 1
|
2020-07-09T18:17:01.000Z
|
2020-07-09T18:17:01.000Z
|
plugins/execs.py
|
CPSLK/userbot
|
7d3ddd03e6a780493732f83e8015cae6145008f9
|
[
"MIT"
] | null | null | null |
plugins/execs.py
|
CPSLK/userbot
|
7d3ddd03e6a780493732f83e8015cae6145008f9
|
[
"MIT"
] | 1
|
2020-07-16T02:14:36.000Z
|
2020-07-16T02:14:36.000Z
|
import io
import re
import html
import traceback
from contextlib import redirect_stdout
from pyrogram import Client, Filters
from config import cmds
@Client.on_message(Filters.command("exec", prefixes=".") & Filters.me)
async def execs(client, message):
strio = io.StringIO()
code = re.split(r"[\n ]+", message.text, 1)[1]
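    # Wrap the user-supplied code into the body of an async function ("__ex") by
    # indenting every line, then exec() it so it can be awaited below.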
exec('async def __ex(client, message): ' + ' '.join('\n ' + l for l in code.split('\n')))
with redirect_stdout(strio):
try:
await locals()["__ex"](client, message)
except:
return await message.reply_text(html.escape(traceback.format_exc()), parse_mode="HTML")
if strio.getvalue():
out = f"<code>{html.escape(strio.getvalue())}</code>"
else:
out = "Command executed."
await message.edit(out, parse_mode="HTML")
cmds.update({'.exec':'Run commands on python'})
| 29.033333
| 99
| 0.649828
|
8f1e5504dad2e6b433415327a00fa088ad326115
| 6,583
|
py
|
Python
|
frappe/event_streaming/doctype/document_type_mapping/document_type_mapping.py
|
Don-Leopardo/frappe
|
39097b05a7a9904776a435ee2c3d7a579d429389
|
[
"MIT"
] | 3,755
|
2015-01-06T07:47:43.000Z
|
2022-03-31T20:54:23.000Z
|
frappe/event_streaming/doctype/document_type_mapping/document_type_mapping.py
|
Don-Leopardo/frappe
|
39097b05a7a9904776a435ee2c3d7a579d429389
|
[
"MIT"
] | 7,369
|
2015-01-01T19:59:41.000Z
|
2022-03-31T23:02:05.000Z
|
frappe/event_streaming/doctype/document_type_mapping/document_type_mapping.py
|
Don-Leopardo/frappe
|
39097b05a7a9904776a435ee2c3d7a579d429389
|
[
"MIT"
] | 2,685
|
2015-01-07T17:51:03.000Z
|
2022-03-31T23:16:24.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and contributors
# License: MIT. See LICENSE
import frappe
import json
from frappe import _
from frappe.model.document import Document
from frappe.model import default_fields
class DocumentTypeMapping(Document):
def validate(self):
self.validate_inner_mapping()
def validate_inner_mapping(self):
meta = frappe.get_meta(self.local_doctype)
for field_map in self.field_mapping:
if field_map.local_fieldname not in default_fields:
field = meta.get_field(field_map.local_fieldname)
if not field:
frappe.throw(_('Row #{0}: Invalid Local Fieldname').format(field_map.idx))
fieldtype = field.get('fieldtype')
if fieldtype in ['Link', 'Dynamic Link', 'Table']:
if not field_map.mapping and not field_map.default_value:
msg = _('Row #{0}: Please set Mapping or Default Value for the field {1} since its a dependency field').format(
field_map.idx, frappe.bold(field_map.local_fieldname))
frappe.throw(msg, title='Inner Mapping Missing')
if field_map.mapping_type == 'Document' and not field_map.remote_value_filters:
msg = _('Row #{0}: Please set remote value filters for the field {1} to fetch the unique remote dependency document').format(
field_map.idx, frappe.bold(field_map.remote_fieldname))
frappe.throw(msg, title='Remote Value Filters Missing')
def get_mapping(self, doc, producer_site, update_type):
remote_fields = []
# list of tuples (local_fieldname, dependent_doc)
dependencies = []
for mapping in self.field_mapping:
if doc.get(mapping.remote_fieldname):
if mapping.mapping_type == 'Document':
if not mapping.default_value:
dependency = self.get_mapped_dependency(mapping, producer_site, doc)
if dependency:
dependencies.append((mapping.local_fieldname, dependency))
else:
doc[mapping.local_fieldname] = mapping.default_value
if mapping.mapping_type == 'Child Table' and update_type != 'Update':
doc[mapping.local_fieldname] = get_mapped_child_table_docs(mapping.mapping, doc[mapping.remote_fieldname], producer_site)
else:
# copy value into local fieldname key and remove remote fieldname key
doc[mapping.local_fieldname] = doc[mapping.remote_fieldname]
if mapping.local_fieldname != mapping.remote_fieldname:
remote_fields.append(mapping.remote_fieldname)
if not doc.get(mapping.remote_fieldname) and mapping.default_value and update_type != 'Update':
doc[mapping.local_fieldname] = mapping.default_value
#remove the remote fieldnames
for field in remote_fields:
doc.pop(field, None)
if update_type != 'Update':
doc['doctype'] = self.local_doctype
mapping = {'doc': frappe.as_json(doc)}
if len(dependencies):
mapping['dependencies'] = dependencies
return mapping
def get_mapped_update(self, update, producer_site):
update_diff = frappe._dict(json.loads(update.data))
mapping = update_diff
dependencies = []
if update_diff.changed:
doc_map = self.get_mapping(update_diff.changed, producer_site, 'Update')
mapped_doc = doc_map.get('doc')
mapping.changed = json.loads(mapped_doc)
if doc_map.get('dependencies'):
dependencies += doc_map.get('dependencies')
if update_diff.removed:
mapping = self.map_rows_removed(update_diff, mapping)
if update_diff.added:
mapping = self.map_rows(update_diff, mapping, producer_site, operation='added')
if update_diff.row_changed:
mapping = self.map_rows(update_diff, mapping, producer_site, operation='row_changed')
update = {'doc': frappe.as_json(mapping)}
if len(dependencies):
update['dependencies'] = dependencies
return update
def get_mapped_dependency(self, mapping, producer_site, doc):
inner_mapping = frappe.get_doc('Document Type Mapping', mapping.mapping)
filters = json.loads(mapping.remote_value_filters)
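		# Filter values prefixed with "eval:" are evaluated with the source doc in
		# scope (e.g. a hypothetical {"company": "eval:doc.get('company')"}), while
		# plain values are treated as fieldnames looked up on the doc itself.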
for key, value in filters.items():
if value.startswith('eval:'):
val = frappe.safe_eval(value[5:], None, dict(doc=doc))
filters[key] = val
if doc.get(value):
filters[key] = doc.get(value)
matching_docs = producer_site.get_doc(inner_mapping.remote_doctype, filters=filters)
if len(matching_docs):
remote_docname = matching_docs[0].get('name')
remote_doc = producer_site.get_doc(inner_mapping.remote_doctype, remote_docname)
doc = inner_mapping.get_mapping(remote_doc, producer_site, 'Insert').get('doc')
return doc
return
def map_rows_removed(self, update_diff, mapping):
removed = []
mapping['removed'] = update_diff.removed
for key, value in update_diff.removed.copy().items():
local_table_name = frappe.db.get_value('Document Type Field Mapping', {
'remote_fieldname': key,
'parent': self.name
},'local_fieldname')
mapping.removed[local_table_name] = value
if local_table_name != key:
removed.append(key)
#remove the remote fieldnames
for field in removed:
mapping.removed.pop(field, None)
return mapping
def map_rows(self, update_diff, mapping, producer_site, operation):
remote_fields = []
for tablename, entries in update_diff.get(operation).copy().items():
local_table_name = frappe.db.get_value('Document Type Field Mapping', {'remote_fieldname': tablename}, 'local_fieldname')
table_map = frappe.db.get_value('Document Type Field Mapping', {'local_fieldname': local_table_name, 'parent': self.name}, 'mapping')
table_map = frappe.get_doc('Document Type Mapping', table_map)
docs = []
for entry in entries:
mapped_doc = table_map.get_mapping(entry, producer_site, 'Update').get('doc')
docs.append(json.loads(mapped_doc))
mapping.get(operation)[local_table_name] = docs
if local_table_name != tablename:
remote_fields.append(tablename)
# remove the remote fieldnames
for field in remote_fields:
mapping.get(operation).pop(field, None)
return mapping
def get_mapped_child_table_docs(child_map, table_entries, producer_site):
"""Get mapping for child doctypes"""
child_map = frappe.get_doc('Document Type Mapping', child_map)
mapped_entries = []
remote_fields = []
for child_doc in table_entries:
for mapping in child_map.field_mapping:
if child_doc.get(mapping.remote_fieldname):
child_doc[mapping.local_fieldname] = child_doc[mapping.remote_fieldname]
if mapping.local_fieldname != mapping.remote_fieldname:
child_doc.pop(mapping.remote_fieldname, None)
mapped_entries.append(child_doc)
#remove the remote fieldnames
for field in remote_fields:
child_doc.pop(field, None)
child_doc['doctype'] = child_map.local_doctype
return mapped_entries
| 38.723529
| 136
| 0.745557
|
de8936834be2d4b2cffff2d2c096de83b1812b0f
| 1,168
|
py
|
Python
|
tests/test_image_to_boxes.py
|
amenezes/aiopytesseract
|
9ed8bcebbea87c39b194934e0fa99cf26b328a3c
|
[
"Apache-2.0"
] | 2
|
2022-02-25T07:09:22.000Z
|
2022-03-13T04:20:57.000Z
|
tests/test_image_to_boxes.py
|
amenezes/aiopytesseract
|
9ed8bcebbea87c39b194934e0fa99cf26b328a3c
|
[
"Apache-2.0"
] | null | null | null |
tests/test_image_to_boxes.py
|
amenezes/aiopytesseract
|
9ed8bcebbea87c39b194934e0fa99cf26b328a3c
|
[
"Apache-2.0"
] | null | null | null |
from pathlib import Path
import pytest
import aiopytesseract
from aiopytesseract.models import Box
from aiopytesseract.exceptions import TesseractRuntimeError
@pytest.mark.asyncio
@pytest.mark.parametrize("image", ["tests/samples/file-sample_150kB.png"])
async def test_image_to_boxes_with_str_image(image):
boxes = await aiopytesseract.image_to_boxes(image)
assert isinstance(boxes, list)
assert isinstance(boxes[0], Box)
assert len(boxes) == 78
@pytest.mark.asyncio
@pytest.mark.parametrize("image", ["tests/samples/file-sample_150kB.png"])
async def test_image_to_boxes_with_bytes_image(image):
boxes = await aiopytesseract.image_to_boxes(Path(image).read_bytes())
assert isinstance(boxes, list)
assert isinstance(boxes[0], Box)
assert len(boxes) == 78
@pytest.mark.asyncio
async def test_image_to_boxes_with_invalid():
with pytest.raises(TesseractRuntimeError):
await aiopytesseract.image_to_boxes("tests/samples/file-sample_150kB.pdf")
@pytest.mark.asyncio
async def test_image_to_boxes_with_type_not_supported():
with pytest.raises(NotImplementedError):
await aiopytesseract.image_to_boxes(None)
| 30.736842
| 82
| 0.78339
|
7a39fbd265fbc708c4a6cda41219da7f26dbe56f
| 9,508
|
py
|
Python
|
main.py
|
jackdevey/Govee-CLI
|
e7fd5818032a5d0640141210a1ccdd9b06c07ea1
|
[
"WTFPL"
] | 2
|
2021-12-04T03:00:17.000Z
|
2022-01-16T03:14:55.000Z
|
main.py
|
jackdevey/Govee-CLI
|
e7fd5818032a5d0640141210a1ccdd9b06c07ea1
|
[
"WTFPL"
] | 1
|
2021-02-24T14:37:56.000Z
|
2021-03-14T17:00:34.000Z
|
main.py
|
jackdevey/Govee-CLI
|
e7fd5818032a5d0640141210a1ccdd9b06c07ea1
|
[
"WTFPL"
] | null | null | null |
import os
import pathlib
import requests
import json
import click
import contextVars
__author__ = "jack-txt & HarryDev06"
@click.group()
@click.pass_context
def main(ctx):
"""
Govee API v1.1.0 by jack-txt & HarryDev06
THIS PRODUCT IS NOT AFFILIATED WITH GOVEE
"""
key = os.environ.get('GOVEE_KEY')
# Make a request to Govee asking for devices
url = 'https://developer-api.govee.com/v1/devices'
headers = {'Govee-API-Key': key}
r = requests.get(url, headers=headers)
ctx.obj = contextVars.Context(json.loads(r.content)["data"]["devices"], key)
@main.command()
def viewlicense():
"GNU General Public License v3.0"
f = open(os.path.join(pathlib.Path(__file__).parent.absolute(), "LICENSE"), "r")
click.echo_via_pager(f.read())
@main.command()
def viewrepo():
"Open the repo in your browser"
click.echo("Opening Repository in browser")
click.launch("https://github.com/jack-txt/govee-api")
click.echo("Launched")
@main.command()
def giraffe():
"Prints a giraffe to the screen"
print("""\
._ o o
\_`-)|_
,"" \
," ## | ಠ ಠ.
," ## ,-\__ `.
," / `--._;)
," ## /
," ## /
""")
@main.command()
@click.pass_obj
def listdevices(ctx):
"""
Shows the devices registered to your Govee account
"""
content = ctx.devices
# For each device in the users account, display it's details and
# assign an iid that can be used to access the device
i = 0
for device in content:
print("------------------")
print("Device iid: " + str(i))
print("Device MAC: " + device["device"])
print("Model Name: " + device["model"])
print("Device Nickname: " + device["deviceName"])
print("Controllable: " + str(device["controllable"]))
print("Retrievable: " + str(device["retrievable"]))
print("Commands: ")
for commands in device["supportCmds"]:
print(" " + commands)
@main.command()
@click.argument("iid", metavar='<iid>')
@click.argument("state", metavar='<state>')
@click.pass_obj
def turn(ctx, iid, state):
"""
Turn a device on or off
"""
safe = True
try:
deviceID = ctx.devices[int(iid)]["device"]
model = ctx.devices[int(iid)]["model"]
except IndexError:
click.echo("Couldn't find device " + iid + ", run listdevices to see the devices on your account.", err=True)
safe = False
if str(state) != "on" and str(state) != "off" and safe:
click.echo(state + " is not valid! [on/off]", err=True)
safe = False
if safe:
# Contact Govee with the requested device and state
url = 'https://developer-api.govee.com/v1/devices/control'
headers = {'Content-Type': 'application/json', 'Govee-API-Key': ctx.apiKey}
jsonToSend = '{"device": "' + deviceID + '","model": "' + model + '","cmd": {"name": "turn", "value": "' + state + '"}} '
r = requests.put(url, data=jsonToSend, headers=headers)
if r.status_code == 200:
click.secho('Success', fg='green', bold=True)
click.echo("Device with iid " + str(iid) + " (" + model + ") was turned " + state)
else:
click.echo(
"There was an error while attempting to turn device " + str(iid) + " " + state + " [Error code: " + str(
r.status_code) + "]")
@main.command()
@click.argument("iid", metavar='<iid>')
@click.argument("value", metavar='<value>', )
@click.pass_obj
def brightness(ctx, iid, value):
"""
Change the brightness of a device
"""
safe = True
try:
deviceID = ctx.devices[int(iid)]["device"]
model = ctx.devices[int(iid)]["model"]
except IndexError:
click.echo("Couldn't find device " + iid + ", run listdevices to see the devices on your account.", err=True)
safe = False
if not 0 < int(value) <= 100 and safe:
click.echo(value + " must be a whole number and between 0 and 100", err=True)
safe = False
if safe:
# Contact Govee with the requested device and state
url = 'https://developer-api.govee.com/v1/devices/control'
headers = {'Content-Type': 'application/json', 'Govee-API-Key': ctx.apiKey}
jsonToSend = '{"device": "' + deviceID + '","model": "' + model + '","cmd": {"name": "brightness", "value": ' + value + '}} '
r = requests.put(url, data=jsonToSend, headers=headers)
if r.status_code == 200:
click.secho('Success', fg='green', bold=True)
click.echo("Device with iid " + str(iid) + " (" + model + ") was set to " + value + "% brightness")
else:
click.echo("There was an error while attempting to set brightness on device " + str(
iid) + " to " + value + "% [Error code: " + str(r.status_code) + "]", err=True)
@main.command()
@click.argument("iid", metavar='<iid>')
@click.argument("color", metavar='<color>')
@click.pass_obj
def color(ctx, iid, color):
"""
Change the color of a device
"""
if color == "red":
hexadec = "#ff0000"
elif color == "green":
hexadec = "#00ff00"
elif color == "blue":
hexadec = "#0000ff"
elif color == "purple":
hexadec = "#B200FF"
elif color == "orange":
hexadec = "#FFA200"
elif color == "skyblue":
hexadec = "#00E8FF"
elif color == "lime":
hexadec = "#4DFF00"
elif color == "computub":
hexadec = "#0067f4"
elif color == "bandev":
hexadec = "#5E17EB"
elif color == "buddha-quotes":
hexadec = "#E80054"
elif color == "labyrinth":
hexadec = "#0067f4"
else:
hexadec = color
safe = True
try:
deviceID = ctx.devices[int(iid)]["device"]
model = ctx.devices[int(iid)]["model"]
except IndexError:
click.echo("Couldn't find device " + iid + ", run listdevices to see the devices on your account.", err=True)
safe = False
hexadec_in = hexadec
hexadec = hexadec.replace("#", '')
colors = []
while hexadec:
colors.append(hexadec[:2])
hexadec = hexadec[2:]
    try:
        red = str(int(colors[0], 16))
    except (ValueError, IndexError):
        click.echo("Please enter a valid hexadecimal string, in format #RRGGBB or a color name", err=True)
        safe = False
    try:
        green = str(int(colors[1], 16))
    except (ValueError, IndexError):
        click.echo("Please enter a valid hexadecimal string, in format #RRGGBB or a color name", err=True)
        safe = False
    try:
        blue = str(int(colors[2], 16))
    except (ValueError, IndexError):
        click.echo("Please enter a valid hexadecimal string, in format #RRGGBB or a color name", err=True)
        safe = False
if safe:
# Contact Govee with the requested device and state
url = 'https://developer-api.govee.com/v1/devices/control'
headers = {'Content-Type': 'application/json', 'Govee-API-Key': ctx.apiKey}
jsonToSend = '{"device": "' + deviceID + '","model": "' + model + '","cmd": {"name": "color", "value":{"r": ' + red + ', "g": ' + green + ', "b": ' + blue + '}}} '
r = requests.put(url, data=jsonToSend, headers=headers)
if r.status_code == 200:
click.secho('Success', fg='green', bold=True)
click.echo("Device with iid " + str(iid) + " (" + model + ") was set to color to " + hexadec_in)
else:
click.echo("There was an error while attempting to set color on device " + str(
iid) + " to " + hexadec + " [Error code: " + str(r.status_code) + "]", err=True)
@main.command()
@click.argument("iid", metavar='<iid>')
@click.argument("value", metavar='<value>')
@click.pass_obj
def colortem(ctx, iid, value):
"""
Change the colour temperature of a device
"""
safe = True
try:
deviceID = ctx.devices[int(iid)]["device"]
model = ctx.devices[int(iid)]["model"]
except IndexError:
click.echo("Couldn't find device " + iid + ", run listdevices to see the devices on your account.", err=True)
safe = False
    if safe and not (value.isdigit() and 2000 <= int(value) <= 9000):
        click.echo(value + " must be a whole number between 2000 and 9000", err=True)
        safe = False
if safe:
# Contact Govee with the requested device and state
url = 'https://developer-api.govee.com/v1/devices/control'
headers = {'Content-Type': 'application/json', 'Govee-API-Key': ctx.apiKey}
jsonToSend = '{"device": "' + deviceID + '","model": "' + model + '","cmd": {"name": "colorTem", "value": ' + value + '}} '
r = requests.put(url, data=jsonToSend, headers=headers)
if r.status_code == 200:
click.secho('Success', fg='green', bold=True)
click.echo("Device with iid " + str(iid) + " (" + model + ") was set to temperature value " + value)
else:
click.echo("There was an error while attempting to set temperature value on device " + str(
iid) + " to " + value + " [Error code: " + str(r.status_code) + "]", err=True)
if __name__ == "__main__":
main()
| 33.013889
| 171
| 0.555217
|
de862c32cff0abe47d4d5365be49d76e13fccf04
| 2,834
|
py
|
Python
|
augmentor/sl_intents_tool.py
|
samlet/stack
|
47db17fd4fdab264032f224dca31a4bb1d19b754
|
[
"Apache-2.0"
] | 3
|
2020-01-11T13:55:38.000Z
|
2020-08-25T22:34:15.000Z
|
augmentor/sl_intents_tool.py
|
samlet/stack
|
47db17fd4fdab264032f224dca31a4bb1d19b754
|
[
"Apache-2.0"
] | null | null | null |
augmentor/sl_intents_tool.py
|
samlet/stack
|
47db17fd4fdab264032f224dca31a4bb1d19b754
|
[
"Apache-2.0"
] | 1
|
2021-01-01T05:21:44.000Z
|
2021-01-01T05:21:44.000Z
|
import streamlit as st
from interacts.common import display_lang_selector
from interacts.sl_utils import all_labels, write_styles
from interacts.tracker_streamlit import enable_streamlit_tracker
from sagas.nlu.utils import fix_sents
enable_streamlit_tracker()
write_styles()
def sidebar():
cur_lang=display_lang_selector()
return cur_lang
def get_all_intents():
from sagas.tool.intents_tool import intents_tool
rs = intents_tool.db.corpus.find({'$and': [
{"intent": {'$not': {'$size': 0}}},
{"intent": {'$exists': True}}
]})
rm = {r['text']: r['intent'] for r in rs}
return {r for r in rm.values()}
def get_records(lang, chapter, field, fix=False):
from sagas.tool.intents_tool import intents_tool
text_list = [(doc[field] if not fix else fix_sents(doc[field], lang),
doc['text'] if lang != 'en' else f"{doc['chapter'].lower()[:10]}_{doc['index']}",
doc['intent'] if 'intent' in doc else '',
)
for doc in intents_tool.db.corpus.find({'chapter': chapter})]
return text_list
def intents_tool_panel(lang):
from sagas.tool.intents_tool import intents_tool
import sagas
item=intents_tool.get_chapters()
chapter=st.selectbox('which chapter to modify', item['chapters'])
field = f'lang_{lang}' if lang != 'en' else 'text'
text_list=get_records(lang, chapter, field)
opts = [t[0] for t in text_list if t[2] == '']
# intent modify function only available when the lang==en
if lang=='en' and len(opts)>0:
sents=st.selectbox('which sentence to modify', opts )
entry=next(t for t in text_list if t[0]==sents)
st.markdown(f"{entry[0]} `{entry[2]}`")
        sel_intents=st.multiselect('choose or input an intent', list(get_all_intents()))
st.write(sel_intents)
text_intent=st.text_input("intent", sel_intents[0] if len(sel_intents)>0 else '')
if text_intent.strip()!='':
# sel_intents.append(text_intent)
target_intent=text_intent.strip()
elif len(sel_intents)>0:
target_intent=sel_intents[0]
else:
target_intent=None
if target_intent is not None:
if st.button("store"):
st.write(f'.. store {target_intent}')
intents_tool.set_intent_by_text(sents, target_intent)
# refresh list
text_list = get_records(lang, chapter, field)
# for entry in text_list:
# st.markdown(f"{entry[0]} `{entry[1]}`")
st.table(sagas.to_df(text_list,
columns=[f'text_{lang}', 'text_en' if lang!='en' else 'location', 'intent']))
def main():
lang=sidebar()
st.subheader("intents tool")
intents_tool_panel(lang)
if __name__ == '__main__':
main()
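# Illustrative invocation (not part of the original module): Streamlit apps are
# started from the command line rather than executed directly, e.g.
#
#     streamlit run augmentor/sl_intents_tool.py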
| 34.987654
| 102
| 0.627735
|
c95f070a99b12c02aac6d14a3fade1526cd9a58e
| 1,005
|
py
|
Python
|
apps/profiles/forms.py
|
ecognize-hub/ecognize
|
e448098d7c5e815c68a7650b14b31d23976a8900
|
[
"Apache-2.0"
] | 1
|
2021-04-17T12:53:34.000Z
|
2021-04-17T12:53:34.000Z
|
apps/profiles/forms.py
|
ecognize-hub/ecognize
|
e448098d7c5e815c68a7650b14b31d23976a8900
|
[
"Apache-2.0"
] | null | null | null |
apps/profiles/forms.py
|
ecognize-hub/ecognize
|
e448098d7c5e815c68a7650b14b31d23976a8900
|
[
"Apache-2.0"
] | null | null | null |
from django import forms
from crispy_forms.helper import FormHelper
from .models import UserCreatedGroup, UserProfile
from django.urls import reverse
class CreateNewGroupForm(forms.ModelForm):
class Meta:
model = UserCreatedGroup
fields = ['display_name', 'visible', 'logo', 'join_mode']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_id = 'id_createNewGroupForm'
self.helper.form_method = 'post'
self.helper.form_action = reverse('api-usergroups-create')
class UserSettingsForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ['visible', 'anyone_can_message']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_id = 'id-settingsChangeForm'
self.helper.form_method = 'post'
self.helper.form_action = reverse('profile-settings')
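# Illustrative template usage (not part of the original module): with
# django-crispy-forms installed, either form above can be rendered in a
# template through the crispy tag, e.g.
#
#     {% load crispy_forms_tags %}
#     {% crispy form %}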
| 32.419355
| 66
| 0.672637
|
b359f0dc2af009bdedb25781fd48d355169a03d0
| 1,263
|
py
|
Python
|
corehq/apps/cachehq/tests.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/cachehq/tests.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | 1
|
2022-03-12T01:03:25.000Z
|
2022-03-12T01:03:25.000Z
|
corehq/apps/cachehq/tests.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | null | null | null |
from copy import deepcopy
from mock import patch
from django.test import SimpleTestCase
from corehq.apps.cachehq.mixins import CachedCouchDocumentMixin
from dimagi.ext.jsonobject import JsonObject, StringProperty
class Super(JsonObject):
@classmethod
def get(cls, *args, **kwargs):
pass
def save(self, *args, **kwargs):
pass
class BlogPost(CachedCouchDocumentMixin, Super):
title = StringProperty()
body = StringProperty()
class TestCachedCouchDocumentMixin(SimpleTestCase):
@patch.object(Super, 'get')
def test_get(self, doc_get):
blog_post = BlogPost(title="My favorite colors", body="blue")
blog_post['_id'] = 'idssrgglcfoyxdtrunbcae'
doc_get.return_value = deepcopy(blog_post)
blog_post.save()
blog_post.clear_caches()
# Make two `get`s and assert that only one made it to Document.get
BlogPost.get(blog_post['_id'])
BlogPost.get(blog_post['_id'])
doc_get.assert_called_once_with(blog_post['_id'])
# Update the doc, save, and assert that Document.get was hit again
blog_post.body = "Actually, it's purple"
blog_post.save()
BlogPost.get(blog_post['_id'])
self.assertEqual(doc_get.call_count, 2)
| 30.804878
| 74
| 0.688044
|
a3c8d1cdad61c28d1c9d67e52723b8ce54101cb8
| 8,255
|
py
|
Python
|
python/ray/air/train/integrations/tensorflow/tensorflow_trainer.py
|
orcahmlee/ray
|
298742d7241681ee1f307ec0dd3cd7e9713a3c7d
|
[
"Apache-2.0"
] | null | null | null |
python/ray/air/train/integrations/tensorflow/tensorflow_trainer.py
|
orcahmlee/ray
|
298742d7241681ee1f307ec0dd3cd7e9713a3c7d
|
[
"Apache-2.0"
] | 41
|
2021-09-21T01:13:48.000Z
|
2022-03-19T07:12:22.000Z
|
python/ray/air/train/integrations/tensorflow/tensorflow_trainer.py
|
LaudateCorpus1/ray
|
20cf2edfef7103c269358a49a48c2159315ee132
|
[
"Apache-2.0"
] | null | null | null |
from typing import Callable, Optional, Dict, Tuple, Type, Union
import tensorflow as tf
from ray.train.tensorflow import TensorflowConfig
from ray.air.trainer import GenDataset
from ray.air.train.data_parallel_trainer import DataParallelTrainer, _load_checkpoint
from ray.air.config import ScalingConfig, RunConfig, DatasetConfig
from ray.air.preprocessor import Preprocessor
from ray.air.checkpoint import Checkpoint
from ray.util import PublicAPI
@PublicAPI(stability="alpha")
class TensorflowTrainer(DataParallelTrainer):
"""A Trainer for data parallel Tensorflow training.
This Trainer runs the function ``train_loop_per_worker`` on multiple Ray
    Actors. These actors already have the necessary TensorFlow process group
configured for distributed TensorFlow training.
The ``train_loop_per_worker`` function is expected to take in either 0 or 1
arguments:
.. code-block:: python
def train_loop_per_worker():
...
.. code-block:: python
def train_loop_per_worker(config: Dict):
...
If ``train_loop_per_worker`` accepts an argument, then
``train_loop_config`` will be passed in as the argument. This is useful if you
want to tune the values in ``train_loop_config`` as hyperparameters.
If the ``datasets`` dict contains a training dataset (denoted by
the "train" key), then it will be split into multiple dataset
shards that can then be accessed by ``ray.train.get_dataset_shard("train")`` inside
``train_loop_per_worker``. All the other datasets will not be split and
    ``ray.train.get_dataset_shard(...)`` will return the entire Dataset.
Inside the ``train_loop_per_worker`` function, you can use any of the
:ref:`Ray Train function utils <train-api-func-utils>`.
.. code-block:: python
def train_loop_per_worker():
# Report intermediate results for callbacks or logging.
train.report(...)
# Checkpoints the provided args as restorable state.
train.save_checkpoint(...)
# Returns dict of last saved checkpoint.
train.load_checkpoint()
# Returns the Ray Dataset shard for the given key.
train.get_dataset_shard("my_dataset")
# Returns the total number of workers executing training.
train.get_world_size()
# Returns the rank of this worker.
train.get_world_rank()
# Returns the rank of the worker on the current node.
train.get_local_rank()
You can also use any of the :ref:`TensorFlow specific function utils
<train-api-tensorflow-utils>`.
.. code-block:: python
def train_loop_per_worker():
# Turns off autosharding for a dataset.
# You should use this if you are doing
# `train.get_dataset_shard(...).to_tf(...)`
# as the data will be already sharded.
train.tensorflow.prepare_dataset_shard(...)
To save a model to use for the ``TensorflowPredictor``, you must save it under the
"model" kwarg in ``train.save_checkpoint()``.
Example:
.. code-block:: python
import tensorflow as tf
import ray
from ray import train
from ray.train.tensorflow import prepare_dataset_shard
from ray.air.train.integrations.tensorflow import TensorflowTrainer
input_size = 1
def build_model():
# toy neural network : 1-layer
return tf.keras.Sequential(
[tf.keras.layers.Dense(
1, activation="linear", input_shape=(input_size,))]
)
def train_loop_for_worker(config):
dataset_shard = train.get_dataset_shard("train")
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
with strategy.scope():
model = build_model()
model.compile(
optimizer="Adam", loss="mean_squared_error", metrics=["mse"])
for epoch in range(config["num_epochs"]):
tf_dataset = prepare_dataset_shard(
dataset_shard.to_tf(
label_column="y",
output_signature=(
tf.TensorSpec(shape=(None, 1), dtype=tf.float32),
tf.TensorSpec(shape=(None), dtype=tf.float32),
),
batch_size=1,
)
)
model.fit(tf_dataset)
train.save_checkpoint(
epoch=epoch, model=model.get_weights())
train_dataset = ray.data.from_items(
[{"x": x, "y": x + 1} for x in range(32)])
trainer = TensorflowTrainer(scaling_config={"num_workers": 3},
datasets={"train": train_dataset},
train_loop_config={"num_epochs": 2})
result = trainer.fit()
Args:
train_loop_per_worker: The training function to execute.
This can either take in no arguments or a ``config`` dict.
train_loop_config: Configurations to pass into
``train_loop_per_worker`` if it accepts an argument.
tensorflow_config: Configuration for setting up the TensorFlow backend.
If set to None, use the default configuration. This replaces the
``backend_config`` arg of ``DataParallelTrainer``.
scaling_config: Configuration for how to scale data parallel training.
dataset_config: Configuration for dataset ingest.
run_config: Configuration for the execution of the training run.
datasets: Any Ray Datasets to use for training. Use
the key "train" to denote which dataset is the training
dataset. If a ``preprocessor`` is provided and has not already been fit,
it will be fit on the training dataset. All datasets will be transformed
by the ``preprocessor`` if one is provided.
preprocessor: A ray.air.preprocessor.Preprocessor to preprocess the
provided datasets.
resume_from_checkpoint: A checkpoint to resume training from.
"""
def __init__(
self,
*,
train_loop_per_worker: Union[Callable[[], None], Callable[[Dict], None]],
train_loop_config: Optional[Dict] = None,
tensorflow_config: Optional[TensorflowConfig] = None,
scaling_config: Optional[ScalingConfig] = None,
dataset_config: Optional[Dict[str, DatasetConfig]] = None,
run_config: Optional[RunConfig] = None,
datasets: Optional[Dict[str, GenDataset]] = None,
preprocessor: Optional[Preprocessor] = None,
resume_from_checkpoint: Optional[Checkpoint] = None,
):
if not tensorflow_config:
tensorflow_config = TensorflowConfig()
super(TensorflowTrainer, self).__init__(
train_loop_per_worker=train_loop_per_worker,
train_loop_config=train_loop_config,
backend_config=tensorflow_config,
scaling_config=scaling_config,
dataset_config=dataset_config,
run_config=run_config,
datasets=datasets,
preprocessor=preprocessor,
resume_from_checkpoint=resume_from_checkpoint,
)
def load_checkpoint(
checkpoint: Checkpoint,
model: Union[Callable[[], tf.keras.Model], Type[tf.keras.Model], tf.keras.Model],
) -> Tuple[tf.keras.Model, Optional[Preprocessor]]:
"""Load a Checkpoint from ``TensorflowTrainer``.
Args:
checkpoint: The checkpoint to load the model and
preprocessor from. It is expected to be from the result of a
``TensorflowTrainer`` run.
model: A callable that returns a TensorFlow Keras model
to use, or an instantiated model.
Model weights will be loaded from the checkpoint.
Returns:
The model with set weights and AIR preprocessor contained within.
"""
model_weights, preprocessor = _load_checkpoint(checkpoint, "TensorflowTrainer")
if isinstance(model, type) or callable(model):
model = model()
model.set_weights(model_weights)
return model, preprocessor
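# Illustrative usage sketch (not part of the original module): after the
# ``trainer.fit()`` call from the class docstring, the trained model could be
# restored from the returned checkpoint, assuming ``build_model`` is the same
# model-construction function used inside the training loop.
#
#     result = trainer.fit()
#     model, preprocessor = load_checkpoint(result.checkpoint, build_model)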
| 39.309524
| 87
| 0.644094
|
64149d1d8564de5dfecd205e63eb014aa23c352b
| 7,178
|
py
|
Python
|
gateapi-python/gate_api/models/multi_chain_address_item.py
|
jarenmt/IEOPUMP
|
220f7f612d299f7305e82fe6c33661e6871f2d86
|
[
"MIT"
] | null | null | null |
gateapi-python/gate_api/models/multi_chain_address_item.py
|
jarenmt/IEOPUMP
|
220f7f612d299f7305e82fe6c33661e6871f2d86
|
[
"MIT"
] | null | null | null |
gateapi-python/gate_api/models/multi_chain_address_item.py
|
jarenmt/IEOPUMP
|
220f7f612d299f7305e82fe6c33661e6871f2d86
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Gate API v4
    Welcome to Gate.io API. APIv4 provides spot, margin and futures trading operations. There are public APIs to retrieve the real-time market statistics, and private APIs which need authentication to trade on user's behalf.  # noqa: E501
Contact: support@mail.gate.io
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from gate_api.configuration import Configuration
class MultiChainAddressItem(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'chain': 'str',
'address': 'str',
'payment_id': 'str',
'payment_name': 'str',
'obtain_failed': 'int',
}
attribute_map = {
'chain': 'chain',
'address': 'address',
'payment_id': 'payment_id',
'payment_name': 'payment_name',
'obtain_failed': 'obtain_failed',
}
def __init__(
self,
chain=None,
address=None,
payment_id=None,
payment_name=None,
obtain_failed=None,
local_vars_configuration=None,
): # noqa: E501
# type: (str, str, str, str, int, Configuration) -> None
"""MultiChainAddressItem - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._chain = None
self._address = None
self._payment_id = None
self._payment_name = None
self._obtain_failed = None
self.discriminator = None
if chain is not None:
self.chain = chain
if address is not None:
self.address = address
if payment_id is not None:
self.payment_id = payment_id
if payment_name is not None:
self.payment_name = payment_name
if obtain_failed is not None:
self.obtain_failed = obtain_failed
@property
def chain(self):
"""Gets the chain of this MultiChainAddressItem. # noqa: E501
Name of the chain # noqa: E501
:return: The chain of this MultiChainAddressItem. # noqa: E501
:rtype: str
"""
return self._chain
@chain.setter
def chain(self, chain):
"""Sets the chain of this MultiChainAddressItem.
Name of the chain # noqa: E501
:param chain: The chain of this MultiChainAddressItem. # noqa: E501
:type: str
"""
self._chain = chain
@property
def address(self):
"""Gets the address of this MultiChainAddressItem. # noqa: E501
Deposit address # noqa: E501
:return: The address of this MultiChainAddressItem. # noqa: E501
:rtype: str
"""
return self._address
@address.setter
def address(self, address):
"""Sets the address of this MultiChainAddressItem.
Deposit address # noqa: E501
:param address: The address of this MultiChainAddressItem. # noqa: E501
:type: str
"""
self._address = address
@property
def payment_id(self):
"""Gets the payment_id of this MultiChainAddressItem. # noqa: E501
Notes that some currencies required(e.g., Tag, Memo) when depositing # noqa: E501
:return: The payment_id of this MultiChainAddressItem. # noqa: E501
:rtype: str
"""
return self._payment_id
@payment_id.setter
def payment_id(self, payment_id):
"""Sets the payment_id of this MultiChainAddressItem.
Notes that some currencies required(e.g., Tag, Memo) when depositing # noqa: E501
:param payment_id: The payment_id of this MultiChainAddressItem. # noqa: E501
:type: str
"""
self._payment_id = payment_id
@property
def payment_name(self):
"""Gets the payment_name of this MultiChainAddressItem. # noqa: E501
Note type, `Tag` or `Memo` # noqa: E501
:return: The payment_name of this MultiChainAddressItem. # noqa: E501
:rtype: str
"""
return self._payment_name
@payment_name.setter
def payment_name(self, payment_name):
"""Sets the payment_name of this MultiChainAddressItem.
Note type, `Tag` or `Memo` # noqa: E501
:param payment_name: The payment_name of this MultiChainAddressItem. # noqa: E501
:type: str
"""
self._payment_name = payment_name
@property
def obtain_failed(self):
"""Gets the obtain_failed of this MultiChainAddressItem. # noqa: E501
        The obtain failed status: 0 - address successfully obtained, 1 - failed to obtain address  # noqa: E501
:return: The obtain_failed of this MultiChainAddressItem. # noqa: E501
:rtype: int
"""
return self._obtain_failed
@obtain_failed.setter
def obtain_failed(self, obtain_failed):
"""Sets the obtain_failed of this MultiChainAddressItem.
        The obtain failed status: 0 - address successfully obtained, 1 - failed to obtain address  # noqa: E501
:param obtain_failed: The obtain_failed of this MultiChainAddressItem. # noqa: E501
:type: int
"""
self._obtain_failed = obtain_failed
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item,
value.items(),
)
)
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MultiChainAddressItem):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, MultiChainAddressItem):
return True
return self.to_dict() != other.to_dict()
| 29.661157
| 239
| 0.602257
|
d90aea1870c68692fe64741cd0f792932adae3ce
| 484
|
py
|
Python
|
source/core/migrations/0001_initial.py
|
pedroaraujocaldeira/reconhecimento-image
|
2e45237abd1a44907af6aec3666a37f741fd0e82
|
[
"MIT"
] | null | null | null |
source/core/migrations/0001_initial.py
|
pedroaraujocaldeira/reconhecimento-image
|
2e45237abd1a44907af6aec3666a37f741fd0e82
|
[
"MIT"
] | 13
|
2020-06-01T04:33:22.000Z
|
2022-03-12T00:32:38.000Z
|
source/core/migrations/0001_initial.py
|
pedroaraujocaldeira/reconhecimento-image
|
2e45237abd1a44907af6aec3666a37f741fd0e82
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.3 on 2018-11-10 11:27
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.FileField(upload_to='')),
],
),
]
| 22
| 114
| 0.568182
|
0a156d7cf8e7849d836aab337c6da816ed13483b
| 791
|
py
|
Python
|
src/scancode/lockfile.py
|
s4-2/scancode-toolkit
|
8931b42e2630b94d0cabc834dfb3c16f01f82321
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 1,511
|
2015-07-01T15:29:03.000Z
|
2022-03-30T13:40:05.000Z
|
src/scancode/lockfile.py
|
s4-2/scancode-toolkit
|
8931b42e2630b94d0cabc834dfb3c16f01f82321
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 2,695
|
2015-07-01T16:01:35.000Z
|
2022-03-31T19:17:44.000Z
|
src/scancode/lockfile.py
|
s4-2/scancode-toolkit
|
8931b42e2630b94d0cabc834dfb3c16f01f82321
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 540
|
2015-07-01T15:08:19.000Z
|
2022-03-31T12:13:11.000Z
|
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
from contextlib import contextmanager
import fasteners
"""
An interprocess lockfile with a timeout.
"""
class LockTimeout(Exception):
pass
class FileLock(fasteners.InterProcessLock):
@contextmanager
def locked(self, timeout):
acquired = self.acquire(timeout=timeout)
if not acquired:
raise LockTimeout(timeout)
try:
yield
finally:
self.release()
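# Illustrative usage sketch (not part of the original module): guard a critical
# section with a 10-second acquisition timeout; the lock path is hypothetical.
#
#     lock = FileLock('/tmp/scancode.lock')
#     with lock.locked(timeout=10):
#         ...  # exclusive work; LockTimeout is raised if the lock is not acquired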
| 23.264706
| 73
| 0.69153
|
031bdcb44aabc50f0a0c1a5e81ebad68e0ade9e8
| 11,602
|
py
|
Python
|
dejavu/database_sql.py
|
yimikai/dejavu
|
7f53f2ab6896b38cfd54cc396e2326a98b957d07
|
[
"MIT"
] | 4
|
2017-06-17T02:08:28.000Z
|
2020-11-25T04:46:56.000Z
|
dejavu/database_sql.py
|
kibichii-icode/dejavu
|
7f53f2ab6896b38cfd54cc396e2326a98b957d07
|
[
"MIT"
] | 7
|
2020-02-12T03:00:42.000Z
|
2021-06-10T21:43:13.000Z
|
dejavu/database_sql.py
|
kibichii-icode/dejavu
|
7f53f2ab6896b38cfd54cc396e2326a98b957d07
|
[
"MIT"
] | 2
|
2019-09-02T04:28:32.000Z
|
2021-05-04T10:25:21.000Z
|
from __future__ import absolute_import
from itertools import izip_longest
import Queue
import MySQLdb as mysql
from MySQLdb.cursors import DictCursor
from dejavu.database import Database
class SQLDatabase(Database):
"""
Queries:
1) Find duplicates (shouldn't be any, though):
select `hash`, `song_id`, `offset`, count(*) cnt
from fingerprints
group by `hash`, `song_id`, `offset`
having cnt > 1
order by cnt asc;
2) Get number of hashes by song:
select song_id, song_name, count(song_id) as num
from fingerprints
natural join songs
group by song_id
order by count(song_id) desc;
3) get hashes with highest number of collisions
select
hash,
count(distinct song_id) as n
from fingerprints
group by `hash`
order by n DESC;
=> 26 different songs with same fingerprint (392 times):
select songs.song_name, fingerprints.offset
from fingerprints natural join songs
where fingerprints.hash = "08d3c833b71c60a7b620322ac0c0aba7bf5a3e73";
"""
type = "mysql"
# tables
FINGERPRINTS_TABLENAME = "fingerprints"
SONGS_TABLENAME = "songs"
# fields
FIELD_FINGERPRINTED = "fingerprinted"
# creates
CREATE_FINGERPRINTS_TABLE = """
CREATE TABLE IF NOT EXISTS `%s` (
`%s` binary(10) not null,
`%s` mediumint unsigned not null,
`%s` int unsigned not null,
INDEX (%s),
UNIQUE KEY `unique_constraint` (%s, %s, %s),
FOREIGN KEY (%s) REFERENCES %s(%s) ON DELETE CASCADE
) ENGINE=INNODB;""" % (
FINGERPRINTS_TABLENAME, Database.FIELD_HASH,
Database.FIELD_SONG_ID, Database.FIELD_OFFSET, Database.FIELD_HASH,
Database.FIELD_SONG_ID, Database.FIELD_OFFSET, Database.FIELD_HASH,
Database.FIELD_SONG_ID, SONGS_TABLENAME, Database.FIELD_SONG_ID
)
CREATE_SONGS_TABLE = """
CREATE TABLE IF NOT EXISTS `%s` (
`%s` mediumint unsigned not null auto_increment,
`%s` varchar(250) not null,
`%s` tinyint default 0,
`%s` binary(20) not null,
PRIMARY KEY (`%s`),
UNIQUE KEY `%s` (`%s`)
) ENGINE=INNODB;""" % (
SONGS_TABLENAME, Database.FIELD_SONG_ID, Database.FIELD_SONGNAME, FIELD_FINGERPRINTED,
Database.FIELD_FILE_SHA1,
Database.FIELD_SONG_ID, Database.FIELD_SONG_ID, Database.FIELD_SONG_ID,
)
# inserts (ignores duplicates)
INSERT_FINGERPRINT = """
INSERT IGNORE INTO %s (%s, %s, %s) values
(UNHEX(%%s), %%s, %%s);
""" % (FINGERPRINTS_TABLENAME, Database.FIELD_HASH, Database.FIELD_SONG_ID, Database.FIELD_OFFSET)
INSERT_SONG = "INSERT INTO %s (%s, %s) values (%%s, UNHEX(%%s));" % (
SONGS_TABLENAME, Database.FIELD_SONGNAME, Database.FIELD_FILE_SHA1)
# selects
SELECT = """
SELECT %s, %s FROM %s WHERE %s = UNHEX(%%s);
""" % (Database.FIELD_SONG_ID, Database.FIELD_OFFSET, FINGERPRINTS_TABLENAME, Database.FIELD_HASH)
SELECT_MULTIPLE = """
SELECT HEX(%s), %s, %s FROM %s WHERE %s IN (%%s);
""" % (Database.FIELD_HASH, Database.FIELD_SONG_ID, Database.FIELD_OFFSET,
FINGERPRINTS_TABLENAME, Database.FIELD_HASH)
SELECT_ALL = """
SELECT %s, %s FROM %s;
""" % (Database.FIELD_SONG_ID, Database.FIELD_OFFSET, FINGERPRINTS_TABLENAME)
SELECT_SONG = """
SELECT %s, HEX(%s) as %s FROM %s WHERE %s = %%s;
""" % (Database.FIELD_SONGNAME, Database.FIELD_FILE_SHA1, Database.FIELD_FILE_SHA1, SONGS_TABLENAME, Database.FIELD_SONG_ID)
SELECT_NUM_FINGERPRINTS = """
SELECT COUNT(*) as n FROM %s
""" % (FINGERPRINTS_TABLENAME)
SELECT_UNIQUE_SONG_IDS = """
SELECT COUNT(DISTINCT %s) as n FROM %s WHERE %s = 1;
""" % (Database.FIELD_SONG_ID, SONGS_TABLENAME, FIELD_FINGERPRINTED)
SELECT_SONGS = """
SELECT %s, %s, HEX(%s) as %s FROM %s WHERE %s = 1;
""" % (Database.FIELD_SONG_ID, Database.FIELD_SONGNAME, Database.FIELD_FILE_SHA1, Database.FIELD_FILE_SHA1,
SONGS_TABLENAME, FIELD_FINGERPRINTED)
# drops
DROP_FINGERPRINTS = "DROP TABLE IF EXISTS %s;" % FINGERPRINTS_TABLENAME
DROP_SONGS = "DROP TABLE IF EXISTS %s;" % SONGS_TABLENAME
# update
UPDATE_SONG_FINGERPRINTED = """
UPDATE %s SET %s = 1 WHERE %s = %%s
""" % (SONGS_TABLENAME, FIELD_FINGERPRINTED, Database.FIELD_SONG_ID)
# delete
DELETE_UNFINGERPRINTED = """
DELETE FROM %s WHERE %s = 0;
""" % (SONGS_TABLENAME, FIELD_FINGERPRINTED)
def __init__(self, **options):
super(SQLDatabase, self).__init__()
self.cursor = cursor_factory(**options)
self._options = options
def after_fork(self):
# Clear the cursor cache, we don't want any stale connections from
# the previous process.
Cursor.clear_cache()
def setup(self):
"""
Creates any non-existing tables required for dejavu to function.
This also removes all songs that have been added but have no
fingerprints associated with them.
"""
with self.cursor() as cur:
cur.execute(self.CREATE_SONGS_TABLE)
cur.execute(self.CREATE_FINGERPRINTS_TABLE)
cur.execute(self.DELETE_UNFINGERPRINTED)
def empty(self):
"""
Drops tables created by dejavu and then creates them again
by calling `SQLDatabase.setup`.
.. warning:
This will result in a loss of data
"""
with self.cursor() as cur:
cur.execute(self.DROP_FINGERPRINTS)
cur.execute(self.DROP_SONGS)
self.setup()
def delete_unfingerprinted_songs(self):
"""
Removes all songs that have no fingerprints associated with them.
"""
with self.cursor() as cur:
cur.execute(self.DELETE_UNFINGERPRINTED)
def get_num_songs(self):
"""
Returns number of songs the database has fingerprinted.
"""
with self.cursor() as cur:
cur.execute(self.SELECT_UNIQUE_SONG_IDS)
for count, in cur:
return count
return 0
def get_num_fingerprints(self):
"""
Returns number of fingerprints the database has fingerprinted.
"""
with self.cursor() as cur:
cur.execute(self.SELECT_NUM_FINGERPRINTS)
for count, in cur:
return count
return 0
def set_song_fingerprinted(self, sid):
"""
Set the fingerprinted flag to TRUE (1) once a song has been completely
fingerprinted in the database.
"""
with self.cursor() as cur:
cur.execute(self.UPDATE_SONG_FINGERPRINTED, (sid,))
def get_songs(self):
"""
Return songs that have the fingerprinted flag set TRUE (1).
"""
with self.cursor(cursor_type=DictCursor) as cur:
cur.execute(self.SELECT_SONGS)
for row in cur:
yield row
def get_song_by_id(self, sid):
"""
Returns song by its ID.
"""
with self.cursor(cursor_type=DictCursor) as cur:
cur.execute(self.SELECT_SONG, (sid,))
return cur.fetchone()
def insert(self, hash, sid, offset):
"""
Insert a (sha1, song_id, offset) row into database.
"""
with self.cursor() as cur:
cur.execute(self.INSERT_FINGERPRINT, (hash, sid, offset))
def insert_song(self, songname, file_hash):
"""
Inserts song in the database and returns the ID of the inserted record.
"""
with self.cursor() as cur:
cur.execute(self.INSERT_SONG, (songname, file_hash))
return cur.lastrowid
def query(self, hash):
"""
Return all tuples associated with hash.
If hash is None, returns all entries in the
database (be careful with that one!).
"""
# select all if no key
query = self.SELECT_ALL if hash is None else self.SELECT
        with self.cursor() as cur:
            if hash is None:
                cur.execute(query)
            else:
                cur.execute(query, (hash,))
for sid, offset in cur:
yield (sid, offset)
def get_iterable_kv_pairs(self):
"""
Returns all tuples in database.
"""
return self.query(None)
def insert_hashes(self, sid, hashes):
"""
Insert series of hash => song_id, offset
values into the database.
"""
values = []
for hash, offset in hashes:
values.append((hash, sid, offset))
with self.cursor() as cur:
for split_values in grouper(values, 1000):
cur.executemany(self.INSERT_FINGERPRINT, split_values)
def return_matches(self, hashes):
"""
Return the (song_id, offset_diff) tuples associated with
a list of (sha1, sample_offset) values.
"""
# Create a dictionary of hash => offset pairs for later lookups
mapper = {}
for hash, offset in hashes:
mapper[hash.upper()] = offset
        # Get an iterable of all the hashes we need
values = mapper.keys()
with self.cursor() as cur:
for split_values in grouper(values, 1000):
# Create our IN part of the query
query = self.SELECT_MULTIPLE
query = query % ', '.join(['UNHEX(%s)'] * len(split_values))
cur.execute(query, split_values)
for hash, sid, offset in cur:
# (sid, db_offset - song_sampled_offset)
yield (sid, offset - mapper[hash])
def __getstate__(self):
return (self._options,)
def __setstate__(self, state):
self._options, = state
self.cursor = cursor_factory(**self._options)
def grouper(iterable, n, fillvalue=None):
args = [iter(iterable)] * n
return (filter(None, values) for values
in izip_longest(fillvalue=fillvalue, *args))
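# Illustrative example (not part of the original module): grouper chunks an
# iterable into tuples of at most n items, with the None fill values dropped by
# the filter, e.g.
#
#     list(grouper([1, 2, 3, 4, 5], 2))  ->  [(1, 2), (3, 4), (5,)]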
def cursor_factory(**factory_options):
def cursor(**options):
options.update(factory_options)
return Cursor(**options)
return cursor
class Cursor(object):
"""
Establishes a connection to the database and returns an open cursor.
```python
# Use as context manager
with Cursor() as cur:
cur.execute(query)
```
"""
_cache = Queue.Queue(maxsize=5)
def __init__(self, cursor_type=mysql.cursors.Cursor, **options):
super(Cursor, self).__init__()
try:
conn = self._cache.get_nowait()
except Queue.Empty:
conn = mysql.connect(**options)
else:
# Ping the connection before using it from the cache.
conn.ping(True)
self.conn = conn
self.conn.autocommit(False)
self.cursor_type = cursor_type
@classmethod
def clear_cache(cls):
cls._cache = Queue.Queue(maxsize=5)
def __enter__(self):
self.cursor = self.conn.cursor(self.cursor_type)
return self.cursor
def __exit__(self, extype, exvalue, traceback):
        # if we had a MySQL related error we try to rollback the connection.
        if extype is not None and issubclass(extype, mysql.MySQLError):
            self.conn.rollback()
self.cursor.close()
self.conn.commit()
# Put it back on the queue
try:
self._cache.put_nowait(self.conn)
except Queue.Full:
self.conn.close()
| 31.02139
| 128
| 0.603172
|
24b54349b1b8fd4fb38bbc59997e291ad1a480fc
| 14,209
|
py
|
Python
|
utils/dataset/beam_dataset.py
|
airbert-vln/airbert
|
a4f667db9fb4021094c738dd8d23739aee3785a5
|
[
"MIT"
] | 17
|
2021-07-30T14:08:24.000Z
|
2022-03-30T13:57:02.000Z
|
utils/dataset/beam_dataset.py
|
airbert-vln/airbert
|
a4f667db9fb4021094c738dd8d23739aee3785a5
|
[
"MIT"
] | 4
|
2021-09-09T03:02:18.000Z
|
2022-03-24T13:55:55.000Z
|
utils/dataset/beam_dataset.py
|
airbert-vln/airbert
|
a4f667db9fb4021094c738dd8d23739aee3785a5
|
[
"MIT"
] | 2
|
2021-08-30T11:51:16.000Z
|
2021-09-03T09:18:50.000Z
|
# pylint: disable=no-member, not-callable
import logging
import os
import itertools
import random
import copy
from typing import List, Iterator, TypeVar, Union, Tuple
import numpy as np
import torch
from transformers import BertTokenizer
from torch.utils.data import Dataset
from utils.dataset.common import (
get_headings,
get_viewpoints,
load_distances,
load_json_data,
load_nav_graphs,
randomize_regions,
randomize_tokens,
save_json_data,
tokenize,
)
from utils.dataset.features_reader import FeaturesReader
logger = logging.getLogger(__name__)
T = TypeVar("T")
def shuffle_different(seq: List[T]) -> Iterator[List[T]]:
sequences = list(itertools.permutations(seq, len(seq)))
random.shuffle(sequences)
for s in sequences:
l = list(s)
if l != seq:
yield l
def shuffle_non_adjacent(seq: List[T]) -> Iterator[List[T]]:
n = len(seq)
starting = {i: [j for j in range(n) if abs(j - i) > 1] for i in range(n)}
keys = list(starting.keys())
done = []
while keys != []:
idx_keys, start = random.choice(list(enumerate(keys)))
idx_list, permute = random.choice(list(enumerate(starting[start])))
del starting[start][idx_list]
if starting[start] == []:
del keys[idx_keys]
if {start, permute} in done:
continue
done.append({start, permute})
shuffled = copy.deepcopy(seq)
shuffled[start], shuffled[permute] = shuffled[permute], shuffled[start]
yield shuffled
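# Illustrative example (not part of the original module): for a path such as
# ["a", "b", "c", "d"], shuffle_non_adjacent yields copies of the list in which
# exactly two positions whose indices differ by more than 1 have been swapped,
# e.g. ["c", "b", "a", "d"] or ["d", "b", "c", "a"]; shuffle_different instead
# yields full permutations that differ from the original ordering.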
class BeamDataset(Dataset):
def __init__(
self,
vln_path: str,
beam_path: str,
tokenizer: BertTokenizer,
features_reader: FeaturesReader,
max_instruction_length: int,
max_path_length: int,
max_num_boxes: int,
num_beams: int,
num_beams_strict: bool,
training: bool,
masked_vision: bool,
masked_language: bool,
default_gpu: bool,
num_negatives: int,
ground_truth_trajectory: bool,
highlighted_language: bool,
shuffle_visual_features: bool,
shuffler: str = "different",
**kwargs,
):
# load and tokenize data (with caching)
tokenized_path = f"_tokenized_{max_instruction_length}".join(
os.path.splitext(vln_path)
)
if os.path.exists(tokenized_path):
self._vln_data = load_json_data(tokenized_path)
else:
self._vln_data = load_json_data(vln_path)
tokenize(self._vln_data, tokenizer, max_instruction_length)
save_json_data(self._vln_data, tokenized_path)
self._tokenizer = tokenizer
# load navigation graphs
scan_list = list(set([item["scan"] for item in self._vln_data]))
self._graphs = load_nav_graphs(scan_list)
self._distances = load_distances(scan_list)
# get all of the viewpoints for this dataset
self._viewpoints = get_viewpoints(scan_list, self._graphs, features_reader)
# in training we only need 4 beams
if training:
self._num_beams = num_beams
num_beams_strict = False
# load beamsearch data
temp_beam_data = load_json_data(beam_path)
# filter beams based on length
self._beam_data = []
for idx, item in enumerate(temp_beam_data):
if len(item["ranked_paths"]) >= num_beams:
if num_beams_strict:
item["ranked_paths"] = item["ranked_paths"][:num_beams]
self._beam_data.append(item)
elif default_gpu:
logger.warning(
f"skipping index: {idx} in beam data in from path: {beam_path}"
)
# get mapping from path id to vln index
path_to_vln = {}
for idx, vln_item in enumerate(self._vln_data):
path_to_vln[vln_item["path_id"]] = idx
# get mapping from beam to vln
self._beam_to_vln = {}
for idx, beam_item in enumerate(self._beam_data):
path_id = int(beam_item["instr_id"].split("_")[0])
if path_id not in path_to_vln:
if default_gpu:
logger.warning(f"Skipping beam {beam_item['instr_id']}")
continue
self._beam_to_vln[idx] = path_to_vln[path_id]
if shuffler == "different":
self._shuffler = shuffle_different
elif shuffler == "nonadj":
self._shuffler = shuffle_non_adjacent
else:
raise ValueError(f"Unexpected shuffling mode ({shuffler})")
self._features_reader = features_reader
self._max_instruction_length = max_instruction_length
self._max_path_length = max_path_length
self._max_num_boxes = max_num_boxes
self._training = training
self._masked_vision = masked_vision
self._masked_language = masked_language
self._highlighted_language = highlighted_language
self._ground_truth_trajectory = ground_truth_trajectory
self._default_gpu = default_gpu
self._shuffle_visual_features = shuffle_visual_features
self._num_negatives = num_negatives
def __len__(self):
return len(self._beam_data)
def __getitem__(self, beam_index: int):
vln_index = self._beam_to_vln[beam_index]
vln_item = self._vln_data[vln_index]
# get beam info
path_id, instruction_index = map(
int, self._beam_data[beam_index]["instr_id"].split("_")
)
# get vln info
scan_id = vln_item["scan"]
heading = vln_item["heading"]
gt_path = vln_item["path"]
# get the instruction data
instr_tokens = torch.tensor(vln_item["instruction_tokens"][instruction_index])
instr_mask = instr_tokens > 0
segment_ids = torch.zeros_like(instr_tokens)
# applying a token level loss
if self._highlighted_language:
instr_highlights = torch.tensor(
vln_item["instruction_highlights"][instruction_index]
)
else:
instr_highlights = torch.tensor([])
# get all of the paths
beam_paths = []
for ranked_path in self._beam_data[beam_index]["ranked_paths"]:
beam_paths.append([p for p, _, _ in ranked_path])
success = self._get_path_success(scan_id, gt_path, beam_paths)
target: Union[List[int], int]
# select one positive and three negative paths
if self._training:
# special case for data_aug with negative samples
if "positive" in vln_item and not vln_item["positive"][instruction_index]:
target = -1
selected_paths = beam_paths[: self._num_beams]
assert not self._ground_truth_trajectory, "Not compatible"
# not enough positive or negative paths (this should be rare)
if np.sum(success == 1) == 0 or np.sum(success == 0) < self._num_beams - 1:
target = -1 # default ignore index
if self._ground_truth_trajectory:
selected_paths = [self._vln_data[vln_index]["path"]] + beam_paths[
: self._num_beams - 1
]
else:
selected_paths = beam_paths[: self._num_beams]
else:
target = 0
selected_paths = []
# first select a positive
if self._ground_truth_trajectory:
selected_paths.append(self._vln_data[vln_index]["path"])
else:
idx = np.random.choice(np.where(success == 1)[0]) # type: ignore
selected_paths.append(beam_paths[idx])
# next select three negatives
idxs = np.random.choice( # type: ignore
np.where(success == 0)[0], size=self._num_beams - 1, replace=False
)
for idx in idxs:
selected_paths.append(beam_paths[idx])
# shuffle the visual features from the ground truth as a free negative path
if self._shuffle_visual_features:
path = self._vln_data[vln_index]["path"]
# selected_paths += [
# corr
# for corr, _ in zip(shuffle_gen(path), range(self._num_negatives))
# ]
for corr, _ in zip(self._shuffler(path), range(self._num_negatives)):
selected_paths += [corr]
else:
target = success
selected_paths = beam_paths
# This should be used only for testing the influence of shuffled sequences!
if self._shuffle_visual_features:
if isinstance(target, int):
raise ValueError("fix mypy")
# we shuffled all positive trajectories
# the target is now zero everywhere
for i in np.arange(len(success))[success.astype("bool")]:
if i > self._num_negatives:
break
selected_paths.append(next(self._shuffler(selected_paths[i])))
target = np.append(target, 0)
# get path features
features, boxes, probs, masks = [], [], [], []
for path in selected_paths:
f, b, p, m = self._get_path_features(scan_id, path, heading)
features.append(f)
boxes.append(b)
probs.append(p)
masks.append(m)
# convert data into tensors
image_features = torch.tensor(features).float()
image_boxes = torch.tensor(boxes).float()
image_probs = torch.tensor(probs).float()
image_masks = torch.tensor(masks).long()
instr_tokens = instr_tokens.repeat(len(features), 1).long()
instr_mask = instr_mask.repeat(len(features), 1).long()
segment_ids = segment_ids.repeat(len(features), 1).long()
instr_highlights = instr_highlights.repeat(len(features), 1).long()
# randomly mask image features
if self._masked_vision:
image_features, image_targets, image_targets_mask = randomize_regions(
image_features, image_probs, image_masks
)
else:
image_targets = torch.ones_like(image_probs) / image_probs.shape[-1]
image_targets_mask = torch.zeros_like(image_masks)
# randomly mask instruction tokens
if self._masked_language:
instr_tokens, instr_targets = randomize_tokens(
instr_tokens, instr_mask, self._tokenizer
)
else:
instr_targets = torch.ones_like(instr_tokens) * -1
# construct null return items
co_attention_mask = torch.zeros(
2, self._max_path_length * self._max_num_boxes, self._max_instruction_length
).long()
instr_id = torch.tensor([path_id, instruction_index]).long()
return (
torch.tensor(target).long(),
image_features,
image_boxes,
image_masks,
image_targets,
image_targets_mask,
instr_tokens,
instr_mask,
instr_targets,
instr_highlights,
segment_ids,
co_attention_mask,
instr_id,
torch.ones(image_features.shape[0]).bool(),
)
def _get_path_success(self, scan_id, path, beam_paths, success_criteria=3):
d = self._distances[scan_id]
success = np.zeros(len(beam_paths))
for idx, beam_path in enumerate(beam_paths):
if d[path[-1]][beam_path[-1]] < success_criteria:
success[idx] = 1
return success
# TODO move to utils
def _get_path_features(self, scan_id: str, path: List[str], first_heading: float):
""" Get features for a given path. """
headings = get_headings(self._graphs[scan_id], path, first_heading)
# for next headings duplicate the last
next_headings = headings[1:] + [headings[-1]]
path_length = min(len(path), self._max_path_length)
path_features, path_boxes, path_probs, path_masks = [], [], [], []
for path_idx, path_id in enumerate(path[:path_length]):
key = scan_id + "-" + path_id
# get image features
features, boxes, probs = self._features_reader[
key, headings[path_idx], next_headings[path_idx],
]
num_boxes = min(len(boxes), self._max_num_boxes)
# pad features and boxes (if needed)
pad_features = np.zeros((self._max_num_boxes, 2048))
pad_features[:num_boxes] = features[:num_boxes]
pad_boxes = np.zeros((self._max_num_boxes, 12))
pad_boxes[:num_boxes, :11] = boxes[:num_boxes, :11]
pad_boxes[:, 11] = np.ones(self._max_num_boxes) * path_idx
pad_probs = np.zeros((self._max_num_boxes, 1601))
pad_probs[:num_boxes] = probs[:num_boxes]
box_pad_length = self._max_num_boxes - num_boxes
pad_masks = [1] * num_boxes + [0] * box_pad_length
path_features.append(pad_features)
path_boxes.append(pad_boxes)
path_probs.append(pad_probs)
path_masks.append(pad_masks)
# pad path lists (if needed)
for path_idx in range(path_length, self._max_path_length):
pad_features = np.zeros((self._max_num_boxes, 2048))
pad_boxes = np.zeros((self._max_num_boxes, 12))
pad_boxes[:, 11] = np.ones(self._max_num_boxes) * path_idx
pad_probs = np.zeros((self._max_num_boxes, 1601))
pad_masks = [0] * self._max_num_boxes
path_features.append(pad_features)
path_boxes.append(pad_boxes)
path_probs.append(pad_probs)
path_masks.append(pad_masks)
return (
np.vstack(path_features),
np.vstack(path_boxes),
np.vstack(path_probs),
np.hstack(path_masks),
)
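# For reference (not part of the original module): after padding, the arrays
# returned above have fixed shapes of
#     features: (max_path_length * max_num_boxes, 2048)
#     boxes:    (max_path_length * max_num_boxes, 12)
#     probs:    (max_path_length * max_num_boxes, 1601)
#     masks:    (max_path_length * max_num_boxes,)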
| 36.810881
| 88
| 0.596946
|
0ef7b60cb4307d6344c62a2d00af2050e23af348
| 16,009
|
py
|
Python
|
tasks/models/resnet_evonorm.py
|
epfml/relaysgd
|
536f809f2a5fed5f5004b3f49857d67462ac89d2
|
[
"MIT"
] | 3
|
2021-10-31T21:00:36.000Z
|
2022-03-03T13:04:16.000Z
|
tasks/models/resnet_evonorm.py
|
epfml/relaysgd
|
536f809f2a5fed5f5004b3f49857d67462ac89d2
|
[
"MIT"
] | null | null | null |
tasks/models/resnet_evonorm.py
|
epfml/relaysgd
|
536f809f2a5fed5f5004b3f49857d67462ac89d2
|
[
"MIT"
] | 2
|
2022-02-19T05:25:09.000Z
|
2022-03-17T15:41:14.000Z
|
# -*- coding: utf-8 -*-
import math
import functools
from collections import OrderedDict
import torch
import torch.nn as nn
__all__ = ["resnet_evonorm"]
class SwishImplementation(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
ctx.save_for_backward(i)
return i * torch.sigmoid(i)
@staticmethod
def backward(ctx, grad_output):
sigmoid_i = torch.sigmoid(ctx.saved_variables[0])
return grad_output * (
sigmoid_i * (1 + ctx.saved_variables[0] * (1 - sigmoid_i))
)
class MemoryEfficientSwish(nn.Module):
def forward(self, x):
return SwishImplementation.apply(x)
@torch.jit.script
def instance_std(x, eps):
var = torch.var(x, dim=(2, 3), keepdim=True).expand_as(x)
if torch.isnan(var).any():
var = torch.zeros(var.shape)
return torch.sqrt(var + eps)
@torch.jit.script
def group_std(x, eps):
N, C, H, W = x.size()
groups = 32
groups = C if groups > C else groups
x = x.view(N, groups, C // groups, H, W)
var = torch.var(x, dim=(2, 3, 4), keepdim=True).expand_as(x)
return torch.sqrt(var.add(eps)).view(N, C, H, W)
class EvoNorm2D(nn.Module):
def __init__(
self,
input,
non_linear=True,
version="S0",
efficient=True,
affine=True,
momentum=0.9,
eps=1e-5,
groups=32,
training=True,
):
super(EvoNorm2D, self).__init__()
self.non_linear = non_linear
self.version = version
self.training = training
self.momentum = momentum
self.efficient = efficient
if self.version == "S0":
self.swish = MemoryEfficientSwish()
self.groups = groups
self.eps = torch.FloatTensor([eps])
if self.version not in ["B0", "S0"]:
raise ValueError("Invalid EvoNorm version")
self.insize = input
self.affine = affine
if self.affine:
self.gamma = nn.Parameter(torch.ones(1, self.insize, 1, 1))
self.beta = nn.Parameter(torch.zeros(1, self.insize, 1, 1))
if self.non_linear and (
(self.version == "S0" and not self.efficient) or self.version == "B0"
):
self.v = nn.Parameter(torch.ones(1, self.insize, 1, 1))
else:
self.register_parameter("gamma", None)
self.register_parameter("beta", None)
self.register_buffer("v", None)
self.register_buffer("running_var", torch.ones(1, self.insize, 1, 1))
self.reset_parameters()
def reset_parameters(self):
self.running_var.fill_(1)
def _check_input_dim(self, x):
if x.dim() != 4:
raise ValueError("expected 4D input (got {}D input)".format(x.dim()))
def forward(self, x):
self._check_input_dim(x)
if self.version == "S0":
if self.non_linear:
if not self.efficient:
num = x * torch.sigmoid(
self.v * x
) # Original Swish Implementation, however memory intensive.
else:
num = self.swish(
x
) # Experimental Memory Efficient Variant of Swish
if self.eps.device != self.gamma.device:
self.eps = self.eps.to(self.gamma.device)
return num / group_std(x, eps=self.eps) * self.gamma + self.beta
else:
return x * self.gamma + self.beta
if self.version == "B0":
if self.training:
var = torch.var(x, dim=(0, 2, 3), unbiased=False, keepdim=True)
self.running_var.mul_(self.momentum)
self.running_var.add_((1 - self.momentum) * var)
else:
var = self.running_var
if self.non_linear:
den = torch.max(
(var + self.eps).sqrt(), self.v * x + instance_std(x, eps=self.eps)
)
return x / den * self.gamma + self.beta
else:
return x * self.gamma + self.beta
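# For reference (not part of the original module): per sample, the S0 branch of
# the forward pass above computes
#     y = x * sigmoid(v * x) / group_std(x) * gamma + beta
# (the memory-efficient path uses the plain Swish x * sigmoid(x) instead), while
# the B0 branch computes
#     y = x / max(sqrt(running_or_batch_var + eps), v * x + instance_std(x)) * gamma + beta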
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding."
return nn.Conv2d(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False,
)
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ["downsample"]
def __init__(
self,
in_planes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
version="S0",
norm_layer=None,
use_bn_stat=False,
):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = functools.partial(
nn.BatchNorm2d, track_running_stats=use_bn_stat
)
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(in_planes, planes, stride)
self.evo = EvoNorm2D(planes, version=version)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.evo(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
"""
[1 * 1, x]
[3 * 3, x]
[1 * 1, x * 4]
"""
expansion = 4
def __init__(
self,
in_planes,
out_planes,
stride=1,
downsample=None,
version="S0",
norm_layer=None,
use_bn_stat=False,
):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = functools.partial(
nn.BatchNorm2d, track_running_stats=use_bn_stat
)
self.conv1 = nn.Conv2d(
in_channels=in_planes, out_channels=out_planes, kernel_size=1, bias=False
)
self.evo1 = EvoNorm2D(out_planes, version=version)
self.conv2 = nn.Conv2d(
in_channels=out_planes,
out_channels=out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False,
)
self.evo2 = EvoNorm2D(out_planes, version=version)
self.conv3 = nn.Conv2d(
in_channels=out_planes,
out_channels=out_planes * 4,
kernel_size=1,
bias=False,
)
        self.bn3 = norm_layer(out_planes * 4)  # match conv3's expanded channel count
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.evo1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.evo2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = out.expand_as(residual) + residual
out = self.relu(out)
return out
class ResNet_cifar(nn.Module):
def __init__(
self,
resnet_size,
scaling=1,
save_activations=False,
use_bn_stat=False,
num_classes=10,
version="S0",
):
super(ResNet_cifar, self).__init__()
self.use_bn_stat = use_bn_stat
self.version = "S0" if version is None else version
# define model.
if resnet_size % 6 != 2:
raise ValueError("resnet_size must be 6n + 2:", resnet_size)
block_nums = (resnet_size - 2) // 6
if resnet_size >= 44:
raise NotImplementedError("not supported yet.")
else:
block_fn = BasicBlock
self.num_classes = num_classes
# define layers.
self.inplanes = int(16 * scaling)
self.conv1 = nn.Conv2d(
in_channels=3,
out_channels=int(16 * scaling),
kernel_size=3,
stride=1,
padding=1,
bias=False,
)
self.evo1 = EvoNorm2D(int(16 * scaling))
# self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_block(
block_fn=block_fn, planes=int(16 * scaling), block_num=block_nums
)
self.layer2 = self._make_block(
block_fn=block_fn, planes=int(32 * scaling), block_num=block_nums, stride=2
)
self.layer3 = self._make_block(
block_fn=block_fn, planes=int(64 * scaling), block_num=block_nums, stride=2
)
self.avgpool = nn.AvgPool2d(kernel_size=8)
self.classifier = nn.Linear(
in_features=int(64 * scaling * block_fn.expansion),
out_features=self.num_classes,
)
# weight initialization based on layer type.
self._weight_initialization()
# a placeholder for activations in the intermediate layers.
self.save_activations = save_activations
self.activations = None
def _weight_initialization(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
# elif isinstance(m, nn.Linear):
# m.weight.data.normal_(mean=0, std=0.01)
# m.bias.data.zero_()
def _make_block(self, block_fn, planes, block_num, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block_fn.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes,
planes * block_fn.expansion,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm2d(
planes * block_fn.expansion, track_running_stats=self.use_bn_stat
),
)
layers = []
layers.append(
block_fn(
in_planes=self.inplanes,
planes=planes,
stride=stride,
downsample=downsample,
use_bn_stat=self.use_bn_stat,
version=self.version,
)
)
self.inplanes = planes * block_fn.expansion
for _ in range(1, block_num):
layers.append(block_fn(in_planes=self.inplanes, planes=planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.evo1(x)
x = self.layer1(x)
activation1 = x.clone()
x = self.layer2(x)
activation2 = x.clone()
x = self.layer3(x)
activation3 = x.clone()
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
if self.save_activations:
self.activations = [activation1, activation2, activation3]
return x
class ResNet_ImageNet(nn.Module):
def __init__(self, resnet_size, use_bn_stat=False, version="S0", num_classes=1000):
super(ResNet_ImageNet, self).__init__()
self.use_bn_stat = use_bn_stat
self.version = "S0" if version is None else version
# define model param.
model_params = {
18: {"block": BasicBlock, "layers": [2, 2, 2, 2]},
34: {"block": BasicBlock, "layers": [3, 4, 6, 3]},
50: {"block": Bottleneck, "layers": [3, 4, 6, 3]},
101: {"block": Bottleneck, "layers": [3, 4, 23, 3]},
152: {"block": Bottleneck, "layers": [3, 8, 36, 3]},
}
block_fn = model_params[resnet_size]["block"]
block_nums = model_params[resnet_size]["layers"]
# define layers.
self.inplanes = 64
self.conv1 = nn.Conv2d(
in_channels=3,
out_channels=64,
kernel_size=7,
stride=2,
padding=3,
bias=False,
)
self.evo1 = EvoNorm2D(64)
# self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_block(
block_fn=block_fn, planes=64, block_num=block_nums[0]
)
self.layer2 = self._make_block(
block_fn=block_fn, planes=128, block_num=block_nums[1], stride=2
)
self.layer3 = self._make_block(
block_fn=block_fn, planes=256, block_num=block_nums[2], stride=2
)
self.layer4 = self._make_block(
block_fn=block_fn, planes=512, block_num=block_nums[3], stride=2
)
self.avgpool = nn.AvgPool2d(kernel_size=7, stride=1)
self.classifier = nn.Linear(
in_features=512 * block_fn.expansion, out_features=num_classes
)
# weight initialization based on layer type.
self._weight_initialization()
self.train()
def _make_block(self, block_fn, planes, block_num, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block_fn.expansion:
downsample = nn.Sequential(
OrderedDict(
[
(
"conv",
nn.Conv2d(
self.inplanes,
planes * block_fn.expansion,
kernel_size=1,
stride=stride,
bias=False,
),
),
(
"bn",
nn.BatchNorm2d(
planes * block_fn.expansion,
track_running_stats=self.use_bn_stat,
),
),
]
)
)
layers = []
layers.append(
block_fn(
self.inplanes,
planes,
stride=stride,
downsample=downsample,
use_bn_stat=self.use_bn_stat,
version=self.version,
)
)
self.inplanes = planes * block_fn.expansion
for _ in range(1, block_num):
layers.append(block_fn(self.inplanes, planes))
return nn.Sequential(*layers)
def _weight_initialization(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
# elif isinstance(m, nn.Linear):
# m.weight.data.normal_(mean=0, std=0.01)
# m.bias.data.zero_()
def forward(self, x):
x = self.conv1(x)
x = self.evo1(x)
# x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
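# Hedged smoke-test sketch (not part of the original file): it assumes the
# BasicBlock / Bottleneck / EvoNorm2D modules referenced above are defined
# earlier in this module, and that inputs are 224x224 (the avgpool kernel is
# a fixed 7x7, so other spatial sizes would need an adaptive pool instead).
if __name__ == "__main__":
    import torch

    model = ResNet_ImageNet(resnet_size=18, num_classes=1000)
    model.eval()
    with torch.no_grad():
        out = model(torch.randn(2, 3, 224, 224))
    # one logit vector per image
    assert out.shape == (2, 1000)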
| 30.609943
| 90
| 0.528515
|
3292c7643b2e5ba0db38f609812c657ef2affecb
| 1,657
|
py
|
Python
|
b88d80170_fake_data/generate.py
|
archfiery/fake-data
|
44833995ca2e4410d25a6f354c8d6ddec199040d
|
[
"Apache-2.0"
] | null | null | null |
b88d80170_fake_data/generate.py
|
archfiery/fake-data
|
44833995ca2e4410d25a6f354c8d6ddec199040d
|
[
"Apache-2.0"
] | null | null | null |
b88d80170_fake_data/generate.py
|
archfiery/fake-data
|
44833995ca2e4410d25a6f354c8d6ddec199040d
|
[
"Apache-2.0"
] | null | null | null |
from .address import *
from .company import *
from .personal_data import *
from .name import first_name
"""
The amount of information we capture is small, but will be expanded over later iterations.
Generate numP people and numC companies.
Each person is associated with a company via one of two relationships:
WORKS_AT / OWNS
Each company is associated with an address.
There are 5 files generated:
person.tsv
[name, gender, dob, email, mobile, phone]
company.tsv
[name, (start, end), isSixHourClosure]
address.tsv
[number, street, suburb, postcode, state, country]
person_company.tsv
[person_name, company_name, WORKS_AT / OWNS]
company_address.tsv
[company_name, address]
"""
def gen(num_person, num_company):
person_file = open('person.tsv', 'wb+')
person_set = set()
for i in range(num_person):
d = dict()
fng = first_name()
fn, gender = fng[0], fng[1]
ln = last_name()
d['id'] = i
d['name'] = ' '.join([fn, ln]).strip()
d['email'] = email(fn, ln)
d['dob'] = dob()
d['gender'] = gender
d['mobile'] = mobile()
d['phone'] = landline()
p = Person(d)
person_set.add(p)
person_file.write((p.tsv() + '\n').encode())
person_file.close()
company_file = open('company.tsv', 'wb+')
company_set = set()
for i in range(num_company):
d = dict()
d['name'] = company_name()
d['openingHours'] = '6:00-23:00'
d['isSixHourClosure'] = True
c = Company(d)
company_set.add(c)
company_file.write((c.tsv() + '\n').encode())
company_file.close()
| 23.671429
| 86
| 0.617381
|
9ee82b3bf90650f686fef3d3072123293c838955
| 1,340
|
py
|
Python
|
src/repovisor/__main__.py
|
gjcooper/repovisor
|
22d1861fda8a86f47cd5ff1f286b39c30a218f53
|
[
"BSD-3-Clause"
] | 2
|
2020-12-17T00:10:56.000Z
|
2022-03-24T06:09:51.000Z
|
src/repovisor/__main__.py
|
gjcooper/repovisor
|
22d1861fda8a86f47cd5ff1f286b39c30a218f53
|
[
"BSD-3-Clause"
] | 3
|
2017-11-15T22:57:34.000Z
|
2019-07-03T00:59:08.000Z
|
src/repovisor/__main__.py
|
gjcooper/repovisor
|
22d1861fda8a86f47cd5ff1f286b39c30a218f53
|
[
"BSD-3-Clause"
] | null | null | null |
from repovisor import repovisor as rv
import click
@click.group()
@click.version_option()
def main():
pass
@main.command(name='search')
@click.argument('folders', nargs=-1, type=click.Path(exists=True, file_okay=False, resolve_path=True))
@click.option('--brief', '-b', is_flag=True, help='Use the short form of reporting status')
@click.option('--prune', '-p', is_flag=True, help='Prune directories as soon as we reach a repository')
@click.option('--tree', '-t', is_flag=True, help='Show repositories in a tree structure, only used if brief is also selected')
@click.option('--hide', is_flag=True, help='Hide repositories that are clean')
@click.option('--remote', is_flag=True, help='Warn if repo has no remote')
def __search_action(folders, brief, prune, tree, hide, remote):
'''search through a set of folders and print status for them'''
for repo_level, repo in rv.reposearch(*folders, prune=prune, level=0):
view = rv.repo_view(repo, brief=brief)
if tree and brief:
view = '-'*repo_level + view
if hide and not repo.bare:
if not repo.state['dirty']:
if not repo.state['untracked']:
if all(rv.branch_uptodate(b, true_on_missing_origin=not remote) for b in repo.state['refcheck']):
continue
print(view)
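# Hedged usage sketch: assuming this click group is exposed as a console
# script (the entry-point name is not shown in this file), a search could
# look like
#
#   $ <entry-point> search ~/projects --brief --tree --prune
#
# which walks ~/projects, stops descending once a repository is found
# (--prune) and prints one short, depth-indented status line per repo.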
| 44.666667
| 126
| 0.664925
|
49a805b46937a2becd3fa903497f431c0f9d0315
| 5,676
|
py
|
Python
|
src/m9_using_objects.py
|
harperis/03-AccumulatorsAndFunctionsWithParameters
|
b7103344f95fcf20ca0ce78418ab1cee0597dd1a
|
[
"MIT"
] | null | null | null |
src/m9_using_objects.py
|
harperis/03-AccumulatorsAndFunctionsWithParameters
|
b7103344f95fcf20ca0ce78418ab1cee0597dd1a
|
[
"MIT"
] | null | null | null |
src/m9_using_objects.py
|
harperis/03-AccumulatorsAndFunctionsWithParameters
|
b7103344f95fcf20ca0ce78418ab1cee0597dd1a
|
[
"MIT"
] | null | null | null |
"""
This module lets you practice ** using objects **, including:
-- CONSTRUCTING objects,
-- applying METHODS to them, and
-- accessing their DATA via INSTANCE VARIABLES
Authors: David Mutchler, Dave Fisher, Vibha Alangar, Mark Hays, Amanda Stouder,
their colleagues and Isaac Harper.
""" # Done: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
import math
def main():
    """ Calls the other functions to demonstrate and/or test them. """
    # Test your functions by putting calls to them here:
    two_circles()
    circle_and_rectangle()
    lines()
def two_circles():
window = rg.RoseWindow()
center_point1 = rg.Point(100, 100)
radius1 = 15
center_point2 = rg.Point(200, 172)
radius2 = 25
circle1 = rg.Circle(center_point1, radius1)
circle2 = rg.Circle(center_point2, radius2)
circle2.fill_color = 'red'
circle1.attach_to(window)
circle2.attach_to(window)
window.render()
window.close_on_mouse_click()
"""
-- Constructs an rg.RoseWindow.
-- Constructs and draws two rg.Circle objects on the window
such that:
-- They fit in the window and are easily visible.
-- They have different radii.
-- One is filled with some color and one is not filled.
-- Waits for the user to press the mouse, then closes the window.
"""
# ------------------------------------------------------------------
# Done: 2. Implement this function, per its green doc-string above.
# -- ANY two rg.Circle objects that meet the criteria are fine.
# -- File COLORS.pdf lists all legal color-names.
# Put a statement in main to test this function
# (by calling this function).
# ------------------------------------------------------------------
def circle_and_rectangle():
window = rg.RoseWindow()
x = 251
y = 213
center_point = rg.Point(x, y)
radius = 35
circle = rg.Circle(center_point, radius)
circle.fill_color = 'blue'
circle.attach_to(window)
a = 50
b = 87
c = 100
d = 300
point1 = rg.Point(a, b)
point2 = rg.Point(c, d)
rectangle = rg.Rectangle(point1, point2)
center = rectangle.get_center()
rectangle.attach_to(window)
window.render()
print(circle.outline_thickness)
print(circle.fill_color)
print(circle.center)
print(x)
print(y)
print(rectangle.outline_thickness)
print(rectangle.fill_color)
print(center)
print((a + c) / 2)
print((b + d) / 2)
window.close_on_mouse_click()
"""
-- Constructs an rg.RoseWindow.
-- Constructs and draws a rg.Circle and rg.Rectangle
on the window such that:
-- They fit in the window and are easily visible.
-- The rg.Circle is filled with 'blue'
-- Prints (on the console, on SEPARATE lines) the following data
associated with your rg.Circle:
-- Its outline thickness.
-- Its fill color.
-- Its center.
-- Its center's x coordinate.
-- Its center's y coordinate.
-- Prints (on the console, on SEPARATE lines) the same data
but for your rg.Rectangle.
-- Waits for the user to press the mouse, then closes the window.
Here is an example of the output on the console,
for one particular circle and rectangle:
1
blue
Point(180.0, 115.0)
180
115
1
None
Point(75.0, 150.0)
75.0
150.0
"""
# ------------------------------------------------------------------
# Done: 3. Implement this function, per its green doc-string above.
# -- ANY objects that meet the criteria are fine.
# Put a statement in main to test this function
# (by calling this function).
#
# IMPORTANT: Use the DOT TRICK to guess the names of the relevant
# instance variables for outline thickness, etc.
# ------------------------------------------------------------------
def lines():
window = rg.RoseWindow()
a = 10
b = 20
c = 300
d = 250
point1 = rg.Point(a, b)
point2 = rg.Point(c, d)
point3 = rg.Point(250, 123)
point4 = rg.Point(100, 75)
line1 = rg.Line(point1, point2)
line1.thickness = 15
line2 = rg.Line(point3, point4)
line1.attach_to(window)
line2.attach_to(window)
midpoint = rg.Line.get_midpoint(line1)
print(midpoint)
print((a + c) / 2)
print((b + d) / 2)
window.render()
window.close_on_mouse_click()
"""
-- Constructs a rg.RoseWindow.
-- Constructs and draws on the window two rg.Lines such that:
-- They both fit in the window and are easily visible.
-- One rg.Line has the default thickness.
-- The other rg.Line is thicker (i.e., has a bigger width).
-- Uses a rg.Line method to get the midpoint (center) of the
thicker rg.Line.
-- Then prints (on the console, on SEPARATE lines):
-- the midpoint itself
-- the x-coordinate of the midpoint
-- the y-coordinate of the midpoint
Here is an example of the output on the console, if the two
endpoints of the thicker line are at (100, 100) and (121, 200):
Point(110.5, 150.0)
110.5
150.0
-- Waits for the user to press the mouse, then closes the window.
"""
# Done: 4. Implement and test this function.
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
| 31.88764
| 79
| 0.570296
|
181a765b950c68044e014a8b6a685b04ff383012
| 2,661
|
py
|
Python
|
nmigen/back/verilog.py
|
davidlattimore/nmigen
|
8fe319f065807f421b092e7ffb8b90748512bf8c
|
[
"BSD-2-Clause"
] | 1
|
2022-02-21T16:04:10.000Z
|
2022-02-21T16:04:10.000Z
|
nmigen/back/verilog.py
|
davidlattimore/nmigen
|
8fe319f065807f421b092e7ffb8b90748512bf8c
|
[
"BSD-2-Clause"
] | 5
|
2021-03-19T00:08:54.000Z
|
2021-10-02T15:02:28.000Z
|
nmigen/back/verilog.py
|
davidlattimore/nmigen
|
8fe319f065807f421b092e7ffb8b90748512bf8c
|
[
"BSD-2-Clause"
] | null | null | null |
from .._toolchain.yosys import *
from . import rtlil
__all__ = ["YosysError", "convert", "convert_fragment"]
def _convert_rtlil_text(rtlil_text, *, strip_internal_attrs=False, write_verilog_opts=()):
# this version requirement needs to be synchronized with the one in setup.py!
yosys = find_yosys(lambda ver: ver >= (0, 9))
yosys_version = yosys.version()
script = []
script.append("read_ilang <<rtlil\n{}\nrtlil".format(rtlil_text))
if yosys_version >= (0, 9, 3468):
# Yosys >=0.9+3468 (since commit 128522f1) emits the workaround for the `always @*`
# initial scheduling issue on its own.
script.append("delete w:$verilog_initial_trigger")
if yosys_version >= (0, 9, 3527):
# Yosys >=0.9+3527 (since commit 656ee70f) supports the `-nomux` option for the `proc`
# script pass. Because the individual `proc_*` passes are not a stable interface,
# `proc -nomux` is used instead, if available.
script.append("proc -nomux")
else:
# On earlier versions, use individual `proc_*` passes; this is a known range of Yosys
# versions and we know it's compatible with what nMigen does.
script.append("proc_init")
script.append("proc_arst")
script.append("proc_dff")
script.append("proc_clean")
script.append("memory_collect")
if strip_internal_attrs:
attr_map = []
attr_map.append("-remove generator")
attr_map.append("-remove top")
attr_map.append("-remove src")
attr_map.append("-remove nmigen.hierarchy")
attr_map.append("-remove nmigen.decoding")
script.append("attrmap {}".format(" ".join(attr_map)))
script.append("attrmap -modattr {}".format(" ".join(attr_map)))
script.append("write_verilog -norename {}".format(" ".join(write_verilog_opts)))
return yosys.run(["-q", "-"], "\n".join(script),
# At the moment, Yosys always shows a warning indicating that not all processes can be
# translated to Verilog. We carefully emit only the processes that *can* be translated, and
# squash this warning. Once Yosys' write_verilog pass is fixed, we should remove this.
ignore_warnings=True)
def convert_fragment(*args, strip_internal_attrs=False, **kwargs):
rtlil_text, name_map = rtlil.convert_fragment(*args, **kwargs)
return _convert_rtlil_text(rtlil_text, strip_internal_attrs=strip_internal_attrs), name_map
def convert(*args, strip_internal_attrs=False, **kwargs):
rtlil_text = rtlil.convert(*args, **kwargs)
return _convert_rtlil_text(rtlil_text, strip_internal_attrs=strip_internal_attrs)
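# Hedged usage sketch (assumes a Yosys >= 0.9 binary is discoverable by
# find_yosys() and that the usual nMigen HDL API is available):
#
#     from nmigen.hdl import Module, Signal
#     from nmigen.back import verilog
#
#     m = Module()
#     a = Signal()
#     b = Signal()
#     m.d.comb += b.eq(~a)
#     print(verilog.convert(m, ports=[a, b]))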
| 42.919355
| 99
| 0.682074
|
e39c818c2a4c0655d637396a59f2761ecfd88c0e
| 1,026
|
py
|
Python
|
200-299/210-219/219.py
|
dcragusa/LeetCode
|
01c30de0832b378a1b054d80d1ea1d3f09a2abd3
|
[
"MIT"
] | null | null | null |
200-299/210-219/219.py
|
dcragusa/LeetCode
|
01c30de0832b378a1b054d80d1ea1d3f09a2abd3
|
[
"MIT"
] | null | null | null |
200-299/210-219/219.py
|
dcragusa/LeetCode
|
01c30de0832b378a1b054d80d1ea1d3f09a2abd3
|
[
"MIT"
] | null | null | null |
"""
Given an integer array nums and an integer k, return true if there are two distinct indices i and j in the array such
that nums[i] == nums[j] and abs(i - j) <= k.
Example 1:
Input: nums = [1, 2, 3, 1], k = 3, Output: True
Example 2:
Input: nums = [1, 0, 1, 1], k = 1, Output: True
Example 3:
Input: nums = [1, 2, 3, 1, 2, 3], k = 2, Output: False
"""
"""
We set up a map of number to index. We iterate through nums - if the number is already in the map, we return True if
the difference in indices is <= k. We then update the number to index map with the current number and index.
"""
def contains_nearby_duplicate(nums, k):
num_to_idx_map = {}
for idx, num in enumerate(nums):
if num in num_to_idx_map and idx - num_to_idx_map[num] <= k:
return True
num_to_idx_map[num] = idx
return False
assert contains_nearby_duplicate([1, 2, 3, 1], 3) is True
assert contains_nearby_duplicate([1, 0, 1, 1], 1) is True
assert contains_nearby_duplicate([1, 2, 3, 1, 2, 3], 2) is False
| 31.090909
| 117
| 0.658869
|
ad2804250b7d6713c9fb127c83cc7f601a78cbad
| 192
|
py
|
Python
|
src/compas_ui/rhino/ui/COMPAS/dev/COMPAS__saveas_cmd.py
|
BlockResearchGroup/compas_ui
|
8b5a6121eee837d306bf20c44c91f94a5c185f90
|
[
"MIT"
] | null | null | null |
src/compas_ui/rhino/ui/COMPAS/dev/COMPAS__saveas_cmd.py
|
BlockResearchGroup/compas_ui
|
8b5a6121eee837d306bf20c44c91f94a5c185f90
|
[
"MIT"
] | 3
|
2022-02-24T17:56:30.000Z
|
2022-03-31T09:48:40.000Z
|
src/compas_ui/rhino/ui/COMPAS/dev/COMPAS__saveas_cmd.py
|
BlockResearchGroup/compas_ui
|
8b5a6121eee837d306bf20c44c91f94a5c185f90
|
[
"MIT"
] | null | null | null |
from compas_ui.ui import UI
__commandname__ = "COMPAS__saveas"
@UI.error()
def RunCommand(is_interactive):
ui = UI()
ui.saveas()
if __name__ == "__main__":
RunCommand(True)
| 12
| 34
| 0.677083
|
21494b241feb1066318a4c3a63733ebe946e4b39
| 1,699
|
py
|
Python
|
01_gen_data.py
|
babarosa08/BaiduXJ
|
b060d165b9dd36fd2a70b279b68d18e77fe1f8ec
|
[
"MIT"
] | 1
|
2018-06-14T04:00:03.000Z
|
2018-06-14T04:00:03.000Z
|
01_gen_data.py
|
babarosa08/BaiduXJ
|
b060d165b9dd36fd2a70b279b68d18e77fe1f8ec
|
[
"MIT"
] | null | null | null |
01_gen_data.py
|
babarosa08/BaiduXJ
|
b060d165b9dd36fd2a70b279b68d18e77fe1f8ec
|
[
"MIT"
] | null | null | null |
import os
import shutil
import sys
DATA_PATH = '/home/huligang/workspace/BaiDuXJD2018/data'
TRAIN_PATH = '/home/huligang/workspace/BaiDuXJD2018/data/train/'
VAL_PATH = '/home/huligang/workspace/BaiDuXJD2018/data/val/'
TEST_PATH = '/home/huligang/workspace/BaiDuXJD2018/data/test'
TRAIN_FILE = '/home/huligang/workspace/BaiDuXJD2018/datasets/train.txt'
TEST_FILE = '/home/huligang/workspace/BaiDuXJD2018/datasets/testV1.txt'
def rmrf_mkdir(dirname):
if os.path.exists(dirname):
shutil.rmtree(dirname)
os.mkdir(dirname)
rmrf_mkdir(DATA_PATH)
rmrf_mkdir(TRAIN_PATH)
rmrf_mkdir(TEST_PATH)
# rmrf_mkdir(VAL_PATH)
# generate train set
with open(TRAIN_FILE) as f:
lines = f.readlines()
for line in lines:
line = line.strip().split(' ')
image_name = line[0]
image_label = line[1]
CATEGORY_PATH = os.path.join(TRAIN_PATH, image_label)
if not (os.path.exists(CATEGORY_PATH)):
os.mkdir(CATEGORY_PATH)
src = os.path.join(
'/home/huligang/workspace/BaiDuXJD2018/datasets/train', image_name)
dst = os.path.join(CATEGORY_PATH, image_name)
os.symlink(src, dst)
# generate test set
with open(TEST_FILE) as f:
lines = f.readlines()
for line in lines:
line = line.strip().split(' ')
image_name = line[0]
image_label = line[1]
CATEGORY_PATH = os.path.join(TEST_PATH, image_label)
if not (os.path.exists(CATEGORY_PATH)):
os.mkdir(CATEGORY_PATH)
src = os.path.join(
'/home/huligang/workspace/BaiDuXJD2018/datasets/test', image_name)
dst = os.path.join(CATEGORY_PATH, image_name)
os.symlink(src, dst)
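# Note (assumption drawn from the parsing above): train.txt and testV1.txt
# are expected to contain one "<image_name> <label>" pair per line,
# e.g. "0001.jpg 12"; the script turns them into a class-per-directory
# layout of symlinks under data/train and data/test.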
| 28.79661
| 79
| 0.675103
|
ee7056fe7a08e4224934ad0836e42cdb48795a8e
| 9,338
|
py
|
Python
|
script/old/vartsv_table-0.0.1.py
|
genepii/seqmet
|
89fdab79131c861d4a5aae364ecdbeb3a9e0ae23
|
[
"MIT"
] | null | null | null |
script/old/vartsv_table-0.0.1.py
|
genepii/seqmet
|
89fdab79131c861d4a5aae364ecdbeb3a9e0ae23
|
[
"MIT"
] | null | null | null |
script/old/vartsv_table-0.0.1.py
|
genepii/seqmet
|
89fdab79131c861d4a5aae364ecdbeb3a9e0ae23
|
[
"MIT"
] | null | null | null |
import os
import sys
import getopt
import math
import copy
def main(argv):
global tsv
global ref
global bed
global oup
global datatype
tsv = ''
ref = ''
bed = ''
oup = ''
datatype = 'norm'
try:
opts, args = getopt.getopt(argv, 'ht:r:b:o:d:', ['--help', '--tsv', '--reference', '--bed', '--output', '--datatype'])
for opt, arg in opts:
if opt in ('-h', '--help'):
usage()
sys.exit()
elif opt in ('-t', '--tsv'):
tsv = arg
elif opt in ('-r', '--reference'):
ref = arg
elif opt in ('-b', '--bed'):
bed = arg
elif opt in ('-o', '--output'):
oup = arg
elif opt in ('-d', '--datatype'):
datatype = arg
if tsv == '':
usage()
sys.exit()
if ref == '':
usage()
sys.exit()
if bed == '':
usage()
sys.exit()
if oup == '':
            oup = tsv.split('/')[-1].split('.')[0]
except getopt.GetoptError:
usage()
sys.exit(2)
def usage():
print 'usage: ', sys.argv[0], '-h --help -t --tsv -r --ref [fasta] -b --bed [bed12] -o --output [table] -d --datatype [freq,abs,norm]'
if __name__ == '__main__':
main(sys.argv[1:])
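# Hedged usage sketch (this is a Python 2 script), e.g.
#   python vartsv_table-0.0.1.py -t sample.tsv -r ref.fasta -b genes.bed -o sample.table -d freq
# where the input TSV must carry the #CHROM/POS/REF/ALT/AF/AO/DP/TYPE
# columns that are indexed below.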
stoplist = ['TAA', 'TGA', 'TAG']
cdt = {'TTT': 'F', 'CTT': 'L', 'ATT': 'I', 'GTT': 'V',
'TTC': 'F', 'CTC': 'L', 'ATC': 'I', 'GTC': 'V',
'TTA': 'L', 'CTA': 'L', 'ATA': 'I', 'GTA': 'V',
'TTG': 'L', 'CTG': 'L', 'ATG': 'M', 'GTG': 'V',
'TCT': 'S', 'CCT': 'P', 'ACT': 'T', 'GCT': 'A',
'TCC': 'S', 'CCC': 'P', 'ACC': 'T', 'GCC': 'A',
'TCA': 'S', 'CCA': 'P', 'ACA': 'T', 'GCA': 'A',
'TCG': 'S', 'CCG': 'P', 'ACG': 'T', 'GCG': 'A',
'TAT': 'Y', 'CAT': 'H', 'AAT': 'N', 'GAT': 'D',
'TAC': 'Y', 'CAC': 'H', 'AAC': 'N', 'GAC': 'D',
'TAA': '*', 'CAA': 'Q', 'AAA': 'K', 'GAA': 'E',
'TAG': '*', 'CAG': 'Q', 'AAG': 'K', 'GAG': 'E',
'TGT': 'C', 'CGT': 'R', 'AGT': 'S', 'GGT': 'G',
'TGC': 'C', 'CGC': 'R', 'AGC': 'S', 'GGC': 'G',
'TGA': '*', 'CGA': 'R', 'AGA': 'R', 'GGA': 'G',
'TGG': 'W', 'CGG': 'R', 'AGG': 'R', 'GGG': 'G'}
headers = open(tsv, 'r').read().replace("\r", "").rstrip('\n').split('\n')[0].split('\t')
ichrom = headers.index("#CHROM")
ipos = headers.index("POS")
iref = headers.index("REF")
ivar = headers.index("ALT")
iaf = headers.index("AF")
icount = headers.index("AO")
idepth = headers.index("DP")
itype = headers.index("TYPE")
ref = [ [x.split('\n')[0], ''.join(x.split('\n')[1:])] for x in open(ref, 'r').read().replace("\r", "").rstrip('\n').split('>')[1:]]
con = copy.deepcopy(ref)
for item in open(tsv, 'r').read().replace("\r", "").rstrip('\n').split('\n')[1:]:
var = item.split('\t')
refindex = [ x[0] for x in ref ].index(var[ichrom])
if var[itype] == 'snp' and len(var[ivar]) == 1 and float(var[iaf]) >= 0.5:
con[refindex][1] = con[refindex][1][0:int(var[ipos])-1] + var[ivar] + con[refindex][1][int(var[ipos]):]
elif float(var[iaf]) >= 0.5:
con[refindex][1] = con[refindex][1][0:int(var[ipos])-1] + 'X' + con[refindex][1][int(var[ipos]):]
qsp = copy.deepcopy(con)
for item in open(tsv, 'r').read().replace("\r", "").rstrip('\n').split('\n')[1:]:
var = item.split('\t')
refindex = [ x[0] for x in ref ].index(var[ichrom])
if var[itype] == 'snp' and len(var[ivar]) == 1 and float(var[iaf]) < 0.5:
qsp[refindex][1] = qsp[refindex][1][0:int(var[ipos])-1] + var[ivar] + qsp[refindex][1][int(var[ipos]):]
elif float(var[iaf]) < 0.5:
qsp[refindex][1] = qsp[refindex][1][0:int(var[ipos])-1] + 'X' + qsp[refindex][1][int(var[ipos]):]
refpn_seq = []
refpn_chromname = []
refpn_pnname = []
refpn_pos = []
refpn_fna = []
refpn_faa = []
for item in open(bed, 'r').read().replace("\r", "").rstrip('\n').split('\n'):
if item != '':
pn = item.split('\t')
refpn_seq.append(ref[[x[0] for x in ref].index(pn[0])][1])
refpn_chromname += [pn[0]]
refpn_pnname += [pn[3]]
part = []
for i in range(len(pn[11].split(','))):
y = int(pn[11].split(',')[i])
z = int(pn[10].split(',')[i])
part += range(y,y+z)
refpn_pos.append(part)
refpn_fna.append([refpn_seq[-1][x] for x in part])
refpn_faa.append([cdt[''.join(refpn_fna[-1][x:x+3])] if ''.join(refpn_fna[-1][x:x+3]) in cdt else 'X' for x in range(0,len(refpn_fna[-1]),3)])
conpn_seq = []
conpn_chromname = []
conpn_pnname = []
conpn_pos = []
conpn_fna = []
conpn_faa = []
for item in open(bed, 'r').read().replace("\r", "").rstrip('\n').split('\n'):
if item != '':
pn = item.split('\t')
conpn_seq.append(con[[x[0] for x in con].index(pn[0])][1])
conpn_chromname += [pn[0]]
conpn_pnname += [pn[3]]
part = []
for i in range(len(pn[11].split(','))):
y = int(pn[11].split(',')[i])
z = int(pn[10].split(',')[i])
part += range(y,y+z)
conpn_pos.append(part)
conpn_fna.append([conpn_seq[-1][x] for x in part])
conpn_faa.append([cdt[''.join(conpn_fna[-1][x:x+3])] if ''.join(conpn_fna[-1][x:x+3]) in cdt else 'X' for x in range(0,len(conpn_fna[-1]),3)])
qsppn_seq = []
qsppn_chromname = []
qsppn_pnname = []
qsppn_pos = []
qsppn_fna = []
qsppn_faa = []
for item in open(bed, 'r').read().replace("\r", "").rstrip('\n').split('\n'):
if item != '':
pn = item.split('\t')
qsppn_seq.append(qsp[[x[0] for x in qsp].index(pn[0])][1])
qsppn_chromname += [pn[0]]
qsppn_pnname += [pn[3]]
part = []
for i in range(len(pn[11].split(','))):
y = int(pn[11].split(',')[i])
z = int(pn[10].split(',')[i])
part += range(y,y+z)
qsppn_pos.append(part)
qsppn_fna.append([qsppn_seq[-1][x] for x in part])
qsppn_faa.append([cdt[''.join(qsppn_fna[-1][x:x+3])] if ''.join(qsppn_fna[-1][x:x+3]) in cdt else 'X' for x in range(0,len(qsppn_fna[-1]),3)])
w = open(oup, 'w')
for item in open(tsv, 'r').read().replace("\r", "").rstrip('\n').split('\n')[1:]:
var = item.split('\t')
varoup = []
refindex = [ x[0] for x in ref ].index(var[ichrom])
pnname = []
pnname = [ x for x in refpn_pnname if var[ichrom] == refpn_chromname[refpn_pnname.index(x)] and (int(var[ipos])-1) in refpn_pos[refpn_pnname.index(x)]]
varoup.append(var[ichrom].split('|')[0])
varoup.append(var[ichrom].split('|')[1])
varoup.append('CODING') if len(pnname) > 0 else varoup.append('NON_CODING')
varoup.append(var[ipos])
varoup.append(','.join(pnname)) if len(pnname) > 0 else varoup.append('__')
varoup.append(','.join([ str((((refpn_pos[refpn_pnname.index(x)].index(int(var[ipos])-1))+3)/3)) for x in pnname ])) if len(pnname) > 0 else varoup.append('__')
if var[itype] == 'snp':
if int(float(var[iaf])*100) >= 50:
varoup.append(','.join([ refpn_faa[refpn_pnname.index(x)][(((refpn_pos[refpn_pnname.index(x)].index(int(var[ipos])-1))+3)/3)-1] for x in pnname ]) + '>' + ','.join([ conpn_faa[conpn_pnname.index(x)][(((conpn_pos[conpn_pnname.index(x)].index(int(var[ipos])-1))+3)/3)-1] for x in pnname ])) if len(pnname) > 0 else varoup.append('__')
varoup.append( ref[refindex][1][int(var[ipos])-1] + '>' + var[ivar] )
elif int(float(var[iaf])*100) < 50:
varoup.append(','.join([ refpn_faa[refpn_pnname.index(x)][(((refpn_pos[refpn_pnname.index(x)].index(int(var[ipos])-1))+3)/3)-1] for x in pnname ]) + '>' + ','.join([ qsppn_faa[qsppn_pnname.index(x)][(((qsppn_pos[qsppn_pnname.index(x)].index(int(var[ipos])-1))+3)/3)-1] for x in pnname ])) if len(pnname) > 0 else varoup.append('__')
varoup.append( ref[refindex][1][int(var[ipos])-1] + '>' + qsp[refindex][1][int(var[ipos])-1] )
elif var[itype] == 'ins':
varoup.append(','.join([ 'INS' for x in pnname ])) if len(pnname) > 0 else varoup.append('INS')
varoup.append( ref[refindex][1][int(var[ipos])-1] + '>' + var[ivar] )
elif var[itype] == 'del':
varoup.append(','.join([ 'DEL' for x in pnname ])) if len(pnname) > 0 else varoup.append('DEL')
varoup.append( ref[refindex][1][int(var[ipos])-1] + var[iref][1:] + '>' + var[ivar] )
elif var[itype] == 'mnp':
varoup.append(','.join([ 'MNP' for x in pnname ])) if len(pnname) > 0 else varoup.append('MNP')
varoup.append(','.join([ 'MNP' for x in pnname ])) if len(pnname) > 0 else varoup.append('MNP')
else:
varoup.append(','.join([ 'COMPLEX' for x in pnname ])) if len(pnname) > 0 else varoup.append('COMPLEX')
varoup.append(','.join([ 'COMPLEX' for x in pnname ])) if len(pnname) > 0 else varoup.append('COMPLEX')
varoup.append('MAJOR') if int(float(var[iaf])*100) >= 50 else varoup.append('MINOR')
val = ''
if datatype == 'freq':
val = str(int(float(var[iaf])*100))
elif datatype == 'abs':
val = '1'
elif datatype == 'norm':
val = str(int(float(var[iaf])*100) * int(var[idepth]))
w.write('|'.join(varoup) + '\t' + val + '\n')
w.close()
w = open("test.faa", "w")
for item in refpn_faa:
w.write(''.join(item) + '\n')
w.close()
| 42.834862
| 344
| 0.514136
|
944ee02af1746c07ed2f0fa891fdf9a0b6453cd7
| 3,877
|
py
|
Python
|
examples/admin/set_password.py
|
syaiful6/aerospike-client-python
|
59fa0d36aa899a164282643fe49b27d12aaf323f
|
[
"Apache-2.0"
] | 105
|
2015-01-07T09:51:13.000Z
|
2022-03-24T04:23:54.000Z
|
examples/admin/set_password.py
|
syaiful6/aerospike-client-python
|
59fa0d36aa899a164282643fe49b27d12aaf323f
|
[
"Apache-2.0"
] | 180
|
2015-01-01T19:29:50.000Z
|
2022-03-19T14:14:06.000Z
|
examples/admin/set_password.py
|
syaiful6/aerospike-client-python
|
59fa0d36aa899a164282643fe49b27d12aaf323f
|
[
"Apache-2.0"
] | 94
|
2015-01-21T19:17:48.000Z
|
2022-01-31T07:17:47.000Z
|
# -*- coding: utf-8 -*-
################################################################################
# Copyright 2013-2021 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from __future__ import print_function
import aerospike
import sys
from optparse import OptionParser
################################################################################
# Options Parsing
################################################################################
usage = "usage: %prog [options]"
optparser = OptionParser(usage=usage, add_help_option=False)
optparser.add_option(
"--help", dest="help", action="store_true",
help="Displays this message.")
optparser.add_option(
"-h", "--host", dest="host", type="string", default="127.0.0.1", metavar="<ADDRESS>",
help="Address of Aerospike server.")
optparser.add_option(
"-p", "--port", dest="port", type="int", default=3000, metavar="<PORT>",
help="Port of the Aerospike server.")
optparser.add_option(
"-U", "--username", dest="username", type="string", metavar="<USERNAME>",
help="Username to connect to database.")
optparser.add_option(
"-P", "--password", dest="password", type="string", metavar="<PASSWORD>",
help="Password to connect to database.")
(options, args) = optparser.parse_args()
if options.help:
optparser.print_help()
print()
sys.exit(1)
if options.username == None or options.password == None:
optparser.print_help()
print()
sys.exit(1)
################################################################################
# Client Configuration
################################################################################
config = {
'hosts': [ (options.host, options.port) ]
}
################################################################################
# Application
################################################################################
exitCode = 0
try:
# ----------------------------------------------------------------------------
# Connect to Cluster
# ----------------------------------------------------------------------------
client = aerospike.client(config).connect(options.username, options.password)
# ----------------------------------------------------------------------------
# Perform Operation
# ----------------------------------------------------------------------------
try:
policy = {}
user = "foo-example"
password = "bar"
client.admin_set_password(user, password, policy)
print("OK, password set for 1 user")
except Exception as e:
print("error: {0}".format(e), file=sys.stderr)
print("In case of invalid user first create user using create_user.py")
exitCode = 2
# ----------------------------------------------------------------------------
# Close Connection to Cluster
# ----------------------------------------------------------------------------
client.close()
except Exception as eargs:
print("error: {0}".format(eargs), file=sys.stderr)
exitCode = 3
################################################################################
# Exit
################################################################################
sys.exit(exitCode)
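# Hedged usage sketch: with security enabled on the server, the script is
# typically run with an administrative account, e.g.
#
#   $ python set_password.py -h 127.0.0.1 -p 3000 -U admin -P admin
#
# which connects and then sets the password "bar" for the hard-coded user
# "foo-example" (create that user first, e.g. with create_user.py).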
| 32.041322
| 89
| 0.446737
|
cd4fb0bad8846091c96ee09ba6e362b0bcbb3276
| 12
|
py
|
Python
|
projects/mako/test.py
|
fleimgruber/python
|
2e735762c73651cffc027ca850b2a58d87d54b49
|
[
"Unlicense"
] | 25
|
2021-10-30T19:54:59.000Z
|
2022-03-29T06:11:02.000Z
|
projects/mako/test.py
|
fleimgruber/python
|
2e735762c73651cffc027ca850b2a58d87d54b49
|
[
"Unlicense"
] | 21
|
2021-10-19T01:09:38.000Z
|
2022-03-24T16:08:53.000Z
|
projects/mako/test.py
|
fleimgruber/python
|
2e735762c73651cffc027ca850b2a58d87d54b49
|
[
"Unlicense"
] | 3
|
2022-01-25T20:25:13.000Z
|
2022-03-08T02:58:50.000Z
|
import mako
| 6
| 11
| 0.833333
|
694afc6bd0008541f5cf1afdac8fb90c75209b2c
| 1,959
|
py
|
Python
|
cf_xarray/tests/__init__.py
|
ocefpaf/cf-xarray
|
7bdb033395c8959a5d8147fe148ec987c1122387
|
[
"Apache-2.0"
] | 91
|
2020-06-01T16:24:17.000Z
|
2022-03-30T23:17:45.000Z
|
cf_xarray/tests/__init__.py
|
ocefpaf/cf-xarray
|
7bdb033395c8959a5d8147fe148ec987c1122387
|
[
"Apache-2.0"
] | 245
|
2020-06-01T16:16:48.000Z
|
2022-03-31T14:15:59.000Z
|
cf_xarray/tests/__init__.py
|
ocefpaf/cf-xarray
|
7bdb033395c8959a5d8147fe148ec987c1122387
|
[
"Apache-2.0"
] | 29
|
2020-06-01T15:57:27.000Z
|
2022-01-19T16:44:14.000Z
|
import importlib
import re
from contextlib import contextmanager
from distutils import version
import dask
import pytest
@contextmanager
def raises_regex(error, pattern):
__tracebackhide__ = True
with pytest.raises(error) as excinfo:
yield
message = str(excinfo.value)
if not re.search(pattern, message):
raise AssertionError(
f"exception {excinfo.value!r} did not match pattern {pattern!r}"
)
class CountingScheduler:
"""Simple dask scheduler counting the number of computes.
Reference: https://stackoverflow.com/questions/53289286/"""
def __init__(self, max_computes=0):
self.total_computes = 0
self.max_computes = max_computes
def __call__(self, dsk, keys, **kwargs):
self.total_computes += 1
if self.total_computes > self.max_computes:
raise RuntimeError(
"Too many computes. Total: %d > max: %d."
% (self.total_computes, self.max_computes)
)
return dask.get(dsk, keys, **kwargs)
def raise_if_dask_computes(max_computes=0):
scheduler = CountingScheduler(max_computes)
return dask.config.set(scheduler=scheduler)
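# Hedged usage sketch: dask.config.set(...) doubles as a context manager, so
# the helper is typically used like this (dask.array assumed available):
#
#     import dask.array as da
#     arr = da.zeros((4, 4), chunks=2)
#     with raise_if_dask_computes():           # no compute allowed inside
#         lazy = arr + 1                       # fine: stays lazy
#     with raise_if_dask_computes(max_computes=1):
#         (arr + 1).compute()                  # fine: exactly one compute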
def _importorskip(modname, minversion=None):
try:
mod = importlib.import_module(modname)
has = True
if minversion is not None:
if LooseVersion(mod.__version__) < LooseVersion(minversion):
raise ImportError("Minimum version not satisfied")
except ImportError:
has = False
func = pytest.mark.skipif(not has, reason=f"requires {modname}")
return has, func
def LooseVersion(vstring):
# Our development version is something like '0.10.9+aac7bfc'
# This function just ignored the git commit id.
vstring = vstring.split("+")[0]
return version.LooseVersion(vstring)
has_pint, requires_pint = _importorskip("pint")
has_shapely, requires_shapely = _importorskip("shapely")
| 28.808824
| 76
| 0.677897
|
fb4509267fb994db2e7ad09aa12a9ac7a647e6e4
| 1,151
|
py
|
Python
|
gui/component/navigation_bar.py
|
acc-cosc-1336/cosc-1336-fall-2017-RobScaley
|
44c807ac95a6348f5643941d745b9232a127610e
|
[
"MIT"
] | null | null | null |
gui/component/navigation_bar.py
|
acc-cosc-1336/cosc-1336-fall-2017-RobScaley
|
44c807ac95a6348f5643941d745b9232a127610e
|
[
"MIT"
] | null | null | null |
gui/component/navigation_bar.py
|
acc-cosc-1336/cosc-1336-fall-2017-RobScaley
|
44c807ac95a6348f5643941d745b9232a127610e
|
[
"MIT"
] | null | null | null |
from tkinter import LEFT
from tkinter.ttk import Frame, Button
class NavigationBar(Frame):
def __init__(self, parent, data_source):
Frame.__init__(self, parent)
self.data_source = data_source
self.init_form()
def init_form(self):
nextButton = Button(self, text="Next ", command=self.on_next)
updateButton = Button(self, text="Update ", command=self.on_update)
deleteButton = Button(self, text="Delete ", command=self.on_delete)
previousButton = Button(self, text="Previous ", command=self.on_previous)
searchButton = Button(self, text="Search ", command=self.master.on_search)
nextButton.pack(side=LEFT)
previousButton.pack(side=LEFT)
updateButton.pack(side=LEFT)
deleteButton.pack(side=LEFT)
searchButton.pack(side=LEFT)
def on_next(self):
self.data_source.next_record()
def on_update(self):
self.data_source.request_update()
def on_delete(self):
pass
def on_previous(self):
self.data_source.previous_record()
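# Hedged wiring sketch: the bar only assumes a parent widget exposing
# on_search() and a data_source exposing next_record(), previous_record()
# and request_update() (delete is currently a no-op), e.g.
#
#     class RecordSource:
#         def next_record(self): ...
#         def previous_record(self): ...
#         def request_update(self): ...
#
#     nav = NavigationBar(parent_frame, RecordSource())
#     nav.pack()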
| 31.972222
| 87
| 0.629018
|
c60b1a37bc6cd21450aa86672bd743b8fd1b0747
| 18,363
|
py
|
Python
|
qa/rpc-tests/test_framework/comptool.py
|
mirzaei-ce/core-rezabit
|
39c65eab32ca33a498c440556860163731a79195
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/test_framework/comptool.py
|
mirzaei-ce/core-rezabit
|
39c65eab32ca33a498c440556860163731a79195
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/test_framework/comptool.py
|
mirzaei-ce/core-rezabit
|
39c65eab32ca33a498c440556860163731a79195
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from mininode import *
from blockstore import BlockStore, TxStore
from util import p2p_port
'''
This is a tool for comparing two or more rezabitds to each other
using a script provided.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that returns TestInstance objects. See below for definition.
'''
# TestNode behaves as follows:
# Configure with a BlockStore and TxStore
# on_inv: log the message but don't request
# on_headers: log the chain tip
# on_pong: update ping response map (for synchronization)
# on_getheaders: provide headers via BlockStore
# on_getdata: provide blocks via BlockStore
global mininode_lock
def wait_until(predicate, attempts=float('inf'), timeout=float('inf')):
attempt = 0
elapsed = 0
while attempt < attempts and elapsed < timeout:
with mininode_lock:
if predicate():
return True
attempt += 1
elapsed += 0.05
time.sleep(0.05)
return False
class RejectResult(object):
'''
Outcome that expects rejection of a transaction or block.
'''
def __init__(self, code, reason=''):
self.code = code
self.reason = reason
def match(self, other):
if self.code != other.code:
return False
return other.reason.startswith(self.reason)
def __repr__(self):
return '%i:%s' % (self.code,self.reason or '*')
class TestNode(NodeConnCB):
def __init__(self, block_store, tx_store):
NodeConnCB.__init__(self)
self.conn = None
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
self.tx_store = tx_store
self.tx_request_map = {}
self.block_reject_map = {}
self.tx_reject_map = {}
# When the pingmap is non-empty we're waiting for
# a response
self.pingMap = {}
self.lastInv = []
self.closed = False
def on_close(self, conn):
self.closed = True
def add_connection(self, conn):
self.conn = conn
def on_headers(self, conn, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
def on_getheaders(self, conn, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
conn.send_message(response)
def on_getdata(self, conn, message):
[conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
[conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
if i.type == 1:
self.tx_request_map[i.hash] = True
elif i.type == 2:
self.block_request_map[i.hash] = True
def on_inv(self, conn, message):
self.lastInv = [x.hash for x in message.inv]
def on_pong(self, conn, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
def on_reject(self, conn, message):
if message.message == 'tx':
self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
if message.message == 'block':
self.block_reject_map[message.data] = RejectResult(message.code, message.reason)
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
self.conn.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
self.conn.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
self.conn.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of
# [obj, True/False/None, hash/None]:
# - obj is either a CBlock, CBlockHeader, or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# - the third value is the hash to test the tip against (if None or omitted,
# use the hash of the block)
# - NOTE: if a block header, no test is performed; instead the header is
# just added to the block_store. This is to facilitate block delivery
# when communicating with headers-first clients (when withholding an
# intermediate block).
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
class TestInstance(object):
def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
self.blocks_and_transactions = objects if objects else []
self.sync_every_block = sync_every_block
self.sync_every_tx = sync_every_tx
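# Hedged sketch of the get_tests() contract described in the module
# docstring: a test generator yields TestInstance objects whose entries are
# [object, expected_outcome, optional_tip] lists, e.g.
#
#     class ExampleComparisonTest(object):
#         def get_tests(self):
#             block = self.next_block()            # assumed helper returning a CBlock
#             yield TestInstance([[block, True]])  # expect acceptance
#             tx = self.bad_tx()                   # assumed helper returning a CTransaction
#             yield TestInstance([[tx, RejectResult(16)]], sync_every_tx=True)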
class TestManager(object):
def __init__(self, testgen, datadir):
self.test_generator = testgen
self.connections = []
self.test_nodes = []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
test_node = TestNode(self.block_store, self.tx_store)
self.test_nodes.append(test_node)
self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
# Make sure the TestNode (callback class) has a reference to its
# associated NodeConn
test_node.add_connection(self.connections[-1])
def clear_all_connections(self):
self.connections = []
self.test_nodes = []
def wait_for_disconnections(self):
def disconnected():
return all(node.closed for node in self.test_nodes)
return wait_until(disconnected, timeout=10)
def wait_for_verack(self):
def veracked():
return all(node.verack_received for node in self.test_nodes)
return wait_until(veracked, timeout=10)
def wait_for_pings(self, counter):
def received_pongs():
return all(node.received_ping_response(counter) for node in self.test_nodes)
return wait_until(received_pongs)
# sync_blocks: Wait for all connections to request the blockhash given
# then send get_headers to find out the tip of each node, and synchronize
# the response by using a ping (and waiting for pong with same nonce).
def sync_blocks(self, blockhash, num_blocks):
def blocks_requested():
return all(
blockhash in node.block_request_map and node.block_request_map[blockhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(blocks_requested, attempts=20*num_blocks):
# print [ c.cb.block_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested block")
# Send getheaders message
[ c.cb.send_getheaders() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Analogous to sync_block (see above)
def sync_transaction(self, txhash, num_events):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
def transaction_requested():
return all(
txhash in node.tx_request_map and node.tx_request_map[txhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(transaction_requested, attempts=20*num_events):
# print [ c.cb.tx_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested transaction")
# Get the mempool
[ c.cb.send_mempool() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
[ c.cb.lastInv.sort() for c in self.connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
return False
elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
if c.cb.bestblockhash == blockhash:
return False
if blockhash not in c.cb.block_reject_map:
print 'Block not in reject map: %064x' % (blockhash)
return False
if not outcome.match(c.cb.block_reject_map[blockhash]):
print 'Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash)
return False
elif ((c.cb.bestblockhash == blockhash) != outcome):
# print c.cb.bestblockhash, blockhash, outcome
return False
return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
# This is somewhat of a strange comparison, in that we're either comparing
# a particular tx to an outcome, or the entire mempools altogether;
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
# Make sure the mempools agree with each other
if c.cb.lastInv != self.connections[0].cb.lastInv:
# print c.rpc.getrawmempool()
return False
elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
if txhash in c.cb.lastInv:
return False
if txhash not in c.cb.tx_reject_map:
print 'Tx not in reject map: %064x' % (txhash)
return False
if not outcome.match(c.cb.tx_reject_map[txhash]):
print 'Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash)
return False
elif ((txhash in c.cb.lastInv) != outcome):
# print c.rpc.getrawmempool(), c.cb.lastInv
return False
return True
def run(self):
# Wait until verack is received
self.wait_for_verack()
test_number = 1
for test_instance in self.test_generator.get_tests():
# We use these variables to keep track of the last block
# and last transaction in the tests, which are used
# if we're not syncing on every block or every tx.
[ block, block_outcome, tip ] = [ None, None, None ]
[ tx, tx_outcome ] = [ None, None ]
invqueue = []
for test_obj in test_instance.blocks_and_transactions:
b_or_t = test_obj[0]
outcome = test_obj[1]
# Determine if we're dealing with a block or tx
if isinstance(b_or_t, CBlock): # Block test runner
block = b_or_t
block_outcome = outcome
tip = block.sha256
# each test_obj can have an optional third argument
# to specify the tip we should compare with
# (default is to use the block being tested)
if len(test_obj) >= 3:
tip = test_obj[2]
# Add to shared block_store, set as current block
# If there was an open getdata request for the block
# previously, and we didn't have an entry in the
# block_store, then immediately deliver, because the
# node wouldn't send another getdata request while
# the earlier one is outstanding.
first_block_with_hash = True
if self.block_store.get(block.sha256) is not None:
first_block_with_hash = False
with mininode_lock:
self.block_store.add_block(block)
for c in self.connections:
if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True:
# There was a previous request for this block hash
# Most likely, we delivered a header for this block
# but never had the block to respond to the getdata
c.send_message(msg_block(block))
else:
c.cb.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
[ c.cb.send_inv(block) for c in self.connections ]
self.sync_blocks(block.sha256, 1)
if (not self.check_results(tip, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(2, block.sha256))
elif isinstance(b_or_t, CBlockHeader):
block_header = b_or_t
self.block_store.add_header(block_header)
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
tx_outcome = outcome
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
for c in self.connections:
c.cb.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
[ c.cb.send_inv(tx) for c in self.connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_results(tip, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
print "Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ]
test_number += 1
[ c.disconnect_node() for c in self.connections ]
self.wait_for_disconnections()
self.block_store.close()
self.tx_store.close()
| 44.035971
| 145
| 0.602461
|
e55894eb9b946d49b32faf653e07bece54394cb9
| 239,821
|
py
|
Python
|
imgaug/augmenters/geometric.py
|
fmder/imgaug
|
4c81c7a7503b64f54d76144385ea4330fd7c8a84
|
[
"MIT"
] | null | null | null |
imgaug/augmenters/geometric.py
|
fmder/imgaug
|
4c81c7a7503b64f54d76144385ea4330fd7c8a84
|
[
"MIT"
] | null | null | null |
imgaug/augmenters/geometric.py
|
fmder/imgaug
|
4c81c7a7503b64f54d76144385ea4330fd7c8a84
|
[
"MIT"
] | null | null | null |
"""Augmenters that apply affine or similar transformations.
List of augmenters:
* :class:`Affine`
* :class:`ScaleX`
* :class:`ScaleY`
* :class:`TranslateX`
* :class:`TranslateY`
* :class:`Rotate`
* :class:`ShearX`
* :class:`ShearY`
* :class:`AffineCv2`
* :class:`PiecewiseAffine`
* :class:`PerspectiveTransform`
* :class:`ElasticTransformation`
* :class:`Rot90`
* :class:`WithPolarWarping`
* :class:`Jigsaw`
"""
from __future__ import print_function, division, absolute_import
import math
import functools
import numpy as np
from scipy import ndimage
from skimage import transform as tf
import cv2
import six.moves as sm
import imgaug as ia
from imgaug.augmentables.polys import _ConcavePolygonRecoverer
from . import meta
from . import blur as blur_lib
from . import size as size_lib
from .. import parameters as iap
from .. import dtypes as iadt
from .. import random as iarandom
_VALID_DTYPES_CV2_ORDER_0 = {"uint8", "uint16", "int8", "int16", "int32",
"float16", "float32", "float64",
"bool"}
_VALID_DTYPES_CV2_ORDER_NOT_0 = {"uint8", "uint16", "int8", "int16",
"float16", "float32", "float64",
"bool"}
# skimage | cv2
# 0 | cv2.INTER_NEAREST
# 1 | cv2.INTER_LINEAR
# 2 | -
# 3 | cv2.INTER_CUBIC
# 4 | -
_AFFINE_INTERPOLATION_ORDER_SKIMAGE_TO_CV2 = {
0: cv2.INTER_NEAREST,
1: cv2.INTER_LINEAR,
2: cv2.INTER_CUBIC,
3: cv2.INTER_CUBIC,
4: cv2.INTER_CUBIC
}
# constant, edge, symmetric, reflect, wrap
# skimage | cv2
# constant | cv2.BORDER_CONSTANT
# edge | cv2.BORDER_REPLICATE
# symmetric | cv2.BORDER_REFLECT
# reflect | cv2.BORDER_REFLECT_101
# wrap | cv2.BORDER_WRAP
_AFFINE_MODE_SKIMAGE_TO_CV2 = {
"constant": cv2.BORDER_CONSTANT,
"edge": cv2.BORDER_REPLICATE,
"symmetric": cv2.BORDER_REFLECT,
"reflect": cv2.BORDER_REFLECT_101,
"wrap": cv2.BORDER_WRAP
}
def _handle_order_arg(order, backend):
    # Performance in skimage for Affine:
# 1.0x order 0
# 1.5x order 1
# 3.0x order 3
# 30.0x order 4
# 60.0x order 5
# measurement based on 256x256x3 batches, difference is smaller
# on smaller images (seems to grow more like exponentially with image
# size)
if order == ia.ALL:
if backend in ["auto", "cv2"]:
return iap.Choice([0, 1, 3])
        # don't use order=2 (bi-quadratic) because that is apparently
# currently not recommended (and throws a warning)
return iap.Choice([0, 1, 3, 4, 5])
if ia.is_single_integer(order):
assert 0 <= order <= 5, (
"Expected order's integer value to be in the interval [0, 5], "
"got %d." % (order,))
if backend == "cv2":
assert order in [0, 1, 3], (
"Backend \"cv2\" and order=%d was chosen, but cv2 backend "
"can only handle order 0, 1 or 3." % (order,))
return iap.Deterministic(order)
if isinstance(order, list):
assert all([ia.is_single_integer(val) for val in order]), (
"Expected order list to only contain integers, "
"got types %s." % (str([type(val) for val in order]),))
assert all([0 <= val <= 5 for val in order]), (
"Expected all of order's integer values to be in range "
"0 <= x <= 5, got %s." % (str(order),))
if backend == "cv2":
assert all([val in [0, 1, 3] for val in order]), (
"cv2 backend can only handle order 0, 1 or 3. Got order "
"list of %s." % (order,))
return iap.Choice(order)
if isinstance(order, iap.StochasticParameter):
return order
raise Exception(
"Expected order to be imgaug.ALL, int, list of int or "
"StochasticParameter, got %s." % (type(order),))
def _handle_cval_arg(cval):
if cval == ia.ALL:
# TODO change this so that it is dynamically created per image
# (or once per dtype)
return iap.Uniform(0, 255) # skimage transform expects float
return iap.handle_continuous_param(
cval, "cval", value_range=None, tuple_to_uniform=True,
list_to_choice=True)
# currently used for Affine and PiecewiseAffine
# TODO use iap.handle_categorical_string_param() here
def _handle_mode_arg(mode):
if mode == ia.ALL:
return iap.Choice(["constant", "edge", "symmetric",
"reflect", "wrap"])
if ia.is_string(mode):
return iap.Deterministic(mode)
if isinstance(mode, list):
assert all([ia.is_string(val) for val in mode]), (
"Expected list of modes to only contain strings, got "
"types %s" % (", ".join([str(type(v)) for v in mode])))
return iap.Choice(mode)
if isinstance(mode, iap.StochasticParameter):
return mode
raise Exception(
"Expected mode to be imgaug.ALL, a string, a list of strings "
"or StochasticParameter, got %s." % (type(mode),))
def _warp_affine_arr(arr, matrix, order=1, mode="constant", cval=0,
output_shape=None, backend="auto"):
if ia.is_single_integer(cval):
        # expand the scalar to the three border values expected further down
        cval = [cval] * 3
# no changes to zero-sized arrays
if arr.size == 0:
return arr
min_value, _center_value, max_value = \
iadt.get_value_range_of_dtype(arr.dtype)
cv2_bad_order = order not in [0, 1, 3]
if order == 0:
cv2_bad_dtype = (
arr.dtype.name
not in _VALID_DTYPES_CV2_ORDER_0)
else:
cv2_bad_dtype = (
arr.dtype.name
not in _VALID_DTYPES_CV2_ORDER_NOT_0
)
cv2_impossible = cv2_bad_order or cv2_bad_dtype
use_skimage = (
backend == "skimage"
or (backend == "auto" and cv2_impossible)
)
if use_skimage:
# cval contains 3 values as cv2 can handle 3, but
# skimage only 1
cval = cval[0]
# skimage does not clip automatically
cval = max(min(cval, max_value), min_value)
image_warped = _warp_affine_arr_skimage(
arr,
matrix,
cval=cval,
mode=mode,
order=order,
output_shape=output_shape
)
else:
        assert not cv2_bad_dtype, (
            "cv2 backend in Affine got a dtype %s, which it "
            "cannot handle. Try using a different dtype or set "
            "order=0." % (arr.dtype,))
image_warped = _warp_affine_arr_cv2(
arr,
matrix,
cval=tuple([int(v) for v in cval]),
mode=mode,
order=order,
output_shape=output_shape
)
return image_warped
def _warp_affine_arr_skimage(arr, matrix, cval, mode, order, output_shape):
iadt.gate_dtypes(
arr,
allowed=["bool",
"uint8", "uint16", "uint32",
"int8", "int16", "int32",
"float16", "float32", "float64"],
disallowed=["uint64", "uint128", "uint256",
"int64", "int128", "int256",
"float96", "float128", "float256"],
augmenter=None)
input_dtype = arr.dtype
image_warped = tf.warp(
arr,
matrix.inverse,
order=order,
mode=mode,
cval=cval,
preserve_range=True,
output_shape=output_shape,
)
# tf.warp changes all dtypes to float64, including uint8
if input_dtype == np.bool_:
image_warped = image_warped > 0.5
else:
image_warped = iadt.restore_dtypes_(image_warped, input_dtype)
return image_warped
def _warp_affine_arr_cv2(arr, matrix, cval, mode, order, output_shape):
iadt.gate_dtypes(
arr,
allowed=["bool",
"uint8", "uint16",
"int8", "int16", "int32",
"float16", "float32", "float64"],
disallowed=["uint32", "uint64", "uint128", "uint256",
"int64", "int128", "int256",
"float96", "float128", "float256"],
augmenter=None)
if order != 0:
assert arr.dtype.name != "int32", (
"Affine only supports cv2-based transformations of int32 "
"arrays when using order=0, but order was set to %d." % (
order,))
input_dtype = arr.dtype
if input_dtype in [np.bool_, np.float16]:
arr = arr.astype(np.float32)
elif input_dtype == np.int8 and order != 0:
arr = arr.astype(np.int16)
dsize = (
int(np.round(output_shape[1])),
int(np.round(output_shape[0]))
)
# map key X from skimage to cv2 or fall back to key X
mode = _AFFINE_MODE_SKIMAGE_TO_CV2.get(mode, mode)
order = _AFFINE_INTERPOLATION_ORDER_SKIMAGE_TO_CV2.get(order, order)
# TODO this uses always a tuple of 3 values for cval, even if
# #chans != 3, works with 1d but what in other cases?
nb_channels = arr.shape[-1]
if nb_channels <= 3:
        # TODO this block could also be used when order==0 for any nb_channels,
# but was deactivated for now, because cval would always
# contain 3 values and not nb_channels values
image_warped = cv2.warpAffine(
arr,
matrix.params[:2],
dsize=dsize,
flags=order,
borderMode=mode,
borderValue=cval
)
# cv2 warp drops last axis if shape is (H, W, 1)
if image_warped.ndim == 2:
image_warped = image_warped[..., np.newaxis]
else:
# warp each channel on its own, re-add channel axis, then stack
# the result from a list of [H, W, 1] to (H, W, C).
image_warped = [
cv2.warpAffine(
arr[:, :, c],
matrix.params[:2],
dsize=dsize,
flags=order,
borderMode=mode,
borderValue=tuple([cval[0]])
)
for c in sm.xrange(nb_channels)]
image_warped = np.stack(image_warped, axis=-1)
if input_dtype.name == "bool":
image_warped = image_warped > 0.5
elif input_dtype.name in ["int8", "float16"]:
image_warped = iadt.restore_dtypes_(image_warped, input_dtype)
return image_warped
def _compute_affine_warp_output_shape(matrix, input_shape):
height, width = input_shape[:2]
if height == 0 or width == 0:
return matrix, input_shape
# determine shape of output image
corners = np.array([
[0, 0],
[0, height - 1],
[width - 1, height - 1],
[width - 1, 0]
])
corners = matrix(corners)
minc = corners[:, 0].min()
minr = corners[:, 1].min()
maxc = corners[:, 0].max()
maxr = corners[:, 1].max()
out_height = maxr - minr + 1
out_width = maxc - minc + 1
if len(input_shape) == 3:
output_shape = np.ceil((out_height, out_width, input_shape[2]))
else:
output_shape = np.ceil((out_height, out_width))
output_shape = tuple([int(v) for v in output_shape.tolist()])
# fit output image in new shape
translation = (-minc, -minr)
matrix_to_fit = tf.SimilarityTransform(translation=translation)
matrix = matrix + matrix_to_fit
return matrix, output_shape
# TODO allow -1 destinations
def apply_jigsaw(arr, destinations):
"""Move cells of an image similar to a jigsaw puzzle.
This function will split the image into ``rows x cols`` cells and
move each cell to the target index given in `destinations`.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; fully tested
* ``uint32``: yes; fully tested
* ``uint64``: yes; fully tested
* ``int8``: yes; fully tested
* ``int16``: yes; fully tested
* ``int32``: yes; fully tested
* ``int64``: yes; fully tested
* ``float16``: yes; fully tested
* ``float32``: yes; fully tested
* ``float64``: yes; fully tested
* ``float128``: yes; fully tested
* ``bool``: yes; fully tested
Parameters
----------
arr : ndarray
Array with at least two dimensions denoting height and width.
destinations : ndarray
2-dimensional array containing for each cell the id of the destination
        cell. The order is expected to be a flattened c-order, i.e. row by row.
The height of the image must be evenly divisible by the number of
rows in this array. Analogous for the width and columns.
Returns
-------
ndarray
Modified image with cells moved according to `destinations`.
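
    Examples
    --------
    A minimal sketch: split a ``4x4`` array into a ``2x2`` grid of cells
    and swap the top-left and bottom-right cells (cell ids ``0`` and ``3``).

    >>> import numpy as np
    >>> arr = np.arange(4*4).reshape((4, 4)).astype(np.uint8)
    >>> destinations = np.int32([[3, 1], [2, 0]])
    >>> moved = apply_jigsaw(arr, destinations)
    >>> bool(np.array_equal(moved[0:2, 0:2], arr[2:4, 2:4]))
    True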
"""
# pylint complains about unravel_index() here
# pylint: disable=unbalanced-tuple-unpacking
nb_rows, nb_cols = destinations.shape[0:2]
assert arr.ndim >= 2, (
"Expected array with at least two dimensions, but got %d with "
"shape %s." % (arr.ndim, arr.shape))
assert (arr.shape[0] % nb_rows) == 0, (
"Expected image height to by divisible by number of rows, but got "
"height %d and %d rows. Use cropping or padding to modify the image "
"height or change the number of rows." % (arr.shape[0], nb_rows)
)
assert (arr.shape[1] % nb_cols) == 0, (
"Expected image width to by divisible by number of columns, but got "
"width %d and %d columns. Use cropping or padding to modify the image "
"width or change the number of columns." % (arr.shape[1], nb_cols)
)
cell_height = arr.shape[0] // nb_rows
cell_width = arr.shape[1] // nb_cols
dest_rows, dest_cols = np.unravel_index(
destinations.flatten(), (nb_rows, nb_cols))
result = np.zeros_like(arr)
i = 0
for source_row in np.arange(nb_rows):
for source_col in np.arange(nb_cols):
# TODO vectorize coords computation
dest_row, dest_col = dest_rows[i], dest_cols[i]
source_y1 = source_row * cell_height
source_y2 = source_y1 + cell_height
source_x1 = source_col * cell_width
source_x2 = source_x1 + cell_width
dest_y1 = dest_row * cell_height
dest_y2 = dest_y1 + cell_height
dest_x1 = dest_col * cell_width
dest_x2 = dest_x1 + cell_width
source = arr[source_y1:source_y2, source_x1:source_x2]
result[dest_y1:dest_y2, dest_x1:dest_x2] = source
i += 1
return result
def apply_jigsaw_to_coords(coords, destinations, image_shape):
"""Move coordinates on an image similar to a jigsaw puzzle.
This is the same as :func:`apply_jigsaw`, but moves coordinates within
the cells.
Parameters
----------
coords : ndarray
``(N, 2)`` array denoting xy-coordinates.
destinations : ndarray
See :func:`apply_jigsaw`.
image_shape : tuple of int
``(height, width, ...)`` shape of the image on which the
coordinates are placed. Only height and width are required.
Returns
-------
ndarray
Moved coordinates.
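
    Examples
    --------
    A minimal sketch: with a ``2x2`` grid on a ``4x4`` image, the
    destination grid below moves cell ``0`` (top-left) to cell ``3``
    (bottom-right), so a point at ``(1, 1)`` ends up at ``(3, 3)``.

    >>> import numpy as np
    >>> coords = np.float32([[1.0, 1.0]])
    >>> destinations = np.int32([[3, 1], [2, 0]])
    >>> apply_jigsaw_to_coords(coords, destinations, (4, 4)).tolist()
    [[3.0, 3.0]]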
"""
# pylint complains about unravel_index() here
# pylint: disable=unbalanced-tuple-unpacking
nb_rows, nb_cols = destinations.shape[0:2]
height, width = image_shape[0:2]
cell_height = height // nb_rows
cell_width = width // nb_cols
dest_rows, dest_cols = np.unravel_index(
destinations.flatten(), (nb_rows, nb_cols))
result = np.copy(coords)
# TODO vectorize this loop
for i, (x, y) in enumerate(coords):
ooi_x = (x < 0 or x >= width)
ooi_y = (y < 0 or y >= height)
if ooi_x or ooi_y:
continue
source_row = int(y // cell_height)
source_col = int(x // cell_width)
source_cell_idx = (source_row * nb_cols) + source_col
dest_row = dest_rows[source_cell_idx]
dest_col = dest_cols[source_cell_idx]
source_y1 = source_row * cell_height
source_x1 = source_col * cell_width
dest_y1 = dest_row * cell_height
dest_x1 = dest_col * cell_width
result[i, 0] = dest_x1 + (x - source_x1)
result[i, 1] = dest_y1 + (y - source_y1)
return result
def generate_jigsaw_destinations(nb_rows, nb_cols, max_steps, random_state,
connectivity=4):
"""Generate a destination pattern for :func:`apply_jigsaw`.
Parameters
----------
nb_rows : int
Number of rows to split the image into.
nb_cols : int
Number of columns to split the image into.
max_steps : int
Maximum number of cells that each cell may be moved.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState
RNG or seed to use. If ``None`` the global RNG will be used.
connectivity : int, optional
Whether a diagonal move of a cell counts as one step
(``connectivity=8``) or two steps (``connectivity=4``).
Returns
-------
ndarray
2-dimensional array containing for each cell the id of the target
cell.
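
    Examples
    --------
    A minimal sketch: generate destinations for a ``4x4`` cell grid in
    which each cell moves by at most ``2`` steps. The result is always a
    permutation of the cell ids ``0`` to ``15``.

    >>> destinations = generate_jigsaw_destinations(
    ...     4, 4, max_steps=2, random_state=1)
    >>> destinations.shape
    (4, 4)
    >>> sorted(destinations.flatten().tolist()) == list(range(16))
    True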
"""
assert connectivity in (4, 8), (
"Expected connectivity of 4 or 8, got %d." % (connectivity,))
random_state = iarandom.RNG(random_state)
steps = random_state.integers(0, max_steps, size=(nb_rows, nb_cols),
endpoint=True)
directions = random_state.integers(0, connectivity,
size=(nb_rows, nb_cols, max_steps),
endpoint=False)
destinations = np.arange(nb_rows*nb_cols).reshape((nb_rows, nb_cols))
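    # Random walk over the grid: in every step, each cell that still has
    # moves left picks a neighbouring cell; if that neighbour also has
    # moves left, the two cells swap their destination ids and both use up
    # one move. Starting from ``arange`` and only ever swapping keeps the
    # result a permutation of the cell ids.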
for step in np.arange(max_steps):
directions_step = directions[:, :, step]
for y in np.arange(nb_rows):
for x in np.arange(nb_cols):
if steps[y, x] > 0:
y_target, x_target = {
0: (y-1, x+0),
1: (y+0, x+1),
2: (y+1, x+0),
3: (y+0, x-1),
4: (y-1, x-1),
5: (y-1, x+1),
6: (y+1, x+1),
7: (y+1, x-1)
}[directions_step[y, x]]
y_target = max(min(y_target, nb_rows-1), 0)
x_target = max(min(x_target, nb_cols-1), 0)
target_steps = steps[y_target, x_target]
if (y, x) != (y_target, x_target) and target_steps >= 1:
source_dest = destinations[y, x]
target_dest = destinations[y_target, x_target]
destinations[y, x] = target_dest
destinations[y_target, x_target] = source_dest
steps[y, x] -= 1
steps[y_target, x_target] -= 1
return destinations
class _AffineSamplingResult(object):
def __init__(self, scale=None, translate=None, translate_mode="px",
rotate=None, shear=None, cval=None, mode=None, order=None):
self.scale = scale
self.translate = translate
self.translate_mode = translate_mode
self.rotate = rotate
self.shear = shear
self.cval = cval
self.mode = mode
self.order = order
def get_affine_parameters(self, idx, arr_shape, image_shape):
scale_y = self.scale[1][idx] # TODO 1 and 0 should be inverted here
scale_x = self.scale[0][idx]
translate_y = self.translate[1][idx] # TODO same as above
translate_x = self.translate[0][idx]
assert self.translate_mode in ["px", "percent"], (
"Expected 'px' or 'percent', got '%s'." % (self.translate_mode,))
if self.translate_mode == "percent":
translate_y_px = translate_y * arr_shape[0]
else:
translate_y_px = (translate_y / image_shape[0]) * arr_shape[0]
if self.translate_mode == "percent":
translate_x_px = translate_x * arr_shape[1]
else:
translate_x_px = (translate_x / image_shape[1]) * arr_shape[1]
rotate_deg = self.rotate[idx]
shear_x_deg = self.shear[0][idx]
shear_y_deg = self.shear[1][idx]
rotate_rad, shear_x_rad, shear_y_rad = np.deg2rad([
rotate_deg, shear_x_deg, shear_y_deg])
# we add the _deg versions of rotate and shear here for PILAffine,
# Affine itself only uses *_rad
return {
"scale_y": scale_y,
"scale_x": scale_x,
"translate_y_px": translate_y_px,
"translate_x_px": translate_x_px,
"rotate_rad": rotate_rad,
"shear_y_rad": shear_y_rad,
"shear_x_rad": shear_x_rad,
"rotate_deg": rotate_deg,
"shear_y_deg": shear_y_deg,
"shear_x_deg": shear_x_deg
}
def to_matrix(self, idx, arr_shape, image_shape, fit_output,
shift_add=(0.5, 0.5)):
if 0 in image_shape:
return tf.AffineTransform(), arr_shape
height, width = arr_shape[0:2]
params = self.get_affine_parameters(idx,
arr_shape=arr_shape,
image_shape=image_shape)
# for images we use additional shifts of (0.5, 0.5) as otherwise
# we get an ugly black border for 90deg rotations
shift_y = height / 2.0 - shift_add[0]
shift_x = width / 2.0 - shift_add[1]
matrix_to_topleft = tf.SimilarityTransform(
translation=[-shift_x, -shift_y])
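        # the y-shear is realized as an x-shear within a coordinate system
        # that is first rotated by 90deg and rotated back afterwards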
matrix_shear_y_rot = tf.AffineTransform(rotation=-3.141592/2)
matrix_shear_y = tf.AffineTransform(shear=params["shear_y_rad"])
matrix_shear_y_rot_inv = tf.AffineTransform(rotation=3.141592/2)
matrix_transforms = tf.AffineTransform(
scale=(params["scale_x"], params["scale_y"]),
translation=(params["translate_x_px"], params["translate_y_px"]),
rotation=params["rotate_rad"],
shear=params["shear_x_rad"]
)
matrix_to_center = tf.SimilarityTransform(
translation=[shift_x, shift_y])
matrix = (matrix_to_topleft
+ matrix_shear_y_rot
+ matrix_shear_y
+ matrix_shear_y_rot_inv
+ matrix_transforms
+ matrix_to_center)
if fit_output:
return _compute_affine_warp_output_shape(matrix, arr_shape)
return matrix, arr_shape
def to_matrix_cba(self, idx, arr_shape, fit_output, shift_add=(0.0, 0.0)):
return self.to_matrix(idx, arr_shape, arr_shape, fit_output, shift_add)
def copy(self):
return _AffineSamplingResult(
scale=self.scale,
translate=self.translate,
translate_mode=self.translate_mode,
rotate=self.rotate,
shear=self.shear,
cval=self.cval,
mode=self.mode,
order=self.order
)
def _is_identity_matrix(matrix, eps=1e-4):
identity = np.float32([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
return np.average(np.abs(identity - matrix.params)) <= eps
class Affine(meta.Augmenter):
"""
Augmenter to apply affine transformations to images.
This is mostly a wrapper around the corresponding classes and functions
in OpenCV and skimage.
Affine transformations involve:
- Translation ("move" image on the x-/y-axis)
- Rotation
- Scaling ("zoom" in/out)
- Shear (move one side of the image, turning a square into a trapezoid)
All such transformations can create "new" pixels in the image without a
defined content, e.g. if the image is translated to the left, pixels
are created on the right.
A method has to be defined to deal with these pixel values. The
parameters `cval` and `mode` of this class deal with this.
Some transformations involve interpolations between several pixels
of the input image to generate output pixel values. The parameter `order`
deals with the method of interpolation used for this.
    .. note::
While this augmenter supports segmentation maps and heatmaps that
have a different size than the corresponding image, it is strongly
recommended to use the same aspect ratios. E.g. for an image of
shape ``(200, 100, 3)``, good segmap/heatmap array shapes also follow
a ``2:1`` ratio and ideally are ``(200, 100, C)``, ``(100, 50, C)`` or
``(50, 25, C)``. Otherwise, transformations involving rotations or
shearing will produce unaligned outputs.
For performance reasons, there is no explicit validation of whether
the aspect ratios are similar.
dtype support::
if (backend="skimage", order in [0, 1])::
* ``uint8``: yes; tested
* ``uint16``: yes; tested
* ``uint32``: yes; tested (1)
* ``uint64``: no (2)
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested (1)
* ``int64``: no (2)
* ``float16``: yes; tested
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no (2)
* ``bool``: yes; tested
- (1) scikit-image converts internally to float64, which might
affect the accuracy of large integers. In tests this seemed
to not be an issue.
- (2) results too inaccurate
if (backend="skimage", order in [3, 4])::
* ``uint8``: yes; tested
* ``uint16``: yes; tested
* ``uint32``: yes; tested (1)
* ``uint64``: no (2)
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested (1)
* ``int64``: no (2)
* ``float16``: yes; tested
* ``float32``: yes; tested
* ``float64``: limited; tested (3)
* ``float128``: no (2)
* ``bool``: yes; tested
- (1) scikit-image converts internally to float64, which might
affect the accuracy of large integers. In tests this seemed
to not be an issue.
- (2) results too inaccurate
- (3) ``NaN`` around minimum and maximum of float64 value range
if (backend="skimage", order=5])::
* ``uint8``: yes; tested
* ``uint16``: yes; tested
* ``uint32``: yes; tested (1)
* ``uint64``: no (2)
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested (1)
* ``int64``: no (2)
* ``float16``: yes; tested
* ``float32``: yes; tested
* ``float64``: limited; not tested (3)
* ``float128``: no (2)
* ``bool``: yes; tested
- (1) scikit-image converts internally to ``float64``, which
might affect the accuracy of large integers. In tests
this seemed to not be an issue.
- (2) results too inaccurate
- (3) ``NaN`` around minimum and maximum of float64 value range
if (backend="cv2", order=0)::
* ``uint8``: yes; tested
* ``uint16``: yes; tested
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested
* ``int64``: no (2)
* ``float16``: yes; tested (3)
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no (1)
* ``bool``: yes; tested (3)
- (1) rejected by cv2
- (2) changed to ``int32`` by cv2
- (3) mapped internally to ``float32``
if (backend="cv2", order=1):
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: yes; tested (3)
* ``int16``: yes; tested
* ``int32``: no (2)
* ``int64``: no (2)
* ``float16``: yes; tested (4)
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no (1)
* ``bool``: yes; tested (4)
- (1) rejected by cv2
- (2) causes cv2 error: ``cv2.error: OpenCV(3.4.4)
(...)imgwarp.cpp:1805: error:
(-215:Assertion failed) ifunc != 0 in function 'remap'``
- (3) mapped internally to ``int16``
- (4) mapped internally to ``float32``
if (backend="cv2", order=3):
* ``uint8``: yes; tested
* ``uint16``: yes; tested
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: yes; tested (3)
* ``int16``: yes; tested
* ``int32``: no (2)
* ``int64``: no (2)
* ``float16``: yes; tested (4)
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no (1)
* ``bool``: yes; tested (4)
- (1) rejected by cv2
- (2) causes cv2 error: ``cv2.error: OpenCV(3.4.4)
(...)imgwarp.cpp:1805: error:
(-215:Assertion failed) ifunc != 0 in function 'remap'``
- (3) mapped internally to ``int16``
- (4) mapped internally to ``float32``
Parameters
----------
scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter or dict {"x": number/tuple/list/StochasticParameter, "y": number/tuple/list/StochasticParameter}, optional
Scaling factor to use, where ``1.0`` denotes "no change" and
``0.5`` is zoomed out to ``50`` percent of the original size.
* If a single number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``. That value will be
used identically for both x- and y-axis.
* If a list, then a random value will be sampled from that list
per image (again, used for both x- and y-axis).
* If a ``StochasticParameter``, then from that parameter a value
will be sampled per image (again, used for both x- and y-axis).
* If a dictionary, then it is expected to have the keys ``x``
and/or ``y``. Each of these keys can have the same values as
          described above. Using a dictionary allows setting different
          values for the two axes, and sampling will then happen
*independently* per axis, resulting in samples that differ
between the axes.
translate_percent : None or number or tuple of number or list of number or imgaug.parameters.StochasticParameter or dict {"x": number/tuple/list/StochasticParameter, "y": number/tuple/list/StochasticParameter}, optional
Translation as a fraction of the image height/width (x-translation,
y-translation), where ``0`` denotes "no change" and ``0.5`` denotes
"half of the axis size".
* If ``None`` then equivalent to ``0.0`` unless `translate_px` has
a value other than ``None``.
* If a single number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``. That sampled fraction
value will be used identically for both x- and y-axis.
* If a list, then a random value will be sampled from that list
per image (again, used for both x- and y-axis).
* If a ``StochasticParameter``, then from that parameter a value
will be sampled per image (again, used for both x- and y-axis).
* If a dictionary, then it is expected to have the keys ``x``
and/or ``y``. Each of these keys can have the same values as
          described above. Using a dictionary allows setting different
          values for the two axes, and sampling will then happen
*independently* per axis, resulting in samples that differ
between the axes.
translate_px : None or int or tuple of int or list of int or imgaug.parameters.StochasticParameter or dict {"x": int/tuple/list/StochasticParameter, "y": int/tuple/list/StochasticParameter}, optional
Translation in pixels.
* If ``None`` then equivalent to ``0`` unless `translate_percent`
has a value other than ``None``.
* If a single int, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value will be uniformly sampled
per image from the discrete interval ``[a..b]``. That number
will be used identically for both x- and y-axis.
* If a list, then a random value will be sampled from that list
per image (again, used for both x- and y-axis).
* If a ``StochasticParameter``, then from that parameter a value
will be sampled per image (again, used for both x- and y-axis).
* If a dictionary, then it is expected to have the keys ``x``
and/or ``y``. Each of these keys can have the same values as
          described above. Using a dictionary allows setting different
          values for the two axes, and sampling will then happen
*independently* per axis, resulting in samples that differ
between the axes.
rotate : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Rotation in degrees (**NOT** radians), i.e. expected value range is
around ``[-360, 360]``. Rotation happens around the *center* of the
image, not the top left corner as in some other frameworks.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]`` and used as the rotation
value.
* If a list, then a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, then this parameter will be used to
sample the rotation value per image.
shear : number or tuple of number or list of number or imgaug.parameters.StochasticParameter or dict {"x": int/tuple/list/StochasticParameter, "y": int/tuple/list/StochasticParameter}, optional
Shear in degrees (**NOT** radians), i.e. expected value range is
around ``[-360, 360]``, with reasonable values being in the range
of ``[-45, 45]``.
* If a number, then that value will be used for all images as
the shear on the x-axis (no shear on the y-axis will be done).
        * If a tuple ``(a, b)``, then a value will be uniformly sampled
          per image from the interval ``[a, b]`` and be used as the shear
          value on both the x- and y-axis.
        * If a list, then a random value will be sampled from that list
          per image and be used as the shear value on both axes.
        * If a ``StochasticParameter``, then this parameter will be used
          to sample one shear value per image, applied to both axes.
* If a dictionary, then similar to `translate_percent`, i.e. one
``x`` key and/or one ``y`` key are expected, denoting the
shearing on the x- and y-axis respectively. The allowed datatypes
are again ``number``, ``tuple`` ``(a, b)``, ``list`` or
``StochasticParameter``.
order : int or iterable of int or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
Interpolation order to use. Same meaning as in ``skimage``:
* ``0``: ``Nearest-neighbor``
* ``1``: ``Bi-linear`` (default)
* ``2``: ``Bi-quadratic`` (not recommended by skimage)
* ``3``: ``Bi-cubic``
* ``4``: ``Bi-quartic``
* ``5``: ``Bi-quintic``
        Methods ``0`` and ``1`` are fast, ``3`` is a bit slower, ``4`` and
``5`` are very slow. If the backend is ``cv2``, the mapping to
OpenCV's interpolation modes is as follows:
* ``0`` -> ``cv2.INTER_NEAREST``
* ``1`` -> ``cv2.INTER_LINEAR``
* ``2`` -> ``cv2.INTER_CUBIC``
* ``3`` -> ``cv2.INTER_CUBIC``
* ``4`` -> ``cv2.INTER_CUBIC``
As datatypes this parameter accepts:
* If a single ``int``, then that order will be used for all images.
* If a list, then a random value will be sampled from that list
per image.
        * If ``imgaug.ALL``, then equivalent to list ``[0, 1, 3, 4, 5]``
in case of ``backend=skimage`` and otherwise ``[0, 1, 3]``.
* If ``StochasticParameter``, then that parameter is queried per
image to sample the order value to use.
cval : number or tuple of number or list of number or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
The constant value to use when filling in newly created pixels.
(E.g. translating by 1px to the right will create a new 1px-wide
column of pixels on the left of the image). The value is only used
when `mode=constant`. The expected value range is ``[0, 255]`` for
``uint8`` images. It may be a float value.
* If this is a single number, then that value will be used
(e.g. 0 results in black pixels).
* If a tuple ``(a, b)``, then three values (for three image
channels) will be uniformly sampled per image from the
interval ``[a, b]``.
* If a list, then a random value will be sampled from that list
per image.
        * If ``imgaug.ALL``, then equivalent to tuple ``(0, 255)``.
* If a ``StochasticParameter``, a new value will be sampled from
the parameter per image.
mode : str or list of str or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
Method to use when filling in newly created pixels.
Same meaning as in ``skimage`` (and :func:`numpy.pad`):
* ``constant``: Pads with a constant value
* ``edge``: Pads with the edge values of array
* ``symmetric``: Pads with the reflection of the vector mirrored
along the edge of the array.
* ``reflect``: Pads with the reflection of the vector mirrored on
the first and last values of the vector along each axis.
* ``wrap``: Pads with the wrap of the vector along the axis.
The first values are used to pad the end and the end values
are used to pad the beginning.
        If ``cv2`` is chosen as the backend, the mapping is as follows:
* ``constant`` -> ``cv2.BORDER_CONSTANT``
* ``edge`` -> ``cv2.BORDER_REPLICATE``
* ``symmetric`` -> ``cv2.BORDER_REFLECT``
* ``reflect`` -> ``cv2.BORDER_REFLECT_101``
* ``wrap`` -> ``cv2.BORDER_WRAP``
The datatype of the parameter may be:
* If a single string, then that mode will be used for all images.
* If a list of strings, then a random mode will be picked
from that list per image.
* If ``imgaug.ALL``, then a random mode from all possible modes
will be picked.
* If ``StochasticParameter``, then the mode will be sampled from
that parameter per image, i.e. it must return only the above
mentioned strings.
fit_output : bool, optional
Whether to modify the affine transformation so that the whole output
image is always contained in the image plane (``True``) or accept
parts of the image being outside the image plane (``False``).
This can be thought of as first applying the affine transformation
and then applying a second transformation to "zoom in" on the new
    image so that it fits the image plane.
This is useful to avoid corners of the image being outside of the image
plane after applying rotations. It will however negate translation
and scaling.
Note also that activating this may lead to image sizes differing from
the input image sizes. To avoid this, wrap ``Affine`` in
:class:`imgaug.augmenters.size.KeepSizeByResize`,
e.g. ``KeepSizeByResize(Affine(...))``.
backend : str, optional
Framework to use as a backend. Valid values are ``auto``, ``skimage``
(scikit-image's warp) and ``cv2`` (OpenCV's warp).
If ``auto`` is used, the augmenter will automatically try
to use ``cv2`` whenever possible (order must be in ``[0, 1, 3]``). It
will silently fall back to skimage if order/dtype is not supported by
cv2. cv2 is generally faster than skimage. It also supports RGB cvals,
while skimage will resort to intensity cvals (i.e. 3x the same value
as RGB). If ``cv2`` is chosen and order is ``2`` or ``4``, it will
automatically fall back to order ``3``.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Affine(scale=2.0)
Zoom in on all images by a factor of ``2``.
>>> aug = iaa.Affine(translate_px=16)
Translate all images on the x- and y-axis by 16 pixels (towards the
bottom right) and fill up any new pixels with zero (black values).
>>> aug = iaa.Affine(translate_percent=0.1)
Translate all images on the x- and y-axis by ``10`` percent of their
width/height (towards the bottom right). The pixel values are computed
per axis based on that axis' size. Fill up any new pixels with zero
(black values).
>>> aug = iaa.Affine(rotate=35)
Rotate all images by ``35`` *degrees*. Fill up any new pixels with zero
(black values).
>>> aug = iaa.Affine(shear=15)
Shear all images by ``15`` *degrees*. Fill up any new pixels with zero
(black values).
>>> aug = iaa.Affine(translate_px=(-16, 16))
Translate all images on the x- and y-axis by a random value
between ``-16`` and ``16`` pixels (to the bottom right) and fill up any new
pixels with zero (black values). The translation value is sampled once
per image and is the same for both axis.
>>> aug = iaa.Affine(translate_px={"x": (-16, 16), "y": (-4, 4)})
Translate all images on the x-axis by a random value
between ``-16`` and ``16`` pixels (to the right) and on the y-axis by a
random value between ``-4`` and ``4`` pixels to the bottom. The sampling
happens independently per axis, so even if both intervals were identical,
the sampled axis-wise values would likely be different.
This also fills up any new pixels with zero (black values).
>>> aug = iaa.Affine(scale=2.0, order=[0, 1])
Same as in the above `scale` example, but uses (randomly) either
nearest neighbour interpolation or linear interpolation. If `order` is
not specified, ``order=1`` would be used by default.
>>> aug = iaa.Affine(translate_px=16, cval=(0, 255))
Same as in the `translate_px` example above, but newly created pixels
are now filled with a random color (sampled once per image and the
same for all newly created pixels within that image).
>>> aug = iaa.Affine(translate_px=16, mode=["constant", "edge"])
Similar to the previous example, but the newly created pixels are
filled with black pixels in half of all images (mode ``constant`` with
default `cval` being ``0``) and in the other half of all images using
``edge`` mode, which repeats the color of the spatially closest pixel
of the corresponding image edge.
>>> aug = iaa.Affine(shear={"y": (-45, 45)})
Shear images only on the y-axis. Set `shear` to ``shear=(-45, 45)`` to
shear randomly on both axes, using for each image the same sample for
both the x- and y-axis. Use ``shear={"x": (-45, 45), "y": (-45, 45)}``
to get independent samples per axis.
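
    >>> aug = iaa.Affine(rotate=45, fit_output=True)

    Rotate all images by ``45`` degrees and additionally modify the affine
    transformation so that the whole rotated image still fits into the
    output plane (see `fit_output`). Note that the resulting image sizes
    may then differ from the input sizes.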
"""
def __init__(self, scale=1.0, translate_percent=None, translate_px=None,
rotate=0.0, shear=0.0, order=1, cval=0, mode="constant",
fit_output=False, backend="auto",
name=None, deterministic=False, random_state=None):
super(Affine, self).__init__(
name=name, deterministic=deterministic, random_state=random_state)
assert backend in ["auto", "skimage", "cv2"], (
"Expected 'backend' to be \"auto\", \"skimage\" or \"cv2\", "
"got %s." % (backend,))
self.backend = backend
self.order = _handle_order_arg(order, backend)
self.cval = _handle_cval_arg(cval)
self.mode = _handle_mode_arg(mode)
self.scale = self._handle_scale_arg(scale)
self.translate = self._handle_translate_arg(
translate_px, translate_percent)
self.rotate = iap.handle_continuous_param(
rotate, "rotate", value_range=None, tuple_to_uniform=True,
list_to_choice=True)
self.shear, self._shear_param_type = self._handle_shear_arg(shear)
self.fit_output = fit_output
# Special order, mode and cval parameters for heatmaps and
# segmentation maps. These may either be None or a fixed value.
# Stochastic parameters are currently *not* supported.
# If set to None, the same values as for images will be used.
# That is really not recommended for the cval parameter.
#
# Segmentation map augmentation by default always pads with a
# constant value of 0 (background class id), and always uses nearest
# neighbour interpolation. While other pad modes and BG class ids
# could be used, the interpolation mode has to be NN as any other
# mode would lead to averaging class ids, which makes no sense to do.
self._order_heatmaps = 3
self._order_segmentation_maps = 0
self._mode_heatmaps = "constant"
self._mode_segmentation_maps = "constant"
self._cval_heatmaps = 0
self._cval_segmentation_maps = 0
@classmethod
def _handle_scale_arg(cls, scale):
if isinstance(scale, dict):
assert "x" in scale or "y" in scale, (
"Expected scale dictionary to contain at least key \"x\" or "
"key \"y\". Found neither of them.")
x = scale.get("x", 1.0)
y = scale.get("y", 1.0)
return (
iap.handle_continuous_param(
x, "scale['x']", value_range=(0+1e-4, None),
tuple_to_uniform=True, list_to_choice=True),
iap.handle_continuous_param(
y, "scale['y']", value_range=(0+1e-4, None),
tuple_to_uniform=True, list_to_choice=True)
)
return iap.handle_continuous_param(
scale, "scale", value_range=(0+1e-4, None),
tuple_to_uniform=True, list_to_choice=True)
@classmethod
def _handle_translate_arg(cls, translate_px, translate_percent):
# pylint: disable=no-else-return
if translate_percent is None and translate_px is None:
translate_px = 0
assert translate_percent is None or translate_px is None, (
"Expected either translate_percent or translate_px to be "
"provided, but neither of them was.")
if translate_percent is not None:
# translate by percent
if isinstance(translate_percent, dict):
assert "x" in translate_percent or "y" in translate_percent, (
"Expected translate_percent dictionary to contain at "
"least key \"x\" or key \"y\". Found neither of them.")
x = translate_percent.get("x", 0)
y = translate_percent.get("y", 0)
return (
iap.handle_continuous_param(
x, "translate_percent['x']", value_range=None,
tuple_to_uniform=True, list_to_choice=True),
iap.handle_continuous_param(
y, "translate_percent['y']", value_range=None,
tuple_to_uniform=True, list_to_choice=True),
"percent"
)
return (
iap.handle_continuous_param(
translate_percent, "translate_percent",
value_range=None, tuple_to_uniform=True,
list_to_choice=True),
None,
"percent"
)
else:
# translate by pixels
if isinstance(translate_px, dict):
assert "x" in translate_px or "y" in translate_px, (
"Expected translate_px dictionary to contain at "
"least key \"x\" or key \"y\". Found neither of them.")
x = translate_px.get("x", 0)
y = translate_px.get("y", 0)
return (
iap.handle_discrete_param(
x, "translate_px['x']", value_range=None,
tuple_to_uniform=True, list_to_choice=True,
allow_floats=False),
iap.handle_discrete_param(
y, "translate_px['y']", value_range=None,
tuple_to_uniform=True, list_to_choice=True,
allow_floats=False),
"px"
)
return (
iap.handle_discrete_param(
translate_px, "translate_px", value_range=None,
tuple_to_uniform=True, list_to_choice=True,
allow_floats=False),
None,
"px"
)
@classmethod
def _handle_shear_arg(cls, shear):
# pylint: disable=no-else-return
if isinstance(shear, dict):
assert "x" in shear or "y" in shear, (
"Expected shear dictionary to contain at "
"least key \"x\" or key \"y\". Found neither of them.")
x = shear.get("x", 0)
y = shear.get("y", 0)
return (
iap.handle_continuous_param(
x, "shear['x']", value_range=None,
tuple_to_uniform=True, list_to_choice=True),
iap.handle_continuous_param(
y, "shear['y']", value_range=None,
tuple_to_uniform=True, list_to_choice=True)
), "dict"
else:
param_type = "other"
if ia.is_single_number(shear):
param_type = "single-number"
return iap.handle_continuous_param(
shear, "shear", value_range=None, tuple_to_uniform=True,
list_to_choice=True
), param_type
def _augment_batch(self, batch, random_state, parents, hooks):
samples = self._draw_samples(batch.nb_rows, random_state)
if batch.images is not None:
batch.images = self._augment_images_by_samples(batch.images,
samples)
if batch.heatmaps is not None:
batch.heatmaps = self._augment_maps_by_samples(
batch.heatmaps, samples, "arr_0to1", self._cval_heatmaps,
self._mode_heatmaps, self._order_heatmaps, "float32")
if batch.segmentation_maps is not None:
batch.segmentation_maps = self._augment_maps_by_samples(
batch.segmentation_maps, samples, "arr",
self._cval_segmentation_maps, self._mode_segmentation_maps,
self._order_segmentation_maps, "int32")
for augm_name in ["keypoints", "bounding_boxes", "polygons",
"line_strings"]:
augm_value = getattr(batch, augm_name)
if augm_value is not None:
for i, cbaoi in enumerate(augm_value):
matrix, output_shape = samples.to_matrix_cba(
i, cbaoi.shape, self.fit_output)
if (not _is_identity_matrix(matrix)
and not cbaoi.empty
and not 0 in cbaoi.shape[0:2]):
# TODO this is hacky
if augm_name == "bounding_boxes":
# Ensure that 4 points are used for bbs.
                            # to_keypoints_on_image() does return 4 points,
# to_xy_array() does not.
kpsoi = cbaoi.to_keypoints_on_image()
coords = kpsoi.to_xy_array()
coords_aug = tf.matrix_transform(coords,
matrix.params)
kpsoi = kpsoi.fill_from_xy_array_(coords_aug)
cbaoi = cbaoi.invert_to_keypoints_on_image_(
kpsoi)
else:
coords = cbaoi.to_xy_array()
coords_aug = tf.matrix_transform(coords,
matrix.params)
cbaoi = cbaoi.fill_from_xy_array_(coords_aug)
cbaoi.shape = output_shape
augm_value[i] = cbaoi
return batch
def _augment_images_by_samples(self, images, samples,
image_shapes=None,
return_matrices=False):
nb_images = len(images)
input_was_array = ia.is_np_array(images)
input_dtype = None if not input_was_array else images.dtype
result = []
if return_matrices:
matrices = [None] * nb_images
for i in sm.xrange(nb_images):
image = images[i]
image_shape = (image.shape if image_shapes is None
else image_shapes[i])
matrix, output_shape = samples.to_matrix(i, image.shape,
image_shape,
self.fit_output)
cval = samples.cval[i]
mode = samples.mode[i]
order = samples.order[i]
if not _is_identity_matrix(matrix):
image_warped = _warp_affine_arr(
image, matrix,
order=order, mode=mode, cval=cval,
output_shape=output_shape, backend=self.backend)
result.append(image_warped)
else:
result.append(image)
if return_matrices:
matrices[i] = matrix
# the shapes can change due to fit_output, then it may not be possible
# to return an array, even when the input was an array
if input_was_array:
nb_shapes = len({image.shape for image in result})
if nb_shapes == 1:
result = np.array(result, input_dtype)
if return_matrices:
result = (result, matrices)
return result
def _augment_maps_by_samples(self, augmentables, samples,
arr_attr_name, cval, mode, order, cval_dtype):
nb_images = len(augmentables)
samples = samples.copy()
if cval is not None:
samples.cval = np.full((nb_images, 1), cval, dtype=cval_dtype)
if mode is not None:
samples.mode = [mode] * nb_images
if order is not None:
samples.order = [order] * nb_images
arrs = [getattr(augmentable, arr_attr_name)
for augmentable in augmentables]
image_shapes = [augmentable.shape for augmentable in augmentables]
arrs_aug, matrices = self._augment_images_by_samples(
arrs, samples, image_shapes=image_shapes, return_matrices=True)
gen = zip(augmentables, arrs_aug, matrices, samples.order)
for augmentable_i, arr_aug, matrix, order_i in gen:
# skip augmented HM/SM arrs for which the images were not
# augmented due to being zero-sized
if 0 in augmentable_i.shape:
continue
            # order=3 matches cubic interpolation and can cause values to go
            # outside of the range [0.0, 1.0]; not clear whether 4+ also do that.
# We don't clip here for Segmentation Maps, because for these
# the value range isn't clearly limited to [0, 1] (and they should
# also never use order=3 to begin with).
# TODO add test for this
if order_i >= 3 and isinstance(augmentable_i, ia.HeatmapsOnImage):
arr_aug = np.clip(arr_aug, 0.0, 1.0, out=arr_aug)
setattr(augmentable_i, arr_attr_name, arr_aug)
if self.fit_output:
_, output_shape_i = _compute_affine_warp_output_shape(
matrix, augmentable_i.shape)
else:
output_shape_i = augmentable_i.shape
augmentable_i.shape = output_shape_i
return augmentables
def _draw_samples(self, nb_samples, random_state):
rngs = random_state.duplicate(12)
if isinstance(self.scale, tuple):
scale_samples = (
self.scale[0].draw_samples((nb_samples,), random_state=rngs[0]),
self.scale[1].draw_samples((nb_samples,), random_state=rngs[1]),
)
else:
scale_samples = self.scale.draw_samples((nb_samples,),
random_state=rngs[2])
scale_samples = (scale_samples, scale_samples)
if self.translate[1] is not None:
translate_samples = (
self.translate[0].draw_samples((nb_samples,),
random_state=rngs[3]),
self.translate[1].draw_samples((nb_samples,),
random_state=rngs[4]),
)
else:
translate_samples = self.translate[0].draw_samples(
(nb_samples,), random_state=rngs[5])
translate_samples = (translate_samples, translate_samples)
rotate_samples = self.rotate.draw_samples((nb_samples,),
random_state=rngs[6])
if self._shear_param_type == "dict":
shear_samples = (
self.shear[0].draw_samples((nb_samples,), random_state=rngs[7]),
self.shear[1].draw_samples((nb_samples,), random_state=rngs[8])
)
elif self._shear_param_type == "single-number":
# only shear on the x-axis if a single number was given
shear_samples = self.shear.draw_samples((nb_samples,),
random_state=rngs[7])
shear_samples = (shear_samples, np.zeros_like(shear_samples))
else:
shear_samples = self.shear.draw_samples((nb_samples,),
random_state=rngs[7])
shear_samples = (shear_samples, shear_samples)
cval_samples = self.cval.draw_samples((nb_samples, 3),
random_state=rngs[9])
mode_samples = self.mode.draw_samples((nb_samples,),
random_state=rngs[10])
order_samples = self.order.draw_samples((nb_samples,),
random_state=rngs[11])
return _AffineSamplingResult(
scale=scale_samples,
translate=translate_samples,
translate_mode=self.translate[2],
rotate=rotate_samples,
shear=shear_samples,
cval=cval_samples,
mode=mode_samples,
order=order_samples)
def get_parameters(self):
"""See :func:`imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [
self.scale, self.translate, self.rotate, self.shear, self.order,
self.cval, self.mode, self.backend, self.fit_output]
class ScaleX(Affine):
"""Apply affine scaling on the x-axis to input data.
This is a wrapper around :class:`Affine`.
dtype support::
See :class:`imgaug.augmenters.geometric.Affine`.
Parameters
----------
scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Analogous to ``scale`` in :class:`Affine`, except that this scale
value only affects the x-axis. No dictionary input is allowed.
order : int or iterable of int or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
cval : number or tuple of number or list of number or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
mode : str or list of str or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
fit_output : bool, optional
See :class:`Affine`.
backend : str, optional
See :class:`Affine`.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.ScaleX((0.5, 1.5))
Create an augmenter that scales images along the width to sizes between
``50%`` and ``150%``. This does not change the image shape (i.e. height
and width), only the pixels within the image are remapped and potentially
new ones are filled in.
"""
def __init__(self, scale, order=1, cval=0, mode="constant",
fit_output=False, backend="auto",
name=None, deterministic=False, random_state=None):
super(ScaleX, self).__init__(
scale={"x": scale},
order=order,
cval=cval,
mode=mode,
fit_output=fit_output,
backend=backend,
name=name,
deterministic=deterministic,
random_state=random_state
)
# TODO make Affine more efficient for translation-only transformations
class TranslateX(Affine):
"""Apply affine translation on the x-axis to input data.
This is a wrapper around :class:`Affine`.
dtype support::
See :class:`imgaug.augmenters.geometric.Affine`.
Parameters
----------
percent : None or number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Analogous to ``translate_percent`` in :class:`Affine`, except that
this translation value only affects the x-axis. No dictionary input
is allowed.
px : None or int or tuple of int or list of int or imgaug.parameters.StochasticParameter or dict {"x": int/tuple/list/StochasticParameter, "y": int/tuple/list/StochasticParameter}, optional
Analogous to ``translate_px`` in :class:`Affine`, except that
this translation value only affects the x-axis. No dictionary input
is allowed.
order : int or iterable of int or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
cval : number or tuple of number or list of number or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
mode : str or list of str or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
fit_output : bool, optional
See :class:`Affine`.
backend : str, optional
See :class:`Affine`.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.TranslateX(px=(-20, 20))
Create an augmenter that translates images along the x-axis by
``-20`` to ``20`` pixels.
>>> aug = iaa.TranslateX(percent=(-0.1, 0.1))
Create an augmenter that translates images along the x-axis by
``-10%`` to ``10%`` (relative to the x-axis size).
"""
def __init__(self, percent=None, px=None, order=1,
cval=0, mode="constant", fit_output=False, backend="auto",
name=None, deterministic=False, random_state=None):
# we don't test here if both are not-None at the same time, because
# that is already checked in Affine
assert percent is not None or px is not None, (
"Expected either `percent` to be not-None or "
"`px` to be not-None, but both were None.")
super(TranslateX, self).__init__(
translate_percent=({"x": percent} if percent is not None else None),
translate_px=({"x": px} if px is not None else None),
order=order,
cval=cval,
mode=mode,
fit_output=fit_output,
backend=backend,
name=name,
deterministic=deterministic,
random_state=random_state
)
# TODO make Affine more efficient for translation-only transformations
class TranslateY(Affine):
"""Apply affine translation on the y-axis to input data.
This is a wrapper around :class:`Affine`.
dtype support::
See :class:`imgaug.augmenters.geometric.Affine`.
Parameters
----------
percent : None or number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Analogous to ``translate_percent`` in :class:`Affine`, except that
this translation value only affects the y-axis. No dictionary input
is allowed.
px : None or int or tuple of int or list of int or imgaug.parameters.StochasticParameter or dict {"x": int/tuple/list/StochasticParameter, "y": int/tuple/list/StochasticParameter}, optional
Analogous to ``translate_px`` in :class:`Affine`, except that
this translation value only affects the y-axis. No dictionary input
is allowed.
order : int or iterable of int or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
cval : number or tuple of number or list of number or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
mode : str or list of str or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
fit_output : bool, optional
See :class:`Affine`.
backend : str, optional
See :class:`Affine`.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.TranslateY(px=(-20, 20))
Create an augmenter that translates images along the y-axis by
``-20`` to ``20`` pixels.
>>> aug = iaa.TranslateY(percent=(-0.1, 0.1))
Create an augmenter that translates images along the y-axis by
``-10%`` to ``10%`` (relative to the y-axis size).
"""
def __init__(self, percent=None, px=None, order=1,
cval=0, mode="constant", fit_output=False, backend="auto",
name=None, deterministic=False, random_state=None):
# we don't test here if both are not-None at the same time, because
# that is already checked in Affine
assert percent is not None or px is not None, (
"Expected either `percent` to be not-None or "
"`px` to be not-None, but both were None.")
super(TranslateY, self).__init__(
translate_percent=({"y": percent} if percent is not None else None),
translate_px=({"y": px} if px is not None else None),
order=order,
cval=cval,
mode=mode,
fit_output=fit_output,
backend=backend,
name=name,
deterministic=deterministic,
random_state=random_state
)
class ScaleY(Affine):
"""Apply affine scaling on the y-axis to input data.
This is a wrapper around :class:`Affine`.
dtype support::
See :class:`imgaug.augmenters.geometric.Affine`.
Parameters
----------
scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Analogous to ``scale`` in :class:`Affine`, except that this scale
value only affects the y-axis. No dictionary input is allowed.
order : int or iterable of int or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
cval : number or tuple of number or list of number or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
mode : str or list of str or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
fit_output : bool, optional
See :class:`Affine`.
backend : str, optional
See :class:`Affine`.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.ScaleY((0.5, 1.5))
Create an augmenter that scales images along the height to sizes between
``50%`` and ``150%``. This does not change the image shape (i.e. height
and width), only the pixels within the image are remapped and potentially
new ones are filled in.
"""
def __init__(self, scale, order=1, cval=0, mode="constant",
fit_output=False, backend="auto",
name=None, deterministic=False, random_state=None):
super(ScaleY, self).__init__(
scale={"y": scale},
order=order,
cval=cval,
mode=mode,
fit_output=fit_output,
backend=backend,
name=name,
deterministic=deterministic,
random_state=random_state
)
class Rotate(Affine):
"""Apply affine rotation on the y-axis to input data.
This is a wrapper around :class:`Affine`.
It is the same as ``Affine(rotate=<value>)``.
dtype support::
See :class:`imgaug.augmenters.geometric.Affine`.
Parameters
----------
rotate : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
order : int or iterable of int or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
cval : number or tuple of number or list of number or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
mode : str or list of str or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
fit_output : bool, optional
See :class:`Affine`.
backend : str, optional
See :class:`Affine`.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Rotate((-45, 45))
Create an augmenter that rotates images by a random value between ``-45``
    and ``45`` degrees.
"""
def __init__(self, rotate, order=1, cval=0, mode="constant",
fit_output=False, backend="auto",
name=None, deterministic=False, random_state=None):
super(Rotate, self).__init__(
rotate=rotate,
order=order,
cval=cval,
mode=mode,
fit_output=fit_output,
backend=backend,
name=name,
deterministic=deterministic,
random_state=random_state
)
class ShearX(Affine):
"""Apply affine shear on the x-axis to input data.
This is a wrapper around :class:`Affine`.
dtype support::
See :class:`imgaug.augmenters.geometric.Affine`.
Parameters
----------
shear : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Analogous to ``shear`` in :class:`Affine`, except that this shear
value only affects the x-axis. No dictionary input is allowed.
order : int or iterable of int or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
cval : number or tuple of number or list of number or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
mode : str or list of str or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
fit_output : bool, optional
See :class:`Affine`.
backend : str, optional
See :class:`Affine`.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
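
    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.ShearX((-20, 20))

    Create an augmenter that shears images along the x-axis by a random
    value between ``-20`` and ``20`` degrees (one value sampled per image).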
"""
def __init__(self, shear, order=1, cval=0, mode="constant",
fit_output=False, backend="auto",
name=None, deterministic=False, random_state=None):
super(ShearX, self).__init__(
shear={"x": shear},
order=order,
cval=cval,
mode=mode,
fit_output=fit_output,
backend=backend,
name=name,
deterministic=deterministic,
random_state=random_state
)
class ShearY(Affine):
"""Apply affine shear on the y-axis to input data.
This is a wrapper around :class:`Affine`.
dtype support::
See :class:`imgaug.augmenters.geometric.Affine`.
Parameters
----------
shear : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Analogous to ``shear`` in :class:`Affine`, except that this shear
value only affects the y-axis. No dictionary input is allowed.
order : int or iterable of int or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
cval : number or tuple of number or list of number or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
mode : str or list of str or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :class:`Affine`.
fit_output : bool, optional
See :class:`Affine`.
backend : str, optional
See :class:`Affine`.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
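
    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.ShearY((-20, 20))

    Create an augmenter that shears images along the y-axis by a random
    value between ``-20`` and ``20`` degrees (one value sampled per image).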
"""
def __init__(self, shear, order=1, cval=0, mode="constant",
fit_output=False, backend="auto",
name=None, deterministic=False, random_state=None):
super(ShearY, self).__init__(
shear={"y": shear},
order=order,
cval=cval,
mode=mode,
fit_output=fit_output,
backend=backend,
name=name,
deterministic=deterministic,
random_state=random_state
)
class AffineCv2(meta.Augmenter):
"""
**Deprecated.** Augmenter to apply affine transformations to images using
cv2 (i.e. opencv) backend.
.. warning::
This augmenter is deprecated since 0.4.0.
Use ``Affine(..., backend='cv2')`` instead.
Affine transformations
involve:
- Translation ("move" image on the x-/y-axis)
- Rotation
- Scaling ("zoom" in/out)
- Shear (move one side of the image, turning a square into a trapezoid)
All such transformations can create "new" pixels in the image without a
defined content, e.g. if the image is translated to the left, pixels
are created on the right.
A method has to be defined to deal with these pixel values. The
parameters `cval` and `mode` of this class deal with this.
Some transformations involve interpolations between several pixels
of the input image to generate output pixel values. The parameter `order`
deals with the method of interpolation used for this.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: ?
* ``uint32``: ?
* ``uint64``: ?
* ``int8``: ?
* ``int16``: ?
* ``int32``: ?
* ``int64``: ?
* ``float16``: ?
* ``float32``: ?
* ``float64``: ?
* ``float128``: ?
* ``bool``: ?
Parameters
----------
scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter or dict {"x": number/tuple/list/StochasticParameter, "y": number/tuple/list/StochasticParameter}, optional
        Scaling factor to use, where ``1.0`` denotes "no change" and
``0.5`` is zoomed out to ``50`` percent of the original size.
* If a single number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``. That value will be
used identically for both x- and y-axis.
* If a list, then a random value will be sampled from that list
per image (again, used for both x- and y-axis).
* If a ``StochasticParameter``, then from that parameter a value
will be sampled per image (again, used for both x- and y-axis).
* If a dictionary, then it is expected to have the keys ``x``
and/or ``y``. Each of these keys can have the same values as
              described above. Using a dictionary allows setting different
              values for the two axes; sampling will then happen
              *independently* per axis, resulting in samples that differ
              between the axes.
translate_percent : number or tuple of number or list of number or imgaug.parameters.StochasticParameter or dict {"x": number/tuple/list/StochasticParameter, "y": number/tuple/list/StochasticParameter}, optional
Translation as a fraction of the image height/width (x-translation,
y-translation), where ``0`` denotes "no change" and ``0.5`` denotes
"half of the axis size".
* If ``None`` then equivalent to ``0.0`` unless `translate_px` has
a value other than ``None``.
* If a single number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]``. That sampled fraction
value will be used identically for both x- and y-axis.
* If a list, then a random value will be sampled from that list
per image (again, used for both x- and y-axis).
* If a ``StochasticParameter``, then from that parameter a value
will be sampled per image (again, used for both x- and y-axis).
* If a dictionary, then it is expected to have the keys ``x``
and/or ``y``. Each of these keys can have the same values as
              described above. Using a dictionary allows setting different
              values for the two axes; sampling will then happen
              *independently* per axis, resulting in samples that differ
              between the axes.
translate_px : int or tuple of int or list of int or imgaug.parameters.StochasticParameter or dict {"x": int/tuple/list/StochasticParameter, "y": int/tuple/list/StochasticParameter}, optional
Translation in pixels.
* If ``None`` then equivalent to ``0`` unless `translate_percent`
has a value other than ``None``.
* If a single int, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value will be uniformly sampled
per image from the discrete interval ``[a..b]``. That number
will be used identically for both x- and y-axis.
* If a list, then a random value will be sampled from that list
per image (again, used for both x- and y-axis).
* If a ``StochasticParameter``, then from that parameter a value
will be sampled per image (again, used for both x- and y-axis).
* If a dictionary, then it is expected to have the keys ``x``
and/or ``y``. Each of these keys can have the same values as
              described above. Using a dictionary allows setting different
              values for the two axes; sampling will then happen
              *independently* per axis, resulting in samples that differ
              between the axes.
rotate : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Rotation in degrees (**NOT** radians), i.e. expected value range is
around ``[-360, 360]``. Rotation happens around the *center* of the
image, not the top left corner as in some other frameworks.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value will be uniformly sampled
per image from the interval ``[a, b]`` and used as the rotation
value.
* If a list, then a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, then this parameter will be used to
sample the rotation value per image.
shear : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Shear in degrees (**NOT** radians), i.e. expected value range is
around ``[-360, 360]``.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value will be uniformly sampled
              per image from the interval ``[a, b]`` and be used as the
              shear value.
* If a list, then a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, then this parameter will be used
to sample the shear value per image.
    order : int or list of int or str or list of str or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
Interpolation order to use. Allowed are:
* ``cv2.INTER_NEAREST`` (nearest-neighbor interpolation)
* ``cv2.INTER_LINEAR`` (bilinear interpolation, used by default)
* ``cv2.INTER_CUBIC`` (bicubic interpolation over ``4x4`` pixel
neighborhood)
* ``cv2.INTER_LANCZOS4``
* string ``nearest`` (same as ``cv2.INTER_NEAREST``)
* string ``linear`` (same as ``cv2.INTER_LINEAR``)
* string ``cubic`` (same as ``cv2.INTER_CUBIC``)
            * string ``lanczos4`` (same as ``cv2.INTER_LANCZOS4``)
        ``INTER_NEAREST`` (nearest neighbour interpolation) and
        ``INTER_LINEAR`` (linear interpolation) are the fastest.
* If a single ``int``, then that order will be used for all images.
* If a string, then it must be one of: ``nearest``, ``linear``,
``cubic``, ``lanczos4``.
* If an iterable of ``int``/``str``, then for each image a random
value will be sampled from that iterable (i.e. list of allowed
order values).
            * If ``imgaug.ALL``, then equivalent to list ``[cv2.INTER_NEAREST,
cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_LANCZOS4]``.
* If ``StochasticParameter``, then that parameter is queried per
image to sample the order value to use.
    cval : number or tuple of number or list of number or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
The constant value to use when filling in newly created pixels.
(E.g. translating by 1px to the right will create a new 1px-wide
column of pixels on the left of the image). The value is only used
when `mode=constant`. The expected value range is ``[0, 255]`` for
``uint8`` images. It may be a float value.
* If this is a single number, then that value will be used
(e.g. 0 results in black pixels).
* If a tuple ``(a, b)``, then three values (for three image
channels) will be uniformly sampled per image from the
interval ``[a, b]``.
* If a list, then a random value will be sampled from that list
per image.
            * If ``imgaug.ALL``, then equivalent to tuple ``(0, 255)``.
* If a ``StochasticParameter``, a new value will be sampled from
the parameter per image.
    mode : int or str or list of str or list of int or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
Method to use when filling in newly created pixels.
Same meaning as in OpenCV's border mode. Let ``abcdefgh`` be an image's
content and ``|`` be an image boundary after which new pixels are
filled in, then the valid modes and their behaviour are the following:
* ``cv2.BORDER_REPLICATE``: ``aaaaaa|abcdefgh|hhhhhhh``
* ``cv2.BORDER_REFLECT``: ``fedcba|abcdefgh|hgfedcb``
* ``cv2.BORDER_REFLECT_101``: ``gfedcb|abcdefgh|gfedcba``
* ``cv2.BORDER_WRAP``: ``cdefgh|abcdefgh|abcdefg``
* ``cv2.BORDER_CONSTANT``: ``iiiiii|abcdefgh|iiiiiii``,
where ``i`` is the defined cval.
* ``replicate``: Same as ``cv2.BORDER_REPLICATE``.
* ``reflect``: Same as ``cv2.BORDER_REFLECT``.
* ``reflect_101``: Same as ``cv2.BORDER_REFLECT_101``.
* ``wrap``: Same as ``cv2.BORDER_WRAP``.
* ``constant``: Same as ``cv2.BORDER_CONSTANT``.
The datatype of the parameter may be:
* If a single ``int``, then it must be one of the ``cv2.BORDER_*``
constants.
* If a single string, then it must be one of: ``replicate``,
``reflect``, ``reflect_101``, ``wrap``, ``constant``.
* If a list of ``int``/``str``, then per image a random mode will
be picked from that list.
* If ``imgaug.ALL``, then a random mode from all possible modes
will be picked.
* If ``StochasticParameter``, then the mode will be sampled from
that parameter per image, i.e. it must return only the above
mentioned strings.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.AffineCv2(scale=2.0)
Zoom in on all images by a factor of ``2``.
>>> aug = iaa.AffineCv2(translate_px=16)
Translate all images on the x- and y-axis by 16 pixels (towards the
bottom right) and fill up any new pixels with zero (black values).
>>> aug = iaa.AffineCv2(translate_percent=0.1)
Translate all images on the x- and y-axis by ``10`` percent of their
width/height (towards the bottom right). The pixel values are computed
per axis based on that axis' size. Fill up any new pixels with zero
(black values).
>>> aug = iaa.AffineCv2(rotate=35)
Rotate all images by ``35`` *degrees*. Fill up any new pixels with zero
(black values).
>>> aug = iaa.AffineCv2(shear=15)
Shear all images by ``15`` *degrees*. Fill up any new pixels with zero
(black values).
>>> aug = iaa.AffineCv2(translate_px=(-16, 16))
Translate all images on the x- and y-axis by a random value
between ``-16`` and ``16`` pixels (to the bottom right) and fill up any new
pixels with zero (black values). The translation value is sampled once
per image and is the same for both axis.
>>> aug = iaa.AffineCv2(translate_px={"x": (-16, 16), "y": (-4, 4)})
Translate all images on the x-axis by a random value
between ``-16`` and ``16`` pixels (to the right) and on the y-axis by a
random value between ``-4`` and ``4`` pixels to the bottom. The sampling
happens independently per axis, so even if both intervals were identical,
the sampled axis-wise values would likely be different.
This also fills up any new pixels with zero (black values).
>>> aug = iaa.AffineCv2(scale=2.0, order=[0, 1])
Same as in the above `scale` example, but uses (randomly) either
nearest neighbour interpolation or linear interpolation. If `order` is
not specified, ``order=1`` would be used by default.
>>> aug = iaa.AffineCv2(translate_px=16, cval=(0, 255))
Same as in the `translate_px` example above, but newly created pixels
are now filled with a random color (sampled once per image and the
same for all newly created pixels within that image).
>>> aug = iaa.AffineCv2(translate_px=16, mode=["constant", "replicate"])
Similar to the previous example, but the newly created pixels are
filled with black pixels in half of all images (mode ``constant`` with
default `cval` being ``0``) and in the other half of all images using
``replicate`` mode, which repeats the color of the spatially closest pixel
of the corresponding image edge.
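    As this augmenter is deprecated, the following sketch shows the
    replacement recommended in the warning above (parameters are
    illustrative):

    >>> aug = iaa.Affine(translate_px=16, backend="cv2")

    Perform the same translation as in the second example, but via
    :class:`Affine` with its ``cv2`` backend.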
"""
def __init__(self, scale=1.0, translate_percent=None, translate_px=None,
rotate=0.0, shear=0.0, order=cv2.INTER_LINEAR, cval=0,
mode=cv2.BORDER_CONSTANT,
name=None, deterministic=False, random_state=None):
super(AffineCv2, self).__init__(
name=name, deterministic=deterministic, random_state=random_state)
# using a context on __init__ seems to produce no warning,
# so warn manually here
ia.warn_deprecated(
"AffineCv2 is deprecated. "
"Use imgaug.augmenters.geometric.Affine(..., backend='cv2') "
"instead.", stacklevel=4)
available_orders = [cv2.INTER_NEAREST, cv2.INTER_LINEAR,
cv2.INTER_CUBIC, cv2.INTER_LANCZOS4]
available_orders_str = ["nearest", "linear", "cubic", "lanczos4"]
if order == ia.ALL:
self.order = iap.Choice(available_orders)
elif ia.is_single_integer(order):
assert order in available_orders, (
"Expected order's integer value to be in %s, got %d." % (
str(available_orders), order))
self.order = iap.Deterministic(order)
elif ia.is_string(order):
assert order in available_orders_str, (
"Expected order to be in %s, got %s." % (
str(available_orders_str), order))
self.order = iap.Deterministic(order)
elif isinstance(order, list):
valid_types = all(
[ia.is_single_integer(val) or ia.is_string(val)
for val in order])
assert valid_types, (
"Expected order list to only contain integers/strings, got "
"types %s." % (str([type(val) for val in order]),))
valid_orders = all(
[val in available_orders + available_orders_str
for val in order])
assert valid_orders, (
"Expected all order values to be in %s, got %s." % (
available_orders + available_orders_str, str(order),))
self.order = iap.Choice(order)
elif isinstance(order, iap.StochasticParameter):
self.order = order
else:
raise Exception(
"Expected order to be imgaug.ALL, int, string, a list of"
"int/string or StochasticParameter, got %s." % (type(order),))
if cval == ia.ALL:
self.cval = iap.DiscreteUniform(0, 255)
else:
self.cval = iap.handle_discrete_param(
cval, "cval", value_range=(0, 255), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
available_modes = [cv2.BORDER_REPLICATE, cv2.BORDER_REFLECT,
cv2.BORDER_REFLECT_101, cv2.BORDER_WRAP,
cv2.BORDER_CONSTANT]
available_modes_str = ["replicate", "reflect", "reflect_101",
"wrap", "constant"]
if mode == ia.ALL:
self.mode = iap.Choice(available_modes)
elif ia.is_single_integer(mode):
assert mode in available_modes, (
"Expected mode to be in %s, got %d." % (
str(available_modes), mode))
self.mode = iap.Deterministic(mode)
elif ia.is_string(mode):
assert mode in available_modes_str, (
"Expected mode to be in %s, got %s." % (
str(available_modes_str), mode))
self.mode = iap.Deterministic(mode)
elif isinstance(mode, list):
all_valid_types = all([
ia.is_single_integer(val) or ia.is_string(val) for val in mode])
assert all_valid_types, (
"Expected mode list to only contain integers/strings, "
"got types %s." % (str([type(val) for val in mode]),))
all_valid_modes = all([
val in available_modes + available_modes_str for val in mode])
assert all_valid_modes, (
"Expected all mode values to be in %s, got %s." % (
str(available_modes + available_modes_str), str(mode)))
self.mode = iap.Choice(mode)
elif isinstance(mode, iap.StochasticParameter):
self.mode = mode
else:
raise Exception(
"Expected mode to be imgaug.ALL, an int, a string, a list of "
"int/strings or StochasticParameter, got %s." % (type(mode),))
# scale
if isinstance(scale, dict):
assert "x" in scale or "y" in scale, (
"Expected scale dictionary to contain at "
"least key \"x\" or key \"y\". Found neither of them.")
x = scale.get("x", 1.0)
y = scale.get("y", 1.0)
self.scale = (
iap.handle_continuous_param(
x, "scale['x']", value_range=(0+1e-4, None),
tuple_to_uniform=True, list_to_choice=True),
iap.handle_continuous_param(
y, "scale['y']", value_range=(0+1e-4, None),
tuple_to_uniform=True, list_to_choice=True)
)
else:
self.scale = iap.handle_continuous_param(
scale, "scale", value_range=(0+1e-4, None),
tuple_to_uniform=True, list_to_choice=True)
# translate
if translate_percent is None and translate_px is None:
translate_px = 0
assert translate_percent is None or translate_px is None, (
"Expected either translate_percent or translate_px to be "
"provided, but neither of them was.")
if translate_percent is not None:
# translate by percent
if isinstance(translate_percent, dict):
assert "x" in translate_percent or "y" in translate_percent, (
"Expected translate_percent dictionary to contain at "
"least key \"x\" or key \"y\". Found neither of them.")
x = translate_percent.get("x", 0)
y = translate_percent.get("y", 0)
self.translate = (
iap.handle_continuous_param(
x, "translate_percent['x']", value_range=None,
tuple_to_uniform=True, list_to_choice=True),
iap.handle_continuous_param(
y, "translate_percent['y']", value_range=None,
tuple_to_uniform=True, list_to_choice=True)
)
else:
self.translate = iap.handle_continuous_param(
translate_percent, "translate_percent", value_range=None,
tuple_to_uniform=True, list_to_choice=True)
else:
# translate by pixels
if isinstance(translate_px, dict):
assert "x" in translate_px or "y" in translate_px, (
"Expected translate_px dictionary to contain at "
"least key \"x\" or key \"y\". Found neither of them.")
x = translate_px.get("x", 0)
y = translate_px.get("y", 0)
self.translate = (
iap.handle_discrete_param(
x, "translate_px['x']", value_range=None,
tuple_to_uniform=True, list_to_choice=True,
allow_floats=False),
iap.handle_discrete_param(
y, "translate_px['y']", value_range=None,
tuple_to_uniform=True, list_to_choice=True,
allow_floats=False)
)
else:
self.translate = iap.handle_discrete_param(
translate_px, "translate_px", value_range=None,
tuple_to_uniform=True, list_to_choice=True,
allow_floats=False)
self.rotate = iap.handle_continuous_param(
rotate, "rotate", value_range=None, tuple_to_uniform=True,
list_to_choice=True)
self.shear = iap.handle_continuous_param(
shear, "shear", value_range=None, tuple_to_uniform=True,
list_to_choice=True)
def _augment_images(self, images, random_state, parents, hooks):
nb_images = len(images)
scale_samples, translate_samples, rotate_samples, shear_samples, \
cval_samples, mode_samples, order_samples = self._draw_samples(
nb_images, random_state)
result = self._augment_images_by_samples(
images, scale_samples, translate_samples, rotate_samples,
shear_samples, cval_samples, mode_samples, order_samples)
return result
@classmethod
def _augment_images_by_samples(cls, images, scale_samples,
translate_samples, rotate_samples,
shear_samples, cval_samples, mode_samples,
order_samples):
# TODO change these to class attributes
order_str_to_int = {
"nearest": cv2.INTER_NEAREST,
"linear": cv2.INTER_LINEAR,
"cubic": cv2.INTER_CUBIC,
"lanczos4": cv2.INTER_LANCZOS4
}
mode_str_to_int = {
"replicate": cv2.BORDER_REPLICATE,
"reflect": cv2.BORDER_REFLECT,
"reflect_101": cv2.BORDER_REFLECT_101,
"wrap": cv2.BORDER_WRAP,
"constant": cv2.BORDER_CONSTANT
}
nb_images = len(images)
result = images
for i in sm.xrange(nb_images):
height, width = images[i].shape[0], images[i].shape[1]
shift_x = width / 2.0 - 0.5
shift_y = height / 2.0 - 0.5
scale_x, scale_y = scale_samples[0][i], scale_samples[1][i]
translate_x = translate_samples[0][i]
translate_y = translate_samples[1][i]
if ia.is_single_float(translate_y):
translate_y_px = int(
np.round(translate_y * images[i].shape[0]))
else:
translate_y_px = translate_y
if ia.is_single_float(translate_x):
translate_x_px = int(
np.round(translate_x * images[i].shape[1]))
else:
translate_x_px = translate_x
rotate = rotate_samples[i]
shear = shear_samples[i]
cval = cval_samples[i]
mode = mode_samples[i]
order = order_samples[i]
mode = (mode
if ia.is_single_integer(mode)
else mode_str_to_int[mode])
order = (order
if ia.is_single_integer(order)
else order_str_to_int[order])
any_change = (
scale_x != 1.0 or scale_y != 1.0
or translate_x_px != 0 or translate_y_px != 0
or rotate != 0 or shear != 0
)
if any_change:
matrix_to_topleft = tf.SimilarityTransform(
translation=[-shift_x, -shift_y])
matrix_transforms = tf.AffineTransform(
scale=(scale_x, scale_y),
translation=(translate_x_px, translate_y_px),
rotation=math.radians(rotate),
shear=math.radians(shear)
)
matrix_to_center = tf.SimilarityTransform(
translation=[shift_x, shift_y])
matrix = (matrix_to_topleft
+ matrix_transforms
+ matrix_to_center)
image_warped = cv2.warpAffine(
images[i],
matrix.params[:2],
dsize=(width, height),
flags=order,
borderMode=mode,
borderValue=tuple([int(v) for v in cval])
)
# cv2 warp drops last axis if shape is (H, W, 1)
if image_warped.ndim == 2:
image_warped = image_warped[..., np.newaxis]
                # cv2.warpAffine() preserves the input dtype, so the warped
                # image can be written back directly
result[i] = image_warped
else:
result[i] = images[i]
return result
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
nb_images = len(heatmaps)
scale_samples, translate_samples, rotate_samples, shear_samples, \
cval_samples, mode_samples, order_samples = self._draw_samples(
nb_images, random_state)
cval_samples = np.zeros((cval_samples.shape[0], 1), dtype=np.float32)
mode_samples = ["constant"] * len(mode_samples)
arrs = [heatmap_i.arr_0to1 for heatmap_i in heatmaps]
arrs_aug = self._augment_images_by_samples(
arrs, scale_samples, translate_samples, rotate_samples,
shear_samples, cval_samples, mode_samples, order_samples)
for heatmap_i, arr_aug in zip(heatmaps, arrs_aug):
heatmap_i.arr_0to1 = arr_aug
return heatmaps
def _augment_segmentation_maps(self, segmaps, random_state, parents, hooks):
nb_images = len(segmaps)
scale_samples, translate_samples, rotate_samples, shear_samples, \
cval_samples, mode_samples, order_samples = self._draw_samples(
nb_images, random_state)
cval_samples = np.zeros((cval_samples.shape[0], 1), dtype=np.float32)
mode_samples = ["constant"] * len(mode_samples)
order_samples = [0] * len(order_samples)
arrs = [segmaps_i.arr for segmaps_i in segmaps]
arrs_aug = self._augment_images_by_samples(
arrs, scale_samples, translate_samples, rotate_samples,
shear_samples, cval_samples, mode_samples, order_samples)
for segmaps_i, arr_aug in zip(segmaps, arrs_aug):
segmaps_i.arr = arr_aug
return segmaps
def _augment_keypoints(self, keypoints_on_images, random_state, parents,
hooks):
result = []
nb_images = len(keypoints_on_images)
scale_samples, translate_samples, rotate_samples, shear_samples, \
_cval_samples, _mode_samples, _order_samples = self._draw_samples(
nb_images, random_state)
for i, keypoints_on_image in enumerate(keypoints_on_images):
if not keypoints_on_image.keypoints:
# AffineCv2 does not change the image shape, hence we can skip
# all steps below if there are no keypoints
result.append(keypoints_on_image)
continue
height, width = keypoints_on_image.height, keypoints_on_image.width
shift_x = width / 2.0 - 0.5
shift_y = height / 2.0 - 0.5
scale_x, scale_y = scale_samples[0][i], scale_samples[1][i]
translate_x = translate_samples[0][i]
translate_y = translate_samples[1][i]
if ia.is_single_float(translate_y):
translate_y_px = int(
np.round(translate_y * keypoints_on_image.shape[0]))
else:
translate_y_px = translate_y
if ia.is_single_float(translate_x):
translate_x_px = int(
np.round(translate_x * keypoints_on_image.shape[1]))
else:
translate_x_px = translate_x
rotate = rotate_samples[i]
shear = shear_samples[i]
any_change = (
scale_x != 1.0 or scale_y != 1.0
or translate_x_px != 0 or translate_y_px != 0
or rotate != 0 or shear != 0
)
if any_change:
matrix_to_topleft = tf.SimilarityTransform(
translation=[-shift_x, -shift_y])
matrix_transforms = tf.AffineTransform(
scale=(scale_x, scale_y),
translation=(translate_x_px, translate_y_px),
rotation=math.radians(rotate),
shear=math.radians(shear)
)
matrix_to_center = tf.SimilarityTransform(
translation=[shift_x, shift_y])
matrix = (matrix_to_topleft
+ matrix_transforms
+ matrix_to_center)
coords = keypoints_on_image.to_xy_array()
coords_aug = tf.matrix_transform(coords, matrix.params)
                kps_new = [kp.deepcopy(x=coords_aug_i[0], y=coords_aug_i[1])
                           for kp, coords_aug_i
                           in zip(keypoints_on_image.keypoints, coords_aug)]
result.append(keypoints_on_image.deepcopy(
keypoints=kps_new,
shape=keypoints_on_image.shape
))
else:
result.append(keypoints_on_image)
return result
def _augment_polygons(self, polygons_on_images, random_state, parents,
hooks):
return self._augment_polygons_as_keypoints(
polygons_on_images, random_state, parents, hooks)
def _augment_line_strings(self, line_strings_on_images, random_state,
parents, hooks):
return self._augment_line_strings_as_keypoints(
line_strings_on_images, random_state, parents, hooks)
def _augment_bounding_boxes(self, bounding_boxes_on_images, random_state,
parents, hooks):
return self._augment_bounding_boxes_as_keypoints(
bounding_boxes_on_images, random_state, parents, hooks)
def get_parameters(self):
"""See :func:`imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.scale, self.translate, self.rotate, self.shear,
self.order, self.cval, self.mode]
def _draw_samples(self, nb_samples, random_state):
rngs = random_state.duplicate(11)
if isinstance(self.scale, tuple):
scale_samples = (
self.scale[0].draw_samples((nb_samples,),
random_state=rngs[0]),
self.scale[1].draw_samples((nb_samples,),
random_state=rngs[1]),
)
else:
scale_samples = self.scale.draw_samples((nb_samples,),
random_state=rngs[2])
scale_samples = (scale_samples, scale_samples)
if isinstance(self.translate, tuple):
translate_samples = (
self.translate[0].draw_samples((nb_samples,),
random_state=rngs[3]),
self.translate[1].draw_samples((nb_samples,),
random_state=rngs[4]),
)
else:
translate_samples = self.translate.draw_samples(
(nb_samples,), random_state=rngs[5])
translate_samples = (translate_samples, translate_samples)
valid_dts = ["int32", "int64", "float32", "float64"]
for i in sm.xrange(2):
assert translate_samples[i].dtype.name in valid_dts, (
"Expected translate_samples to have any dtype of %s. "
"Got %s." % (str(valid_dts), translate_samples[i].dtype.name,))
rotate_samples = self.rotate.draw_samples((nb_samples,),
random_state=rngs[6])
shear_samples = self.shear.draw_samples((nb_samples,),
random_state=rngs[7])
cval_samples = self.cval.draw_samples((nb_samples, 3),
random_state=rngs[8])
mode_samples = self.mode.draw_samples((nb_samples,),
random_state=rngs[9])
order_samples = self.order.draw_samples((nb_samples,),
random_state=rngs[10])
return (
scale_samples, translate_samples, rotate_samples, shear_samples,
cval_samples, mode_samples, order_samples
)
class _PiecewiseAffineSamplingResult(object):
def __init__(self, nb_rows, nb_cols, jitter, order, cval, mode):
self.nb_rows = nb_rows
self.nb_cols = nb_cols
self.order = order
self.jitter = jitter
self.cval = cval
self.mode = mode
def get_clipped_cval(self, idx, dtype):
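        # Clamp the sampled cval for sample `idx` into the value range that
        # `dtype` can represent.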
min_value, _, max_value = iadt.get_value_range_of_dtype(dtype)
cval = self.cval[idx]
cval = max(min(cval, max_value), min_value)
return cval
class PiecewiseAffine(meta.Augmenter):
"""
Apply affine transformations that differ between local neighbourhoods.
This augmenter places a regular grid of points on an image and randomly
moves the neighbourhood of these point around via affine transformations.
This leads to local distortions.
This is mostly a wrapper around scikit-image's ``PiecewiseAffine``.
See also ``Affine`` for a similar technique.
.. note::
This augmenter is very slow. See :ref:`performance`.
Try to use ``ElasticTransformation`` instead, which is at least 10x
faster.
.. note::
For coordinate-based inputs (keypoints, bounding boxes, polygons,
...), this augmenter still has to perform an image-based augmentation,
which will make it significantly slower for such inputs than other
augmenters. See :ref:`performance`.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested (1)
* ``uint32``: yes; tested (1) (2)
* ``uint64``: no (3)
* ``int8``: yes; tested (1)
* ``int16``: yes; tested (1)
* ``int32``: yes; tested (1) (2)
* ``int64``: no (3)
* ``float16``: yes; tested (1)
* ``float32``: yes; tested (1)
* ``float64``: yes; tested (1)
* ``float128``: no (3)
* ``bool``: yes; tested (1) (4)
- (1) Only tested with `order` set to ``0``.
- (2) scikit-image converts internally to ``float64``, which might
introduce inaccuracies. Tests showed that these inaccuracies
seemed to not be an issue.
- (3) Results too inaccurate.
- (4) Mapped internally to ``float64``.
Parameters
----------
scale : float or tuple of float or imgaug.parameters.StochasticParameter, optional
Each point on the regular grid is moved around via a normal
distribution. This scale factor is equivalent to the normal
distribution's sigma. Note that the jitter (how far each point is
moved in which direction) is multiplied by the height/width of the
image if ``absolute_scale=False`` (default), so this scale can be
the same for different sized images.
Recommended values are in the range ``0.01`` to ``0.05`` (weak to
strong augmentations).
* If a single ``float``, then that value will always be used as
the scale.
* If a tuple ``(a, b)`` of ``float`` s, then a random value will
be uniformly sampled per image from the interval ``[a, b]``.
* If a list, then a random value will be picked from that list
per image.
* If a ``StochasticParameter``, then that parameter will be
queried to draw one value per image.
nb_rows : int or tuple of int or imgaug.parameters.StochasticParameter, optional
Number of rows of points that the regular grid should have.
Must be at least ``2``. For large images, you might want to pick a
        higher value than ``4``. You might then have to adjust `scale` to
        lower values.
* If a single ``int``, then that value will always be used as the
number of rows.
* If a tuple ``(a, b)``, then a value from the discrete interval
``[a..b]`` will be uniformly sampled per image.
* If a list, then a random value will be picked from that list
per image.
* If a StochasticParameter, then that parameter will be queried to
draw one value per image.
nb_cols : int or tuple of int or imgaug.parameters.StochasticParameter, optional
Number of columns. Analogous to `nb_rows`.
order : int or list of int or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :func:`imgaug.augmenters.geometric.Affine.__init__`.
cval : int or float or tuple of float or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :func:`imgaug.augmenters.geometric.Affine.__init__`.
mode : str or list of str or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
See :func:`imgaug.augmenters.geometric.Affine.__init__`.
absolute_scale : bool, optional
Take `scale` as an absolute value rather than a relative value.
polygon_recoverer : 'auto' or None or imgaug.augmentables.polygons._ConcavePolygonRecoverer, optional
The class to use to repair invalid polygons.
If ``"auto"``, a new instance of
        :class:`imgaug.augmentables.polygons._ConcavePolygonRecoverer`
will be created.
If ``None``, no polygon recoverer will be used.
If an object, then that object will be used and must provide a
``recover_from()`` method, similar to
:class:`imgaug.augmentables.polygons._ConcavePolygonRecoverer`.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.PiecewiseAffine(scale=(0.01, 0.05))
Place a regular grid of points on each image and then randomly move each
point around by ``1`` to ``5`` percent (with respect to the image
height/width). Pixels between these points will be moved accordingly.
>>> aug = iaa.PiecewiseAffine(scale=(0.01, 0.05), nb_rows=8, nb_cols=8)
Same as the previous example, but uses a denser grid of ``8x8`` points
(default is ``4x4``). This can be useful for large images.
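    An illustrative configuration (the pixel range ``(10, 30)`` is an
    assumption made for this example):

    >>> aug = iaa.PiecewiseAffine(scale=(10, 30), absolute_scale=True)

    With ``absolute_scale=True``, the sampled jitter is interpreted as an
    absolute pixel value instead of a fraction of the image height/width.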
"""
def __init__(self, scale=0, nb_rows=4, nb_cols=4, order=1, cval=0,
mode="constant", absolute_scale=False, polygon_recoverer=None,
name=None, deterministic=False, random_state=None):
super(PiecewiseAffine, self).__init__(
name=name, deterministic=deterministic, random_state=random_state)
self.scale = iap.handle_continuous_param(
scale, "scale", value_range=(0, None), tuple_to_uniform=True,
list_to_choice=True)
self.jitter = iap.Normal(loc=0, scale=self.scale)
self.nb_rows = iap.handle_discrete_param(
nb_rows, "nb_rows", value_range=(2, None), tuple_to_uniform=True,
list_to_choice=True, allow_floats=False)
self.nb_cols = iap.handle_discrete_param(
nb_cols, "nb_cols", value_range=(2, None), tuple_to_uniform=True,
list_to_choice=True, allow_floats=False)
self.order = _handle_order_arg(order, backend="skimage")
self.cval = _handle_cval_arg(cval)
self.mode = _handle_mode_arg(mode)
self.absolute_scale = absolute_scale
self.polygon_recoverer = polygon_recoverer
if polygon_recoverer == "auto":
self.polygon_recoverer = _ConcavePolygonRecoverer()
# Special order, mode and cval parameters for heatmaps and
# segmentation maps. These may either be None or a fixed value.
# Stochastic parameters are currently *not* supported.
# If set to None, the same values as for images will be used.
# That is really not recommended for the cval parameter.
self._order_heatmaps = 3
self._order_segmentation_maps = 0
self._mode_heatmaps = "constant"
self._mode_segmentation_maps = "constant"
self._cval_heatmaps = 0
self._cval_segmentation_maps = 0
def _augment_batch(self, batch, random_state, parents, hooks):
samples = self._draw_samples(batch.nb_rows, random_state)
if batch.images is not None:
batch.images = self._augment_images_by_samples(batch.images,
samples)
if batch.heatmaps is not None:
batch.heatmaps = self._augment_maps_by_samples(
batch.heatmaps, "arr_0to1", samples, self._cval_heatmaps,
self._mode_heatmaps, self._order_heatmaps)
if batch.segmentation_maps is not None:
batch.segmentation_maps = self._augment_maps_by_samples(
batch.segmentation_maps, "arr", samples,
self._cval_segmentation_maps, self._mode_segmentation_maps,
self._order_segmentation_maps)
# TODO add test for recoverer
if batch.polygons is not None:
func = functools.partial(
self._augment_keypoints_by_samples,
samples=samples)
batch.polygons = self._apply_to_polygons_as_keypoints(
batch.polygons, func, recoverer=self.polygon_recoverer)
for augm_name in ["keypoints", "bounding_boxes", "line_strings"]:
augm_value = getattr(batch, augm_name)
if augm_value is not None:
func = functools.partial(
self._augment_keypoints_by_samples,
samples=samples)
cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)
setattr(batch, augm_name, cbaois)
return batch
def _augment_images_by_samples(self, images, samples):
iadt.gate_dtypes(
images,
allowed=["bool",
"uint8", "uint16", "uint32",
"int8", "int16", "int32",
"float16", "float32", "float64"],
disallowed=["uint64", "uint128", "uint256",
"int64", "int128", "int256",
"float96", "float128", "float256"],
augmenter=self)
result = images
for i, image in enumerate(images):
transformer = self._get_transformer(
image.shape, image.shape, samples.nb_rows[i],
samples.nb_cols[i], samples.jitter[i])
if transformer is not None:
input_dtype = image.dtype
if image.dtype.kind == "b":
image = image.astype(np.float64)
image_warped = tf.warp(
image,
transformer,
order=samples.order[i],
mode=samples.mode[i],
cval=samples.get_clipped_cval(i, image.dtype),
preserve_range=True,
output_shape=images[i].shape
)
if input_dtype.kind == "b":
image_warped = image_warped > 0.5
else:
# warp seems to change everything to float64, including
# uint8, making this necessary
image_warped = iadt.restore_dtypes_(
image_warped, input_dtype)
result[i] = image_warped
return result
def _augment_maps_by_samples(self, augmentables, arr_attr_name, samples,
cval, mode, order):
result = augmentables
for i, augmentable in enumerate(augmentables):
arr = getattr(augmentable, arr_attr_name)
transformer = self._get_transformer(
arr.shape, augmentable.shape, samples.nb_rows[i],
samples.nb_cols[i], samples.jitter[i])
if transformer is not None:
arr_warped = tf.warp(
arr,
transformer,
order=order if order is not None else samples.order[i],
mode=mode if mode is not None else samples.mode[i],
cval=cval if cval is not None else samples.cval[i],
preserve_range=True,
output_shape=arr.shape
)
# skimage converts to float64
arr_warped = arr_warped.astype(arr.dtype)
# TODO not entirely clear whether this breaks the value
# range -- Affine does
# TODO add test for this
# order=3 matches cubic interpolation and can cause values
# to go outside of the range [0.0, 1.0] not clear whether
# 4+ also do that
# We don't modify segmaps here, because they don't have a
# clear value range of [0, 1]
if order >= 3 and isinstance(augmentable, ia.HeatmapsOnImage):
arr_warped = np.clip(arr_warped, 0.0, 1.0, out=arr_warped)
setattr(augmentable, arr_attr_name, arr_warped)
return result
def _augment_keypoints_by_samples(self, kpsois, samples):
# pylint: disable=pointless-string-statement
result = []
for i, kpsoi in enumerate(kpsois):
h, w = kpsoi.shape[0:2]
transformer = self._get_transformer(
kpsoi.shape, kpsoi.shape, samples.nb_rows[i],
samples.nb_cols[i], samples.jitter[i])
if transformer is None or len(kpsoi.keypoints) == 0:
result.append(kpsoi)
else:
# Augmentation routine that only modifies keypoint coordinates
# This is efficient (coordinates of all other locations in the
# image are ignored). The code below should usually work, but
# for some reason augmented coordinates are often wildly off
# for large scale parameters (lots of jitter/distortion).
# The reason for that is unknown.
"""
coords = keypoints_on_images[i].get_coords_array()
coords_aug = transformer.inverse(coords)
result.append(
ia.KeypointsOnImage.from_coords_array(
coords_aug,
shape=keypoints_on_images[i].shape
)
)
"""
# TODO this could be done a little bit more efficient by
# removing first all KPs that are outside of the image
# plane so that no corresponding distance map has to
# be augmented
# Image based augmentation routine. Draws the keypoints on
# the image plane using distance maps (more accurate than
# just marking the points), then augments these images, then
# searches for the new (visual) location of the keypoints.
# Much slower than directly augmenting the coordinates, but
# here the only method that reliably works.
dist_maps = kpsoi.to_distance_maps(inverted=True)
dist_maps_warped = tf.warp(
dist_maps,
transformer,
order=1,
preserve_range=True,
output_shape=(kpsoi.shape[0], kpsoi.shape[1],
len(kpsoi.keypoints))
)
kps_aug = ia.KeypointsOnImage.from_distance_maps(
dist_maps_warped,
inverted=True,
threshold=0.01,
if_not_found_coords={"x": -1, "y": -1},
nb_channels=(
None if len(kpsoi.shape) < 3 else kpsoi.shape[2])
)
for kp, kp_aug in zip(kpsoi.keypoints, kps_aug.keypoints):
# Keypoints that were outside of the image plane before the
# augmentation were replaced with (-1, -1) by default (as
# they can't be drawn on the keypoint images).
within_image = (0 <= kp.x < w and 0 <= kp.y < h)
if within_image:
kp.x = kp_aug.x
kp.y = kp_aug.y
result.append(kpsoi)
return result
def _draw_samples(self, nb_images, random_state):
rss = random_state.duplicate(6)
nb_rows_samples = self.nb_rows.draw_samples((nb_images,),
random_state=rss[-6])
nb_cols_samples = self.nb_cols.draw_samples((nb_images,),
random_state=rss[-5])
order_samples = self.order.draw_samples((nb_images,),
random_state=rss[-4])
cval_samples = self.cval.draw_samples((nb_images,),
random_state=rss[-3])
mode_samples = self.mode.draw_samples((nb_images,),
random_state=rss[-2])
nb_rows_samples = np.clip(nb_rows_samples, 2, None)
nb_cols_samples = np.clip(nb_cols_samples, 2, None)
nb_cells = nb_rows_samples * nb_cols_samples
jitter = self.jitter.draw_samples((int(np.sum(nb_cells)), 2),
random_state=rss[-1])
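        # Split the flat jitter samples back into one (nb_cells_i, 2) block
        # per image, where nb_cells_i = nb_rows_i * nb_cols_i for that image.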
jitter_by_image = []
counter = 0
for nb_cells_i in nb_cells:
jitter_img = jitter[counter:counter+nb_cells_i, :]
jitter_by_image.append(jitter_img)
counter += nb_cells_i
return _PiecewiseAffineSamplingResult(
nb_rows=nb_rows_samples, nb_cols=nb_cols_samples,
jitter=jitter_by_image,
order=order_samples, cval=cval_samples, mode=mode_samples)
def _get_transformer(self, augmentable_shape, image_shape, nb_rows,
nb_cols, jitter_img):
        # Get coords on the y and x axis of the points to move around.
        # Note: the linspace calls below place the grid points on a regular
        # grid spanning the full image, i.e. from 0 to H (rows) and 0 to W
        # (cols) inclusive. Placing them at cell centers instead would avoid
        # the corner points starting exactly at the image border, where they
        # can hardly be moved before leaving the image.
# pylint: disable=no-else-return
y = np.linspace(0, augmentable_shape[0], nb_rows)
x = np.linspace(0, augmentable_shape[1], nb_cols)
# (H, W) and (H, W) for H=rows, W=cols
xx_src, yy_src = np.meshgrid(x, y)
# (1, HW, 2) => (HW, 2) for H=rows, W=cols
points_src = np.dstack([yy_src.flat, xx_src.flat])[0]
any_nonzero = np.any(jitter_img > 0)
if not any_nonzero:
return None
else:
# Without this, jitter gets changed between different augmentables.
# TODO if left out, only one test failed -- should be more
jitter_img = np.copy(jitter_img)
if self.absolute_scale:
if image_shape[0] > 0:
jitter_img[:, 0] = jitter_img[:, 0] / image_shape[0]
else:
jitter_img[:, 0] = 0.0
if image_shape[1] > 0:
jitter_img[:, 1] = jitter_img[:, 1] / image_shape[1]
else:
jitter_img[:, 1] = 0.0
jitter_img[:, 0] = jitter_img[:, 0] * augmentable_shape[0]
jitter_img[:, 1] = jitter_img[:, 1] * augmentable_shape[1]
points_dest = np.copy(points_src)
points_dest[:, 0] = points_dest[:, 0] + jitter_img[:, 0]
points_dest[:, 1] = points_dest[:, 1] + jitter_img[:, 1]
# Restrict all destination points to be inside the image plane.
# This is necessary, as otherwise keypoints could be augmented
# outside of the image plane and these would be replaced by
# (-1, -1), which would not conform with the behaviour of the
# other augmenters.
points_dest[:, 0] = np.clip(points_dest[:, 0],
0, augmentable_shape[0]-1)
points_dest[:, 1] = np.clip(points_dest[:, 1],
0, augmentable_shape[1]-1)
# tf.warp() results in qhull error if the points are identical,
# which is mainly the case if any axis is 0
has_low_axis = any([axis <= 1 for axis in augmentable_shape[0:2]])
has_zero_channels = (
(
augmentable_shape is not None
and len(augmentable_shape) == 3
and augmentable_shape[-1] == 0
)
or
(
image_shape is not None
and len(image_shape) == 3
and image_shape[-1] == 0
)
)
if has_low_axis or has_zero_channels:
return None
else:
matrix = tf.PiecewiseAffineTransform()
matrix.estimate(points_src[:, ::-1], points_dest[:, ::-1])
return matrix
def get_parameters(self):
"""See :func:`imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [
self.scale, self.nb_rows, self.nb_cols, self.order, self.cval,
self.mode, self.absolute_scale]
class _PerspectiveTransformSamplingResult(object):
def __init__(self, matrices, max_heights, max_widths, cvals, modes):
self.matrices = matrices
self.max_heights = max_heights
self.max_widths = max_widths
self.cvals = cvals
self.modes = modes
# TODO add arg for image interpolation
class PerspectiveTransform(meta.Augmenter):
"""
Apply random four point perspective transformations to images.
Each of the four points is placed on the image using a random distance from
its respective corner. The distance is sampled from a normal distribution.
As a result, most transformations don't change the image very much, while
some "focus" on polygons far inside the image.
The results of this augmenter have some similarity with ``Crop``.
Code partially from
http://www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/
dtype support::
if (keep_size=False)::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: yes; tested (3)
* ``int16``: yes; tested
* ``int32``: no (2)
* ``int64``: no (2)
* ``float16``: yes; tested (4)
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no (1)
* ``bool``: yes; tested (4)
- (1) rejected by opencv
- (2) leads to opencv error: cv2.error: ``OpenCV(3.4.4)
(...)imgwarp.cpp:1805: error: (-215:Assertion failed)
ifunc != 0 in function 'remap'``.
- (3) mapped internally to ``int16``.
        - (4) mapped internally to ``float32``.
if (keep_size=True)::
minimum of (
``imgaug.augmenters.geometric.PerspectiveTransform(keep_size=False)``,
:func:`imgaug.imgaug.imresize_many_images`
)
Parameters
----------
scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Standard deviation of the normal distributions. These are used to
sample the random distances of the subimage's corners from the full
image's corners. The sampled values reflect percentage values (with
respect to image height/width). Recommended values are in the range
``0.0`` to ``0.1``.
* If a single number, then that value will always be used as the
scale.
* If a tuple ``(a, b)`` of numbers, then a random value will be
uniformly sampled per image from the interval ``(a, b)``.
* If a list of values, a random value will be picked from the
list per image.
* If a ``StochasticParameter``, then that parameter will be
queried to draw one value per image.
keep_size : bool, optional
        Whether to resize images back to their original size after applying
the perspective transform. If set to ``False``, the resulting images
may end up having different shapes and will always be a list, never
an array.
    cval : number or tuple of number or list of number or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
The constant value used to fill up pixels in the result image that
didn't exist in the input image (e.g. when translating to the left,
some new pixels are created at the right). Such a fill-up with a
        constant value only happens when `mode` is ``constant``.
The expected value range is ``[0, 255]`` for ``uint8`` images.
It may be a float value.
* If this is a single int or float, then that value will be used
(e.g. 0 results in black pixels).
* If a tuple ``(a, b)``, then a random value is uniformly sampled
per image from the interval ``[a, b]``.
* If a list, then a random value will be sampled from that list
per image.
* If ``imgaug.ALL``, then equivalent to tuple ``(0, 255)``.
* If a ``StochasticParameter``, a new value will be sampled from
the parameter per image.
mode : int or str or list of str or list of int or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
Parameter that defines the handling of newly created pixels.
Same meaning as in OpenCV's border mode. Let ``abcdefgh`` be an image's
content and ``|`` be an image boundary, then:
* ``cv2.BORDER_REPLICATE``: ``aaaaaa|abcdefgh|hhhhhhh``
* ``cv2.BORDER_CONSTANT``: ``iiiiii|abcdefgh|iiiiiii``, where
``i`` is the defined cval.
* ``replicate``: Same as ``cv2.BORDER_REPLICATE``.
* ``constant``: Same as ``cv2.BORDER_CONSTANT``.
The datatype of the parameter may be:
* If a single ``int``, then it must be one of ``cv2.BORDER_*``.
            * If a single string, then it must be one of: ``replicate``,
              ``constant``.
* If a list of ints/strings, then per image a random mode will be
picked from that list.
* If ``imgaug.ALL``, then a random mode from all possible modes
will be picked per image.
* If ``StochasticParameter``, then the mode will be sampled from
that parameter per image, i.e. it must return only the above
mentioned strings.
fit_output : bool, optional
If ``True``, the image plane size and position will be adjusted
to still capture the whole image after perspective transformation.
(Followed by image resizing if `keep_size` is set to ``True``.)
Otherwise, parts of the transformed image may be outside of the image
plane.
This setting should not be set to ``True`` when using large `scale`
values as it could lead to very large images.
polygon_recoverer : 'auto' or None or imgaug.augmentables.polygons._ConcavePolygonRecoverer, optional
The class to use to repair invalid polygons.
If ``"auto"``, a new instance of
        :class:`imgaug.augmentables.polygons._ConcavePolygonRecoverer`
will be created.
If ``None``, no polygon recoverer will be used.
If an object, then that object will be used and must provide a
``recover_from()`` method, similar to
:class:`imgaug.augmentables.polygons._ConcavePolygonRecoverer`.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.PerspectiveTransform(scale=(0.01, 0.15))
Apply perspective transformations using a random scale between ``0.01``
and ``0.15`` per image, where the scale is roughly a measure of how far
the perspective transformation's corner points may be distanced from the
image's corner points. Higher scale values lead to stronger "zoom-in"
effects (and thereby stronger distortions).
>>> aug = iaa.PerspectiveTransform(scale=(0.01, 0.15), keep_size=False)
Same as in the previous example, but images are not resized back to
the input image size after augmentation. This will lead to smaller
output images.
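    An illustrative variant of the first example:

    >>> aug = iaa.PerspectiveTransform(scale=(0.01, 0.15), fit_output=True)

    With ``fit_output=True``, the image plane is adjusted so that the whole
    transformed image remains visible; since ``keep_size`` defaults to
    ``True``, the result is afterwards resized back to the input size.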
"""
_BORDER_MODE_STR_TO_INT = {
"replicate": cv2.BORDER_REPLICATE,
"constant": cv2.BORDER_CONSTANT
}
def __init__(self, scale=0, cval=0, mode="constant", keep_size=True,
fit_output=False, polygon_recoverer="auto",
name=None, deterministic=False, random_state=None):
super(PerspectiveTransform, self).__init__(
name=name, deterministic=deterministic, random_state=random_state)
self.scale = iap.handle_continuous_param(
scale, "scale", value_range=(0, None), tuple_to_uniform=True,
list_to_choice=True)
self.jitter = iap.Normal(loc=0, scale=self.scale)
# setting these to 1x1 caused problems for large scales and polygon
# augmentation
# TODO there is now a recoverer for polygons - are these minima still
# needed/sensible?
self.min_width = 2
self.min_height = 2
self.cval = _handle_cval_arg(cval)
self.mode = self._handle_mode_arg(mode)
self.keep_size = keep_size
self.fit_output = fit_output
self.polygon_recoverer = polygon_recoverer
if polygon_recoverer == "auto":
self.polygon_recoverer = _ConcavePolygonRecoverer()
# Special order, mode and cval parameters for heatmaps and
# segmentation maps. These may either be None or a fixed value.
# Stochastic parameters are currently *not* supported.
# If set to None, the same values as for images will be used.
# That is really not recommended for the cval parameter.
self._order_heatmaps = cv2.INTER_LINEAR
self._order_segmentation_maps = cv2.INTER_NEAREST
self._mode_heatmaps = cv2.BORDER_CONSTANT
self._mode_segmentation_maps = cv2.BORDER_CONSTANT
self._cval_heatmaps = 0
self._cval_segmentation_maps = 0
# TODO unify this somehow with the global _handle_mode_arg() that is
# currently used for Affine and PiecewiseAffine
@classmethod
def _handle_mode_arg(cls, mode):
available_modes = [cv2.BORDER_REPLICATE, cv2.BORDER_CONSTANT]
available_modes_str = ["replicate", "constant"]
if mode == ia.ALL:
return iap.Choice(available_modes)
if ia.is_single_integer(mode):
assert mode in available_modes, (
"Expected mode to be in %s, got %d." % (
str(available_modes), mode))
return iap.Deterministic(mode)
if ia.is_string(mode):
assert mode in available_modes_str, (
"Expected mode to be in %s, got %s." % (
str(available_modes_str), mode))
return iap.Deterministic(mode)
if isinstance(mode, list):
valid_types = all([ia.is_single_integer(val) or ia.is_string(val)
for val in mode])
assert valid_types, (
"Expected mode list to only contain integers/strings, got "
"types %s." % (
", ".join([str(type(val)) for val in mode]),))
valid_modes = all([val in available_modes + available_modes_str
for val in mode])
assert valid_modes, (
"Expected all mode values to be in %s, got %s." % (
str(available_modes + available_modes_str), str(mode)))
return iap.Choice(mode)
if isinstance(mode, iap.StochasticParameter):
return mode
raise Exception(
"Expected mode to be imgaug.ALL, an int, a string, a list "
"of int/strings or StochasticParameter, got %s." % (
type(mode),))
def _augment_batch(self, batch, random_state, parents, hooks):
samples_images = self._draw_samples(batch.get_rowwise_shapes(),
random_state.copy())
if batch.images is not None:
batch.images = self._augment_images_by_samples(batch.images,
samples_images)
if batch.heatmaps is not None:
samples = self._draw_samples(
[augmentable.arr_0to1.shape
for augmentable in batch.heatmaps],
random_state.copy())
batch.heatmaps = self._augment_maps_by_samples(
batch.heatmaps, "arr_0to1", samples, samples_images,
self._cval_heatmaps, self._mode_heatmaps, self._order_heatmaps)
if batch.segmentation_maps is not None:
samples = self._draw_samples(
[augmentable.arr.shape
for augmentable in batch.segmentation_maps],
random_state.copy())
batch.segmentation_maps = self._augment_maps_by_samples(
batch.segmentation_maps, "arr", samples, samples_images,
self._cval_segmentation_maps, self._mode_segmentation_maps,
self._order_segmentation_maps)
# large scale values cause invalid polygons (unclear why that happens),
# hence the recoverer
# TODO add test for recoverer
if batch.polygons is not None:
func = functools.partial(
self._augment_keypoints_by_samples,
samples_images=samples_images)
batch.polygons = self._apply_to_polygons_as_keypoints(
batch.polygons, func, recoverer=self.polygon_recoverer)
for augm_name in ["keypoints", "bounding_boxes", "line_strings"]:
augm_value = getattr(batch, augm_name)
if augm_value is not None:
func = functools.partial(
self._augment_keypoints_by_samples,
samples_images=samples_images)
cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)
setattr(batch, augm_name, cbaois)
return batch
def _augment_images_by_samples(self, images, samples):
iadt.gate_dtypes(
images,
allowed=["bool",
"uint8", "uint16",
"int8", "int16",
"float16", "float32", "float64"],
disallowed=["uint32", "uint64", "uint128", "uint256",
"int32", "int64", "int128", "int256",
"float96", "float128", "float256"],
augmenter=self)
result = images
if not self.keep_size:
result = list(result)
gen = enumerate(zip(images, samples.matrices, samples.max_heights,
samples.max_widths, samples.cvals, samples.modes))
for i, (image, matrix, max_height, max_width, cval, mode) in gen:
input_dtype = image.dtype
if input_dtype.name in ["int8"]:
image = image.astype(np.int16)
elif input_dtype.name in ["bool", "float16"]:
image = image.astype(np.float32)
# cv2.warpPerspective only supports <=4 channels and errors
# on axes with size zero
nb_channels = image.shape[2]
has_zero_sized_axis = (image.size == 0)
if has_zero_sized_axis:
warped = image
elif nb_channels <= 4:
warped = cv2.warpPerspective(
image,
matrix,
(max_width, max_height),
borderValue=cval,
borderMode=mode)
if warped.ndim == 2 and images[i].ndim == 3:
warped = np.expand_dims(warped, 2)
else:
# warp each channel on its own
# note that cv2 removes the channel axis in case of (H,W,1)
# inputs
warped = [
cv2.warpPerspective(
image[..., c],
matrix,
(max_width, max_height),
borderValue=cval[min(c, len(cval)-1)],
borderMode=mode,
flags=cv2.INTER_LINEAR
)
for c in sm.xrange(nb_channels)
]
warped = np.stack(warped, axis=-1)
if self.keep_size and not has_zero_sized_axis:
h, w = image.shape[0:2]
warped = ia.imresize_single_image(warped, (h, w))
if input_dtype.name == "bool":
warped = warped > 0.5
elif warped.dtype.name != input_dtype.name:
warped = iadt.restore_dtypes_(warped, input_dtype)
result[i] = warped
return result
def _augment_maps_by_samples(self, augmentables, arr_attr_name,
samples, samples_images, cval, mode, flags):
result = augmentables
# estimate max_heights/max_widths for the underlying images
# this is only necessary if keep_size is False as then the underlying
# image sizes change and we need to update them here
# TODO this was re-used from before _augment_batch() -- reoptimize
if self.keep_size:
max_heights_imgs = samples.max_heights
max_widths_imgs = samples.max_widths
else:
max_heights_imgs = samples_images.max_heights
max_widths_imgs = samples_images.max_widths
gen = enumerate(zip(augmentables, samples.matrices, samples.max_heights,
samples.max_widths))
for i, (augmentable_i, matrix, max_height, max_width) in gen:
arr = getattr(augmentable_i, arr_attr_name)
mode_i = mode
if mode is None:
mode_i = samples.modes[i]
cval_i = cval
if cval is None:
cval_i = samples.cvals[i]
nb_channels = arr.shape[2]
image_has_zero_sized_axis = (0 in augmentable_i.shape)
map_has_zero_sized_axis = (arr.size == 0)
if not image_has_zero_sized_axis:
if not map_has_zero_sized_axis:
warped = [
cv2.warpPerspective(
arr[..., c],
matrix,
(max_width, max_height),
borderValue=cval_i,
borderMode=mode_i,
flags=flags
)
for c in sm.xrange(nb_channels)
]
warped = np.stack(warped, axis=-1)
setattr(augmentable_i, arr_attr_name, warped)
if self.keep_size:
h, w = arr.shape[0:2]
augmentable_i = augmentable_i.resize((h, w))
else:
new_shape = (
max_heights_imgs[i], max_widths_imgs[i]
) + augmentable_i.shape[2:]
augmentable_i.shape = new_shape
result[i] = augmentable_i
return result
def _augment_keypoints_by_samples(self, kpsois, samples_images):
result = kpsois
gen = enumerate(zip(kpsois,
samples_images.matrices,
samples_images.max_heights,
samples_images.max_widths))
for i, (kpsoi, matrix, max_height, max_width) in gen:
image_has_zero_sized_axis = (0 in kpsoi.shape)
if not image_has_zero_sized_axis:
shape_orig = kpsoi.shape
shape_new = (max_height, max_width) + kpsoi.shape[2:]
kpsoi.shape = shape_new
if not kpsoi.empty:
kps_arr = kpsoi.to_xy_array()
warped = cv2.perspectiveTransform(
np.array([kps_arr], dtype=np.float32), matrix)
warped = warped[0]
for kp, coords in zip(kpsoi.keypoints, warped):
kp.x = coords[0]
kp.y = coords[1]
if self.keep_size:
kpsoi = kpsoi.on_(shape_orig)
result[i] = kpsoi
return result
def _draw_samples(self, shapes, random_state):
# pylint: disable=invalid-name
matrices = []
max_heights = []
max_widths = []
nb_images = len(shapes)
rngs = random_state.duplicate(3)
cval_samples = self.cval.draw_samples((nb_images, 3),
random_state=rngs[0])
mode_samples = self.mode.draw_samples((nb_images,),
random_state=rngs[1])
jitter = self.jitter.draw_samples((nb_images, 4, 2),
random_state=rngs[2])
        # cv2.warpPerspective() doesn't accept numpy arrays as borderValue
        # (cval), so convert the samples to plain python lists
cval_samples_cv2 = cval_samples.tolist()
# if border modes are represented by strings, convert them to cv2
# border mode integers
if mode_samples.dtype.kind not in ["i", "u"]:
for mode, mapped_mode in self._BORDER_MODE_STR_TO_INT.items():
mode_samples[mode_samples == mode] = mapped_mode
# modify jitter to the four corner point coordinates
        # some x/y values have to be modified from `jitter` to `1-jitter`
# for that
        # TODO remove the abs() here. It currently only allows zooming in,
        # not zooming out.
points = np.mod(np.abs(jitter), 1)
# top left -- no changes needed, just use jitter
# top right
points[:, 1, 0] = 1.0 - points[:, 1, 0] # w = 1.0 - jitter
# bottom right
points[:, 2, 0] = 1.0 - points[:, 2, 0] # w = 1.0 - jitter
points[:, 2, 1] = 1.0 - points[:, 2, 1] # h = 1.0 - jitter
# bottom left
points[:, 3, 1] = 1.0 - points[:, 3, 1] # h = 1.0 - jitter
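        # Illustrative sketch (values assumed, not sampled): for a uniform
        # jitter of 0.1 the relative corner points become
        # top-left (0.1, 0.1), top-right (0.9, 0.1), bottom-right (0.9, 0.9)
        # and bottom-left (0.1, 0.9), i.e. the source quad shrinks towards
        # the image center ("zoom-in").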
for shape, points_i in zip(shapes, points):
h, w = shape[0:2]
points_i[:, 0] *= w
points_i[:, 1] *= h
# Obtain a consistent order of the points and unpack them
# individually.
# Warning: don't just do (tl, tr, br, bl) = _order_points(...)
# here, because the reordered points_i is used further below.
points_i = self._order_points(points_i)
(tl, tr, br, bl) = points_i
# compute the width of the new image, which will be the
# maximum distance between bottom-right and bottom-left
            # x-coordinates or the top-right and top-left x-coordinates
min_width = None
max_width = None
while min_width is None or min_width < self.min_width:
width_top = np.sqrt(((tr[0]-tl[0])**2) + ((tr[1]-tl[1])**2))
width_bottom = np.sqrt(((br[0]-bl[0])**2) + ((br[1]-bl[1])**2))
max_width = int(max(width_top, width_bottom))
min_width = int(min(width_top, width_bottom))
if min_width < self.min_width:
step_size = (self.min_width - min_width)/2
tl[0] -= step_size
tr[0] += step_size
bl[0] -= step_size
br[0] += step_size
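            # Illustrative example (assumed values): if the measured
            # min_width is 80 and self.min_width is 100, each corner is
            # pushed 10px outwards horizontally before the widths are
            # re-measured in the next loop iteration.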
# compute the height of the new image, which will be the
# maximum distance between the top-right and bottom-right
# y-coordinates or the top-left and bottom-left y-coordinates
min_height = None
max_height = None
while min_height is None or min_height < self.min_height:
height_right = np.sqrt(((tr[0]-br[0])**2) + ((tr[1]-br[1])**2))
height_left = np.sqrt(((tl[0]-bl[0])**2) + ((tl[1]-bl[1])**2))
max_height = int(max(height_right, height_left))
min_height = int(min(height_right, height_left))
if min_height < self.min_height:
step_size = (self.min_height - min_height)/2
tl[1] -= step_size
tr[1] -= step_size
bl[1] += step_size
br[1] += step_size
# now that we have the dimensions of the new image, construct
# the set of destination points to obtain a "birds eye view",
# (i.e. top-down view) of the image, again specifying points
# in the top-left, top-right, bottom-right, and bottom-left
# order
dst = np.array([
[0, 0],
[max_width - 1, 0],
[max_width - 1, max_height - 1],
[0, max_height - 1]
], dtype=np.float32)
# compute the perspective transform matrix and then apply it
m = cv2.getPerspectiveTransform(points_i, dst)
if self.fit_output:
m, max_width, max_height = self._expand_transform(m, (h, w))
matrices.append(m)
max_heights.append(max_height)
max_widths.append(max_width)
mode_samples = mode_samples.astype(np.int32)
return _PerspectiveTransformSamplingResult(
matrices, max_heights, max_widths, cval_samples_cv2,
mode_samples)
@classmethod
def _order_points(cls, pts):
        # initialize a list of coordinates that will be ordered
# such that the first entry in the list is the top-left,
# the second entry is the top-right, the third is the
# bottom-right, and the fourth is the bottom-left
pts_ordered = np.zeros((4, 2), dtype=np.float32)
# the top-left point will have the smallest sum, whereas
# the bottom-right point will have the largest sum
pointwise_sum = pts.sum(axis=1)
pts_ordered[0] = pts[np.argmin(pointwise_sum)]
pts_ordered[2] = pts[np.argmax(pointwise_sum)]
# now, compute the difference between the points, the
# top-right point will have the smallest difference,
# whereas the bottom-left will have the largest difference
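        # Worked example (assumed input, for illustration): for the corners
        # (0, 0), (1, 0), (1, 1), (0, 1) the sums are 0, 1, 2, 1 and the
        # differences (y - x) are 0, -1, 0, 1, so argmin/argmax of the sums
        # pick (0, 0) / (1, 1) and argmin/argmax of the differences pick
        # (1, 0) / (0, 1).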
diff = np.diff(pts, axis=1)
pts_ordered[1] = pts[np.argmin(diff)]
pts_ordered[3] = pts[np.argmax(diff)]
# return the ordered coordinates
return pts_ordered
@classmethod
def _expand_transform(cls, matrix, shape):
height, width = shape
rect = np.array([
[0, 0],
[width - 1, 0],
[width - 1, height - 1],
[0, height - 1]], dtype=np.float32)
dst = cv2.perspectiveTransform(np.array([rect]), matrix)[0]
# get min x, y over transformed 4 points
# then modify target points by subtracting these minima
# => shift to (0, 0)
dst -= dst.min(axis=0, keepdims=True)
dst = np.around(dst, decimals=0)
matrix_expanded = cv2.getPerspectiveTransform(rect, dst)
max_width, max_height = dst.max(axis=0) + 1
return matrix_expanded, max_width, max_height
def get_parameters(self):
"""See :func:`imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.jitter, self.keep_size, self.cval, self.mode,
self.fit_output]
class _ElasticTransformationSamplingResult(object):
def __init__(self, random_states, alphas, sigmas, orders, cvals, modes):
self.random_states = random_states
self.alphas = alphas
self.sigmas = sigmas
self.orders = orders
self.cvals = cvals
self.modes = modes
# TODO add independent sigmas for x/y
# TODO add independent alphas for x/y
# TODO add backend arg
class ElasticTransformation(meta.Augmenter):
"""
Transform images by moving pixels locally around using displacement fields.
The augmenter has the parameters ``alpha`` and ``sigma``. ``alpha``
controls the strength of the displacement: higher values mean that pixels
are moved further. ``sigma`` controls the smoothness of the displacement:
    higher values lead to smoother patterns -- as if the image were below
    water -- while low values will cause individual pixels to be moved very
    differently from their neighbours, leading to noisy and pixelated images.
A relation of 10:1 seems to be good for ``alpha`` and ``sigma``, e.g.
``alpha=10`` and ``sigma=1`` or ``alpha=50``, ``sigma=5``. For ``128x128``
a setting of ``alpha=(0, 70.0)``, ``sigma=(4.0, 6.0)`` may be a good
choice and will lead to a water-like effect.
Code here was initially inspired by
https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a
For a detailed explanation, see ::
Simard, Steinkraus and Platt
Best Practices for Convolutional Neural Networks applied to Visual
Document Analysis
in Proc. of the International Conference on Document Analysis and
Recognition, 2003
.. note::
For coordinate-based inputs (keypoints, bounding boxes, polygons,
...), this augmenter still has to perform an image-based augmentation,
which will make it significantly slower for such inputs than other
augmenters. See :ref:`performance`.
dtype support::
* ``uint8``: yes; fully tested (1)
* ``uint16``: yes; tested (1)
* ``uint32``: yes; tested (2)
* ``uint64``: limited; tested (3)
* ``int8``: yes; tested (1) (4) (5)
* ``int16``: yes; tested (4) (6)
* ``int32``: yes; tested (4) (6)
* ``int64``: limited; tested (3)
* ``float16``: yes; tested (1)
* ``float32``: yes; tested (1)
* ``float64``: yes; tested (1)
* ``float128``: no
* ``bool``: yes; tested (1) (7)
- (1) Always handled by ``cv2``.
- (2) Always handled by ``scipy``.
- (3) Only supported for ``order != 0``. Will fail for ``order=0``.
- (4) Mapped internally to ``float64`` when ``order=1``.
- (5) Mapped internally to ``int16`` when ``order>=2``.
- (6) Handled by ``cv2`` when ``order=0`` or ``order=1``, otherwise by
``scipy``.
- (7) Mapped internally to ``float32``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Strength of the distortion field. Higher values mean that pixels are
moved further with respect to the distortion field's direction. Set
this to around 10 times the value of `sigma` for visible effects.
* If number, then that value will be used for all images.
* If tuple ``(a, b)``, then a random value will be uniformly
sampled per image from the interval ``[a, b]``.
* If a list, then for each image a random value will be sampled
from that list.
* If ``StochasticParameter``, then that parameter will be used to
sample a value per image.
sigma : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Standard deviation of the gaussian kernel used to smooth the distortion
fields. Higher values (for ``128x128`` images around 5.0) lead to more
water-like effects, while lower values (for ``128x128`` images
around ``1.0`` and lower) lead to more noisy, pixelated images. Set
this to around 1/10th of `alpha` for visible effects.
* If number, then that value will be used for all images.
* If tuple ``(a, b)``, then a random value will be uniformly
sampled per image from the interval ``[a, b]``.
* If a list, then for each image a random value will be sampled
from that list.
* If ``StochasticParameter``, then that parameter will be used to
sample a value per image.
    order : int or list of int or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
Interpolation order to use. Same meaning as in
:func:`scipy.ndimage.map_coordinates` and may take any integer value
in the range ``0`` to ``5``, where orders close to ``0`` are faster.
* If a single int, then that order will be used for all images.
* If a tuple ``(a, b)``, then a random value will be uniformly
sampled per image from the interval ``[a, b]``.
* If a list, then for each image a random value will be sampled
from that list.
        * If ``imgaug.ALL``, then equivalent to list
``[0, 1, 2, 3, 4, 5]``.
* If ``StochasticParameter``, then that parameter is queried per
image to sample the order value to use.
cval : number or tuple of number or list of number or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
The constant intensity value used to fill in new pixels.
This value is only used if `mode` is set to ``constant``.
        For standard ``uint8`` images (value range ``0`` to ``255``), this
        value should also be in the range ``0`` to ``255``. It may
        be a ``float`` value, even for images with integer dtypes.
* If this is a single number, then that value will be used
(e.g. ``0`` results in black pixels).
* If a tuple ``(a, b)``, then a random value will be uniformly
sampled per image from the interval ``[a, b]``.
* If a list, then a random value will be picked from that list per
image.
* If ``imgaug.ALL``, a value from the discrete range ``[0..255]``
will be sampled per image.
* If a ``StochasticParameter``, a new value will be sampled from
the parameter per image.
mode : str or list of str or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
Parameter that defines the handling of newly created pixels.
May take the same values as in :func:`scipy.ndimage.map_coordinates`,
i.e. ``constant``, ``nearest``, ``reflect`` or ``wrap``.
* If a single string, then that mode will be used for all images.
* If a list of strings, then per image a random mode will be picked
from that list.
* If ``imgaug.ALL``, then a random mode from all possible modes
will be picked.
* If ``StochasticParameter``, then the mode will be sampled from
that parameter per image, i.e. it must return only the above
mentioned strings.
polygon_recoverer : 'auto' or None or imgaug.augmentables.polygons._ConcavePolygonRecoverer, optional
The class to use to repair invalid polygons.
If ``"auto"``, a new instance of
        :class:`imgaug.augmentables.polygons._ConcavePolygonRecoverer`
will be created.
If ``None``, no polygon recoverer will be used.
If an object, then that object will be used and must provide a
``recover_from()`` method, similar to
:class:`imgaug.augmentables.polygons._ConcavePolygonRecoverer`.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.ElasticTransformation(alpha=50.0, sigma=5.0)
Apply elastic transformations with a strength/alpha of ``50.0`` and
smoothness of ``5.0`` to all images.
>>> aug = iaa.ElasticTransformation(alpha=(0.0, 70.0), sigma=5.0)
Apply elastic transformations with a strength/alpha that comes
from the interval ``[0.0, 70.0]`` (randomly picked per image) and
with a smoothness of ``5.0``.
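    The following example is only a sketch derived from the note above about
    ``128x128`` sized images; suitable values depend on the image size.

    >>> aug = iaa.ElasticTransformation(alpha=(0.0, 70.0), sigma=(4.0, 6.0))

    Apply elastic transformations with a strength/alpha sampled from the
    interval ``[0.0, 70.0]`` and a smoothness sampled from ``[4.0, 6.0]``
    (both randomly picked per image), which may be a good choice for
    ``128x128`` sized images.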
"""
NB_NEIGHBOURING_KEYPOINTS = 3
NEIGHBOURING_KEYPOINTS_DISTANCE = 1.0
KEYPOINT_AUG_ALPHA_THRESH = 0.05
# even at high alphas we don't augment keypoints if the sigma is too low,
# because then the pixel movements are mostly gaussian noise anyways
KEYPOINT_AUG_SIGMA_THRESH = 1.0
_MAPPING_MODE_SCIPY_CV2 = {
"constant": cv2.BORDER_CONSTANT,
"nearest": cv2.BORDER_REPLICATE,
"reflect": cv2.BORDER_REFLECT_101,
"wrap": cv2.BORDER_WRAP
}
_MAPPING_ORDER_SCIPY_CV2 = {
0: cv2.INTER_NEAREST,
1: cv2.INTER_LINEAR,
2: cv2.INTER_CUBIC,
3: cv2.INTER_CUBIC,
4: cv2.INTER_CUBIC,
5: cv2.INTER_CUBIC
}
def __init__(self, alpha=0, sigma=0, order=3, cval=0, mode="constant",
polygon_recoverer="auto", name=None, deterministic=False,
random_state=None):
super(ElasticTransformation, self).__init__(
name=name, deterministic=deterministic, random_state=random_state)
self.alpha = iap.handle_continuous_param(
alpha, "alpha", value_range=(0, None), tuple_to_uniform=True,
list_to_choice=True)
self.sigma = iap.handle_continuous_param(
sigma, "sigma", value_range=(0, None), tuple_to_uniform=True,
list_to_choice=True)
self.order = self._handle_order_arg(order)
self.cval = _handle_cval_arg(cval)
self.mode = self._handle_mode_arg(mode)
self.polygon_recoverer = polygon_recoverer
if polygon_recoverer == "auto":
self.polygon_recoverer = _ConcavePolygonRecoverer()
# Special order, mode and cval parameters for heatmaps and
# segmentation maps. These may either be None or a fixed value.
# Stochastic parameters are currently *not* supported.
# If set to None, the same values as for images will be used.
# That is really not recommended for the cval parameter.
#
self._order_heatmaps = 3
self._order_segmentation_maps = 0
self._mode_heatmaps = "constant"
self._mode_segmentation_maps = "constant"
self._cval_heatmaps = 0.0
self._cval_segmentation_maps = 0
@classmethod
def _handle_order_arg(cls, order):
if order == ia.ALL:
return iap.Choice([0, 1, 2, 3, 4, 5])
return iap.handle_discrete_param(
order, "order", value_range=(0, 5), tuple_to_uniform=True,
list_to_choice=True, allow_floats=False)
@classmethod
def _handle_mode_arg(cls, mode):
if mode == ia.ALL:
return iap.Choice(["constant", "nearest", "reflect", "wrap"])
if ia.is_string(mode):
return iap.Deterministic(mode)
if ia.is_iterable(mode):
assert all([ia.is_string(val) for val in mode]), (
"Expected mode list to only contain strings, got "
"types %s." % (
", ".join([str(type(val)) for val in mode]),))
return iap.Choice(mode)
if isinstance(mode, iap.StochasticParameter):
return mode
raise Exception(
"Expected mode to be imgaug.ALL, a string, a list of strings "
"or StochasticParameter, got %s." % (type(mode),))
def _draw_samples(self, nb_images, random_state):
rss = random_state.duplicate(nb_images+5)
alphas = self.alpha.draw_samples((nb_images,), random_state=rss[-5])
sigmas = self.sigma.draw_samples((nb_images,), random_state=rss[-4])
orders = self.order.draw_samples((nb_images,), random_state=rss[-3])
cvals = self.cval.draw_samples((nb_images,), random_state=rss[-2])
modes = self.mode.draw_samples((nb_images,), random_state=rss[-1])
return _ElasticTransformationSamplingResult(
rss[0:-5], alphas, sigmas, orders, cvals, modes)
def _augment_batch(self, batch, random_state, parents, hooks):
# pylint: disable=invalid-name
if batch.images is not None:
iadt.gate_dtypes(
batch.images,
allowed=["bool",
"uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64",
"float16", "float32", "float64"],
disallowed=["uint128", "uint256",
"int128", "int256",
"float96", "float128", "float256"],
augmenter=self)
shapes = batch.get_rowwise_shapes()
samples = self._draw_samples(len(shapes), random_state)
for i, shape in enumerate(shapes):
dx, dy = self._generate_shift_maps(
shape[0:2],
alpha=samples.alphas[i],
sigma=samples.sigmas[i],
random_state=samples.random_states[i])
if batch.images is not None:
batch.images[i] = self._augment_image_by_samples(
batch.images[i], i, samples, dx, dy)
if batch.heatmaps is not None:
batch.heatmaps[i] = self._augment_hm_or_sm_by_samples(
batch.heatmaps[i], i, samples, dx, dy, "arr_0to1",
self._cval_heatmaps, self._mode_heatmaps,
self._order_heatmaps)
if batch.segmentation_maps is not None:
batch.segmentation_maps[i] = self._augment_hm_or_sm_by_samples(
batch.segmentation_maps[i], i, samples, dx, dy, "arr",
self._cval_segmentation_maps, self._mode_segmentation_maps,
self._order_segmentation_maps)
if batch.keypoints is not None:
batch.keypoints[i] = self._augment_kpsoi_by_samples(
batch.keypoints[i], i, samples, dx, dy)
if batch.bounding_boxes is not None:
batch.bounding_boxes[i] = self._augment_bbsoi_by_samples(
batch.bounding_boxes[i], i, samples, dx, dy)
if batch.polygons is not None:
batch.polygons[i] = self._augment_psoi_by_samples(
batch.polygons[i], i, samples, dx, dy)
if batch.line_strings is not None:
batch.line_strings[i] = self._augment_lsoi_by_samples(
batch.line_strings[i], i, samples, dx, dy)
return batch
def _augment_image_by_samples(self, image, row_idx, samples, dx, dy):
# pylint: disable=invalid-name
min_value, _center_value, max_value = \
iadt.get_value_range_of_dtype(image.dtype)
cval = max(min(samples.cvals[row_idx], max_value), min_value)
input_dtype = image.dtype
if image.dtype.name == "float16":
image = image.astype(np.float32)
image_aug = self._map_coordinates(
image, dx, dy,
order=samples.orders[row_idx],
cval=cval,
mode=samples.modes[row_idx])
if image.dtype.name != input_dtype.name:
image_aug = iadt.restore_dtypes_(image_aug, input_dtype)
return image_aug
def _augment_hm_or_sm_by_samples(self, augmentable, row_idx, samples,
dx, dy, arr_attr_name, cval, mode, order):
# pylint: disable=invalid-name
cval = cval if cval is not None else samples.cvals[row_idx]
mode = mode if mode is not None else samples.modes[row_idx]
order = order if order is not None else samples.orders[row_idx]
# note that we do not have to check for zero-sized axes here,
# because _generate_shift_maps(), _map_coordinates(), .resize()
# and np.clip() are all known to handle arrays with zero-sized axes
arr = getattr(augmentable, arr_attr_name)
if arr.shape[0:2] == augmentable.shape[0:2]:
arr_warped = self._map_coordinates(
arr, dx, dy, order=order, cval=cval, mode=mode)
# interpolation in map_coordinates() can cause some values to
# be below/above 1.0, so we clip here
if order >= 3 and isinstance(augmentable, ia.HeatmapsOnImage):
arr_warped = np.clip(arr_warped, 0.0, 1.0, out=arr_warped)
setattr(augmentable, arr_attr_name, arr_warped)
else:
# Heatmaps/Segmaps do not have the same size as augmented
# images. This may result in indices of moved pixels being
# different. To prevent this, we use the same image size as
# for the base images, but that requires resizing the heatmaps
# temporarily to the image sizes.
height_orig, width_orig = arr.shape[0:2]
augmentable = augmentable.resize(augmentable.shape[0:2])
arr = getattr(augmentable, arr_attr_name)
# TODO will it produce similar results to first downscale the
# shift maps and then remap? That would make the remap
# step take less operations and would also mean that the
            # heatmaps wouldn't have to be scaled up anymore. It would
# also simplify the code as this branch could be merged
# with the one above.
arr_warped = self._map_coordinates(
arr, dx, dy, order=order, cval=cval, mode=mode)
# interpolation in map_coordinates() can cause some values to
# be below/above 1.0, so we clip here
if order >= 3 and isinstance(augmentable, ia.HeatmapsOnImage):
arr_warped = np.clip(arr_warped, 0.0, 1.0, out=arr_warped)
setattr(augmentable, arr_attr_name, arr_warped)
augmentable = augmentable.resize((height_orig, width_orig))
return augmentable
def _augment_kpsoi_by_samples(self, kpsoi, row_idx, samples, dx, dy):
# pylint: disable=misplaced-comparison-constant, invalid-name
height, width = kpsoi.shape[0:2]
alpha = samples.alphas[row_idx]
sigma = samples.sigmas[row_idx]
# TODO add test for keypoint alignment when keypoints are empty
# Note: this block must be placed after _generate_shift_maps() to
# keep samples aligned
        # Note: we should stop early here for zero-sized axes, even though
# there is a height/width check for each keypoint, because the
# channel number can also be zero
image_has_zero_sized_axes = (0 in kpsoi.shape)
params_below_thresh = (
alpha <= self.KEYPOINT_AUG_ALPHA_THRESH
or sigma <= self.KEYPOINT_AUG_SIGMA_THRESH)
if kpsoi.empty or image_has_zero_sized_axes or params_below_thresh:
# ElasticTransformation does not change the shape, hence we can
# skip the below steps
return kpsoi
for kp in kpsoi.keypoints:
within_image_plane = (0 <= kp.x < width and 0 <= kp.y < height)
if within_image_plane:
kp_neighborhood = kp.generate_similar_points_manhattan(
self.NB_NEIGHBOURING_KEYPOINTS,
self.NEIGHBOURING_KEYPOINTS_DISTANCE,
return_array=True
)
# We can clip here, because we made sure above that the
# keypoint is inside the image plane. Keypoints at the
# bottom row or right columns might be rounded outside
# the image plane, which we prevent here. We reduce
                # neighbours to only those within the image plane, as we
                # only know where to move points that lie inside the image
                # plane.
xx = np.round(kp_neighborhood[:, 0]).astype(np.int32)
yy = np.round(kp_neighborhood[:, 1]).astype(np.int32)
inside_image_mask = np.logical_and(
np.logical_and(0 <= xx, xx < width),
np.logical_and(0 <= yy, yy < height)
)
xx = xx[inside_image_mask]
yy = yy[inside_image_mask]
xxyy = np.concatenate(
[xx[:, np.newaxis], yy[:, np.newaxis]],
axis=1)
xxyy_aug = np.copy(xxyy).astype(np.float32)
xxyy_aug[:, 0] += dx[yy, xx]
xxyy_aug[:, 1] += dy[yy, xx]
med = ia.compute_geometric_median(xxyy_aug)
# uncomment to use average instead of median
# med = np.average(xxyy_aug, 0)
kp.x = med[0]
kp.y = med[1]
return kpsoi
def _augment_psoi_by_samples(self, psoi, row_idx, samples, dx, dy):
# pylint: disable=invalid-name
func = functools.partial(self._augment_kpsoi_by_samples,
row_idx=row_idx, samples=samples, dx=dx, dy=dy)
return self._apply_to_polygons_as_keypoints(
psoi, func, recoverer=self.polygon_recoverer)
def _augment_lsoi_by_samples(self, lsoi, row_idx, samples, dx, dy):
# pylint: disable=invalid-name
func = functools.partial(self._augment_kpsoi_by_samples,
row_idx=row_idx, samples=samples, dx=dx, dy=dy)
return self._apply_to_cbaois_as_keypoints(lsoi, func)
def _augment_bbsoi_by_samples(self, bbsoi, row_idx, samples, dx, dy):
# pylint: disable=invalid-name
func = functools.partial(self._augment_kpsoi_by_samples,
row_idx=row_idx, samples=samples, dx=dx, dy=dy)
return self._apply_to_cbaois_as_keypoints(bbsoi, func)
def get_parameters(self):
"""See :func:`imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.alpha, self.sigma, self.order, self.cval, self.mode]
@classmethod
def _generate_shift_maps(cls, shape, alpha, sigma, random_state):
# pylint: disable=protected-access, invalid-name
assert len(shape) == 2, ("Expected 2d shape, got %s." % (shape,))
ksize = blur_lib._compute_gaussian_blur_ksize(sigma)
ksize = ksize + 1 if ksize % 2 == 0 else ksize
padding = ksize
h, w = shape[0:2]
h_pad = h + 2*padding
w_pad = w + 2*padding
# The step of random number generation could be batched, so that
        # random numbers are sampled once for the whole batch. That would
        # avoid creating many random_states.
dxdy_unsmoothed = random_state.random((2 * h_pad, w_pad)) * 2 - 1
dx_unsmoothed = dxdy_unsmoothed[0:h_pad, :]
dy_unsmoothed = dxdy_unsmoothed[h_pad:, :]
# TODO could this also work with an average blur? would probably be
# faster
dx = blur_lib.blur_gaussian_(dx_unsmoothed, sigma) * alpha
dy = blur_lib.blur_gaussian_(dy_unsmoothed, sigma) * alpha
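        # In other words: each shift map is uniform noise in [-1, 1] that is
        # gaussian-smoothed with the given sigma and then scaled by alpha.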
if padding > 0:
dx = dx[padding:-padding, padding:-padding]
dy = dy[padding:-padding, padding:-padding]
return dx, dy
@classmethod
def _map_coordinates(cls, image, dx, dy, order=1, cval=0, mode="constant"):
"""Remap pixels in an image according to x/y shift maps.
dtype support::
if (backend="scipy" and order=0)::
* ``uint8``: yes
* ``uint16``: yes
* ``uint32``: yes
* ``uint64``: no (1)
* ``int8``: yes
* ``int16``: yes
* ``int32``: yes
* ``int64``: no (2)
* ``float16``: yes
* ``float32``: yes
* ``float64``: yes
* ``float128``: no (3)
* ``bool``: yes
- (1) produces array filled with only 0
- (2) produces array filled with <min_value> when testing
with <max_value>
- (3) causes: 'data type no supported'
if (backend="scipy" and order>0)::
* ``uint8``: yes (1)
* ``uint16``: yes (1)
* ``uint32``: yes (1)
* ``uint64``: yes (1)
* ``int8``: yes (1)
* ``int16``: yes (1)
* ``int32``: yes (1)
* ``int64``: yes (1)
* ``float16``: yes (1)
* ``float32``: yes (1)
* ``float64``: yes (1)
* ``float128``: no (2)
* ``bool``: yes
- (1) rather loose test, to avoid having to re-compute the
interpolation
- (2) causes: 'data type no supported'
if (backend="cv2" and order=0)::
* ``uint8``: yes
* ``uint16``: yes
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: yes
* ``int16``: yes
* ``int32``: yes
* ``int64``: no (2)
* ``float16``: yes
* ``float32``: yes
* ``float64``: yes
* ``float128``: no (3)
* ``bool``: no (4)
- (1) causes: src data type = 6 is not supported
- (2) silently converts to int32
- (3) causes: src data type = 13 is not supported
- (4) causes: src data type = 0 is not supported
if (backend="cv2" and order=1)::
* ``uint8``: yes
* ``uint16``: yes
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: no (2)
* ``int16``: no (2)
* ``int32``: no (2)
* ``int64``: no (2)
* ``float16``: yes
* ``float32``: yes
* ``float64``: yes
* ``float128``: no (3)
* ``bool``: no (4)
- (1) causes: src data type = 6 is not supported
- (2) causes: OpenCV(3.4.5) (...)/imgwarp.cpp:1805:
error: (-215:Assertion failed) ifunc != 0 in function
'remap'
- (3) causes: src data type = 13 is not supported
- (4) causes: src data type = 0 is not supported
if (backend="cv2" and order>=2)::
* ``uint8``: yes
* ``uint16``: yes
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: no (2)
* ``int16``: yes
* ``int32``: no (2)
* ``int64``: no (2)
* ``float16``: yes
* ``float32``: yes
* ``float64``: yes
* ``float128``: no (3)
* ``bool``: no (4)
- (1) causes: src data type = 6 is not supported
- (2) causes: OpenCV(3.4.5) (...)/imgwarp.cpp:1805:
error: (-215:Assertion failed) ifunc != 0 in function
'remap'
- (3) causes: src data type = 13 is not supported
- (4) causes: src data type = 0 is not supported
"""
# pylint: disable=invalid-name
if image.size == 0:
return np.copy(image)
if order == 0 and image.dtype.name in ["uint64", "int64"]:
raise Exception(
"dtypes uint64 and int64 are only supported in "
"ElasticTransformation for order=0, got order=%d with "
"dtype=%s." % (order, image.dtype.name))
input_dtype = image.dtype
if image.dtype.name == "bool":
image = image.astype(np.float32)
elif order == 1 and image.dtype.name in ["int8", "int16", "int32"]:
image = image.astype(np.float64)
elif order >= 2 and image.dtype.name == "int8":
image = image.astype(np.int16)
elif order >= 2 and image.dtype.name == "int32":
image = image.astype(np.float64)
shrt_max = 32767 # maximum of datatype `short`
backend = "cv2"
if order == 0:
bad_dtype_cv2 = (
image.dtype.name in [
"uint32", "uint64",
"int64",
"float128",
"bool"]
)
elif order == 1:
bad_dtype_cv2 = (
image.dtype.name in [
"uint32", "uint64",
"int8", "int16", "int32", "int64",
"float128",
"bool"]
)
else:
bad_dtype_cv2 = (
image.dtype.name in [
"uint32", "uint64",
"int8", "int32", "int64",
"float128",
"bool"]
)
bad_dx_shape_cv2 = (dx.shape[0] >= shrt_max or dx.shape[1] >= shrt_max)
bad_dy_shape_cv2 = (dy.shape[0] >= shrt_max or dy.shape[1] >= shrt_max)
if bad_dtype_cv2 or bad_dx_shape_cv2 or bad_dy_shape_cv2:
backend = "scipy"
assert image.ndim == 3, (
"Expected 3-dimensional image, got %d dimensions." % (image.ndim,))
result = np.copy(image)
height, width = image.shape[0:2]
if backend == "scipy":
h, w = image.shape[0:2]
y, x = np.meshgrid(
np.arange(h).astype(np.float32),
np.arange(w).astype(np.float32),
indexing="ij")
x_shifted = x + (-1) * dx
y_shifted = y + (-1) * dy
for c in sm.xrange(image.shape[2]):
remapped_flat = ndimage.interpolation.map_coordinates(
image[..., c],
(y_shifted.flatten(), x_shifted.flatten()),
order=order,
cval=cval,
mode=mode
)
remapped = remapped_flat.reshape((height, width))
result[..., c] = remapped
else:
h, w, nb_channels = image.shape
y, x = np.meshgrid(
np.arange(h).astype(np.float32),
np.arange(w).astype(np.float32),
indexing="ij")
x_shifted = x + (-1) * dx
y_shifted = y + (-1) * dy
if image.dtype.kind == "f":
cval = float(cval)
else:
cval = int(cval)
border_mode = cls._MAPPING_MODE_SCIPY_CV2[mode]
interpolation = cls._MAPPING_ORDER_SCIPY_CV2[order]
is_nearest_neighbour = (interpolation == cv2.INTER_NEAREST)
map1, map2 = cv2.convertMaps(
x_shifted, y_shifted, cv2.CV_16SC2,
nninterpolation=is_nearest_neighbour)
# remap only supports up to 4 channels
if nb_channels <= 4:
result = cv2.remap(
image, map1, map2, interpolation=interpolation,
borderMode=border_mode, borderValue=cval)
if image.ndim == 3 and result.ndim == 2:
result = result[..., np.newaxis]
else:
current_chan_idx = 0
result = []
while current_chan_idx < nb_channels:
channels = image[..., current_chan_idx:current_chan_idx+4]
result_c = cv2.remap(
channels, map1, map2, interpolation=interpolation,
borderMode=border_mode, borderValue=cval)
if result_c.ndim == 2:
result_c = result_c[..., np.newaxis]
result.append(result_c)
current_chan_idx += 4
result = np.concatenate(result, axis=2)
if result.dtype.name != input_dtype.name:
result = iadt.restore_dtypes_(result, input_dtype)
return result
class Rot90(meta.Augmenter):
"""
Rotate images clockwise by multiples of 90 degrees.
This could also be achieved using ``Affine``, but ``Rot90`` is
significantly more efficient.
dtype support::
if (keep_size=False)::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: yes; tested
* ``uint64``: yes; tested
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested
* ``int64``: yes; tested
* ``float16``: yes; tested
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: yes; tested
* ``bool``: yes; tested
if (keep_size=True)::
minimum of (
``imgaug.augmenters.geometric.Rot90(keep_size=False)``,
:func:`imgaug.imgaug.imresize_many_images`
)
Parameters
----------
    k : int or list of int or tuple of int or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
How often to rotate clockwise by 90 degrees.
* If a single ``int``, then that value will be used for all images.
* If a tuple ``(a, b)``, then a random value will be uniformly
sampled per image from the discrete interval ``[a..b]``.
* If a list, then for each image a random value will be sampled
from that list.
        * If ``imgaug.ALL``, then equivalent to list ``[0, 1, 2, 3]``.
* If ``StochasticParameter``, then that parameter is queried per
image to sample the value to use.
keep_size : bool, optional
After rotation by an odd-valued `k` (e.g. 1 or 3), the resulting image
may have a different height/width than the original image.
If this parameter is set to ``True``, then the rotated
image will be resized to the input image's size. Note that this might
also cause the augmented image to look distorted.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Rot90(1)
Rotate all images by 90 degrees.
Resize these images afterwards to keep the size that they had before
augmentation.
This may cause the images to look distorted.
>>> aug = iaa.Rot90([1, 3])
Rotate all images by 90 or 270 degrees.
Resize these images afterwards to keep the size that they had before
augmentation.
This may cause the images to look distorted.
>>> aug = iaa.Rot90((1, 3))
Rotate all images by 90, 180 or 270 degrees.
Resize these images afterwards to keep the size that they had before
augmentation.
This may cause the images to look distorted.
>>> aug = iaa.Rot90((1, 3), keep_size=False)
Rotate all images by 90, 180 or 270 degrees.
Does not resize to the original image size afterwards, i.e. each image's
size may change.
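    A further illustrative example, using only parameters documented above:

    >>> import imgaug as ia
    >>> aug = iaa.Rot90(ia.ALL, keep_size=False)

    Rotate all images by 0, 90, 180 or 270 degrees (picked randomly per
    image) and keep the resulting image sizes instead of resizing back to
    the input sizes.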
"""
def __init__(self, k, keep_size=True, name=None, deterministic=False,
random_state=None):
super(Rot90, self).__init__(
name=name, deterministic=deterministic, random_state=random_state)
if k == ia.ALL:
k = [0, 1, 2, 3]
self.k = iap.handle_discrete_param(
k, "k", value_range=None, tuple_to_uniform=True,
list_to_choice=True, allow_floats=False)
self.keep_size = keep_size
def _draw_samples(self, nb_images, random_state):
return self.k.draw_samples((nb_images,), random_state=random_state)
def _augment_batch(self, batch, random_state, parents, hooks):
# pylint: disable=invalid-name
ks = self._draw_samples(batch.nb_rows, random_state)
if batch.images is not None:
batch.images = self._augment_arrays_by_samples(
batch.images, ks, self.keep_size, ia.imresize_single_image)
if batch.heatmaps is not None:
batch.heatmaps = self._augment_maps_by_samples(
batch.heatmaps, "arr_0to1", ks)
if batch.segmentation_maps is not None:
batch.segmentation_maps = self._augment_maps_by_samples(
batch.segmentation_maps, "arr", ks)
for augm_name in ["keypoints", "bounding_boxes", "polygons",
"line_strings"]:
augm_value = getattr(batch, augm_name)
if augm_value is not None:
func = functools.partial(
self._augment_keypoints_by_samples,
ks=ks)
cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)
setattr(batch, augm_name, cbaois)
return batch
@classmethod
def _augment_arrays_by_samples(cls, arrs, ks, keep_size, resize_func):
# pylint: disable=invalid-name
input_was_array = ia.is_np_array(arrs)
input_dtype = arrs.dtype if input_was_array else None
arrs_aug = []
for arr, k_i in zip(arrs, ks):
# adding axes here rotates clock-wise instead of ccw
arr_aug = np.rot90(arr, k_i, axes=(1, 0))
do_resize = (
keep_size
and arr.shape != arr_aug.shape
and resize_func is not None)
if do_resize:
arr_aug = resize_func(arr_aug, arr.shape[0:2])
arrs_aug.append(arr_aug)
if keep_size and input_was_array:
n_shapes = len({arr.shape for arr in arrs_aug})
if n_shapes == 1:
arrs_aug = np.array(arrs_aug, dtype=input_dtype)
return arrs_aug
def _augment_maps_by_samples(self, augmentables, arr_attr_name, ks):
# pylint: disable=invalid-name
arrs = [getattr(map_i, arr_attr_name) for map_i in augmentables]
arrs_aug = self._augment_arrays_by_samples(
arrs, ks, self.keep_size, None)
maps_aug = []
gen = zip(augmentables, arrs, arrs_aug, ks)
for augmentable_i, arr, arr_aug, k_i in gen:
shape_orig = arr.shape
setattr(augmentable_i, arr_attr_name, arr_aug)
if self.keep_size:
augmentable_i = augmentable_i.resize(shape_orig[0:2])
elif k_i % 2 == 1:
h, w = augmentable_i.shape[0:2]
augmentable_i.shape = tuple(
[w, h] + list(augmentable_i.shape[2:]))
else:
                # keep_size was False, but the rotation was by an even
                # number of 90 degree steps, hence height and width do not
                # change
pass
maps_aug.append(augmentable_i)
return maps_aug
def _augment_keypoints_by_samples(self, keypoints_on_images, ks):
# pylint: disable=invalid-name
result = []
for kpsoi_i, k_i in zip(keypoints_on_images, ks):
shape_orig = kpsoi_i.shape
if (k_i % 4) == 0:
result.append(kpsoi_i)
else:
k_i = int(k_i) % 4 # this is also correct when k_i is negative
h, w = kpsoi_i.shape[0:2]
h_aug, w_aug = (h, w) if (k_i % 2) == 0 else (w, h)
for kp in kpsoi_i.keypoints:
y, x = kp.y, kp.x
yr, xr = y, x
wr, hr = w, h
for _ in sm.xrange(k_i):
# for int coordinates this would instead be
# xr, yr = (hr - 1) - yr, xr
# here we assume that coordinates are always
# subpixel-accurate
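                        # Illustrative example (assumed values): a point at
                        # (x=2, y=5) in an image of height 10 maps after one
                        # clockwise 90 degree step to (x=10-5, y=2) = (5, 2).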
xr, yr = hr - yr, xr
wr, hr = hr, wr
kp.x = xr
kp.y = yr
shape_aug = tuple([h_aug, w_aug] + list(kpsoi_i.shape[2:]))
kpsoi_i.shape = shape_aug
if self.keep_size and (h, w) != (h_aug, w_aug):
kpsoi_i = kpsoi_i.on_(shape_orig)
kpsoi_i.shape = shape_orig
result.append(kpsoi_i)
return result
def get_parameters(self):
"""See :func:`imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.k, self.keep_size]
# TODO semipolar
class WithPolarWarping(meta.Augmenter):
"""Augmenter that applies other augmenters in a polar-transformed space.
This augmenter first transforms an image into a polar representation,
then applies its child augmenter, then transforms back to cartesian
space. The polar representation is still in the image's input dtype
(i.e. ``uint8`` stays ``uint8``) and can be visualized. It can be thought
of as an "unrolled" version of the image, where previously circular lines
appear straight. Hence, applying child augmenters in that space can lead
to circular effects. E.g. replacing rectangular pixel areas in the polar
representation with black pixels will lead to curved black areas in
the cartesian result.
This augmenter can create new pixels in the image. It will fill these
with black pixels. For segmentation maps it will fill with class
id ``0``. For heatmaps it will fill with ``0.0``.
This augmenter is limited to arrays with a height and/or width of
``32767`` or less.
.. warning::
When augmenting coordinates in polar representation, it is possible
that these are shifted outside of the polar image, but are inside the
image plane after transforming back to cartesian representation,
usually on newly created pixels (i.e. black backgrounds).
These coordinates are currently not removed. It is recommended to
not use very strong child transformations when also augmenting
coordinate-based augmentables.
.. warning::
For bounding boxes, this augmenter suffers from the same problem as
affine rotations applied to bounding boxes, i.e. the resulting
bounding boxes can have unintuitive (seemingly wrong) appearance.
        This is because coordinates inside the bounding box that do not
        fall on the object (i.e. that are actually background) are also
        "rotated".
It is recommended to use this augmenter with caution when augmenting
bounding boxes.
.. warning::
For polygons, this augmenter should not be combined with
augmenters that perform automatic polygon recovery for invalid
polygons, as the polygons will frequently appear broken in polar
representation and their "fixed" version will be very broken in
cartesian representation. Augmenters that perform such polygon
recovery are currently ``PerspectiveTransform``, ``PiecewiseAffine``
and ``ElasticTransformation``.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested
* ``int64``: no (2)
* ``float16``: yes; tested (3)
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no (1)
* ``bool``: yes; tested (4)
- (1) OpenCV produces error
``TypeError: Expected cv::UMat for argument 'src'``
- (2) OpenCV produces array of nothing but zeros.
        - (3) Mapped to ``float32``.
- (4) Mapped to ``uint8``.
Parameters
----------
children : imgaug.augmenters.meta.Augmenter or list of imgaug.augmenters.meta.Augmenter or None, optional
One or more augmenters to apply to images after they were transformed
to polar representation.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.WithPolarWarping(iaa.CropAndPad(percent=(-0.1, 0.1)))
Apply cropping and padding in polar representation, then warp back to
cartesian representation.
>>> aug = iaa.WithPolarWarping(
>>> iaa.Affine(
>>> translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
>>> rotate=(-35, 35),
>>> scale=(0.8, 1.2),
>>> shear={"x": (-15, 15), "y": (-15, 15)}
>>> )
>>> )
Apply affine transformations in polar representation.
>>> aug = iaa.WithPolarWarping(iaa.AveragePooling((2, 8)))
Apply average pooling in polar representation. This leads to circular
bins.
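    A further sketch, using ``CoarseDropout`` merely as an assumed example
    of a child augmenter:

    >>> aug = iaa.WithPolarWarping(iaa.CoarseDropout(0.1, size_percent=0.05))

    Drop coarse rectangular areas of pixels in polar representation, which
    -- as described above -- leads to curved dropped areas in the cartesian
    output.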
"""
def __init__(self, children, name=None, deterministic=False,
random_state=None):
super(WithPolarWarping, self).__init__(
name=name, deterministic=deterministic, random_state=random_state)
self.children = meta.handle_children_list(children, self.name, "then")
def _augment_batch(self, batch, random_state, parents, hooks):
if batch.images is not None:
iadt.gate_dtypes(
batch.images,
allowed=["bool",
"uint8", "uint16",
"int8", "int16", "int32",
"float16", "float32", "float64"],
disallowed=["uint32", "uint64", "uint128", "uint256",
"int64", "int128", "int256",
"float96", "float128", "float256"],
augmenter=self)
with batch.propagation_hooks_ctx(self, hooks, parents):
batch, inv_data_bbs = self._convert_bbs_to_polygons_(batch)
inv_data = {}
for column in batch.columns:
func = getattr(self, "_warp_%s_" % (column.name,))
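                # e.g. the "images" column resolves to self._warp_images_()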
col_aug, inv_data_col = func(column.value)
setattr(batch, column.attr_name, col_aug)
inv_data[column.name] = inv_data_col
batch = self.children.augment_batch(batch,
parents=parents + [self],
hooks=hooks)
for column in batch.columns:
func = getattr(self, "_invert_warp_%s_" % (column.name,))
col_unaug = func(column.value, inv_data[column.name])
setattr(batch, column.attr_name, col_unaug)
batch = self._invert_convert_bbs_to_polygons_(batch, inv_data_bbs)
return batch
@classmethod
def _convert_bbs_to_polygons_(cls, batch):
batch_contained_polygons = batch.polygons is not None
if batch.bounding_boxes is None:
return batch, (False, batch_contained_polygons)
psois = [bbsoi.to_polygons_on_image() for bbsoi in batch.bounding_boxes]
psois = [psoi.subdivide_(2) for psoi in psois]
# Mark Polygons that are really Bounding Boxes
for psoi in psois:
for polygon in psoi.polygons:
if polygon.label is None:
polygon.label = "$$IMGAUG_BB_AS_POLYGON"
else:
polygon.label = polygon.label + ";$$IMGAUG_BB_AS_POLYGON"
# Merge Fake-Polygons into existing Polygons
if batch.polygons is None:
batch.polygons = psois
else:
for psoi, bbs_as_psoi in zip(batch.polygons, psois):
assert psoi.shape == bbs_as_psoi.shape, (
"Expected polygons and bounding boxes to have the same "
".shape value, got %s and %s." % (psoi.shape,
bbs_as_psoi.shape))
psoi.polygons.extend(bbs_as_psoi.polygons)
batch.bounding_boxes = None
return batch, (True, batch_contained_polygons)
@classmethod
def _invert_convert_bbs_to_polygons_(cls, batch, inv_data):
batch_contained_bbs, batch_contained_polygons = inv_data
if not batch_contained_bbs:
return batch
bbsois = []
for psoi in batch.polygons:
polygons = []
bbs = []
for polygon in psoi.polygons:
is_bb = False
if polygon.label is None:
is_bb = False
elif polygon.label == "$$IMGAUG_BB_AS_POLYGON":
polygon.label = None
is_bb = True
elif polygon.label.endswith(";$$IMGAUG_BB_AS_POLYGON"):
polygon.label = \
polygon.label[:-len(";$$IMGAUG_BB_AS_POLYGON")]
is_bb = True
if is_bb:
bbs.append(polygon.to_bounding_box())
else:
polygons.append(polygon)
psoi.polygons = polygons
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=psoi.shape)
bbsois.append(bbsoi)
batch.bounding_boxes = bbsois
if not batch_contained_polygons:
batch.polygons = None
return batch
@classmethod
def _warp_images_(cls, images):
return cls._warp_arrays(images, False)
@classmethod
def _invert_warp_images_(cls, images_warped, inv_data):
return cls._invert_warp_arrays(images_warped, False, inv_data)
@classmethod
def _warp_heatmaps_(cls, heatmaps):
return cls._warp_maps_(heatmaps, "arr_0to1", False)
@classmethod
def _invert_warp_heatmaps_(cls, heatmaps_warped, inv_data):
return cls._invert_warp_maps_(heatmaps_warped, "arr_0to1", False,
inv_data)
@classmethod
def _warp_segmentation_maps_(cls, segmentation_maps):
return cls._warp_maps_(segmentation_maps, "arr", True)
@classmethod
def _invert_warp_segmentation_maps_(cls, segmentation_maps_warped,
inv_data):
return cls._invert_warp_maps_(segmentation_maps_warped, "arr", True,
inv_data)
@classmethod
def _warp_keypoints_(cls, kpsois):
return cls._warp_cbaois_(kpsois)
@classmethod
def _invert_warp_keypoints_(cls, kpsois_warped, image_shapes_orig):
return cls._invert_warp_cbaois_(kpsois_warped, image_shapes_orig)
@classmethod
def _warp_bounding_boxes_(cls, bbsois): # pylint: disable=useless-return
assert bbsois is None, ("Expected BBs to have been converted "
"to polygons.")
return None
@classmethod
def _invert_warp_bounding_boxes_(cls, bbsois_warped, _image_shapes_orig): # pylint: disable=useless-return
assert bbsois_warped is None, ("Expected BBs to have been converted "
"to polygons.")
return None
@classmethod
def _warp_polygons_(cls, psois):
return cls._warp_cbaois_(psois)
@classmethod
def _invert_warp_polygons_(cls, psois_warped, image_shapes_orig):
return cls._invert_warp_cbaois_(psois_warped, image_shapes_orig)
@classmethod
def _warp_line_strings_(cls, lsois):
return cls._warp_cbaois_(lsois)
@classmethod
def _invert_warp_line_strings_(cls, lsois_warped, image_shapes_orig):
return cls._invert_warp_cbaois_(lsois_warped, image_shapes_orig)
@classmethod
def _warp_arrays(cls, arrays, interpolation_nearest):
if arrays is None:
return None, None
flags = cv2.WARP_FILL_OUTLIERS + cv2.WARP_POLAR_LINEAR
if interpolation_nearest:
flags += cv2.INTER_NEAREST
arrays_warped = []
shapes_orig = []
for arr in arrays:
if 0 in arr.shape:
arrays_warped.append(arr)
shapes_orig.append(arr.shape)
continue
input_dtype = arr.dtype.name
if input_dtype == "bool":
arr = arr.astype(np.uint8) * 255
elif input_dtype == "float16":
arr = arr.astype(np.float32)
height, width = arr.shape[0:2]
# remap limitation, see docs for warpPolar()
assert height <= 32767 and width <= 32767, (
"WithPolarWarping._warp_arrays() can currently only handle "
"arrays with axis sizes below 32767, but got shape %s. This "
"is an OpenCV limitation." % (arr.shape,))
dest_size = (0, 0)
center_xy = (width/2, height/2)
max_radius = np.sqrt((height/2.0)**2.0 + (width/2.0)**2.0)
if arr.ndim == 3 and arr.shape[-1] > 512:
arr_warped = np.stack(
[cv2.warpPolar(arr[..., c_idx], dest_size, center_xy,
max_radius, flags)
for c_idx in np.arange(arr.shape[-1])],
axis=-1)
else:
arr_warped = cv2.warpPolar(arr, dest_size, center_xy,
max_radius, flags)
if arr_warped.ndim == 2 and arr.ndim == 3:
arr_warped = arr_warped[:, :, np.newaxis]
if input_dtype == "bool":
arr_warped = (arr_warped > 128)
elif input_dtype == "float16":
arr_warped = arr_warped.astype(np.float16)
arrays_warped.append(arr_warped)
shapes_orig.append(arr.shape)
return arrays_warped, shapes_orig
@classmethod
def _invert_warp_arrays(cls, arrays_warped, interpolation_nearest,
inv_data):
shapes_orig = inv_data
if arrays_warped is None:
return None
flags = (cv2.WARP_FILL_OUTLIERS + cv2.WARP_POLAR_LINEAR
+ cv2.WARP_INVERSE_MAP)
if interpolation_nearest:
flags += cv2.INTER_NEAREST
        # TODO this does almost the same per iteration as _warp_arrays();
        # make it DRY
arrays_inv = []
for arr_warped, shape_orig in zip(arrays_warped, shapes_orig):
if 0 in arr_warped.shape:
arrays_inv.append(arr_warped)
continue
input_dtype = arr_warped.dtype.name
if input_dtype == "bool":
arr_warped = arr_warped.astype(np.uint8) * 255
elif input_dtype == "float16":
arr_warped = arr_warped.astype(np.float32)
height, width = shape_orig[0:2]
# remap limitation, see docs for warpPolar()
assert (arr_warped.shape[0] <= 32767
and arr_warped.shape[1] <= 32767), (
"WithPolarWarping._warp_arrays() can currently only "
"handle arrays with axis sizes below 32767, but got "
"shape %s. This is an OpenCV limitation." % (
arr_warped.shape,))
dest_size = (width, height)
center_xy = (width/2, height/2)
max_radius = np.sqrt((height/2.0)**2.0 + (width/2.0)**2.0)
if arr_warped.ndim == 3 and arr_warped.shape[-1] > 512:
arr_inv = np.stack(
[cv2.warpPolar(arr_warped[..., c_idx], dest_size,
center_xy, max_radius, flags)
for c_idx in np.arange(arr_warped.shape[-1])],
axis=-1)
else:
arr_inv = cv2.warpPolar(arr_warped, dest_size, center_xy,
max_radius, flags)
if arr_inv.ndim == 2 and arr_warped.ndim == 3:
arr_inv = arr_inv[:, :, np.newaxis]
if input_dtype == "bool":
arr_inv = (arr_inv > 128)
elif input_dtype == "float16":
arr_inv = arr_inv.astype(np.float16)
arrays_inv.append(arr_inv)
return arrays_inv
@classmethod
def _warp_maps_(cls, maps, arr_attr_name, interpolation_nearest):
if maps is None:
return None, None
skipped = [False] * len(maps)
arrays = []
shapes_imgs_orig = []
for i, map_i in enumerate(maps):
if 0 in map_i.shape:
skipped[i] = True
arrays.append(np.zeros((0, 0), dtype=np.int32))
shapes_imgs_orig.append(map_i.shape)
else:
arrays.append(getattr(map_i, arr_attr_name))
shapes_imgs_orig.append(map_i.shape)
arrays_warped, warparr_inv_data = cls._warp_arrays(
arrays, interpolation_nearest)
shapes_imgs_warped = cls._warp_shape_tuples(shapes_imgs_orig)
for i, map_i in enumerate(maps):
if not skipped[i]:
map_i.shape = shapes_imgs_warped[i]
setattr(map_i, arr_attr_name, arrays_warped[i])
return maps, (shapes_imgs_orig, warparr_inv_data, skipped)
@classmethod
def _invert_warp_maps_(cls, maps_warped, arr_attr_name,
interpolation_nearest, invert_data):
if maps_warped is None:
return None
shapes_imgs_orig, warparr_inv_data, skipped = invert_data
arrays_warped = []
for i, map_warped in enumerate(maps_warped):
if skipped[i]:
arrays_warped.append(np.zeros((0, 0), dtype=np.int32))
else:
arrays_warped.append(getattr(map_warped, arr_attr_name))
arrays_inv = cls._invert_warp_arrays(arrays_warped,
interpolation_nearest,
warparr_inv_data)
for i, map_i in enumerate(maps_warped):
if not skipped[i]:
map_i.shape = shapes_imgs_orig[i]
setattr(map_i, arr_attr_name, arrays_inv[i])
return maps_warped
@classmethod
def _warp_coords(cls, coords, image_shapes):
if coords is None:
return None, None
image_shapes_warped = cls._warp_shape_tuples(image_shapes)
flags = cv2.WARP_POLAR_LINEAR
coords_warped = []
for coords_i, shape, shape_warped in zip(coords, image_shapes,
image_shapes_warped):
if 0 in shape:
coords_warped.append(coords_i)
continue
height, width = shape[0:2]
dest_size = (shape_warped[1], shape_warped[0])
center_xy = (width/2, height/2)
max_radius = np.sqrt((height/2.0)**2.0 + (width/2.0)**2.0)
coords_i_warped = cls.warpPolarCoords(
coords_i, dest_size, center_xy, max_radius, flags)
coords_warped.append(coords_i_warped)
return coords_warped, image_shapes
@classmethod
def _invert_warp_coords(cls, coords_warped, image_shapes_after_aug,
inv_data):
image_shapes_orig = inv_data
if coords_warped is None:
return None
flags = cv2.WARP_POLAR_LINEAR + cv2.WARP_INVERSE_MAP
coords_inv = []
gen = enumerate(zip(coords_warped, image_shapes_orig))
for i, (coords_i_warped, shape_orig) in gen:
if 0 in shape_orig:
coords_inv.append(coords_i_warped)
continue
shape_warped = image_shapes_after_aug[i]
height, width = shape_orig[0:2]
dest_size = (shape_warped[1], shape_warped[0])
center_xy = (width/2, height/2)
max_radius = np.sqrt((height/2.0)**2.0 + (width/2.0)**2.0)
coords_i_inv = cls.warpPolarCoords(coords_i_warped,
dest_size, center_xy,
max_radius, flags)
coords_inv.append(coords_i_inv)
return coords_inv
@classmethod
def _warp_cbaois_(cls, cbaois):
if cbaois is None:
return None, None
coords = [cbaoi.to_xy_array() for cbaoi in cbaois]
image_shapes = [cbaoi.shape for cbaoi in cbaois]
image_shapes_warped = cls._warp_shape_tuples(image_shapes)
coords_warped, inv_data = cls._warp_coords(coords, image_shapes)
for i, (cbaoi, coords_i_warped) in enumerate(zip(cbaois,
coords_warped)):
cbaoi = cbaoi.fill_from_xy_array_(coords_i_warped)
cbaoi.shape = image_shapes_warped[i]
cbaois[i] = cbaoi
return cbaois, inv_data
@classmethod
def _invert_warp_cbaois_(cls, cbaois_warped, image_shapes_orig):
if cbaois_warped is None:
return None
coords = [cbaoi.to_xy_array() for cbaoi in cbaois_warped]
image_shapes_after_aug = [cbaoi.shape for cbaoi in cbaois_warped]
coords_warped = cls._invert_warp_coords(coords, image_shapes_after_aug,
image_shapes_orig)
cbaois = cbaois_warped
for i, (cbaoi, coords_i_warped) in enumerate(zip(cbaois,
coords_warped)):
cbaoi = cbaoi.fill_from_xy_array_(coords_i_warped)
cbaoi.shape = image_shapes_orig[i]
cbaois[i] = cbaoi
return cbaois
@classmethod
def _warp_shape_tuples(cls, shapes):
# pylint: disable=invalid-name
pi = np.pi
result = []
for shape in shapes:
if 0 in shape:
result.append(shape)
continue
height, width = shape[0:2]
max_radius = np.sqrt((height/2.0)**2.0 + (width/2.0)**2.0)
# np.round() is here a replacement for cvRound(). It is not fully
            # clear whether the two functions behave identically in all
# situations.
# See
# https://github.com/opencv/opencv/blob/master/
# modules/core/include/opencv2/core/fast_math.hpp
# for OpenCV's implementation.
width = int(np.round(max_radius))
height = int(np.round(max_radius * pi))
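            # Illustrative example (assumed shape): a 100x200 input gives
            # max_radius = sqrt(50**2 + 100**2) ~= 111.8 and hence a polar
            # shape of roughly (351, 112) as (height, width).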
result.append(tuple([height, width] + list(shape[2:])))
return result
@classmethod
def warpPolarCoords(cls, src, dsize, center, maxRadius, flags):
# See
# https://docs.opencv.org/3.4.8/da/d54/group__imgproc__transform.html
# for the equations
# or also
# https://github.com/opencv/opencv/blob/master/modules/imgproc/src/
# imgwarp.cpp
#
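        # In short (linear case): with Kangle = dsize_height / (2*pi) and
        # Klin = dsize_width / maxRadius, the forward mapping of a point
        # relative to the center is rho = Klin * magnitude and
        # phi = Kangle * angle, while the inverse mapping recovers
        # magnitude = rho / Klin, angle = phi / Kangle and then
        # x = center_x + magnitude * cos(angle),
        # y = center_y + magnitude * sin(angle).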
# pylint: disable=invalid-name, no-else-return
assert dsize[0] > 0
assert dsize[1] > 0
dsize_width = dsize[0]
dsize_height = dsize[1]
center_x = center[0]
center_y = center[1]
        if np.bitwise_and(flags, cv2.WARP_INVERSE_MAP):
rho = src[:, 0]
phi = src[:, 1]
Kangle = dsize_height / (2*np.pi)
angleRad = phi / Kangle
if np.bitwise_and(flags, cv2.WARP_POLAR_LOG):
Klog = dsize_width / np.log(maxRadius)
magnitude = np.exp(rho / Klog)
else:
Klin = dsize_width / maxRadius
magnitude = rho / Klin
x = center_x + magnitude * np.cos(angleRad)
y = center_y + magnitude * np.sin(angleRad)
x = x[:, np.newaxis]
y = y[:, np.newaxis]
return np.concatenate([x, y], axis=1)
else:
x = src[:, 0]
y = src[:, 1]
Kangle = dsize_height / (2*np.pi)
Klin = dsize_width / maxRadius
I_x, I_y = (x - center_x, y - center_y)
magnitude_I, angle_I = cv2.cartToPolar(I_x, I_y)
phi = Kangle * angle_I
# TODO add semilog support here
rho = Klin * magnitude_I
return np.concatenate([rho, phi], axis=1)
def get_parameters(self):
"""See :func:`imgaug.augmenters.meta.Augmenter.get_parameters`."""
return []
def get_children_lists(self):
"""See :func:`imgaug.augmenters.meta.Augmenter.get_children_lists`."""
return [self.children]
def _to_deterministic(self):
aug = self.copy()
aug.children = aug.children.to_deterministic()
aug.deterministic = True
aug.random_state = self.random_state.derive_rng_()
return aug
def __str__(self):
pattern = (
"%s("
"name=%s, children=%s, deterministic=%s"
")")
return pattern % (self.__class__.__name__, self.name,
self.children, self.deterministic)
class Jigsaw(meta.Augmenter):
"""Move cells within images similar to jigsaw patterns.
.. note::
This augmenter will by default pad images until their height is a
multiple of `nb_rows`. Analogous for `nb_cols`.
.. note::
This augmenter will resize heatmaps and segmentation maps to the
image size, then apply similar padding as for the corresponding images
and resize back to the original map size. That also means that images
may change in shape (due to padding), but heatmaps/segmaps will not
change. For heatmaps/segmaps, this deviates from pad augmenters that
will change images and heatmaps/segmaps in corresponding ways and then
keep the heatmaps/segmaps at the new size.
.. warning::
This augmenter currently only supports augmentation of images,
heatmaps, segmentation maps and keypoints. Other augmentables,
i.e. bounding boxes, polygons and line strings, will result in errors.
dtype support::
See :func:`apply_jigsaw`.
Parameters
----------
nb_rows : int or list of int or tuple of int or imgaug.parameters.StochasticParameter
How many rows the jigsaw pattern should have.
* If a single ``int``, then that value will be used for all images.
* If a tuple ``(a, b)``, then a random value will be uniformly
sampled per image from the discrete interval ``[a..b]``.
* If a list, then for each image a random value will be sampled
from that list.
* If ``StochasticParameter``, then that parameter is queried per
image to sample the value to use.
nb_cols : int or list of int or tuple of int or imgaug.parameters.StochasticParameter
        How many columns the jigsaw pattern should have.
* If a single ``int``, then that value will be used for all images.
* If a tuple ``(a, b)``, then a random value will be uniformly
sampled per image from the discrete interval ``[a..b]``.
* If a list, then for each image a random value will be sampled
from that list.
* If ``StochasticParameter``, then that parameter is queried per
image to sample the value to use.
max_steps : int or list of int or tuple of int or imgaug.parameters.StochasticParameter, optional
How many steps each jigsaw cell may be moved.
* If a single ``int``, then that value will be used for all images.
* If a tuple ``(a, b)``, then a random value will be uniformly
sampled per image from the discrete interval ``[a..b]``.
* If a list, then for each image a random value will be sampled
from that list.
* If ``StochasticParameter``, then that parameter is queried per
image to sample the value to use.
allow_pad : bool, optional
Whether to allow automatically padding images until they are evenly
divisible by ``nb_rows`` and ``nb_cols``.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Jigsaw(nb_rows=10, nb_cols=10)
Create a jigsaw augmenter that splits images into ``10x10`` cells
and shifts them around by ``0`` to ``2`` steps (default setting).
>>> aug = iaa.Jigsaw(nb_rows=(1, 4), nb_cols=(1, 4))
Create a jigsaw augmenter that splits each image into ``1`` to ``4``
cells along each axis.
>>> aug = iaa.Jigsaw(nb_rows=10, nb_cols=10, max_steps=(1, 5))
Create a jigsaw augmenter that moves the cells in each image by a random
amount between ``1`` and ``5`` times (decided per image). Some images will
be barely changed, some will be fairly distorted.
"""
def __init__(self, nb_rows, nb_cols, max_steps=2, allow_pad=True,
name=None, deterministic=False, random_state=None):
super(Jigsaw, self).__init__(
name=name, deterministic=deterministic, random_state=random_state)
self.nb_rows = iap.handle_discrete_param(
nb_rows, "nb_rows", value_range=(1, None), tuple_to_uniform=True,
list_to_choice=True, allow_floats=False)
self.nb_cols = iap.handle_discrete_param(
nb_cols, "nb_cols", value_range=(1, None), tuple_to_uniform=True,
list_to_choice=True, allow_floats=False)
self.max_steps = iap.handle_discrete_param(
max_steps, "max_steps", value_range=(0, None),
tuple_to_uniform=True, list_to_choice=True, allow_floats=False)
self.allow_pad = allow_pad
def _augment_batch(self, batch, random_state, parents, hooks):
samples = self._draw_samples(batch, random_state)
# We resize here heatmaps/segmaps early to the image size in order to
# avoid problems where the jigsaw cells don't fit perfectly into
# the heatmap/segmap arrays or there are minor padding-related
# differences.
# TODO This step could most likely be avoided.
# TODO add something like
# 'with batch.maps_resized_to_image_sizes(): ...'
batch, maps_shapes_orig = self._resize_maps(batch)
if self.allow_pad:
# this is a bit more difficult than one might expect, because we
# (a) might have different numbers of rows/cols per image
# (b) might have different shapes per image
# (c) have non-image data that also requires padding
# TODO enable support for stochastic parameters in
            #      PadToMultiplesOf, then we can simply use two
# DeterministicLists here to generate rowwise values
for i in np.arange(len(samples.destinations)):
padder = size_lib.CenterPadToMultiplesOf(
width_multiple=samples.nb_cols[i],
height_multiple=samples.nb_rows[i])
row = batch.subselect_rows_by_indices([i])
row = padder.augment_batch(row, parents=parents + [self],
hooks=hooks)
batch = batch.invert_subselect_rows_by_indices_([i], row)
if batch.images is not None:
for i, image in enumerate(batch.images):
image[...] = apply_jigsaw(image, samples.destinations[i])
if batch.heatmaps is not None:
for i, heatmap in enumerate(batch.heatmaps):
heatmap.arr_0to1 = apply_jigsaw(heatmap.arr_0to1,
samples.destinations[i])
if batch.segmentation_maps is not None:
for i, segmap in enumerate(batch.segmentation_maps):
segmap.arr = apply_jigsaw(segmap.arr, samples.destinations[i])
if batch.keypoints is not None:
for i, kpsoi in enumerate(batch.keypoints):
xy = kpsoi.to_xy_array()
xy[...] = apply_jigsaw_to_coords(xy,
samples.destinations[i],
image_shape=kpsoi.shape)
kpsoi.fill_from_xy_array_(xy)
has_other_cbaoi = any([getattr(batch, attr_name) is not None
for attr_name
in ["bounding_boxes", "polygons",
"line_strings"]])
if has_other_cbaoi:
            raise NotImplementedError(
                "Jigsaw currently only supports augmentation of images, "
                "heatmaps, segmentation maps and keypoints.")
# We don't crop back to the original size, partly because it is
# rather cumbersome to implement, partly because the padded
# borders might have been moved into the inner parts of the image
batch = self._invert_resize_maps(batch, maps_shapes_orig)
return batch
def _draw_samples(self, batch, random_state):
nb_images = batch.nb_rows
nb_rows = self.nb_rows.draw_samples((nb_images,),
random_state=random_state)
nb_cols = self.nb_cols.draw_samples((nb_images,),
random_state=random_state)
max_steps = self.max_steps.draw_samples((nb_images,),
random_state=random_state)
destinations = []
for i in np.arange(nb_images):
destinations.append(
generate_jigsaw_destinations(
nb_rows[i], nb_cols[i], max_steps[i],
random_state=random_state)
)
samples = _JigsawSamples(nb_rows, nb_cols, max_steps, destinations)
return samples
@classmethod
def _resize_maps(cls, batch):
# skip computation of rowwise shapes
if batch.heatmaps is None and batch.segmentation_maps is None:
return batch, (None, None)
image_shapes = batch.get_rowwise_shapes()
batch.heatmaps, heatmaps_shapes_orig = cls._resize_maps_single_list(
batch.heatmaps, "arr_0to1", image_shapes)
batch.segmentation_maps, sm_shapes_orig = cls._resize_maps_single_list(
batch.segmentation_maps, "arr", image_shapes)
return batch, (heatmaps_shapes_orig, sm_shapes_orig)
@classmethod
def _resize_maps_single_list(cls, augmentables, arr_attr_name,
image_shapes):
if augmentables is None:
return None, None
shapes_orig = []
augms_resized = []
for augmentable, image_shape in zip(augmentables, image_shapes):
shape_orig = getattr(augmentable, arr_attr_name).shape
augm_rs = augmentable.resize(image_shape[0:2])
augms_resized.append(augm_rs)
shapes_orig.append(shape_orig)
return augms_resized, shapes_orig
@classmethod
def _invert_resize_maps(cls, batch, shapes_orig):
batch.heatmaps = cls._invert_resize_maps_single_list(
batch.heatmaps, shapes_orig[0])
batch.segmentation_maps = cls._invert_resize_maps_single_list(
batch.segmentation_maps, shapes_orig[1])
return batch
@classmethod
def _invert_resize_maps_single_list(cls, augmentables, shapes_orig):
if shapes_orig is None:
return None
augms_resized = []
for augmentable, shape_orig in zip(augmentables, shapes_orig):
augms_resized.append(augmentable.resize(shape_orig[0:2]))
return augms_resized
def get_parameters(self):
return [self.nb_rows, self.nb_cols, self.max_steps, self.allow_pad]
class _JigsawSamples(object):
def __init__(self, nb_rows, nb_cols, max_steps, destinations):
self.nb_rows = nb_rows
self.nb_cols = nb_cols
self.max_steps = max_steps
self.destinations = destinations
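# Minimal usage sketch (an illustration, not from the original file): the
# module-level helpers referenced above can also be called directly,
# assuming `image` is a HxW(xC) uint8 array whose sides are divisible by
# the chosen cell counts (the Jigsaw augmenter otherwise pads for you):
#
#     import imgaug.random as iarandom
#     rng = iarandom.RNG(0)
#     destinations = generate_jigsaw_destinations(4, 4, 2, random_state=rng)
#     image_jig = apply_jigsaw(image, destinations)
#
# generate_jigsaw_destinations() samples where each cell moves and
# apply_jigsaw() rearranges the cells accordingly.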
| 41.758837
| 223
| 0.579795
|
ef6912f36ffa9956d1263aff3fce019a653d1916
| 1,686
|
py
|
Python
|
visualization_process/embedded_visualization.py
|
imdreamer2018/privacy-preserving-for-face-recognition
|
be7dde549c7a168085d89b17635ad8bdd375e04b
|
[
"MIT"
] | 9
|
2020-07-28T19:59:46.000Z
|
2021-12-16T07:14:12.000Z
|
visualization_process/embedded_visualization.py
|
imdreamer2018/privacy-preserving-for-face-recognition
|
be7dde549c7a168085d89b17635ad8bdd375e04b
|
[
"MIT"
] | 10
|
2020-05-17T06:23:39.000Z
|
2022-02-10T01:53:06.000Z
|
visualization_process/embedded_visualization.py
|
imdreamer2018/privacy-preserving-for-face-recognition
|
be7dde549c7a168085d89b17635ad8bdd375e04b
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
from sklearn.manifold import TSNE
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
# load embedded
embedded = np.load('../face_embedded_data/paper/embedded_nn4_lfw_10.npy')
# load metadata
metadata = np.load('../face_embedded_data/paper/metadata_nn4_lfw_10.npy')
print(metadata.shape)
targets = np.array([m.name for m in metadata])
print(targets)
X_embedded = TSNE(n_components=2, init='pca', method='exact').fit_transform(embedded)
# print(X_embedded.shape)
# print(X_embedded)
colors = iter(cm.rainbow(np.linspace(0, 1, len(list(enumerate(set(targets)))))))
print(X_embedded)
print(list(enumerate(set(targets))))
# try:
# while True:
# print(next(colors))
# except StopIteration:
# pass
def plt3d():
fig = plt.figure()
ax = Axes3D(fig)
    # Add coordinate axes (order is Z, Y, X)
# ax.set_zlabel('Z', fontdict={'size': 15, 'color': 'red'})
# ax.set_ylabel('Y', fontdict={'size': 15, 'color': 'red'})
# ax.set_xlabel('X', fontdict={'size': 15, 'color': 'red'})
    # Note: requires a 3-component embedding (TSNE(n_components=3, ...));
    # matplotlib's scatter takes the colour via 'color', not 'colors'.
    for i, t in enumerate(set(targets)):
        idx = targets == t
        ax.scatter(X_embedded[idx, 0], X_embedded[idx, 1], X_embedded[idx, 2], color=next(colors))
def plt2d():
    for i, t in enumerate(set(targets)):
        idx = targets == t
        plt.scatter(X_embedded[idx, 0], X_embedded[idx, 1], label=t)
plt2d()
plt.legend(bbox_to_anchor=(1, 1))
# Add coordinate axes (order is Z, Y, X)
# ax.set_zlabel('Z', fontdict={'size': 15, 'color': 'red'})
# ax.set_ylabel('Y', fontdict={'size': 15, 'color': 'red'})
# ax.set_xlabel('X', fontdict={'size': 15, 'color': 'red'})
plt.title('Embedded visualization when number of people is 10 ')
plt.show()
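# Sketch of the 3D variant (not part of the original script): plt3d() above
# indexes a third embedding dimension, so the t-SNE projection has to be
# computed with three components first, e.g.
#
#     X_embedded = TSNE(n_components=3, init='pca', method='exact').fit_transform(embedded)
#     plt3d()
#     plt.show()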
| 31.811321
| 99
| 0.668446
|
dd652ada96d9b41ba839df9e3b936e0ca54de346
| 1,875
|
py
|
Python
|
scanALSpySC_MFedit/example_hdf5.py
|
MFraund/PyALS_BL11_DAQGUI
|
88c923ebc31d68e653b49cea2c265f52221c9e95
|
[
"MIT"
] | 1
|
2022-03-07T11:10:13.000Z
|
2022-03-07T11:10:13.000Z
|
scanALSpySC_MFedit/example_hdf5.py
|
MFraund/PyALS_BL11_DAQGUI
|
88c923ebc31d68e653b49cea2c265f52221c9e95
|
[
"MIT"
] | null | null | null |
scanALSpySC_MFedit/example_hdf5.py
|
MFraund/PyALS_BL11_DAQGUI
|
88c923ebc31d68e653b49cea2c265f52221c9e95
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@copyright: (C) 2020 Surface Concept GmbH
Test HDF5 streaming.
"""
OUTPUTFILEPATH = "testA.h5" # warning! this file is overwritten if it exists
def test_hdf5_streaming():
""" Performs 1 measurement and streams DLD events to a HDF5 file. """
import scTDC
device = scTDC.Device(inifilepath="tdc_gpx3.ini",
autoinit=False)
retcode, errmsg = device.initialize()
if retcode < 0:
print("Error during initialization : ({}) {}".format(errmsg, retcode))
return
success, errmsg = device.hdf5_enable()
if not success:
print("Error while enabling HDF5 : " + errmsg)
return
versionstr = device.hdf5_lib_version()
print("Version of the libscTDC_hdf5 : " + versionstr)
a = scTDC.HDF5DataSelection # short alias
datasel = scTDC.HDF5DataSelection(a.X | a.TIME)
print("Opening HDF5 file " + OUTPUTFILEPATH)
success, errmsg = device.hdf5_open(
OUTPUTFILEPATH, "output of example_hdf5.py", datasel)
if not success:
print("Error while opening HDF5 file : " + errmsg)
return
print("Starting a measurement")
retcode, errmsg = device.do_measurement(time_ms=15000, synchronous=True)
if retcode < 0:
print("Error while starting measurement : ({}) {}".format(
errmsg, retcode))
print("Finished measurements")
print("Closing the HDF5 file") # this is very important: the HDF5 will be
# incomplete and most likely not even readable at all if it is not closed
success, errmsg = device.hdf5_close()
if not success:
print("Error while closing the HDF5 file")
# (it is also possible to aggregate many measurements into one HDF5 file)
device.hdf5_disable()
device.deinitialize()
if __name__ == "__main__":
test_hdf5_streaming()
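# A quick way to sanity-check the resulting file afterwards (a sketch using
# h5py, which is not part of this example; the internal dataset layout is
# defined by libscTDC_hdf5, so we only list what is there):
#
#     import h5py
#     with h5py.File(OUTPUTFILEPATH, "r") as f:
#         f.visititems(lambda name, obj: print(name, obj))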
| 30.241935
| 78
| 0.650133
|
41f09dcbd83519b02b0dd6c9c2188c98fa990e42
| 43,242
|
py
|
Python
|
bdOdonto/sistema_fichas/migrations/0001_initial.py
|
FACTOSolution/bdOdonto
|
e6cbbcef1faa8c77dc6e72b8888e5eaac8f046e2
|
[
"MIT"
] | null | null | null |
bdOdonto/sistema_fichas/migrations/0001_initial.py
|
FACTOSolution/bdOdonto
|
e6cbbcef1faa8c77dc6e72b8888e5eaac8f046e2
|
[
"MIT"
] | null | null | null |
bdOdonto/sistema_fichas/migrations/0001_initial.py
|
FACTOSolution/bdOdonto
|
e6cbbcef1faa8c77dc6e72b8888e5eaac8f046e2
|
[
"MIT"
] | 1
|
2019-10-18T20:16:28.000Z
|
2019-10-18T20:16:28.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-30 18:09
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Aluno',
fields=[
('matricula', models.CharField(max_length=15, primary_key=True, serialize=False)),
('usuario', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Atendimento',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data', models.DateField(auto_now=True)),
],
),
migrations.CreateModel(
name='Dados_Dentes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('letra', models.CharField(max_length=3)),
('dente', models.PositiveIntegerField(null=True)),
('rec', models.PositiveIntegerField()),
('placa', models.BooleanField()),
('ps', models.PositiveIntegerField()),
('ig', models.BooleanField()),
('pic', models.PositiveIntegerField()),
],
),
migrations.CreateModel(
name='Ficha_Dentistica',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('motivo_consulta', models.CharField(blank=True, max_length=20, null=True)),
('ultima_consulta', models.CharField(blank=True, max_length=10, null=True)),
('escova_dentes', models.CharField(choices=[('1x', '1x'), ('2x', '2x'), ('3x', '3x')], max_length=20)),
('horario_escovacao', models.CharField(blank=True, max_length=20, null=True)),
('usa_fio_dental', models.CharField(blank=True, max_length=10, null=True)),
('diario_alimentar', models.CharField(blank=True, max_length=30, null=True)),
('frequencia_consumo_acucar', models.CharField(choices=[('3x ao dia', '3x ao dia'), ('5x ao dia', '5x ao dia'), ('>5x ao dia', '>5x ao dia')], max_length=20)),
                ('horario_consumo_acucar', models.CharField(choices=[('Junto às refeições', 'Junto às refeições'), ('Intervalos entre refeições', 'Intervalos entre refeições'), ('Junto às refeições e nos intervalos das mesmas', 'Junto às refeições e nos intervalos das mesmas')], max_length=20)),
('toma_medicamento', models.CharField(blank=True, max_length=20, null=True)),
('fluxo_salivar', models.CharField(blank=True, max_length=10, null=True)),
('caracteristica_da_placa1', models.CharField(choices=[('Flocular e pegajosa', 'Flocular e pegajosa'), ('Calcificada', 'Calcificada')], max_length=30, null=True)),
('caracteristica_da_placa2', models.CharField(choices=[('Flocular e pegajosa', 'Flocular e pegajosa'), ('Calcificada', 'Calcificada')], max_length=30, null=True)),
('diag_risco_carie', models.CharField(choices=[('Alto', 'Alto'), ('Médio', 'Médio'), ('Baixo', 'Baixo')], max_length=20)),
('orientacao', models.BooleanField()),
('evidenciacao_de_placa', models.BooleanField()),
('profilaxia', models.BooleanField()),
('fosfato', models.BooleanField()),
('sodio', models.BooleanField()),
('fluoreto', models.BooleanField()),
('clorexidina', models.BooleanField()),
('aquosa_digluconato', models.BooleanField()),
('selamento_fissuras', models.CharField(blank=True, max_length=20, null=True)),
('remineralizacao_de_lesoes_de_carie', models.CharField(blank=True, max_length=20, null=True)),
('outra_medida', models.CharField(blank=True, max_length=20, null=True)),
('restauracoes_provisorias', models.CharField(blank=True, max_length=20, null=True)),
('tratamento_expectante', models.CharField(blank=True, max_length=20, null=True)),
('restauracoes_com_amalgama', models.CharField(blank=True, max_length=20, null=True)),
('restauracao_com_resina', models.CharField(blank=True, max_length=20, null=True)),
('radiografias', models.CharField(blank=True, max_length=20, null=True)),
('observacoes_dentistica', models.CharField(blank=True, max_length=20, null=True)),
('encaminhamento_para', models.CharField(blank=True, max_length=20, null=True)),
('atendimento', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sistema_fichas.Atendimento')),
],
),
migrations.CreateModel(
name='Ficha_Diagnostico',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data', models.DateField(auto_now=True)),
('motivo', models.CharField(max_length=200)),
('historia', models.TextField()),
('ultima_consulta', models.DateField(blank=True, null=True)),
('frequencia_consultas', models.CharField(max_length=200)),
('higiene_propria', models.CharField(max_length=200)),
('frequencia_escova', models.PositiveIntegerField()),
('dentes_sensiveis', models.BooleanField()),
('sangramento_gengiva', models.BooleanField()),
('morde_objetos', models.BooleanField()),
('mobilidade', models.BooleanField()),
('protese', models.BooleanField()),
('range_dentes', models.BooleanField()),
('dificuldade_abrir', models.BooleanField()),
('estalido', models.BooleanField()),
('boca_seca', models.BooleanField()),
('sol_frequente', models.BooleanField()),
('tabagismo', models.BooleanField()),
('tipo_tabagismo', models.CharField(blank=True, max_length=200, null=True)),
('duracao_tabagismo', models.CharField(blank=True, max_length=200, null=True)),
('frequencia_tabagismo', models.CharField(blank=True, max_length=200, null=True)),
('tempo_abandono_tabagismo', models.CharField(blank=True, max_length=200, null=True)),
('alcool', models.BooleanField()),
('frequencia_alcool', models.CharField(blank=True, max_length=200, null=True)),
('drogas_ilicitas', models.BooleanField()),
('def_drogas_ilicitas', models.CharField(blank=True, max_length=200, null=True)),
('tratamento_medico', models.BooleanField()),
('def_tratamento_medico', models.CharField(blank=True, max_length=200, null=True)),
('medicacao', models.BooleanField()),
('def_medicacao', models.CharField(blank=True, max_length=200, null=True)),
('doenca_grave', models.BooleanField()),
('def_doenca_grave', models.CharField(blank=True, max_length=200, null=True)),
('cirurgia', models.BooleanField()),
('def_cirurgia', models.CharField(blank=True, max_length=200, null=True)),
('anticoncepcional', models.BooleanField()),
('gravida', models.BooleanField()),
('tempo_gravidez', models.CharField(blank=True, max_length=200, null=True)),
('alergia', models.BooleanField()),
('def_alergia', models.CharField(blank=True, max_length=200, null=True)),
('reacao_medicamento', models.BooleanField()),
('def_reacao_medicamento', models.CharField(blank=True, max_length=200, null=True)),
('anestesia_dentaria', models.BooleanField()),
('reacao_anestesia_dentaria', models.CharField(blank=True, max_length=200, null=True)),
('anestesia_geral', models.BooleanField()),
('reacao_anestesia_geral', models.CharField(blank=True, max_length=200, null=True)),
('disturbios_respiratorios', models.BooleanField()),
('disturbios_respiratorios_abaixo', models.CharField(blank=True, choices=[('Pneumonia', 'Pneumonia'), ('Sinusite', 'Sinusite'), ('Rinite', 'Rinite'), ('Bronquite', 'Bronquite'), ('Asma', 'Asma'), ('Outro', 'Outro')], max_length=15, null=True)),
('disturbios_respiratorios_outro', models.CharField(blank=True, max_length=15, null=True)),
('hipertenso', models.BooleanField()),
('pressao_arterial', models.CharField(blank=True, max_length=200, null=True)),
('sangramento_excesso', models.BooleanField()),
('palpitacao', models.BooleanField()),
('falta_ar', models.BooleanField()),
('pes_inchados', models.BooleanField()),
('febre_reumatica', models.BooleanField()),
('problema_cardiovascular', models.BooleanField()),
('def_problema_cardiovascular', models.CharField(blank=True, max_length=200, null=True)),
('doencas_transmissiveis', models.BooleanField()),
('doencas_transmissiveis_abaixo', models.CharField(blank=True, choices=[('Pneumonia', 'Pneumonia'), ('Sinusite', 'Sinusite'), ('Rinite', 'Rinite'), ('Bronquite', 'Bronquite'), ('Asma', 'Asma'), ('Outro', 'Outro')], max_length=15, null=True)),
('doencas_transmissiveis_hepatite', models.CharField(blank=True, max_length=5, null=True)),
('doencas_transmissiveis_outro', models.CharField(blank=True, max_length=15, null=True)),
('virus', models.BooleanField()),
('def_virus', models.CharField(blank=True, max_length=200, null=True)),
('diabetes', models.BooleanField()),
('cicatrizacao_demorada', models.BooleanField()),
('perda_peso', models.BooleanField()),
('aumento_freq_urina', models.BooleanField()),
('desmaios', models.BooleanField()),
('convulsoes', models.BooleanField()),
('epilepsia', models.BooleanField()),
('disturbio_sanguineo', models.BooleanField()),
('def_disturbio_sanguineo', models.CharField(blank=True, max_length=200, null=True)),
('outro_problema', models.BooleanField()),
('def_outro_problema', models.CharField(blank=True, max_length=200, null=True)),
('face', models.TextField()),
('atm', models.TextField()),
('m_mastigatorios', models.TextField()),
('g_salivares', models.TextField()),
('g_linfaticos', models.TextField()),
('labios', models.TextField()),
('mucosa_j', models.TextField()),
('gengiva', models.TextField()),
('soalho_boca', models.TextField()),
('lingua', models.TextField()),
('palato', models.TextField()),
('orofaringe', models.TextField()),
('percussao', models.TextField()),
('exames_complementares', models.TextField()),
('necessidade_perio', models.TextField(null=True)),
('necessidade_cirurgia', models.TextField(null=True)),
('necessidade_endo', models.TextField(null=True)),
('necessidade_dentistica', models.TextField(null=True)),
('necessidade_protese', models.TextField(null=True)),
('disc', models.CharField(choices=[('Estágio I', 'Estágio I'), ('Estágio II', 'Estágio II'), ('Estágio III', 'Estágio III'), ('EstágioIV', 'Estágio IV'), ('Outro', 'Outro')], max_length=15, null=True)),
('disc_outro', models.CharField(blank=True, max_length=20, null=True)),
('atendimento', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sistema_fichas.Atendimento')),
],
),
migrations.CreateModel(
name='Ficha_Endodontia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('em_tratamento_med', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=15)),
('quanto_tempo', models.CharField(blank=True, max_length=10, null=True)),
('alguma_cirurgia', models.CharField(blank=True, max_length=50, null=True)),
('diabetes', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=15)),
('febre_reumatica', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=15)),
('alteracoes_sanguineas', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=15)),
('doencas_cardiovasculares', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=15)),
('problemas_hemorragicos', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=15)),
('hipertensao', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=15)),
('marcapasso', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=15)),
('gravida', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=15)),
('tempo_gravidez', models.CharField(blank=True, max_length=10, null=True)),
('hepatite', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=15)),
('tempo_hepatite', models.CharField(blank=True, max_length=10, null=True)),
('tipo_hepatite', models.CharField(blank=True, max_length=15, null=True)),
('uso_de_medicamento', models.CharField(blank=True, max_length=50, null=True)),
('uso_continuo_de_medicamento', models.CharField(blank=True, max_length=50, null=True)),
('alergia', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=15)),
('outras_informacoes', models.CharField(blank=True, max_length=100, null=True)),
('historia_dental', models.CharField(blank=True, max_length=100, null=True)),
('caracteristicas_da_dor', models.CharField(blank=True, max_length=60, null=True)),
('uso_analgesicos', models.BooleanField()),
('uso_antiinflamatorios', models.BooleanField()),
('uso_antibiotico', models.BooleanField()),
('dente', models.PositiveIntegerField()),
('dor_frio', models.BooleanField()),
('dor_calor', models.BooleanField()),
('dor_percussao_vertical', models.BooleanField()),
('dor_percusao_horizontal', models.BooleanField()),
('dor_palpacao_apical', models.BooleanField()),
('camara_normal', models.BooleanField()),
('camara_calcificada', models.BooleanField()),
('camara_com_perfuracao', models.BooleanField()),
('camara_com_reabsorcao_interna', models.BooleanField()),
('canal_amplo', models.BooleanField()),
('canal_atresiado', models.BooleanField()),
('canal_ja_manipulado', models.BooleanField()),
('canal_obturacao_deficiente', models.BooleanField()),
('canal_rizogenese_incompleta', models.BooleanField()),
('canal_instrumento_fraturado', models.BooleanField()),
('canal_fratura_radicular', models.BooleanField()),
('canal_sobre_obturacao', models.BooleanField()),
('canal_reabsorcao_apical', models.BooleanField()),
('canal_reabsorcao_externa', models.BooleanField()),
('canal_reabsorcao_interna', models.BooleanField()),
('canal_perfuracao', models.BooleanField()),
('pericemento_normal', models.BooleanField()),
('pericemento_espessado', models.BooleanField()),
('pericemento_hipercementose', models.BooleanField()),
('periapice_osteite_rarefaciente_difusa', models.BooleanField()),
('periapice_osteite_rarefaciente_circunscrita', models.BooleanField()),
('diag_clinico_provavel', models.CharField(max_length=100)),
('atendimento', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sistema_fichas.Atendimento')),
],
),
migrations.CreateModel(
name='Ficha_Endodontia_Tabela',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dente1', models.PositiveIntegerField(blank=True, null=True)),
('canal1', models.CharField(blank=True, max_length=20, null=True)),
('ponto_referencia1', models.CharField(blank=True, max_length=5, null=True)),
('cad1', models.PositiveIntegerField(blank=True, null=True)),
('ctp1', models.PositiveIntegerField(blank=True, null=True)),
('crt1', models.PositiveIntegerField(blank=True, null=True)),
('iai1', models.PositiveIntegerField(blank=True, null=True)),
('iaf1', models.PositiveIntegerField(blank=True, null=True)),
('im1', models.PositiveIntegerField(blank=True, null=True)),
('dente2', models.PositiveIntegerField(blank=True, null=True)),
('canal2', models.CharField(blank=True, max_length=20, null=True)),
('ponto_referencia2', models.CharField(blank=True, max_length=5, null=True)),
('cad2', models.PositiveIntegerField(blank=True, null=True)),
('ctp2', models.PositiveIntegerField(blank=True, null=True)),
('crt2', models.PositiveIntegerField(blank=True, null=True)),
('iai2', models.PositiveIntegerField(blank=True, null=True)),
('iaf2', models.PositiveIntegerField(blank=True, null=True)),
('im2', models.PositiveIntegerField(blank=True, null=True)),
('dente3', models.PositiveIntegerField(blank=True, null=True)),
('canal3', models.CharField(blank=True, max_length=20, null=True)),
('ponto_referencia3', models.CharField(blank=True, max_length=5, null=True)),
('cad3', models.PositiveIntegerField(blank=True, null=True)),
('ctp3', models.PositiveIntegerField(blank=True, null=True)),
('crt3', models.PositiveIntegerField(blank=True, null=True)),
('iai3', models.PositiveIntegerField(blank=True, null=True)),
('iaf3', models.PositiveIntegerField(blank=True, null=True)),
('im3', models.PositiveIntegerField(blank=True, null=True)),
('atendimento', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sistema_fichas.Atendimento')),
],
),
migrations.CreateModel(
name='Ficha_Ortodontia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('queixa', models.CharField(max_length=200)),
('cor', models.CharField(choices=[('Branca', 'Branca'), ('Negra', 'Negra'), ('Amarela', 'Amarela'), ('Parda', 'Parda')], max_length=10)),
('doencas', models.CharField(choices=[('Não relata', 'Não relata'), ('Habituais', 'Habituais'), ('Outras', 'Outras')], max_length=10)),
('alergias', models.BooleanField()),
('def_alergias', models.CharField(blank=True, max_length=20, null=True)),
('operacao', models.BooleanField()),
('estado_saude', models.CharField(choices=[('Bom', 'Bom'), ('Regular', 'Regular'), ('Deficiente', 'Deficiente')], max_length=15)),
('traumatismo', models.BooleanField()),
('data_traumatismo', models.CharField(blank=True, max_length=10, null=True)),
('vontade_correcao', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sabe', 'Não sabe')], max_length=15)),
('aparelho', models.BooleanField()),
('tempo_aparelho', models.CharField(blank=True, max_length=10, null=True)),
('observacoes_anamnese', models.TextField()),
('psicologico', models.CharField(choices=[('Normal', 'Normal'), ('Extrovertido', 'Extrovertido'), ('Introvertido', 'Introvertido')], max_length=15)),
('simetria_facial', models.BooleanField()),
('tipo_facial', models.CharField(choices=[('Dolicofacial', 'Dolicofacial'), ('Mesofacial', 'Mesofacial'), ('Braquifacial', 'Braquifacial')], max_length=16)),
('selamento_labial_frontal', models.BooleanField()),
('relacao_ls', models.CharField(choices=[('Normal', 'Normal'), ('Diminuído', 'Diminuído'), ('Aumentado', 'Aumentado')], max_length=15)),
('espessura', models.CharField(choices=[('Normal', 'Normal'), ('Diminuído', 'Diminuído'), ('Aumentado', 'Aumentado')], max_length=15)),
('tonicidade_labial', models.CharField(choices=[('Normal', 'Normal'), ('Diminuído', 'Diminuído'), ('Aumentado', 'Aumentado')], max_length=15)),
('tonicidade_mentoniano', models.CharField(choices=[('Normal', 'Normal'), ('Diminuído', 'Diminuído'), ('Aumentado', 'Aumentado')], max_length=15)),
('zigomatico_frontal', models.CharField(choices=[('Normal', 'Normal'), ('Diminuído', 'Diminuído'), ('Aumentado', 'Aumentado')], max_length=15)),
('observacoes_frontal', models.TextField()),
('simetria_sorriso', models.BooleanField()),
('qtd_gengiva_incisos', models.CharField(choices=[('Normal', 'Normal'), ('Diminuído', 'Diminuído'), ('Aumentado', 'Aumentado')], max_length=15)),
('corredor_bucal', models.CharField(choices=[('Normal', 'Normal'), ('Diminuído', 'Diminuído'), ('Aumentado', 'Aumentado')], max_length=15)),
('observacoes_frontal_sorrindo', models.TextField()),
('perfil', models.CharField(choices=[('Reto', 'Reto'), ('Côncavo', 'Côncavo'), ('Convexo', 'Convexo')], max_length=15)),
('dimensao', models.CharField(choices=[('1/3 faciais proporcionais', '1/3 faciais proporcionais'), ('1/3 inf. aumentado', '1/3 inf. aumentado'), ('1/3 inf. diminuido', '1/3 inf. diminuido')], max_length=30)),
('nariz', models.CharField(choices=[('Normal', 'Normal'), ('Pequeno', 'Pequeno'), ('Grande', 'Grande')], max_length=15)),
('selamento_labial_perfil', models.BooleanField()),
('maxila', models.CharField(choices=[('Normal', 'Normal'), ('Prostruída', 'Prostruída'), ('Retruída', 'Retruída')], max_length=15)),
('zigomatico_perfil', models.CharField(choices=[('Normal', 'Normal'), ('Ausente', 'Ausente'), ('Proeminente', 'Proeminente')], max_length=15)),
('angulo_nasolabial', models.CharField(choices=[('Normal', 'Normal'), ('Fechado', 'Fechado'), ('Aberto', 'Aberto')], max_length=15)),
('posicao_labio_superior', models.CharField(choices=[('Normal', 'Normal'), ('Curto', 'Curto'), ('Longo', 'Longo')], max_length=15)),
('posicao_labio_inferior', models.CharField(choices=[('Normal', 'Normal'), ('Eversão', 'Eversão')], max_length=15)),
('mandibula', models.CharField(choices=[('Normal', 'Normal'), ('Prostruída', 'Prostruída'), ('Retruída', 'Retruída')], max_length=15)),
('qtd_mento', models.CharField(choices=[('Normal', 'Normal'), ('Deficiente', 'Deficiente'), ('Proeminente', 'Proeminente')], max_length=15)),
('sulco_mentolabial', models.CharField(choices=[('Normal', 'Normal'), ('Diminuído', 'Diminuído'), ('Aumentado', 'Aumentado')], max_length=15)),
('observacoes_perfil', models.TextField()),
('respiracao', models.CharField(choices=[('Nasal', 'Nasal'), ('Bucal', 'Bucal'), ('Naso-Bucal', 'Naso-Bucal')], max_length=15)),
('degluticao', models.CharField(choices=[('Normal', 'Normal'), ('Atípica', 'Atípica')], max_length=15)),
('fonacao', models.CharField(choices=[('Normal', 'Normal'), ('Atipica', 'Atípica')], max_length=15)),
('habitos', models.CharField(choices=[('Não relata', 'Não relata'), ('Sucção', 'Sucção'), ('Interposição labial', 'Interposição labial'), ('Interposição', 'Interposição'), ('Onicofagia', 'Onicofagia'), ('Outros', 'Outros')], max_length=25)),
('habitos_outros', models.CharField(max_length=20)),
('atm', models.TextField()),
('observacoes_funcional', models.TextField()),
                ('dentadura', models.CharField(choices=[('Decidua', 'Decídua'), ('Mista(1o Transit.)', 'Mista(1o Transit.)'), ('Mista(2o Transit.)', 'Mista(2o Transit.)'), ('Mista(Intertransit.)', 'Mista(Intertransit.)'), ('Permanente', 'Permanente'), ('Arco Tipo I', 'Arco Tipo I'), ('Arco Tipo II', 'Arco Tipo II')], max_length=25)),
('erupcao_dentaria', models.CharField(choices=[('Normal', 'Normal'), ('Precoce', 'Precoce'), ('Tardia', 'Tardia')], max_length=15)),
('arco_superior', models.CharField(choices=[('Normal', 'Normal'), ('Amplo', 'Amplo'), ('Atrésico', 'Atrésico')], max_length=15)),
('arco_inferior', models.CharField(choices=[('Normal', 'Normal'), ('Amplo', 'Amplo'), ('Atrésico', 'Atrésico')], max_length=15)),
('linha_med_sup', models.CharField(choices=[('Normal', 'Normal'), ('Desvio p/ direita', 'Desvio p/ direita'), ('Desvio p/ esquerda', 'Desvio p/ esquerda')], max_length=20)),
('linha_med_inf', models.CharField(choices=[('Normal', 'Normal'), ('Desvio p/ direita', 'Desvio p/ direita'), ('Desvio p/ esquerda', 'Desvio p/ esquerda')], max_length=20)),
('trespasse_horizontal', models.CharField(choices=[('Normal', 'Normal'), ('Aumentado', 'Aumentado'), ('Negativo', 'Negativo')], max_length=15)),
('trespasse_vertical', models.CharField(choices=[('Normal', 'Normal'), ('Aumentado', 'Aumentado'), ('Topo', 'Topo'), ('Mordida aberta', 'Mordida aberta'), ('Dentoalveolar', 'Dentoalveolar'), ('Esquelética', 'Esquelética')], max_length=17)),
('mordida_cruzada', models.CharField(choices=[('Ausente', 'Ausente'), ('Anterior', 'Anterior'), ('Unilateral Verdadeira', 'Unilateral Verdadeira'), ('Unilateral Funcional', 'Unilateral Funcional'), ('Bilateral', 'Bilateral'), ('Localizada', 'Localizada')], max_length=17)),
('spee_sup', models.CharField(choices=[('Normal', 'Normal'), ('Acentuada', 'Acentuada')], max_length=15)),
('spee_inf', models.CharField(choices=[('Normal', 'Normal'), ('Acentuada', 'Acentuada')], max_length=15)),
('relacao_caninos_dir', models.CharField(choices=[('Classe I', 'Classe I'), ('Classe II', 'Classe II'), ('Classe III', 'Classe III')], max_length=15)),
('relacao_caninos_esq', models.CharField(choices=[('Classe I', 'Classe I'), ('Classe II', 'Classe II'), ('Classe III', 'Classe III')], max_length=15)),
('relacao_molares_dir', models.CharField(choices=[('Classe I', 'Classe I'), ('Classe II', 'Classe II'), ('Classe III', 'Classe III')], max_length=15)),
('relacao_molares_esq', models.CharField(choices=[('Classe I', 'Classe I'), ('Classe II', 'Classe II'), ('Classe III', 'Classe III')], max_length=15)),
('angle', models.CharField(choices=[('Classe I', 'Classe I'), ('Classe II, 1a', 'Classe II, 1a'), ('Classe II, 2a', 'Classe II, 2a'), ('Classe III', 'Classe III'), ('Subdiv. direita', 'Subdiv. direita'), ('Subdiv. esquerda', 'Subdiv. esquerda')], max_length=15)),
('andrews', models.CharField(choices=[('Classe I', 'Classe I'), ('Classe II', 'Classe II'), ('Classe III', 'Classe III'), ('1/4', '1/4'), ('1/2', '1/2'), ('3/4', '3/4'), ('Total', 'Total')], max_length=15)),
('diagnostico', models.CharField(choices=[('Oclusão normal', 'Oclusão normal'), ('Má oclusão', 'Má oclusão')], max_length=20)),
('observacoes_oclusal', models.TextField()),
('observacoes_odontograma', models.TextField()),
('atendimento', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sistema_fichas.Atendimento')),
],
),
migrations.CreateModel(
name='Ficha_Periodontia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sangramento_gengiva', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('tratamento_gengiva', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('hemorragia_extrac_dentes', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('aparelho_ortodontico', models.BooleanField()),
('alergia_anestesia', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('alergia_antibioticos', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('alergia_sulfas', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('alergia_aspirina', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('alergia_outros', models.CharField(blank=True, max_length=20, null=True)),
('alergia_nao_medicamentos', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('quais_alergias', models.CharField(blank=True, max_length=20, null=True)),
('cuidados_medicos', models.BooleanField()),
('motivo_cuidados_medicos', models.CharField(blank=True, max_length=20, null=True)),
('medicamentos', models.BooleanField()),
('quais_medicamentos', models.CharField(blank=True, max_length=20, null=True)),
('febre_reumatica', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('doencas_cardiovasculares', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('diabetes', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('tonturas', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('anemia', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('acamado', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('inchaco_dor_juntas', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('ulcera', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('figado', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('tuberculose', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('sangramento_excessivo', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('operacao', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('qual_operacao', models.CharField(blank=True, max_length=20, null=True)),
('variacao_peso', models.CharField(choices=[('Aumentou', 'Aumentou'), ('Diminuiu', 'Diminuiu'), ('Sem mudanças', 'Sem mudanças')], max_length=15)),
('radioterapia', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('regiao_radioterapia', models.CharField(blank=True, max_length=20, null=True)),
('tempo_radioterapia', models.CharField(blank=True, max_length=20, null=True)),
('pressao_arterial', models.CharField(choices=[('Alta', 'Alta'), ('Baixa', 'Baixa'), ('Normal', 'Normal')], max_length=10)),
('problema_menstruacao', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('gravida', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('fumante', models.BooleanField()),
('tempo_abandono_tabagismo', models.CharField(blank=True, max_length=20, null=True)),
('cigs_dia', models.PositiveIntegerField(blank=True, null=True)),
('doenca_infec', models.CharField(choices=[('Sim', 'Sim'), ('Não', 'Não'), ('Não sei', 'Não sei')], max_length=10)),
('qual_doenca_infec', models.CharField(blank=True, max_length=20, null=True)),
('drogas_ilicitas', models.BooleanField()),
('atendimento', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sistema_fichas.Atendimento')),
],
),
migrations.CreateModel(
name='Ficha_PPR',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('class_kennedy_sup', models.TextField()),
('tratamento_previo_sup', models.TextField()),
('planejamento_protese_sup', models.TextField()),
('observacoes_sup', models.TextField()),
('class_kennedy_inf', models.TextField()),
('tratamento_previo_inf', models.TextField()),
('planejamento_protese_inf', models.TextField()),
('observacoes_inf', models.TextField()),
('atendimento', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sistema_fichas.Atendimento')),
],
),
migrations.CreateModel(
name='Ficha_Urgencia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('historia_clinica', models.CharField(max_length=60)),
('medicamentos', models.CharField(max_length=60)),
('motivo', models.CharField(max_length=60)),
('diagnostico_provavel', models.CharField(max_length=20)),
('atend', models.CharField(choices=[('Estágio III', 'Estágio III'), ('Estágio IV', 'Estágio IV'), ('Outro', 'Outro')], max_length=15)),
('atend_outro', models.CharField(blank=True, max_length=20, null=True)),
('procedimento', models.CharField(max_length=60)),
('encaminhamento', models.CharField(blank=True, max_length=60, null=True)),
('prescricoes', models.CharField(blank=True, max_length=60, null=True)),
                ('especialidade', models.CharField(choices=[('Endodontia', 'Endodontia'), ('Prótese', 'Prótese'), ('Periodontia', 'Periodontia'), ('Dentística', 'Dentística'), ('Cirurgia', 'Cirurgia'), ('Outro', 'Outro')], max_length=15)),
('especialidade_outro', models.CharField(blank=True, max_length=20, null=True)),
('atendimento', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sistema_fichas.Atendimento')),
],
),
migrations.CreateModel(
name='Odontograma',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pontos', models.TextField(blank=True)),
('atendimento', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sistema_fichas.Atendimento')),
],
),
migrations.CreateModel(
name='Paciente',
fields=[
('cpf', models.CharField(max_length=11, primary_key=True, serialize=False)),
('nome', models.CharField(max_length=200)),
('endereco', models.CharField(max_length=200)),
('bairro', models.CharField(max_length=200)),
('cidade', models.CharField(max_length=200)),
('estado', models.CharField(max_length=200)),
('cep', models.CharField(blank=True, max_length=9)),
('tel', models.CharField(blank=True, max_length=11)),
('cel', models.CharField(blank=True, max_length=11)),
('email', models.EmailField(blank=True, max_length=254, null=True)),
('estado_civil', models.CharField(max_length=200)),
('data_nasc', models.DateField()),
('idade', models.CharField(max_length=3)),
('cor', models.CharField(max_length=200)),
('sexo', models.CharField(choices=[('M', 'M'), ('F', 'F')], max_length=1)),
('rg', models.CharField(max_length=8)),
('naturalidade', models.CharField(max_length=200)),
('nacionalidade', models.CharField(max_length=200)),
('profissao_atual', models.CharField(blank=True, max_length=200, null=True)),
('profissao_anterior', models.CharField(blank=True, max_length=200, null=True)),
('endereco_profissional', models.CharField(blank=True, max_length=200, null=True)),
('bairro_profissional', models.CharField(blank=True, max_length=200, null=True)),
('cep_profissional', models.CharField(blank=True, max_length=9, null=True)),
],
),
migrations.CreateModel(
name='Professor',
fields=[
('nome', models.CharField(max_length=50)),
('codigo', models.CharField(max_length=15, primary_key=True, serialize=False)),
('login', models.CharField(max_length=20)),
('senha', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Tipo_Ficha',
fields=[
('codigo', models.PositiveIntegerField(primary_key=True, serialize=False)),
('nome', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Turma',
fields=[
('codigo', models.CharField(max_length=13, primary_key=True, serialize=False)),
('nome', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Turma_Aluno',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('periodo', models.CharField(max_length=6)),
('aluno', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sistema_fichas.Aluno')),
('turma', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sistema_fichas.Turma')),
],
),
migrations.AddField(
model_name='turma',
name='alunos',
field=models.ManyToManyField(through='sistema_fichas.Turma_Aluno', to='sistema_fichas.Aluno'),
),
migrations.AddField(
model_name='tipo_ficha',
name='turma',
field=models.ManyToManyField(to='sistema_fichas.Turma'),
),
migrations.AddField(
model_name='professor',
name='turmas',
field=models.ManyToManyField(to='sistema_fichas.Turma'),
),
migrations.AddField(
model_name='paciente',
name='turma_aluno',
field=models.ManyToManyField(through='sistema_fichas.Atendimento', to='sistema_fichas.Turma_Aluno'),
),
migrations.AddField(
model_name='ficha_periodontia',
name='odontograma',
field=models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, to='sistema_fichas.Odontograma'),
),
migrations.AddField(
model_name='ficha_ortodontia',
name='odontograma',
field=models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, to='sistema_fichas.Odontograma'),
),
migrations.AddField(
model_name='ficha_diagnostico',
name='odontograma',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.PROTECT, to='sistema_fichas.Odontograma'),
),
migrations.AddField(
model_name='dados_dentes',
name='ficha_periodontia',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sistema_fichas.Ficha_Periodontia'),
),
migrations.AddField(
model_name='atendimento',
name='paciente',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sistema_fichas.Paciente'),
),
migrations.AddField(
model_name='atendimento',
name='tipo_ficha',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sistema_fichas.Tipo_Ficha'),
),
migrations.AddField(
model_name='atendimento',
name='turma_aluno',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sistema_fichas.Turma_Aluno'),
),
]
| 77.494624
| 339
| 0.586212
|
fb7433f6d0e11b20f7798826091977669863151e
| 2,832
|
py
|
Python
|
gulpy/inputs/writer.py
|
learningmatter-mit/gulpy
|
a1894a40392fd9fa95cdee06b0253b340add8f49
|
[
"MIT"
] | 1
|
2021-05-27T18:39:45.000Z
|
2021-05-27T18:39:45.000Z
|
gulpy/inputs/writer.py
|
learningmatter-mit/gulpy
|
a1894a40392fd9fa95cdee06b0253b340add8f49
|
[
"MIT"
] | null | null | null |
gulpy/inputs/writer.py
|
learningmatter-mit/gulpy
|
a1894a40392fd9fa95cdee06b0253b340add8f49
|
[
"MIT"
] | 1
|
2022-01-16T19:15:29.000Z
|
2022-01-16T19:15:29.000Z
|
class FileWriter:
def __init__(self):
pass
def __repr__(self):
return self.__str__()
def __str__(self):
        return "<FileWriter base class>"
def write_file(self, filename):
with open(filename, "w") as f:
f.write(self.__str__())
class InputWriter(FileWriter):
def __init__(
self, keywords, options, structure, library, *args, title="", **kwargs
):
self.keywords = keywords
self.options = options
self.structure = structure
self.library = library
self.title = title
def __repr__(self):
return "<{} with keywords: {}>".format(
self.__class__.__name__, " ".join(self.keywords)
)
def _render_keywords(self):
return " ".join(self.keywords)
def _render_title(self):
return """title\n {}\nend\n""".format(self.title)
def _render_options(self):
options_str = []
for opt, val in self.options.items():
if type(val) == list:
options_str += ["\n".join([opt, *val])]
else:
options_str += ["{} {}".format(opt, val)]
return "\n".join(options_str)
def _render_lattice(self):
lattice = self.structure.get_lattice()
if lattice is not None:
vectors = "\n".join(
["{:>12.7f} {:>12.7f} {:>12.7f}".format(*row) for row in lattice]
)
return """\nvectors\n{} \n""".format(vectors)
return "\n"
def _render_coords(self):
def format_coords(label, cs, xyz):
return "{:>5} {:>5} {:>10.5f} {:>10.5f} {:>10.5f}".format(
label, cs, xyz[0], xyz[1], xyz[2]
)
labels, core_shell, coords = self.structure.get_labels_shells_coords()
coord_lines = "\n".join(
[
format_coords(label, cs, xyz)
for label, cs, xyz in zip(labels, core_shell, coords)
]
)
return """cartesian\n{}\n""".format(coord_lines)
def _render_library(self):
species = set(self.structure.get_labels())
return "\n".join(self.library.get_library(species))
def _render_extras(self):
return ""
def _render_bonds(self):
def format_bonds(tup):
return "connect %d %d %s" % tup
return "\n".join([format_bonds(tup) for tup in self.structure.get_bonds()])
def __str__(self):
return "\n".join(
[
self._render_keywords(),
self._render_title(),
self._render_options(),
self._render_lattice(),
self._render_coords(),
self._render_bonds(),
self._render_extras(),
self._render_library(),
]
)
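# Rough usage sketch (illustrative only; `structure` and `library` are
# assumed to be gulpy objects providing the get_lattice(),
# get_labels_shells_coords(), get_bonds(), get_labels() and get_library()
# methods used above, and "opti conp" / "maxcyc" are common GULP settings):
#
#     writer = InputWriter(
#         keywords=["opti", "conp"],
#         options={"maxcyc": 500},
#         structure=structure,
#         library=library,
#         title="relaxation run",
#     )
#     writer.write_file("gulp.gin")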
| 28.606061
| 83
| 0.52613
|
505c493684d15d1bc58e108f11dc57fbf2ce8583
| 7,813
|
py
|
Python
|
board/views.py
|
bluebamus/django_function_based_web_site
|
5d3b453334110b6d49e5dbe09607df839bc5b649
|
[
"MIT"
] | null | null | null |
board/views.py
|
bluebamus/django_function_based_web_site
|
5d3b453334110b6d49e5dbe09607df839bc5b649
|
[
"MIT"
] | null | null | null |
board/views.py
|
bluebamus/django_function_based_web_site
|
5d3b453334110b6d49e5dbe09607df839bc5b649
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse
from django.views.decorators.http import require_POST
from django.core.paginator import Paginator
from django.http import Http404
from user.models import Usert
from tag.models import Tag
from .models import Board, Comment
from .forms import BoardForm, BoardUpdateForm
from user.decorators import *
# Create your views here.
def board_detail(request, pk):
try:
board = Board.objects.get(pk=pk)
comments = Comment.objects.filter(board=pk, is_deleted=False)
except Board.DoesNotExist:
raise Http404("게시글을 찾을 수 없습니다")
return render(request, "board_detail.html", {"board": board, "comments": comments})
@login_required
def board_update(request, pk):
try:
# board = Board.objects.get(pk=pk)
board = get_object_or_404(Board, pk=pk)
except Board.DoesNotExist:
raise Http404("게시글을 찾을 수 없습니다")
user_id = request.session.get("user")
usert = Usert.objects.get(pk=user_id)
if usert != board.writer:
err_msg = "글을 작성한 본인만 수정할 수 있습니다."
return render(request, "board_detail.html", {"board": board, "err_msg": err_msg})
if request.method == "POST":
form = BoardUpdateForm(request.POST or None, request.FILES or None, instance=board)
if form.is_valid():
user_id = request.session.get("user")
usert = Usert.objects.get(pk=user_id)
tags = form.cleaned_data["tags"].split(",")
board.title = form.cleaned_data["title"]
board.contents = form.cleaned_data["contents"]
board.writer = usert
if form.cleaned_data["photo"] == None:
board.photo = "default/no_img_lg.png"
elif form.cleaned_data["photo"] != board.photo:
board.photo = form.cleaned_data["photo"]
else:
pass
board.save()
            # Tags must be added after the board is saved and its pk exists, otherwise an error occurs
for tag in tags:
if not tag:
continue
# _tag, created = Tag.objects.get_or_create(name=tag)
_tag, _data = Tag.objects.get_or_create(name=tag.strip())
                # a '_XXXX' name conventionally marks something as protected
                # a bare '_' marks a variable that is intentionally unused
                # Tag.objects.get_or_create(name=tag) returns the tag if it exists, otherwise creates it
                # to require the same name and the same writer: Tag.objects.get_or_create(name=tag, writer=writer)
                # with that form, a different name or writer creates a new tag
                # to match on name only and fill the rest with defaults when creating:
# Tag.objects.get_or_create(name=tag, defaults={'wr'})
board.tags.add(_tag)
return redirect("board_detail", pk=pk)
else:
print(form.errors)
form = BoardUpdateForm(instance=board)
    # With a ModelForm, pass the object via instance, not initial.
"""form = BoardUpdateForm(
initial={
"title": board.title,
"contents": board.contents,
"tags": [tag.name for tag in board.tags.all()],
"photo": board.photo,
}
)"""
return render(request, "board_update.html", {"form": form, "board": board})
@login_required
def board_delete(request, pk):
try:
board = Board.objects.get(pk=pk)
except Board.DoesNotExist:
raise Http404("게시글을 찾을 수 없습니다")
if not request.session.get("user"):
err_msg = "로그인을 한 사용자만 글을 삭제할 수 있습니다."
return render(request, "board_detail.html", {"board": board, "err_msg": err_msg})
user_id = request.session.get("user")
usert = Usert.objects.get(pk=user_id)
if usert == board.writer:
Board.objects.get(pk=pk).delete()
else:
err_msg = "글을 작성한 본인만 삭제할 수 있습니다."
return render(request, "board_detail.html", {"board": board, "err_msg": err_msg})
return redirect("/board/list/")
@login_required
def board_write(request):
if not request.session.get("user"):
return redirect("/login/")
if request.method == "POST":
form = BoardForm(request.POST, request.FILES or None)
if form.is_valid():
user_id = request.session.get("user")
usert = Usert.objects.get(pk=user_id)
tags = form.cleaned_data["tags"].split(",")
board = Board()
board.title = form.cleaned_data["title"]
board.contents = form.cleaned_data["contents"]
board.writer = usert
board.photo = form.cleaned_data["photo"]
if board.photo == None:
board.photo = "default/no_img_lg.png"
board.save()
            # Tags must be added after the board is saved and its pk exists, otherwise an error occurs
for tag in tags:
if not tag:
continue
# _tag, created = Tag.objects.get_or_create(name=tag)
_tag, _ = Tag.objects.get_or_create(name=tag.strip())
                # a '_XXXX' name conventionally marks something as protected
                # a bare '_' marks a variable that is intentionally unused
                # Tag.objects.get_or_create(name=tag) returns the tag if it exists, otherwise creates it
                # to require the same name and the same writer: Tag.objects.get_or_create(name=tag, writer=writer)
                # with that form, a different name or writer creates a new tag
                # to match on name only and fill the rest with defaults when creating:
# Tag.objects.get_or_create(name=tag, defaults={'wr'})
board.tags.add(_tag)
return redirect("/board/list/")
else:
form = BoardForm()
return render(request, "board_write.html", {"form": form})
def board_list(request):
all_boards = Board.objects.all().order_by("-id")
    # "-id" orders by id descending, i.e. fetches the newest posts first
page = int(request.GET.get("p", 1))
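    # e.g. GET /board/list/?p=2 selects the second page (three posts per
    # page, see the Paginator below); "p" defaults to 1 when absent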
paginator = Paginator(all_boards, 3)
boards = paginator.get_page(page)
return render(request, "board_list.html", {"boards": boards})
@login_required
def likes(request, pk):
try:
like_blog = get_object_or_404(Board, pk=pk)
except Board.DoesNotExist:
raise Http404("게시글을 찾을 수 없습니다")
user_id = request.session.get("user")
# item = like_blog.like.values_list("id")
if like_blog.like.filter(id=user_id):
like_blog.like.remove(user_id)
like_blog.like_count -= 1
like_blog.save()
else:
like_blog.like.add(user_id)
like_blog.like_count += 1
like_blog.save()
return redirect("board_detail", pk=pk)
@login_required
def comment_write(request):
errors = []
if request.method == "POST":
post_id = request.POST.get("post_id", "").strip()
content = request.POST.get("content", "").strip()
if not content:
errors.append("댓글을 입력해주세요.")
if not errors:
comment = Comment.objects.create(
board=Board.objects.get(pk=post_id),
user=Usert.objects.get(pk=request.session.get("user")),
content=content,
parent_comment=None,
)
return redirect(reverse("board_detail", kwargs={"pk": post_id}))
return render(request, "blogs/post_detail.html", {"user": request.user, "cmt_errors": errors})
# return render(request, "board_detail.html", {"board": board, "err_msg": err_msg})
@login_required
def comment_delete(request, pk):
errors = []
try:
comment = Comment.objects.get(pk=pk)
    except Comment.DoesNotExist:
raise Http404("게시 댓글을 찾을 수 없습니다")
user_id = request.session.get("user")
user = Usert.objects.get(pk=user_id)
if user == comment.user:
# Comment.objects.get(pk=pk).delete()
comment.delete()
else:
errors.append("글을 작성한 본인만 삭제할 수 있습니다.")
comments = Comment.objects.filter(board=comment.board.id, is_deleted=False)
return render(
request,
"board_detail.html",
{"board": comment.board, "comments": comments, "err_msg": errors},
)
| 33.388889
| 98
| 0.596826
|
075544f1815e04d3655877f2aaf0575a72b009c2
| 9,283
|
py
|
Python
|
OpenCLGA/shuffler_chromosome.py
|
czarnobylu/OpenCLGA
|
c002b5177104db5bcdbb0192db25fbbb45516f27
|
[
"MIT"
] | 112
|
2017-04-07T06:02:10.000Z
|
2022-02-18T11:49:11.000Z
|
OpenCLGA/shuffler_chromosome.py
|
czarnobylu/OpenCLGA
|
c002b5177104db5bcdbb0192db25fbbb45516f27
|
[
"MIT"
] | 25
|
2016-11-22T08:22:53.000Z
|
2017-03-01T14:46:33.000Z
|
OpenCLGA/shuffler_chromosome.py
|
czarnobylu/OpenCLGA
|
c002b5177104db5bcdbb0192db25fbbb45516f27
|
[
"MIT"
] | 34
|
2017-05-22T02:56:08.000Z
|
2022-02-06T05:20:56.000Z
|
#!/usr/bin/python3
import numpy
import pyopencl as cl
from .simple_gene import SimpleGene
class ShufflerChromosome:
# ShufflerChromosome - a chromosome contains a list of Genes.
# __genes - an ordered list of Genes
# __name - name of the chromosome
    # __improving_func - function name in kernel to guarantee a better mutation result.
    # dna - a list of each Gene's dna
    # dna_total_length - sum of the lengths of all genes' dna
def __init__(self, genes, name = ''):
assert all(isinstance(gene, SimpleGene) for gene in genes)
assert type(genes) == list
self.__genes = genes
self.__name = name
self.__improving_func = None
@property
def num_of_genes(self):
return len(self.__genes)
@property
def name(self):
return self.__name
@property
def dna_total_length(self):
return self.num_of_genes
@property
def dna(self):
return [gene.dna for gene in self.__genes]
@dna.setter
def dna(self, dna):
assert self.num_of_genes == len(dna)
for i, gene in enumerate(self.__genes):
gene.dna = dna[i]
@property
def genes(self):
return self.__genes
@property
def gene_elements(self):
return [] if len(self.__genes) == 0 else self.__genes[0].elements
@property
def gene_elements_in_kernel(self):
return [] if len(self.__genes) == 0 else self.__genes[0].elements_in_kernel
@property
def kernel_file(self):
return 'shuffler_chromosome.cl'
@property
def struct_name(self):
        return '__ShufflerChromosome'
@property
def chromosome_size_define(self):
return 'SHUFFLER_CHROMOSOME_GENE_SIZE'
def early_terminated(self, best, worst):
return False
def from_kernel_value(self, data):
assert len(data) == self.num_of_genes
genes = [self.__genes[idx].from_kernel_value(v) for idx, v in enumerate(data)]
return ShufflerChromosome(genes, self.__name)
def use_improving_only_mutation(self, helper_func_name):
self.__improving_func = helper_func_name
def kernelize(self):
improving_func = self.__improving_func if self.__improving_func is not None\
else 'shuffler_chromosome_dummy_improving_func'
candidates = '#define SIMPLE_GENE_ELEMENTS ' + self.__genes[0].elements_in_kernel_str
defines = '#define SHUFFLER_CHROMOSOME_GENE_SIZE ' + str(self.num_of_genes) + '\n' +\
'#define IMPROVED_FITNESS_FUNC ' + improving_func + '\n'
improving_func_header = 'int ' + improving_func + '(global int* c,' +\
'int idx,' +\
'int chromosome_size FITNESS_ARGS);'
return candidates + defines + improving_func_header
def save(self, data, ctx, queue, population):
total_dna_size = population * self.dna_total_length
# prepare memory
other_chromosomes = numpy.zeros(total_dna_size, dtype=numpy.int32)
cross_map = numpy.zeros(total_dna_size, dtype=numpy.int32)
ratios = numpy.zeros(population, dtype=numpy.float32)
# read data from cl
cl.enqueue_read_buffer(queue, self.__dev_ratios, ratios)
cl.enqueue_read_buffer(queue, self.__dev_other_chromosomes, other_chromosomes)
cl.enqueue_read_buffer(queue, self.__dev_cross_map, cross_map).wait()
# save all of them
data['other_chromosomes'] = other_chromosomes
data['cross_map'] = cross_map
data['ratios'] = ratios
def restore(self, data, ctx, queue, population):
other_chromosomes = data['other_chromosomes']
cross_map = data['cross_map']
ratios = data['ratios']
# build CL memory from restored memory
mf = cl.mem_flags
self.__dev_ratios = cl.Buffer(ctx, mf.WRITE_ONLY, ratios.nbytes)
self.__dev_other_chromosomes = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR,
hostbuf=other_chromosomes)
self.__dev_cross_map = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR,
hostbuf=cross_map)
def preexecute_kernels(self, ctx, queue, population):
## initialize global variables for kernel execution
total_dna_size = population * self.dna_total_length
other_chromosomes = numpy.zeros(total_dna_size, dtype=numpy.int32)
cross_map = numpy.zeros(total_dna_size, dtype=numpy.int32)
ratios = numpy.zeros(population, dtype=numpy.float32)
mf = cl.mem_flags
self.__dev_ratios = cl.Buffer(ctx, mf.WRITE_ONLY, ratios.nbytes)
self.__dev_other_chromosomes = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR,
hostbuf=other_chromosomes)
self.__dev_cross_map = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR,
hostbuf=cross_map)
def get_populate_kernel_names(self):
return ['shuffler_chromosome_populate']
def get_crossover_kernel_names(self):
return ['shuffler_chromosome_calc_ratio',\
'shuffler_chromosome_pick_chromosomes',\
'shuffler_chromosome_do_crossover']
def get_mutation_kernel_names(self):
return ['shuffler_chromosome_single_gene_mutate']
def execute_populate(self, prg, queue, population, dev_chromosomes, dev_rnum):
prg.shuffler_chromosome_populate(queue,
(population,),
(1,),
dev_chromosomes,
dev_rnum).wait()
def selection_preparation(self, prg, queue, dev_fitnesses):
prg.shuffler_chromosome_calc_ratio(queue,
(1,),
(1,),
dev_fitnesses,
self.__dev_ratios).wait()
def execute_get_current_elites(self, prg, queue, top,
dev_chromosomes, dev_current_elites,
dev_best_indices):
prg.shuffler_chromosome_get_the_elites(queue, (1,), (1,),
dev_best_indices,
dev_chromosomes,
dev_current_elites,
numpy.int32(top)).wait()
def execute_update_current_elites(self, prg, queue, top, dev_worst_indices,
dev_chromosomes, dev_updated_elites,
dev_fitnesses, dev_updated_elite_fitness):
prg.shuffler_chromosome_update_the_elites(queue, (1,), (1,),
numpy.int32(top),
dev_worst_indices,
dev_chromosomes,
dev_updated_elites,
dev_fitnesses,
dev_updated_elite_fitness).wait()
def execute_crossover(self, prg, queue, population, generation_idx, prob_crossover,
dev_chromosomes, dev_fitnesses, dev_rnum, best_fitness):
prg.shuffler_chromosome_pick_chromosomes(queue,
(population,),
(1,),
dev_chromosomes,
dev_fitnesses,
self.__dev_other_chromosomes,
self.__dev_ratios,
dev_rnum).wait()
prg.shuffler_chromosome_do_crossover(queue,
(population,),
(1,),
dev_chromosomes,
dev_fitnesses,
self.__dev_other_chromosomes,
self.__dev_cross_map,
dev_rnum,
numpy.float32(best_fitness),
numpy.float32(prob_crossover)).wait()
def execute_mutation(self, prg, queue, population, generation_idx, prob_mutate,
dev_chromosomes, dev_fitnesses, dev_rnum, extra_list):
args = [dev_chromosomes,
dev_rnum,
numpy.float32(prob_mutate),
numpy.int32(self.__improving_func is not None)]
args = args + extra_list
prg.shuffler_chromosome_single_gene_mutate(queue,
(population,),
(1,),
*args).wait()
| 43.787736
| 94
| 0.537434
|
cc404a9b8472cd20e5a44ee64a650b35ba7bd0af
| 2,297
|
py
|
Python
|
datalad_neuroimaging/tests/test_aggregation.py
|
mih/datalad-neuroimaging
|
f83ba994a6b208c297501be3f18aeabb3da51e8b
|
[
"MIT"
] | null | null | null |
datalad_neuroimaging/tests/test_aggregation.py
|
mih/datalad-neuroimaging
|
f83ba994a6b208c297501be3f18aeabb3da51e8b
|
[
"MIT"
] | null | null | null |
datalad_neuroimaging/tests/test_aggregation.py
|
mih/datalad-neuroimaging
|
f83ba994a6b208c297501be3f18aeabb3da51e8b
|
[
"MIT"
] | null | null | null |
# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# -*- coding: utf-8 -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test metadata aggregation"""
from datalad.distribution.dataset import Dataset
from datalad.tests.utils import with_tree
from datalad.tests.utils import assert_equal
from datalad.tests.utils import assert_not_in
from ..extractors.tests.test_bids import bids_template
@with_tree(tree=bids_template)
def test_nested_metadata(path):
ds = Dataset(path).create(force=True)
ds.add('.')
ds.aggregate_metadata()
# BIDS returns participant info as a nested dict for each file in the
# content metadata. On the dataset-level this should automatically
# yield a sequence of participant info dicts, without any further action
# or BIDS-specific configuration
meta = ds.metadata('.', reporton='datasets', return_type='item-or-list')['metadata']
assert_equal(
meta['datalad_unique_content_properties']['bids']['participant'],
[
{
"age(years)": "20-25",
"id": "03",
"gender": "female",
"handedness": "r",
"hearing_problems_current": "n",
"language": "english"
},
{
"age(years)": "30-35",
"id": "01",
"gender": "male",
"handedness": "r",
"hearing_problems_current": "n",
"language": u"русский"
},
])
# we can turn off this kind of auto-summary
ds.config.add('datalad.metadata.generate-unique-bids', 'false', where='dataset')
ds.aggregate_metadata()
meta = ds.metadata('.', reporton='datasets', return_type='item-or-list')['metadata']
# protect next test a little, in case we enhance our core extractor in the future
# to provide more info
if 'datalad_unique_content_properties' in meta:
assert_not_in('bids', meta['datalad_unique_content_properties'])
| 38.932203
| 92
| 0.578581
|
fbae392cd679790bb480391e764f8dc74bcd1c3d
| 14,909
|
py
|
Python
|
DataExtractor.py
|
godfatherlmh/LoLAnalyzer
|
0b265b33230316ab1a1459a9767ef7527a4a4f89
|
[
"MIT"
] | null | null | null |
DataExtractor.py
|
godfatherlmh/LoLAnalyzer
|
0b265b33230316ab1a1459a9767ef7527a4a4f89
|
[
"MIT"
] | null | null | null |
DataExtractor.py
|
godfatherlmh/LoLAnalyzer
|
0b265b33230316ab1a1459a9767ef7527a4a4f89
|
[
"MIT"
] | null | null | null |
# Extract the useful data from game files (json)
# Append the useful data to a csv file
import pickle
import os
import queue
import sys
from collections import OrderedDict
import multiprocessing
from multiprocessing.managers import BaseManager, NamespaceProxy
import time
import Modes
import pandas as pd
from collections import Counter
CHUNK_SIZE = 100
def extracted_writer(extracted_file, q, stop):
with open(extracted_file, 'a+') as f:
while not stop.is_set():
try:
game_path = q.get(timeout=1)
except queue.Empty:
continue
f.write(game_path)
f.write('\n')
print('Closing writer', file=sys.stderr)
class Extractor:
def __init__(self, mode, extracted_files, current_index, rot_length, writing_q):
self.mode = mode
self.rot_length = rot_length
self.writing_q = writing_q
self.current_index = current_index
        if len(extracted_files) >= self.current_index > 0:  # the file already exists
self.csv_file = os.path.join(mode.EXTRACTED_DIR, extracted_files[self.current_index - 1])
self.csv_index = len(pd.read_csv(self.csv_file, skiprows=1))
print(self.csv_file, 'lines', self.csv_index, file=sys.stderr)
else:
self.csv_file = None
self.csv_index = mode.DATA_LINES
class ExManager(BaseManager):
pass
class ExProxy(NamespaceProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__', 'b')
ExManager.register('Extractor', Extractor, ExProxy)
def run(mode, cpu):
extracted_file = mode.EXTRACTED_FILE
if os.path.isfile(extracted_file):
with open(extracted_file, 'r') as f:
extracted_list = [x.strip() for x in f.readlines()]
else:
extracted_list = []
gamePaths = []
for patch in mode.learning_patches:
for region in mode.REGIONS:
if os.path.isdir(os.path.join(mode.DATABASE, 'patches', patch, region)):
gamePaths.extend(
[os.path.join(mode.DATABASE, 'patches', patch, region, f) for f in
os.listdir(os.path.join(mode.DATABASE, 'patches', patch, region))])
print('%d game files found' % len(gamePaths), file=sys.stderr)
gamePaths = list(set(gamePaths) - set(extracted_list))
print('%d new games to extract' % len(gamePaths), file=sys.stderr)
if not os.path.isdir(mode.EXTRACTED_DIR):
os.makedirs(mode.EXTRACTED_DIR)
extracted_files = [f for f in os.listdir(mode.EXTRACTED_DIR)]
l = list(map(lambda x: int(x.replace('data_', '').replace('.csv', '')), extracted_files))
l = sorted(range(len(l)), key=lambda k: l[k])
extracted_files = [extracted_files[k] for k in l]
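    # Worked example of the argsort above (hypothetical file names):
    #   extracted_files = ['data_10.csv', 'data_2.csv', 'data_1.csv']
    #   numeric suffixes = [10, 2, 1]
    #   index order      = [2, 1, 0]  (indices sorted by numeric suffix)
    #   extracted_files  = ['data_1.csv', 'data_2.csv', 'data_10.csv']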
# multiprocessing
manager = multiprocessing.Manager()
writing_q = manager.Queue()
stop = manager.Event()
writer = multiprocessing.Process(target=extracted_writer, args=(extracted_file, writing_q, stop))
writer.start()
ex_manager = ExManager()
ex_manager.start()
available_extractors = []
running_extractors = []
for i in range(cpu):
current_index = len(extracted_files) - i
# noinspection PyUnresolvedReferences
available_extractors.append(ex_manager.Extractor(mode, extracted_files, current_index, cpu, writing_q))
while gamePaths:
        # we work with chunks in order to save time (no need to hand over the extractor for every single game)
chunk = gamePaths[:CHUNK_SIZE]
gamePaths = gamePaths[CHUNK_SIZE:]
print(len(gamePaths), 'left', file=sys.stderr)
while not available_extractors: # wait until an extractor is available
for p, ex in running_extractors:
if p.is_alive():
continue
available_extractors.append(ex)
running_extractors.remove((p, ex))
if not available_extractors: # wait a bit
time.sleep(0.001)
# start a new job
ex = available_extractors.pop()
p = multiprocessing.Process(target=analyze_game, args=(ex, chunk,))
running_extractors.append((p, ex))
p.start()
for p, ex in running_extractors:
p.join()
stop.set()
writer.join()
print('-- Extraction complete --')
def analyze_game(ex, gamePaths):
for gamePath in gamePaths:
raw_data = OrderedDict([('s_' + champ, []) for champ in ex.mode.CHAMPIONS_LABEL] + [('p_' + champ, []) for champ in ex.mode.CHAMPIONS_LABEL])
raw_data['patch'] = []
raw_data['win'] = []
raw_data['file'] = []
print(ex.csv_file, gamePath)
game = pickle.load(open(gamePath, 'rb'))
bans = []
game_patch = '_'.join(game['gameVersion'].split('.')[:2])
if game['gameDuration'] < 300:
print(gamePath, 'FF afk', game['gameDuration'], file=sys.stderr)
ex.writing_q.put(gamePath)
continue
blueTeam = None
redTeam = None
for team in game['teams']:
if team['teamId'] == 100:
blueTeam = team
elif team['teamId'] == 200:
redTeam = team
else:
print(gamePath, 'Unrecognized team %d' % team['teamId'], file=sys.stderr)
break
for ban in team['bans']:
championId = ban['championId']
if championId not in bans:
bans.append(championId)
if not blueTeam or not redTeam:
print(gamePath, 'Teams are not recognized', file=sys.stderr)
ex.writing_q.put(gamePath)
continue
# not sure what is written for voided games, so it's safer to check both
# if we get something else than true/false or false/true we just ignore the file
blueWin = blueTeam['win'] == 'Win'
redWin = redTeam['win'] == 'Win'
if not blueWin ^ redWin:
print(gamePath, 'No winner found', blueWin, redWin, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
participants = game['participants']
# Blank, everything is available
state = OrderedDict()
state['win'] = int(blueWin)
state['patch'] = game_patch
state['file'] = os.path.basename(gamePath)
state.update([('s_' + champ_name, 'A') for champ_name in ex.mode.CHAMPIONS_LABEL]) # Status
state.update([('p_' + champ_name, 'N') for champ_name in ex.mode.CHAMPIONS_LABEL]) # Position
for key, value in state.items():
raw_data[key].append(value)
# Bans
state = OrderedDict(state) # don't forget to create a clean copy
for championId in bans:
for champ_name, champ_id in ex.mode.CHAMPIONS_ID.items():
if champ_id == championId:
state['s_' + champ_name] = 'N' # None
break
for key, value in state.items():
raw_data[key].append(value)
# Smart lane-role
        # The API doesn't precisely give each player's role, so we have to deduce it
b_roles = OrderedDict()
r_roles = OrderedDict()
for i in range(0, 10):
p = participants[i]
lane = p['timeline']['lane']
if i < 5:
if lane == 'TOP':
b_roles[i] = 'T'
elif lane == 'JUNGLE':
b_roles[i] = 'J'
elif lane == 'MIDDLE':
b_roles[i] = 'M'
elif lane == 'BOTTOM':
b_roles[i] = 'C'
elif lane == 'NONE':
b_roles[i] = '?' # Fill missing lane if possible
else:
raise Exception(p, lane)
else:
if lane == 'TOP':
r_roles[i] = 'T'
elif lane == 'JUNGLE':
r_roles[i] = 'J'
elif lane == 'MIDDLE':
r_roles[i] = 'M'
elif lane == 'BOTTOM':
r_roles[i] = 'C'
elif lane == 'NONE':
r_roles[i] = '?' # Fill missing lane if possible
else:
raise Exception(p, lane)
# Fill missing role '?'
# target at this point is something like 'T', 'J', 'M', 'C', 'C'
b_toFillCount = Counter(b_roles.values())['?']
if b_toFillCount > 1:
print(gamePath, 'fucked up roles', b_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
elif b_toFillCount == 1:
fill_index = list(b_roles.keys())[list(b_roles.values()).index('?')]
possible_roles = ['T', 'J', 'M', 'C']
missing_roles = list(set(possible_roles)-set(b_roles.values()))
if len(missing_roles) == 1:
# non-bot role
b_roles[fill_index] = missing_roles[0]
elif len(missing_roles) == 0:
# bot, whether it is support will be determined later
b_roles[fill_index] = 'C'
else:
print(gamePath, 'fucked up roles', b_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
r_toFillCount = Counter(r_roles.values())['?']
if r_toFillCount > 1:
print(gamePath, 'fucked up roles', r_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
elif r_toFillCount == 1:
fill_index = list(r_roles.keys())[list(r_roles.values()).index('?')]
possible_roles = ['T', 'J', 'M', 'C']
missing_roles = list(set(possible_roles)-set(r_roles.values()))
if len(missing_roles) == 1:
# non-bot role
r_roles[fill_index] = missing_roles[0]
elif len(missing_roles) == 0:
# bot, whether it is support will be determined later
r_roles[fill_index] = 'C'
else:
print(gamePath, 'fucked up roles', r_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
# need to find the support in both team
# a lane will appear twice, most likely 'C'
# the support will either be tagged as 'SUPPORT' or have a low cs count
b_doubleRole = Counter(b_roles.values()).most_common(1)[0][0]
b_doublei = [i for i, r in b_roles.items() if r == b_doubleRole]
if len(b_doublei) > 2:
print(gamePath, 'fucked up roles', b_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
if 'SUPPORT' in participants[b_doublei[0]]['timeline']['role']:
b_roles[b_doublei[0]] = 'S'
elif 'SUPPORT' in participants[b_doublei[1]]['timeline']['role']:
b_roles[b_doublei[1]] = 'S'
else: # Last resort -> check cs
if 'creepsPerMinDeltas' in participants[b_doublei[0]]['timeline']:
if participants[b_doublei[0]]['timeline']['creepsPerMinDeltas']['0-10'] < \
participants[b_doublei[1]]['timeline']['creepsPerMinDeltas']['0-10']:
b_roles[b_doublei[0]] = 'S'
else:
b_roles[b_doublei[1]] = 'S'
else:
if participants[b_doublei[0]]['stats']['totalMinionsKilled'] < participants[b_doublei[1]]['stats']['totalMinionsKilled']:
b_roles[b_doublei[0]] = 'S'
else:
b_roles[b_doublei[1]] = 'S'
r_doubleRole = Counter(r_roles.values()).most_common(1)[0][0]
r_doublei = [i for i, r in r_roles.items() if r == r_doubleRole]
if len(r_doublei) > 2:
print(gamePath, 'fucked up roles', r_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
if 'SUPPORT' in participants[r_doublei[0]]['timeline']['role']:
r_roles[r_doublei[0]] = 'S'
elif 'SUPPORT' in participants[r_doublei[1]]['timeline']['role']:
r_roles[r_doublei[1]] = 'S'
else: # Last resort -> check cs
if 'creepsPerMinDeltas' in participants[r_doublei[0]]['timeline']:
if participants[r_doublei[0]]['timeline']['creepsPerMinDeltas']['0-10'] < \
participants[r_doublei[1]]['timeline']['creepsPerMinDeltas']['0-10']:
r_roles[r_doublei[0]] = 'S'
else:
r_roles[r_doublei[1]] = 'S'
else:
if participants[r_doublei[0]]['stats']['totalMinionsKilled'] < participants[r_doublei[1]]['stats']['totalMinionsKilled']:
r_roles[r_doublei[0]] = 'S'
else:
r_roles[r_doublei[1]] = 'S'
roles = OrderedDict()
roles.update(b_roles)
roles.update(r_roles)
# Draft
DRAFT_ORDER = [0, 5, 6, 1, 2, 7, 8, 3, 4, 9] # This is not exact. This order is not pick order but end-draft order: if some players
        # trade, this order is wrong. Unfortunately there is no way to know the real pick order. So we just assume people don't trade often and
# that trading does not have a huge impact anyway.
for i in DRAFT_ORDER:
state = OrderedDict(state)
bluePick = i < 5
p = participants[i]
championId = p['championId']
for champ_name, champ_id in ex.mode.CHAMPIONS_ID.items():
if champ_id == championId:
state['s_' + champ_name] = 'B' if bluePick else 'R'
state['p_' + champ_name] = roles[i]
break
for key, value in state.items():
raw_data[key].append(value)
df = pd.DataFrame(raw_data, columns=ex.mode.COLUMNS)
if ex.csv_index + len(df) < ex.mode.DATA_LINES:
df.to_csv(ex.csv_file, mode='a', header=False, index=False)
ex.csv_index += len(df)
else: # split the data in two: finish prev file and start another
to_current = df.iloc[:ex.mode.DATA_LINES - ex.csv_index]
to_next = df.iloc[ex.mode.DATA_LINES - ex.csv_index:]
to_current.to_csv(ex.csv_file, mode='a', header=False, index=False)
# preparing new file
ex.current_index += ex.rot_length
current_file = 'data_' + str(ex.current_index) + '.csv'
ex.csv_file = os.path.join(ex.mode.EXTRACTED_DIR, current_file)
ex.csv_index = 0
to_next.to_csv(ex.csv_file, mode='a', header=True, index=False)
ex.csv_index += len(to_next)
# File fully explored
ex.writing_q.put(gamePath)
if __name__ == '__main__':
m = Modes.ABR_TJMCS_Mode(['7.16', '7.17'])
run(m, max(multiprocessing.cpu_count() - 1, 1))
| 40.958791
| 149
| 0.563284
|
1dd5dfdbaa47fa3805caac66e75ff94a042ded92
| 26,812
|
py
|
Python
|
pennylane/fourier/visualize.py
|
MoritzWillmann/pennylane
|
2b07d22cfcc6406ba28e5c647062340b240a4ee5
|
[
"Apache-2.0"
] | 539
|
2018-11-13T08:45:42.000Z
|
2020-07-27T18:17:16.000Z
|
pennylane/fourier/visualize.py
|
MoritzWillmann/pennylane
|
2b07d22cfcc6406ba28e5c647062340b240a4ee5
|
[
"Apache-2.0"
] | 588
|
2018-11-14T10:21:47.000Z
|
2020-07-28T06:27:14.000Z
|
pennylane/fourier/visualize.py
|
MoritzWillmann/pennylane
|
2b07d22cfcc6406ba28e5c647062340b240a4ee5
|
[
"Apache-2.0"
] | 165
|
2018-11-13T18:58:56.000Z
|
2020-07-27T17:18:17.000Z
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains visualization functions for Fourier series and coefficients."""
from itertools import product
import numpy as np
# pylint:disable=too-many-arguments,blacklisted-name
# Matplotlib is not a hard requirement for PennyLane in general, but it *is*
# a hard requirement for everything in this module.
try:
from matplotlib.colors import to_rgb
except (ModuleNotFoundError, ImportError) as e: # pragma: no cover
raise ImportError(
"Module matplotlib is required for visualization in the Fourier module. "
"You can install matplolib via \n\n pip install matplotlib"
) from e
from .utils import to_dict, format_nvec
def _validate_coefficients(coeffs, n_inputs, can_be_list=True):
"""Helper function to validate input coefficients of plotting functions.
Args:
coeffs (array[complex]): A set (or list of sets) of Fourier coefficients of a
n_inputs-dimensional function.
n_inputs (int): The number of inputs (dimension) of the function the coefficients are for.
can_be_list (bool): Whether or not the plotting function accepts a list of
coefficients, or only a single set.
Raises:
TypeError: If the coefficients are not a list or array.
ValueError: if the coefficients are not a suitable type for the plotting function.
"""
# Make sure we have a list or numpy array
if not isinstance(coeffs, list) and not isinstance(coeffs, np.ndarray):
raise TypeError(
"Input to coefficient plotting functions must be a list of numerical "
f"Fourier coefficients. Received input of type {type(coeffs)}"
)
# In case we have a list, turn it into a numpy array
if isinstance(coeffs, list):
coeffs = np.array(coeffs)
# Check if the user provided a single set of coefficients to a function that is
# meant to accept multiple samples; add an extra dimension around it if needed
if len(coeffs.shape) == n_inputs and can_be_list:
coeffs = np.array([coeffs])
# Check now that we have the right number of axes for the type of function
required_shape_size = n_inputs + 1 if can_be_list else n_inputs
if len(coeffs.shape) != required_shape_size:
raise ValueError(
f"Plotting function expected a list of {n_inputs}-dimensional inputs. "
f"Received coefficients of {len(coeffs.shape)}-dimensional function."
)
# Shape in all dimensions of a single set of coefficients must be the same
shape_set = set(coeffs.shape[1:]) if can_be_list else set(coeffs.shape)
if len(shape_set) != 1:
raise ValueError(
"All dimensions of coefficient array must be the same. "
f"Received array with dimensions {coeffs.shape}"
)
# Size of each sample dimension must be 2d + 1 where d is the degree
shape_dim = coeffs.shape[1] if can_be_list else coeffs.shape[0]
if (shape_dim - 1) % 2 != 0:
raise ValueError(
"Shape of input coefficients must be 2d + 1, where d is the largest frequency. "
f"Coefficient array with shape {coeffs.shape} is invalid."
)
# Return the coefficients; we may have switched to a numpy array or added a needed extra dimension
return coeffs
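# Shape sanity check for _validate_coefficients (illustrative only): for a function of
# n_inputs variables with maximum frequency (degree) d, a single coefficient set is
# expected to have shape (2d + 1,) * n_inputs. For example, with n_inputs = 2 and d = 2:
#   coeffs = np.zeros((5, 5), dtype=complex)
#   coeffs = _validate_coefficients(coeffs, n_inputs=2, can_be_list=True)
#   coeffs.shape  # -> (1, 5, 5): a leading "list" axis is added for the single sample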
def _extract_data_and_labels(coeffs):
"""Helper function for creating frequency labels and partitioning data.
Args:
coeffs (array[complex]): A list of sets of Fourier coefficients.
Returns:
(list(str), dict[str, array[complex]): The set of frequency labels, and a data
dictionary split into real and imaginary parts.
"""
# extract the x ticks
nvecs = list(to_dict(coeffs[0]).keys())
nvecs_formatted = [format_nvec(nvec) for nvec in nvecs]
# make data
data = {}
data["real"] = np.array([[c[nvec].real for nvec in nvecs] for c in coeffs])
data["imag"] = np.array([[c[nvec].imag for nvec in nvecs] for c in coeffs])
return nvecs_formatted, data
def _adjust_spine_placement(ax):
"""Helper function to set some common axis properties when plotting."""
ax.xaxis.grid()
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_position("zero")
ax.spines["right"].set_visible(False)
ax.set_axisbelow(True)
def violin(coeffs, n_inputs, ax, colour_dict=None, show_freqs=True):
"""Plots a list of sets of Fourier coefficients as a violin plot.
Args:
coeffs (list[array[complex]]): A list of sets of Fourier coefficients. The shape of the
coefficient arrays should resemble that of the output of NumPy/SciPy's ``fftn`` function, or
:func:`~.pennylane.fourier.coefficients`.
n_inputs (int): The number of input variables in the function.
ax (array[matplotlib.axes.Axes]): Axis on which to plot. Must
be a pair of axes from a subplot where ``sharex="row"`` and ``sharey="col"``.
colour_dict (dict[str, str]): A dictionary of the form ``{"real" : colour_string,
"imag" : other_colour_string}`` indicating which colours should be used in the plot.
show_freqs (bool): Whether or not to print the frequency labels on the plot axis.
Returns:
array[matplotlib.axes.Axes]: The axes on which the data is plotted.
**Example**
Suppose we have the following quantum function:
.. code-block:: python
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def circuit_with_weights(w, x):
qml.RX(x[0], wires=0)
qml.RY(x[1], wires=1)
qml.CNOT(wires=[1, 0])
qml.Rot(*w[0], wires=0)
qml.Rot(*w[1], wires=1)
qml.CNOT(wires=[1, 0])
qml.RX(x[0], wires=0)
qml.RY(x[1], wires=1)
qml.CNOT(wires=[1, 0])
return qml.expval(qml.PauliZ(0))
We would like to compute and plot the distribution of Fourier coefficients
for many random values of the weights ``w``. First, we generate all the coefficients:
.. code-block:: python
from functools import partial
coeffs = []
n_inputs = 2
degree = 2
for _ in range(100):
weights = np.random.normal(0, 1, size=(2, 3))
c = coefficients(partial(circuit_with_weights, weights), n_inputs, degree)
coeffs.append(c)
We can now plot by setting up a pair of ``matplotlib`` axes and passing them
to the plotting function:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(2, 1, sharey=True, figsize=(15, 4))
    >>> violin(coeffs, n_inputs, ax, show_freqs=True)
.. image:: ../../_static/fourier_vis_violin.png
:align: center
:width: 800px
:target: javascript:void(0);
"""
coeffs = _validate_coefficients(coeffs, n_inputs, True)
# Check axis shape
if ax.size != 2:
raise ValueError("Matplotlib axis should consist of two subplots.")
if colour_dict is None:
colour_dict = {"real": "purple", "imag": "green"}
# Get the labels and data
nvecs_formatted, data = _extract_data_and_labels(coeffs)
for (data_type, axis) in zip(["real", "imag"], ax):
violinplt = axis.violinplot(data[data_type], showextrema=False)
for bd in violinplt["bodies"]:
bd.set_color(colour_dict[data_type])
bd.set_alpha(0.7)
axis.set_ylabel(data_type)
axis.xaxis.set_ticks(np.arange(1, len(data[data_type][0]) + 1))
_adjust_spine_placement(axis)
# Format axes
ax[0].tick_params(axis="x", colors="white") # hack to get rid of ticks but keep grid
if show_freqs:
ax[1].tick_params(axis="x", which="both", length=0) # remove ticks without removing labels
ax[1].xaxis.set_ticklabels(nvecs_formatted, fontsize=10, color="grey")
ax[1].xaxis.set_ticks_position("top")
else:
ax[1].tick_params(axis="x", colors="white") # hack to get rid of ticks but keep grid
return ax
def box(coeffs, n_inputs, ax, colour_dict=None, show_freqs=True, show_fliers=True):
"""Plot a list of sets of Fourier coefficients as a box plot.
Args:
coeffs (list[array[complex]]): A list of sets of Fourier coefficients. The shape of the
coefficient arrays should resemble that of the output of numpy/scipy's ``fftn``
function, or :func:`~.pennylane.fourier.coefficients`.
n_inputs (int): The number of input variables in the function.
ax (array[matplotlib.axes.Axes]): Axis on which to plot. Must
be a pair of axes from a subplot where ``sharex="row"`` and ``sharey="col"``.
colour_dict (dict[str, str]): A dictionary of the form {"real" : colour_string,
"imag" : other_colour_string} indicating which colours should be used in the plot.
show_freqs (bool): Whether or not to print the frequency labels on the plot axis.
show_fliers (bool): Whether to display the box plot outliers.
Returns:
array[matplotlib.axes.Axes]: The axes after plotting is complete.
**Example**
Suppose we have the following quantum function:
.. code-block:: python
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def circuit_with_weights(w, x):
qml.RX(x[0], wires=0)
qml.RY(x[1], wires=1)
qml.CNOT(wires=[1, 0])
qml.Rot(*w[0], wires=0)
qml.Rot(*w[1], wires=1)
qml.CNOT(wires=[1, 0])
qml.RX(x[0], wires=0)
qml.RY(x[1], wires=1)
qml.CNOT(wires=[1, 0])
return qml.expval(qml.PauliZ(0))
We would like to compute and plot the distribution of Fourier coefficients
for many random values of the weights ``w``. First, we generate all the coefficients:
.. code-block:: python
from functools import partial
coeffs = []
n_inputs = 2
degree = 2
for _ in range(100):
weights = np.random.normal(0, 1, size=(2, 3))
c = coefficients(partial(circuit_with_weights, weights), n_inputs, degree)
coeffs.append(c)
We can now plot by setting up a pair of ``matplotlib`` axes and passing them
to the plotting function:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(2, 1, sharey=True, figsize=(15, 4))
>>> box(coeffs, n_inputs, ax, show_freqs=True)
.. image:: ../../_static/fourier_vis_box.png
:align: center
:width: 800px
:target: javascript:void(0);
"""
coeffs = _validate_coefficients(coeffs, n_inputs, True)
# Check axis shape
if ax.size != 2:
raise ValueError("Matplotlib axis should consist of two subplots.")
# The axis received must be a pair of axes in a subplot.
if colour_dict is None:
colour_dict = {"real": "purple", "imag": "green"}
# Get the labels and data
nvecs_formatted, data = _extract_data_and_labels(coeffs)
for (data_type, axis) in zip(["real", "imag"], ax):
data_colour = colour_dict[data_type]
axis.boxplot(
data[data_type],
boxprops=dict(
facecolor=to_rgb(data_colour) + (0.4,), color=data_colour, edgecolor=data_colour
),
medianprops=dict(color=data_colour, linewidth=1.5),
flierprops=dict(markeredgecolor=data_colour),
whiskerprops=dict(color=data_colour),
capprops=dict(color=data_colour),
patch_artist=True,
showfliers=show_fliers,
)
_adjust_spine_placement(axis)
axis.set_ylabel(data_type)
axis.xaxis.set_ticks(np.arange(1, len(nvecs_formatted) + 1))
ax[0].tick_params(axis="x", colors="white") # hack to get rid of ticks but keep grid
if show_freqs:
ax[1].tick_params(axis="x", which="both", length=0) # remove ticks without removing labels
ax[1].xaxis.set_ticklabels(nvecs_formatted, fontsize=10, color="grey")
ax[1].xaxis.set_ticks_position("top")
else:
ax[1].tick_params(axis="x", colors="white") # hack to get rid of ticks but keep grid
return ax
def bar(coeffs, n_inputs, ax, colour_dict=None, show_freqs=True):
"""Plot a set of Fourier coefficients as a bar plot.
Args:
coeffs (array[complex]): A single set of Fourier coefficients. The dimensions of the coefficient
array should be ``(2d + 1, ) * n_inputs`` where ``d`` is the largest frequency.
n_inputs (int): The number of input variables in the function.
ax (list[matplotlib.axes.Axes]): Axis on which to plot. Must
be a pair of axes from a subplot where ``sharex="row"`` and ``sharey="col"``.
colour_dict (dict[str, str]): A dictionary of the form ``{"real" : colour_string,
"imag" : other_colour_string}`` indicating which colours should be used in the plot.
show_freqs (bool): Whether or not to print the frequency labels on the plot axis.
Returns:
array[matplotlib.axes.Axes]: The axes after plotting is complete.
**Example**
Suppose we have the following quantum function:
.. code-block:: python
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def circuit_with_weights(w, x):
qml.RX(x[0], wires=0)
qml.RY(x[1], wires=1)
qml.CNOT(wires=[1, 0])
qml.Rot(*w[0], wires=0)
qml.Rot(*w[1], wires=1)
qml.CNOT(wires=[1, 0])
qml.RX(x[0], wires=0)
qml.RY(x[1], wires=1)
qml.CNOT(wires=[1, 0])
return qml.expval(qml.PauliZ(0))
We would like to compute and plot a single set of Fourier coefficients. We will
choose some values for ``w`` at random:
.. code-block:: python
from functools import partial
n_inputs = 2
degree = 2
weights = np.random.normal(0, 1, size=(2, 3))
coeffs = coefficients(partial(circuit_with_weights, weights), n_inputs, degree)
We can now plot by setting up a pair of ``matplotlib`` axes and passing them
to the plotting function:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(2, 1, sharey=True, figsize=(15, 4))
>>> bar(coeffs, n_inputs, ax, colour_dict={"real" : "red", "imag" : "blue"})
.. image:: ../../_static/fourier_vis_bar_plot_2.png
:align: center
:width: 800px
:target: javascript:void(0);
"""
coeffs = _validate_coefficients(coeffs, n_inputs, False)
# Check axis shape
if ax.size != 2:
raise ValueError("Matplotlib axis should consist of two subplots.")
# The axis received must be a pair of axes in a subplot.
if colour_dict is None:
colour_dict = {"real": "purple", "imag": "green"}
# Get the labels and data
nvecs_formatted, data = _extract_data_and_labels(np.array([coeffs]))
data_len = len(data["real"][0])
for (data_type, axis) in zip(["real", "imag"], ax):
axis.bar(np.arange(data_len), data[data_type][0], color=colour_dict[data_type], alpha=0.7)
axis.set_ylabel(data_type)
axis.xaxis.set_ticks(np.arange(data_len))
_adjust_spine_placement(axis)
ax[0].tick_params(axis="x", colors="white") # hack to get rid of ticklabels but keep grid
if show_freqs:
ax[1].tick_params(axis="x", which="both", length=0) # remove ticks without removing labels
ax[1].xaxis.set_ticklabels(nvecs_formatted, fontsize=10, color="grey")
ax[1].xaxis.set_ticks_position("top")
else:
ax[1].tick_params(axis="x", colors="white") # hack to get rid of ticklabels but keep grid
return ax
def panel(coeffs, n_inputs, ax, colour=None):
"""Plot a list of sets of coefficients in the complex plane for a 1- or 2-dimensional function.
Args:
coeffs (list[array[complex]]): A list of sets of Fourier coefficients. The shape of the
coefficient arrays must all be either 1- or 2-dimensional, i.e.,
each array should have shape ``(2d + 1,)``
for the 1-dimensional case, or ``(2d + 1, 2d + 1)`` where ``d`` is the
            degree, i.e., the maximum frequency present in the coefficients.
Such an array may be the output of the numpy/scipy ``fft``/``fft2`` functions,
or :func:`~.pennylane.fourier.coefficients`.
n_inputs (int): The number of variables in the function.
ax (array[matplotlib.axes._subplots.AxesSubplot]): Axis on which to plot. For
1-dimensional data, length must be the number of frequencies. For 2-dimensional
data, must be a grid that matches the dimensions of a single set of coefficients.
colour (str): The outline colour of the points on the plot.
Returns:
array[matplotlib.axes.Axes]: The axes after plotting is complete.
**Example**
Suppose we have the following quantum function:
.. code-block:: python
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def circuit_with_weights(w, x):
qml.RX(x[0], wires=0)
qml.RY(x[1], wires=1)
qml.CNOT(wires=[1, 0])
qml.Rot(*w[0], wires=0)
qml.Rot(*w[1], wires=1)
qml.CNOT(wires=[1, 0])
qml.RX(x[0], wires=0)
qml.RY(x[1], wires=1)
qml.CNOT(wires=[1, 0])
return qml.expval(qml.PauliZ(0))
We would like to compute and plot the distribution of Fourier coefficients
for many random values of the weights ``w``. First, we generate all the coefficients:
.. code-block:: python
from functools import partial
coeffs = []
n_inputs = 2
degree = 2
for _ in range(100):
weights = np.random.normal(0, 1, size=(2, 3))
c = coefficients(partial(circuit_with_weights, weights), n_inputs, degree)
coeffs.append(c)
    We can now plot by setting up a grid of ``matplotlib`` axes and passing them
    to the plotting function. The grid of axes must be large enough to represent all
    the available coefficients (in this case, since we have 2 variables and use
    degree 2, we need a 5x5 grid).
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(5, 5, figsize=(12, 10), sharex=True, sharey=True)
>>> panel(coeffs, n_inputs, ax)
.. image:: ../../_static/fourier_vis_panel.png
:align: center
:width: 800px
:target: javascript:void(0);
"""
if n_inputs in [1, 2]:
coeffs = _validate_coefficients(coeffs, n_inputs, True)
else:
raise ValueError(
"Panel plot function accepts input coefficients for only 1- or 2-dimensional functions."
)
if ax.shape != coeffs[0].shape:
raise ValueError("Shape of subplot axes must match the shape of the coefficient data.")
if colour is None:
colour = "tab:blue"
# This could probably be more efficient.
# Plot 1D case
if n_inputs == 1:
# Range is (0, ..., degree) for rfft, (0, ... degree, -degree, ..., -1) for fft
n_freqs = coeffs.shape[1] // 2 + (coeffs.shape[1] % 2)
frequency_range = list(range(n_freqs)) + list(range(-n_freqs + 1, 0))
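        # e.g. for degree 2 a single coefficient set has 2*2 + 1 = 5 entries, so
        # n_freqs = 5 // 2 + 1 = 3 and frequency_range = [0, 1, 2, -2, -1]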
for coeff in range(coeffs.shape[1]):
ax[coeff].scatter(
coeffs[:, coeff].real, coeffs[:, coeff].imag, facecolor="white", edgecolor=colour
)
ax[coeff].set_title(f"{frequency_range[coeff]}", fontsize=14)
ax[coeff].grid(True)
ax[coeff].set_aspect("equal")
# Plot 2D case
else:
n_freqs = coeffs.shape[1] // 2 + (coeffs.shape[1] % 2)
frequency_range = list(range(n_freqs)) + list(range(-n_freqs + 1, 0))
for coeff_1, coeff_2 in product(list(range(coeffs.shape[1])), list(range(coeffs.shape[2]))):
ax[coeff_1, coeff_2].scatter(
coeffs[:, coeff_1, coeff_2].real,
coeffs[:, coeff_1, coeff_2].imag,
facecolor="white",
edgecolor=colour,
)
ax[coeff_1, coeff_2].set_title(
f"{frequency_range[coeff_1]}, {frequency_range[coeff_2]}", fontsize=14
)
ax[coeff_1, coeff_2].grid(True)
ax[coeff_1, coeff_2].set_aspect("equal")
return ax
def radial_box(coeffs, n_inputs, ax, show_freqs=True, colour_dict=None, show_fliers=True):
"""Plot a list of sets of Fourier coefficients on a radial plot as box plots.
    Produces a 2-panel plot in which the left panel represents the real parts of the
    Fourier coefficients and the right the imaginary parts. This method accepts multiple sets of coefficients, and
plots the distribution of each coefficient as a boxplot.
Args:
coeffs (list[array[complex]]): A list of sets of Fourier coefficients. The shape of the
coefficient arrays should resemble that of the output of numpy/scipy's ``fftn`` function, or
:func:`~.pennylane.fourier.coefficients`.
n_inputs (int): Dimension of the transformed function.
ax (array[matplotlib.axes.Axes]): Axes to plot on. For this function, subplots
must specify ``subplot_kw=dict(polar=True)`` upon construction.
show_freqs (bool): Whether or not to label the frequencies on
the radial axis. Turn off for large plots.
colour_dict (dict[str, str]): Specify a colour mapping for positive and negative
real/imaginary components. If none specified, will default to:
``{"real" : "red", "imag" : "black"}``
        show_fliers (bool): Whether or not to plot outlying "fliers" on the boxplots.
Returns:
array[matplotlib.axes.Axes]: The axes after plotting is complete.
**Example**
Suppose we have the following quantum function:
.. code-block:: python
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def circuit_with_weights(w, x):
qml.RX(x[0], wires=0)
qml.RY(x[1], wires=1)
qml.CNOT(wires=[1, 0])
qml.Rot(*w[0], wires=0)
qml.Rot(*w[1], wires=1)
qml.CNOT(wires=[1, 0])
qml.RX(x[0], wires=0)
qml.RY(x[1], wires=1)
qml.CNOT(wires=[1, 0])
return qml.expval(qml.PauliZ(0))
We would like to compute and plot the distribution of Fourier coefficients
for many random values of the weights ``w``. First, we generate all the coefficients:
.. code-block:: python
from functools import partial
coeffs = []
n_inputs = 2
degree = 2
for _ in range(100):
weights = np.random.normal(0, 1, size=(2, 3))
c = coefficients(partial(circuit_with_weights, weights), n_inputs, degree)
coeffs.append(c)
We can now plot by setting up a pair of ``matplotlib`` axes and passing them
to the plotting function. Note that the axes passed must use polar coordinates.
.. code-block:: python
import matplotlib.pyplot as plt
fig, ax = plt.subplots(
1, 2, sharex=True, sharey=True,
subplot_kw=dict(polar=True),
figsize=(15, 8)
)
radial_box(coeffs, 2, ax, show_freqs=True, show_fliers=False)
.. image:: ../../_static/fourier_vis_radial_box.png
:align: center
:width: 800px
:target: javascript:void(0);
"""
coeffs = _validate_coefficients(coeffs, n_inputs, True)
# Check axis shape
if ax.size != 2:
raise ValueError("Matplotlib axis should consist of two subplots.")
if ax[0].name != "polar" or ax[1].name != "polar":
raise ValueError("Matplotlib axes for radial_box must be polar.")
if colour_dict is None:
colour_dict = {"real": "red", "imag": "black"}
# Number, width, and placement of pie slices
N = coeffs[0].size
angles = np.linspace(0, 2 * np.pi, N, endpoint=False)
angles = np.concatenate((angles[-N // 2 + 1 :], angles[: -N // 2 + 1]))[::-1]
width = (angles[1] - angles[0]) / 2
# Get the labels and data
nvecs_formatted, data = _extract_data_and_labels(coeffs)
    # Set up the box plots
for data_type, a in zip(["real", "imag"], ax):
data_colour = colour_dict[data_type]
a.boxplot(
data[data_type],
positions=angles,
widths=width,
boxprops=dict(
facecolor=to_rgb(data_colour) + (0.4,), color=data_colour, edgecolor=data_colour
),
medianprops=dict(color=data_colour, linewidth=1.5),
flierprops=dict(markeredgecolor=data_colour),
whiskerprops=dict(color=data_colour),
capprops=dict(color=data_colour),
patch_artist=True,
showfliers=show_fliers,
)
        # Rotate so that the 0 frequency is at the top
a.set_thetagrids((180 / np.pi) * angles, labels=nvecs_formatted)
a.set_theta_zero_location("N")
a.set_rlabel_position(0)
# Set and rotate the tickmarks; taken from SO
# https://stackoverflow.com/questions/46719340/how-to-rotate-tick-labels-in-polar-matplotlib-plot
for a in ax:
if show_freqs:
for label, angle in zip(a.get_xticklabels(), angles):
x, y = label.get_position()
lab = a.text(
x,
y,
label.get_text(),
transform=label.get_transform(),
ha=label.get_ha(),
va=label.get_va(),
fontsize=14,
color="grey",
)
if angle > np.pi:
lab.set_rotation((180 / np.pi) * angle + 90)
else:
lab.set_rotation((180 / np.pi) * angle + 270)
a.tick_params(pad=7 * n_inputs)
a.set_xticklabels([])
return ax
| 37.033149
| 104
| 0.622184
|
380cf98e5945843fdfa2aa354b3f06c71f8a2460
| 7,028
|
py
|
Python
|
model/evaluator.py
|
nikhilrayaprolu/LM-IT-DIL-CNN-CRF
|
22f3e4d1b766447d1992ccb479dd17ec9e28811d
|
[
"Apache-2.0"
] | 1
|
2019-10-03T05:59:08.000Z
|
2019-10-03T05:59:08.000Z
|
model/evaluator.py
|
nikhilrayaprolu/LM-IT-DIL-CNN-CRF
|
22f3e4d1b766447d1992ccb479dd17ec9e28811d
|
[
"Apache-2.0"
] | null | null | null |
model/evaluator.py
|
nikhilrayaprolu/LM-IT-DIL-CNN-CRF
|
22f3e4d1b766447d1992ccb479dd17ec9e28811d
|
[
"Apache-2.0"
] | null | null | null |
"""
.. module:: evaluator
:synopsis: evaluation method (f1 score and accuracy)
.. moduleauthor:: Liyuan Liu, Frank Xu
"""
import torch
import numpy as np
import itertools
import model.utils as utils
from torch.autograd import Variable
from model.crf import CRFDecode_vb
class eval_batch:
"""Base class for evaluation, provide method to calculate f1 score and accuracy
args:
packer: provide method to convert target into original space [TODO: need to improve]
l_map: dictionary for labels
"""
def __init__(self, packer, l_map):
self.packer = packer
self.l_map = l_map
self.r_l_map = utils.revlut(l_map)
def reset(self):
"""
re-set all states
"""
self.correct_labels = 0
self.total_labels = 0
self.gold_count = 0
self.guess_count = 0
self.overlap_count = 0
def calc_f1_batch(self, decoded_data, target_data):
"""
        update statistics for f1 score
args:
decoded_data (batch_size, seq_len): prediction sequence
target_data (batch_size, seq_len): ground-truth
"""
batch_decoded = torch.unbind(decoded_data, 1)
batch_targets = torch.unbind(target_data, 0)
for decoded, target in zip(batch_decoded, batch_targets):
gold = self.packer.convert_for_eval(target)
# remove padding
length = utils.find_length_from_labels(gold, self.l_map)
gold = gold[:length]
best_path = decoded[:length]
correct_labels_i, total_labels_i, gold_count_i, guess_count_i, overlap_count_i = self.eval_instance(best_path.numpy(), gold.numpy())
self.correct_labels += correct_labels_i
self.total_labels += total_labels_i
self.gold_count += gold_count_i
self.guess_count += guess_count_i
self.overlap_count += overlap_count_i
def calc_acc_batch(self, decoded_data, target_data):
"""
        update statistics for accuracy
args:
decoded_data (batch_size, seq_len): prediction sequence
target_data (batch_size, seq_len): ground-truth
"""
batch_decoded = torch.unbind(decoded_data, 1)
batch_targets = torch.unbind(target_data, 0)
for decoded, target in zip(batch_decoded, batch_targets):
gold = self.packer.convert_for_eval(target)
# remove padding
length = utils.find_length_from_labels(gold, self.l_map)
gold = gold[:length].numpy()
best_path = decoded[:length].numpy()
self.total_labels += length
self.correct_labels += np.sum(np.equal(best_path, gold))
def f1_score(self):
"""
        calculate f1 score based on statistics
"""
if self.guess_count == 0:
return 0.0, 0.0, 0.0, 0.0
precision = self.overlap_count / float(self.guess_count)
recall = self.overlap_count / float(self.gold_count)
if precision == 0.0 or recall == 0.0:
return 0.0, 0.0, 0.0, 0.0
f = 2 * (precision * recall) / (precision + recall)
accuracy = float(self.correct_labels) / self.total_labels
return f, precision, recall, accuracy
def acc_score(self):
"""
        calculate accuracy score based on statistics
"""
if 0 == self.total_labels:
return 0.0
accuracy = float(self.correct_labels) / self.total_labels
return accuracy
def eval_instance(self, best_path, gold):
"""
        update statistics for one instance
args:
best_path (seq_len): predicted
gold (seq_len): ground-truth
"""
total_labels = len(best_path)
correct_labels = np.sum(np.equal(best_path, gold))
gold_chunks = utils.iobes_to_spans(gold, self.r_l_map)
gold_count = len(gold_chunks)
guess_chunks = utils.iobes_to_spans(best_path, self.r_l_map)
guess_count = len(guess_chunks)
overlap_chunks = gold_chunks & guess_chunks
overlap_count = len(overlap_chunks)
return correct_labels, total_labels, gold_count, guess_count, overlap_count
class eval_w(eval_batch):
"""evaluation class for word level model (LSTM-CRF)
args:
packer: provide method to convert target into original space [TODO: need to improve]
l_map: dictionary for labels
        score_type: use the F1 score if the string contains 'f', otherwise accuracy
"""
def __init__(self, packer, l_map, score_type):
eval_batch.__init__(self, packer, l_map)
self.decoder = CRFDecode_vb(len(l_map), l_map['<start>'], l_map['<pad>'])
if 'f' in score_type:
self.eval_b = self.calc_f1_batch
self.calc_s = self.f1_score
else:
self.eval_b = self.calc_acc_batch
self.calc_s = self.acc_score
def calc_score(self, ner_model, dataset_loader):
"""
calculate score for pre-selected metrics
args:
ner_model: LSTM-CRF model
dataset_loader: loader class for test set
"""
ner_model.eval()
self.reset()
for feature, tg, mask in itertools.chain.from_iterable(dataset_loader):
fea_v, _, mask_v = self.packer.repack_vb(feature, tg, mask)
scores, _ = ner_model(fea_v)
decoded = self.decoder.decode(scores.data, mask_v.data)
self.eval_b(decoded, tg)
return self.calc_s()
class eval_wc(eval_batch):
"""evaluation class for LM-LSTM-CRF
args:
packer: provide method to convert target into original space [TODO: need to improve]
l_map: dictionary for labels
        score_type: use the F1 score if the string contains 'f', otherwise accuracy
"""
def __init__(self, packer, l_map, score_type):
eval_batch.__init__(self, packer, l_map)
self.decoder = CRFDecode_vb(len(l_map), l_map['<start>'], l_map['<pad>'])
if 'f' in score_type:
self.eval_b = self.calc_f1_batch
self.calc_s = self.f1_score
else:
self.eval_b = self.calc_acc_batch
self.calc_s = self.acc_score
def calc_score(self, ner_model, dataset_loader):
"""
calculate score for pre-selected metrics
args:
ner_model: LM-LSTM-CRF model
dataset_loader: loader class for test set
"""
ner_model.eval()
self.reset()
for f_f, f_p, b_f, b_p, w_f, tg, mask_v, len_v in itertools.chain.from_iterable(dataset_loader):
f_f, f_p, b_f, b_p, w_f, _, mask_v = self.packer.repack_vb(f_f, f_p, b_f, b_p, w_f, tg, mask_v, len_v)
scores = ner_model(f_f, f_p, b_f, b_p, w_f)
score = scores[-1]
#print(score.data.size(), mask_v.data.size())
decoded = self.decoder.decode(score.data, mask_v.data)
self.eval_b(decoded, tg)
return self.calc_s()
| 32.688372
| 144
| 0.609704
|
79040c03d6dad6d08291960465ba46fab0ba9ac7
| 1,634
|
py
|
Python
|
venv/lib/python3.8/site-packages/vsts/build/v4_1/models/source_provider_attributes.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/vsts/build/v4_1/models/source_provider_attributes.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/vsts/build/v4_1/models/source_provider_attributes.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class SourceProviderAttributes(Model):
"""SourceProviderAttributes.
:param name: The name of the source provider.
:type name: str
:param supported_capabilities: The capabilities supported by this source provider.
:type supported_capabilities: dict
:param supported_triggers: The types of triggers supported by this source provider.
:type supported_triggers: list of :class:`SupportedTrigger <build.v4_1.models.SupportedTrigger>`
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'supported_capabilities': {'key': 'supportedCapabilities', 'type': '{bool}'},
'supported_triggers': {'key': 'supportedTriggers', 'type': '[SupportedTrigger]'}
}
def __init__(self, name=None, supported_capabilities=None, supported_triggers=None):
super(SourceProviderAttributes, self).__init__()
self.name = name
self.supported_capabilities = supported_capabilities
self.supported_triggers = supported_triggers
| 48.058824
| 101
| 0.585679
|
40d0ce524357f045f6a05a4e8be18aa8d31a632f
| 3,566
|
py
|
Python
|
rl_algorithms/per/ddpg_agent.py
|
MrSyee/rl_algorithms
|
5b5276982032f8a8a614b9466849b7b3ef245b3e
|
[
"MIT"
] | 1
|
2020-05-18T04:53:07.000Z
|
2020-05-18T04:53:07.000Z
|
rl_algorithms/per/ddpg_agent.py
|
minseop4898/rl_algorithms
|
d56ea25a294e850f433b1120faebea3a0bfe0c54
|
[
"MIT"
] | 2
|
2020-03-25T08:22:17.000Z
|
2020-03-26T06:02:05.000Z
|
rl_algorithms/per/ddpg_agent.py
|
MrSyee/rl_algorithms
|
5b5276982032f8a8a614b9466849b7b3ef245b3e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""DDPG agent with PER for episodic tasks in OpenAI Gym.
- Author: Kh Kim
- Contact: kh.kim@medipixel.io
- Paper: https://arxiv.org/pdf/1509.02971.pdf
https://arxiv.org/pdf/1511.05952.pdf
"""
from typing import Tuple
import torch
import torch.nn as nn
from rl_algorithms.common.buffer.priortized_replay_buffer import PrioritizedReplayBuffer
import rl_algorithms.common.helper_functions as common_utils
from rl_algorithms.ddpg.agent import DDPGAgent
from rl_algorithms.registry import AGENTS
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
@AGENTS.register_module
class PERDDPGAgent(DDPGAgent):
"""ActorCritic interacting with environment.
Attributes:
memory (PrioritizedReplayBuffer): replay memory
per_beta (float): beta parameter for prioritized replay buffer
"""
# pylint: disable=attribute-defined-outside-init
def _initialize(self):
"""Initialize non-common things."""
self.per_beta = self.hyper_params.per_beta
if not self.args.test:
# replay memory
self.memory = PrioritizedReplayBuffer(
self.hyper_params.buffer_size,
self.hyper_params.batch_size,
alpha=self.hyper_params.per_alpha,
)
def update_model(self) -> Tuple[torch.Tensor, ...]:
"""Train the model after each episode."""
experiences = self.memory.sample(self.per_beta)
states, actions, rewards, next_states, dones, weights, indices, _ = experiences
# G_t = r + gamma * v(s_{t+1}) if state != Terminal
# = r otherwise
masks = 1 - dones
next_actions = self.actor_target(next_states)
next_values = self.critic_target(torch.cat((next_states, next_actions), dim=-1))
curr_returns = rewards + self.hyper_params.gamma * next_values * masks
curr_returns = curr_returns.to(device).detach()
# train critic
gradient_clip_ac = self.hyper_params.gradient_clip_ac
gradient_clip_cr = self.hyper_params.gradient_clip_cr
values = self.critic(torch.cat((states, actions), dim=-1))
critic_loss_element_wise = (values - curr_returns).pow(2)
critic_loss = torch.mean(critic_loss_element_wise * weights)
self.critic_optim.zero_grad()
critic_loss.backward()
nn.utils.clip_grad_norm_(self.critic.parameters(), gradient_clip_cr)
self.critic_optim.step()
# train actor
actions = self.actor(states)
actor_loss_element_wise = -self.critic(torch.cat((states, actions), dim=-1))
actor_loss = torch.mean(actor_loss_element_wise * weights)
self.actor_optim.zero_grad()
actor_loss.backward()
nn.utils.clip_grad_norm_(self.actor.parameters(), gradient_clip_ac)
self.actor_optim.step()
# update target networks
common_utils.soft_update(self.actor, self.actor_target, self.hyper_params.tau)
common_utils.soft_update(self.critic, self.critic_target, self.hyper_params.tau)
# update priorities in PER
new_priorities = critic_loss_element_wise
new_priorities = new_priorities.data.cpu().numpy() + self.hyper_params.per_eps
self.memory.update_priorities(indices, new_priorities)
# increase beta
fraction = min(float(self.i_episode) / self.args.episode_num, 1.0)
self.per_beta = self.per_beta + fraction * (1.0 - self.per_beta)
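        # e.g. with per_beta = 0.6 halfway through training (fraction = 0.5) the update
        # gives 0.6 + 0.5 * (1.0 - 0.6) = 0.8; beta anneals toward 1.0 as training ends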
return actor_loss.item(), critic_loss.item()
| 37.93617
| 88
| 0.678351
|
2930ecbeb8b33c7c9df98a1314c28955891daff2
| 1,035
|
py
|
Python
|
TextMessage.py
|
Vaishnavy080/Email-and-Text-Messages-Scheduler
|
98fa0a50a34c2bed1fa3b84112e9a2bf668ade82
|
[
"Apache-2.0"
] | null | null | null |
TextMessage.py
|
Vaishnavy080/Email-and-Text-Messages-Scheduler
|
98fa0a50a34c2bed1fa3b84112e9a2bf668ade82
|
[
"Apache-2.0"
] | null | null | null |
TextMessage.py
|
Vaishnavy080/Email-and-Text-Messages-Scheduler
|
98fa0a50a34c2bed1fa3b84112e9a2bf668ade82
|
[
"Apache-2.0"
] | null | null | null |
import requests
import json
API_ENDPOINT = "https://api.sms-magic.com/v1/sms/send"
msg_delivery_status_url = 'https://api.sms-magic.com/v1/sms/status'
API_KEY = "54b197ee146649517b9e5b47172cd1ee"
# Send an SMS via a GET request; returns the full requests.Response object.
def send_sms_get(number, msg):
    headers = {'apiKey': API_KEY}
    send_sms_data = {'mobile_number': number,
                     'sms_text': msg,
                     'sender_id': 'testSenderIDSMSMagic'}
    response_get = requests.request("GET", API_ENDPOINT, headers=headers, params=send_sms_data)
    return response_get
# Send an SMS via a POST request; returns the message id parsed from the JSON response.
def send_sms_post(number, body):
    payload = {'mobile_number': number,
               'sms_text': body,
               'sender_id': 'testSenderIDSMSMagic'}
    headers3 = {'apiKey': API_KEY,
                'content-type': "application/x-www-form-urlencoded"}
    response_post = requests.request("POST", API_ENDPOINT, data=payload, headers=headers3)
    res = json.loads(response_post.text)
    get_id = res["id"]
    return get_id
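

# --- Illustrative usage sketch (not part of the original script) ---
# Minimal example of calling the helpers above. The phone number and message text are
# placeholders, and a working SMS-Magic API key is assumed to be set in API_KEY.
if __name__ == "__main__":
    message_id = send_sms_post("+10000000000", "Scheduled reminder: meeting at 5 PM")
    print("Queued message with id:", message_id)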
| 34.5
| 96
| 0.635749
|
0da6f7177a88e40f44a66a3573c87d916a3a75de
| 5,500
|
py
|
Python
|
configs/shareresnet/faster_rcnn_r50_fpn_1x_shareresnet_7s.py
|
ziming-liu/ObjectDet
|
6e25fa784114b9773b052d9d5465aa6fed93468a
|
[
"Apache-2.0"
] | null | null | null |
configs/shareresnet/faster_rcnn_r50_fpn_1x_shareresnet_7s.py
|
ziming-liu/ObjectDet
|
6e25fa784114b9773b052d9d5465aa6fed93468a
|
[
"Apache-2.0"
] | null | null | null |
configs/shareresnet/faster_rcnn_r50_fpn_1x_shareresnet_7s.py
|
ziming-liu/ObjectDet
|
6e25fa784114b9773b052d9d5465aa6fed93468a
|
[
"Apache-2.0"
] | null | null | null |
# model settings
model = dict(
type='FasterRCNN',
pretrained='modelzoo://resnet50',
backbone=dict(
type='shareResNet',
depth=50,
num_stages=7,
        num_branch=4,
        strides=(1, 2, 2, 2, 2, 2, 2),
        dilations=(1, 1, 1, 1, 1, 1, 1),
        out_indices=(0, 1, 2, 3, 4, 5, 6),
        stage_with_dcn=(False, False, False, False, False, False, False),
        gcb=None,
        stage_with_gcb=(False, False, False, False, False, False, False),
        gen_attention=None,
        stage_with_gen_attention=((), (), (), (), (), (), ()),
frozen_stages=1,
style='pytorch', with_cp=True),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048, 2048, 2048, 2048],
out_channels=256,
keep=0,
num_outs=7),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
        anchor_strides=[4, 8, 16, 32, 64, 128, 256],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0),
#loss_adv=dict(type='AdversarialLoss', ), with_adv=True,
))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=4,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32*4,
flip_ratio=0.5,
with_mask=False,
with_crowd=True,
with_label=True),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32*4,
flip_ratio=0,
with_mask=False,
with_crowd=True,
with_label=True),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/image_info_test-dev2017.json',
img_prefix=data_root + 'test2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32*4,
flip_ratio=0,
with_mask=False,
with_label=False,
test_mode=True))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[7,11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=10,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/faster_rcnn_r50_fpn_1x_shareresnet_7s'
load_from = None
resume_from = None #'./work_dirs/faster_rcnn_r50_fpn_1x_shareresnet/epoch_5.pth'
workflow = [('train', 1)]
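
# --- Illustrative usage sketch (not part of the original config) ---
# A config like this is normally consumed by mmdetection's training entry point, e.g.
#   python tools/train.py configs/shareresnet/faster_rcnn_r50_fpn_1x_shareresnet_7s.py
# (or tools/dist_train.sh for multi-GPU runs). The commented lines below only show how
# the file can be loaded and inspected with mmcv, assuming mmcv is installed:
#   from mmcv import Config
#   cfg = Config.fromfile('configs/shareresnet/faster_rcnn_r50_fpn_1x_shareresnet_7s.py')
#   print(cfg.model.backbone.num_stages, cfg.optimizer.lr)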
| 30.726257
| 80
| 0.588727
|
5397677a22ba9fcc95ec2af2332d48402ca329d1
| 7,231
|
py
|
Python
|
vision/object_detection/core/batcher_test.py
|
TeamAutonomousCarOffenburg/TACO_2018
|
ebb63e466578fc3911269d4a714ebff0a516dbf6
|
[
"BSD-3-Clause"
] | 1
|
2019-09-11T23:06:25.000Z
|
2019-09-11T23:06:25.000Z
|
vision/object_detection/core/batcher_test.py
|
TeamAutonomousCarOffenburg/TACO_2018
|
ebb63e466578fc3911269d4a714ebff0a516dbf6
|
[
"BSD-3-Clause"
] | null | null | null |
vision/object_detection/core/batcher_test.py
|
TeamAutonomousCarOffenburg/TACO_2018
|
ebb63e466578fc3911269d4a714ebff0a516dbf6
|
[
"BSD-3-Clause"
] | 1
|
2019-04-08T09:52:24.000Z
|
2019-04-08T09:52:24.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.batcher."""
import numpy as np
import tensorflow as tf
from object_detection.core import batcher
slim = tf.contrib.slim
class BatcherTest(tf.test.TestCase):
def test_batch_and_unpad_2d_tensors_of_different_sizes_in_1st_dimension(
self):
with self.test_session() as sess:
batch_size = 3
num_batches = 2
examples = tf.Variable(tf.constant(2, dtype=tf.int32))
counter = examples.count_up_to(num_batches * batch_size + 2)
boxes = tf.tile(
tf.reshape(tf.range(4), [1, 4]),
tf.stack([counter, tf.constant(1)]))
batch_queue = batcher.BatchQueue(
tensor_dict={'boxes': boxes},
batch_size=batch_size,
batch_queue_capacity=100,
num_batch_queue_threads=1,
prefetch_queue_capacity=100)
batch = batch_queue.dequeue()
for tensor_dict in batch:
for tensor in tensor_dict.values():
self.assertAllEqual([None, 4],
tensor.get_shape().as_list())
tf.initialize_all_variables().run()
with slim.queues.QueueRunners(sess):
i = 2
for _ in range(num_batches):
batch_np = sess.run(batch)
for tensor_dict in batch_np:
for tensor in tensor_dict.values():
self.assertAllEqual(tensor,
np.tile(np.arange(4), (i, 1)))
i += 1
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batch)
def test_batch_and_unpad_2d_tensors_of_different_sizes_in_all_dimensions(
self):
with self.test_session() as sess:
batch_size = 3
num_batches = 2
examples = tf.Variable(tf.constant(2, dtype=tf.int32))
counter = examples.count_up_to(num_batches * batch_size + 2)
image = tf.reshape(
tf.range(counter * counter), tf.stack([counter, counter]))
batch_queue = batcher.BatchQueue(
tensor_dict={'image': image},
batch_size=batch_size,
batch_queue_capacity=100,
num_batch_queue_threads=1,
prefetch_queue_capacity=100)
batch = batch_queue.dequeue()
for tensor_dict in batch:
for tensor in tensor_dict.values():
self.assertAllEqual([None, None],
tensor.get_shape().as_list())
tf.initialize_all_variables().run()
with slim.queues.QueueRunners(sess):
i = 2
for _ in range(num_batches):
batch_np = sess.run(batch)
for tensor_dict in batch_np:
for tensor in tensor_dict.values():
self.assertAllEqual(tensor,
np.arange(i * i).reshape((i,
i)))
i += 1
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batch)
def test_batch_and_unpad_2d_tensors_of_same_size_in_all_dimensions(self):
with self.test_session() as sess:
batch_size = 3
num_batches = 2
examples = tf.Variable(tf.constant(1, dtype=tf.int32))
counter = examples.count_up_to(num_batches * batch_size + 1)
image = tf.reshape(tf.range(1, 13), [4, 3]) * counter
batch_queue = batcher.BatchQueue(
tensor_dict={'image': image},
batch_size=batch_size,
batch_queue_capacity=100,
num_batch_queue_threads=1,
prefetch_queue_capacity=100)
batch = batch_queue.dequeue()
for tensor_dict in batch:
for tensor in tensor_dict.values():
self.assertAllEqual([4, 3], tensor.get_shape().as_list())
tf.initialize_all_variables().run()
with slim.queues.QueueRunners(sess):
i = 1
for _ in range(num_batches):
batch_np = sess.run(batch)
for tensor_dict in batch_np:
for tensor in tensor_dict.values():
self.assertAllEqual(tensor,
np.arange(1, 13).reshape(
(4, 3)) * i)
i += 1
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batch)
def test_batcher_when_batch_size_is_one(self):
with self.test_session() as sess:
batch_size = 1
num_batches = 2
examples = tf.Variable(tf.constant(2, dtype=tf.int32))
counter = examples.count_up_to(num_batches * batch_size + 2)
image = tf.reshape(
tf.range(counter * counter), tf.stack([counter, counter]))
batch_queue = batcher.BatchQueue(
tensor_dict={'image': image},
batch_size=batch_size,
batch_queue_capacity=100,
num_batch_queue_threads=1,
prefetch_queue_capacity=100)
batch = batch_queue.dequeue()
for tensor_dict in batch:
for tensor in tensor_dict.values():
self.assertAllEqual([None, None],
tensor.get_shape().as_list())
tf.initialize_all_variables().run()
with slim.queues.QueueRunners(sess):
i = 2
for _ in range(num_batches):
batch_np = sess.run(batch)
for tensor_dict in batch_np:
for tensor in tensor_dict.values():
self.assertAllEqual(tensor,
np.arange(i * i).reshape((i,
i)))
i += 1
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batch)
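

# --- Illustrative sketch (not part of the original tests) ---
# batcher.BatchQueue pads each tensor up to the largest shape in the batch so the
# examples can be stacked, and dequeue() strips that padding again. The helper below
# mimics the idea for 2-D NumPy arrays; it is a conceptual sketch, not the batcher
# implementation.
def _pad_and_unpad_sketch():
    a, b = np.arange(4).reshape(2, 2), np.arange(12).reshape(3, 4)
    target = (max(a.shape[0], b.shape[0]), max(a.shape[1], b.shape[1]))

    def pad_to(arr, shape):
        out = np.zeros(shape, dtype=arr.dtype)
        out[:arr.shape[0], :arr.shape[1]] = arr
        return out

    batch = np.stack([pad_to(a, target), pad_to(b, target)])  # shape (2, 3, 4)
    return batch[0][:a.shape[0], :a.shape[1]]  # strips the padding, recovering `a`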
if __name__ == '__main__':
tf.test.main()
| 42.786982
| 80
| 0.514313
|
d651685d324465ead219c6ddfb3033bdc8f60b00
| 8,159
|
py
|
Python
|
{{cookiecutter.project_name}}/manager/cli.py
|
pratik-shivarkar/fastapi-boilerplate
|
5885e538b391ff8e4b5d53d966f5cfa16acb39fc
|
[
"MIT"
] | 1
|
2021-09-21T17:54:17.000Z
|
2021-09-21T17:54:17.000Z
|
{{cookiecutter.project_name}}/manager/cli.py
|
pratik-shivarkar/fastapi-cookiecutter
|
5885e538b391ff8e4b5d53d966f5cfa16acb39fc
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_name}}/manager/cli.py
|
pratik-shivarkar/fastapi-cookiecutter
|
5885e538b391ff8e4b5d53d966f5cfa16acb39fc
|
[
"MIT"
] | null | null | null |
import os
import sys
import click
from datetime import date
from urllib.parse import urlparse
from sqlalchemy.orm import Session
from hypercorn.asyncio import serve
from hypercorn.config import Config
from sqlalchemy import create_engine, sql
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
from app.config import logger
from models import UserBase
from models.user import User, Role, Permission, Resource, Policy
# Platform specific imports
try:
import uvloop
except ImportError:
import asyncio
else:
import asyncio
# TODO: Change commit code to "commit as you go" style.
# Ref: https://docs.sqlalchemy.org/en/14/tutorial/dbapi_transactions.html#committing-changes
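# A minimal "commit as you go" sketch, for reference only (not wired into the commands
# below; the URI is a placeholder):
#
#   from sqlalchemy import create_engine, text
#   engine = create_engine("postgresql://user:pass@localhost/db", future=True)
#   with engine.connect() as conn:
#       conn.execute(text("SELECT 1"))
#       conn.commit()  # explicit commit; the connection rolls back otherwise when closed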
class RootOptions(object):
def __init__(self, production: bool):
self.production: bool = production
@click.group()
@click.option("--production/--no-production", default=False)
@click.pass_context
def cli(ctx, production):
click.echo("Production mode is %s" % ('on' if production else 'off'))
if production:
os.environ["PRODUCTION"] = "1"
else:
os.environ["PRODUCTION"] = "0"
if not os.getenv("AUTH_MODE") or os.getenv("AUTH_MODE") != "native":
click.secho("WARNING! SERVER RUNNING WITHOUT NATIVE SECURITY, ENSURE PRIVATE DEPLOYMENT BEHIND API GATEWAY",
fg="yellow")
ctx.obj = RootOptions(production)
@cli.command()
@click.argument("connection_uri", envvar="MASTER_DB_URI")
@click.argument("db_password", envvar="DB_PASSWORD")
@click.pass_obj
def init(options, connection_uri, db_password):
click.echo("Initializing database ...")
try:
engine = create_engine(connection_uri, future=True)
conn = engine.connect()
conn.execute(sql.text("commit"))
conn.execute(sql.text("CREATE DATABASE {{cookiecutter.project_name}}"))
conn.close()
except Exception as e:
click.secho("Failed to create database ...", fg="red")
click.echo(e)
else:
click.secho("Database created successfully ...", fg="green")
try:
engine = create_engine(connection_uri, future=True)
conn = engine.connect()
conn.execute(sql.text("commit"))
create_user_query = sql.text(
"CREATE USER {{cookiecutter.project_name}} WITH PASSWORD :password;"
)
conn.execute(create_user_query, {"password": db_password})
conn.close()
except Exception as e:
click.secho("Failed to create user ...", fg="red")
click.echo(e)
else:
click.secho("User '{{cookiecutter.project_name}}' created successfully ...", fg="green")
try:
parsed_uri = urlparse(connection_uri)
parsed_uri = parsed_uri._replace(path="/{{cookiecutter.project_name}}").geturl()
engine = create_engine(parsed_uri, future=True)
conn = engine.connect()
conn.execute(sql.text("commit"))
conn.execute(sql.text("GRANT CONNECT ON DATABASE {{cookiecutter.project_name}} TO {{cookiecutter.project_name}};"))
conn.execute(sql.text("GRANT USAGE ON SCHEMA public TO {{cookiecutter.project_name}};"))
conn.execute(sql.text("""
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO {{cookiecutter.project_name}};
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO {{cookiecutter.project_name}};
"""))
conn.execute(sql.text("""
ALTER DEFAULT PRIVILEGES IN SCHEMA public
GRANT ALL ON TABLES TO {{cookiecutter.project_name}};
ALTER DEFAULT PRIVILEGES IN SCHEMA public
GRANT ALL ON SEQUENCES TO {{cookiecutter.project_name}};
"""))
conn.close()
except Exception as e:
click.secho("Failed to assign db privileges ...", fg="red")
click.echo(e)
else:
click.secho("User '{{cookiecutter.project_name}}' given priviledges successfully ...", fg="green")
try:
parsed_uri = urlparse(connection_uri)
parsed_uri = parsed_uri._replace(path="/{{cookiecutter.project_name}}").geturl()
engine = create_engine(parsed_uri, future=True)
UserBase.metadata.create_all(engine)
except Exception as e:
click.secho("Failed to implement database models ...", fg="red")
click.echo(e)
else:
click.secho("Database models initialized successfully ...", fg="green")
if not options.production:
try:
parsed_uri = urlparse(connection_uri)
parsed_uri = parsed_uri._replace(path="/{{cookiecutter.project_name}}").geturl()
engine = create_engine(parsed_uri, future=True)
session = Session(engine)
admin_role = Role(title='Admin')
session.add(admin_role)
session.commit()
session.refresh(admin_role)
resource_all = Resource(name='*')
session.add(resource_all)
session.commit()
session.refresh(resource_all)
admin_permission = Permission(action='*', resource_id=resource_all.id)
session.add(admin_permission)
session.commit()
session.refresh(admin_permission)
admin_policy = Policy(name='Admin', permission_id=admin_permission.id, role_id=admin_role.id)
session.add(admin_policy)
session.commit()
session.refresh(admin_policy)
admin_user = User(
first_name='Pratik',
last_name='Shivarkar',
username='pratik.shivarkar',
phone_number='+19999999998',
email='pratik@shivarkar.org',
role_id=admin_role.id,
dob=date(1989, 1, 1)
)
admin_user.set_password("reset123")
session.add(admin_user)
session.commit()
session.refresh(admin_user)
except Exception as e:
click.secho("Failed to insert development data ...", fg="red")
click.echo(e)
else:
click.secho("Development data added ...", fg="green")
@cli.command()
@click.pass_obj
def test(options):
click.echo("Running tests ...")
@cli.command()
@click.argument("connection_uri", envvar="DB_URI")
@click.pass_obj
def run(options, connection_uri):
from app.main import app
click.secho("Checking configuration ...", fg="yellow")
try:
urlparse(connection_uri)._replace(path="/{{cookiecutter.project_name}}").geturl()
assert (hasattr(options, 'production'))
except Exception as e:
click.echo(e)
click.secho("Failed to validate database URI and password", fg="red")
click.secho("Starting server ...", fg="yellow")
config = Config()
config.bind = ["0.0.0.0:8080"]
config.errorlog = logger
config.accesslog = logger
if options.production:
config.loglevel = "DEBUG"
else:
config.loglevel = "INFO"
if 'uvloop' in sys.modules:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(serve(app, config))
else:
asyncio.run(serve(app, config))
@cli.command()
@click.argument("connection_uri", envvar="MASTER_DB_URI")
@click.pass_obj
def clean(options, connection_uri):
click.secho("CLEAR ALL DATA AND DATABASES ...", fg="red")
if click.confirm("Do you want to continue?"):
click.echo("This will clear all data")
try:
engine = create_engine(connection_uri, future=True)
conn = engine.connect()
conn.execute(sql.text("commit"))
conn.execute(sql.text("DROP DATABASE IF EXISTS {{cookiecutter.project_name}}"))
conn.execute(sql.text("DROP USER IF EXISTS {{cookiecutter.project_name}}"))
conn.close()
except Exception as e:
click.secho("Failed to clean all data ...", fg="red")
click.echo(e)
else:
click.secho("All data and databases are removed ...", fg="green")
click.echo("Run `{{cookiecutter.project_name}} init` to initialize database again.")
| 35.942731
| 123
| 0.638926
|
5f3a12799004ca1cd8f29cf251fbd12ac6f307fa
| 50,519
|
py
|
Python
|
rmgpy/test_data/testing_database/kinetics/libraries/GRI-Mech3.0/reactions.py
|
keceli/RMG-Py
|
17c7870195a4feb6e8bf8974292f9bcdca1a1d9d
|
[
"MIT"
] | 7
|
2017-10-04T16:04:14.000Z
|
2021-03-27T21:54:41.000Z
|
rmgpy/test_data/testing_database/kinetics/libraries/GRI-Mech3.0/reactions.py
|
speth/RMG-Py
|
1d2c2b684580396e984459d9347628a5ceb80e2e
|
[
"MIT"
] | 72
|
2016-06-06T18:18:49.000Z
|
2019-11-17T03:21:10.000Z
|
rmgpy/test_data/testing_database/kinetics/libraries/GRI-Mech3.0/reactions.py
|
speth/RMG-Py
|
1d2c2b684580396e984459d9347628a5ceb80e2e
|
[
"MIT"
] | 6
|
2017-10-04T15:37:05.000Z
|
2021-12-29T06:50:16.000Z
|
#!/usr/bin/env python
# encoding: utf-8
name = "GRI-Mech3.0"
shortDesc = u"Natural Gas Combustion Mechanism (without NOx chemistry)"
longDesc = u"""
The thermodynamic and kinetic parameters in the GRI-Mech 3.0 mechanism have
been collectively estimated from a literature search and then optimized to a set
of representative experimental targets. For this reason you should generally
use GRI-Mech in its entirety, and generally should not tweak any of its
parameter values.
GRI-Mech is the result of collaborative research sponsored by the Gas Research
Institute and carried out at The University of California at Berkeley, Stanford
University, The University of Texas at Austin, and SRI International.
"""
entry(
index = 1,
label = "O + H2 <=> H + OH",
degeneracy = 1,
kinetics = Arrhenius(A=(38700, 'cm^3/(mol*s)'), n=2.7, Ea=(6260, 'cal/mol'), T0=(1, 'K')),
)
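
# --- Illustrative sketch (not part of the original library) ---
# Each Arrhenius() entry encodes the modified Arrhenius expression
#     k(T) = A * (T / T0)**n * exp(-Ea / (R * T)),
# with A in cm^3/(mol*s), Ea in cal/mol and T0 = 1 K here. The commented helper below
# evaluates that expression for the parameters of entry 1; it is a hand-rolled sketch,
# not RMG's own kinetics implementation.
#
#   import math
#   R = 1.987  # cal/(mol*K)
#   def k_entry_1(T, A=38700.0, n=2.7, Ea=6260.0, T0=1.0):
#       return A * (T / T0) ** n * math.exp(-Ea / (R * T))  # cm^3/(mol*s)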
entry(
index = 2,
label = "O + HO2 <=> OH + O2",
degeneracy = 1,
kinetics = Arrhenius(A=(2e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 3,
label = "O + H2O2 <=> OH + HO2",
degeneracy = 1,
kinetics = Arrhenius(A=(9.63e+06, 'cm^3/(mol*s)'), n=2, Ea=(4000, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 4,
label = "O + CH <=> H + CO",
degeneracy = 1,
kinetics = Arrhenius(A=(5.7e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 5,
label = "O + CH2 <=> H + HCO",
degeneracy = 1,
kinetics = Arrhenius(A=(8e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 6,
label = "O + CH2(S) <=> H2 + CO",
degeneracy = 1,
kinetics = Arrhenius(A=(1.5e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 7,
label = "O + CH2(S) <=> H + HCO",
degeneracy = 1,
kinetics = Arrhenius(A=(1.5e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 8,
label = "O + CH3 <=> H + CH2O",
degeneracy = 1,
kinetics = Arrhenius(A=(5.06e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 9,
label = "O + CH4 <=> OH + CH3",
degeneracy = 1,
kinetics = Arrhenius(
A = (1.02e+09, 'cm^3/(mol*s)'),
n = 1.5,
Ea = (8600, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 10,
label = "O + HCO <=> OH + CO",
degeneracy = 1,
kinetics = Arrhenius(A=(3e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 11,
label = "O + HCO <=> H + CO2",
degeneracy = 1,
kinetics = Arrhenius(A=(3e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 12,
label = "O + CH2O <=> OH + HCO",
degeneracy = 1,
kinetics = Arrhenius(A=(3.9e+13, 'cm^3/(mol*s)'), n=0, Ea=(3540, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 13,
label = "O + CH2OH <=> OH + CH2O",
degeneracy = 1,
kinetics = Arrhenius(A=(1e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 14,
label = "O + CH3O <=> OH + CH2O",
degeneracy = 1,
kinetics = Arrhenius(A=(1e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 15,
label = "O + CH3OH <=> OH + CH2OH",
degeneracy = 1,
kinetics = Arrhenius(A=(388000, 'cm^3/(mol*s)'), n=2.5, Ea=(3100, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 16,
label = "O + CH3OH <=> OH + CH3O",
degeneracy = 1,
kinetics = Arrhenius(A=(130000, 'cm^3/(mol*s)'), n=2.5, Ea=(5000, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 17,
label = "O + C2H <=> CH + CO",
degeneracy = 1,
kinetics = Arrhenius(A=(5e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 18,
label = "O + C2H2 <=> H + HCCO",
degeneracy = 1,
kinetics = Arrhenius(A=(1.35e+07, 'cm^3/(mol*s)'), n=2, Ea=(1900, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 19,
label = "O + C2H2 <=> OH + C2H",
degeneracy = 1,
kinetics = Arrhenius(
A = (4.6e+19, 'cm^3/(mol*s)'),
n = -1.41,
Ea = (28950, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 20,
label = "O + C2H2 <=> CO + CH2",
degeneracy = 1,
kinetics = Arrhenius(A=(6.94e+06, 'cm^3/(mol*s)'), n=2, Ea=(1900, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 21,
label = "O + C2H3 <=> H + CH2CO",
degeneracy = 1,
kinetics = Arrhenius(A=(3e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 22,
label = "O + C2H4 <=> CH3 + HCO",
degeneracy = 1,
kinetics = Arrhenius(
A = (1.25e+07, 'cm^3/(mol*s)'),
n = 1.83,
Ea = (220, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 23,
label = "O + C2H5 <=> CH3 + CH2O",
degeneracy = 1,
kinetics = Arrhenius(A=(2.24e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 24,
label = "O + C2H6 <=> OH + C2H5",
degeneracy = 1,
kinetics = Arrhenius(
A = (8.98e+07, 'cm^3/(mol*s)'),
n = 1.92,
Ea = (5690, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 25,
label = "O + HCCO <=> H + CO + CO",
degeneracy = 1,
kinetics = Arrhenius(A=(1e+14, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 26,
label = "O + CH2CO <=> OH + HCCO",
degeneracy = 1,
kinetics = Arrhenius(A=(1e+13, 'cm^3/(mol*s)'), n=0, Ea=(8000, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 27,
label = "O + CH2CO <=> CH2 + CO2",
degeneracy = 1,
kinetics = Arrhenius(A=(1.75e+12, 'cm^3/(mol*s)'), n=0, Ea=(1350, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 28,
label = "O2 + CO <=> O + CO2",
degeneracy = 1,
kinetics = Arrhenius(A=(2.5e+12, 'cm^3/(mol*s)'), n=0, Ea=(47800, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 29,
label = "O2 + CH2O <=> HO2 + HCO",
degeneracy = 1,
kinetics = Arrhenius(A=(1e+14, 'cm^3/(mol*s)'), n=0, Ea=(40000, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 30,
label = "H + O2 + O2 <=> HO2 + O2",
degeneracy = 1,
kinetics = Arrhenius(
A = (2.08e+19, 'cm^6/(mol^2*s)'),
n = -1.24,
Ea = (0, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 31,
label = "H + O2 + H2O <=> HO2 + H2O",
degeneracy = 1,
kinetics = Arrhenius(
A = (1.126e+19, 'cm^6/(mol^2*s)'),
n = -0.76,
Ea = (0, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 32,
label = "H + O2 <=> O + OH",
degeneracy = 1,
kinetics = Arrhenius(
A = (2.65e+16, 'cm^3/(mol*s)'),
n = -0.6707,
Ea = (17041, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 33,
label = "H + H + H2 <=> H2 + H2",
degeneracy = 1,
kinetics = Arrhenius(A=(9e+16, 'cm^6/(mol^2*s)'), n=-0.6, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 34,
label = "H + H + H2O <=> H2 + H2O",
degeneracy = 1,
kinetics = Arrhenius(A=(6e+19, 'cm^6/(mol^2*s)'), n=-1.25, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 35,
label = "H + H + CO2 <=> H2 + CO2",
degeneracy = 1,
kinetics = Arrhenius(A=(5.5e+20, 'cm^6/(mol^2*s)'), n=-2, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 36,
label = "H + HO2 <=> O + H2O",
degeneracy = 1,
kinetics = Arrhenius(A=(3.97e+12, 'cm^3/(mol*s)'), n=0, Ea=(671, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 37,
label = "H + HO2 <=> O2 + H2",
degeneracy = 1,
kinetics = Arrhenius(A=(4.48e+13, 'cm^3/(mol*s)'), n=0, Ea=(1068, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 38,
label = "H + HO2 <=> OH + OH",
degeneracy = 1,
kinetics = Arrhenius(A=(8.4e+13, 'cm^3/(mol*s)'), n=0, Ea=(635, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 39,
label = "H + H2O2 <=> HO2 + H2",
degeneracy = 1,
kinetics = Arrhenius(A=(1.21e+07, 'cm^3/(mol*s)'), n=2, Ea=(5200, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 40,
label = "H + H2O2 <=> OH + H2O",
degeneracy = 1,
kinetics = Arrhenius(A=(1e+13, 'cm^3/(mol*s)'), n=0, Ea=(3600, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 41,
label = "H + CH <=> C + H2",
degeneracy = 1,
kinetics = Arrhenius(A=(1.65e+14, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 42,
label = "H + CH2(S) <=> CH + H2",
degeneracy = 1,
kinetics = Arrhenius(A=(3e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 43,
label = "H + CH4 <=> CH3 + H2",
degeneracy = 1,
kinetics = Arrhenius(
A = (6.6e+08, 'cm^3/(mol*s)'),
n = 1.62,
Ea = (10840, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 44,
label = "H + HCO <=> H2 + CO",
degeneracy = 1,
kinetics = Arrhenius(A=(7.34e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 45,
label = "H + CH2O <=> HCO + H2",
degeneracy = 1,
kinetics = Arrhenius(
A = (5.74e+07, 'cm^3/(mol*s)'),
n = 1.9,
Ea = (2742, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 46,
label = "H + CH2OH <=> H2 + CH2O",
degeneracy = 1,
kinetics = Arrhenius(A=(2e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 47,
label = "H + CH2OH <=> OH + CH3",
degeneracy = 1,
kinetics = Arrhenius(
A = (1.65e+11, 'cm^3/(mol*s)'),
n = 0.65,
Ea = (-284, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 48,
label = "H + CH2OH <=> CH2(S) + H2O",
degeneracy = 1,
kinetics = Arrhenius(
A = (3.28e+13, 'cm^3/(mol*s)'),
n = -0.09,
Ea = (610, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 49,
label = "H + CH3O <=> H + CH2OH",
degeneracy = 1,
kinetics = Arrhenius(
A = (4.15e+07, 'cm^3/(mol*s)'),
n = 1.63,
Ea = (1924, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 50,
label = "H + CH3O <=> H2 + CH2O",
degeneracy = 1,
kinetics = Arrhenius(A=(2e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 51,
label = "H + CH3O <=> OH + CH3",
degeneracy = 1,
kinetics = Arrhenius(A=(1.5e+12, 'cm^3/(mol*s)'), n=0.5, Ea=(-110, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 52,
label = "H + CH3O <=> CH2(S) + H2O",
degeneracy = 1,
kinetics = Arrhenius(
A = (2.62e+14, 'cm^3/(mol*s)'),
n = -0.23,
Ea = (1070, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 53,
label = "H + CH3OH <=> CH2OH + H2",
degeneracy = 1,
kinetics = Arrhenius(A=(1.7e+07, 'cm^3/(mol*s)'), n=2.1, Ea=(4870, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 54,
label = "H + CH3OH <=> CH3O + H2",
degeneracy = 1,
kinetics = Arrhenius(A=(4.2e+06, 'cm^3/(mol*s)'), n=2.1, Ea=(4870, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 55,
label = "H + C2H3 <=> H2 + C2H2",
degeneracy = 1,
kinetics = Arrhenius(A=(3e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 56,
label = "H + C2H4 <=> C2H3 + H2",
degeneracy = 1,
kinetics = Arrhenius(
A = (1.325e+06, 'cm^3/(mol*s)'),
n = 2.53,
Ea = (12240, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 57,
label = "H + C2H5 <=> H2 + C2H4",
degeneracy = 1,
kinetics = Arrhenius(A=(2e+12, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 58,
label = "H + C2H6 <=> C2H5 + H2",
degeneracy = 1,
kinetics = Arrhenius(
A = (1.15e+08, 'cm^3/(mol*s)'),
n = 1.9,
Ea = (7530, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 59,
label = "H + HCCO <=> CH2(S) + CO",
degeneracy = 1,
kinetics = Arrhenius(A=(1e+14, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 60,
label = "H + CH2CO <=> HCCO + H2",
degeneracy = 1,
kinetics = Arrhenius(A=(5e+13, 'cm^3/(mol*s)'), n=0, Ea=(8000, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 61,
label = "H + CH2CO <=> CH3 + CO",
degeneracy = 1,
kinetics = Arrhenius(A=(1.13e+13, 'cm^3/(mol*s)'), n=0, Ea=(3428, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 62,
label = "H + HCCOH <=> H + CH2CO",
degeneracy = 1,
kinetics = Arrhenius(A=(1e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 63,
label = "OH + H2 <=> H + H2O",
degeneracy = 1,
kinetics = Arrhenius(
A = (2.16e+08, 'cm^3/(mol*s)'),
n = 1.51,
Ea = (3430, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 64,
label = "OH + OH <=> O + H2O",
degeneracy = 1,
kinetics = Arrhenius(A=(35700, 'cm^3/(mol*s)'), n=2.4, Ea=(-2110, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 65,
label = "OH + HO2 <=> O2 + H2O",
degeneracy = 1,
duplicate = True,
kinetics = MultiArrhenius(
arrhenius = [
Arrhenius(A=(1.45e+13, 'cm^3/(mol*s)'), n=0, Ea=(-500, 'cal/mol'), T0=(1, 'K')),
Arrhenius(A=(5e+15, 'cm^3/(mol*s)'), n=0, Ea=(17330, 'cal/mol'), T0=(1, 'K')),
],
),
)
entry(
index = 66,
label = "OH + H2O2 <=> HO2 + H2O",
degeneracy = 1,
duplicate = True,
kinetics = MultiArrhenius(
arrhenius = [
Arrhenius(A=(2e+12, 'cm^3/(mol*s)'), n=0, Ea=(427, 'cal/mol'), T0=(1, 'K')),
Arrhenius(A=(1.7e+18, 'cm^3/(mol*s)'), n=0, Ea=(29410, 'cal/mol'), T0=(1, 'K')),
],
),
)
entry(
index = 68,
label = "OH + C <=> H + CO",
degeneracy = 1,
kinetics = Arrhenius(A=(5e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 69,
label = "OH + CH <=> H + HCO",
degeneracy = 1,
kinetics = Arrhenius(A=(3e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 70,
label = "OH + CH2 <=> H + CH2O",
degeneracy = 1,
kinetics = Arrhenius(A=(2e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 71,
label = "OH + CH2 <=> CH + H2O",
degeneracy = 1,
kinetics = Arrhenius(A=(1.13e+07, 'cm^3/(mol*s)'), n=2, Ea=(3000, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 72,
label = "OH + CH2(S) <=> H + CH2O",
degeneracy = 1,
kinetics = Arrhenius(A=(3e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 73,
label = "OH + CH3 <=> CH2 + H2O",
degeneracy = 1,
kinetics = Arrhenius(A=(5.6e+07, 'cm^3/(mol*s)'), n=1.6, Ea=(5420, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 74,
label = "OH + CH3 <=> CH2(S) + H2O",
degeneracy = 1,
kinetics = Arrhenius(
A = (6.44e+17, 'cm^3/(mol*s)'),
n = -1.34,
Ea = (1417, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 75,
label = "OH + CH4 <=> CH3 + H2O",
degeneracy = 1,
kinetics = Arrhenius(A=(1e+08, 'cm^3/(mol*s)'), n=1.6, Ea=(3120, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 76,
label = "OH + CO <=> H + CO2",
degeneracy = 1,
kinetics = Arrhenius(
A = (4.76e+07, 'cm^3/(mol*s)'),
n = 1.228,
Ea = (70, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 77,
label = "OH + HCO <=> H2O + CO",
degeneracy = 1,
kinetics = Arrhenius(A=(5e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 78,
label = "OH + CH2O <=> HCO + H2O",
degeneracy = 1,
kinetics = Arrhenius(
A = (3.43e+09, 'cm^3/(mol*s)'),
n = 1.18,
Ea = (-447, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 79,
label = "OH + CH2OH <=> H2O + CH2O",
degeneracy = 1,
kinetics = Arrhenius(A=(5e+12, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 80,
label = "OH + CH3O <=> H2O + CH2O",
degeneracy = 1,
kinetics = Arrhenius(A=(5e+12, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 81,
label = "OH + CH3OH <=> CH2OH + H2O",
degeneracy = 1,
kinetics = Arrhenius(A=(1.44e+06, 'cm^3/(mol*s)'), n=2, Ea=(-840, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 82,
label = "OH + CH3OH <=> CH3O + H2O",
degeneracy = 1,
kinetics = Arrhenius(A=(6.3e+06, 'cm^3/(mol*s)'), n=2, Ea=(1500, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 83,
label = "OH + C2H <=> H + HCCO",
degeneracy = 1,
kinetics = Arrhenius(A=(2e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 84,
label = "OH + C2H2 <=> H + CH2CO",
degeneracy = 1,
kinetics = Arrhenius(
A = (0.000218, 'cm^3/(mol*s)'),
n = 4.5,
Ea = (-1000, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 85,
label = "OH + C2H2 <=> H + HCCOH",
degeneracy = 1,
kinetics = Arrhenius(A=(504000, 'cm^3/(mol*s)'), n=2.3, Ea=(13500, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 86,
label = "OH + C2H2 <=> C2H + H2O",
degeneracy = 1,
kinetics = Arrhenius(A=(3.37e+07, 'cm^3/(mol*s)'), n=2, Ea=(14000, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 87,
label = "OH + C2H2 <=> CH3 + CO",
degeneracy = 1,
kinetics = Arrhenius(A=(0.000483, 'cm^3/(mol*s)'), n=4, Ea=(-2000, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 88,
label = "OH + C2H3 <=> H2O + C2H2",
degeneracy = 1,
kinetics = Arrhenius(A=(5e+12, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 89,
label = "OH + C2H4 <=> C2H3 + H2O",
degeneracy = 1,
kinetics = Arrhenius(A=(3.6e+06, 'cm^3/(mol*s)'), n=2, Ea=(2500, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 90,
label = "OH + C2H6 <=> C2H5 + H2O",
degeneracy = 1,
kinetics = Arrhenius(
A = (3.54e+06, 'cm^3/(mol*s)'),
n = 2.12,
Ea = (870, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 91,
label = "OH + CH2CO <=> HCCO + H2O",
degeneracy = 1,
kinetics = Arrhenius(A=(7.5e+12, 'cm^3/(mol*s)'), n=0, Ea=(2000, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 92,
label = "HO2 + HO2 <=> O2 + H2O2",
degeneracy = 1,
duplicate = True,
kinetics = MultiArrhenius(
arrhenius = [
Arrhenius(A=(1.3e+11, 'cm^3/(mol*s)'), n=0, Ea=(-1630, 'cal/mol'), T0=(1, 'K')),
Arrhenius(A=(4.2e+14, 'cm^3/(mol*s)'), n=0, Ea=(12000, 'cal/mol'), T0=(1, 'K')),
],
),
)
entry(
index = 94,
label = "HO2 + CH2 <=> OH + CH2O",
degeneracy = 1,
kinetics = Arrhenius(A=(2e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 95,
label = "HO2 + CH3 <=> O2 + CH4",
degeneracy = 1,
kinetics = Arrhenius(A=(1e+12, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 96,
label = "HO2 + CH3 <=> OH + CH3O",
degeneracy = 1,
kinetics = Arrhenius(A=(3.78e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 97,
label = "HO2 + CO <=> OH + CO2",
degeneracy = 1,
kinetics = Arrhenius(A=(1.5e+14, 'cm^3/(mol*s)'), n=0, Ea=(23600, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 98,
label = "HO2 + CH2O <=> HCO + H2O2",
degeneracy = 1,
kinetics = Arrhenius(A=(5.6e+06, 'cm^3/(mol*s)'), n=2, Ea=(12000, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 99,
label = "C + O2 <=> O + CO",
degeneracy = 1,
kinetics = Arrhenius(A=(5.8e+13, 'cm^3/(mol*s)'), n=0, Ea=(576, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 100,
label = "C + CH2 <=> H + C2H",
degeneracy = 1,
kinetics = Arrhenius(A=(5e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 101,
label = "C + CH3 <=> H + C2H2",
degeneracy = 1,
kinetics = Arrhenius(A=(5e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 102,
label = "CH + O2 <=> O + HCO",
degeneracy = 1,
kinetics = Arrhenius(A=(6.71e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 103,
label = "CH + H2 <=> H + CH2",
degeneracy = 1,
kinetics = Arrhenius(A=(1.08e+14, 'cm^3/(mol*s)'), n=0, Ea=(3110, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 104,
label = "CH + H2O <=> H + CH2O",
degeneracy = 1,
kinetics = Arrhenius(A=(5.71e+12, 'cm^3/(mol*s)'), n=0, Ea=(-755, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 105,
label = "CH + CH2 <=> H + C2H2",
degeneracy = 1,
kinetics = Arrhenius(A=(4e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 106,
label = "CH + CH3 <=> H + C2H3",
degeneracy = 1,
kinetics = Arrhenius(A=(3e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 107,
label = "CH + CH4 <=> H + C2H4",
degeneracy = 1,
kinetics = Arrhenius(A=(6e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 108,
label = "CH + CO2 <=> HCO + CO",
degeneracy = 1,
kinetics = Arrhenius(A=(1.9e+14, 'cm^3/(mol*s)'), n=0, Ea=(15792, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 109,
label = "CH + CH2O <=> H + CH2CO",
degeneracy = 1,
kinetics = Arrhenius(A=(9.46e+13, 'cm^3/(mol*s)'), n=0, Ea=(-515, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 110,
label = "CH + HCCO <=> CO + C2H2",
degeneracy = 1,
kinetics = Arrhenius(A=(5e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 111,
label = "CH2 + O2 => OH + H + CO",
degeneracy = 1,
reversible = False,
kinetics = Arrhenius(A=(5e+12, 'cm^3/(mol*s)'), n=0, Ea=(1500, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 112,
label = "CH2 + H2 <=> H + CH3",
degeneracy = 1,
kinetics = Arrhenius(A=(500000, 'cm^3/(mol*s)'), n=2, Ea=(7230, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 113,
label = "CH2 + CH2 <=> H2 + C2H2",
degeneracy = 1,
kinetics = Arrhenius(A=(1.6e+15, 'cm^3/(mol*s)'), n=0, Ea=(11944, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 114,
label = "CH2 + CH3 <=> H + C2H4",
degeneracy = 1,
kinetics = Arrhenius(A=(4e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 115,
label = "CH2 + CH4 <=> CH3 + CH3",
degeneracy = 1,
kinetics = Arrhenius(A=(2.46e+06, 'cm^3/(mol*s)'), n=2, Ea=(8270, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 116,
label = "CH2 + HCCO <=> C2H3 + CO",
degeneracy = 1,
kinetics = Arrhenius(A=(3e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 117,
label = "CH2(S) + O2 <=> H + OH + CO",
degeneracy = 1,
kinetics = Arrhenius(A=(2.8e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 118,
label = "CH2(S) + O2 <=> CO + H2O",
degeneracy = 1,
kinetics = Arrhenius(A=(1.2e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 119,
label = "CH2(S) + H2 <=> CH3 + H",
degeneracy = 1,
kinetics = Arrhenius(A=(7e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 120,
label = "CH2(S) + H2O <=> CH2 + H2O",
degeneracy = 1,
kinetics = Arrhenius(A=(3e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 121,
label = "CH2(S) + CH3 <=> H + C2H4",
degeneracy = 1,
kinetics = Arrhenius(A=(1.2e+13, 'cm^3/(mol*s)'), n=0, Ea=(-570, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 122,
label = "CH2(S) + CH4 <=> CH3 + CH3",
degeneracy = 1,
kinetics = Arrhenius(A=(1.6e+13, 'cm^3/(mol*s)'), n=0, Ea=(-570, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 123,
label = "CH2(S) + CO <=> CH2 + CO",
degeneracy = 1,
kinetics = Arrhenius(A=(9e+12, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 124,
label = "CH2(S) + CO2 <=> CH2 + CO2",
degeneracy = 1,
kinetics = Arrhenius(A=(7e+12, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 125,
label = "CH2(S) + CO2 <=> CO + CH2O",
degeneracy = 1,
kinetics = Arrhenius(A=(1.4e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 126,
label = "CH2(S) + C2H6 <=> CH3 + C2H5",
degeneracy = 1,
kinetics = Arrhenius(A=(4e+13, 'cm^3/(mol*s)'), n=0, Ea=(-550, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 127,
label = "CH3 + O2 <=> O + CH3O",
degeneracy = 1,
kinetics = Arrhenius(A=(3.56e+13, 'cm^3/(mol*s)'), n=0, Ea=(30480, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 128,
label = "CH3 + O2 <=> OH + CH2O",
degeneracy = 1,
kinetics = Arrhenius(A=(2.31e+12, 'cm^3/(mol*s)'), n=0, Ea=(20315, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 129,
label = "CH3 + H2O2 <=> HO2 + CH4",
degeneracy = 1,
kinetics = Arrhenius(A=(24500, 'cm^3/(mol*s)'), n=2.47, Ea=(5180, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 130,
label = "CH3 + CH3 <=> H + C2H5",
degeneracy = 1,
kinetics = Arrhenius(
A = (6.84e+12, 'cm^3/(mol*s)'),
n = 0.1,
Ea = (10600, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 131,
label = "CH3 + HCO <=> CH4 + CO",
degeneracy = 1,
kinetics = Arrhenius(A=(2.648e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 132,
label = "CH3 + CH2O <=> HCO + CH4",
degeneracy = 1,
kinetics = Arrhenius(A=(3320, 'cm^3/(mol*s)'), n=2.81, Ea=(5860, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 133,
label = "CH3 + CH3OH <=> CH2OH + CH4",
degeneracy = 1,
kinetics = Arrhenius(A=(3e+07, 'cm^3/(mol*s)'), n=1.5, Ea=(9940, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 134,
label = "CH3 + CH3OH <=> CH3O + CH4",
degeneracy = 1,
kinetics = Arrhenius(A=(1e+07, 'cm^3/(mol*s)'), n=1.5, Ea=(9940, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 135,
label = "CH3 + C2H4 <=> C2H3 + CH4",
degeneracy = 1,
kinetics = Arrhenius(A=(227000, 'cm^3/(mol*s)'), n=2, Ea=(9200, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 136,
label = "CH3 + C2H6 <=> C2H5 + CH4",
degeneracy = 1,
kinetics = Arrhenius(
A = (6.14e+06, 'cm^3/(mol*s)'),
n = 1.74,
Ea = (10450, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 137,
label = "HCO + H2O <=> H + CO + H2O",
degeneracy = 1,
kinetics = Arrhenius(A=(1.5e+18, 'cm^3/(mol*s)'), n=-1, Ea=(17000, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 138,
label = "HCO + O2 <=> HO2 + CO",
degeneracy = 1,
kinetics = Arrhenius(A=(1.345e+13, 'cm^3/(mol*s)'), n=0, Ea=(400, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 139,
label = "CH2OH + O2 <=> HO2 + CH2O",
degeneracy = 1,
kinetics = Arrhenius(A=(1.8e+13, 'cm^3/(mol*s)'), n=0, Ea=(900, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 140,
label = "CH3O + O2 <=> HO2 + CH2O",
degeneracy = 1,
kinetics = Arrhenius(
A = (4.28e-13, 'cm^3/(mol*s)'),
n = 7.6,
Ea = (-3530, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 141,
label = "C2H + O2 <=> HCO + CO",
degeneracy = 1,
kinetics = Arrhenius(A=(1e+13, 'cm^3/(mol*s)'), n=0, Ea=(-755, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 142,
label = "C2H + H2 <=> H + C2H2",
degeneracy = 1,
kinetics = Arrhenius(
A = (5.68e+10, 'cm^3/(mol*s)'),
n = 0.9,
Ea = (1993, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 143,
label = "C2H3 + O2 <=> HCO + CH2O",
degeneracy = 1,
kinetics = Arrhenius(
A = (4.58e+16, 'cm^3/(mol*s)'),
n = -1.39,
Ea = (1015, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 144,
label = "C2H5 + O2 <=> HO2 + C2H4",
degeneracy = 1,
kinetics = Arrhenius(A=(8.4e+11, 'cm^3/(mol*s)'), n=0, Ea=(3875, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 145,
label = "HCCO + O2 <=> OH + CO + CO",
degeneracy = 1,
kinetics = Arrhenius(A=(3.2e+12, 'cm^3/(mol*s)'), n=0, Ea=(854, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 146,
label = "HCCO + HCCO <=> CO + CO + C2H2",
degeneracy = 1,
kinetics = Arrhenius(A=(1e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 147,
label = "O + CH3 => H + H2 + CO",
degeneracy = 1,
reversible = False,
kinetics = Arrhenius(A=(3.37e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 148,
label = "O + C2H4 <=> H + CH2CHO",
degeneracy = 1,
kinetics = Arrhenius(A=(6.7e+06, 'cm^3/(mol*s)'), n=1.83, Ea=(220, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 149,
label = "O + C2H5 <=> H + CH3CHO",
degeneracy = 1,
kinetics = Arrhenius(A=(1.096e+14, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 151,
label = "OH + CH3 => H2 + CH2O",
degeneracy = 1,
reversible = False,
kinetics = Arrhenius(A=(8e+09, 'cm^3/(mol*s)'), n=0.5, Ea=(-1755, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 152,
label = "CH2 + O2 => H + H + CO2",
degeneracy = 1,
reversible = False,
kinetics = Arrhenius(A=(5.8e+12, 'cm^3/(mol*s)'), n=0, Ea=(1500, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 153,
label = "CH2 + O2 <=> O + CH2O",
degeneracy = 1,
kinetics = Arrhenius(A=(2.4e+12, 'cm^3/(mol*s)'), n=0, Ea=(1500, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 154,
label = "CH2 + CH2 => H + H + C2H2",
degeneracy = 1,
reversible = False,
kinetics = Arrhenius(A=(2e+14, 'cm^3/(mol*s)'), n=0, Ea=(10989, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 155,
label = "CH2(S) + H2O => H2 + CH2O",
degeneracy = 1,
reversible = False,
kinetics = Arrhenius(
A = (6.82e+10, 'cm^3/(mol*s)'),
n = 0.25,
Ea = (-935, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 156,
label = "C2H3 + O2 <=> O + CH2CHO",
degeneracy = 1,
kinetics = Arrhenius(A=(3.03e+11, 'cm^3/(mol*s)'), n=0.29, Ea=(11, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 157,
label = "C2H3 + O2 <=> HO2 + C2H2",
degeneracy = 1,
kinetics = Arrhenius(
A = (1.337e+06, 'cm^3/(mol*s)'),
n = 1.61,
Ea = (-384, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 158,
label = "O + CH3CHO <=> OH + CH2CHO",
degeneracy = 1,
kinetics = Arrhenius(A=(2.92e+12, 'cm^3/(mol*s)'), n=0, Ea=(1808, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 159,
label = "O + CH3CHO => OH + CH3 + CO",
degeneracy = 1,
reversible = False,
kinetics = Arrhenius(A=(2.92e+12, 'cm^3/(mol*s)'), n=0, Ea=(1808, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 160,
label = "O2 + CH3CHO => HO2 + CH3 + CO",
degeneracy = 1,
reversible = False,
kinetics = Arrhenius(A=(3.01e+13, 'cm^3/(mol*s)'), n=0, Ea=(39150, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 161,
label = "H + CH3CHO <=> CH2CHO + H2",
degeneracy = 1,
kinetics = Arrhenius(
A = (2.05e+09, 'cm^3/(mol*s)'),
n = 1.16,
Ea = (2405, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 162,
label = "H + CH3CHO => CH3 + H2 + CO",
degeneracy = 1,
reversible = False,
kinetics = Arrhenius(
A = (2.05e+09, 'cm^3/(mol*s)'),
n = 1.16,
Ea = (2405, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 163,
label = "OH + CH3CHO => CH3 + H2O + CO",
degeneracy = 1,
reversible = False,
kinetics = Arrhenius(
A = (2.343e+10, 'cm^3/(mol*s)'),
n = 0.73,
Ea = (-1113, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 164,
label = "HO2 + CH3CHO => CH3 + H2O2 + CO",
degeneracy = 1,
reversible = False,
kinetics = Arrhenius(A=(3.01e+12, 'cm^3/(mol*s)'), n=0, Ea=(11923, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 165,
label = "CH3 + CH3CHO => CH3 + CH4 + CO",
degeneracy = 1,
reversible = False,
kinetics = Arrhenius(
A = (2.72e+06, 'cm^3/(mol*s)'),
n = 1.77,
Ea = (5920, 'cal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 166,
label = "O + CH2CHO => H + CH2 + CO2",
degeneracy = 1,
reversible = False,
kinetics = Arrhenius(A=(1.5e+14, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 167,
label = "O2 + CH2CHO => OH + CO + CH2O",
degeneracy = 1,
reversible = False,
kinetics = Arrhenius(A=(1.81e+10, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 168,
label = "O2 + CH2CHO => OH + HCO + HCO",
degeneracy = 1,
reversible = False,
kinetics = Arrhenius(A=(2.35e+10, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 169,
label = "H + CH2CHO <=> CH3 + HCO",
degeneracy = 1,
kinetics = Arrhenius(A=(2.2e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 170,
label = "H + CH2CHO <=> CH2CO + H2",
degeneracy = 1,
kinetics = Arrhenius(A=(1.1e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 171,
label = "OH + CH2CHO <=> H2O + CH2CO",
degeneracy = 1,
kinetics = Arrhenius(A=(1.2e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 172,
label = "OH + CH2CHO <=> HCO + CH2OH",
degeneracy = 1,
kinetics = Arrhenius(A=(3.01e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
)
entry(
index = 173,
label = "O + O <=> O2",
degeneracy = 1,
kinetics = ThirdBody(
arrheniusLow = Arrhenius(A=(1.2e+17, 'cm^6/(mol^2*s)'), n=-1, Ea=(0, 'cal/mol'), T0=(1, 'K')),
efficiencies = {'C': 2, 'O=C=O': 3.6, 'CC': 3, 'O': 15.4, '[H][H]': 2.4, '[C]=O': 1.75, '[Ar]': 0.83},
),
)
entry(
index = 174,
label = "O + H <=> OH",
degeneracy = 1,
kinetics = ThirdBody(
arrheniusLow = Arrhenius(A=(5e+17, 'cm^6/(mol^2*s)'), n=-1, Ea=(0, 'cal/mol'), T0=(1, 'K')),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5, '[Ar]': 0.7},
),
)
entry(
index = 175,
label = "H + O2 <=> HO2",
degeneracy = 1,
kinetics = ThirdBody(
arrheniusLow = Arrhenius(
A = (2.8e+18, 'cm^6/(mol^2*s)'),
n = -0.86,
Ea = (0, 'cal/mol'),
T0 = (1, 'K'),
),
efficiencies = {'O=C=O': 1.5, 'CC': 1.5, 'O': 0, '[O][O]': 0, 'N#N': 0, '[C]=O': 0.75, '[Ar]': 0},
),
)
entry(
index = 176,
label = "H + H <=> H2",
degeneracy = 1,
kinetics = ThirdBody(
arrheniusLow = Arrhenius(A=(1e+18, 'cm^6/(mol^2*s)'), n=-1, Ea=(0, 'cal/mol'), T0=(1, 'K')),
efficiencies = {'C': 2, 'O=C=O': 0, 'CC': 3, 'O': 0, '[H][H]': 0, '[Ar]': 0.63},
),
)
entry(
index = 177,
label = "H + OH <=> H2O",
degeneracy = 1,
kinetics = ThirdBody(
arrheniusLow = Arrhenius(A=(2.2e+22, 'cm^6/(mol^2*s)'), n=-2, Ea=(0, 'cal/mol'), T0=(1, 'K')),
efficiencies = {'CC': 3, 'C': 2, '[H][H]': 0.73, 'O': 3.65, '[Ar]': 0.38},
),
)
entry(
index = 178,
label = "HCO <=> H + CO",
degeneracy = 1,
kinetics = ThirdBody(
arrheniusLow = Arrhenius(
A = (1.87e+17, 'cm^3/(mol*s)'),
n = -1,
Ea = (17000, 'cal/mol'),
T0 = (1, 'K'),
),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 0, '[H][H]': 2, '[C]=O': 1.5},
),
)
entry(
index = 179,
label = "O + CO <=> CO2",
degeneracy = 1,
kinetics = Lindemann(
arrheniusHigh = Arrhenius(A=(1.8e+10, 'cm^3/(mol*s)'), n=0, Ea=(2385, 'cal/mol'), T0=(1, 'K')),
arrheniusLow = Arrhenius(
A = (6.02e+14, 'cm^6/(mol^2*s)'),
n = 0,
Ea = (3000, 'cal/mol'),
T0 = (1, 'K'),
),
efficiencies = {'C': 2, 'O=C=O': 3.5, 'CC': 3, 'O': 6, '[H][H]': 2, '[O][O]': 6, '[C]=O': 1.5, '[Ar]': 0.5},
),
)
entry(
index = 180,
label = "H + CH2 <=> CH3",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(A=(6e+14, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
arrheniusLow = Arrhenius(
A = (1.04e+26, 'cm^6/(mol^2*s)'),
n = -2.76,
Ea = (1600, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.562,
T3 = (91, 'K'),
T1 = (5836, 'K'),
T2 = (8552, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5, '[Ar]': 0.7},
),
)
entry(
index = 181,
label = "H + CH3 <=> CH4",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(
A = (1.39e+16, 'cm^3/(mol*s)'),
n = -0.534,
Ea = (536, 'cal/mol'),
T0 = (1, 'K'),
),
arrheniusLow = Arrhenius(
A = (2.62e+33, 'cm^6/(mol^2*s)'),
n = -4.76,
Ea = (2440, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.783,
T3 = (74, 'K'),
T1 = (2941, 'K'),
T2 = (6964, 'K'),
efficiencies = {'C': 3, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5, '[Ar]': 0.7},
),
)
entry(
index = 182,
label = "H + HCO <=> CH2O",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(
A = (1.09e+12, 'cm^3/(mol*s)'),
n = 0.48,
Ea = (-260, 'cal/mol'),
T0 = (1, 'K'),
),
arrheniusLow = Arrhenius(
A = (2.47e+24, 'cm^6/(mol^2*s)'),
n = -2.57,
Ea = (425, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.7824,
T3 = (271, 'K'),
T1 = (2755, 'K'),
T2 = (6570, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5, '[Ar]': 0.7},
),
)
entry(
index = 183,
label = "H + CH2O <=> CH2OH",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(
A = (5.4e+11, 'cm^3/(mol*s)'),
n = 0.454,
Ea = (3600, 'cal/mol'),
T0 = (1, 'K'),
),
arrheniusLow = Arrhenius(
A = (1.27e+32, 'cm^6/(mol^2*s)'),
n = -4.82,
Ea = (6530, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.7187,
T3 = (103, 'K'),
T1 = (1291, 'K'),
T2 = (4160, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5},
),
)
entry(
index = 184,
label = "H + CH2O <=> CH3O",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(
A = (5.4e+11, 'cm^3/(mol*s)'),
n = 0.454,
Ea = (2600, 'cal/mol'),
T0 = (1, 'K'),
),
arrheniusLow = Arrhenius(
A = (2.2e+30, 'cm^6/(mol^2*s)'),
n = -4.8,
Ea = (5560, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.758,
T3 = (94, 'K'),
T1 = (1555, 'K'),
T2 = (4200, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5},
),
)
entry(
index = 185,
label = "H + CH2OH <=> CH3OH",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(A=(1.055e+12, 'cm^3/(mol*s)'), n=0.5, Ea=(86, 'cal/mol'), T0=(1, 'K')),
arrheniusLow = Arrhenius(
A = (4.36e+31, 'cm^6/(mol^2*s)'),
n = -4.65,
Ea = (5080, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.6,
T3 = (100, 'K'),
T1 = (90000, 'K'),
T2 = (10000, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5},
),
)
entry(
index = 186,
label = "H + CH3O <=> CH3OH",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(
A = (2.43e+12, 'cm^3/(mol*s)'),
n = 0.515,
Ea = (50, 'cal/mol'),
T0 = (1, 'K'),
),
arrheniusLow = Arrhenius(
A = (4.66e+41, 'cm^6/(mol^2*s)'),
n = -7.44,
Ea = (14080, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.7,
T3 = (100, 'K'),
T1 = (90000, 'K'),
T2 = (10000, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5},
),
)
entry(
index = 187,
label = "H + C2H <=> C2H2",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(A=(1e+17, 'cm^3/(mol*s)'), n=-1, Ea=(0, 'cal/mol'), T0=(1, 'K')),
arrheniusLow = Arrhenius(
A = (3.75e+33, 'cm^6/(mol^2*s)'),
n = -4.8,
Ea = (1900, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.6464,
T3 = (132, 'K'),
T1 = (1315, 'K'),
T2 = (5566, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5, '[Ar]': 0.7},
),
)
entry(
index = 188,
label = "H + C2H2 <=> C2H3",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(A=(5.6e+12, 'cm^3/(mol*s)'), n=0, Ea=(2400, 'cal/mol'), T0=(1, 'K')),
arrheniusLow = Arrhenius(
A = (3.8e+40, 'cm^6/(mol^2*s)'),
n = -7.27,
Ea = (7220, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.7507,
T3 = (98.5, 'K'),
T1 = (1302, 'K'),
T2 = (4167, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5, '[Ar]': 0.7},
),
)
entry(
index = 189,
label = "H + C2H3 <=> C2H4",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(
A = (6.08e+12, 'cm^3/(mol*s)'),
n = 0.27,
Ea = (280, 'cal/mol'),
T0 = (1, 'K'),
),
arrheniusLow = Arrhenius(
A = (1.4e+30, 'cm^6/(mol^2*s)'),
n = -3.86,
Ea = (3320, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.782,
T3 = (207.5, 'K'),
T1 = (2663, 'K'),
T2 = (6095, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5, '[Ar]': 0.7},
),
)
entry(
index = 190,
label = "H + C2H4 <=> C2H5",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(
A = (5.4e+11, 'cm^3/(mol*s)'),
n = 0.454,
Ea = (1820, 'cal/mol'),
T0 = (1, 'K'),
),
arrheniusLow = Arrhenius(
A = (6e+41, 'cm^6/(mol^2*s)'),
n = -7.62,
Ea = (6970, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.9753,
T3 = (210, 'K'),
T1 = (984, 'K'),
T2 = (4374, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5, '[Ar]': 0.7},
),
)
entry(
index = 191,
label = "H + C2H5 <=> C2H6",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(
A = (5.21e+17, 'cm^3/(mol*s)'),
n = -0.99,
Ea = (1580, 'cal/mol'),
T0 = (1, 'K'),
),
arrheniusLow = Arrhenius(
A = (1.99e+41, 'cm^6/(mol^2*s)'),
n = -7.08,
Ea = (6685, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.8422,
T3 = (125, 'K'),
T1 = (2219, 'K'),
T2 = (6882, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5, '[Ar]': 0.7},
),
)
entry(
index = 192,
label = "H2 + CO <=> CH2O",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(
A = (4.3e+07, 'cm^3/(mol*s)'),
n = 1.5,
Ea = (79600, 'cal/mol'),
T0 = (1, 'K'),
),
arrheniusLow = Arrhenius(
A = (5.07e+27, 'cm^6/(mol^2*s)'),
n = -3.42,
Ea = (84350, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.932,
T3 = (197, 'K'),
T1 = (1540, 'K'),
T2 = (10300, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5, '[Ar]': 0.7},
),
)
entry(
index = 193,
label = "OH + OH <=> H2O2",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(A=(7.4e+13, 'cm^3/(mol*s)'), n=-0.37, Ea=(0, 'cal/mol'), T0=(1, 'K')),
arrheniusLow = Arrhenius(
A = (2.3e+18, 'cm^6/(mol^2*s)'),
n = -0.9,
Ea = (-1700, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.7346,
T3 = (94, 'K'),
T1 = (1756, 'K'),
T2 = (5182, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5, '[Ar]': 0.7},
),
)
entry(
index = 194,
label = "OH + CH3 <=> CH3OH",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(
A = (2.79e+18, 'cm^3/(mol*s)'),
n = -1.43,
Ea = (1330, 'cal/mol'),
T0 = (1, 'K'),
),
arrheniusLow = Arrhenius(
A = (4e+36, 'cm^6/(mol^2*s)'),
n = -5.92,
Ea = (3140, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.412,
T3 = (195, 'K'),
T1 = (5900, 'K'),
T2 = (6394, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5},
),
)
entry(
index = 195,
label = "CH + CO <=> HCCO",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(A=(5e+13, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
arrheniusLow = Arrhenius(
A = (2.69e+28, 'cm^6/(mol^2*s)'),
n = -3.74,
Ea = (1936, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.5757,
T3 = (237, 'K'),
T1 = (1652, 'K'),
T2 = (5069, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5, '[Ar]': 0.7},
),
)
entry(
index = 196,
label = "CH2 + CO <=> CH2CO",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(A=(8.1e+11, 'cm^3/(mol*s)'), n=0.5, Ea=(4510, 'cal/mol'), T0=(1, 'K')),
arrheniusLow = Arrhenius(
A = (2.69e+33, 'cm^6/(mol^2*s)'),
n = -5.11,
Ea = (7095, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.5907,
T3 = (275, 'K'),
T1 = (1226, 'K'),
T2 = (5185, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5, '[Ar]': 0.7},
),
)
entry(
index = 197,
label = "CH2(S) + H2O <=> CH3OH",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(
A = (4.82e+17, 'cm^3/(mol*s)'),
n = -1.16,
Ea = (1145, 'cal/mol'),
T0 = (1, 'K'),
),
arrheniusLow = Arrhenius(
A = (1.88e+38, 'cm^6/(mol^2*s)'),
n = -6.36,
Ea = (5040, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.6027,
T3 = (208, 'K'),
T1 = (3922, 'K'),
T2 = (10180, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5},
),
)
entry(
index = 198,
label = "CH3 + CH3 <=> C2H6",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(
A = (6.77e+16, 'cm^3/(mol*s)'),
n = -1.18,
Ea = (654, 'cal/mol'),
T0 = (1, 'K'),
),
arrheniusLow = Arrhenius(
A = (3.4e+41, 'cm^6/(mol^2*s)'),
n = -7.03,
Ea = (2762, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.619,
T3 = (73.2, 'K'),
T1 = (1180, 'K'),
T2 = (9999, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5, '[Ar]': 0.7},
),
)
entry(
index = 199,
label = "C2H4 <=> H2 + C2H2",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(A=(8e+12, 's^-1'), n=0.44, Ea=(86770, 'cal/mol'), T0=(1, 'K')),
arrheniusLow = Arrhenius(
A = (1.58e+51, 'cm^3/(mol*s)'),
n = -9.3,
Ea = (97800, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.7345,
T3 = (180, 'K'),
T1 = (1035, 'K'),
T2 = (5417, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5, '[Ar]': 0.7},
),
)
entry(
index = 200,
label = "CH + H2 <=> CH3",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(
A = (1.97e+12, 'cm^3/(mol*s)'),
n = 0.43,
Ea = (-370, 'cal/mol'),
T0 = (1, 'K'),
),
arrheniusLow = Arrhenius(
A = (4.82e+25, 'cm^6/(mol^2*s)'),
n = -2.8,
Ea = (590, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.578,
T3 = (122, 'K'),
T1 = (2535, 'K'),
T2 = (9365, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5, '[Ar]': 0.7},
),
)
entry(
index = 201,
label = "H + CH2CO <=> CH2CHO",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(
A = (4.865e+11, 'cm^3/(mol*s)'),
n = 0.422,
Ea = (-1755, 'cal/mol'),
T0 = (1, 'K'),
),
arrheniusLow = Arrhenius(
A = (1.012e+42, 'cm^6/(mol^2*s)'),
n = -7.63,
Ea = (3854, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.465,
T3 = (201, 'K'),
T1 = (1773, 'K'),
T2 = (5333, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5, '[Ar]': 0.7},
),
)
entry(
index = 202,
label = "CH3 + C2H5 <=> C3H8",
degeneracy = 1,
kinetics = Troe(
arrheniusHigh = Arrhenius(A=(9.43e+12, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')),
arrheniusLow = Arrhenius(
A = (2.71e+74, 'cm^6/(mol^2*s)'),
n = -16.82,
Ea = (13065, 'cal/mol'),
T0 = (1, 'K'),
),
alpha = 0.1527,
T3 = (291, 'K'),
T1 = (2742, 'K'),
T2 = (7748, 'K'),
efficiencies = {'C': 2, 'O=C=O': 2, 'CC': 3, 'O': 6, '[H][H]': 2, '[C]=O': 1.5, '[Ar]': 0.7},
),
)
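# --- Illustrative sketch, not part of the kinetics library above ---
# Each Troe() block stores a high-pressure and a low-pressure Arrhenius fit
# plus the falloff parameters alpha, T3, T1 and T2.  The helper below is a
# minimal, hedged evaluation of the standard Troe falloff rate from those
# numbers; the temperature T (K), third-body concentration M and all
# parameter values are supplied by the caller and are not taken from any
# particular entry.
def troe_rate_sketch(T, M, A_inf, n_inf, Ea_inf, A_0, n_0, Ea_0,
                     alpha, T3, T1, T2, R=1.987):
    """Return k(T, M) for a Troe falloff reaction (Ea in cal/mol, R in cal/(mol*K))."""
    import math
    k_inf = A_inf * T ** n_inf * math.exp(-Ea_inf / (R * T))  # high-pressure limit
    k_0 = A_0 * T ** n_0 * math.exp(-Ea_0 / (R * T))          # low-pressure limit
    Pr = k_0 * M / k_inf                                       # reduced pressure
    Fcent = ((1.0 - alpha) * math.exp(-T / T3)
             + alpha * math.exp(-T / T1) + math.exp(-T2 / T))
    c = -0.4 - 0.67 * math.log10(Fcent)
    n = 0.75 - 1.27 * math.log10(Fcent)
    d = 0.14
    x = math.log10(Pr) + c
    log_F = math.log10(Fcent) / (1.0 + (x / (n - d * x)) ** 2)
    return k_inf * (Pr / (1.0 + Pr)) * 10.0 ** log_F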
| 25.096374
| 116
| 0.433045
|
0dc80f833349f7bba5975586cdf2be2820142c90
| 21,049
|
py
|
Python
|
storage/fase2/team15/storage/isam/InterfazBD.py
|
Josue-Zea/tytus
|
f9e4be9a8c03eb698fade7a748972e4f52d46685
|
[
"MIT"
] | 35
|
2020-12-07T03:11:43.000Z
|
2021-04-15T17:38:16.000Z
|
storage/fase2/team15/storage/isam/InterfazBD.py
|
Josue-Zea/tytus
|
f9e4be9a8c03eb698fade7a748972e4f52d46685
|
[
"MIT"
] | 47
|
2020-12-09T01:29:09.000Z
|
2021-01-13T05:37:50.000Z
|
storage/fase2/team15/storage/isam/InterfazBD.py
|
Josue-Zea/tytus
|
f9e4be9a8c03eb698fade7a748972e4f52d46685
|
[
"MIT"
] | 556
|
2020-12-07T03:13:31.000Z
|
2021-06-17T17:41:10.000Z
|
from tkinter import *
from tkinter import messagebox
from tkinter import simpledialog
from tkinter import ttk
import json
from . import Table
from tkinter import filedialog
from . import ISAMMode as Storage
from .DataBase import DataBase
from .Table import Table
import os
import pickle
#-------------------------------------------------- database options --------------------------------------------------
class PantallaBD():
def __init__(self,vectorBases):
self.ventana = Tk()
self.vectorBases=vectorBases
self.ventana.title("Opciones Bases de Datos")
self.contenedor = Frame(self.ventana, width=500, height=400)
self.contenedor.pack(fill="both", expand=True)
self.titulo=Label(self.contenedor, text="Bases de Datos", font=("Comic Sans MS", 18)).place(x=150, y=20)
        # buttons to create, delete, and edit databases
self.BtnCrearBase = Button(self.contenedor, text="Crear Base de Datos", command=self.crear, width=20).place(x=300, y=100)
self.BtnEliminarBase = Button(self.contenedor, text="Eliminar Base de Datos", command=self.EliminarBase, width=20).place(x=300, y=160)
self.BtnEditarBase = Button(self.contenedor, text="Editar Nombre de BD", command=self.EditarBase, width=20).place(x=300, y=220)
self.BtnTablas = Button(self.contenedor, text="Ver Tablas", command=self.Tablas,width=20).place(x=300, y=280)
self.BtnGraficar = Button(self.contenedor, text="Graficar Tablas", command=self.graficar, width=20).place(x=300, y=340)
self.listboxBases = Listbox(self.contenedor, width=40, height=18)
self.CargarBases()
self.listboxBases.place(x=35, y=80)
self.ventana.mainloop()
    # remove the selected item from the listbox and from the database array
def EliminarBase(self):
if len(self.listboxBases.curselection()) != 0:
elemento=self.listboxBases.get(self.listboxBases.curselection()[0])
a=messagebox.askquestion("Eliminar","Quieres eliminar La base de datos \n\t"+elemento)
if a=="yes":
self.listboxBases.delete(self.listboxBases.curselection())
indice=0
for i in range(0,len(self.vectorBases)):
if self.vectorBases[i]==elemento:
indice=i
print(Storage.dropDatabase(elemento))
    # rename the selected database
def EditarBase(self):
if len(self.listboxBases.curselection()) != 0:
elemento=self.listboxBases.get(self.listboxBases.curselection()[0])
self.ventana.destroy()
PantallaEditarBD(elemento)
    # load the database list into the listbox
def CargarBases(self):
for i in range(0,len(self.vectorBases)):
self.listboxBases.insert(i,self.vectorBases[i])
    # open the create-database screen
def crear(self):
self.ventana.destroy()
PantallaCrearBD()
    # open the table view for the selected database
def Tablas(self):
if len(self.listboxBases.curselection()) != 0:
nombreBD = self.listboxBases.get(self.listboxBases.curselection()[0])
self.ventana.destroy()
            # the table vector must be passed along
PantallaTablas(nombreBD)
def graficar(self):
if len(self.listboxBases.curselection()) != 0:
nombreBD = self.listboxBases.get(self.listboxBases.curselection()[0])
self.ventana.destroy()
PantallaGraficoTablas(nombreBD)
class PantallaCrearBD:
def __init__(self):
self.ventana = Tk()
self.val = StringVar()
self.ventana.geometry('300x130')
self.ventana.title('Crear BD')
self.titulo = Label(self.ventana, text="Ingrese el nombre de la BD", font=("Comic Sans MS", 15)).place(x=20, y=5)
self.Entrada = Entry(self.ventana, textvariable=self.val, width=40).place(x=20, y=40)
self.Boton = Button(self.ventana, text='CREAR', command=self.llamar(), width=15).place(x=80, y=70)
self.ventana.mainloop()
def llamar(self):
return lambda: self._llamar()
def _llamar(self):
        # create the database; valv holds the name
valv = str(self.val.get())
print(Storage.createDatabase(valv))
self.ventana.destroy()
PantallaBD(Storage.showDatabases())
class PantallaEditarBD:
def __init__(self, nombreViejo):
self.nombreViejo=nombreViejo
self.ventana = Tk()
self.val = StringVar()
self.ventana.geometry('300x130')
self.ventana.title('Editar BD')
self.titulo = Label(self.ventana, text="Ingrese el nuevo nombre de la BD", font=("Comic Sans MS", 15)).place(x=15, y=5)
self.Entrada = Entry(self.ventana, textvariable=self.val, width=40).place(x=20, y=40)
self.Boton = Button(self.ventana, text='EDITAR', command=self.llamar(), width=15).place(x=80, y=70)
self.ventana.mainloop()
def llamar(self):
return lambda: self._llamar()
def _llamar(self):
nombreNuevo = str(self.val.get())
print(Storage.alterDatabase(self.nombreViejo,nombreNuevo))
self.ventana.destroy()
PantallaBD(Storage.showDatabases())
# ------------------------------------------------------ table charts --------------------------------
class PantallaGraficoTablas:
def __init__(self, nombreBD):
self.ventana = Tk()
self.nombreBD = nombreBD
        # build the chart for the tables of the selected database
Storage.checkDirs()
Storage.chartList(Storage.showTables(self.nombreBD))
self.ventana.title("BD "+self.nombreBD)
self.ventana.geometry("450x300")
self.contenedor = Frame(self.ventana)
self.contenedor.pack(fill="both", expand=True)
self.canvas = Canvas(self.contenedor)
self.canvas.pack(side=LEFT, fill=BOTH, expand=1)
self.scroll = ttk.Scrollbar(self.contenedor, orient=VERTICAL, command=self.canvas.yview)
self.scroll.pack(side=RIGHT, fill=Y)
self.canvas.configure(yscrollcommand=self.scroll.set)
self.canvas.bind('<Configure>', lambda e: self.canvas.configure(scrollregion=self.canvas.bbox("all")))
self.segundocontenedor = Frame(self.canvas, width=300, height=300)
self.canvas.create_window((0, 0), window=self.segundocontenedor, anchor="nw")
imagen = PhotoImage(file="list.png")
labelimagen = Label(self.segundocontenedor, image=imagen).pack()
Button(self.segundocontenedor, text="Salir", command=self.salir, width=20).pack()
self.ventana.mainloop()
def salir(self):
self.ventana.destroy()
PantallaBD(Storage.showDatabases())
#-------------------------------------------------------- table options -----------------------------------------------
class PantallaTablas:
def __init__(self, nombreBD):
self.ventana = Tk()
self.nombreBD=nombreBD
self.ventana.title("Opciones de las tablas")
self.contenedor = Frame(self.ventana, width=500, height=400)
self.contenedor.pack(fill="both", expand=True)
self.titulo = Label(self.contenedor, text="Tablas de la BD: "+nombreBD, font=("Comic Sans MS", 18)).place(x=110, y=20)
        # buttons for table operations
Button(self.contenedor, text="Crear Tabla de Datos", command=self.crear, width=20).place(x=300, y=80)
Button(self.contenedor, text="Borrar Tabla", command=self.borrar, width=20).place(x=300, y=110)
Button(self.contenedor, text="Cambiar nombre", command=self.renombrar, width=20).place(x=300,y=140)
Button(self.contenedor, text="Agregar Columna", command=self.agregarC, width=20).place(x=300, y=170)
Button(self.contenedor, text="Eliminar Columna", command=self.borrarC, width=20).place(x=300, y=200)
Button(self.contenedor, text="Agregar PK", command=self.agregarPK, width=20).place(x=300, y=230)
Button(self.contenedor, text="Eliminar PK", command=self.eliminarPK, width=20).place(x=300, y=260)
Button(self.contenedor, text="Ver Tabla", command=self.extraerTabla, width=20).place(x=300, y=290)
Button(self.contenedor, text="Graficar", command=self.graficar, width=20).place(x=300, y=320)
Button(self.contenedor, text="Regresar", command=self.salir, width=20).place(x=300, y=350)
self.listboxTablas= Listbox(self.contenedor, width=40, height=18)
self.Cargartablas()
self.listboxTablas.place(x=35, y=80)
self.ventana.mainloop()
    # load the table list into the listbox
def Cargartablas(self):
for i in range(0, len(Storage.showTables(self.nombreBD))):
self.listboxTablas.insert(i, Storage.showTables(self.nombreBD)[i])
def crear(self):
try:
nombretabla=simpledialog.askstring('Crear Tabla Datos','ingrese el nombre de la tabla')
cantcolumnas=simpledialog.askinteger('Crear Tabla Datos', 'ingrese el numero de columnas que desea')
print(Storage.createTable(self.nombreBD,nombretabla,cantcolumnas))
self.listboxTablas.delete(0, END)
self.Cargartablas()
except:
""
def borrar(self):
try:
if len(self.listboxTablas.curselection()) != 0:
elemento = self.listboxTablas.get(self.listboxTablas.curselection()[0])
a = messagebox.askquestion("Eliminar", "Quieres eliminar La tabla \n\t" + elemento)
if a == "yes":
print(Storage.dropTable(self.nombreBD, elemento))
self.listboxTablas.delete(0, END)
self.Cargartablas()
except:
""
def renombrar(self):
try:
if len(self.listboxTablas.curselection()) != 0:
nombreviejo = self.listboxTablas.get(self.listboxTablas.curselection()[0])
nombrenuevo=simpledialog.askstring('Editar Tabla','ingrese el nombre nuevo de la tabla')
a = messagebox.askquestion("Editar", "Quieres Cambiar el nombre La tabla " + nombreviejo+"\npor "+nombrenuevo)
if a == "yes":
print(Storage.alterTable(self.nombreBD,nombreviejo,nombrenuevo))
self.listboxTablas.delete(0, END)
self.Cargartablas()
except:
""
def agregarC(self):
try:
if len(self.listboxTablas.curselection()) != 0:
nombretabla = self.listboxTablas.get(self.listboxTablas.curselection()[0])
valor = simpledialog.askstring('Agregar Columna', 'ingrese el valor por default')
print(Storage.alterAddColumn(self.nombreBD,nombretabla,valor))
except:
""
def borrarC(self):
try:
if len(self.listboxTablas.curselection()) != 0:
nombretabla = self.listboxTablas.get(self.listboxTablas.curselection()[0])
columna = simpledialog.askinteger('Borrar Columna', 'ingrese el numero de columna')
print(Storage.alterDropColumn(self.nombreBD,nombretabla,columna))
except:
""
def extraerTabla(self):
if len(self.listboxTablas.curselection()) != 0:
nombretabla = self.listboxTablas.get(self.listboxTablas.curselection()[0])
self.ventana.destroy()
PantallaTuplas(self.nombreBD,nombretabla,Storage.extractTable(self.nombreBD, nombretabla))
def agregarPK(self):
try:
if len(self.listboxTablas.curselection()) != 0:
nombretabla = self.listboxTablas.get(self.listboxTablas.curselection()[0])
entrada = simpledialog.askstring('Listado de # de columnas', 'ingrese el listado separado por , sin espacios')
lista=entrada.split(",")
listafinal = []
for i in lista:
listafinal.append(int(i))
print(Storage.alterAddPK(self.nombreBD,nombretabla,listafinal))
except:
""
def eliminarPK(self):
if len(self.listboxTablas.curselection()) != 0:
nombretabla = self.listboxTablas.get(self.listboxTablas.curselection()[0])
print(Storage.alterDropPK(self.nombreBD,nombretabla))
def graficar(self):
if len(self.listboxTablas.curselection()) != 0:
nombretabla = self.listboxTablas.get(self.listboxTablas.curselection()[0])
self.ventana.destroy()
PantallaGrafico(self.nombreBD,nombretabla)
def salir(self):
self.ventana.destroy()
PantallaBD(Storage.showDatabases())
#------------------------------------------------------ tree charts --------------------------------
class PantallaGrafico:
def __init__(self,nombreBD, nombreTabla):
self.ventana = Tk()
self.nombreBD=nombreBD
self.nombreTabla=nombreTabla
        # build the chart for the selected table
Storage.checkDirs()
tab=Storage.rollback('tables/' + nombreBD + nombreTabla)
tab.chart()
self.ventana.title("Tabla "+self.nombreTabla)
self.ventana.geometry("450x300")
self.contenedor = Frame(self.ventana)
self.contenedor.pack(fill="both", expand=True)
#self.titulo = Label(self.contenedor, text="Tuplas de la tabla: " + self.nombreTabla, font=("Comic Sans MS", 18)).place(x=150, y=5)
self.canvas=Canvas(self.contenedor)
self.canvas.pack(side=LEFT,fill=BOTH,expand=1)
self.scroll=ttk.Scrollbar(self.contenedor, orient=VERTICAL,command=self.canvas.yview)
self.scroll.pack(side=RIGHT,fill=Y)
self.canvas.configure(yscrollcommand=self.scroll.set)
self.canvas.bind('<Configure>',lambda e:self.canvas.configure(scrollregion=self.canvas.bbox("all")))
self.segundocontenedor=Frame(self.canvas,width=300, height=300)
self.canvas.create_window((0,0),window=self.segundocontenedor,anchor="nw")
imagen=PhotoImage(file="isam.png")
labelimagen=Label(self.segundocontenedor,image=imagen).pack()
Button(self.segundocontenedor, text="Salir", command=self.salir, width=20).pack()
self.ventana.mainloop()
def salir(self):
self.ventana.destroy()
PantallaTablas(self.nombreBD)
#---------------------------------------------------- tuple extraction -------------------------------------------------------------------
class PantallaTuplas:
def __init__(self, nombreBD, nombreTabla, listaTuplas):
self.ventana = Tk()
self.nombreBD = nombreBD
self.nombreTabla=nombreTabla
self.listaTuplas=listaTuplas
self.ventana.title("Opciones de las Tuplas")
self.contenedor = Frame(self.ventana, width=500, height=380)
self.contenedor.pack(fill="both", expand=True)
self.titulo = Label(self.contenedor, text="Tuplas de la tabla: " + self.nombreTabla, font=("Comic Sans MS", 18)).place(x=110, y=10)
self.titulo = Label(self.contenedor, text="Posee "+str(Storage.rollback('tables/' + self.nombreBD + self.nombreTabla).numberColumns)+" Columnas",font=("Comic Sans MS", 14)).place(x=150, y=40)
        # buttons for tuple operations
Button(self.contenedor, text="Extraer Tabla Completa", command=self.extraertabla, width=20).place(x=300, y=80)
Button(self.contenedor, text="Extraer Por Rangos", command=self.extraerrango, width=20).place(x=300, y=110)
Button(self.contenedor, text="Extraer Row (tupla)", command=self.extraertupla, width=20).place(x=300, y=140)
Button(self.contenedor, text="Insertar Registro", command=self.insertar, width=20).place(x=300, y=170)
Button(self.contenedor, text="Actualizar Registro", command=self.actualizar, width=20).place(x=300, y=200)
Button(self.contenedor, text="Eliminar Registro", command=self.eliminar, width=20).place(x=300, y=230)
Button(self.contenedor, text="Eliminar Todo", command=self.eliminartodo, width=20).place(x=300, y=260)
Button(self.contenedor, text="Cargar CSV", command=self.cargarCSV, width=20).place(x=300, y=290)
Button(self.contenedor, text="Regresar", command=self.salir, width=20).place(x=300, y=320)
self.listboxTuplas = Listbox(self.contenedor, width=40, height=16)
self.Cargartuplas()
self.listboxTuplas.place(x=35, y=80)
self.ventana.mainloop()
def Cargartuplas(self):
for i in range(0, len(self.listaTuplas)):
self.listboxTuplas.insert(i, self.listaTuplas[i])
def extraertabla(self):
self.listboxTuplas.delete(0, END)
self.listaTuplas=Storage.extractTable(self.nombreBD, self.nombreTabla)
self.Cargartuplas()
def extraerrango(self):
try:
columna = simpledialog.askinteger('Rango', 'ingrese el numero de indice de la columna')
inicio = simpledialog.askstring('Rango', 'ingrese el valor de inicio')
final = simpledialog.askstring('Rango', 'ingrese el valor del final')
self.listboxTuplas.delete(0, END)
self.listaTuplas=Storage.extractRangeTable(self.nombreBD,self.nombreTabla,columna,inicio,final)
self.Cargartuplas()
except:
""
def extraertupla(self):
try:
lista = simpledialog.askstring('Extraer Registro', 'ingrese el id del registro que desea extraer\n'
'si tiene llave compuesta separela por ,')
listafinal=lista.split(",")
Tupla = Storage.extractRow(self.nombreBD,self.nombreTabla,listafinal)
self.listboxTuplas.delete(0, END)
self.listboxTuplas.insert(0, Tupla)
except:
""
def insertar(self):
try:
entrada = simpledialog.askstring('Insertar tuplas', 'ingrese el listado separado por , sin espacios')
lista=entrada.split(",")
Storage.insert(self.nombreBD, self.nombreTabla, lista)
self.listboxTuplas.delete(0, END)
self.listaTuplas=Storage.extractTable(self.nombreBD, self.nombreTabla)
self.Cargartuplas()
except:
""
def actualizar(self):
try:
entrada = simpledialog.askstring('Actualizar Registro por un diccionario', 'Ingrese los datos separados por : y ,\n'
'por ejemplo id:valor,id2:valor2\n'
'sin espacios')
llave= simpledialog.askstring('Ingrese la llave primaria',
'si la llave es compuesta ingresela separada por ,'
'sin espacios')
entrada2=entrada.split(",")
entrada3=[]
for i in entrada2:
aux=i.split(":")
for j in aux:
try:
k=int(j)
esnumero=True
except:
esnumero=False
if esnumero:
entrada3.append(k)
else:
entrada3.append(j)
diccionario={}
for i in range(0,len(entrada3),2):
diccionario[entrada3[i]]=entrada3[i+1]
pk=llave.split(",")
print(Storage.update(self.nombreBD,self.nombreTabla,diccionario,pk))
self.listboxTuplas.delete(0, END)
self.listaTuplas = Storage.extractTable(self.nombreBD, self.nombreTabla)
self.Cargartuplas()
except:
""
def eliminar(self):
try:
entrada = simpledialog.askstring('Eliminar tuplas', 'ingrese las pk separado por , sin espacios')
lista = entrada.split(",")
print(Storage.delete(self.nombreBD,self.nombreTabla,lista))
self.listboxTuplas.delete(0, END)
self.listaTuplas = Storage.extractTable(self.nombreBD, self.nombreTabla)
self.Cargartuplas()
except:
""
def eliminartodo(self):
try:
a = messagebox.askquestion("Eliminar", "Quieres eliminar todos los registros de la tabla \n\t" + self.nombreTabla)
if a == "yes":
print(Storage.truncate(self.nombreBD,self.nombreTabla))
self.listboxTuplas.delete(0, END)
self.listaTuplas = Storage.extractTable(self.nombreBD, self.nombreTabla)
self.Cargartuplas()
except:
""
def cargarCSV(self):
try:
archivo=filedialog.askopenfilename(title="Cargar")
Storage.loadCSV(archivo,self.nombreBD,self.nombreTabla)
self.listboxTuplas.delete(0, END)
self.listaTuplas = Storage.extractTable(self.nombreBD, self.nombreTabla)
self.Cargartuplas()
except:
""
def salir(self):
self.ventana.destroy()
PantallaTablas(self.nombreBD)
| 46.67184
| 199
| 0.612286
|
4c18f6c8e916e7975cb39cfccff154bbab143fb4
| 12,396
|
py
|
Python
|
impacket/dcerpc/v5/even.py
|
iamjmat/impacket
|
61da73fcc793259e6d1487fff9835e638487fe04
|
[
"Apache-1.1"
] | 61
|
2020-07-23T14:07:59.000Z
|
2021-11-24T14:15:57.000Z
|
impacket/dcerpc/v5/even.py
|
3ozir/impacket
|
d1ced941eb2235ed365b13f661b1d5b4bc2683f3
|
[
"Apache-1.1"
] | null | null | null |
impacket/dcerpc/v5/even.py
|
3ozir/impacket
|
d1ced941eb2235ed365b13f661b1d5b4bc2683f3
|
[
"Apache-1.1"
] | 21
|
2021-06-29T23:14:54.000Z
|
2022-03-24T13:13:58.000Z
|
# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author: Alberto Solino (@agsolino)
# Itamar Mizrahi (@MrAnde7son)
#
# Description:
# [MS-EVEN] Interface implementation
#
# Best way to learn how to use these calls is to grab the protocol standard
# so you understand what the call does, and then read the test case located
# at https://github.com/SecureAuthCorp/impacket/tree/master/tests/SMB_RPC
#
# Some calls have helper functions, which makes it even easier to use.
# They are located at the end of this file.
# Helper functions start with "h"<name of the call>.
# There are test cases for them too.
#
from __future__ import division
from __future__ import print_function
from impacket.dcerpc.v5.ndr import NDRCALL, NDRSTRUCT, NDR, NDRPOINTERNULL, NDRUniConformantArray
from impacket.dcerpc.v5.dtypes import ULONG, LPWSTR, RPC_UNICODE_STRING, LPSTR, NTSTATUS, NULL, PRPC_UNICODE_STRING, PULONG, USHORT, PRPC_SID, LPBYTE
from impacket.dcerpc.v5.lsad import PRPC_UNICODE_STRING_ARRAY
from impacket.structure import Structure
from impacket import nt_errors
from impacket.uuid import uuidtup_to_bin
from impacket.dcerpc.v5.rpcrt import DCERPCException
MSRPC_UUID_EVEN = uuidtup_to_bin(('82273FDC-E32A-18C3-3F78-827929DC23EA','0.0'))
class DCERPCSessionError(DCERPCException):
def __init__(self, error_string=None, error_code=None, packet=None):
DCERPCException.__init__(self, error_string, error_code, packet)
def __str__( self ):
key = self.error_code
if key in nt_errors.ERROR_MESSAGES:
error_msg_short = nt_errors.ERROR_MESSAGES[key][0]
error_msg_verbose = nt_errors.ERROR_MESSAGES[key][1]
return 'EVEN SessionError: code: 0x%x - %s - %s' % (self.error_code, error_msg_short, error_msg_verbose)
else:
return 'EVEN SessionError: unknown error code: 0x%x' % self.error_code
################################################################################
# CONSTANTS
################################################################################
# 2.2.2 EventType
EVENTLOG_SUCCESS = 0x0000
EVENTLOG_ERROR_TYPE = 0x0001
EVENTLOG_WARNING_TYPE = 0x0002
EVENTLOG_INFORMATION_TYPE = 0x0004
EVENTLOG_AUDIT_SUCCESS = 0x0008
EVENTLOG_AUDIT_FAILURE = 0x0010
# 2.2.7 EVENTLOG_HANDLE_A and EVENTLOG_HANDLE_W
#EVENTLOG_HANDLE_A
EVENTLOG_HANDLE_W = LPWSTR
# 2.2.9 Constants Used in Method Definitions
MAX_STRINGS = 0x00000100
MAX_SINGLE_EVENT = 0x0003FFFF
MAX_BATCH_BUFF = 0x0007FFFF
# 3.1.4.7 ElfrReadELW (Opnum 10)
EVENTLOG_SEQUENTIAL_READ = 0x00000001
EVENTLOG_SEEK_READ = 0x00000002
EVENTLOG_FORWARDS_READ = 0x00000004
EVENTLOG_BACKWARDS_READ = 0x00000008
################################################################################
# STRUCTURES
################################################################################
class IELF_HANDLE(NDRSTRUCT):
structure = (
('Data','20s=""'),
)
def getAlignment(self):
return 1
# 2.2.3 EVENTLOGRECORD
class EVENTLOGRECORD(Structure):
structure = (
('Length','<L=0'),
('Reserved','<L=0'),
('RecordNumber','<L=0'),
('TimeGenerated','<L=0'),
('TimeWritten','<L=0'),
('EventID','<L=0'),
('EventType','<H=0'),
('NumStrings','<H=0'),
('EventCategory','<H=0'),
('ReservedFlags','<H=0'),
('ClosingRecordNumber','<L=0'),
('StringOffset','<L=0'),
('UserSidLength','<L=0'),
('UserSidOffset','<L=0'),
('DataLength','<L=0'),
('DataOffset','<L=0'),
('SourceName','z'),
('Computername','z'),
('UserSidPadding',':'),
('_UserSid','_-UserSid', 'self["UserSidLength"]'),
('UserSid',':'),
('Strings',':'),
('_Data','_-Data', 'self["DataLength"]'),
('Data',':'),
('Padding',':'),
('Length2','<L=0'),
)
# 2.2.4 EVENTLOG_FULL_INFORMATION
class EVENTLOG_FULL_INFORMATION(NDRSTRUCT):
structure = (
('dwFull', ULONG),
)
# 2.2.8 RPC_CLIENT_ID
class RPC_CLIENT_ID(NDRSTRUCT):
structure = (
('UniqueProcess', ULONG),
('UniqueThread', ULONG),
)
# 2.2.12 RPC_STRING
class RPC_STRING(NDRSTRUCT):
structure = (
('Length','<H=0'),
('MaximumLength','<H=0'),
('Data',LPSTR),
)
def __setitem__(self, key, value):
if key == 'Data' and isinstance(value, NDR) is False:
self['Length'] = len(value)
self['MaximumLength'] = len(value)
return NDRSTRUCT.__setitem__(self, key, value)
def dump(self, msg = None, indent = 0):
if msg is None: msg = self.__class__.__name__
if msg != '':
print("%s" % msg, end=' ')
if isinstance(self.fields['Data'] , NDRPOINTERNULL):
print(" NULL", end=' ')
elif self.fields['Data']['ReferentID'] == 0:
print(" NULL", end=' ')
else:
return self.fields['Data'].dump('',indent)
################################################################################
# RPC CALLS
################################################################################
# 3.1.4.9 ElfrClearELFW (Opnum 0)
class ElfrClearELFW(NDRCALL):
opnum = 0
structure = (
('LogHandle', IELF_HANDLE),
('BackupFileName', PRPC_UNICODE_STRING),
)
class ElfrClearELFWResponse(NDRCALL):
structure = (
('ErrorCode', NTSTATUS),
)
# 3.1.4.11 ElfrBackupELFW (Opnum 1)
class ElfrBackupELFW(NDRCALL):
opnum = 1
structure = (
('LogHandle', IELF_HANDLE),
('BackupFileName', RPC_UNICODE_STRING),
)
class ElfrBackupELFWResponse(NDRCALL):
structure = (
('ErrorCode', NTSTATUS),
)
# 3.1.4.21 ElfrCloseEL (Opnum 2)
class ElfrCloseEL(NDRCALL):
opnum = 2
structure = (
('LogHandle', IELF_HANDLE),
)
class ElfrCloseELResponse(NDRCALL):
structure = (
('LogHandle', IELF_HANDLE),
('ErrorCode', NTSTATUS),
)
# 3.1.4.18 ElfrNumberOfRecords (Opnum 4)
class ElfrNumberOfRecords(NDRCALL):
opnum = 4
structure = (
('LogHandle', IELF_HANDLE),
)
class ElfrNumberOfRecordsResponse(NDRCALL):
structure = (
('NumberOfRecords', ULONG),
('ErrorCode', NTSTATUS),
)
# 3.1.4.19 ElfrOldestRecord (Opnum 5)
class ElfrOldestRecord(NDRCALL):
opnum = 5
structure = (
('LogHandle', IELF_HANDLE),
)
class ElfrOldestRecordResponse(NDRCALL):
structure = (
('OldestRecordNumber', ULONG),
('ErrorCode', NTSTATUS),
)
# 3.1.4.3 ElfrOpenELW (Opnum 7)
class ElfrOpenELW(NDRCALL):
opnum = 7
structure = (
('UNCServerName', EVENTLOG_HANDLE_W),
('ModuleName', RPC_UNICODE_STRING),
('RegModuleName', RPC_UNICODE_STRING),
('MajorVersion', ULONG),
('MinorVersion', ULONG),
)
class ElfrOpenELWResponse(NDRCALL):
structure = (
('LogHandle', IELF_HANDLE),
('ErrorCode', NTSTATUS),
)
# 3.1.4.5 ElfrRegisterEventSourceW (Opnum 8)
class ElfrRegisterEventSourceW(NDRCALL):
opnum = 8
structure = (
('UNCServerName', EVENTLOG_HANDLE_W),
('ModuleName', RPC_UNICODE_STRING),
('RegModuleName', RPC_UNICODE_STRING),
('MajorVersion', ULONG),
('MinorVersion', ULONG),
)
class ElfrRegisterEventSourceWResponse(NDRCALL):
structure = (
('LogHandle', IELF_HANDLE),
('ErrorCode', NTSTATUS),
)
# 3.1.4.1 ElfrOpenBELW (Opnum 9)
class ElfrOpenBELW(NDRCALL):
opnum = 9
structure = (
('UNCServerName', EVENTLOG_HANDLE_W),
('BackupFileName', RPC_UNICODE_STRING),
('MajorVersion', ULONG),
('MinorVersion', ULONG),
)
class ElfrOpenBELWResponse(NDRCALL):
structure = (
('LogHandle', IELF_HANDLE),
('ErrorCode', NTSTATUS),
)
# 3.1.4.7 ElfrReadELW (Opnum 10)
class ElfrReadELW(NDRCALL):
opnum = 10
structure = (
('LogHandle', IELF_HANDLE),
('ReadFlags', ULONG),
('RecordOffset', ULONG),
('NumberOfBytesToRead', ULONG),
)
class ElfrReadELWResponse(NDRCALL):
structure = (
('Buffer', NDRUniConformantArray),
('NumberOfBytesRead', ULONG),
('MinNumberOfBytesNeeded', ULONG),
('ErrorCode', NTSTATUS),
)
# 3.1.4.13 ElfrReportEventW (Opnum 11)
class ElfrReportEventW(NDRCALL):
opnum = 11
structure = (
('LogHandle', IELF_HANDLE),
('Time', ULONG),
('EventType', USHORT),
('EventCategory', USHORT),
('EventID', ULONG),
('NumStrings', USHORT),
('DataSize', ULONG),
('ComputerName', RPC_UNICODE_STRING),
('UserSID', PRPC_SID),
('Strings', PRPC_UNICODE_STRING_ARRAY),
('Data', LPBYTE),
('Flags', USHORT),
('RecordNumber', PULONG),
('TimeWritten', PULONG),
)
class ElfrReportEventWResponse(NDRCALL):
structure = (
('RecordNumber', PULONG),
('TimeWritten', PULONG),
('ErrorCode', NTSTATUS),
)
################################################################################
# OPNUMs and their corresponding structures
################################################################################
OPNUMS = {
0 : (ElfrClearELFW, ElfrClearELFWResponse),
1 : (ElfrBackupELFW, ElfrBackupELFWResponse),
2 : (ElfrCloseEL, ElfrCloseELResponse),
4 : (ElfrNumberOfRecords, ElfrNumberOfRecordsResponse),
5 : (ElfrOldestRecord, ElfrOldestRecordResponse),
7 : (ElfrOpenELW, ElfrOpenELWResponse),
8 : (ElfrRegisterEventSourceW, ElfrRegisterEventSourceWResponse),
9 : (ElfrOpenBELW, ElfrOpenBELWResponse),
10 : (ElfrReadELW, ElfrReadELWResponse),
11 : (ElfrReportEventW, ElfrReportEventWResponse),
}
################################################################################
# HELPER FUNCTIONS
################################################################################
def hElfrOpenBELW(dce, backupFileName = NULL):
request = ElfrOpenBELW()
request['UNCServerName'] = NULL
request['BackupFileName'] = backupFileName
request['MajorVersion'] = 1
request['MinorVersion'] = 1
return dce.request(request)
def hElfrOpenELW(dce, moduleName = NULL, regModuleName = NULL):
request = ElfrOpenELW()
request['UNCServerName'] = NULL
request['ModuleName'] = moduleName
request['RegModuleName'] = regModuleName
request['MajorVersion'] = 1
request['MinorVersion'] = 1
return dce.request(request)
def hElfrCloseEL(dce, logHandle):
request = ElfrCloseEL()
request['LogHandle'] = logHandle
resp = dce.request(request)
return resp
def hElfrRegisterEventSourceW(dce, moduleName = NULL, regModuleName = NULL):
request = ElfrRegisterEventSourceW()
request['UNCServerName'] = NULL
request['ModuleName'] = moduleName
request['RegModuleName'] = regModuleName
request['MajorVersion'] = 1
request['MinorVersion'] = 1
return dce.request(request)
def hElfrReadELW(dce, logHandle = '', readFlags = EVENTLOG_SEEK_READ|EVENTLOG_FORWARDS_READ,
recordOffset = 0, numberOfBytesToRead = MAX_BATCH_BUFF):
request = ElfrReadELW()
request['LogHandle'] = logHandle
request['ReadFlags'] = readFlags
request['RecordOffset'] = recordOffset
request['NumberOfBytesToRead'] = numberOfBytesToRead
return dce.request(request)
def hElfrClearELFW(dce, logHandle = '', backupFileName = NULL):
request = ElfrClearELFW()
request['LogHandle'] = logHandle
request['BackupFileName'] = backupFileName
return dce.request(request)
def hElfrBackupELFW(dce, logHandle = '', backupFileName = NULL):
request = ElfrBackupELFW()
request['LogHandle'] = logHandle
request['BackupFileName'] = backupFileName
return dce.request(request)
def hElfrNumberOfRecords(dce, logHandle):
request = ElfrNumberOfRecords()
request['LogHandle'] = logHandle
resp = dce.request(request)
return resp
def hElfrOldestRecordNumber(dce, logHandle):
request = ElfrOldestRecord()
request['LogHandle'] = logHandle
resp = dce.request(request)
return resp
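# --- Illustrative usage sketch, not part of the original module ---
# A hedged example of driving the helpers above over MS-EVEN; the string
# binding, credentials and log name are placeholders, and NULL is the value
# imported from impacket.dcerpc.v5.dtypes at the top of this file.
#
#   from impacket.dcerpc.v5 import transport, even
#   from impacket.dcerpc.v5.dtypes import NULL
#
#   rpctransport = transport.DCERPCTransportFactory(r'ncacn_np:TARGET[\pipe\eventlog]')
#   rpctransport.set_credentials('user', 'password')
#   dce = rpctransport.get_dce_rpc()
#   dce.connect()
#   dce.bind(even.MSRPC_UUID_EVEN)
#   resp = even.hElfrOpenELW(dce, 'Security\x00', NULL)      # open the Security log
#   log_handle = resp['LogHandle']
#   print(even.hElfrNumberOfRecords(dce, log_handle)['NumberOfRecords'])
#   even.hElfrCloseEL(dce, log_handle)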
| 30.912718
| 149
| 0.605679
|
dec616c19128d689d72a89eab2cc6c3dd0a8b163
| 4,133
|
py
|
Python
|
src/common/admin.py
|
danpercic86/e-notary
|
ba15a9a80e2091593fb088feacfaf9574c816d6e
|
[
"Apache-2.0"
] | null | null | null |
src/common/admin.py
|
danpercic86/e-notary
|
ba15a9a80e2091593fb088feacfaf9574c816d6e
|
[
"Apache-2.0"
] | null | null | null |
src/common/admin.py
|
danpercic86/e-notary
|
ba15a9a80e2091593fb088feacfaf9574c816d6e
|
[
"Apache-2.0"
] | null | null | null |
import os.path
from typing import Tuple, Dict
import pandas
from django.conf import settings
from django.contrib.admin import ModelAdmin, register
from django.forms import ModelForm, FileField, FileInput
from django.shortcuts import redirect
from faker import Faker
from common.models import Example, Client, IdUpload, Template
from common.ocr import ocr
class BaseModelAdmin(ModelAdmin):
list_filter: Tuple = ("created", "modified")
readonly_fields: Tuple = ("created", "modified")
class SlugableModelAdmin(ModelAdmin):
prepopulated_fields: Dict[str, Tuple] = {"slug": ("name",)}
CREATED_MODIFIED = (
"Created / Modified",
{
"fields": ("created", "modified"),
"description": "Info about the time this entry was added here or updated",
},
)
@register(Example)
class ExampleAdmin(BaseModelAdmin):
fieldsets = (
(None, {"fields": ("name", "status", "status_changed", "published_at")}),
CREATED_MODIFIED,
)
list_display = ("name", "status", "status_changed", "published_at")
list_editable = ("status",)
readonly_fields = BaseModelAdmin.readonly_fields + (
"status_changed",
"published_at",
)
@register(Client)
class ClientsAdmin(BaseModelAdmin):
fieldsets = (
(
"Date client",
{
"fields": (
"first_name",
"last_name",
"cnp",
"residence",
"birthday",
"id_series",
"id_number",
"id_emitted_by",
"id_emitted_at",
"registration_number",
"face",
"back",
"template",
"generated_doc",
)
},
),
(
"Chitanță",
{
"fields": (
"cost",
"tax",
"receipt_series",
"receipt_number",
"receipt",
)
},
),
CREATED_MODIFIED,
)
readonly_fields = ("generated_doc", "receipt") + BaseModelAdmin.readonly_fields
list_display = ("__str__", "template", "generated_doc", "receipt")
@register(IdUpload)
class IdUploadAdmin(ModelAdmin):
def has_change_permission(self, request, obj=None) -> bool:
return False
def has_view_permission(self, request, obj=None) -> bool:
return False
def save_model(self, request, obj: IdUpload, form, change: bool) -> None:
client = Client()
fake = Faker()
result = ocr(
os.path.join(settings.MEDIA_ROOT, obj.face.name),
os.path.join(settings.BASE_DIR, "templates", "template.jpeg"),
)
client.first_name = result["first_name"]
client.last_name = result["last_name"]
client.birthday = pandas.to_datetime(result["birthday"]).date()
client.id_emitted_at = pandas.to_datetime(result["id_emitted_at"]).date()
client.id_emitted_by = result["id_emitted_by"]
client.id_series = result["id_series"][:1]
client.id_number = result["id_series"][1:]
client.cnp = fake.unique.random_int(min=1000000000000, max=6999999999999)
client.residence = fake.address()
client.cost = obj.cost
client.tax = obj.tax
client.registration_number = obj.registration_number
client.face = obj.face
client.back = obj.back
client.template = obj.template
client.save()
obj.client = client
def response_add(self, request, obj, post_url_continue=None):
return redirect(to=obj.client)
class TemplatesForm(ModelForm):
file = FileField(
widget=FileInput(attrs={"accept": "application/docx"}),
error_messages={"invalid": "Incarcă numai fișiere Microsoft Word (.docx)"},
)
class Meta:
model = Template
fields = "__all__"
@register(Template)
class TemplatesAdmin(BaseModelAdmin):
pass
# form = TemplatesForm
| 29.312057
| 83
| 0.574401
|
d32eadff7a7392d9e1a74357342aab26728337e0
| 2,994
|
py
|
Python
|
ext/djangojinja2.py
|
captainmalloc/jinja
|
540b260198285f0ed41fbe80c0b1b6f13be579c1
|
[
"BSD-3-Clause"
] | 1
|
2020-07-06T05:53:18.000Z
|
2020-07-06T05:53:18.000Z
|
ext/djangojinja2.py
|
captainmalloc/jinja
|
540b260198285f0ed41fbe80c0b1b6f13be579c1
|
[
"BSD-3-Clause"
] | null | null | null |
ext/djangojinja2.py
|
captainmalloc/jinja
|
540b260198285f0ed41fbe80c0b1b6f13be579c1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
djangojinja2
~~~~~~~~~~~~
Adds support for Jinja to Django.
Configuration variables:
======================= =============================================
Key Description
======================= =============================================
`JINJA2_TEMPLATE_DIRS` List of template folders
`JINJA2_EXTENSIONS` List of Jinja extensions to use
`JINJA2_CACHE_SIZE` The size of the Jinja template cache.
======================= =============================================
:copyright: (c) 2009 by the Jinja Team.
:license: BSD.
"""
from itertools import chain
from django.conf import settings
from django.http import HttpResponse
from django.core.exceptions import ImproperlyConfigured
from django.template.context import get_standard_processors
from django.template import TemplateDoesNotExist
from jinja2 import Environment, FileSystemLoader, TemplateNotFound
from jinja2.defaults import DEFAULT_NAMESPACE
# the environment is unconfigured until the first template is loaded.
_jinja_env = None
def get_env():
"""Get the Jinja env and initialize it if necessary."""
global _jinja_env
if _jinja_env is None:
_jinja_env = create_env()
return _jinja_env
def create_env():
"""Create a new Jinja environment."""
searchpath = list(settings.JINJA2_TEMPLATE_DIRS)
return Environment(loader=FileSystemLoader(searchpath),
auto_reload=settings.TEMPLATE_DEBUG,
cache_size=getattr(settings, 'JINJA2_CACHE_SIZE', 400),
extensions=getattr(settings, 'JINJA2_EXTENSIONS', ()))
def get_template(template_name, globals=None):
"""Load a template."""
try:
return get_env().get_template(template_name, globals=globals)
except TemplateNotFound as e:
raise TemplateDoesNotExist(str(e))
def select_template(templates, globals=None):
"""Try to load one of the given templates."""
env = get_env()
for template in templates:
try:
return env.get_template(template, globals=globals)
except TemplateNotFound:
continue
raise TemplateDoesNotExist(', '.join(templates))
def render_to_string(template_name, context=None, request=None,
processors=None):
"""Render a template into a string."""
context = dict(context or {})
if request is not None:
context['request'] = request
for processor in chain(get_standard_processors(), processors or ()):
context.update(processor(request))
return get_template(template_name).render(context)
def render_to_response(template_name, context=None, request=None,
processors=None, mimetype=None):
"""Render a template into a response object."""
return HttpResponse(render_to_string(template_name, context, request,
processors), mimetype=mimetype)
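# --- Illustrative configuration sketch, not part of the original module ---
# A minimal, hedged example of wiring this backend into a project; the
# template directory, extension list and template name are placeholders, and
# the import path assumes this module is importable as ``djangojinja2``.
#
#   # settings.py
#   JINJA2_TEMPLATE_DIRS = ['/path/to/templates']
#   JINJA2_EXTENSIONS = ('jinja2.ext.i18n',)
#   JINJA2_CACHE_SIZE = 100
#
#   # views.py
#   from djangojinja2 import render_to_response
#
#   def index(request):
#       return render_to_response('index.html', {'title': 'Hello'}, request)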
| 34.413793
| 78
| 0.629927
|
11c195e99e0b5759ef285fcfa2c5eb571b586b6e
| 10,796
|
py
|
Python
|
google/ads/google_ads/v3/services/conversion_adjustment_upload_service_client.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | 1
|
2019-11-30T23:42:39.000Z
|
2019-11-30T23:42:39.000Z
|
google/ads/google_ads/v3/services/conversion_adjustment_upload_service_client.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v3/services/conversion_adjustment_upload_service_client.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | 1
|
2020-09-30T17:04:06.000Z
|
2020-09-30T17:04:06.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.ads.googleads.v3.services ConversionAdjustmentUploadService API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
from google.ads.google_ads.v3.services import conversion_adjustment_upload_service_client_config
from google.ads.google_ads.v3.services.transports import conversion_adjustment_upload_service_grpc_transport
from google.ads.google_ads.v3.proto.services import conversion_adjustment_upload_service_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-ads',
).version
class ConversionAdjustmentUploadServiceClient(object):
"""Service to upload conversion adjustments."""
SERVICE_ADDRESS = 'googleads.googleapis.com:443'
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.ads.googleads.v3.services.ConversionAdjustmentUploadService'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ConversionAdjustmentUploadServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
def __init__(self, transport=None, channel=None, credentials=None,
client_config=None, client_info=None):
"""Constructor.
Args:
transport (Union[~.ConversionAdjustmentUploadServiceGrpcTransport,
Callable[[~.Credentials, type], ~.ConversionAdjustmentUploadServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn('The `client_config` argument is deprecated.',
PendingDeprecationWarning, stacklevel=2)
else:
client_config = conversion_adjustment_upload_service_client_config.config
if channel:
warnings.warn('The `channel` argument is deprecated; use '
'`transport` instead.',
PendingDeprecationWarning, stacklevel=2)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=conversion_adjustment_upload_service_grpc_transport.ConversionAdjustmentUploadServiceGrpcTransport,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.'
)
self.transport = transport
else:
self.transport = conversion_adjustment_upload_service_grpc_transport.ConversionAdjustmentUploadServiceGrpcTransport(
address=self.SERVICE_ADDRESS,
channel=channel,
credentials=credentials,
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION,
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME],
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def upload_conversion_adjustments(
self,
customer_id,
conversion_adjustments,
partial_failure=None,
validate_only=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Processes the given conversion adjustments.
Args:
customer_id (str): Required. The ID of the customer performing the upload.
conversion_adjustments (list[Union[dict, ~google.ads.googleads_v3.types.ConversionAdjustment]]): Required. The conversion adjustments that are being uploaded.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.ads.googleads_v3.types.ConversionAdjustment`
partial_failure (bool): Required. If true, successful operations will be carried out and invalid
operations will return errors. If false, all operations will be carried out
in one transaction if and only if they are all valid. This should always be
set to true.
validate_only (bool): If true, the request is validated but not executed. Only errors are
returned, not results.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.ads.googleads_v3.types.UploadConversionAdjustmentsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'upload_conversion_adjustments' not in self._inner_api_calls:
self._inner_api_calls['upload_conversion_adjustments'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.upload_conversion_adjustments,
default_retry=self._method_configs['UploadConversionAdjustments'].retry,
default_timeout=self._method_configs['UploadConversionAdjustments'].timeout,
client_info=self._client_info,
)
request = conversion_adjustment_upload_service_pb2.UploadConversionAdjustmentsRequest(
customer_id=customer_id,
conversion_adjustments=conversion_adjustments,
partial_failure=partial_failure,
validate_only=validate_only,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('customer_id', customer_id)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(routing_header)
metadata.append(routing_metadata)
return self._inner_api_calls['upload_conversion_adjustments'](request, retry=retry, timeout=timeout, metadata=metadata)
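# --- Illustrative usage sketch, not part of the generated client ---
# A hedged example of calling the RPC above; the key file path, customer id
# and adjustment payload are placeholders (adjustments may be passed as dicts
# of the same shape as ConversionAdjustment protos).
#
#   client = ConversionAdjustmentUploadServiceClient.from_service_account_file(
#       '/path/to/key.json')
#   response = client.upload_conversion_adjustments(
#       customer_id='1234567890',
#       conversion_adjustments=[adjustment],   # dict or ConversionAdjustment
#       partial_failure=True,
#   )
#   print(response)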
| 46.534483
| 170
| 0.663486
|
d39414a749071ddb7629db9ae8294506088e15f0
| 31,536
|
py
|
Python
|
test/selenium/src/lib/constants/locator.py
|
Smotko/ggrc-core
|
b3abb58b24e7559960d71a94ba79c75539e7fe29
|
[
"Apache-2.0"
] | null | null | null |
test/selenium/src/lib/constants/locator.py
|
Smotko/ggrc-core
|
b3abb58b24e7559960d71a94ba79c75539e7fe29
|
[
"Apache-2.0"
] | 12
|
2015-01-08T14:50:19.000Z
|
2017-11-29T19:37:53.000Z
|
test/selenium/src/lib/constants/locator.py
|
Smotko/ggrc-core
|
b3abb58b24e7559960d71a94ba79c75539e7fe29
|
[
"Apache-2.0"
] | 1
|
2015-01-08T13:25:09.000Z
|
2015-01-08T13:25:09.000Z
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: jernej@reciprocitylabs.com
# Maintained By: jernej@reciprocitylabs.com
"""Locators for all the elements"""
# pylint: disable=too-few-public-methods
from selenium.webdriver.common.by import By # pylint: disable=import-error
from lib.constants import objects
from lib.constants import attribute
class Login(object):
"""All locators for the login page"""
BUTTON_LOGIN = (By.CSS_SELECTOR, "a.btn.btn-large.btn-info")
class PageHeader(object):
"""All locators for the dashboard header (has the same name as the elemnt"""
TOGGLE_LHN = (By.CSS_SELECTOR, ".lhn-trigger")
BUTTON_DASHBOARD = (By.CSS_SELECTOR, '.header-content .to-my-work['
'href="/dashboard"]')
BUTTON_SEARCH = (By.CSS_SELECTOR, '.header-content ['
'data-toggle="unified-search"]')
BUTTON_MY_TASKS = (By.CSS_SELECTOR, '.header-content ['
'href="/dashboard#task_widget"]')
BUTTON_ALL_OBJECTS = (By.CSS_SELECTOR, '.header-content ['
'href="/objectBrowser"]')
TOGGLE_USER_DROPDOWN = (
By.CSS_SELECTOR, '.header-content .dropdown-toggle')
BUTTON_HELP = (By.CSS_SELECTOR, '.header-content [id="#page-help"]')
# dropdown toggle
BUTTON_ADMIN_DASHBOARD = (
By.CSS_SELECTOR, '.dropdown-menu [href="/admin#people_list_widget"]')
BUTTON_MY_WORK = (By.CSS_SELECTOR, '.dropdown-menu [href="/dashboard"]')
BUTTON_DATA_IMPORT = (By.CSS_SELECTOR, '.dropdown-menu [href="/import"]')
BUTTON_DATA_EXPORT = (By.CSS_SELECTOR, '.dropdown-menu [href="/export"]')
BUTTON_LOGOUT = (By.CSS_SELECTOR, '.dropdown-menu [href="/logout"]')
NOTIFICATIONS = (By.CSS_SELECTOR, '.menu .user-dropdown .notify-wrap')
CHECKBOX_DAILY_DIGEST = (By.CSS_SELECTOR, '.menu .user-dropdown input')
CHECKBOX_DISABLED = (By.CSS_SELECTOR, '.menu .user-dropdown input.disabled')
class Dashboard(object):
"""Locators for the dashbord page"""
BUTTON_START_NEW_PROGRAM = (
By.CSS_SELECTOR, '.quick-list [data-object-singular="Program"]')
BUTTON_START_NEW_AUDIT = (
By.CSS_SELECTOR, '.quick-list [data-object-singular="Audit"]')
BUTTON_START_NEW_WORKFLOW = (
By.CSS_SELECTOR, '.quick-list [data-object-singular="Workflow"]')
BUTTON_CREATE_NEW_OBJECT = (
By.CSS_SELECTOR, '.quick-list [href="#"]')
BUTTON_ALL_OBJECTS = (By.CSS_SELECTOR, '.quick-list '
'[href="/objectBrowser"]')
class LhnMenu(object):
"""Locators for the menu in header"""
class _Locator(object):
@staticmethod
def get_accordion_button(label):
return (By.CSS_SELECTOR, '[data-model-name="{}"]>a'.format(label))
@staticmethod
def get_create_new_button(label):
return (
By.CSS_SELECTOR,
'[data-model-name="{}"] [data-test-id="button_lhn_create_new_program'
'_522c563f"]'.format(label))
@staticmethod
def get_accordion_count(label):
return (By.CSS_SELECTOR, '[data-model-name="{}"] .item-count'.format(
label))
@staticmethod
def get_accordion_members(object_name):
return (
By.CSS_SELECTOR,
'[data-model-name="{}"]>.content>.sub-level>li'.format(object_name))
@staticmethod
def get_spinny(object_name):
return (By.CSS_SELECTOR, '[data-model-name="{}"] .spinny'.format(
object_name))
class __metaclass__(type):
def __init__(self, *args):
for object_singular, object_plural in zip(objects.ALL_SINGULAR,
objects.ALL_PLURAL):
capitalized_name = object_singular.title()
# handle underscore in object names
if "_" in capitalized_name:
capitalized_name = capitalized_name.title().replace("_", "")
# set lhn items
setattr(self, attribute.TOGGLE + object_plural,
self._Locator.get_accordion_button(capitalized_name))
setattr(self, attribute.BUTTON_CREATE_NEW + object_plural,
self._Locator.get_create_new_button(capitalized_name))
setattr(self, attribute.COUNT + object_plural,
self._Locator.get_accordion_count(capitalized_name))
setattr(self, attribute.SPINNY + object_plural,
self._Locator.get_spinny(capitalized_name))
setattr(self, attribute.ACCORDION_MEMBERS + object_plural,
self._Locator.get_accordion_members(capitalized_name))
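  # A hedged illustration of what the loop above generates: for an object
  # name like "Program" the helpers resolve to locators such as
  #   (By.CSS_SELECTOR, '[data-model-name="Program"]>a')
  # and the attribute names are built from the prefixes in
  # lib.constants.attribute plus the plural object name (e.g.
  # TOGGLE + "programs"); the exact prefix strings are assumptions.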
LHN_MENU = (By.ID, "lhn")
MODAL = (By.CSS_SELECTOR, '[id="ajax-lhn_modal-javascript:--"]')
EXTENDED_INFO = (By.CSS_SELECTOR, '.extended-info.in')
FILTER = (By.CSS_SELECTOR, '.lhs-search')
FILTER_TEXT_BOX = (By.CSS_SELECTOR, '.lhs-search>.widgetsearch')
FILTER_SUBMIT_BUTTON = (
By.CSS_SELECTOR, '.lhs-search>.widgetsearch-submit')
FILTER_CLEAR_BUTTON = (
By.CSS_SELECTOR, '.lhs-search [data-title="Clear filters"]')
LHS_ITEM = (By.CSS_SELECTOR, '[test-data-id="lhs-item_3ad27b8b"]')
ALL_OBJECTS = (By.CSS_SELECTOR, '[data-test-id="all_objects_e0345ec4"]')
MY_OBJECTS = (By.CSS_SELECTOR, '[data-test-id="my_objects_6fa95ae1"]')
PIN = (By.CSS_SELECTOR, '.lhn-pin')
# lhn items
DIRECTIVES = (By.CSS_SELECTOR, '[data-test-id="directives_66116337"]')
TOGGLE_CONTROLS_OR_OBJECTIVES = (
By.CSS_SELECTOR, '[data-test-id="controls/objectives_66116337"]')
TOGGLE_PEOPLE_OR_GROUPS = (
By.CSS_SELECTOR, '[data-test-id="people/groups_66116337"]')
TOGGLE_ASSETS_OR_BUSINESS = (
By.CSS_SELECTOR, '[data-test-id="assets/business_66116337"]')
TOGGLE_RISK_OR_THREATS = (
By.CSS_SELECTOR, '[data-test-id="risk/threats_66116337"]')
# workflows labels
BUTTON_WORKFLOWS_ACTIVE = (
By.CSS_SELECTOR, '[data-for="Workflow"]>[data-value="Active"]')
BUTTON_WORKFLOWS_DRAFT = (
By.CSS_SELECTOR, '[data-for="Workflow"]>[data-value="Draft"]')
BUTTON_WORKFLOWS_INACTIVE = (
By.CSS_SELECTOR, '[data-for="Workflow"]>[data-value="Inactive"]')
class ExtendedInfo(object):
"""Locators for the extended info tooltip in LHN after hovering over a
member object"""
BUTTON_MAP_TO = (By.CSS_SELECTOR, '.extended-info.in .map-to-page-object')
ALREADY_MAPPED = (
By.CSS_SELECTOR,
'.extended-info.in .links .primary:not(.map-to-page-object)')
class BaseModalCreateNew(object):
"""Locators shared with create new object modals"""
# labels
MODAL_TITLE = (By.CSS_SELECTOR, '[id="ajax-modal-javascript:--"]>div>h2')
TITLE = (By.CSS_SELECTOR, '.modal-body form>div:nth-child(2) .span6>label')
# user input elements
UI_TITLE = (
By.CSS_SELECTOR,
'.modal-body form>div:nth-child(2) .span6>input')
class ModalCreateNewProgram(BaseModalCreateNew):
"""Locators for the program modal visible when creating a new modal from
LHN"""
UI_TITLE = (By.CSS_SELECTOR,
'[data-test-id="new_program_field_title_a63ed79d"]')
UI_DESCRIPTION = (By.CSS_SELECTOR,
'[data-test-id="new_program_field_description_1fb8bc06"]'
'>iframe.wysihtml5-sandbox')
UI_NOTES = (By.CSS_SELECTOR,
'[data-test-id="new_program_field_notes_75b8bc05"]'
'>iframe.wysihtml5-sandbox')
UI_CODE = (By.CSS_SELECTOR,
'[data-test-id="new_program_field_code_334276e2"]')
UI_STATE = (By.CSS_SELECTOR,
'[data-test-id="new_program_dropdown_state_036a1fa6"]')
BUTTON_HIDE_OPTIONAL_FIELDS = (By.ID, "formHide")
BUTTON_SHOW_ALL_OPTIONAL_FIELDS = (By.ID, "formHide")
UI_PRIMARY_CONTACT = (By.CSS_SELECTOR, '[data-test-id='
'"new_program_field_primary_contact_'
'86160053"]')
DROPDOWN_CONTACT = (By.CSS_SELECTOR, '.ui-menu-item')
UI_SECONDARY_CONTACT = (By.CSS_SELECTOR, '[data-test-id='
'"new_program_field_secondary_'
'contact_'
'86160053"]')
UI_PROGRAM_URL = (By.CSS_SELECTOR, '[data-test-id='
'"new_program_field_program_url_'
'86160053"]')
UI_REFERENCE_URL = (By.CSS_SELECTOR, '[data-test-id='
'"new_program_field_reference_url_'
'86160053"]')
UI_EFFECTIVE_DATE = (By.CSS_SELECTOR, '[data-test-id='
'"new_program_field_effective_date_'
'f2783a28"]')
UI_STOP_DATE = (By.CSS_SELECTOR, '[data-test-id='
'"new_program_field_stop_date_f2783a28"]')
DATE_PICKER = (By.CSS_SELECTOR, '.ui-datepicker-calendar ['
'data-handler="selectDay"]')
TITLE = (By.CSS_SELECTOR, '[data-test-id="label_title_2c925d94"]')
DESCRIPTION = (By.CSS_SELECTOR,
'[data-test-id="label_description_2c925d94"]')
PRIVACY = (By.CSS_SELECTOR, '[data-test-id="label_privacy_2c925d94"]')
PROGRAM_URL = (By.CSS_SELECTOR,
'[data-test-id="label_program_url_2c925d94"]')
class ModalCreateNewOrgGroup(BaseModalCreateNew):
"""Locators for the control modal visible when creating a new modal from
LHN"""
class ModalCreateNewRisk(BaseModalCreateNew):
"""Locators for the control modal visible when creating a new modal from
LHN"""
UI_DESCRIPTION = (
By.CSS_SELECTOR, '.modal-body form>div:nth-child(3) iframe')
class ModalCreateRequest(BaseModalCreateNew):
"""Locators for the control modal visible when creating a new modal from
LHN"""
class ModalCreateNewDataAsset(BaseModalCreateNew):
"""Locators for the control modal visible when creating a new modal from
LHN"""
class ModalCreateNewProcess(BaseModalCreateNew):
"""Locators for the control modal visible when creating a new modal from
LHN"""
class ModalCreateNewProject(BaseModalCreateNew):
"""Locators for the control modal visible when creating a new modal from
LHN"""
class ModalCreateNewSystem(BaseModalCreateNew):
"""Locators for the control modal visible when creating a new modal from
LHN"""
class ModalCreateNewProduct(BaseModalCreateNew):
"""Locators for the control modal visible when creating a new modal from
LHN"""
class ModalCreateNewControl(BaseModalCreateNew):
"""Locators for the control modal visible when creating a new modal from
LHN"""
class _Locator(object):
@staticmethod
    def get_assessor_row(first_id, second_id):
return (
By.CSS_SELECTOR,
'.modal-body div>form>div>div:nth-child({})>div:nth-child({}) '
'label'.format(first_id, second_id))
@staticmethod
def get_dropdown_item(first_id, second_id):
return (
By.CSS_SELECTOR,
'.modal-body div>form>div>div:nth-child({})>div:nth-child({}) '
'select'.format(first_id, second_id))
# labels
DESCRIPTION = (
By.CSS_SELECTOR, '.modal-body form>div:nth-child(3) .span6>label')
TEST_PLAN = (By.CSS_SELECTOR, '[data-id="test_plan_hidden"] label')
NOTES = (By.CSS_SELECTOR, '[data-id="note_hidden"] label')
CODE = (By.CSS_SELECTOR, '[data-id="code_hidden"] label')
KIND_OR_NATURE = (
By.CSS_SELECTOR,
'.modal-body div:nth-child(6) div:nth-child(2) div:nth-child(1) label')
FRAUD_RELATED = (
By.CSS_SELECTOR,
'.modal-body div:nth-child(6) div:nth-child(2) div:nth-child(2) label')
  EFFECTIVE_DATE = _Locator.get_assessor_row(3, 1)
  FREQUENCY = _Locator.get_assessor_row(4, 1)
  ASSERTIONS = _Locator.get_assessor_row(4, 2)
  PRINCIPAL_ASSESSOR = _Locator.get_assessor_row(5, 1)
  SECONDARY_ASSESSOR = _Locator.get_assessor_row(5, 2)
OWNER = (
By.CSS_SELECTOR,
'.modal-body div:nth-child(1)>form>div:nth-child(3) div:nth-child(2) '
'label')
PRIMARY_CONTACT = (
By.CSS_SELECTOR,
'.modal-body div:nth-child(1)>form>div:nth-child(4) div:nth-child(2)>'
'div:nth-child(1)>div>label')
SECONDARY_CONTACT = (
By.CSS_SELECTOR,
'.modal-body div:nth-child(1)>form>div:nth-child(4) div:nth-child(2)>'
'div:nth-child(2)>div>label')
CONTROL_URL = (
By.CSS_SELECTOR,
'.modal-body form>div:nth-child(5) div:nth-child(2)>div:nth-child(1)>div'
'>label')
REFERENCE_URL = (
By.CSS_SELECTOR,
'.modal-body div:nth-child(1)>form>div:nth-child(5) div:nth-child(2)>'
'div:nth-child(2)>div>label')
SIGNIFICANCE = (
By.CSS_SELECTOR,
'.modal-body div:nth-child(6) div:nth-child(2) div:nth-child(3) label')
TYPE_OR_MEANS = (
By.CSS_SELECTOR,
'.modal-body div:nth-child(6) div:nth-child(2) div:nth-child(4) label')
STOP_DATE = (
By.CSS_SELECTOR,
'.modal-body div:nth-child(6) div:nth-child(3) div:nth-child(2) label')
CATEGORIES = (
By.CSS_SELECTOR,
'.modal-body div>form>div>div:nth-child(4)>div:nth-child(3) label')
STATE = (
By.CSS_SELECTOR,
'.modal-body div>form>div>div:nth-child(5)>div:nth-child(3) label')
# user input elements
UI_DESCRIPTION = (
By.CSS_SELECTOR, '.modal-body form>div:nth-child(3) iframe')
UI_TEST_PLAN = (By.CSS_SELECTOR, '[data-id="test_plan_hidden"] iframe')
UI_NOTES = (By.CSS_SELECTOR, '[data-id="note_hidden"] iframe')
UI_CODE = (By.CSS_SELECTOR, '[data-id="code_hidden"] input')
UI_PRIMARY_CONTACT = (
By.CSS_SELECTOR,
'.modal-body div:nth-child(1)>form>div:nth-child(4) div:nth-child(2)>'
'div:nth-child(1)>div>input')
UI_SECONDARY_CONTACT = (
By.CSS_SELECTOR,
'.modal-body div:nth-child(1)>form>div:nth-child(4) div:nth-child(2)>'
'div:nth-child(2)>div>input')
UI_CONTROL_URL = (
By.CSS_SELECTOR,
'.modal-body form>div:nth-child(5) div:nth-child(2)>div:nth-child(1)>div'
'>input')
UI_REFERENCE_URL = (
By.CSS_SELECTOR,
'.modal-body div:nth-child(1)>form>div:nth-child(5) div:nth-child(2)>'
'div:nth-child(2)>div>input')
DATEPICKER_EFFECTIVE_DATE = (
By.CSS_SELECTOR,
'.modal-body div>form>div>div:nth-child(3)>div:nth-child(1) input')
DATEPICKER_STOP_DATE = (
By.CSS_SELECTOR,
'.modal-body div>form>div>div:nth-child(3)>div:nth-child(2) input')
  # dropdowns
DROPDOWN_KIND_OR_NATURE = _Locator.get_dropdown_item(2, 1)
DROPDOWN_FRAUD_RELATED = _Locator.get_dropdown_item(2, 2)
DROPDOWN_SIGNIFICANCE = _Locator.get_dropdown_item(2, 3)
DROPDOWN_TYPE_OR_MEANS = _Locator.get_dropdown_item(2, 4)
DROPDOWN_FREQUENCY = _Locator.get_dropdown_item(4, 1)
DROPDOWN_STATE = _Locator.get_dropdown_item(5, 3)
SELECTABLE_ASSERTIONS = _Locator.get_dropdown_item(4, 2)
SELECTABLE_CATEGORIES = _Locator.get_dropdown_item(4, 3)
# buttons
BUTTON_ADD_OWNER = (By.CSS_SELECTOR, 'isolate-form .btn')
BUTTON_HIDE_ALL_OPTIONAL_FIELDS = (By.CSS_SELECTOR, '#formHide')
class ModalCreateNewIssue(BaseModalCreateNew):
"""Locators for the issue modal visible when creating a new modal from
LHN"""
class ModalCreateNewRequest(BaseModalCreateNew):
"""Locators for the request modal visible when creating a new modal from
LHN"""
class ModalEditObject(BaseModalCreateNew):
"""Locators for a generic edit object modal"""
BUTTON_DELETE = (
By.CSS_SELECTOR, '.deny-buttons [data-toggle="modal-ajax-deleteform"]')
class ModalCreateNewObject(BaseModalCreateNew):
"""Locators for a generic new object modal"""
UI_TITLE = (By.CSS_SELECTOR, '[data-id="title_txtbx"]')
BUTTON_SAVE_AND_CLOSE = (
By.CSS_SELECTOR,
'.modal-footer .confirm-buttons [data-toggle="modal-submit"]')
BUTTON_SAVE_AND_ADD_ANOTHER = (
By.CSS_SELECTOR,
'.confirm-buttons [data-toggle="modal-submit-addmore"]')
class ModalCustomAttribute(object):
"""Locators for a generic custom attributes modal in admin dashboard"""
MODAL_TITLE = (By.CSS_SELECTOR, '.modal-header h2')
ATTRIBUTE_TITLE = (By.CSS_SELECTOR, '.modal-body div:nth-child(1)>label')
INLINE_HELP = (By.CSS_SELECTOR, '.modal-body div:nth-child(2)>label')
ATTRIBUTE_TYPE = (By.CSS_SELECTOR, '.modal-header h2')
PLACEHOLDER = (By.CSS_SELECTOR, '.modal-header h2')
MANDATORY = (By.CSS_SELECTOR, '.modal-header h2')
UI_ATTRIBUTE_TITLE = (
By.CSS_SELECTOR, '.modal-body div:nth-child(1)>input[tabindex="1"]')
UI_INLINE_HELP = (
By.CSS_SELECTOR,
'.modal-body div:nth-child(1)>input[tabindex="4"]')
UI_PLACEHOLDER = (By.CSS_SELECTOR, '.modal-body div:nth-child(2)>input')
CHECKBOX_MANDATORY = (By.CSS_SELECTOR, '.modal-body [type="checkbox"]')
BUTTON_ADD_ANOTHER = (
By.CSS_SELECTOR,
'.confirm-buttons [data-toggle="modal-submit-addmore"]')
BUTTON_SAVE_AND_CLOSE = (
By.CSS_SELECTOR,
'.modal-footer .confirm-buttons [data-toggle="modal-submit"]')
class WidgetBar(object):
"""Locators for the bar containing the widgets/tabs"""
class _Locator(object):
@staticmethod
def get_widget(object_name):
return (By.CSS_SELECTOR, '[href="#{}_widget"]'.format(object_name))
class __metaclass__(type):
def __init__(self, *args):
for object_singular, object_plural in zip(objects.ALL_SINGULAR,
objects.ALL_PLURAL):
name = object_singular.lower()
setattr(self, object_plural, self._Locator.get_widget(name))
BUTTON_ADD = (By.CSS_SELECTOR,
'[data-test-id="button_widget_add_2c925d94"]')
TAB_WIDGET = (By.CSS_SELECTOR, ".object-nav .active")
ADMIN_PEOPLE = _Locator.get_widget("people_list")
ADMIN_ROLES = _Locator.get_widget("roles_list")
ADMIN_EVENTS = _Locator.get_widget("events_list")
ADMIN_CUSTOM_ATTRIBUTE = _Locator.get_widget("custom_attribute")
INFO = _Locator.get_widget("info")
CUSTOM_ATTRIBUTES = _Locator.get_widget("custom_attribute")
EVENTS = _Locator.get_widget("events_list")
ROLES = _Locator.get_widget("roles_list")
RISK_ASSESSMENTS = _Locator.get_widget("risk_assessment")
TASKS = _Locator.get_widget("task")
class WidgetBarButtonAddDropdown(object):
"""Locators for the button/dropdown "add widget" in widget bar"""
class _Locator(object):
@staticmethod
def get_dropdown_item(object_name):
return (By.CSS_SELECTOR, '[data-test-id="button_widget_add_2c925d94"] '
'[href="#{}_widget"]'.format(object_name))
class __metaclass__(type):
def __init__(self, *args):
for object_ in objects.ALL_PLURAL:
name = object_.lower()
setattr(self, object_, self._Locator.get_dropdown_item(name))
THREAD_ACTORS = _Locator.get_dropdown_item("threat_actor")
WORKFLOW_TASKS = _Locator.get_dropdown_item("workflow_task")
class ObjectWidget(object):
"""Locators for a generic widget"""
CONTROL_COLUMN_TITLE = (
By.CSS_SELECTOR, '.header .span4 .title-heading .widget-col-title')
CONTROL_OWNER = (
By.CSS_SELECTOR, '.header .span4 [data-field="contact.name|email"]')
  CONTROL_STATE = (
By.CSS_SELECTOR, '.header .span4 [data-field="status"]')
MEMBERS_TITLE_LIST = (
By.CSS_SELECTOR,
'.object-area .tree-structure .select .span4:nth-child(1)')
INFO_PANE = (By.CSS_SELECTOR, '.sticky-info-panel')
class ModalDeleteObject(object):
MODAL_TITLE = (By.CSS_SELECTOR, '.modal-header>h2')
CONFIRMATION_TEXT = (By.CSS_SELECTOR, '.modal-body>div>p')
OBJECT_TITLE = (By.CSS_SELECTOR, '.modal-body>div>p>span')
BUTTON_DELETE = (
By.CSS_SELECTOR, '.modal-footer .confirm-buttons>[data-toggle="delete"]')
class BaseInfoWidget(object):
"""Locators that are common to all info widgets"""
BUTTON_SETTINGS = (By.CSS_SELECTOR, '.info-pane-utility')
TITLE = (By.CSS_SELECTOR, '[data-test-id="title_0ad9fbaf"] h6')
TITLE_ENTERED = (By.CSS_SELECTOR, '[data-test-id="title_0ad9fbaf"] h3')
class WidgetInfoProgram(BaseInfoWidget):
"""Locators for the info program widget"""
PERMALINK_ALERT = (By.CSS_SELECTOR, '.content>.flash>.alert-success')
ALERT_LINK_COPIED = (By.CSS_SELECTOR, '.alert.alert-success')
  MODAL_DELETE = (By.CSS_SELECTOR, '[id="ajax-lhn_modal-javascript:--"]')
MODAL_DELETE_CLOSE = (By.CSS_SELECTOR, '.lhn_modal .grcicon-x-grey')
OBJECT_REVIEW = (By.CSS_SELECTOR,
'[data-test-id="title_review_0ad9fbaf"] h6')
SUBMIT_FOR_REVIEW = (By.CSS_SELECTOR,
'[data-test-id="title_review_0ad9fbaf"] '
'[href="javascript://"]')
DESCRIPTION = (By.CSS_SELECTOR,
'[data-test-id="title_description_7a906d2e"] h6')
DESCRIPTION_ENTERED = (By.CSS_SELECTOR,
'[data-test-id="title_description_'
'content_7a906d2e"]')
NOTES = (By.CSS_SELECTOR, '[data-test-id="title_notes_ef5bc3a71e88"] '
'h6')
NOTES_ENTERED = (By.CSS_SELECTOR,
'[data-test-id="title_notes_ef5bc3a71e88"]>div')
MANAGER = (By.CSS_SELECTOR, '[data-test-id="title_manager_7a906d2e"] '
'h6')
MANAGER_ENTERED = (By.CSS_SELECTOR,
'[data-test-id="title_manager_7a906d2e"] '
'[data-test-id="text_manager_7a906d2e"]')
PROGRAM_URL = (By.CSS_SELECTOR,
'[data-test-id="title_program_url_aa7d1a65"] h6')
PROGRAM_URL_ENTERED = (By.CSS_SELECTOR,
'[data-test-id="text_program_url_aa7d1a65"]')
REFERENCE_URL = (By.CSS_SELECTOR,
'[data-test-id="title_reference_url_aa7d1a65"]')
REFERENCE_URL_ENTERED = (By.CSS_SELECTOR,
'[data-test-id="text_reference_url_aa7d1a65"]')
TOGGLE_SHOW_ADVANCED = (By.CSS_SELECTOR,
'[data-test-id="button_advanced_cf47bc01"]')
TOGGLE_SHOW_ADVANCED_ACTIVATED = (
By.CSS_SELECTOR, '[data-test-id="button_advanced_cf47bc01"].active')
CODE = (By.CSS_SELECTOR, '[data-test-id="title_code_cf47bc01"] h6')
CODE_ENTERED = (By.CSS_SELECTOR,
'[data-test-id="title_code_cf47bc01"] p')
EFFECTIVE_DATE = (By.CSS_SELECTOR,
'[data-test-id="title_effective_date_cf47bc01"] h6')
EFFECTIVE_DATE_ENTERED = (By.CSS_SELECTOR,
'[data-test-id="title_effective_date_'
'cf47bc01"] p')
STOP_DATE = (By.CSS_SELECTOR,
'[data-test-id="title_stop_date_cf47bc01"] h6')
STOP_DATE_ENTERED = (By.CSS_SELECTOR,
'[data-test-id="title_stop_date_cf47bc01"] p')
  STATE = (By.CSS_SELECTOR,
           '[data-test-id="title_state_0ad9fbaf"] h6')
STATE_ENTERED = (By.CSS_SELECTOR,
'[data-test-id="title_state_value_0ad9fbaf"]')
PRIMARY_CONTACT = (By.CSS_SELECTOR, '[data-test-id="title_primary_'
'contact_696de7244b84"] h6')
PRIMARY_CONTACT_ENTERED = (
By.CSS_SELECTOR, '[data-test-id="text_primary_contact_'
'696de7244b84"] [data-test-id="text_'
'manager_7a906d2e"]')
SECONDARY_CONTACT = (
By.CSS_SELECTOR, '[data-test-id="title_contacts_696de7244b84"] '
'h6:nth-child(2)')
SECONDARY_CONTACT_ENTERED = (
By.CSS_SELECTOR, '[data-test-id="text_secondary_contact_'
'696de7244b84"] [data-test-id="text_manager_'
'7a906d2e"]')
PRIVATE_PROGRAM = (By.CSS_SELECTOR,
'[data-test-id="title_private_ec758af9"] h6')
ICON_LOCK = (By.CSS_SELECTOR, '[data-test-id="icon_private_ec758af9"]')
class WidgetInfoRequest(BaseInfoWidget):
"""Locators for the request info widget"""
class WidgetInfoRisk(BaseInfoWidget):
"""Locators for the risk info widget"""
class WidgetInfoOrgGroup(BaseInfoWidget):
"""Locators for the org group info widget"""
class WidgetInfoIssue(BaseInfoWidget):
"""Locators for the org group info widget"""
class WidgetInfoRegulations(BaseInfoWidget):
"""Locators for the regulation info widget"""
class WidgetInfoWorkflow(BaseInfoWidget):
"""Locators for the workflow info widget"""
class WidgetInfoAudit(BaseInfoWidget):
"""Locators for the audit info widget"""
class WidgetInfoAssessment(BaseInfoWidget):
"""Locators for the assessment info widget"""
class WidgetInfoPolicy(BaseInfoWidget):
"""Locators for the regulation info widget"""
class WidgetInfoStandard(BaseInfoWidget):
"""Locators for the standard info widget"""
class WidgetInfoContract(BaseInfoWidget):
"""Locators for the contract info widget"""
class WidgetInfoClause(BaseInfoWidget):
"""Locators for the clause info widget"""
class WidgetInfoSection(BaseInfoWidget):
"""Locators for the section info widget"""
class WidgetInfoControl(BaseInfoWidget):
"""Locators for the control info widget"""
class WidgetInfoObjective(BaseInfoWidget):
"""Locators for the objective info widget"""
class WidgetInfoPeople(BaseInfoWidget):
"""Locators for the people info widget"""
class WidgetInfoVendor(BaseInfoWidget):
"""Locators for the vendor info widget"""
class WidgetInfoAccessGroup(BaseInfoWidget):
"""Locators for the access group info widget"""
class WidgetInfoSystem(BaseInfoWidget):
"""Locators for the system info widget"""
class WidgetInfoProcess(BaseInfoWidget):
"""Locators for the process info widget"""
class WidgetInfoProduct(BaseInfoWidget):
"""Locators for the product info widget"""
class WidgetInfoFacility(BaseInfoWidget):
"""Locators for the facility info widget"""
class WidgetInfoProject(BaseInfoWidget):
"""Locators for the project info widget"""
class WidgetInfoMarket(BaseInfoWidget):
"""Locators for the market info widget"""
class WidgetInfoDataAsset(BaseInfoWidget):
"""Locators for the data asset info widget"""
class WidgetInfoThreat(BaseInfoWidget):
"""Locators for the data asset info widget"""
class WidgetAdminRoles(object):
"""Locators for the roles widget on the admin dashboard"""
class _Locator(object):
@staticmethod
def get_role(child_id):
return (By.CSS_SELECTOR,
'[id="roles_list_widget"] li:nth-child({}) .span8>div'
.format(child_id))
@staticmethod
def get_scope(child_id):
return (By.CSS_SELECTOR,
'[id="roles_list_widget"] li:nth-child({}) .span4 '
'.scope'.format(child_id))
class __metaclass__(type):
def __init__(self, *args):
items = (
"EDITOR", "GRC_ADMIN", "PROGRAM_EDITOR", "PROGRAM_OWNER",
"PROGRAM_READER", "READER", "WORKFLOW_MEMBER", "WORKFLOW_OWNER")
for id_, name in enumerate(items, start=2):
setattr(self, attribute.ROLE + name, self._Locator.get_role(id_))
setattr(self, attribute.SCOPE + name, self._Locator.get_scope(id_))
class WidgetInfoSettingsButton(object):
"""Locators for the control info widget"""
class _Locator(object):
@staticmethod
def get_dropdown_item(child_id):
return (By.CSS_SELECTOR,
'.info-pane-utility .dropdown-menu li:nth-child({})'
.format(child_id))
TITLE_ENTERED = (By.CSS_SELECTOR, '[data-test-id="title_0ad9fbaf"]>h3')
DROPDOWN_SETTINGS_EDIT = _Locator.get_dropdown_item(1)
DROPDOWN_SETTINGS_PERMALINK = _Locator.get_dropdown_item(2)
DROPDOWN_SETTINGS_DELETE = _Locator.get_dropdown_item(3)
class BaseWidgetGeneric(object):
"""Locators shared amongst non info&admin widgets"""
_object_name = None
class __metaclass__(type):
"""For sharing parametrized class attributes we simply define how a
class should look like. Note that the same functionality can be
implemented using properties though with more code."""
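    # Illustration (derived from __init__ below): a subclass that sets
    # _object_name = "control" ends up with, for example,
    #   TITLE == (By.CSS_SELECTOR,
    #             '#control_widget .sticky-filter .filter-title h6')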
def __init__(self, *args):
self.TITLE = (
By.CSS_SELECTOR, '#{}_widget .sticky-filter .filter-title h6'
.format(self._object_name))
self.TEXTFIELD = (
By.CSS_SELECTOR,
'#{}_widget .sticky-filter .filter-input'.format(self._object_name))
self.BUTTON_SUBMIT = (
By.CSS_SELECTOR,
'#{}_widget .sticky-filter .filter-button [type="submit"]'
.format(self._object_name))
self.BUTTON_RESET = (
By.CSS_SELECTOR,
'#{}_widget .sticky-filter .filter-button [type="reset"]'
.format(self._object_name))
self.BUTTON_HELP = (
By.CSS_SELECTOR,
'#{}_widget .sticky-filter .filter-button #page-help'
.format(self._object_name))
class WidgetControls(BaseWidgetGeneric):
"""Locators for control widget"""
_object_name = "control"
class WidgetProducts(BaseWidgetGeneric):
"""Locators for product widget"""
_object_name = "product"
class WidgetProjects(BaseWidgetGeneric):
"""Locators for project widget"""
_object_name = "project"
class WidgetSystems(BaseWidgetGeneric):
"""Locators for system widget"""
_object_name = "system"
class WidgetDataAssets(BaseWidgetGeneric):
"""Locators for system widget"""
_object_name = "data_asset"
class WidgetProcesses(BaseWidgetGeneric):
"""Locators for system widget"""
_object_name = "process"
class WidgetIssues(BaseWidgetGeneric):
"""Locators for system widget"""
_object_name = "issue"
class AdminCustomAttributes(object):
"""Locators for the widget custom attributes in admin dashboard"""
class _Locator(object):
@staticmethod
def get_toggle(child_id):
return (By.CSS_SELECTOR, '.tree-structure li:nth-child({}) div '
'.openclose'.format(child_id))
@staticmethod
def get_programs_label(child_id):
return (
By.CSS_SELECTOR,
'.tree-structure li:nth-child(5) div thead>tr>th:nth-child({})'
.format(child_id))
class __metaclass__(type):
def __init__(self, *args):
items = (
objects.WORKFLOWS, "RISK_ASSESSMENTS", objects.THREATS,
objects.RISKS, objects.PROGRAMS, objects.AUDITS,
objects.OBJECTIVES, objects.SECTIONS, objects.CONTROLS,
objects.ISSUES, objects.ASSESSMENTS, objects.STANDARDS,
objects.REGULATIONS, objects.POLICIES, objects.CONTRACTS,
objects.CLAUSES, objects.REQUESTS, objects.VENDORS, objects.PEOPLE,
objects.ACCESS_GROUPS, objects.ORG_GROUPS, objects.PRODUCTS,
objects.MARKETS, objects.PROCESSES, objects.FACILITIES,
objects.PROJECTS, objects.DATA_ASSETS, objects.SYSTEMS)
for id_, name in enumerate(items, start=1):
setattr(self,
attribute.TOGGLE + name.upper(),
self._Locator.get_toggle(id_))
FILTER_INPUT_FIELD = (By.CLASS_NAME, 'filter-input')
FILTER_BUTTON_SUBMIT = (By.CSS_SELECTOR, '.filter-button>[type="submit"]')
FILTER_BUTTON_RESET = (By.CSS_SELECTOR, '.filter-button>[type="reset"]')
# programs dropdown
BUTTON_ADD_CUSTOM_PROGRAM_ATTR = (
By.CSS_SELECTOR, '.tree-structure li:nth-child(5)'
' [data-toggle="modal-ajax-form"]')
PROGRAMS_LABEL_ATTRIBUTE_NAME = _Locator.get_programs_label(1)
PROGRAMS_LABEL_ATTRIBUTE_TYPE = _Locator.get_programs_label(2)
PROGRAMS_LABEL_MANDATORY = _Locator.get_programs_label(3)
PROGRAMS_LABEL_EDIT = _Locator.get_programs_label(4)
LISTED_MEMBERS = (
By.CSS_SELECTOR,
'.tree-structure li:nth-child(5) div tbody>tr')
BUTTON_LISTED_MEMBERS_EDIT = (
By.CSS_SELECTOR,
'.tree-structure li:nth-child(5) div tbody>tr>td>ul .fa-pencil-square-o')
| 37.101176
| 79
| 0.668411
|
120801f0b5021e3d0b3926ac63f4f25c736a7caa
| 341
|
py
|
Python
|
biobb_dna/utils/transform.py
|
bioexcel/biobb_dna
|
7b61937d1683629949ffd7e1abb55831dcd25060
|
[
"Apache-2.0"
] | null | null | null |
biobb_dna/utils/transform.py
|
bioexcel/biobb_dna
|
7b61937d1683629949ffd7e1abb55831dcd25060
|
[
"Apache-2.0"
] | 1
|
2021-10-30T07:25:54.000Z
|
2021-10-30T07:25:54.000Z
|
biobb_dna/utils/transform.py
|
bioexcel/biobb_dna
|
7b61937d1683629949ffd7e1abb55831dcd25060
|
[
"Apache-2.0"
] | null | null | null |
def inverse_complement(sequence, dna=True):
"""compute inverse complement sequence."""
if dna:
A_complement = "T"
else:
A_complement = "U"
complement = {
"A": A_complement,
A_complement: "A",
"G": "C",
"C": "G"
}
return "".join([complement[b] for b in sequence])[::-1]
| 24.357143
| 59
| 0.524927
|
5b29ed2d28e0d9be6e72da847eac5332061bd8c9
| 11,006
|
py
|
Python
|
main.py
|
yunsujeon/BMT
|
a1598c589fa5cdb5a6cb257cda1da1593d05f8d7
|
[
"MIT"
] | null | null | null |
main.py
|
yunsujeon/BMT
|
a1598c589fa5cdb5a6cb257cda1da1593d05f8d7
|
[
"MIT"
] | null | null | null |
main.py
|
yunsujeon/BMT
|
a1598c589fa5cdb5a6cb257cda1da1593d05f8d7
|
[
"MIT"
] | null | null | null |
import argparse
from pprint import pprint
from utilities.config_constructor import Config
from scripts.train_captioning_module import train_cap
from scripts.train_proposal_generator import train_prop
from scripts.eval_on_learned_props import eval_on_learned_props
def main(cfg):
if cfg.procedure == 'train_cap':
train_cap(cfg)
elif cfg.procedure == 'train_prop':
train_prop(cfg)
elif cfg.procedure == 'evaluate':
eval_on_learned_props(cfg)
else:
raise NotImplementedError
if __name__ == "__main__":
'''
    Note that the arguments are shared between train_cap and train_prop, which leads to
    situations in which an argument is defined but unused (e.g. --word_emb_caps in the
    train_prop case).
'''
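    # Example invocations (illustrative only; the flags are defined below and the
    # default data paths may need to be adjusted for your setup):
    #   python main.py --procedure train_prop --modality audio_video
    #   python main.py --procedure train_cap --pretrained_prop_model_path <prop_model>.pt
    #   python main.py --procedure evaluate --prop_pred_path <props>.json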
parser = argparse.ArgumentParser(description='Run experiment')
## DATA
# paths to the precalculated train meta files
parser.add_argument('--train_meta_path', type=str, default='./data/train.csv')
parser.add_argument('--val_1_meta_path', type=str, default='./data/val_1.csv')
parser.add_argument('--val_2_meta_path', type=str, default='./data/val_2.csv')
parser.add_argument('--modality', type=str, default='audio_video',
choices=['audio', 'video', 'audio_video'],
help='modality to use. if audio_video both audio and video are used')
parser.add_argument('--video_feature_name', type=str, default='i3d')
parser.add_argument('--audio_feature_name', type=str, default='vggish')
parser.add_argument('--video_features_path', type=str,
default='./data/i3d_25fps_stack64step64_2stream_npy/')
parser.add_argument('--audio_features_path', type=str,
default='./data/vggish_npy/')
parser.add_argument('--d_vid', type=int, default=1024, help='raw feature dimension')
parser.add_argument('--d_aud', type=int, default=128, help='raw feature dimension')
parser.add_argument('--word_emb_caps', default='glove.840B.300d', type=str,
help='Embedding code name from torchtext.vocab.Vocab')
parser.add_argument('--unfreeze_word_emb', dest='unfreeze_word_emb', action='store_true',
default=False, help='Whether to finetune the pre-trained text embeddings')
parser.add_argument('--feature_timespan_in_fps', type=int, default=36,#default : 64
help='how many fps the input features will temporally cover')
parser.add_argument('--fps_at_extraction', type=int, default=15,# default : 25
help='how many fps were used at feature extraction')
parser.add_argument('--audio_feature_timespan', type=float,
default=0.96, help='audio feature timespan')
parser.add_argument('--train_json_path', type=str, default='./data/train.json')
## TRAINING
parser.add_argument('--procedure', type=str, required=True,
choices=['train_cap', 'train_prop', 'evaluate'])
parser.add_argument('--device_ids', type=int, nargs='+', default=[0], help='separated by a whitespace')
parser.add_argument('--start_token', type=str, default='<s>', help='starting token')
parser.add_argument('--end_token', type=str, default='</s>', help='ending token')
parser.add_argument('--pad_token', type=str, default='<blank>', help='padding token')
parser.add_argument('--max_len', type=int, default=30, help='maximum size of 1by1 prediction')
parser.add_argument('--min_freq_caps', type=int, default=1,
help='a word should appear min_freq times in train dataset to be in the vocab')
parser.add_argument('--optimizer', type=str, default='adam', choices=['adam', 'sgd'])
parser.add_argument('--betas', type=float, nargs=2, default=[0.9, 0.999], help='betas in adam')
parser.add_argument('--eps', type=float, default=1e-8, help='eps in adam')
parser.add_argument('--momentum', type=float, default=0.0)
parser.add_argument('--scheduler', type=str, default='constant',
choices=['constant', 'reduce_on_plateau'], help='lr scheduler')
parser.add_argument('--lr', type=float, default=5e-5, help='lr (if scheduler is constant)')
parser.add_argument('--weight_decay', type=float, default=0)
parser.add_argument('--lr_patience', type=int, help='ReduceLROnPlateau arguments')
parser.add_argument('--lr_reduce_factor', type=float,
help='ReduceLROnPlateau arguments, (use 0.2 for 1/5)')
parser.add_argument('--B', type=int, default=32, help='batch size per device')
parser.add_argument('--inf_B_coeff', type=int, default=2,
help='The batch size on inference will be inf_B_coeff times B arg')
parser.add_argument('--epoch_num', type=int, default=20, help='number of epochs to train') # default = 100
parser.add_argument('--one_by_one_starts_at', type=int, default=1,
help='# of epochs to skip before starting 1-by-1 validation (saves time)')
parser.add_argument('--early_stop_after', type=int, default=20, # default = 30
help='number of epochs to wait for best metric to change before stopping')
parser.add_argument(
'--smoothing', type=float, default=0.7,
        help='label smoothing coeff (0 = plain cross-entropy loss, larger = stronger smoothing); must be in [0, 1]'
)
parser.add_argument('--grad_clip', type=float, help='max grad norm for gradients')
    parser.add_argument('--pretrained_prop_model_path', type=str,
                        help='path to pre-trained prop model .pt')
parser.add_argument('--finetune_prop_encoder', dest='finetune_prop_encoder',
action='store_true', default=False)
parser.add_argument('--pretrained_cap_model_path', type=str,
help='path to pre-trained cap model .pt')
parser.add_argument('--finetune_cap_encoder', dest='finetune_cap_encoder',
action='store_true', default=False)
parser.add_argument('--obj_coeff', type=float, default=1, help='objectness coeff in loss')
parser.add_argument('--noobj_coeff', type=float, default=100, help='noobjectness coeff in loss')
parser.add_argument('--pad_audio_feats_up_to', type=int, default=800, #default=800
help='max feature length to pad other features to')
parser.add_argument('--pad_video_feats_up_to', type=int, default=300, #default=300
help='max feature length to pad other features to')
parser.add_argument('--nms_tiou_thresh', type=float, help='non-max suppression objectness thr')
parser.add_argument('--log_dir', type=str, default='./log/')
## EVALUATION
parser.add_argument('--prop_pred_path', type=str, help='path to a .json file with prop preds')
parser.add_argument('--avail_mp4_path', type=str, default='./data/available_mp4.txt',
help='list of available videos')
parser.add_argument('--reference_paths', type=str, nargs='+',
default=['./data/val_1_no_missings.json', './data/val_2_no_missings.json'],
help='reference paths for 1-by-1 validation')
parser.add_argument('--tIoUs', type=float, default=[0.3, 0.5, 0.7, 0.9], nargs='+',
help='thresholds for tIoU to be used for 1-by-1 validation')
parser.add_argument(
'--max_prop_per_vid', type=int, default=100,
        help='max number of proposals to take into consideration in 1-by-1 validation'
)
    parser.add_argument('--val_prop_meta_path', type=str, help='Only used in eval_on_learned_props')
## MODEL
parser.add_argument('--model', type=str, default='av_transformer',
choices=['transformer', 'av_transformer'], help='caption model type')
parser.add_argument('--dout_p', type=float, default=0.1, help='dropout probability: in [0, 1]')
parser.add_argument('--N', type=int, default=2, help='number of layers in a model')
parser.add_argument(
'--d_model', type=int, default=1024,
        help='the internal space in the multi-headed attention (when input dims of Q, K, V differ)')
parser.add_argument(
'--d_model_video', type=int,
help='If use_linear_embedder is true, this is going to be the d_model size for video model'
)
parser.add_argument(
'--d_model_audio', type=int,
help='If use_linear_embedder is true, this is going to be the d_model size for audio model'
)
parser.add_argument(
'--d_model_caps', type=int, default=300,
help='hidden size of the crossmodal decoder (caption tokens are mapped into this dim)'
)
parser.add_argument(
'--use_linear_embedder', dest='use_linear_embedder', action='store_true', default=False,
help='Whether to include a dense layer between the raw features and input to the model'
)
parser.add_argument('--H', type=int, default=4, help='number of heads in multiheaded attention')
parser.add_argument(
'--d_ff_video', type=int, help='size of the internal layer of PositionwiseFeedForward')
parser.add_argument(
'--d_ff_audio', type=int, help='size of the internal layer of PositionwiseFeedForward')
parser.add_argument(
'--d_ff_caps', type=int, help='size of the internal layer of PositionwiseFeedForward')
parser.add_argument('--anchors_num_video', type=int, default=128)
parser.add_argument('--anchors_num_audio', type=int, default=48)
parser.add_argument('--kernel_sizes_audio', type=int, nargs='+',
default=[5, 13, 23, 35, 51, 69, 91, 121, 161, 211])
parser.add_argument('--kernel_sizes_video', type=int, nargs='+',
default=[1, 5, 9, 13, 19, 25, 35, 45, 61, 79])
parser.add_argument('--conv_layers_audio', type=int, nargs='*', default=[512, 512],
help='intermediate layer dims in proposal gen heads')
parser.add_argument('--conv_layers_video', type=int, nargs='*', default=[512, 512],
help='intermediate layer dims in proposal gen heads')
parser.add_argument('--layer_norm', dest='layer_norm', action='store_true', default=False,
help='whether to use layer norm in proposal generation heads')
## DEBUGGING
parser.add_argument('--debug', dest='debug', action='store_true', default=False,
help='runs test() instead of main()')
parser.add_argument('--dont_log', dest='to_log', action='store_false',
help='Prevent logging in the experiment.')
parser.set_defaults(to_log=True)
args = parser.parse_args()
pprint(vars(args))
cfg = Config(args)
if args.debug:
# load your test to debug something using the same config as main() would
# from tests import test_features_max_length
# test_features_max_length(cfg)
pass
else:
main(cfg)
| 59.815217
| 110
| 0.658822
|
07a91a657f319617dcf963e3ca7a752037049897
| 3,038
|
py
|
Python
|
Exec/hydro_tests/Sod_stellar/testsuite_analysis/test3-helm.py
|
MargotF/Castro
|
5cdb549af422ef44c9b1822d0fefe043b3533c57
|
[
"BSD-3-Clause-LBNL"
] | 178
|
2017-05-03T18:07:03.000Z
|
2022-03-31T22:34:53.000Z
|
Exec/hydro_tests/Sod_stellar/testsuite_analysis/test3-helm.py
|
MargotF/Castro
|
5cdb549af422ef44c9b1822d0fefe043b3533c57
|
[
"BSD-3-Clause-LBNL"
] | 1,334
|
2017-05-04T14:23:24.000Z
|
2022-03-28T00:12:06.000Z
|
Exec/hydro_tests/Sod_stellar/testsuite_analysis/test3-helm.py
|
MargotF/Castro
|
5cdb549af422ef44c9b1822d0fefe043b3533c57
|
[
"BSD-3-Clause-LBNL"
] | 86
|
2017-06-12T15:27:51.000Z
|
2022-03-09T22:21:44.000Z
|
#!/usr/bin/env python3
# run as: ./test3-helm.py castro_exec_dir plotfile
# note: this relies on fextract.XXXX.ex being in your path somewhere
import sys
import os
import shutil
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
def process(castro_exec_dir, plotfile):
run_dir = os.getcwd()
# 1. find the fextract tool by looking through the User's path
path = os.environ["PATH"].split(":")
for d in path:
full_dir = os.path.expanduser(d)
if not os.path.isdir(full_dir):
continue
for f in os.listdir(full_dir):
if (os.path.isfile(full_dir+"/"+f) and
f.startswith("fextract") and f.endswith(".ex")):
analysis_routine = full_dir+"/"+f
break
print("analysis_routine = ", analysis_routine)
shutil.copy(analysis_routine, run_dir)
# 2. analyze the data
# output the average profile
os.system("./{} -s {} {}".format(os.path.basename(analysis_routine), "test3-helm.out", plotfile))
analytic = castro_exec_dir + "Exec/hydro_tests/Sod_stellar/Verification/test3.exact.128.out"
analytic_data = np.loadtxt(analytic)
# need to be more flexible with the data from the simulations, as the
# columns shift depending on the dimensionality. This gets the column
# names from the header
data = np.genfromtxt("test3-helm.out", skip_header=2, names=True)
# 3. make the plot
plt.subplot(411)
plt.plot(analytic_data[:,1], analytic_data[:,2])
plt.scatter(data["x"], data["density"], marker="+", color="r")
plt.xlabel("x")
plt.ylabel("density")
plt.xlim(0,2.e5)
plt.subplot(412)
# figure out which dimensions are present
d = 1
for n in data.dtype.names:
if n == "ymom" or n == "zmom": d += 1
dim = "xmom"
if d >= 2 and data["ymom"].ptp() > data["xmom"].ptp(): dim = "ymom"
if d == 3 and data["zmom"].ptp() > data[dim].ptp(): dim = "zmom"
plt.plot(analytic_data[:,1], analytic_data[:,3])
plt.scatter(data["x"], data[dim]/data["density"], marker="+", color="r")
plt.xlabel("x")
plt.ylabel("velocity")
plt.xlim(0,2.e5)
plt.subplot(413)
plt.plot(analytic_data[:,1], analytic_data[:,4])
plt.scatter(data["x"], data["pressure"], marker="+", color="r")
plt.xlabel("x")
plt.ylabel("pressure")
plt.xlim(0,2.e5)
plt.subplot(414)
plt.plot(analytic_data[:,1], analytic_data[:,5])
plt.scatter(data["x"], data["Temp"], marker="+", color="r")
plt.xlabel("x")
plt.ylabel("temperature")
plt.xlim(0,2.e5)
ax = plt.gca()
ax.set_yscale("log")
f = plt.gcf()
f.set_size_inches(6.0, 9.0)
plt.tight_layout()
index = plotfile.rfind("_plt")
if (index > 0):
plt.savefig(plotfile[:index] + ".png")
else:
plt.savefig("test3-helm.png")
if __name__ == "__main__":
castro_exec_dir = str(sys.argv[1])
plotfile = str(sys.argv[2])
process(castro_exec_dir, plotfile)
| 24.304
| 101
| 0.615866
|
d5a34e835cb6a9d40750be9723412e496de5ddc6
| 559
|
py
|
Python
|
setup.py
|
axju/pyclean
|
16ad3f44ee6ce3f2b0b12eaae220633c897580b1
|
[
"MIT"
] | 1
|
2018-11-23T22:45:07.000Z
|
2018-11-23T22:45:07.000Z
|
setup.py
|
axju/pyclean
|
16ad3f44ee6ce3f2b0b12eaae220633c897580b1
|
[
"MIT"
] | 2
|
2018-11-23T23:00:05.000Z
|
2019-10-10T07:39:11.000Z
|
setup.py
|
axju/pyclean
|
16ad3f44ee6ce3f2b0b12eaae220633c897580b1
|
[
"MIT"
] | 1
|
2019-09-23T06:43:29.000Z
|
2019-09-23T06:43:29.000Z
|
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(name='pycleanup',
version='0.0.1',
description='Clean up your working directory.',
long_description=readme(),
keywords='Clean up',
url='https://github.com/axju/pyclean',
author='Axel Juraske',
author_email='axel.juraske@short-report.de',
license='MIT',
packages=['pycleanup'],
entry_points = {
'console_scripts': ['pycleanup=pycleanup.__main__:main'],
},
zip_safe=False)
| 26.619048
| 65
| 0.617174
|
f581a6178138afd921d7ace13828e898e68ee137
| 4,408
|
py
|
Python
|
rivalcfg/handlers/buttons/layout_qwerty.py
|
Clueninja/rivalcfg
|
f7e2a3480c5f0b9a0b992ba5af7ff2025b2af346
|
[
"WTFPL"
] | 604
|
2016-03-31T12:22:26.000Z
|
2022-03-31T18:51:50.000Z
|
rivalcfg/handlers/buttons/layout_qwerty.py
|
Clueninja/rivalcfg
|
f7e2a3480c5f0b9a0b992ba5af7ff2025b2af346
|
[
"WTFPL"
] | 162
|
2016-04-17T10:58:08.000Z
|
2022-03-11T18:59:18.000Z
|
rivalcfg/handlers/buttons/layout_qwerty.py
|
Clueninja/rivalcfg
|
f7e2a3480c5f0b9a0b992ba5af7ff2025b2af346
|
[
"WTFPL"
] | 89
|
2016-04-10T08:56:58.000Z
|
2022-03-18T21:04:10.000Z
|
"""
This file contains the layout for QWERTY (en_US) keyboards.
"""
# fmt: off
#: The layout
layout = {
# Alphanumeric
"A": 0x04,
"B": 0x05,
"C": 0x06,
"D": 0x07,
"E": 0x08,
"F": 0x09,
"G": 0x0A,
"H": 0x0B,
"I": 0x0C,
"J": 0x0D,
"K": 0x0E,
"L": 0x0F,
"M": 0x10,
"N": 0x11,
"O": 0x12,
"P": 0x13,
"Q": 0x14,
"R": 0x15,
"S": 0x16,
"T": 0x17,
"U": 0x18,
"V": 0x19,
"W": 0x1A,
"X": 0x1B,
"Y": 0x1C,
"Z": 0x1D,
"1": 0x1E,
"2": 0x1F,
"3": 0x20,
"4": 0x21,
"5": 0x22,
"6": 0x23,
"7": 0x24,
"8": 0x25,
"9": 0x26,
"0": 0x27,
# Editing
"Enter": 0x28,
"Escape": 0x29,
"BackSpace": 0x2A,
"Tab": 0x2B,
"Space": 0x2C,
"Delete": 0x4C,
# Symbols
"-": 0x2D,
"=": 0x2E,
"[": 0x2F,
"]": 0x30,
"\\": 0x31,
"#": 0x32, # International!?
";": 0x33,
"'": 0x34,
"`": 0x35,
",": 0x36,
".": 0x37,
"/": 0x38,
"\\(inter)": 0x64, # International!?
# Typing Mode
"CapsLock": 0x39,
"ScrollLock": 0x47,
"Insert": 0x49,
"NumLock": 0x53,
# Functions
"F1": 0x3A,
"F2": 0x3B,
"F3": 0x3C,
"F4": 0x3D,
"F5": 0x3E,
"F6": 0x3F,
"F7": 0x40,
"F8": 0x41,
"F9": 0x42,
"F10": 0x43,
"F11": 0x44,
"F12": 0x45,
"F13": 0x68,
"F14": 0x69,
"F15": 0x6A,
"F16": 0x6B,
"F17": 0x6C,
"F18": 0x6D,
"F19": 0x6E,
"F20": 0x6F,
"F21": 0x70,
"F22": 0x71,
"F23": 0x72,
"F24": 0x73,
# Commands
"PrintScreen": 0x46,
"PauseBreak": 0x48,
"ContextMenu": 0x65,
# Navigation
"Home": 0x4A,
"PageUp": 0x4B,
"End": 0x4D,
"PageDown": 0x4E,
# Arrows
"Right": 0x4F,
"Left": 0x50,
"Down": 0x51,
"Up": 0x52,
# Numpad
"Keypad/": 0x54,
"Keypad*": 0x55,
"Keypad-": 0x56,
"Keypad+": 0x57,
"KeypadEnter": 0x58,
"Keypad1": 0x59,
"Keypad2": 0x5A,
"Keypad3": 0x5B,
"Keypad4": 0x5C,
"Keypad5": 0x5D,
"Keypad6": 0x5E,
"Keypad7": 0x5F,
"Keypad8": 0x60,
"Keypad9": 0x61,
"Keypad0": 0x62,
"Keypad.": 0x63,
"Keypad,": 0x85, # ??
"Keypad=": 0x86, # ??
# Modifiers
"LeftCtrl": 0xE0,
"LeftShift": 0xE1,
"LeftAlt": 0xE2,
"LeftSuper": 0xE3, # Command / Win logo
"RightCtrl": 0xE4,
"RightShift": 0xE5,
"RightAlt": 0xE6,
"RightSuper": 0xE7, # Command / Win logo
}
#: Alias for some keys of the layout
aliases = {
"esc": "Escape",
"bksp": "BackSpace",
"bkspace": "BackSpace",
"del": "Delete",
"dash": "-",
"minus": "-",
"equal": "=",
"eq": "=",
"leftbracket": "[",
"rightbracket": "]",
"backslash": "\\",
"hash": "#",
"semicolon": ";",
"semi": ";",
"quote": "'",
"backtick": "`",
"backquote": "`",
"comma": ",",
"dot": ".",
"point": ".",
"slash": "/",
"capslck": "CapsLock",
"capslk": "CapsLock",
"cpslck": "CapsLock",
"cpslk": "CapsLock",
"scrolllck": "ScrollLock",
"scrolllk": "ScrollLock",
"scrllck": "ScrollLock",
"scrllk": "ScrollLock",
"scrlck": "ScrollLock",
"scrlk": "ScrollLock",
"ins": "Insert",
"num": "NumLock",
"numlck": "NumLock",
"numlk": "NumLock",
"prntscr": "PrintScreen",
"prtscr": "PrintScreen",
"prtsc": "PrintScreen",
"psbrk": "PauseBreak",
"psbr": "PauseBreak",
"ctx": "ContextMenu",
"menu": "ContextMenu",
"ctxmenu": "ContextMenu",
"ctxmn": "ContextMenu",
"pgup": "PageUp",
"pgdown": "PageDown",
"pgdwn": "PageDown",
"pgdn": "PageDown",
"lctrl": "LeftCtrl",
"lshift": "LeftShift",
"lalt": "LeftAlt",
"alt": "LeftAlt",
"super": "LeftSuper",
"lsuper": "LeftSuper",
"windows": "LeftSuper",
"leftwindows": "LeftSuper",
"win": "LeftSuper",
"lwin": "LeftSuper",
"command": "LeftSuper",
"leftcommand": "LeftSuper",
"cmd": "LeftSuper",
"lcmd": "LeftSuper",
"rctrl": "RightCtrl",
"rshift": "RightShift",
"ralt": "RightAlt",
"altgr": "RightAlt",
"rsuper": "RightSuper",
"rightwindows": "RightSuper",
"rwin": "RightSuper",
"rightcommand": "RightSuper",
"rcmd": "RightSuper",
}
# fmt: on
| 18.065574
| 59
| 0.463702
|
5d463d168910c2c4ac777c6202405f3ef6c834dd
| 6,518
|
py
|
Python
|
traffic_control/admin/road_marking.py
|
City-of-Helsinki/city-infrastructure-platform
|
c14513a9e54405412085f1047f91ec58b263eac0
|
[
"CC0-1.0"
] | 2
|
2020-11-23T22:08:58.000Z
|
2022-03-02T13:13:20.000Z
|
traffic_control/admin/road_marking.py
|
City-of-Helsinki/city-infrastructure-platform
|
c14513a9e54405412085f1047f91ec58b263eac0
|
[
"CC0-1.0"
] | 170
|
2019-12-31T13:37:04.000Z
|
2022-03-12T14:03:35.000Z
|
traffic_control/admin/road_marking.py
|
City-of-Helsinki/city-infrastructure-platform
|
c14513a9e54405412085f1047f91ec58b263eac0
|
[
"CC0-1.0"
] | 3
|
2020-05-08T05:58:02.000Z
|
2022-03-15T16:07:25.000Z
|
from django.contrib.gis import admin
from django.db import models
from django.utils.translation import gettext_lazy as _
from ..constants import HELSINKI_LATITUDE, HELSINKI_LONGITUDE
from ..forms import AdminFileWidget
from ..mixins import (
EnumChoiceValueDisplayAdminMixin,
SoftDeleteAdminMixin,
UserStampedAdminMixin,
UserStampedInlineAdminMixin,
)
from ..models import (
RoadMarkingPlan,
RoadMarkingPlanFile,
RoadMarkingReal,
RoadMarkingRealFile,
)
from .audit_log import AuditLogHistoryAdmin
from .common import TrafficControlOperationInlineBase
__all__ = (
"RoadMarkingPlanAdmin",
"RoadMarkingPlanFileInline",
"RoadMarkingRealAdmin",
"RoadMarkingRealFileInline",
)
from ..models.road_marking import RoadMarkingRealOperation
class RoadMarkingPlanFileInline(admin.TabularInline):
formfield_overrides = {
models.FileField: {"widget": AdminFileWidget},
}
model = RoadMarkingPlanFile
@admin.register(RoadMarkingPlan)
class RoadMarkingPlanAdmin(
EnumChoiceValueDisplayAdminMixin,
SoftDeleteAdminMixin,
UserStampedAdminMixin,
admin.OSMGeoAdmin,
AuditLogHistoryAdmin,
):
default_lon = HELSINKI_LONGITUDE
default_lat = HELSINKI_LATITUDE
default_zoom = 12
fieldsets = (
(
_("General information"),
{
"fields": (
"owner",
"device_type",
"type_specifier",
"value",
"symbol",
"amount",
"additional_info",
"source_id",
"source_name",
)
},
),
(
_("Location information"),
{
"fields": (
"location",
"road_name",
"lane_number",
"lane_type",
"location_specifier",
)
},
),
(
_("Physical properties"),
{
"fields": (
"arrow_direction",
"line_direction",
"size",
"length",
"width",
"is_raised",
"is_grinded",
"material",
"color",
)
},
),
(_("Related models"), {"fields": ("plan", "traffic_sign_plan")}),
(
_("Validity"),
{
"fields": (
("validity_period_start", "validity_period_end"),
("seasonal_validity_period_start", "seasonal_validity_period_end"),
"lifecycle",
)
},
),
(
_("Metadata"),
{"fields": ("created_at", "updated_at", "created_by", "updated_by")},
),
)
list_display = (
"id",
"device_type",
"lifecycle",
"location",
)
list_filter = SoftDeleteAdminMixin.list_filter + ["owner"]
readonly_fields = (
"created_at",
"updated_at",
"created_by",
"updated_by",
"source_name",
"source_id",
)
raw_id_fields = ("plan", "traffic_sign_plan")
ordering = ("-created_at",)
inlines = (RoadMarkingPlanFileInline,)
class RoadMarkingRealFileInline(admin.TabularInline):
formfield_overrides = {
models.FileField: {"widget": AdminFileWidget},
}
model = RoadMarkingRealFile
class RoadMarkingRealOperationInline(TrafficControlOperationInlineBase):
model = RoadMarkingRealOperation
@admin.register(RoadMarkingReal)
class RoadMarkingRealAdmin(
EnumChoiceValueDisplayAdminMixin,
SoftDeleteAdminMixin,
UserStampedAdminMixin,
UserStampedInlineAdminMixin,
admin.OSMGeoAdmin,
AuditLogHistoryAdmin,
):
default_lon = HELSINKI_LONGITUDE
default_lat = HELSINKI_LATITUDE
default_zoom = 12
fieldsets = (
(
_("General information"),
{
"fields": (
"owner",
"device_type",
"type_specifier",
"value",
"symbol",
"amount",
"additional_info",
"missing_traffic_sign_real_txt",
"source_id",
"source_name",
)
},
),
(
_("Location information"),
{
"fields": (
"location",
"road_name",
"lane_number",
"lane_type",
"location_specifier",
)
},
),
(
_("Physical properties"),
{
"fields": (
"arrow_direction",
"line_direction",
"size",
"length",
"width",
"is_raised",
"is_grinded",
"material",
"color",
"condition",
)
},
),
(_("Related models"), {"fields": ("road_marking_plan", "traffic_sign_real")}),
(
_("Installation information"),
{"fields": ("installation_date", "installation_status")},
),
(
_("Validity"),
{
"fields": (
("validity_period_start", "validity_period_end"),
("seasonal_validity_period_start", "seasonal_validity_period_end"),
"lifecycle",
)
},
),
(
_("Metadata"),
{"fields": ("created_at", "updated_at", "created_by", "updated_by")},
),
)
list_display = (
"id",
"device_type",
"lifecycle",
"location",
"installation_date",
)
list_filter = SoftDeleteAdminMixin.list_filter + ["owner"]
readonly_fields = (
"created_at",
"updated_at",
"created_by",
"updated_by",
"source_name",
"source_id",
)
raw_id_fields = ("road_marking_plan", "traffic_sign_real")
ordering = ("-created_at",)
inlines = (RoadMarkingRealFileInline, RoadMarkingRealOperationInline)
| 27.158333
| 87
| 0.486652
|
80ab6b07fe31868e61bf742c8915572f28ddceb3
| 2,409
|
py
|
Python
|
test/test-tools/Archive/mathv1.py
|
LeastAuthority/Benchmark-Pendle-Smart-Contracts
|
5116f44cb0828d74b77ebfb14f394d61103112bb
|
[
"MIT"
] | null | null | null |
test/test-tools/Archive/mathv1.py
|
LeastAuthority/Benchmark-Pendle-Smart-Contracts
|
5116f44cb0828d74b77ebfb14f394d61103112bb
|
[
"MIT"
] | null | null | null |
test/test-tools/Archive/mathv1.py
|
LeastAuthority/Benchmark-Pendle-Smart-Contracts
|
5116f44cb0828d74b77ebfb14f394d61103112bb
|
[
"MIT"
] | null | null | null |
'''
Python version of Pendle's V1 Math Lib
'''
import helper
PRECISION_BITS = 40
ONE = 1 << PRECISION_BITS
PRECISION_POW = 100
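# The functions below operate on fixed-point integers with PRECISION_BITS
# fractional bits: ONE represents the real value 1.0, rmul/rdiv perform
# rounded fixed-point multiplication/division, and PRECISION_POW is the
# convergence cutoff for the power-series approximation in rpowApprox.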
def rmul(x, y):
return ((ONE // 2 + (x) * (y)) >> PRECISION_BITS)
def rdiv(x, y):
return (y // 2 + x * ONE) // (y)
def countLeadingZeros(_p, _q):
    denominator = (1 << 255)
    for i in range(255, -1, -1):
        if (_p // (_q * denominator) > 0):
            return i
        denominator = denominator // 2
    return -1
def log2ForSmallNumber(_x):
res = 0
one = ONE
two = 2 * one
addition = one
for i in range(PRECISION_BITS, 0, -1):
_x = (_x * _x) // one
addition = addition // 2
if (_x >= two):
_x = _x // 2
res += addition
return res
def logBase2(_p, _q):
n = 0
if (_p > _q):
n = countLeadingZeros(_p, _q)
y = (_p * ONE) // (_q * (1 << n))
log2Small = log2ForSmallNumber(y)
return n * ONE + log2Small
def ln(p, q=ONE):
ln2Numerator = 6931471805599453094172
    ln2Denominator = 10000000000000000000000
    log2x = logBase2(p, q)
    return (ln2Numerator * log2x) // ln2Denominator
def rpowi(_x, _n):
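    # fixed-point exponentiation by squaring for a non-negative integer exponent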
z = ONE
if (_n % 2 != 0):
z = _x
_n //= 2
while(_n != 0):
_x = rmul(_x, _x)
if (_n % 2 != 0):
z = rmul(z, _x)
_n //= 2
return z
def rfloor(x):
return rtoi(x) * ONE
def rtoi(x):
return x // ONE
def rpow(_base, _exp):
whole = rfloor(_exp)
remain = _exp - whole
wholePow = rpowi(_base, rtoi(whole))
if (remain == 0):
return wholePow
partialResult = rpowApprox(_base, remain)
return rmul(wholePow, partialResult)
def rpowApprox(_base, _exp):
a = _exp
(x, xneg) = helper.rsignSub(_base, ONE)
term = ONE
sum = term
negative = False
i = 0
while(term >= PRECISION_POW):
i = i + 1
bigK = i * ONE
(c, cneg) = helper.rsignSub(a, bigK - ONE)
term = rmul(term, rmul(c, x))
term = rdiv(term, bigK)
if (term == 0):
break
        if (xneg):
            negative = not negative
        if (cneg):
            negative = not negative
if (negative):
assert(sum >= term)
sum = sum - term
else:
sum = sum + term
print("converge:", i)
return sum
| 19.272
| 53
| 0.50851
|
4446a09b60cf868c2d2ee54975500925ea0b5493
| 9,956
|
py
|
Python
|
pypicloud/storage/s3.py
|
pepastach/pypicloud
|
42ad82a52bc4b7f1da79179f089ee2b094c76865
|
[
"MIT"
] | 1
|
2021-04-10T04:18:36.000Z
|
2021-04-10T04:18:36.000Z
|
pypicloud/storage/s3.py
|
jweede/pypicloud
|
3015160ed6fd7f8ffc3a3e4513e7fb91dd340921
|
[
"MIT"
] | null | null | null |
pypicloud/storage/s3.py
|
jweede/pypicloud
|
3015160ed6fd7f8ffc3a3e4513e7fb91dd340921
|
[
"MIT"
] | null | null | null |
""" Store packages in S3 """
import logging
import posixpath
from datetime import datetime, timedelta
from urllib.parse import quote, urlparse
import boto3
from botocore.config import Config
from botocore.exceptions import ClientError
from botocore.signers import CloudFrontSigner
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding
from pyramid.settings import asbool, falsey
from pyramid_duh.settings import asdict
from pypicloud.models import Package
from pypicloud.util import get_settings, normalize_metadata, parse_filename
from .object_store import ObjectStoreStorage
LOG = logging.getLogger(__name__)
class S3Storage(ObjectStoreStorage):
""" Storage backend that uses S3 """
test = False
def __init__(self, request=None, bucket=None, **kwargs):
super(S3Storage, self).__init__(request=request, **kwargs)
self.bucket = bucket
@classmethod
def _subclass_specific_config(cls, settings, common_config):
sse = settings.get("storage.server_side_encryption")
if sse not in [None, "AES256", "aws:kms"]:
LOG.warning(
"Unrecognized value %r for 'storage.sse'. See "
"https://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Object.put "
"for more details",
sse,
)
bucket_name = settings.get("storage.bucket")
if bucket_name is None:
raise ValueError("You must specify the 'storage.bucket'")
return {"sse": sse, "bucket": cls.get_bucket(bucket_name, settings)}
@classmethod
def get_bucket(cls, bucket_name, settings):
config_settings = get_settings(
settings,
"storage.",
region_name=str,
signature_version=str,
user_agent=str,
user_agent_extra=str,
connect_timeout=int,
read_timeout=int,
parameter_validation=asbool,
max_pool_connections=int,
proxies=asdict,
)
config_settings["s3"] = get_settings(
settings,
"storage.",
use_accelerate_endpoint=asbool,
payload_signing_enabled=asbool,
addressing_style=str,
signature_version=str,
)
config = Config(**config_settings)
def verify_value(val):
""" Verify can be a boolean (False) or a string """
s = str(val).strip().lower()
if s in falsey:
return False
else:
return str(val)
s3conn = boto3.resource(
"s3",
config=config,
**get_settings(
settings,
"storage.",
region_name=str,
api_version=str,
use_ssl=asbool,
verify=verify_value,
endpoint_url=str,
aws_access_key_id=str,
aws_secret_access_key=str,
aws_session_token=str,
)
)
bucket = s3conn.Bucket(bucket_name)
try:
head = s3conn.meta.client.head_bucket(Bucket=bucket_name)
except ClientError as e:
if e.response["Error"]["Code"] == "404":
LOG.info("Creating S3 bucket %s", bucket_name)
if config.region_name:
location = {"LocationConstraint": config.region_name}
bucket.create(CreateBucketConfiguration=location)
else:
bucket.create()
bucket.wait_until_exists()
else:
if e.response["Error"]["Code"] == "301":
LOG.error(
"Bucket found in different region. Check that "
"the S3 bucket specified in 'storage.bucket' is "
"in 'storage.region_name'"
)
raise
return bucket
@classmethod
def package_from_object(cls, obj, factory):
""" Create a package from a S3 object """
filename = posixpath.basename(obj.key)
name = obj.metadata.get("name")
version = obj.metadata.get("version")
metadata = Package.read_metadata(obj.metadata)
# We used to not store metadata. This is for backwards
# compatibility
if name is None or version is None:
try:
name, version = parse_filename(filename)
except ValueError:
LOG.warning("S3 file %s has no package name", obj.key)
return None
return factory(
name, version, filename, obj.last_modified, path=obj.key, **metadata
)
def list(self, factory=Package):
keys = self.bucket.objects.filter(Prefix=self.bucket_prefix)
for summary in keys:
# ObjectSummary has no metadata, so we have to fetch it.
obj = summary.Object()
pkg = self.package_from_object(obj, factory)
if pkg is not None:
yield pkg
def _generate_url(self, package):
""" Generate a signed url to the S3 file """
if self.public_url:
if self.region_name:
return "https://s3.{0}.amazonaws.com/{1}/{2}".format(
self.region_name, self.bucket.name, self.get_path(package)
)
else:
if "." in self.bucket.name:
self._log_region_warning()
return "https://{0}.s3.amazonaws.com/{1}".format(
self.bucket.name, self.get_path(package)
)
url = self.bucket.meta.client.generate_presigned_url(
"get_object",
Params={"Bucket": self.bucket.name, "Key": self.get_path(package)},
ExpiresIn=self.expire_after,
)
# There is a special case if your bucket has a '.' in the name. The
# generated URL will return a 301 and the pip downloads will fail.
# If you provide a region_name, boto should correctly generate a url in
# the form of `s3.<region>.amazonaws.com`
# See https://github.com/stevearc/pypicloud/issues/145
if "." in self.bucket.name:
pieces = urlparse(url)
if pieces.netloc == "s3.amazonaws.com" and self.region_name is None:
self._log_region_warning()
return url
def _log_region_warning(self):
""" Spit out a warning about including region_name """
LOG.warning(
"Your signed S3 urls may not work! "
"Try adding the bucket region to the config with "
"'storage.region_name = <region>' or using a bucket "
"without any dots ('.') in the name."
)
def upload(self, package, datastream):
key = self.bucket.Object(self.get_path(package))
kwargs = {}
if self.sse is not None:
kwargs["ServerSideEncryption"] = self.sse
if self.object_acl:
kwargs["ACL"] = self.object_acl
if self.storage_class is not None:
kwargs["StorageClass"] = self.storage_class
metadata = package.get_metadata()
metadata["name"] = package.name
metadata["version"] = package.version
metadata = normalize_metadata(metadata)
key.put(Metadata=metadata, Body=datastream, **kwargs)
def delete(self, package):
self.bucket.delete_objects(
Delete={"Objects": [{"Key": self.get_path(package)}]}
)
def check_health(self):
try:
self.bucket.meta.client.head_bucket(Bucket=self.bucket.name)
except ClientError as e:
return False, str(e)
else:
return True, ""
class CloudFrontS3Storage(S3Storage):
""" Storage backend that uses S3 and CloudFront """
def __init__(
self, request=None, domain=None, crypto_pk=None, key_id=None, **kwargs
):
super(CloudFrontS3Storage, self).__init__(request, **kwargs)
self.domain = domain
self.crypto_pk = crypto_pk
self.key_id = key_id
self.cf_signer = None
if key_id is not None:
self.cf_signer = CloudFrontSigner(self.key_id, self._rsa_signer)
self.client = boto3.client("cloudfront")
@classmethod
def configure(cls, settings):
kwargs = super(CloudFrontS3Storage, cls).configure(settings)
kwargs["domain"] = settings["storage.cloud_front_domain"]
kwargs["key_id"] = settings.get("storage.cloud_front_key_id")
private_key = settings.get("storage.cloud_front_key_string")
if private_key is None:
key_file = settings.get("storage.cloud_front_key_file")
if key_file:
with open(key_file, "rb") as ifile:
private_key = ifile.read()
else:
private_key = private_key.encode("utf-8")
crypto_pk = serialization.load_pem_private_key(
private_key, password=None, backend=default_backend()
)
kwargs["crypto_pk"] = crypto_pk
return kwargs
def _rsa_signer(self, message):
""" Generate a RSA signature for a message """
return self.crypto_pk.sign(message, padding.PKCS1v15(), hashes.SHA1())
def _generate_url(self, package):
""" Get the fully-qualified CloudFront path for a package """
path = self.get_path(package)
url = self.domain + "/" + quote(path)
# No key id, no signer, so we don't have to sign the URL
if self.cf_signer is None:
return url
# To sign with a canned policy:
expires = datetime.utcnow() + timedelta(seconds=self.expire_after)
return self.cf_signer.generate_presigned_url(url, date_less_than=expires)
| 36.072464
| 98
| 0.590398
|
2e3542f9c3cb701d805b00055da3dbc97e3fb13f
| 22,072
|
py
|
Python
|
inter_MSA.py
|
he-h/Covid-Mobility-Network-Analysis
|
8464b0a25db03585219c1fc6d8e257a9ed826628
|
[
"MIT"
] | 1
|
2021-01-07T19:57:46.000Z
|
2021-01-07T19:57:46.000Z
|
inter_MSA.py
|
hhe-bot/Covid-Mobility-Network-Analysis
|
8464b0a25db03585219c1fc6d8e257a9ed826628
|
[
"MIT"
] | 1
|
2020-09-23T06:10:29.000Z
|
2020-09-25T15:41:33.000Z
|
inter_MSA.py
|
haoyu0831/Covid-Mobility-Network-Analysis
|
8464b0a25db03585219c1fc6d8e257a9ed826628
|
[
"MIT"
] | null | null | null |
from model import *
from plot import *
import powerlaw
from statistics import median
from matplotlib.lines import Line2D
from read_file import *
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap as Basemap
import json
import csv
import os
from haversine import haversine
# NY NJ PA 5602
# LA 4472
# Chicago 1602
# Dallas 1922
# Houston 3362
with open('data/pos.json', 'r') as o:
pos = json.load(o)
def largest_size_qc(g, device_count):
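    # Inferred intent: sweep thresholds via calc_g_sg, take the percolation
    # threshold q_c from the second-largest-component curve (l_sl_value), and
    # return the giant-component size at q_c together with q_c itself.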
thresholds, num_g, num_sg, num_r, dev_g, dev_sg, edge_size = calc_g_sg(g, 10, 2.5, device_count)
index_qc, index_qcb = l_sl_value(num_sg)
if len(num_g) == 0:
return 0, 0
gc_node_size = num_g[index_qc]
qc = thresholds[index_qc]
return gc_node_size, qc
class InterMsaG:
def __init__(self, date, dest, device):#, qc):
print(date)
self.date = date
self.device_count = device
self.g = generate_network(dest)
# self.msa_qc = qc
self.setup()
def setup(self):
self.flux = total_flux(self.g)
self.sum_device = sum(self.device_count.values())
# calculate qc and following features
self.thresholds, self.num_g, self.num_sg, self.num_r, self.dev_g, self.dev_sg, self.edge_size = calc_g_sg(self.g, 10, 2.5, self.device_count)
index_qc, index_qcb = l_sl_value(self.num_sg)
interval = self.thresholds[1] - self.thresholds[0]
self.gc_node_size = self.num_g[index_qc]
self.qc = self.thresholds[index_qc]
self.qcb = self.thresholds[index_qcb]
self.qca = self.thresholds[[i for i, j in enumerate(self.num_r) if j == max(self.num_r)][0]]
self.qcf = self.thresholds[-1]
g_perco = generate_network_threshold(self.g, self.qc)
# self.g_perco_1 = generate_network_threshold(self.g, self.qcb)
# self.bottleneck = calc_bottleneck_c(self.g, self.thresholds, self.qc)
# self.bottleneck1 = calc_bottleneck_c(self.g, self.thresholds, self.qcb)
self.bottleneck = calc_bn_set_diff(generate_network_threshold(self.g, self.qc-interval), generate_network_threshold(self.g, self.qc))
self.bottleneck1 = calc_bn_set_diff(generate_network_threshold(self.g, self.qcb-interval), generate_network_threshold(self.g, self.qcb))
# self.plot_map(g_perco)
self.indegree = []
for i in self.g.nodes():
self.indegree.append(self.g.degree(i))
self.indegree_median = median(self.indegree)
self.indegree_25 = np.percentile(self.indegree, 25)
self.indegree_75 = np.percentile(self.indegree, 75)
self.edge_w = np.zeros(len(self.g.edges()))
for j,i in enumerate(self.g.edges()):
self.edge_w[j] = self.g.edges[i]['weight']
self.edge_w_median = median(self.edge_w)
self.edge_w_25 = np.percentile(self.edge_w, 25)
self.edge_w_75 = np.percentile(self.edge_w, 75)
self.edge_w_ave = self.flux / self.g.number_of_nodes()
self.distances = []
for i, j in self.g.edges:
dis = haversine(pos[i], pos[j])
self.distances += [dis]*int(self.g.edges[i,j]['weight'])
self.distances = np.array(self.distances)
dc = np.array(list(self.device_count.values()))
self.device_median = median(dc)
self.device_25 = np.percentile(dc, 25)
self.device_75 = np.percentile(dc, 75)
self.qc_setup()
# self.plot_map(self.g_perco, 1)
# self.plot_map(self.g_perco_1, 0)
def qc_setup(self):
self.qc_m = dict()
self.qca_m = dict()
self.qcf_m = dict()
df = pd.read_csv(qc_str(self.date))
for i in df.index:
self.qc_m[df['msa'][i]] = df['qc'][i]
self.qca_m[df['msa'][i]] = df['qca'][i]
self.qcf_m[df['msa'][i]] = df['qcf'][i]
def result_dir(self):
return 'results/interMSA/'+aug_str(self.date.month)+'/'+aug_str(self.date.day) + '/'
def plot_g_sg(self):
plt.clf()
figure, axis_1 = plt.subplots()
axis_1.axvline(self.qc, linestyle='-.', color='red', label=r'$q_c$')
axis_1.axvline(self.qcb, linestyle='-.', color='orange', label=r'$q_{c2}$')
axis_1.set_ylabel('GC Component size', color='dodgerblue')
axis_1.plot(self.thresholds, self.num_g, color='dodgerblue', label='GC')
axis_1.set_xlabel('thresholds')
axis_2 = axis_1.twinx()
axis_2.plot(self.thresholds, self.num_sg, color='grey', label='SGC')
axis_2.set_ylabel('SGC Component size', color='grey')
lines_1, labels_1 = axis_1.get_legend_handles_labels()
lines_2, labels_2 = axis_2.get_legend_handles_labels()
lines = lines_1 + lines_2
labels = labels_1 + labels_2
axis_1.legend(lines, labels, loc=0)
plt.title('Inter MSA ' + self.date.strftime('%m/%d') + ' percolation component size')
plt.savefig(self.result_dir() + self.date.strftime('%m_%d') + '_g_sg_size.jpg')
return
def plot_g_sg_log(self):
plt.clf()
figure, axis_1 = plt.subplots()
axis_1.axvline(self.qc, linestyle='-.', color='red', label=r'$q_c$')
axis_1.axvline(self.qcb, linestyle='-.', color='orange', label=r'$q_{c2}$')
# axis_1.set_ylabel('GC Component size', color='dodgerblue')
axis_1.plot(self.thresholds, self.num_g, color='dodgerblue', label=r'$1_{st} CC$')
# axis_1.set_xlabel('thresholds', fontsize=18)
axis_1.tick_params(axis='y', colors='dodgerblue')
axis_2 = axis_1.twinx()
axis_2.plot(self.thresholds, self.num_sg, color='grey', label=r'$2_{nd} CC$')
# axis_2.set_ylabel('SGC Component size', color='grey')
lines_1, labels_1 = axis_1.get_legend_handles_labels()
lines_2, labels_2 = axis_2.get_legend_handles_labels()
lines = lines_1 + lines_2
labels = labels_1 + labels_2
if self.date == dt.date(2020,2,1):
axis_1.legend(lines, labels, loc=0, prop={'size':17})
plt.title('Inter MSA percolation', fontsize=22)
else:
axis_1.set_xlabel('thresholds', fontsize=18)
plt.xscale('log')
# plt.title('Inter MSA percolation', fontsize=22)
plt.savefig(self.result_dir() + self.date.strftime('%m_%d') + '_g_sg_log_size.jpg')
return
def plot_g_sg_c(self):
plt.clf()
figure, axis_1 = plt.subplots()
axis_1.axvline(self.qca, linestyle='-.', color='red', label=r'$q_c$')
axis_1.axvline(self.thresholds[-1], linestyle='-.', color='orange', label=r'$q_{c2}$')
# axis_1.set_ylabel('GC Component size', color='dodgerblue')
axis_1.plot(self.thresholds, self.num_g, color='dodgerblue', label='GC')
# axis_1.set_xlabel('thresholds')
axis_2 = axis_1.twinx()
axis_2.plot(self.thresholds, self.num_r, color='grey', label='RGC')
# axis_2.set_ylabel('Average rest Component size', color='grey')
lines_1, labels_1 = axis_1.get_legend_handles_labels()
lines_2, labels_2 = axis_2.get_legend_handles_labels()
lines = lines_1 + lines_2
labels = labels_1 + labels_2
axis_1.legend(lines, labels, loc=0)
plt.title('Inter MSA ' + self.date.strftime('%m/%d') + ' continuous component size')
plt.savefig(self.result_dir() + self.date.strftime('%m_%d') + '_g_rg_size.jpg')
return
def plot_hist(self):
plt.clf()
powerlaw.plot_ccdf(self.edge_w, linestyle='-', color='#2b8cbe', label='CCDF')
plt.ylabel('CCDF')
plt.xlabel(r'$W_{ij}$')
plt.title('Inter MSA ' + self.date.strftime('%m/%d') + ' CCDF')
plt.savefig(self.result_dir() + self.date.strftime('%m_%d') + '_hist.jpg')
return
def plot_g_sg_device(self):
plt.clf()
figure, axis_1 = plt.subplots()
axis_1.axvline(self.qc, linestyle='-.', color='red', label=r'$q_c$')
axis_1.set_ylabel('GC device count', color='dodgerblue')
axis_1.plot(self.thresholds, self.dev_g, color='dodgerblue', label='GC')
axis_1.axvline(self.qcb, linestyle='-.', color='orange', label=r'$q_{c2}$')
axis_1.set_xlabel('thresholds')
axis_2 = axis_1.twinx()
axis_2.plot(self.thresholds, self.dev_sg, color='grey', label='SGC')
axis_2.set_ylabel('SGC device count', color='grey')
lines_1, labels_1 = axis_1.get_legend_handles_labels()
lines_2, labels_2 = axis_2.get_legend_handles_labels()
lines = lines_1 + lines_2
labels = labels_1 + labels_2
axis_1.legend(lines, labels, loc=0)
plt.title('Inter MSA ' + self.date.strftime('%m/%d') + ' percolation device count')
plt.savefig(self.result_dir() + self.date.strftime('%m_%d') + '_g_sg_device.jpg')
return
def plot_msa_qc(self):
plt.clf()
th = np.arange(1, 50, .5)
remain_msa = []
for i in th:
tmp = 0
for j in self.qc_m.keys():
if i < self.qc_m[j]:
tmp += self.device_count[j]
remain_msa.append(tmp)
plt.plot(th, remain_msa, color='royalblue')
plt.grid(True)
plt.xlabel('Thresholds')
plt.ylabel('device count')
plt.title('Sum of remaining MSAs device count ' + self.date.strftime('%m/%d'))
plt.savefig(self.result_dir() + self.date.strftime('%m_%d') + '_MSAs_device.jpg')
return
def plot_qc_map(self):
plt.clf()
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
m = Basemap(
projection='merc',
llcrnrlon=-130,
llcrnrlat=25,
urcrnrlon=-60,
urcrnrlat=50,
lat_ts=0,
resolution='i',
suppress_ticks=True)
m.readshapefile('tl_2017_us_state/tl_2017_us_state', 'states', drawbounds=True)
x, y = [], []
for i in pos.keys():
x.append(pos[i][0])
y.append(pos[i][1])
mx, my = m(x, y)
pos1 = dict()
for i, j in enumerate(pos.keys()):
pos1[j] = (mx[i], my[i])
msas = {0:[], 3:[], 6:[], 9:[], 12:[]}
for i in self.qc_m.keys():
j = str(i)
if self.qc_m[i] > 12:
msas[12].append(j)
elif self.qc_m[i] > 9:
msas[9].append(j)
elif self.qc_m[i] > 6:
msas[6].append(j)
elif self.qc_m[i] > 3:
msas[3].append(j)
else:
msas[0].append(j)
colors = ['#fef0d9', '#fdcc8a', '#fc8d59', '#e34a33', '#b30000']
iter=0
for i in msas.keys():
tmp = nx.Graph()
tmp.add_nodes_from(msas[i])
nx.draw_networkx_nodes(G=tmp, pos=pos1, nodelist=tmp.nodes(), node_color=colors[iter], label=r"$q_c$>"+str(i),
node_size=[(self.device_count[i]/250)**(1/2) for i in tmp.nodes()])
iter += 1
plt.legend()
plt.title('MSA qc map ' + self.date.strftime('%m/%d'))
plt.savefig(self.result_dir() + self.date.strftime('%m_%d') + '_MSAs_qc_map.jpg')
return
def plot_map(self, g):
plt.clf()
m = Basemap(
projection='merc',
llcrnrlon=-130,
llcrnrlat=25,
urcrnrlon=-60,
urcrnrlat=50,
lat_ts=0,
resolution='i',
suppress_ticks=True)
# m.drawcountries(linewidth=3)
# m.drawstates(linewidth=0.2)
# m.drawcoastlines(linewidth=1)
# m.fillcontinents(alpha=0.3)
# m.drawcounties(linewidth=0.1)
m.readshapefile('tl_2017_us_state/tl_2017_us_state', 'states', drawbounds=True)
with open(self.date.strftime('edge_list/interMSA%m_%d.csv'), 'w') as e:
csvwriter = csv.writer(e)
csvwriter.writerow(['from', 'to', 'edge', 'weight'])
x, y = [], []
for i in pos.keys():
x.append(pos[i][0])
y.append(pos[i][1])
mx, my = m(x, y)
pos1 = dict()
for i, j in enumerate(pos.keys()):
pos1[j] = (mx[i], my[i])
cc = list(nx.connected_components(g))
cc.sort(key=len, reverse=True)
ax = plt.gca()
g0 = g.subgraph(cc[0])
nx.draw_networkx_nodes(G=g0, node_color='cornflowerblue', nodelist=g0.nodes(), pos=pos1, alpha=1,
node_size=[(self.device_count[i]/250)**(1/2) for i in g0.nodes()])
for i, j in g0.edges():
csvwriter.writerow([i, j, 'gc', self.g.edges[i, j]['weight']])
ax.annotate("",
xy=pos1[i], xycoords='data',
xytext=pos1[j], textcoords='data',
arrowprops=dict(arrowstyle="-", color='cornflowerblue',
shrinkA=5, shrinkB=5,
patchA=None, patchB=None,
connectionstyle="arc3,rad=0.3",
),
)
g1 = g.subgraph(cc[1])
nx.draw_networkx_nodes(G=g1, node_color='lightgreen', nodelist=g1.nodes(), pos=pos1, alpha=1,
node_size=[(self.device_count[i]/250)**(1/2) for i in g1.nodes()])
for i, j in g1.edges():
csvwriter.writerow([i, j, 'sgc', self.g.edges[i, j]['weight']])
ax.annotate("",
xy=pos1[i], xycoords='data',
xytext=pos1[j], textcoords='data',
arrowprops=dict(arrowstyle="-", color='lightgreen',
shrinkA=5, shrinkB=5,
patchA=None, patchB=None,
connectionstyle="arc3,rad=0.3",
),
)
g2 = g.subgraph(cc[2])
nx.draw_networkx_nodes(G=g2, node_color='peachpuff', nodelist=g2.nodes(), pos=pos1, alpha=1,
node_size=[(self.device_count[i] / 250) ** (1 / 2) for i in g2.nodes()])
for i, j in g2.edges():
csvwriter.writerow([i, j, 'tgc', self.g.edges[i, j]['weight']])
ax.annotate("",
xy=pos1[i], xycoords='data',
xytext=pos1[j], textcoords='data',
arrowprops=dict(arrowstyle="-", color='peachpuff',
shrinkA=5, shrinkB=5,
patchA=None, patchB=None,
connectionstyle="arc3,rad=0.3",
),
)
tmp = set()
for i in cc[3:]:
if len(i) > 1:
tmp |= i
g3 = g.subgraph(tmp)
nx.draw_networkx_nodes(G=g3, node_color='silver', nodelist=g3.nodes(), pos=pos1, alpha=1,
node_size=[(self.device_count[i]/250) ** (1 / 2) for i in g3.nodes()])
for i, j in g3.edges():
csvwriter.writerow([i, j, 'rest', self.g.edges[i, j]['weight']])
ax.annotate("",
xy=pos1[i], xycoords='data',
xytext=pos1[j], textcoords='data',
arrowprops=dict(arrowstyle="-", color='silver',
shrinkA=5, shrinkB=5,
patchA=None, patchB=None,
connectionstyle="arc3,rad=0.3",
),
)
bn1 = nx.Graph()
bn1.add_edges_from(self.bottleneck1)
nx.draw_networkx_nodes(G=bn1, node_color='gold', nodelist=bn1.nodes(), pos=pos1, alpha=1,
node_size=[(self.device_count[i] / 250) ** (1 / 2) for i in bn1.nodes()])
for i, j in bn1.edges():
csvwriter.writerow([i, j, 'bn', self.g.edges[i, j]['weight']])
ax.annotate("",
xy=pos1[i], xycoords='data',
xytext=pos1[j], textcoords='data',
arrowprops=dict(arrowstyle="-", color='gold',
shrinkA=5, shrinkB=5,
patchA=None, patchB=None,
connectionstyle="arc3,rad=0.3",
),
)
bn = nx.Graph()
bn.add_edges_from(self.bottleneck)
nx.draw_networkx_nodes(G=bn, node_color='r', nodelist=bn.nodes(), pos=pos1, alpha=1,
node_size=[(self.device_count[i]/250) ** (1 / 2) for i in bn.nodes()])
for i, j in bn.edges():
csvwriter.writerow([i, j, 'sbn', self.g.edges[i, j]['weight']])
ax.annotate("",
xy=pos1[i], xycoords='data',
xytext=pos1[j], textcoords='data',
arrowprops=dict(arrowstyle="-", color='r',
shrinkA=5, shrinkB=5,
patchA=None, patchB=None,
connectionstyle="arc3,rad=0.3",
),
)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
labels = ['GC', 'SGC', 'TGC', 'Bottleneck(GC)', 'Bottleneck(non GC)', 'Rest']
colors = ['cornflowerblue', 'lightgreen', 'peachpuff', 'r', 'gold', 'silver']
lines = [Line2D([0], [0], color=c, linewidth=2, alpha=0.85) for c in colors]
plt.tight_layout()
plt.legend(lines, labels, fontsize=7, loc=4)
plt.title('Inter MSA ' + self.date.strftime('%m/%d') + ' map')
plt.savefig(self.result_dir() + self.date.strftime('%m_%d') + '_map.jpg')
return
def plot_w_qc_perco(self):
colors = ['#fef0d9', '#fdcc8a', '#fc8d59', '#e34a33', '#b30000']
num = [0, 25, 50, 75, 100]
color_c = {}
for i in self.qc_m.keys():
j = str(i)
if self.qc_m[i] > 12:
color_c[j] = '#b30000'
elif self.qc_m[i] > 9:
color_c[j] = '#e34a33'
elif self.qc_m[i] > 6:
color_c[j] = '#fc8d59'
elif self.qc_m[i] > 3:
color_c[j] = '#fdcc8a'
else:
color_c[j] = '#fef0d9'
tmp = []
for i in range(0, 18, 5):
tmp_nodes = select(self.qc_m, i)
qc_g = self.g.subgraph(tmp_nodes)
for j in num:
if i == j == 0:
continue
tmp_g = generate_network_threshold(qc_g, j)
if j == 0:
tmp.append((i, *largest_size_qc(tmp_g, self.device_count)))
plot_qc_map(tmp_g, 'qc', color_c, self.device_count, pos, i, j, self.date, self.g)
if not os.path.exists('perco_diff_level/'+ self.date.strftime('%m_%d')+'.csv'):
a='x'
else:
a='w'
with open('perco_diff_level/'+ self.date.strftime('%m_%d')+'.csv', mode=a) as edges:
csvwriter = csv.writer(edges)
csvwriter.writerow(['inner_qc_perco', 'qc', 'gc_size'])
csvwriter.writerow([0, self.qc, self.gc_node_size])
for i in tmp:
csvwriter.writerow(i)
# color_c = {}
# for i in self.qca_m.keys():
# j = str(i)
# if self.qca_m[i] > 12:
# color_c[j] = 'red'
# elif self.qca_m[i] > 9:
# color_c[j] = 'darkorange'
# elif self.qca_m[i] > 6:
# color_c[j] = 'orange'
# elif self.qca_m[i] > 3:
# color_c[j] = 'gold'
# else:
# color_c[j] = 'wheat'
#
# for i in range(0, 18, 5):
# tmp_nodes = select(self.qca_m, i)
# qc_g = self.g.subgraph(tmp_nodes)
# for j in num:
# if i == j == 0:
# continue
# tmp_g = generate_network_threshold(qc_g, j)
# plot_qc_map(tmp_g, 'qca', color_c, self.device_count, pos, i, j, self.date)
#
# color_c = {}
# for i in self.qcf_m.keys():
# j = str(i)
# if self.qcf_m[i] > 60:
# color_c[j] = 'red'
# elif self.qcf_m[i] > 45:
# color_c[j] = 'darkorange'
# elif self.qcf_m[i] > 30:
# color_c[j] = 'orange'
# elif self.qcf_m[i] > 15:
# color_c[j] = 'gold'
# else:
# color_c[j] = 'wheat'
#
# for i in range(0, 75, 30):
# tmp_nodes = select(self.qcf_m, i)
# qc_g = self.g.subgraph(tmp_nodes)
# for j in num:
# if i == j == 0:
# continue
# tmp_g = generate_network_threshold(qc_g, j)
# plot_qc_map(tmp_g, 'qcf', color_c, self.device_count, pos, i, j, self.date)
return
| 38.790861
| 149
| 0.509197
|
3509989fa9e5f811b55a930f69c060902c3edcc6
| 1,296
|
py
|
Python
|
ddns/config.py
|
dongbum/DDNS-Route53
|
936b2ebe433a2f90f020b88bce13a236de3b5fbc
|
[
"MIT"
] | 11
|
2019-08-26T12:44:21.000Z
|
2022-01-25T08:54:11.000Z
|
ddns/config.py
|
dongbum/DDNS-Route53
|
936b2ebe433a2f90f020b88bce13a236de3b5fbc
|
[
"MIT"
] | 2
|
2019-10-01T01:22:56.000Z
|
2019-10-02T01:34:35.000Z
|
ddns/config.py
|
dongbum/DDNS-Route53
|
936b2ebe433a2f90f020b88bce13a236de3b5fbc
|
[
"MIT"
] | 1
|
2021-04-23T18:15:58.000Z
|
2021-04-23T18:15:58.000Z
|
# -*- coding: utf-8 -*-
import os, configparser
from ddns.customerror import ConfigParserError
class Config:
def __init__(self):
try:
if os.path.exists('config.ini'):
config = configparser.ConfigParser()
config.read('config.ini')
self.domain = config['DEFAULT']['DOMAIN']
self.aws_hosted_zone_id = config['DEFAULT']['AWS_HOSTED_ZONE_ID']
self.aws_access_key_id = config['DEFAULT']['AWS_ACCESS_KEY_ID']
self.aws_secret_access_key = config['DEFAULT']['AWS_ACCESS_SECRET_KEY']
self.get_ip = config['DEFAULT']['GET_IP']
self.check_url = config['DEFAULT']['CHECK_URL']
self.log = config['DEFAULT']['LOG']
else:
self.domain = os.environ['DOMAIN']
self.aws_hosted_zone_id = os.environ['AWS_HOSTED_ZONE_ID']
self.aws_access_key_id = os.environ['AWS_ACCESS_KEY_ID']
self.aws_secret_access_key = os.environ['AWS_ACCESS_SECRET_KEY']
self.get_ip = os.environ['GET_IP']
self.check_url = os.environ['CHECK_URL']
self.log = os.environ['LOG']
        except Exception as e:
            raise ConfigParserError(f'Load config failed: {e}')
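# A hedged example of the config.ini layout implied by the keys read above
# (the DEFAULT section and key names come from the code; the values are
# placeholders, not real credentials or endpoints):
#
#   [DEFAULT]
#   DOMAIN = home.example.com
#   AWS_HOSTED_ZONE_ID = Z0000000000000
#   AWS_ACCESS_KEY_ID = AKIA...
#   AWS_ACCESS_SECRET_KEY = ...
#   GET_IP = <setting controlling how the public IP is obtained>
#   CHECK_URL = <URL used for the connectivity/IP check>
#   LOG = <logging setting>
#
# When config.ini is absent, the same names are read from environment
# variables instead.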
| 44.689655
| 87
| 0.579475
|
e3a0dca60469edc07da75854bb783a292ee5ff1f
| 7,839
|
py
|
Python
|
videoanalyst/data/target/target_impl/utils/make_densebox_target_v1.py
|
TragedyN/SiamFCpp
|
65d80a66eb40d81ca09fa2dbf32636fbc414ec0d
|
[
"MIT"
] | 737
|
2019-12-24T13:34:43.000Z
|
2022-03-28T11:38:24.000Z
|
videoanalyst/data/target/target_impl/utils/make_densebox_target_v1.py
|
ShiAngWang/video_analyst
|
de4f86363cc408695428b423e8d6e346aa35149b
|
[
"MIT"
] | 129
|
2020-02-13T04:08:28.000Z
|
2022-03-17T04:13:09.000Z
|
videoanalyst/data/target/target_impl/utils/make_densebox_target_v1.py
|
ShiAngWang/video_analyst
|
de4f86363cc408695428b423e8d6e346aa35149b
|
[
"MIT"
] | 179
|
2019-12-31T04:53:12.000Z
|
2022-03-25T06:32:20.000Z
|
# encoding: utf-8
# [Version 1] this is an archived version of the densebox target maker
# current version is under _make_densebox_target.py_ (i.e. without "_v1" suffix)
import os
from typing import Dict, Tuple
import numpy as np
DUMP_FLAG = False # dump intermediate results for debugging
DUMP_DIR = "dump"
DUMP_SUFFIX = "v1"
if not os.path.exists(DUMP_DIR):
os.makedirs(DUMP_DIR)
def make_densebox_target(gt_boxes: np.array, config: Dict) -> Tuple:
""" v1
Model training target generation function for densebox
Arguments
---------
gt_boxes : np.array
ground truth bounding boxes with class, shape=(N, 5), order=(x0, y0, x1, y1, class)
config: configuration of target making (old format)
Keys
----
x_size : int
search image size
score_size : int
score feature map size
total_stride : int
total stride of backbone
score_offset : int
offset between the edge of score map and the border of the search image
Returns
-------
Tuple
cls_res_final : np.array
class
shape=(N, 1)
ctr_res_final : np.array
shape=(N, 1)
gt_boxes_res_final : np.array
shape=(N, 4)
# previous format
# shape=(N, 6), order=(class, center-ness, left_offset, top_offset, right_offset, bottom_offset)
"""
x_size = config["x_size"]
score_size = config["score_size"]
total_stride = config["total_stride"]
score_offset = config["score_offset"]
eps = 1e-5
raw_height, raw_width = x_size, x_size
# append class dimension to gt_boxes if ignored
if gt_boxes.shape[1] == 4:
gt_boxes = np.concatenate(
[gt_boxes, np.ones(
(gt_boxes.shape[0], 1))], axis=1) # boxes_cnt x 5
# l, t, r, b
gt_boxes = np.concatenate([np.zeros((1, 5)), gt_boxes]) # (boxes_cnt, 5)
gt_boxes_area = (np.abs(
(gt_boxes[:, 2] - gt_boxes[:, 0]) * (gt_boxes[:, 3] - gt_boxes[:, 1])))
gt_boxes = gt_boxes[np.argsort(
gt_boxes_area)] # sort gt_boxes by area, ascending order
boxes_cnt = len(gt_boxes) # number of gt_boxes
shift_x = np.arange(0, raw_width).reshape(-1, 1)
shift_y = np.arange(0, raw_height).reshape(-1, 1)
shift_x, shift_y = np.meshgrid(shift_x, shift_y) # (H, W)
# (H, W, #boxes, 1d-offset(l/t/r/b) )
off_l = (shift_x[:, :, np.newaxis, np.newaxis] -
gt_boxes[np.newaxis, np.newaxis, :, 0, np.newaxis])
off_t = (shift_y[:, :, np.newaxis, np.newaxis] -
gt_boxes[np.newaxis, np.newaxis, :, 1, np.newaxis])
off_r = -(shift_x[:, :, np.newaxis, np.newaxis] -
gt_boxes[np.newaxis, np.newaxis, :, 2, np.newaxis])
off_b = -(shift_y[:, :, np.newaxis, np.newaxis] -
gt_boxes[np.newaxis, np.newaxis, :, 3, np.newaxis])
if DUMP_FLAG:
off_l.dump("{}/off_l_{}.npz".format(DUMP_DIR, DUMP_SUFFIX))
off_t.dump("{}/off_t_{}.npz".format(DUMP_DIR, DUMP_SUFFIX))
off_r.dump("{}/off_r_{}.npz".format(DUMP_DIR, DUMP_SUFFIX))
off_b.dump("{}/off_b_{}.npz".format(DUMP_DIR, DUMP_SUFFIX))
# centerness
center = ((np.minimum(off_l, off_r) * np.minimum(off_t, off_b)) /
(np.maximum(off_l, off_r) * np.maximum(off_t, off_b) + eps))
if DUMP_FLAG:
center.dump("{}/center_{}.npz".format(DUMP_DIR, DUMP_SUFFIX))
center = np.squeeze(np.sqrt(np.abs(center)))
center[:, :, 0] = 0
offset = np.concatenate([off_l, off_t, off_r, off_b],
axis=3) # h x w x boxes_cnt * 4
if DUMP_FLAG:
offset.dump("{}/offset_{}.npz".format(DUMP_DIR, DUMP_SUFFIX))
cls = gt_boxes[:, 4]
cls_res_list = []
ctr_res_list = []
gt_boxes_res_list = []
fm_height, fm_width = score_size, score_size
fm_size_list = []
fm_strides = [total_stride]
fm_offsets = [score_offset]
for fm_i in range(len(fm_strides)):
fm_size_list.append([fm_height, fm_width])
fm_height = int(np.ceil(fm_height / 2))
fm_width = int(np.ceil(fm_width / 2))
fm_size_list = fm_size_list[::-1]
for fm_i, (stride, fm_offset) in enumerate(zip(fm_strides, fm_offsets)):
fm_height = fm_size_list[fm_i][0]
fm_width = fm_size_list[fm_i][1]
shift_x = np.arange(0, fm_width)
shift_y = np.arange(0, fm_height)
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
xy = np.vstack(
(shift_y.ravel(), shift_x.ravel())).transpose() # (hxw) x 2
# floor(stride / 2) + x * stride?
off_xy = offset[fm_offset + xy[:, 0] * stride,
fm_offset + xy[:, 1] * stride] # will reduce dim by 1
# off_max_xy = off_xy.max(axis=2) # max of l,t,r,b
off_valid = np.zeros((fm_height, fm_width, boxes_cnt))
is_in_boxes = (off_xy > 0).all(axis=2)
# is_in_layer = (off_max_xy <=
# config.sep_win[fm_i]) & (off_max_xy >= config.sep_win[fm_i + 1])
off_valid[
xy[:, 0],
xy[:,
1], :] = is_in_boxes #& is_in_layer # xy[:, 0], xy[:, 1] reduce dim by 1 to match is_in_boxes.shape & is_in_layer.shape
off_valid[:, :, 0] = 0 # h x w x boxes_cnt
hit_gt_ind = np.argmax(off_valid, axis=2) # h x w
# gt_boxes
gt_boxes_res = np.zeros((fm_height, fm_width, 4))
gt_boxes_res[xy[:, 0],
xy[:, 1]] = gt_boxes[hit_gt_ind[xy[:, 0], xy[:, 1]], :4]
gt_boxes_res_list.append(gt_boxes_res.reshape(-1, 4))
# cls
cls_res = np.zeros((fm_height, fm_width))
cls_res[xy[:, 0], xy[:, 1]] = cls[hit_gt_ind[xy[:, 0], xy[:, 1]]]
cls_res_list.append(cls_res.reshape(-1))
# center
center_res = np.zeros((fm_height, fm_width))
center_res[xy[:, 0], xy[:, 1]] = center[fm_offset +
xy[:, 0] * stride, fm_offset +
xy[:, 1] * stride,
hit_gt_ind[xy[:, 0], xy[:, 1]]]
ctr_res_list.append(center_res.reshape(-1))
# from IPython import embed;embed()
cls_res_final = np.concatenate(cls_res_list,
axis=0)[:, np.newaxis].astype(np.float32)
ctr_res_final = np.concatenate(ctr_res_list,
axis=0)[:, np.newaxis].astype(np.float32)
gt_boxes_res_final = np.concatenate(gt_boxes_res_list,
axis=0).astype(np.float32)
# choose pos and neg point
# labels = np.empty((len(cls_res_final),), dtype=np.float32)
# labels.fill(-1)
#
# pos_index= np.where(cls_res_final > 0)
# neg_index = np.where(cls_res_final == 0)
# if len(pos_index[0]) > config.rpn_pos_samples:
# np.random.shuffle(pos_index[0])
# selected_pos = pos_index[0][:config.rpn_pos_samples]
# else:
# selected_pos = pos_index[0]
#
# neg_num = config.rpn_total_samples - len(selected_pos)
# np.random.shuffle(neg_index[0])
# selected_neg = neg_index[0][:neg_num]
#
# labels[selected_pos] = 1
# labels[selected_neg] = 0
# labels = labels[:, np.newaxis]
# return np.concatenate([cls_res_final, ctr_res_final, gt_boxes_res_final], axis=1)
return cls_res_final, ctr_res_final, gt_boxes_res_final
if __name__ == '__main__':
# gt_boxes
gt_boxes = np.asarray([[13, 25, 100, 140, 1]])
config_dict = dict(
x_size=303,
score_size=17,
total_stride=8,
score_offset=(303 - 1 - (17 - 1) * 8) // 2,
)
target = make_densebox_target(gt_boxes, config_dict)
for v in target:
print("{}".format(v.shape))
from IPython import embed
embed()
| 37.328571
| 136
| 0.579028
|
80d539219eb1f2429b8f8f63bf623bb6e856a614
| 539
|
py
|
Python
|
sandbox/length-dist.py
|
ctSkennerton/khmer
|
f5428c5bdfe009ce39b125fa6e18077c534dc747
|
[
"BSD-3-Clause"
] | null | null | null |
sandbox/length-dist.py
|
ctSkennerton/khmer
|
f5428c5bdfe009ce39b125fa6e18077c534dc747
|
[
"BSD-3-Clause"
] | null | null | null |
sandbox/length-dist.py
|
ctSkennerton/khmer
|
f5428c5bdfe009ce39b125fa6e18077c534dc747
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2013. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: khmer-project@idyll.org
#
import sys
import screed
from screed import fasta
filein = sys.argv[1]
fp = open(filein)
lengths = [0] * 100
for n, record in enumerate(fasta.fasta_iter(fp)):
length = len(record['sequence']) - 32
lengths[length] += 1
for n, i in enumerate(lengths):
print n, i
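# Usage sketch (the filename is a placeholder): the script takes one FASTA
# file argument and prints a histogram of (sequence length - 32) counts, e.g.
#
#   python length-dist.py contigs.fa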
| 22.458333
| 74
| 0.697588
|
4d8ae1edbbe0b0f29458d56894d484cc2f3dc5fb
| 2,006
|
py
|
Python
|
src/bookmarklet_result/show.py
|
artilf/bookmarks-lambda
|
f1d0c9d94f5793240e844e4d99e30fa2e5a331cf
|
[
"MIT"
] | null | null | null |
src/bookmarklet_result/show.py
|
artilf/bookmarks-lambda
|
f1d0c9d94f5793240e844e4d99e30fa2e5a331cf
|
[
"MIT"
] | null | null | null |
src/bookmarklet_result/show.py
|
artilf/bookmarks-lambda
|
f1d0c9d94f5793240e844e4d99e30fa2e5a331cf
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from typing import Optional
from jinja2 import Template
from logger.my_logger import MyLogger
from models.article import Article
from tools.base64 import urlsafe_decode
from tools.bookmarklet_tools import (
get_raw_encoded_article,
get_raw_encoded_message,
is_success,
)
logger = MyLogger(__name__)
SECOND = 5
def main(event):
article = get_article(event)
if is_success(event):
page = create_success_page(article)
return create_response(page)
else:
message = get_message(event)
page = create_error_page(message, article)
return create_response(page)
def create_response(page: str):
return {"statusCode": 200, "headers": {"Content-Type": "text/html"}, "body": page}
def get_article(event) -> Optional[Article]:
raw_encoded_article = get_raw_encoded_article(event)
if raw_encoded_article is None:
return None
try:
text = urlsafe_decode(raw_encoded_article).decode()
return Article.loads(text)
except Exception as e:
logger.warning(f"Exception occurred: {e}")
return None
def get_message(event) -> str:
raw_encoded_message = get_raw_encoded_message(event)
if raw_encoded_message is None:
return str(None)
return urlsafe_decode(raw_encoded_message).decode()
def get_failed_template():
rel_path = "templates/failed.html.j2"
path = Path(__file__).parent.joinpath(rel_path).resolve()
return open(str(path)).read()
def get_success_template():
rel_path = "templates/success.html.j2"
path = Path(__file__).parent.joinpath(rel_path).resolve()
return open(str(path)).read()
def create_error_page(message: str, article: Optional[Article]) -> str:
template = Template(get_failed_template())
return template.render(message=message, article=article)
def create_success_page(article: Optional[Article]) -> str:
template = Template(get_success_template())
return template.render(article=article)
| 27.479452
| 86
| 0.721336
|
9513a05700830089ee0d3201247913353cd3e934
| 1,400
|
py
|
Python
|
airbyte-integrations/bases/base-normalization/normalization/destination_type.py
|
rajatariya21/airbyte
|
11e70a7a96e2682b479afbe6f709b9a5fe9c4a8d
|
[
"MIT"
] | null | null | null |
airbyte-integrations/bases/base-normalization/normalization/destination_type.py
|
rajatariya21/airbyte
|
11e70a7a96e2682b479afbe6f709b9a5fe9c4a8d
|
[
"MIT"
] | 4
|
2021-04-30T08:10:26.000Z
|
2021-04-30T13:53:34.000Z
|
airbyte-integrations/bases/base-normalization/normalization/destination_type.py
|
rajatariya21/airbyte
|
11e70a7a96e2682b479afbe6f709b9a5fe9c4a8d
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from enum import Enum
class DestinationType(Enum):
BIGQUERY = "bigquery"
POSTGRES = "postgres"
REDSHIFT = "redshift"
SNOWFLAKE = "snowflake"
@classmethod
def from_string(cls, string_value: str) -> "DestinationType":
return DestinationType[string_value.upper()]
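# Minimal usage sketch (not part of the original module): from_string simply
# uppercases the configured value and looks it up as an enum member name.
if __name__ == "__main__":
    dest = DestinationType.from_string("postgres")
    assert dest is DestinationType.POSTGRES
    print(dest.value)  # -> postgres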
| 38.888889
| 80
| 0.757857
|
fc5c2cb55b6b215970099f86c6bdfbcda89c8934
| 8,087
|
py
|
Python
|
Result.py
|
UmeedChandel/FaceGrab
|
9d7e226c790bcd9f5b3c4297765b6d403a7a7838
|
[
"MIT"
] | 1
|
2021-06-07T16:22:53.000Z
|
2021-06-07T16:22:53.000Z
|
Result.py
|
Tulikajain1210/FaceGrab
|
b1453dc1ccd077fdd67c4683dcb0098fb37f4068
|
[
"MIT"
] | 1
|
2021-06-07T16:33:14.000Z
|
2021-06-07T16:33:14.000Z
|
Result.py
|
Tulikajain1210/FaceGrab
|
b1453dc1ccd077fdd67c4683dcb0098fb37f4068
|
[
"MIT"
] | 2
|
2021-06-07T03:04:32.000Z
|
2021-06-07T16:21:51.000Z
|
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QPixmap
class Ui_ResultPage(object):
def openWindow(self):
from Search import Ui_SearchPage
self.window = QtWidgets.QDialog()
self.ui = Ui_SearchPage()
self.ui.setupUi(self.window)
self.window.show()
def setupUi(self, Dialog,name,date,time,loc,cam_id,id):
Dialog.setObjectName("Dialog")
Dialog.resize(700, 700)
Dialog.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.label_bg = QtWidgets.QLabel(Dialog)
self.label_bg.setGeometry(QtCore.QRect(0, 0, 700, 700))
self.label_bg.setStyleSheet("background-image: url(:/newPrefix/BG-FB1.jpg);")
self.label_bg.setText("")
self.label_bg.setObjectName("label_bg")
self.label_5 = QtWidgets.QLabel(Dialog)
self.label_5.setGeometry(QtCore.QRect(60, 470, 121, 31))
font = QtGui.QFont()
font.setFamily("Arial Rounded MT Bold")
font.setPointSize(12)
self.label_5.setFont(font)
self.label_5.setStyleSheet("color: rgb(47, 67, 195);\n"
"color: rgb(0, 0, 0);")
self.label_5.setObjectName("label_5")
self.label_1 = QtWidgets.QLabel(Dialog)
self.label_1.setGeometry(QtCore.QRect(130, 110, 81, 31))
font = QtGui.QFont()
font.setFamily("Arial Rounded MT Bold")
font.setPointSize(17)
font.setBold(False)
font.setWeight(50)
self.label_1.setFont(font)
self.label_1.setStyleSheet("color: rgb(9, 13, 84);")
self.label_1.setObjectName("label_1")
self.label_6 = QtWidgets.QLabel(Dialog)
self.label_6.setGeometry(QtCore.QRect(400, 480, 55, 21))
font = QtGui.QFont()
font.setFamily("Arial Rounded MT Bold")
font.setPointSize(12)
self.label_6.setFont(font)
self.label_6.setStyleSheet("color: rgb(47, 67, 195);\n"
"color: rgb(0, 0, 0);")
self.label_6.setObjectName("label_6")
self.result_name = QtWidgets.QLabel(Dialog)
self.result_name.setGeometry(QtCore.QRect(230, 200, 201, 16))
font = QtGui.QFont()
font.setFamily("Arial Rounded MT Bold")
font.setPointSize(12)
self.result_name.setFont(font)
self.result_name.setObjectName("result_name")
self.result_name.setText(name)
self.label_8 = QtWidgets.QLabel(Dialog)
self.label_8.setGeometry(QtCore.QRect(400, 520, 55, 16))
font = QtGui.QFont()
font.setFamily("Arial Rounded MT Bold")
font.setPointSize(12)
self.label_8.setFont(font)
self.label_8.setStyleSheet("color: rgb(47, 67, 195);\n"
"color: rgb(0, 0, 0);")
self.label_8.setObjectName("label_8")
self.label_2 = QtWidgets.QLabel(Dialog)
self.label_2.setGeometry(QtCore.QRect(210, 90, 181, 51))
font = QtGui.QFont()
font.setFamily("Arial Rounded MT Bold")
font.setPointSize(35)
self.label_2.setFont(font)
self.label_2.setStyleSheet("color: rgb(9, 13, 84);")
self.label_2.setObjectName("label_2")
self.back = QtWidgets.QPushButton(Dialog)
self.back.setGeometry(QtCore.QRect(440, 600, 93, 28))
font = QtGui.QFont()
font.setFamily("Arial Rounded MT Bold")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.back.setFont(font)
self.back.setCursor(QtGui.QCursor(QtCore.Qt.OpenHandCursor))
self.back.setStyleSheet("border-radius : 10;\n"
"background-color: rgb(0,0,0);\n"
"color:rgb(255,255,255);")
self.back.setObjectName("back")
self.back.clicked.connect(Dialog.close)
self.result_cam_id = QtWidgets.QLabel(Dialog)
self.result_cam_id.setGeometry(QtCore.QRect(190, 480, 161, 16))
font = QtGui.QFont()
font.setFamily("Arial Rounded MT Bold")
font.setPointSize(12)
self.result_cam_id.setFont(font)
self.result_cam_id.setObjectName("result_cam_id")
self.result_cam_id.setText(cam_id)
self.label_3 = QtWidgets.QLabel(Dialog)
self.label_3.setGeometry(QtCore.QRect(390, 100, 181, 41))
font = QtGui.QFont()
font.setFamily("Arial Rounded MT Bold")
font.setPointSize(17)
self.label_3.setFont(font)
self.label_3.setStyleSheet("color: rgb(9, 13, 84);")
self.label_3.setObjectName("label_3")
self.result_date = QtWidgets.QLabel(Dialog)
self.result_date.setGeometry(QtCore.QRect(480, 520, 161, 16))
font = QtGui.QFont()
font.setFamily("Arial Rounded MT Bold")
font.setPointSize(12)
self.result_date.setFont(font)
self.result_date.setObjectName("result_date")
self.result_date.setText(date)
self.result_time = QtWidgets.QLabel(Dialog)
self.result_time.setGeometry(QtCore.QRect(480, 480, 161, 16))
font = QtGui.QFont()
font.setFamily("Arial Rounded MT Bold")
font.setPointSize(12)
self.result_time.setFont(font)
self.result_time.setObjectName("result_time")
self.result_time.setText(time)
self.result_photo = QtWidgets.QLabel(Dialog)
self.result_photo.setGeometry(QtCore.QRect(230, 250, 200, 200))
self.result_photo.setStyleSheet("border: 0.5px solid rgb(9, 13, 84);\n")
self.result_photo.setText("")
img = "ImageSourceDirectory/" + id + ".jpg"
self.result_photo.setPixmap(QtGui.QPixmap(img))
self.result_photo.setScaledContents(True)
self.result_photo.setObjectName("result_photo")
self.result_loc = QtWidgets.QLabel(Dialog)
self.result_loc.setGeometry(QtCore.QRect(190, 520, 161, 16))
font = QtGui.QFont()
font.setFamily("Arial Rounded MT Bold")
font.setPointSize(12)
self.result_loc.setFont(font)
self.result_loc.setObjectName("result_loc")
self.result_loc.setText(loc)
self.label_4 = QtWidgets.QLabel(Dialog)
self.label_4.setGeometry(QtCore.QRect(100, 200, 71, 21))
font = QtGui.QFont()
font.setFamily("Arial Rounded MT Bold")
font.setPointSize(12)
self.label_4.setFont(font)
self.label_4.setStyleSheet("color: rgb(47, 67, 195);\n"
"color: rgb(0, 0, 0);")
self.label_4.setObjectName("label_4")
self.label_7 = QtWidgets.QLabel(Dialog)
self.label_7.setGeometry(QtCore.QRect(80, 520, 91, 21))
font = QtGui.QFont()
font.setFamily("Arial Rounded MT Bold")
font.setPointSize(12)
self.label_7.setFont(font)
self.label_7.setStyleSheet("color: rgb(47, 67, 195);\n"
"color: rgb(0, 0, 0);")
self.label_7.setObjectName("label_7")
self.result_name.setStyleSheet("color: rgb(255,0,0);\n""")
self.result_cam_id.setStyleSheet("color: rgb(255,0,0);\n""")
self.result_time.setStyleSheet("color: rgb(255,0,0);\n""")
self.result_date.setStyleSheet("color: rgb(255,0,0);\n""")
self.result_loc.setStyleSheet("color: rgb(255,0,0);\n""")
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label_5.setText(_translate("Dialog", "Camera Id:"))
self.label_1.setText(_translate("Dialog", "FACE"))
self.label_6.setText(_translate("Dialog", "Time:"))
self.label_8.setText(_translate("Dialog", "Date:"))
self.label_2.setText(_translate("Dialog", "GRAB"))
self.label_3.setText(_translate("Dialog", "RESULTS"))
self.back.setText(_translate("Dialog", "BACK"))
self.label_4.setText(_translate("Dialog", "Name:"))
self.label_7.setText(_translate("Dialog", "Location:"))
import img_result_rc
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_ResultPage()
    # placeholder demo values; setupUi requires them in addition to the dialog
    ui.setupUi(Dialog, "Name", "01/01/2021", "12:00:00", "Location", "CAM-01", "0")
Dialog.show()
sys.exit(app.exec_())
| 42.119792
| 85
| 0.645975
|
dabf65e6165ab5872b2e42ca21ab0572435cd718
| 3,149
|
py
|
Python
|
data/process_data.py
|
kschuessler/udacity_disaster_response_pipeline
|
68ed6c79d50ce131a975a20eef1a860de6162a58
|
[
"FTL",
"CNRI-Python",
"blessing"
] | null | null | null |
data/process_data.py
|
kschuessler/udacity_disaster_response_pipeline
|
68ed6c79d50ce131a975a20eef1a860de6162a58
|
[
"FTL",
"CNRI-Python",
"blessing"
] | null | null | null |
data/process_data.py
|
kschuessler/udacity_disaster_response_pipeline
|
68ed6c79d50ce131a975a20eef1a860de6162a58
|
[
"FTL",
"CNRI-Python",
"blessing"
] | null | null | null |
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
INPUT:
messages_filepath - path to the csv file holding messages data
categories_filepath - path to the csv file holding categories data
OUTPUT:
df - a dataframe including all columns from the messages and categories files
"""
# load messages dataset
messages = pd.read_csv(messages_filepath)
# load categories dataset
categories = pd.read_csv(categories_filepath)
# merge datasets
df = messages.merge(categories, how='outer', on=['id'])
return df
def clean_data(df):
"""
INPUT:
df - a dataframe including all columns from the messages and categories files
OUTPUT:
df - a dataframe with separate columns for all response categories without duplicates
"""
# create a dataframe of the 36 individual category columns
categories = df['categories'].str.split(';', expand=True)
# extract new column names using the first row of the categories dataframe
row = categories[:1].values.flatten().tolist()
category_colnames = [x[:-2] for x in row]
# rename the columns of `categories`
categories.columns = category_colnames
for column in categories:
# set each value to be the last character of the string
categories[column] = [x.strip()[-1] for x in categories[column]]
# convert column from string to numeric
categories[column] = categories[column].astype(int)
# drop the original categories column from `df`
df = df.drop(columns=['categories'])
# concatenate the original dataframe with the new `categories` dataframe
df = pd.concat([df, categories], axis=1)
# drop duplicates
    df = df[~df.duplicated()]
return df
def save_data(df, database_filename):
"""
INPUT:
df - cleaned dataframe
database_filename - name of the database
OUTPUT:
n/a
"""
engine = create_engine(f'sqlite:///{database_filename}')
df.to_sql('messages', engine, index=False)
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main()
| 32.802083
| 93
| 0.656716
|
8d3ede7cac8abe7314143e211b13847bed4f0e15
| 812
|
py
|
Python
|
source_files/example_1/example_1_plot_beam_vibration_data.py
|
ARTS-Laboratory/Introducton-to-Python-for-engineers
|
e852b3b35a48afea7839fca5043708caf2f84b03
|
[
"MIT"
] | null | null | null |
source_files/example_1/example_1_plot_beam_vibration_data.py
|
ARTS-Laboratory/Introducton-to-Python-for-engineers
|
e852b3b35a48afea7839fca5043708caf2f84b03
|
[
"MIT"
] | null | null | null |
source_files/example_1/example_1_plot_beam_vibration_data.py
|
ARTS-Laboratory/Introducton-to-Python-for-engineers
|
e852b3b35a48afea7839fca5043708caf2f84b03
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 15 16:05:28 2019
@author: austin
"""
import IPython as IP
IP.get_ipython().magic('reset -sf')
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.close('all')
#%% Load and plot data
D = np.loadtxt('vibration_data/Vibration_measurement.txt',skiprows=23)
tt = D[:,0]
ac = D[:,1]
plt.figure(figsize=(6.5,3))
plt.plot(tt,ac,'-',label='test 1')
plt.plot(tt+0.1,ac,'--',label='test 2')
plt.plot(tt+0.2,ac,':',label='test 3')
plt.plot(tt+0.3,ac,'-.',label='test 4')
plt.grid(True)
plt.xlabel('time (s)')
plt.ylabel('acceleration (m/s$^2$)')
plt.legend(loc=2)
plt.tight_layout()
plt.savefig('example_1_150.png',dpi=150)
plt.savefig('example_1_300.png',dpi=300)
plt.savefig('example_1_pdf.pdf')
| 20.3
| 70
| 0.681034
|
02913434d5db4e3c09f6aa02eb890cf3c26c0d78
| 2,059
|
py
|
Python
|
setup.py
|
heylouiz/spidermon
|
3ae2c46d1cf5b46efb578798b881264be3e68394
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
heylouiz/spidermon
|
3ae2c46d1cf5b46efb578798b881264be3e68394
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
heylouiz/spidermon
|
3ae2c46d1cf5b46efb578798b881264be3e68394
|
[
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup, find_packages
test_requirements = [
"pytest>=2.7.0",
"pytest-cov",
"pytest-mock",
"jinja2",
"lxml",
"scrapy",
"slackclient>=1.3.0,<2.0.0",
"twisted>=19.7.0",
]
setup(
name="spidermon",
version="1.11.0",
url="https://github.com/scrapinghub/spidermon",
author="Scrapinghub",
author_email="info@scrapinghub.com",
description=("Spidermon is a framework to build monitors for Scrapy spiders."),
long_description=("Spidermon is a framework to build monitors for Scrapy spiders."),
license="BSD",
packages=find_packages(),
package_data={"spidermon": ["VERSION"]},
zip_safe=False,
include_package_data=True,
install_requires=["jsonschema[format]", "python-slugify", "six>=1.11.0"],
tests_require=test_requirements,
extras_require={
# Specific monitors and tools to support notifications and reports
"monitoring": [
"scrapy",
"Jinja2",
"slackclient>=1.3.0,<2.0.0",
"boto",
"premailer",
"sentry-sdk",
],
# Data validation
"validation": ["schematics"],
# Tools to run the tests
"tests": test_requirements,
"pep8": ["black"],
# Tools to build and publish the documentation
"docs": ["sphinx", "sphinx-rtd-theme", "s3cmd"],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Scrapy",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Internet :: WWW/HTTP",
"Topic :: System :: Monitoring",
],
)
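# Usage sketch (assumed commands, not part of setup.py): the extras_require
# groups above map onto pip "extras", for example:
#
#   pip install spidermon                          # core only
#   pip install "spidermon[monitoring]"            # Scrapy monitors + notifiers
#   pip install "spidermon[validation]"            # schematics-based validation
#   pip install "spidermon[monitoring,validation]"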
| 32.171875
| 88
| 0.581836
|
2e87e8e8e61abe9afb627a3a71c1f408cc48dffb
| 7,903
|
py
|
Python
|
tests/units/consensus/change_candidate_test.py
|
iconloop/LFT2
|
3862d63aaf148f99240c6d4a54408dc2520b94c6
|
[
"Apache-2.0"
] | 23
|
2020-02-10T10:08:20.000Z
|
2021-11-17T02:02:06.000Z
|
tests/units/consensus/change_candidate_test.py
|
iconloop/LFT2
|
3862d63aaf148f99240c6d4a54408dc2520b94c6
|
[
"Apache-2.0"
] | 3
|
2020-03-05T07:18:16.000Z
|
2020-06-01T09:03:11.000Z
|
tests/units/consensus/change_candidate_test.py
|
iconloop/LFT2
|
3862d63aaf148f99240c6d4a54408dc2520b94c6
|
[
"Apache-2.0"
] | 6
|
2020-02-21T02:02:40.000Z
|
2021-12-01T23:35:07.000Z
|
import os
import pytest
from typing import Tuple
from mock import MagicMock
from lft.app.data import DefaultDataFactory, DefaultData
from lft.app.vote import DefaultVoteFactory
from lft.app.epoch import RotateEpoch
from lft.consensus.messages.vote import Vote
from lft.event import EventSystem
from lft.consensus import Consensus
from lft.consensus.messages.data import Data
from lft.consensus.events import RoundEndEvent
@pytest.mark.asyncio
async def test_candidate_change_by_vote():
event_system, consensus, genesis_data = await setup_consensus()
# Genesis(E0R0) -> Data10(E1R0)
data10 = await new_and_receive_data(consensus=consensus,
candidate=genesis_data,
new_epoch_num=1,
new_round_num=0)
event_system.simulator.raise_event.assert_not_called()
await new_and_receive_votes(consensus=consensus,
data=data10)
event_system.simulator.raise_event.assert_called_once()
round_end_event = event_system.simulator.raise_event.call_args_list[0][0][0]
verify_round_end_event(round_end_event, data10)
event_system.simulator.raise_event.reset_mock()
# Genesis(E0R0) -> Data11(E1R1)
data11 = await new_and_receive_data(consensus=consensus,
candidate=genesis_data,
new_epoch_num=1,
new_round_num=1)
event_system.simulator.raise_event.assert_not_called()
await new_and_receive_votes(consensus=consensus,
data=data11)
event_system.simulator.raise_event.assert_called_once()
round_end_event = event_system.simulator.raise_event.call_args_list[0][0][0]
verify_round_end_event(round_end_event, data11)
@pytest.mark.asyncio
async def test_candidate_change_by_data():
event_system, consensus, genesis_data = await setup_consensus()
# Genesis(E0R0) -> Data10(E1R0)
data10 = await new_and_receive_data(consensus=consensus,
candidate=genesis_data,
new_epoch_num=1,
new_round_num=0)
event_system.simulator.raise_event.assert_not_called()
await new_and_receive_votes(consensus=consensus,
data=data10)
event_system.simulator.raise_event.assert_called_once()
round_end_event = event_system.simulator.raise_event.call_args_list[0][0][0]
verify_round_end_event(round_end_event, data10)
event_system.simulator.raise_event.reset_mock()
# Genesis(E0R0) -> Data11(E1R1)
data11 = await new_and_receive_data(consensus=consensus,
candidate=genesis_data,
new_epoch_num=1,
new_round_num=1)
event_system.simulator.raise_event.assert_not_called()
# Candidate11(E1R1) -> Data12(E1R2)
data12 = await new_and_receive_data(consensus=consensus,
candidate=data11,
new_epoch_num=1,
new_round_num=2)
round_end_event = event_system.simulator.raise_event.call_args_list[0][0][0]
verify_round_end_event(round_end_event, data11)
event_system.simulator.raise_event.reset_mock()
@pytest.mark.asyncio
async def test_candidate_change_recursively():
event_system, consensus, genesis_data = await setup_consensus()
data10 = await new_data(consensus=consensus,
candidate=genesis_data,
new_epoch_num=1,
new_round_num=0)
data11 = await new_data(consensus=consensus,
candidate=data10,
new_epoch_num=1,
new_round_num=1)
data12 = await new_data(consensus=consensus,
candidate=data11,
new_epoch_num=1,
new_round_num=2)
data13 = await new_data(consensus=consensus,
candidate=data12,
new_epoch_num=1,
new_round_num=3)
data14 = await new_data(consensus=consensus,
candidate=data13,
new_epoch_num=1,
new_round_num=4)
await receive_data(consensus, data10)
await receive_data(consensus, data12)
await receive_data(consensus, data14)
votes12 = await new_votes(consensus, data12)
await receive_votes(consensus, votes12)
event_system.simulator.raise_event.assert_not_called()
await receive_data(consensus, data11)
assert len(event_system.simulator.raise_event.call_args_list) == 3
round_end_event10 = event_system.simulator.raise_event.call_args_list[0][0][0]
verify_round_end_event(round_end_event10, data10)
round_end_event11 = event_system.simulator.raise_event.call_args_list[1][0][0]
verify_round_end_event(round_end_event11, data11)
round_end_event12 = event_system.simulator.raise_event.call_args_list[2][0][0]
verify_round_end_event(round_end_event12, data12)
async def new_data(consensus: Consensus, candidate: Data, new_epoch_num: int, new_round_num: int):
epoch = consensus._epoch_pool.get_epoch(new_epoch_num)
proposer = epoch.get_proposer_id(new_round_num)
data_factory = DefaultDataFactory(proposer)
prev_votes = await new_votes(consensus, candidate)
return await data_factory.create_data(candidate.number + 1, candidate.id, epoch.num, new_round_num, prev_votes)
async def new_and_receive_data(consensus: Consensus, candidate: Data, new_epoch_num: int, new_round_num: int):
data = await new_data(consensus, candidate, new_epoch_num, new_round_num)
await receive_data(consensus, data)
return data
async def receive_data(consensus: Consensus, data: Data):
await consensus.receive_data(data)
async def new_votes(consensus: Consensus, data: Data):
epoch = consensus._epoch_pool.get_epoch(data.epoch_num)
vote_factories = [
DefaultVoteFactory(voter) for voter in epoch.voters
]
return tuple([
await vote_factory.create_vote(data.id, data.prev_id, data.epoch_num, data.round_num)
for vote_factory in vote_factories
])
async def receive_votes(consensus: Consensus, votes: Tuple[Vote, ...]):
for vote in votes:
await consensus.receive_vote(vote)
async def new_and_receive_votes(consensus: Consensus, data: Data):
votes = await new_votes(consensus, data)
await receive_votes(consensus, votes)
return votes
def verify_round_end_event(round_end_event: RoundEndEvent, candidate_data: Data):
assert isinstance(round_end_event, RoundEndEvent)
assert round_end_event.epoch_num == candidate_data.epoch_num
assert round_end_event.round_num == candidate_data.round_num
assert round_end_event.candidate_id == candidate_data.id
assert round_end_event.commit_id == candidate_data.prev_id
async def setup_consensus():
node_id = b'x'
event_system = MagicMock(EventSystem())
data_factory = DefaultDataFactory(node_id)
vote_factory = DefaultVoteFactory(node_id)
consensus = Consensus(event_system, node_id=node_id, data_factory=data_factory, vote_factory=vote_factory)
voters = [os.urandom(16)]
epochs = [RotateEpoch(0, []), RotateEpoch(1, voters)]
datums = [DefaultData(id_=b'genesis', prev_id=b'', proposer_id=b'', number=0, epoch_num=0, round_num=0, prev_votes=())]
votes = []
await consensus.initialize(datums[0].prev_id, epochs, datums, votes)
event_system.simulator.raise_event.reset_mock()
return event_system, consensus, datums[0]
| 40.116751
| 123
| 0.670884
|
d8aec1f758f0d86a2f97b97b75372a3fadd57d40
| 358
|
py
|
Python
|
servicectl/exceptions.py
|
TheEdgeOfRage/servicectl
|
c3a52bc00caaa8272dcda96e5f63e43b84bffd56
|
[
"BSD-3-Clause"
] | null | null | null |
servicectl/exceptions.py
|
TheEdgeOfRage/servicectl
|
c3a52bc00caaa8272dcda96e5f63e43b84bffd56
|
[
"BSD-3-Clause"
] | null | null | null |
servicectl/exceptions.py
|
TheEdgeOfRage/servicectl
|
c3a52bc00caaa8272dcda96e5f63e43b84bffd56
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 Pavle Portic <pavle.portic@tilda.center>
#
# Distributed under terms of the BSD-3-Clause license.
class ServiceUpdateError(Exception):
pass
class StackRevertFailed(Exception):
pass
class ImageNotFound(Exception):
pass
class InternalDockerError(Exception):
pass
| 14.916667
| 59
| 0.73743
|
8083dab912703045d1ed97107132acfe420e7433
| 2,171
|
py
|
Python
|
src/cc_catalog_airflow/dags/provider_api_scripts/common/requester.py
|
ayushbansal07/cccatalog
|
27d1ae2b8b65d2bebf1e32d4541346a92382ba4a
|
[
"MIT"
] | 1
|
2020-02-07T22:47:47.000Z
|
2020-02-07T22:47:47.000Z
|
src/cc_catalog_airflow/dags/provider_api_scripts/common/requester.py
|
ayushbansal07/cccatalog
|
27d1ae2b8b65d2bebf1e32d4541346a92382ba4a
|
[
"MIT"
] | null | null | null |
src/cc_catalog_airflow/dags/provider_api_scripts/common/requester.py
|
ayushbansal07/cccatalog
|
27d1ae2b8b65d2bebf1e32d4541346a92382ba4a
|
[
"MIT"
] | null | null | null |
import logging
import requests
import time
logger = logging.getLogger(__name__)
class DelayedRequester:
"""
Provides a method `get` that is a wrapper around `get` from the
`requests` module (i.e., it simply passes along whatever arguments it
receives). The difference is that when this class is initialized
with a non-zero `delay` parameter, it waits for at least that number
of seconds between consecutive requests. This is to avoid hitting
rate limits of APIs.
Optional Arguments:
delay: an integer giving the minimum number of seconds to wait
between consecutive requests via the `get` method.
"""
def __init__(self, delay=0):
self._DELAY = delay
self._last_request = 0
def get(self, url, params=None, **kwargs):
"""
        Make a GET request and return the response object, or None if the
        request raises an exception.
Required Arguments:
url: URL to make the request as a string.
params: Dictionary of query string params
**kwargs: Optional arguments that will be passed to `requests.get`
"""
logger.info(f'Processing request for url: {url}')
logger.info(f'Using query parameters {params}')
logger.info(f'Using headers {kwargs.get("headers")}')
self._delay_processing()
        self._last_request = time.time()
try:
response = requests.get(url, params=params, **kwargs)
if response.status_code == requests.codes.ok:
return response
else:
logger.warning(
f'Unable to request URL: {url}. '
f'Status code: {response.status_code}'
)
return response
except Exception as e:
logger.error('There was an error with the request.')
logger.info(f'{type(e).__name__}: {e}')
return None
def _delay_processing(self):
wait = self._DELAY - (time.time() - self._last_request)
if wait >= 0:
            logger.debug(f'Waiting {wait} second(s)')
time.sleep(wait)
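# Minimal usage sketch (URL and params are placeholders): with delay=1 the
# second call blocks until at least one second has passed since the previous
# request, which is the point of the wrapper.
if __name__ == "__main__":
    requester = DelayedRequester(delay=1)
    for page in (1, 2):
        response = requester.get("https://api.example.com/items",
                                 params={"page": page})
        if response is not None and response.status_code == requests.codes.ok:
            print(response.json())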
| 35.590164
| 74
| 0.607554
|
d3d93d4929b4fc5c6f03e4506ebd5da375be4b7d
| 4,583
|
py
|
Python
|
src/t/scripts/update.py
|
danpalmer/t
|
0f8f9eb90c754d477a19200b31c3b3c7cc142d8d
|
[
"MIT"
] | 1
|
2022-01-11T11:17:14.000Z
|
2022-01-11T11:17:14.000Z
|
src/t/scripts/update.py
|
danpalmer/t
|
0f8f9eb90c754d477a19200b31c3b3c7cc142d8d
|
[
"MIT"
] | null | null | null |
src/t/scripts/update.py
|
danpalmer/t
|
0f8f9eb90c754d477a19200b31c3b3c7cc142d8d
|
[
"MIT"
] | null | null | null |
import pathlib
import platform
import stat
import sys
import tarfile
import tempfile
from typing import Any, List, Tuple
import click
import httpx
from t import cli
from t.__version__ import VERSION
from t.settings import UPDATE_REPO
from t.utils import github, output
@cli.command(help=f"Check {'/'.join(UPDATE_REPO)} for updates")
@click.option(
"--repo",
type=(str, str),
metavar=" ".join(UPDATE_REPO),
default=UPDATE_REPO,
)
def check_for_update(repo: Tuple[str, str]) -> None:
releases = get_available_releases(repo)
if not releases:
output.fatal("No releases found")
update = get_update(releases)
if not update:
output.default("No update available", exit_with_code=0)
try:
asset = get_asset(update)
except UnsupportedPlatform as e:
output.fatal(str(e))
output.success("New version found")
output.default(f" {VERSION} => {update.tag_name} {asset.browser_download_url}")
output.default(" Update with `t self-update` or download manually")
@cli.command(help=f"Update to the latest release from {'/'.join(UPDATE_REPO)}")
@click.option(
"--path",
type=pathlib.Path,
default=pathlib.Path(sys.executable),
help="Override install path",
)
@click.option(
"--repo",
type=(str, str),
metavar=" ".join(UPDATE_REPO),
default=UPDATE_REPO,
)
@click.option(
"--force",
is_flag=True,
help="Update to latest regardless of version",
)
def self_update(path: pathlib.Path, repo: Tuple[str, str], force: bool) -> None:
if "python" in path.name:
output.fatal("Aborting update! (Looks like your in dev mode)")
output.default(f"Updating {path} from {'/'.join(repo)}")
if not click.confirm("Continue?"):
output.fatal("Aborting update")
releases = get_available_releases(repo)
if not releases:
output.fatal("No releases found")
update = get_update(releases, force=force)
if not update:
output.default("No update available", exit_with_code=0)
try:
asset = get_asset(update)
except UnsupportedPlatform as e:
output.fatal(str(e))
github_client = github.get_authenticated_client()
with tempfile.TemporaryDirectory() as tempdir:
temp_asset_path = pathlib.Path(tempdir) / asset.name
with temp_asset_path.open("wb") as f:
with httpx.stream(
"GET",
asset.url,
headers={
**github_client.headers,
"User-Agent": "t",
"Accept": "application/octet-stream",
},
) as response:
response.raise_for_status()
with click.progressbar(response.iter_bytes()) as bar:
for chunk in bar:
f.write(chunk)
tar = tarfile.open(temp_asset_path)
tar_members = tar.getmembers()
if not tar_members:
output.fatal("No items in release archive")
binary_contents = tar.extractfile(tar_members[0].name)
if binary_contents is None:
output.fatal("Could not extract 't' from release archive")
path.write_bytes(binary_contents.read())
path.chmod(stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
def get_available_releases(repo: Tuple[str, str]) -> List[Any]:
gh = github.get_authenticated_client()
try:
return gh.repos.list_releases(*repo)
except Exception:
output.fatal("Failed to get releases, have you run `config github-login`?")
def get_update(releases: List[Any], force=False) -> Any:
latest_release = releases[0]
if force:
return latest_release
new_version = int(latest_release.tag_name[1:])
if VERSION == "dev":
current_version = -1
else:
current_version = int(VERSION[1:])
if new_version <= current_version:
return None
return latest_release
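# A worked example of the comparison above, derived only from this function and
# assuming release tags of the form "v<number>": a latest release tagged "v12"
# gives new_version == 12; with VERSION == "v3", current_version == 3 and the
# release is returned, while VERSION == "dev" forces current_version to -1 so any
# release counts as an update. Passing force=True skips the comparison entirely.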
def get_asset(release: Any) -> Any:
platform = get_platform_name()
try:
return next(x for x in release.assets if x.name.startswith(platform))
except StopIteration:
raise UnsupportedPlatform(
f"No compatible asset found on release '{release.name}' "
f"for platform {platform}",
)
def get_platform_name() -> str:
if sys.platform.startswith("darwin"):
if platform.machine() == "arm64":
return "macos-arm64"
return "macos-x86_64"
    if sys.platform.startswith("linux"):
        return "linux-x86_64"
    raise UnsupportedPlatform(f"Unsupported platform: {sys.platform}")
class UnsupportedPlatform(ValueError):
pass
| 27.608434
| 84
| 0.634737
|
9ddbdf80e1894186454f2f6193d9774c47cff3c8
| 612
|
py
|
Python
|
hackerrank/Python/HTML Parser - Part 2/solution.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | 4
|
2020-07-24T01:59:50.000Z
|
2021-07-24T15:14:08.000Z
|
hackerrank/Python/HTML Parser - Part 2/solution.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
hackerrank/Python/HTML Parser - Part 2/solution.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
from abc import ABC
from html.parser import HTMLParser
class MyHTMLParser(HTMLParser, ABC):
def handle_comment(self, comment):
if '\n' in comment:
print('>>> Multi-line Comment')
else:
print('>>> Single-line Comment')
print(comment)
def handle_data(self, data):
if data == '\n':
return
print('>>> Data')
print(data)
if __name__ == '__main__':
html = ''
for i in range(int(input())):
html += input().rstrip()
html += '\n'
parser = MyHTMLParser()
parser.feed(html)
parser.close()
| 21.103448
| 44
| 0.542484
|
b3c08db3749a720d96f023204a519ecca5181f8b
| 124
|
py
|
Python
|
mypython/forloop1.py
|
priyalbhatewara123/Python-programs
|
90b84310101b76c14b89f256ee9206711908a4ae
|
[
"bzip2-1.0.6"
] | null | null | null |
mypython/forloop1.py
|
priyalbhatewara123/Python-programs
|
90b84310101b76c14b89f256ee9206711908a4ae
|
[
"bzip2-1.0.6"
] | null | null | null |
mypython/forloop1.py
|
priyalbhatewara123/Python-programs
|
90b84310101b76c14b89f256ee9206711908a4ae
|
[
"bzip2-1.0.6"
] | null | null | null |
num = [11,89,7,48,100]
for nums in num:
if nums % 5 == 0:
print(nums)
break
else:
print("not found")
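# Note on the pattern above, assuming the else block is paired with the for loop
# (the usual search-with-break idiom): the else runs only when the loop finishes
# without hitting `break`. With num = [11,89,7,48,100], the value 100 is divisible
# by 5, so 100 is printed, the loop breaks, and "not found" is never reached.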
| 17.714286
| 22
| 0.516129
|
59156d9f3d98665c18144e626347dd2c3632106d
| 5,716
|
py
|
Python
|
opencga-client/src/main/python/pyopencga/rest_clients/user_client.py
|
opencb/opencga
|
b598b0adbda424c98bf6aa8b862cbb6d2992419b
|
[
"Apache-2.0"
] | 146
|
2015-03-05T19:14:22.000Z
|
2022-03-30T03:46:48.000Z
|
opencga-client/src/main/python/pyopencga/rest_clients/user_client.py
|
opencb/opencga
|
b598b0adbda424c98bf6aa8b862cbb6d2992419b
|
[
"Apache-2.0"
] | 1,623
|
2015-01-27T00:30:36.000Z
|
2022-03-31T14:42:33.000Z
|
opencga-client/src/main/python/pyopencga/rest_clients/user_client.py
|
opencb/opencga
|
b598b0adbda424c98bf6aa8b862cbb6d2992419b
|
[
"Apache-2.0"
] | 93
|
2015-01-28T17:13:01.000Z
|
2022-03-09T20:46:47.000Z
|
"""
WARNING: AUTOGENERATED CODE
This code was generated by a tool.
Autogenerated on: 2021-05-21 14:06:25
Manual changes to this file may cause unexpected behavior in your application.
Manual changes to this file will be overwritten if the code is regenerated.
"""
from pyopencga.rest_clients._parent_rest_clients import _ParentRestClient
class User(_ParentRestClient):
"""
This class contains methods for the 'Users' webservices
Client version: 2.0.3
PATH: /{apiVersion}/users
"""
def __init__(self, configuration, token=None, login_handler=None, *args, **kwargs):
super(User, self).__init__(configuration, token, login_handler, *args, **kwargs)
def create(self, data=None, **options):
"""
Create a new user.
PATH: /{apiVersion}/users/create
:param dict data: JSON containing the parameters. (REQUIRED)
"""
return self._post(category='users', resource='create', data=data, **options)
def login(self, data=None, **options):
"""
Get identified and gain access to the system.
PATH: /{apiVersion}/users/login
:param dict data: JSON containing the authentication parameters.
"""
return self._post(category='users', resource='login', data=data, **options)
def password(self, data=None, **options):
"""
Change the password of a user.
PATH: /{apiVersion}/users/password
:param dict data: JSON containing the change of password parameters.
(REQUIRED)
"""
return self._post(category='users', resource='password', data=data, **options)
def configs(self, user, **options):
"""
Fetch a user configuration.
PATH: /{apiVersion}/users/{user}/configs
:param str user: User ID. (REQUIRED)
:param str name: Unique name (typically the name of the application).
"""
return self._get(category='users', resource='configs', query_id=user, **options)
def update_configs(self, user, data=None, **options):
"""
Add or remove a custom user configuration.
PATH: /{apiVersion}/users/{user}/configs/update
:param dict data: JSON containing anything useful for the application
such as user or default preferences. When removing, only the id
will be necessary. (REQUIRED)
:param str user: User ID. (REQUIRED)
:param str action: Action to be performed: ADD or REMOVE a group.
Allowed values: ['ADD', 'REMOVE']
"""
return self._post(category='users', resource='update', query_id=user, subcategory='configs', data=data, **options)
def filters(self, user, **options):
"""
Fetch user filters.
PATH: /{apiVersion}/users/{user}/filters
:param str user: User ID. (REQUIRED)
:param str id: Filter id. If provided, it will only fetch the
specified filter.
"""
return self._get(category='users', resource='filters', query_id=user, **options)
def update_filters(self, user, data=None, **options):
"""
Add or remove a custom user filter.
PATH: /{apiVersion}/users/{user}/filters/update
:param dict data: Filter parameters. When removing, only the 'name' of
the filter will be necessary. (REQUIRED)
:param str user: User ID. (REQUIRED)
:param str action: Action to be performed: ADD or REMOVE a group.
Allowed values: ['ADD', 'REMOVE']
"""
return self._post(category='users', resource='update', query_id=user, subcategory='filters', data=data, **options)
def update_filter(self, user, filter_id, data=None, **options):
"""
Update a custom filter.
PATH: /{apiVersion}/users/{user}/filters/{filterId}/update
:param dict data: Filter parameters. (REQUIRED)
:param str filter_id: Filter id. (REQUIRED)
:param str user: User ID. (REQUIRED)
"""
return self._post(category='users', resource='update', query_id=user, subcategory='filters', second_query_id=filter_id, data=data, **options)
def info(self, user, **options):
"""
Return the user information including its projects and studies.
PATH: /{apiVersion}/users/{user}/info
:param str user: User ID. (REQUIRED)
:param str include: Fields included in the response, whole JSON path
must be provided.
:param str exclude: Fields excluded in the response, whole JSON path
must be provided.
"""
return self._get(category='users', resource='info', query_id=user, **options)
def projects(self, user, **options):
"""
Retrieve the projects of the user.
PATH: /{apiVersion}/users/{user}/projects
:param str user: User ID. (REQUIRED)
:param str include: Fields included in the response, whole JSON path
must be provided.
:param str exclude: Fields excluded in the response, whole JSON path
must be provided.
:param int limit: Number of results to be returned.
:param int skip: Number of results to skip.
"""
return self._get(category='users', resource='projects', query_id=user, **options)
def update(self, user, data=None, **options):
"""
Update some user attributes.
PATH: /{apiVersion}/users/{user}/update
:param dict data: JSON containing the params to be updated. (REQUIRED)
:param str user: User ID. (REQUIRED)
"""
return self._post(category='users', resource='update', query_id=user, data=data, **options)
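# A hedged usage sketch (not part of the autogenerated client): the methods above
# map one-to-one onto the /users web services. Assuming a configuration object and
# token obtained elsewhere (their construction is not shown in this file), a caller
# could do something like:
#
#     user_client = User(configuration, token=token)
#     user_client.login(data={'user': 'demo', 'password': '...'})
#     user_client.info('demo', include='projects')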
| 35.725
| 149
| 0.627362
|
54d853097daa028f63b78cec5ea564979a0dca55
| 5,763
|
py
|
Python
|
bin/input_module_pagerduty_api_incidents.py
|
daianmartinho/TA-PagerDuty
|
d15e3709a80ffc8c3425b6dee2a6c2ebb22a9987
|
[
"MIT"
] | 3
|
2017-10-18T19:25:59.000Z
|
2021-06-28T19:18:00.000Z
|
bin/input_module_pagerduty_api_incidents.py
|
daianmartinho/TA-PagerDuty
|
d15e3709a80ffc8c3425b6dee2a6c2ebb22a9987
|
[
"MIT"
] | 2
|
2018-01-02T15:21:54.000Z
|
2018-01-23T16:11:44.000Z
|
bin/input_module_pagerduty_api_incidents.py
|
daianmartinho/TA-PagerDuty
|
d15e3709a80ffc8c3425b6dee2a6c2ebb22a9987
|
[
"MIT"
] | 2
|
2017-10-18T19:15:45.000Z
|
2018-03-28T16:55:17.000Z
|
# encoding = utf-8
import os
import sys
import time
import base64
import urlparse
import json
from datetime import datetime, timedelta
def validate_input(helper, definition):
pd_api_token = definition.parameters.get('api_token', None)
pd_pagesize = definition.parameters.get('api_limit', None)
pd_daysago = definition.parameters.get('days_ago', None)
pass
def collect_events(helper, ew):
# Retrieve runtime variables
api_key = helper.get_arg('api_token', None)
pd_pagesize = helper.get_arg('api_limit') or 100 #Page size of results
    pd_daysago = helper.get_arg('days_ago') or 365 #Max days ago to look back for incidents
inputname = helper.get_input_stanza_names()
inputsource = helper.get_input_type() + ":" + inputname
helper.log_info("input_type=pagerduty_api_incidents input={0:s} message='Collecting events.'".format(inputname))
    # Create initial time to query for incidents
initial_status = (datetime.utcnow() - timedelta(int(pd_daysago))).strftime("%Y-%m-%d")
now = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
# Create checkpoint key
opt_checkpoint = "pagerduty_api_incidents-{0:s}".format(inputname)
updated = now #Add meta value for troubleshooting
#Check for last query execution data in kvstore & generate if not present
try:
last_status = helper.get_check_point(opt_checkpoint) or initial_status
helper.log_debug("input_type=pagerduty_api_incidents input={0:s} message='Last successful checkpoint time.' last_status={1:s}".format(inputname,json.dumps(last_status)))
except Exception as e:
helper.log_error("input_type=pagerduty_api_incidents input={0:s} message='Unable to retrieve last execution checkpoint!'".format(inputname))
raise e
# Create API request parameters
header = {
'Authorization': 'Token token={0}'.format(api_key),
'Content-type': 'application/json',
'Accept': 'application/vnd.pagerduty+json;version=2'
}
url = "https://api.pagerduty.com/incidents"
method = "GET"
def get_incidents(since, until, offset):
params = {
'status': "acknowledged,triggered,resolved",
'since': since,
'until': until,
'offset': offset,
'limit': pd_pagesize
}
r = helper.send_http_request(url, method, parameters=params, payload=None, headers=header, cookies=None, verify=True, cert=None, timeout=None, use_proxy=True)
helper.log_info("input_type=pagerduty_api_incidents input={0:s} message='Requesting incident data from Pagerduty API.' url='{1:s}' parameters='{2:s}'".format(inputname,url,json.dumps(params)))
# Return API response code
r_status = r.status_code
# Return API request status_code
        if r_status == 429:
helper.log_info("input_type=pagerduty_api_incidents input={0:s} message='Too many requests, API throttled. Will retry in 10 seconds.' status_code={1:d}".format(inputname,r_status))
time.sleep(10)
r = helper.send_http_request(url, method, parameters=params, payload=None, headers=header, cookies=None, verify=True, cert=None, timeout=None, use_proxy=True)
        elif r_status != 200:
helper.log_error("input_type=pagerduty_api_incidents input={0:s} message='API request unsuccessful.' status_code={1:d}".format(inputname,r_status))
r.raise_for_status()
return r.json()
try:
has_results = True
offset = 0 #iterator for records returned from Pagerduty
i = 0 #iterator for indexed records processed
while has_results:
pd_incidents = get_incidents(last_status, None, offset)
            # Get incidents from the Pagerduty API as JSON
incidents = pd_incidents['incidents']
has_results = pd_incidents['more']
if len(incidents) == 0:
helper.log_info("input_type=pagerduty_api_incidents input={0:s} message='No records retrieved from Pagerduty API.' offset={1:d}".format(inputname,offset))
has_results = False
continue
for incident in incidents:
# Write event to index
ew.write_event(helper.new_event(source=inputsource, index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=json.dumps(incident)))
i += 1
helper.log_debug("input_type=pagerduty_api_incidents input={0:s} processed={1:d} entry_id={2:s}".format(inputname,i,incident['id']))
if pd_incidents['more']:
offset += pd_incidents['limit']
#helper.log_debug("input_type=pagerduty_api_incidents input={0:s} message='Getting next page.' link_next='{1:s}' offset='{2:s}'".format(inputname,url,offset))
#get_entries(since, until, offset)
else:
helper.log_debug("input_type=pagerduty_api_incidents input={0:s} message='No additional pages.'".format(inputname))
helper.log_debug("input_type=pagerduty_api_incidents input={0:s} processed={1:d}".format(inputname,i))
#Update last completed execution time
helper.save_check_point(opt_checkpoint,updated)
helper.log_info("input_type=pagerduty_api_incidents input={0:s} message='Collection complete.' indexed={1:d}".format(inputname,i))
helper.log_debug("input_type=pagerduty_api_incidents input={0:s} message='Storing checkpoint.' updated={1:s}".format(inputname,updated))
except Exception as error:
helper.log_error("input_type=pagerduty_api_incidents input={0:s} message='An unknown error occurred!'".format(inputname))
raise error
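# Pagination note, derived from the loop above: get_incidents() is first called with
# offset 0; while the response's 'more' flag is true the offset grows by the
# API-reported 'limit', so with the default api_limit of 100 the input walks the
# incident list 100 records at a time until PagerDuty reports no further pages.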
| 50.113043
| 200
| 0.671352
|
adc6f2e65df87623032490f50567d440e1a8c9a0
| 5,143
|
py
|
Python
|
nevergrad/instrumentation/test_variables.py
|
enthought/nevergrad
|
2a0f778aa316264e785d0e6305a897679b3b97f7
|
[
"MIT"
] | null | null | null |
nevergrad/instrumentation/test_variables.py
|
enthought/nevergrad
|
2a0f778aa316264e785d0e6305a897679b3b97f7
|
[
"MIT"
] | null | null | null |
nevergrad/instrumentation/test_variables.py
|
enthought/nevergrad
|
2a0f778aa316264e785d0e6305a897679b3b97f7
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import pytest
import numpy as np
from nevergrad.parametrization import parameter as p
from nevergrad.parametrization.test_parameter import check_parameter_features
from . import variables
from .variables import wrap_arg
def test_softmax_categorical_deterministic() -> None:
token = p.Instrumentation(variables.SoftmaxCategorical(["blu", "blublu", "blublublu"], deterministic=True))
assert token.data_to_arguments([1, 1, 1.01], deterministic=False) == wrap_arg("blublublu")
def test_softmax_categorical() -> None:
np.random.seed(12)
token = p.Instrumentation(variables.SoftmaxCategorical(["blu", "blublu", "blublublu"]))
assert token.data_to_arguments([0.5, 1.0, 1.5]) == wrap_arg("blublu")
assert token.data_to_arguments(token.arguments_to_data("blu"), deterministic=True) == wrap_arg("blu")
def test_ordered_discrete() -> None:
token = p.Instrumentation(variables.OrderedDiscrete(["blu", "blublu", "blublublu"]))
assert token.data_to_arguments([5]) == wrap_arg("blublublu")
assert token.data_to_arguments([0]) == wrap_arg("blublu")
assert token.data_to_arguments(token.arguments_to_data("blu"), deterministic=True) == wrap_arg("blu")
def test_gaussian() -> None:
token = variables.Gaussian(1, 3)
assert token.data_to_arguments([.5]) == wrap_arg(2.5)
data = token.arguments_to_data(12)
print(data)
assert token.data_to_arguments(data) == wrap_arg(12)
def test_scalar() -> None:
token = variables.Scalar(int)
assert token.data_to_arguments([.7]) == wrap_arg(1)
assert token.arguments_to_data(1).tolist() == [1.]
def test_array_as_ascalar() -> None:
var = variables.Array(1).exponentiated(10, -1).asscalar()
data = np.array([2])
output = var.data_to_arguments(data)
assert output == wrap_arg(0.01)
np.testing.assert_almost_equal(var.arguments_to_data(*output[0], **output[1]), data)
# int
var = variables.Array(1).asscalar(int)
assert var.data_to_arguments(np.array([.4])) == wrap_arg(0)
assert var.data_to_arguments(np.array([-.4])) == wrap_arg(0)
output = var.data_to_arguments(np.array([.6]))
assert output == wrap_arg(1)
assert type(output[0][0]) == int # pylint: disable=unidiomatic-typecheck
# errors
with pytest.raises(RuntimeError):
variables.Array(1).asscalar(int).asscalar(float)
with pytest.raises(RuntimeError):
variables.Array(2).asscalar(int)
with pytest.raises(ValueError):
variables.Array(1).asscalar(np.int64) # type: ignore
def test_array() -> None:
var = variables.Array(2, 2).affined(1000000).bounded(3, 5, transform="arctan")
data = np.array([-10, 10, 0, 0])
output = var.data_to_arguments(data)
np.testing.assert_almost_equal(output[0][0], [[3., 5], [4, 4]])
np.testing.assert_almost_equal(var.arguments_to_data(*output[0], **output[1]), data)
@pytest.mark.parametrize("value,expected", [(0, 0.01), (10, 0.1), (-10, 0.001), (20, 0.1), (9, 0.07943)]) # type: ignore
def test_log(value: float, expected: float) -> None:
var = variables.Log(0.001, 0.1)
out = var.data_to_arguments(np.array([value]))
np.testing.assert_approx_equal(out[0][0], expected, significant=4)
repr(var)
def test_log_int() -> None:
var = variables.Log(300, 10000, dtype=int)
out = var.data_to_arguments(np.array([0]))
assert out[0][0] == 1732
# note: 0.9/0.9482=0.9482/0.999
@pytest.mark.parametrize("value,expected", [(0, 0.9482), (-11, 0.9), (10, 0.999)]) # type: ignore
def test_log_9(value: float, expected: float) -> None:
var = variables.Log(0.9, 0.999)
out = var.data_to_arguments(np.array([value]))
np.testing.assert_approx_equal(out[0][0], expected, significant=4)
@pytest.mark.parametrize( # type: ignore
"var,data,expected",
[
(variables.Log(0.9, 0.999), [0], 0.9482),
(variables.Array(2).affined(10, 100), [0, 3], [100, 130]),
(variables.Scalar().affined(10, 100).bounded(-200, 200), [0], 198.7269),
(variables.Scalar(int).affined(10, 100).bounded(-200, 200), [0], 199),
(variables.Scalar().exponentiated(10, -1), [1], 0.1),
(variables.Scalar().exponentiated(2, 3), [4], 4096),
(variables.Scalar().affined(10, 100).bounded(-200, 200), [-10], 0),
(variables.Scalar().affined(10, 100).bounded(-200, 200, transform="clipping"), [1], 110),
(variables.Gaussian(3, 5, shape=(2,)), [-2, 1], [-7, 8]),
(variables.Gaussian(3, 5), [-2], -7),
(p.Instrumentation(variables.OrderedDiscrete(list(range(100)))), [1.4], 91),
]
)
def test_expected_value(var: variables.Variable, data: tp.List[float], expected: tp.Any) -> None:
check_parameter_features(var)
out = var.data_to_arguments(np.array(data))[0][0]
if isinstance(out, np.ndarray):
np.testing.assert_array_almost_equal(out, expected)
else:
np.testing.assert_approx_equal(out, expected, significant=4)
| 41.813008
| 121
| 0.674315
|
5358ad5539dd7b5b7e57aa46663bdb40adc56f57
| 4,681
|
bzl
|
Python
|
third_party/polymer.bzl
|
hakanhp/chanel
|
6825b60e86c46daabb18f40f1e45d3de2ff8e983
|
[
"Apache-2.0"
] | null | null | null |
third_party/polymer.bzl
|
hakanhp/chanel
|
6825b60e86c46daabb18f40f1e45d3de2ff8e983
|
[
"Apache-2.0"
] | null | null | null |
third_party/polymer.bzl
|
hakanhp/chanel
|
6825b60e86c46daabb18f40f1e45d3de2ff8e983
|
[
"Apache-2.0"
] | null | null | null |
"""TensorFlow Model Analysis Polymer Dependencies"""
load("@io_bazel_rules_closure//closure:defs.bzl", "web_library_external")
def tensorflow_model_analysis_polymer_workspace():
"""Download TensorFlow Model Analysis polymer dependencies."""
web_library_external(
name = "org_googlewebcomponents_google_apis",
licenses = ["notice"], # BSD-3-Clause
sha256 = "1e0a83f1af1978875789620edd837e6a06c1316f3bf6c2ed14d8450a7d4d3251",
urls = [
"https://mirror.bazel.build/github.com/GoogleWebComponents/google-apis/archive/v1.1.7.tar.gz",
"https://github.com/GoogleWebComponents/google-apis/archive/v1.1.7.tar.gz",
],
strip_prefix = "google-apis-1.1.7",
path = "/google-apis",
srcs = [
"google-apis.html",
"google-client-loader.html",
"google-js-api.html",
"google-legacy-loader.html",
"google-maps-api.html",
"google-plusone-api.html",
"google-realtime-api.html",
"google-youtube-api.html",
],
deps = [
"@org_polymer",
"@org_polymerelements_iron_jsonp_library",
],
)
web_library_external(
name = "org_googlewebcomponents_google_chart",
licenses = ["notice"], # BSD-3-Clause
sha256 = "e4a959deb8ad9660ea4ee5552e87e1e064f4b76008bf0fe37b4f4ce51817d480",
urls = [
"https://mirror.bazel.build/github.com/GoogleWebComponents/google-charts/archive/v1.1.1.tar.gz",
"https://github.com/GoogleWebComponents/google-chart/archive/v1.1.1.tar.gz",
],
strip_prefix = "google-chart-1.1.1",
path = "/google-chart",
srcs = [
"charts-loader.html",
"google-chart.css",
"google-chart.html",
"google-chart-loader.html",
],
deps = [
"@org_googlewebcomponents_google_apis",
"@org_polymer",
"@org_polymerelements_iron_ajax",
"@org_polymerlabs_promise_polyfill",
],
)
web_library_external(
name = "org_polymer_iron_pages",
licenses = ["notice"], # BSD-3-Clause
sha256 = "9a1b8e6b2d1dd11f94d7aa674c811a1e6b7dd766678e3650228deb109520612a",
urls = [
"https://mirror.bazel.build/github.com/PolymerElements/iron-pages/archive/v1.0.9.tar.gz",
"https://github.com/PolymerElements/iron-pages/archive/v1.0.9.tar.gz",
],
strip_prefix = "iron-pages-1.0.9",
path = "/iron-pages",
srcs = ["iron-pages.html"],
deps = [
"@org_polymer",
"@org_polymer_iron_resizable_behavior",
"@org_polymer_iron_selector",
],
)
web_library_external(
name = "org_polymerelements_iron_ajax",
licenses = ["notice"], # BSD-3-Clause
sha256 = "4979d6e5601deeede65f35ec4c89416ea2fbebc78c113ff9305c6687acd9ddcc",
urls = [
"https://mirror.bazel.build/github.com/PolymerElements/iron-ajax/archive/v1.4.1.tar.gz",
"https://github.com/PolymerElements/iron-ajax/archive/v1.4.1.tar.gz",
],
strip_prefix = "iron-ajax-1.4.1",
path = "/iron-ajax",
srcs = [
"iron-ajax.html",
"iron-request.html",
],
deps = [
"@org_polymer",
"@org_polymerlabs_promise_polyfill",
],
)
web_library_external(
name = "org_polymerelements_iron_jsonp_library",
licenses = ["notice"], # BSD-3-Clause
sha256 = "15667734d1be5c1ec108e9753b6c66a0c44ee32da312ff1b483856eb0bd861a4",
urls = [
"https://mirror.bazel.build/github.com/PolymerElements/iron-jsonp-library/archive/v1.0.5.tar.gz",
"https://github.com/PolymerElements/iron-jsonp-library/archive/v1.0.5.tar.gz",
],
strip_prefix = "iron-jsonp-library-1.0.5",
path = "/iron-jsonp-library",
srcs = [
"iron-jsonp-library.html",
],
deps = [
"@org_polymer",
],
)
web_library_external(
name = "org_polymerlabs_promise_polyfill",
licenses = ["notice"], # BSD-3-Clause
sha256 = "d83edb667c393efb3e7b40a2c22d439e1d84056be5d36174be6507a45f709daa",
urls = [
"https://mirror.bazel.build/github.com/PolymerLabs/promise-polyfill/archive/v1.0.1.tar.gz",
"https://github.com/PolymerLabs/promise-polyfill/archive/v1.0.1.tar.gz",
],
strip_prefix = "promise-polyfill-1.0.1",
path = "/promise-polyfill",
srcs = [
"Gruntfile.js",
"Promise-Statics.js",
"Promise.js",
"Promise.min.js",
"promise-polyfill-lite.html",
"promise-polyfill.html",
],
deps = [
"@org_polymer",
],
)
| 34.419118
| 107
| 0.611835
|
845aaef83eeff048910fab30bbc15230f8a93062
| 10,072
|
py
|
Python
|
examples/pytorch/graphsage/train_sampling.py
|
ydwu4/dgl-hack
|
b0fee8b9723c71f45bcf00301653fc71bfa82bc1
|
[
"Apache-2.0"
] | null | null | null |
examples/pytorch/graphsage/train_sampling.py
|
ydwu4/dgl-hack
|
b0fee8b9723c71f45bcf00301653fc71bfa82bc1
|
[
"Apache-2.0"
] | null | null | null |
examples/pytorch/graphsage/train_sampling.py
|
ydwu4/dgl-hack
|
b0fee8b9723c71f45bcf00301653fc71bfa82bc1
|
[
"Apache-2.0"
] | null | null | null |
import dgl
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
import dgl.function as fn
import dgl.nn.pytorch as dglnn
import time
import argparse
from _thread import start_new_thread
from functools import wraps
from dgl.data import RedditDataset
import tqdm
import traceback
#### Neighbor sampler
class NeighborSampler(object):
def __init__(self, g, fanouts):
self.g = g
self.fanouts = fanouts
def sample_blocks(self, seeds):
seeds = th.LongTensor(np.asarray(seeds))
blocks = []
for fanout in self.fanouts:
# For each seed node, sample ``fanout`` neighbors.
frontier = dgl.sampling.sample_neighbors(self.g, seeds, fanout, replace=True)
# Then we compact the frontier into a bipartite graph for message passing.
block = dgl.to_block(frontier, seeds)
# Obtain the seed nodes for next layer.
seeds = block.srcdata[dgl.NID]
blocks.insert(0, block)
return blocks
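    # Note: because each newly sampled block is inserted at position 0, the returned
    # list is ordered so that blocks[0] is consumed by the first GNN layer (its source
    # nodes are the outermost sampled frontier) and blocks[-1] produces the outputs
    # for the original seed nodes.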
class SAGE(nn.Module):
def __init__(self,
in_feats,
n_hidden,
n_classes,
n_layers,
activation,
dropout):
super().__init__()
self.n_layers = n_layers
self.n_hidden = n_hidden
self.n_classes = n_classes
self.layers = nn.ModuleList()
self.layers.append(dglnn.SAGEConv(in_feats, n_hidden, 'mean'))
for i in range(1, n_layers - 1):
self.layers.append(dglnn.SAGEConv(n_hidden, n_hidden, 'mean'))
self.layers.append(dglnn.SAGEConv(n_hidden, n_classes, 'mean'))
self.dropout = nn.Dropout(dropout)
self.activation = activation
def forward(self, blocks, x):
h = x
for l, (layer, block) in enumerate(zip(self.layers, blocks)):
# We need to first copy the representation of nodes on the RHS from the
# appropriate nodes on the LHS.
# Note that the shape of h is (num_nodes_LHS, D) and the shape of h_dst
# would be (num_nodes_RHS, D)
h_dst = h[:block.number_of_dst_nodes()]
# Then we compute the updated representation on the RHS.
# The shape of h now becomes (num_nodes_RHS, D)
h = layer(block, (h, h_dst))
if l != len(self.layers) - 1:
h = self.activation(h)
h = self.dropout(h)
return h
def inference(self, g, x, batch_size, device):
"""
Inference with the GraphSAGE model on full neighbors (i.e. without neighbor sampling).
g : the entire graph.
x : the input of entire node set.
The inference code is written in a fashion that it could handle any number of nodes and
layers.
"""
# During inference with sampling, multi-layer blocks are very inefficient because
# lots of computations in the first few layers are repeated.
# Therefore, we compute the representation of all nodes layer by layer. The nodes
        # on each layer are of course split into batches.
# TODO: can we standardize this?
nodes = th.arange(g.number_of_nodes())
for l, layer in enumerate(self.layers):
y = th.zeros(g.number_of_nodes(), self.n_hidden if l != len(self.layers) - 1 else self.n_classes)
for start in tqdm.trange(0, len(nodes), batch_size):
end = start + batch_size
batch_nodes = nodes[start:end]
block = dgl.to_block(dgl.in_subgraph(g, batch_nodes), batch_nodes)
input_nodes = block.srcdata[dgl.NID]
h = x[input_nodes].to(device)
h_dst = h[:block.number_of_dst_nodes()]
h = layer(block, (h, h_dst))
if l != len(self.layers) - 1:
h = self.activation(h)
h = self.dropout(h)
y[start:end] = h.cpu()
x = y
return y
def prepare_mp(g):
"""
Explicitly materialize the CSR, CSC and COO representation of the given graph
so that they could be shared via copy-on-write to sampler workers and GPU
trainers.
This is a workaround before full shared memory support on heterogeneous graphs.
"""
g.in_degree(0)
g.out_degree(0)
g.find_edges([0])
def compute_acc(pred, labels):
"""
Compute the accuracy of prediction given the labels.
"""
return (th.argmax(pred, dim=1) == labels).float().sum() / len(pred)
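# A quick sanity example for compute_acc: if the row-wise argmax of pred is
# [1, 0, 2, 2] and labels is [1, 0, 2, 0], three of the four predictions match and
# the returned accuracy is 0.75.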
def evaluate(model, g, inputs, labels, val_mask, batch_size, device):
"""
Evaluate the model on the validation set specified by ``val_mask``.
g : The entire graph.
inputs : The features of all the nodes.
labels : The labels of all the nodes.
val_mask : A 0-1 mask indicating which nodes do we actually compute the accuracy for.
batch_size : Number of nodes to compute at the same time.
device : The GPU device to evaluate on.
"""
model.eval()
with th.no_grad():
pred = model.inference(g, inputs, batch_size, device)
model.train()
return compute_acc(pred[val_mask], labels[val_mask])
def load_subtensor(g, labels, seeds, input_nodes, device):
"""
    Copies features and labels of a set of nodes onto GPU.
"""
batch_inputs = g.ndata['features'][input_nodes].to(device)
batch_labels = labels[seeds].to(device)
return batch_inputs, batch_labels
#### Entry point
def run(args, device, data):
# Unpack data
train_mask, val_mask, in_feats, labels, n_classes, g = data
train_nid = th.LongTensor(np.nonzero(train_mask)[0])
val_nid = th.LongTensor(np.nonzero(val_mask)[0])
train_mask = th.BoolTensor(train_mask)
val_mask = th.BoolTensor(val_mask)
# Create sampler
sampler = NeighborSampler(g, [int(fanout) for fanout in args.fan_out.split(',')])
# Create PyTorch DataLoader for constructing blocks
dataloader = DataLoader(
dataset=train_nid.numpy(),
batch_size=args.batch_size,
collate_fn=sampler.sample_blocks,
shuffle=True,
drop_last=False,
num_workers=args.num_workers)
# Define model and optimizer
model = SAGE(in_feats, args.num_hidden, n_classes, args.num_layers, F.relu, args.dropout)
model = model.to(device)
loss_fcn = nn.CrossEntropyLoss()
loss_fcn = loss_fcn.to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# Training loop
avg = 0
iter_tput = []
for epoch in range(args.num_epochs):
tic = time.time()
# Loop over the dataloader to sample the computation dependency graph as a list of
# blocks.
for step, blocks in enumerate(dataloader):
tic_step = time.time()
            # The nodes for input lie on the LHS side of the first block.
            # The nodes for output lie on the RHS side of the last block.
input_nodes = blocks[0].srcdata[dgl.NID]
seeds = blocks[-1].dstdata[dgl.NID]
# Load the input features as well as output labels
batch_inputs, batch_labels = load_subtensor(g, labels, seeds, input_nodes, device)
# Compute loss and prediction
batch_pred = model(blocks, batch_inputs)
loss = loss_fcn(batch_pred, batch_labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iter_tput.append(len(seeds) / (time.time() - tic_step))
if step % args.log_every == 0:
acc = compute_acc(batch_pred, batch_labels)
gpu_mem_alloc = th.cuda.max_memory_allocated() / 1000000 if th.cuda.is_available() else 0
print('Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MiB'.format(
epoch, step, loss.item(), acc.item(), np.mean(iter_tput[3:]), gpu_mem_alloc))
toc = time.time()
print('Epoch Time(s): {:.4f}'.format(toc - tic))
if epoch >= 5:
avg += toc - tic
if epoch % args.eval_every == 0 and epoch != 0:
eval_acc = evaluate(model, g, g.ndata['features'], labels, val_mask, args.batch_size, device)
print('Eval Acc {:.4f}'.format(eval_acc))
print('Avg epoch time: {}'.format(avg / (epoch - 4)))
if __name__ == '__main__':
argparser = argparse.ArgumentParser("multi-gpu training")
argparser.add_argument('--gpu', type=int, default=0,
help="GPU device ID. Use -1 for CPU training")
argparser.add_argument('--num-epochs', type=int, default=20)
argparser.add_argument('--num-hidden', type=int, default=16)
argparser.add_argument('--num-layers', type=int, default=2)
argparser.add_argument('--fan-out', type=str, default='10,25')
argparser.add_argument('--batch-size', type=int, default=1000)
argparser.add_argument('--log-every', type=int, default=20)
argparser.add_argument('--eval-every', type=int, default=5)
argparser.add_argument('--lr', type=float, default=0.003)
argparser.add_argument('--dropout', type=float, default=0.5)
argparser.add_argument('--num-workers', type=int, default=0,
help="Number of sampling processes. Use 0 for no extra process.")
args = argparser.parse_args()
if args.gpu >= 0:
device = th.device('cuda:%d' % args.gpu)
else:
device = th.device('cpu')
# load reddit data
data = RedditDataset(self_loop=True)
train_mask = data.train_mask
val_mask = data.val_mask
features = th.Tensor(data.features)
in_feats = features.shape[1]
labels = th.LongTensor(data.labels)
n_classes = data.num_labels
# Construct graph
g = dgl.graph(data.graph.all_edges())
g.ndata['features'] = features
prepare_mp(g)
# Pack data
data = train_mask, val_mask, in_feats, labels, n_classes, g
run(args, device, data)
| 38.296578
| 137
| 0.625794
|
c9a219a85a16125a0525474708fcdaf322a59e18
| 2,070
|
py
|
Python
|
pythononwheels/start/handlers/powhandlermixin.py
|
fakegit/pow_devel
|
4288d9c48261ec54414f295b25d444eee523a85f
|
[
"MIT"
] | 24
|
2015-08-06T07:32:00.000Z
|
2022-03-24T22:05:01.000Z
|
pythononwheels/start/handlers/powhandlermixin.py
|
fakegit/pow_devel
|
4288d9c48261ec54414f295b25d444eee523a85f
|
[
"MIT"
] | 37
|
2017-04-14T19:16:40.000Z
|
2021-08-17T21:05:51.000Z
|
pythononwheels/start/handlers/powhandlermixin.py
|
fakegit/pow_devel
|
4288d9c48261ec54414f295b25d444eee523a85f
|
[
"MIT"
] | 2
|
2016-05-25T08:29:32.000Z
|
2020-11-21T10:44:31.000Z
|
import tornado.web
import werkzeug.security
from {{appname}}.conf.config import myapp
class PowHandlerMixin():
"""
The Base Pow Handler Mixin Handler
This is Place to put common stuff for all your
Standard AND WebSocket handlers which will remain unaffected by any PoW Changes.
Purely and only User or Extension controlled.
"""
show_list=[]
hide_list=[]
def get_current_user(self):
"""
very simple implementation.
        change to your own needs here or in your own subclassed base handler.
"""
if myapp["enable_auth"]:
# try to find the user
# user_id = self.get_secure_cookie("user_id")
# if not user_id: return None
# u=User()
# u=u.find_one(User.id==user_id)
# return u
raise NotImplementedError("User Authentication not implemented, yet")
else:
# if authentication is disabled return a dummy guest user
return True
def check_password_hash(self, pwhash, password ):
"""
uses werkzeug.security.check_password_hash
see: http://werkzeug.pocoo.org/docs/0.14/utils/#module-werkzeug.security
get the password from for example a login form (make sure you use https)
get the hash from the user model table (see generate_password_hash below)
"""
return werkzeug.security.check_password_hash(pwhash, password)
def generate_password_hash(self, password ):
"""
uses werkzeug.security.generate_password_hash
see: http://werkzeug.pocoo.org/docs/0.14/utils/#module-werkzeug.security
store this returned hash in the user models table as password
when the user is first registered or changed his password.
Use https to secure the plaintext POSTed pwd.
"""
method = myapp["pwhash_method"]
return werkzeug.security.generate_password_hash(password, method=method, salt_length=8)
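    # A minimal round-trip sketch using only the werkzeug.security functions already
    # imported above (the literal method string is an assumption; in this handler it
    # comes from myapp["pwhash_method"]):
    #
    #     pwhash = werkzeug.security.generate_password_hash("s3cret", method="pbkdf2:sha256", salt_length=8)
    #     assert werkzeug.security.check_password_hash(pwhash, "s3cret")
    #     assert not werkzeug.security.check_password_hash(pwhash, "wrong")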
| 39.807692
| 95
| 0.632367
|
6288a32f68b30120ff5325bbcfa68243cf41f37d
| 13,248
|
py
|
Python
|
.history/src/Simulador_20200710221308.py
|
eduardodut/Trabalho_final_estatistica_cd
|
fbedbbea6bdd7a79e1d62030cde0fab4e93fc338
|
[
"MIT"
] | null | null | null |
.history/src/Simulador_20200710221308.py
|
eduardodut/Trabalho_final_estatistica_cd
|
fbedbbea6bdd7a79e1d62030cde0fab4e93fc338
|
[
"MIT"
] | null | null | null |
.history/src/Simulador_20200710221308.py
|
eduardodut/Trabalho_final_estatistica_cd
|
fbedbbea6bdd7a79e1d62030cde0fab4e93fc338
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from Matriz_esferica import Matriz_esferica
from Individuo import Individuo, Fabrica_individuo
import random
from itertools import permutations
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from scipy.sparse import csr_matrix, lil_matrix
class Simulador():
def __init__(
self,
            tamanho_matriz, #number of rows and columns of the spherical matrix
            percentual_inicial_tipo1, #initial share of the population that will be infected as type 1
            percentual_inicial_tipo2, #initial share of the population that will be infected as type 2
            chance_infeccao, #chance a type 2 infected individual has of infecting a healthy one
            chance_infeccao_tipo2, #chance an infected individual becomes contagious
            chance_morte, #chance a type 2 individual dies at the end of an update
            atualizacoes_cura): #number of updates needed to cure a type 1 or type 2 individual
self.num_atualizacoes = 0
self.lista_infectados_tipo_2 = []
self.lista_infectados_tipo_1 = []
self.num_curados = 0
self.num_mortos = 0
self.chance_infeccao = chance_infeccao
self.chance_infeccao_tipo2 = chance_infeccao_tipo2
self.chance_morte = chance_morte
self.populacao_inicial = int(tamanho_matriz**2)
self.num_inicial_tipo2 = int(self.populacao_inicial * percentual_inicial_tipo2)
self.num_inicial_tipo1 = 1 + int(self.populacao_inicial * percentual_inicial_tipo1)
self.num_inicial_sadios = self.populacao_inicial - (self.num_inicial_tipo2 + self.num_inicial_tipo1)
self.fabrica_individuo = Fabrica_individuo(atualizacoes_cura)
self.df_individuos = pd.DataFrame(index= range(tamanho_matriz), columns=range(tamanho_matriz))
self.matriz_status = lil_matrix((tamanho_matriz, tamanho_matriz),dtype= np.uint8)
self.popular(tamanho_matriz)
self.lista_matrizes_posicionamento = []
        #object responsible for validating movement on the n x n grid
self.matriz_esferica = Matriz_esferica(tamanho_matriz)
dict = {
'num_sadios':self.num_inicial_sadios,
'num_infect_t1':self.num_inicial_tipo1,
'num_infect_t2':self.num_inicial_tipo2,
'num_curados':0,
'num_mortos':0}
        #dataframe that will store the results of each update
self.dataframe = pd.DataFrame(dict,index = [0])
self.salvar_posicionamento()
def salvar_posicionamento(self):
self.lista_matrizes_posicionamento.append(self.matriz_status)
def verificar_infeccao(self, lista_infectantes):
lista_novos_infectados_tipo1 = []
lista_novos_infectados_tipo2 = []
        #iterate over the list of infectious individuals; each one carries out the activity of infecting
        for X,Y in lista_infectantes:
            #look up the neighbours of the current infectious individual
            lista_vizinhos = self.matriz_esferica.get_vizinhos(X, Y)
            #For each neighbour, if it is healthy, a random number is drawn to check whether it was infected
            for x,y in lista_vizinhos:
                #check for SADIO (healthy)
if self.matriz_status[x,y] == Individuo.SADIO:
                    #determine the new status
                    novo_status = self.infectar(self.chance_infeccao, self.chance_infeccao_tipo2)
                    #if it became a type 1 infected
                    if novo_status == Individuo.INFECTADO_TIPO_1:
                        #add it to the list of new type 1 infected
                        lista_novos_infectados_tipo1.append((x,y))
                        #update the status in the status matrix
                        self.df_individuos.loc[x,y] = self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_1,(x,y))
                        self.matriz_status[x,y] = Individuo.INFECTADO_TIPO_1
                    if novo_status == Individuo.INFECTADO_TIPO_2:
                        #add it to the list of new type 2 infected
                        lista_novos_infectados_tipo2.append((x,y))
                        #update the status in the status matrix
                        self.df_individuos.loc[x,y] = self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_2,(x,y))
                        self.matriz_status[x,y] = Individuo.INFECTADO_TIPO_2
return lista_novos_infectados_tipo1, lista_novos_infectados_tipo2
def verificar_morte(self, lista_infectantes_tipo2):
lista_curados = []
lista_mortos = []
for x,y in lista_infectantes_tipo2:
novo_status = self.df_individuos.loc[x,y].checagem_morte(self.chance_morte)
if novo_status == Individuo.MORTO:
self.matriz_status[x,y] = Individuo.MORTO
lista_mortos.append((x,y))
if novo_status == Individuo.CURADO:
self.matriz_status[x,y] = Individuo.CURADO
lista_curados.append((x,y))
return lista_mortos, lista_curados
def verificar_cura(self, lista_infectantes):
lista_curados = []
for x,y in lista_infectantes:
novo_status = self.df_individuos.loc[x,y].checagem_cura()
if novo_status == Individuo.CURADO:
self.matriz_status[x,y] = Individuo.CURADO
lista_curados.append((x,y))
return lista_curados
def iterar(self):
        #Find the new infections caused by the current infectious individuals in the matrix
lista_novos_infectados_tipo1, lista_novos_infectados_tipo2 = self.verificar_infeccao(self.lista_infectados_tipo_1 + self.lista_infectados_tipo_2)
        #Check for deaths among the type 2 infected
lista_mortos_atualizacao, lista_curados_t2_atualizacao = self.verificar_morte(self.lista_infectados_tipo_2)
self.lista_infectados_tipo_2 = [indice for indice in self.lista_infectados_tipo_2 if indice not in lista_mortos_atualizacao and indice not in lista_curados_t2_atualizacao]
        #update the death count
self.num_mortos += len(lista_mortos_atualizacao)
        #Check for cures
lista_curados_t1_atualizacao = self.verificar_cura(self.lista_infectados_tipo_1)
self.lista_infectados_tipo_1 = [indice for indice in self.lista_infectados_tipo_1 if indice not in lista_curados_t1_atualizacao ]
        #add the newly cured to the overall cured count
self.num_curados = self.num_curados + len(lista_curados_t1_atualizacao) + len(lista_curados_t2_atualizacao)
        #move the infectious individuals:
for x,y in self.lista_infectados_tipo_1:
self.mover_infectante((x,y))
for x,y in self.lista_infectados_tipo_2:
self.mover_infectante((x,y))
        #append the new type 1 and type 2 infected to their respective lists
self.lista_infectados_tipo_2 = self.lista_infectados_tipo_2 + lista_novos_infectados_tipo2
self.lista_infectados_tipo_1 = self.lista_infectados_tipo_1 + lista_novos_infectados_tipo1
dict = {
'num_sadios':self.populacao_inicial - self.num_mortos - self.num_curados - len(self.lista_infectados_tipo_1) - len(self.lista_infectados_tipo_2) ,
'num_infect_t1':len(self.lista_infectados_tipo_1),
'num_infect_t2':len(self.lista_infectados_tipo_2),
'num_curados':self.num_curados,
'num_mortos':self.num_mortos}
self.dataframe = self.dataframe.append(dict, ignore_index=True)
print("num t1: ", len(self.lista_infectados_tipo_1))
print("num t2: ", len(self.lista_infectados_tipo_2))
print("num curados: ", self.num_curados)
print("num mortos: ", self.num_mortos)
print("---------")
        #save the new status matrix
self.salvar_posicionamento()
        #add 1 to the number of updates performed on the matrix
self.num_atualizacoes +=1
def infectar(self, chance_infeccao, chance_infeccao_tipo2):
saida = Individuo.SADIO
        #random number for the chance of infecting the neighbour
rng_infeccao = random.random()
if rng_infeccao <= chance_infeccao:
            #random number for the chance of a type 1 vs type 2 infection
rng_infeccao_tipo2 = random.random()
if rng_infeccao_tipo2 <= chance_infeccao_tipo2:
saida = Individuo.INFECTADO_TIPO_2
else:
saida = Individuo.INFECTADO_TIPO_1
return saida
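    # Worked probabilities for the two-stage draw above, using the script defaults
    # chance_infeccao = 0.3 and chance_infeccao_tipo2 = 0.2: a healthy neighbour is
    # infected 30% of the time; of those, 20% become type 2, i.e. roughly 6% of checks
    # yield INFECTADO_TIPO_2 and 24% yield INFECTADO_TIPO_1.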
def popular(self, tamanho_matriz):
# for index, row in self.df_individuos.iterrows():
# for item in row:
# item = self.fabrica_individuo.criar_individuo(Individuo.SADIO,(0,0))
self.df_individuos.iloc[:,:] = self.fabrica_individuo.criar_individuo(Individuo.SADIO,(0,0))
        #list of possible index combinations for the data matrix
        permutacoes = permutations(list(range(tamanho_matriz)),2)
        #conversion to a list of (x,y) tuples
        lista_indices = list(permutacoes)
        #shuffle the indices
        random.shuffle(lista_indices)
        #create the first type 1 infected:
indice = lista_indices.pop()
ind_x = indice[0]
ind_y = indice[1]
self.lista_infectados_tipo_1.append((ind_x,ind_y))
#print(indice)
self.df_individuos.loc[ind_x,ind_y] = self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_1,(ind_x,ind_y))
#print(self.df_individuos)
self.matriz_status[ind_x,ind_y] = Individuo.INFECTADO_TIPO_1
        #create the remaining type 1 infected
for i in range(1,self.num_inicial_tipo1):
indice = lista_indices.pop()
ind_x = indice[0]
ind_y = indice[1]
self.lista_infectados_tipo_1.append((ind_x,ind_y))
self.df_individuos.loc[ind_x,ind_y] = self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_1,(ind_x,ind_y))
self.matriz_status[ind_x,ind_y] = Individuo.INFECTADO_TIPO_1
        #create the remaining type 2 infected:
for indice in range(self.num_inicial_tipo2):
indice = lista_indices.pop()
ind_x = indice[0]
ind_y = indice[1]
self.lista_infectados_tipo_2.append((ind_x,ind_y))
self.df_individuos.loc[ind_x,ind_y] = self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_2,(ind_x,ind_y))
self.matriz_status[ind_x,ind_y] = Individuo.INFECTADO_TIPO_2
def trocar_status_localizacao(self,ponto_ini,ponto_final):
x_ini = ponto_ini[0]
y_ini = ponto_ini[1]
x_fin = ponto_final[0]
y_fin = ponto_final[1]
aux1 = self.matriz_status[x_fin,y_fin]
self.matriz_status[x_fin,y_fin] = self.matriz_status[x_ini,y_ini]
self.matriz_status[x_ini,y_ini] = aux1
aux2 = self.df_individuos.loc[x_fin,y_fin]
self.df_individuos.loc[x_fin,y_fin] = self.df_individuos.loc[x_ini,y_ini]
self.df_individuos.loc[x_ini,y_ini] = aux2
def mover_infectante(self, posicao_inicial):
pos_x, pos_y = posicao_inicial[0], posicao_inicial[1]
rng_posicao = random.random()
if rng_posicao <=0.25:
            #move up
            pos_x -= 1
        elif rng_posicao <=0.5:
            #move down
            pos_x += 1
        elif rng_posicao <=0.75:
            #move left
            pos_y -= 1
        else:
            #move right
pos_y += 1
novo_x, novo_y = self.matriz_esferica.valida_ponto_matriz(pos_x, pos_y)
        #find out which individual currently occupies the position so it can be assigned the position of the one replacing it
status = self.matriz_status[novo_x, novo_y]
self.trocar_status_localizacao(posicao_inicial,(novo_x, novo_y))
chance_infeccao = 0.3
chance_infeccao_tipo2 = 0.2
chance_morte = 0.1
atualizacoes_cura = 10
percentual_inicial_tipo1 = 0.0
percentual_inicial_tipo2 = 0.00
sim = Simulador(
5,
percentual_inicial_tipo1,
percentual_inicial_tipo2,
chance_infeccao,
chance_infeccao_tipo2,
chance_morte,atualizacoes_cura)
#print(sim.lista_matrizes_posicionamento[0])
#print(sim.lista_infectados_tipo_2)
#print(sim.lista_infectados_tipo_1)
cmap = ListedColormap(['w', 'y', 'r', 'blue', 'black'])
print(sim.dataframe[-1:]['num_infect_t1'])
while sim.dataframe.tail(1)['num_infect_t1'].iloc[0] + sim.dataframe.tail(1)['num_infect_t2'].iloc[0] > 0:
    plt.matshow(sim.lista_matrizes_posicionamento[-1].toarray(), cmap=cmap, vmin=0, vmax=4)
#plt.show()
sim.iterar()
print(sim.dataframe)
| 40.024169
| 179
| 0.642059
|
4f4678572e89a7b6f4e1e5faf00d0b4385a87cc4
| 1,944
|
py
|
Python
|
connectfour/test/test_negamax.py
|
amwhalen/connectfour
|
4f01bc4a94a04ae729c66c0498fe64b1ce8585f6
|
[
"MIT"
] | 1
|
2017-10-12T05:20:02.000Z
|
2017-10-12T05:20:02.000Z
|
connectfour/test/test_negamax.py
|
amwhalen/connectfour
|
4f01bc4a94a04ae729c66c0498fe64b1ce8585f6
|
[
"MIT"
] | null | null | null |
connectfour/test/test_negamax.py
|
amwhalen/connectfour
|
4f01bc4a94a04ae729c66c0498fe64b1ce8585f6
|
[
"MIT"
] | null | null | null |
import unittest
import connectfour.board
import connectfour.board_factory
from connectfour.players.negamax import NegamaxPlayer
from copy import deepcopy
class TestBoard(unittest.TestCase):
def setUp(self):
pass
def test_negamax(self):
n = NegamaxPlayer(4)
self.assertEquals(n.getName(), "Negamax(4)")
self.assertFalse(n.isHuman())
# 0 1 2 3 4 5 6
# . . . . . . .
# . . . . . . .
# . . . . . . .
# 1 . . . . . .
# 1 . . . . . .
# 1 . 2 2 . . .
#
# should result in player 2 (negamax) moving to column zero to block
#
bf = connectfour.board_factory.BoardFactory()
b = bf.generatePlayerOneNextMoveWinsColZero()
self.assertEquals(n.getMove(b, 2), 0)
# 0 1 2 3 4 5 6
# . . . . . . .
# . . . . . . .
# . . . . . . .
# . . . . . . 1
# . . . . . . 1
# . . 2 2 . . 1
#
# should result in player 2 (negamax) moving to column six to block
#
bf = connectfour.board_factory.BoardFactory()
b = bf.generatePlayerOneNextMoveWinsCol6()
self.assertEquals(n.getMove(b, 2), 6)
# 0 1 2 3 4 5 6
# . . . . . . .
# . . . . . . .
# . . . . . . .
# . . . . . . .
# 1 . . . . . .
# 1 . 2 2 . . .
#
# should result in player 2 (negamax) moving to column zero
#
bf = connectfour.board_factory.BoardFactory()
b = bf.generatePlayerOneHasTwoInColZero()
self.assertEquals(n.getMove(b, 2), 0)
# 0 1 2 3 4 5 6
# . . . . . . .
# . . . . . . .
# . . . . . . .
# . . . . . . .
# . . . . . . 1
# . . 2 2 . . 1
#
# should result in player 2 (negamax) moving to column six
#
bf = connectfour.board_factory.BoardFactory()
b = bf.generatePlayerOneHasTwoInCol6()
self.assertEquals(n.getMove(b, 2), 6)
if __name__ == '__main__':
unittest.main()
| 26.27027
| 70
| 0.486111
|