repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
aTeK7/deep-stereo1.4 | old/planesweep.py | import numpy as np
from PIL import Image
from glumpy import app, gloo, gl, data, glm
from load_camera import CameraLoader, CameraRigType, camera_0
width = 1024/2
height = 768/2
ratio = float(width)/height
window = app.Window(width=width, height=height)
camera = CameraLoader()
posx = -10.0
angleR = 0.0
def cube():
    """Build a single textured quad (screen-aligned plane) as glumpy buffers.

    Despite the historical name this builds one quad, not a cube: four
    vertices at z = d with per-vertex texture coordinates and a +Z normal.

    Returns:
        (vertices, filled): a gloo.VertexBuffer with fields a_position,
        a_texcoord, a_normal and a gloo.IndexBuffer with the two triangles
        that fill the quad.
    """
    vtype = [('a_position', np.float32, 3),
             ('a_texcoord', np.float32, 2),
             ('a_normal', np.float32, 3)]
    itype = np.uint32
    # Quad corner positions (NDC-like coordinates) at depth d.
    d = 0
    p = np.array([
        (-1, 1, d),
        (1, 1, d),
        (1, -1, d),
        (-1, -1, d),
    ], dtype=float)
    # Face normals: the quad faces +Z.
    n = np.array([[0, 0, 1], [0, 0, 1]])
    # Texture coords, one pair per corner.
    t = np.array([
        [1, 0],
        [0, 0],
        [0, 1],
        [1, 1]
    ])
    faces_p = [0, 1, 2, 3]
    faces_n = [0, 0, 0, 0]
    faces_t = [0, 1, 2, 3]
    vertices = np.zeros(4, vtype)
    vertices['a_position'] = p[faces_p]
    vertices['a_normal'] = n[faces_n]
    vertices['a_texcoord'] = t[faces_t]
    # Two triangles covering the quad.  (The previous version printed an
    # undefined name `points` and tiled the 6-face cube index pattern,
    # generating indices up to 23 for only 4 vertices.)
    filled = np.array([0, 1, 2, 0, 2, 3], dtype=itype)
    vertices = vertices.view(gloo.VertexBuffer)
    filled = filled.view(gloo.IndexBuffer)
    return vertices, filled
def normalCam(w, h):
    """Return a glumpy perspective projection for a *w* x *h* viewport.

    Uses a 3-degree field of view and a [1, 100] clip range, matching the
    original hand-tuned values.
    """
    field_of_view = 3.0
    near, far = 1.0, 100.0
    return glm.perspective(field_of_view, w / float(h), near, far)
@window.event
def on_resize(width, height):
    # Window resize handler: reinstall the projection matrix.
    # NOTE(review): the projection is rebuilt for a fixed 1024x768 viewport,
    # ignoring the new width/height arguments -- confirm this is intentional.
    print "ON RESIZE"
    #program['u_projection'] = glm.perspective(fovy, aspect, zNear, zFar)
    program['u_projection'] = normalCam(1024, 768)
@window.event
def on_init():
    # One-time GL state setup: enable depth testing.
    gl.glEnable(gl.GL_DEPTH_TEST)
@window.event
def on_draw(dt):
    # Per-frame handler: clear, draw the textured plane, then refresh the
    # model matrix from the interactive posx/angleR state.
    window.clear()
    gl.glDisable(gl.GL_BLEND)
    gl.glEnable(gl.GL_DEPTH_TEST)
    program.draw(gl.GL_TRIANGLES, indices)
    #model = np.eye(4, dtype=np.float32)
    #program['u_model'] = model
    #program['u_model'] = glm.xrotate(glm.translation(0, 0, posx), angleR)
    #program['u_model'] = np.eye(4)
    # NOTE(review): u_model is updated AFTER the draw call, so keyboard
    # changes only become visible one frame late -- confirm intended.
    program['u_model'] = glm.zrotate(np.eye(4),angleR).dot(glm.translation(0, 0, posx))
@window.event
def on_key_press(symbol, modifiers):
    # Keyboard controls: keys 1/2/3 select a calibrated camera projection,
    # space restores the default perspective, and the arrow keys translate
    # (up/down) or rotate (left/right) the plane via the globals below.
    global posx, angleR
    # Press key 1
    if symbol == 49:
        print "[LEFT] Camera"
        program['u_projection'] = camera.get_camera(CameraRigType.Left)
    elif symbol == 50:
        print "[VIRTUAL] Camera"
        program['u_projection'] = camera.get_camera(CameraRigType.Virtual)
    elif symbol == 51:
        print "[RIGHT] Camera"
        program['u_projection'] = camera.get_camera(CameraRigType.Right)
    elif symbol == 32:
        print "Switch back to original camera"
        program['u_projection'] = normalCam(1024, 768)
    # UP arrow (keycode 65362): move the plane away along Z.
    elif symbol == 65362:
        posx += 1.0
        print "Position X(up): %s" % posx
    # DOWN arrow (keycode 65364): move the plane closer along Z.
    elif symbol == 65364:
        posx -= 1.0
        print "Position X(down): %s" % posx
    # LEFT arrow: rotate one degree counter-clockwise.
    elif symbol == 65361:
        angleR += 1.0
        print "[ROTATE] X(left): %s" % angleR
    # RIGHT arrow: rotate one degree clockwise.
    elif symbol == 65363:
        angleR -= 1.0
        print "[ROTATE] X(right): %s" % angleR
# Vertex Shader
vertex = """
uniform mat4 u_model; // Model matrix
uniform mat4 u_view; // View matrix
uniform mat4 u_projection; // Projection matrix
attribute vec3 a_position; // Vertex position
attribute vec2 a_texcoord; // Vertex texture coordinates
varying vec2 v_texcoord; // Interpolated fragment texture coordinates (out)
void main() {
// Assign varying variables
v_texcoord = a_texcoord;
// Final position
gl_Position = u_projection * u_view * u_model * vec4(a_position,1.0);
}
"""
fragment = """
uniform sampler2D u_texture; // Texture
varying vec2 v_texcoord;
void main() {
vec4 t_color = texture2D(u_texture, v_texcoord);
gl_FragColor = t_color;
}
"""
program = gloo.Program(vertex, fragment, count=4)
program['u_texture'] = np.asarray(Image.open('test_L.jpg'))
program['u_model'] = np.eye(4, dtype=np.float32)
#program['u_view'] = glm.translation(0, 0, -10.0)
program['u_view'] = np.eye(4, dtype=np.float32)
vertices, indices = camera_0.camera_plane_at()
program.bind(vertices)
app.run() |
aTeK7/deep-stereo1.4 | reprojection/camera.py | <reponame>aTeK7/deep-stereo1.4
import numpy as np
import math
class Camera(object):
    """A pinhole camera: rotation R, translation T and intrinsics K.

    R and T map camera coordinates to world coordinates; see
    get_extrinsics_inv() for the world-to-camera form.
    """

    def __init__(self, rotation, translation, intrinsics, name="camera"):
        """Validate and store the camera parameters.

        :param rotation: (3, 3) rotation matrix.
        :param translation: length-3 translation vector.
        :param intrinsics: (3, 3) intrinsic matrix K.
        :param name: optional human-readable identifier.
        :raises NameError: on a wrong shape.  (Kept as NameError for
            backward compatibility, although ValueError would be the
            conventional choice.)
        """
        self.name = name
        if len(translation) != 3:
            raise NameError("Translation vector must be of length 3")
        # Use `or` so ANY mismatched dimension is rejected -- the previous
        # `and` silently accepted e.g. a (3, 4) rotation matrix.
        if rotation.shape[0] != 3 or rotation.shape[1] != 3:
            raise NameError("Rotation matrix shape must be (3,3)")
        if intrinsics.shape[0] != 3 or intrinsics.shape[1] != 3:
            raise NameError("Intrinsics matrix shape must be (3,3)")
        self.rotation = rotation
        self.translation = translation
        self.intrinsics = intrinsics

    def get_principal_point(self):
        """Return the principal point (cx, cy) from the intrinsics."""
        return np.asarray([self.intrinsics[0, 2], self.intrinsics[1, 2]])

    def get_focal_length(self):
        """Return the focal lengths (fx, fy) from the intrinsics."""
        return np.asarray([self.intrinsics[0, 0], self.intrinsics[1, 1]])

    def get_extrinsics(self):
        """Return the 3x4 extrinsic matrix [R | T]."""
        return np.concatenate((self.rotation, np.vstack(self.translation)), axis=1)

    def get_extrinsics_inv(self):
        """Return the inverted extrinsics [R' | -R'T] (world -> camera)."""
        T_p = -np.transpose(self.rotation).dot(self.translation)
        return np.concatenate((np.transpose(self.rotation), np.vstack(T_p)), axis=1)

    def getRotation(self):
        return self.rotation

    def getTranslation(self):
        return self.translation

    def getIntrinsics(self):
        return self.intrinsics

    def getProjection(self):
        """Return the 3x4 projection matrix K [R | T]."""
        RT = np.concatenate((self.getRotation(), np.vstack(self.getTranslation())), axis=1)
        return self.getIntrinsics().dot(RT)

    def __repr__(self):
        return "%s" % self.getProjection()
def rotateX(angle):
    """Return the 3x3 rotation matrix about the X axis for *angle* degrees."""
    phi = angle * math.pi / 180
    cos_phi = math.cos(phi)
    sin_phi = math.sin(phi)
    return np.array([
        [1, 0, 0],
        [0, cos_phi, -sin_phi],
        [0, sin_phi, cos_phi],
    ])
MockCameraA = Camera(rotateX(-25), [0, 3, 0], np.eye(3))
MockCameraB = Camera(rotateX(25), [0, -3, 0], np.eye(3))
|
aTeK7/deep-stereo1.4 | tf_deep_stereo/tf_train.py | import os
import sys
import time
import cv2
import socket
import traceback
from datetime import datetime
from optparse import OptionParser
import tensorflow as tf
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from dataset_preparation.kitti_generator import KittiGenerator
from dataset_preparation.queued_processor import GeneratorQueued, KittiParams
from select_tower import select_tower
from color_tower import color_tower
from shared import InputOrganizer
import subprocess
def get_git_revision_short_hash():
    """Return the current git short hash as a clean text string.

    The raw check_output result carries a trailing newline (and is bytes on
    Python 3), which previously leaked into the summary directory name;
    decode and strip it here.
    """
    out = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'])
    return out.decode('ascii').strip()
def dotpMerge(color_tensors, select_tensor, num_planes=96):
    """
    Reimplement Dotp merge in a more pythonic way.

    Selection-weighted sum of the per-plane color outputs: each plane's RGB
    tensor is scaled by its single-channel selection map and the scaled
    planes are accumulated into one image.

    :param color_tensors: list of `num_planes` RGB tensors (NHWC).
    :param select_tensor: tensor whose last axis holds `num_planes` channels.
    :param num_planes: number of depth planes to merge.
    :return: weighted sum tensor with the shape of one color tensor.
    """
    with tf.name_scope('dotPMerge'):
        result = []
        selections = tf.split(select_tensor, num_planes, 3)
        for i in range(num_planes):
            color_tensor = color_tensors[i]
            a = selections[i]
            # Replicate the 1-channel selection across the 3 RGB channels.
            select_rgb = tf.concat([a, a, a], 3)
            r = tf.multiply(color_tensor, select_rgb)
            result.append(r)
        return tf.add_n(result)
def inference(input, num_planes=96, batch_size=1):
    """Build the deep-stereo network plus its image/scalar summaries.

    The color tower and selection tower are evaluated on the plane-sweep
    input and merged by dotpMerge into the predicted target view.

    :param input: InputOrganizer holding the PSV and target placeholders.
        (The parameter name shadows the `input` builtin; kept for
        compatibility with keyword-argument callers.)
    :param num_planes: number of depth planes in the plane-sweep volume.
    :param batch_size: number of images emitted into the image summaries.
    :return: the merged network output tensor.
    """
    print("Tensorflow DeepEstimation Network: (planes:%s)" % num_planes)
    color = color_tower(input, num_planes=num_planes)
    select = select_tower(input, num_planes=num_planes)
    net_out = dotpMerge(color, select, num_planes=num_planes)
    target = input.get_target_placeholder()
    tf.summary.image('Target', target, max_outputs=batch_size)
    tf.summary.image('netout_as_is', net_out, max_outputs=batch_size)
    # Scalar summaries track the output range of the first image in the
    # batch, which makes saturation/collapse easy to spot in TensorBoard.
    image_out = net_out[0, :, :, :]
    net_min = tf.reduce_min(image_out, name="out_minimum")
    net_max = tf.reduce_max(image_out, name="out_maximum")
    net_mean = tf.reduce_mean(image_out, name="out_mean")
    tf.summary.scalar(net_min.op.name, net_min)
    tf.summary.scalar(net_max.op.name, net_max)
    tf.summary.scalar(net_mean.op.name, net_mean)
    tf.summary.histogram('out_histogram', image_out)
    return net_out
def lossF(net_out, target):
    """Mean per-example L1 reconstruction loss between target and output."""
    per_example_l1 = tf.reduce_sum(tf.abs(tf.subtract(target, net_out)), [1, 2, 3])
    return tf.reduce_mean(per_example_l1, name="Loss")
def training(loss, learning_rate):
    """Sets up the training Ops.

    Creates a summarizer to track the loss over time in TensorBoard.
    Creates an optimizer and applies the gradients to all trainable variables.
    The Op returned by this function is what must be passed to the
    `sess.run()` call to cause the model to train.

    Args:
        loss: Loss tensor, from loss().
        learning_rate: The learning rate to use for gradient descent.

    Returns:
        train_op: The Op for training.
    """
    # Add a scalar summary for the snapshot loss.
    tf.summary.scalar(loss.op.name, loss)
    # Create the adagrad optimizer with the given learning rate.
    optimizer = tf.train.AdagradOptimizer(learning_rate)
    # Create a variable to track the global step.
    global_step = tf.Variable(0, name='global_step', trainable=False)
    # Use the optimizer to apply the gradients that minimize the loss
    # (and also increment the global step counter) as a single training step.
    train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op
def do_eval(sess, validation_batch, net_out, target):
    """Compute and print the L1 validation loss for one validation batch.

    NOTE(review): this builds new graph ops (sub/abs/sum/mean plus a scalar
    summary) on EVERY call, so invoking it repeatedly from the training
    loop grows the graph -- consider constructing these ops once.

    :param sess: active tf.Session.
    :param validation_batch: feed dict for the validation placeholders.
    :param net_out: network output tensor.
    :param target: target placeholder tensor.
    :return: the validation-loss summary op (currently unused by callers).
    """
    substraction = tf.subtract(target, net_out)
    absolute = tf.abs(substraction)
    validation_l1 = tf.reduce_sum(absolute, [1, 2, 3])
    validation_score = tf.reduce_mean(validation_l1, name="validation_loss")
    val_summary = tf.summary.scalar(validation_score.op.name, validation_score)
    val_value = sess.run(validation_score, feed_dict=validation_batch)
    print('Validation Score: %s' % val_value)
    return val_summary
def run_training(FLAGS):
    """Build the deep-stereo graph and run the full training loop.

    Builds the network, loss and optimizer, starts the multiprocessing
    batch generator, then trains for FLAGS.max_steps steps, writing
    summaries every FLAGS.print_step steps and checkpoints/validation
    every FLAGS.validate_step steps.  (The previous docstring said
    "Train MNIST" -- a copy/paste leftover from the TF tutorial.)

    :param FLAGS: parsed option object (see main()).
    """
    sess = None
    try:
        # Tell TensorFlow that the model will be built into the default Graph.
        with tf.Graph().as_default():
            # Generate placeholders for the images from all cameras.
            input_organizer = InputOrganizer(batch_size=FLAGS.batch_size,
                                             meanzero=FLAGS.mean_zero,
                                             num_planes=FLAGS.num_planes)
            # Build a Graph that computes predictions from the inference model.
            net_out = inference(input_organizer,
                                num_planes=FLAGS.num_planes,
                                batch_size=FLAGS.batch_size)
            print("Graph built! continuing...")
            # Add to the Graph the Ops for loss calculation.
            loss = lossF(net_out, input_organizer.get_target_placeholder())
            # Add to the Graph the Ops that calculate and apply gradients.
            print("Learning rate is: %s" % FLAGS.learning_rate)
            train_op = training(loss, FLAGS.learning_rate)
            # Add the Op to compare the logits to the labels during evaluation.
            #eval_correct = evaluation(net_out, input_organizer.get_target_placeholder())
            print("Merging summaries continuing...")
            # Build the summary operation based on the TF collection of Summaries.
            summary_op = tf.summary.merge_all()
            print("Initialize variables...")
            # Add the variable initializer Op.
            # NOTE(review): tf.initialize_all_variables is deprecated; TF 1.4
            # offers tf.global_variables_initializer with the same effect.
            init = tf.initialize_all_variables()
            print("Create a saver for writing training checkpoints...")
            # Create a saver for writing training checkpoints.
            saver = tf.train.Saver()
            print("Starting session...")
            # Create a session for running Ops on the Graph.
            #sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
            config = tf.ConfigProto()
            # Grow GPU memory on demand instead of reserving it all upfront.
            config.gpu_options.allow_growth = True
            sess = tf.Session(config=config)
            print("Creating SummaryWritter...")
            # The summary dir name encodes timestamp, host and git revision.
            git_hash = get_git_revision_short_hash()
            summary_name = datetime.now().strftime("%Y_%B_%d_%H_%M_%S")
            summary_name = "%s-%s-%s" % (summary_name, socket.gethostname(), git_hash)
            summary_dir = os.path.join(FLAGS.traindir, summary_name)
            #os.mkdir(summary_dir)
            # Instantiate a SummaryWriter to output summaries and the Graph.
            summary_writer = tf.summary.FileWriter(summary_dir, sess.graph)
            print("Started SummaryWriter -> %s" % summary_dir)
            # And then after everything is built:
            # Run the Op to initialize the variables.
            sess.run(init)
            # read validation batch
            print("Starting multiprocessing queue generator...")
            # IMPORTANT: Define generator to be used
            # Parameters for kitti dataset
            kitti_params = KittiParams(
                FLAGS.kitti_path,
                FLAGS.depth_base,
                FLAGS.depth_step,
                FLAGS.patches_per_set
            )
            generator = GeneratorQueued(
                kitti_params,
                input_organizer,
                batch_size=FLAGS.batch_size,
                extraction_workers=FLAGS.extraction_workers,
                aggregation_workers=FLAGS.aggregation_workers
            )
            print("Reading validation batch...")
            # A throwaway single-process generator supplies the fixed
            # validation batch, then is released.
            validation_gene = KittiGenerator(FLAGS.kitti_path,
                                             FLAGS.depth_base,
                                             FLAGS.depth_step)
            validation_batch = input_organizer.get_feed_dict([validation_gene.validation_batch(num_set_same_img=FLAGS.batch_size)])
            del validation_gene
            print("Done reading validation Batch!!")
            print("Done! Start training loop, validate and save every (%s steps)..." % FLAGS.validate_step)
            # Start the training loop.
            max_steps = FLAGS.max_steps
            for step in range(max_steps):
                # Get images to process in a batch
                start_time1 = time.time()
                feed_dict = generator.get_batch()
                duration_images = time.time() - start_time1
                start_time2 = time.time()
                # Run one step of the model. The return values are the activations
                # from the `train_op` (which is discarded) and the `loss` Op. To
                # inspect the values of your Ops or variables, you may include them
                # in the list passed to sess.run() and the value tensors will be
                # returned in the tuple from the call.
                _, loss_value = sess.run([train_op, loss],
                                         feed_dict=feed_dict)
                duration_net = time.time() - start_time2
                print('=== Step %d ===' % step)
                # Write the summaries and print an overview fairly often.
                if step % FLAGS.print_step == 0:
                    # Print status to stdout.
                    print('=== Step %d: loss = %.2f -> images:(%.3f sec), net:(%.3f sec) ===' % (step, loss_value, duration_images, duration_net))
                    # Update the events file.
                    summary_str = sess.run(summary_op, feed_dict=feed_dict)
                    summary_writer.add_summary(summary_str, step)
                    summary_writer.flush()
                # Save a checkpoint and evaluate the model every
                # validate_step iterations (and on the final step).
                if (step + 1) % FLAGS.validate_step == 0 or (step + 1) == max_steps:
                    print("(Step: %s) Checkpoint, saving model." % step)
                    checkpoint_file = os.path.join(summary_dir, 'checkpoint')
                    saver.save(sess, checkpoint_file, global_step=step)
                    # NOTE(review): do_eval builds new ops on each call and
                    # its returned summary is never written (see below).
                    eval_summary = do_eval(sess, validation_batch, net_out, input_organizer.get_target_placeholder())
                    #summary_writer.add_summary(eval_summary, step)
                    #summary_writer.flush()
    except Exception as e:
        print("Exception on TRAIN: %s" % e)
        traceback.print_exc()
    # Always release the session, whether training finished or failed.
    if sess:
        sess.close()
def main():
    """Parse command-line options, verify library versions, run training."""
    parser = OptionParser(usage="usage: %prog [options] filename",
                          version="%prog 1.0")
    parser.add_option("-l", "--learning-rate",
                      default=0.0001, type="float",
                      help="Learning rate")
    parser.add_option("-s", "--max-steps",
                      default=100000, type="int",
                      help="Max steps to do")
    parser.add_option("-m", "--batch-size",
                      default=5, type="int",
                      help="Batch size")
    parser.add_option("-r", "--patches-per-set",
                      default=1, type="int",
                      help="Same image set extract N patches")
    parser.add_option("-p", "--print-step",
                      default=10, type="int",
                      help="Print every N steps")
    parser.add_option("-i", "--validate-step",
                      default=1000, type="int",
                      help="Validate model every N steps")
    # NOTE(review): --num-processes is parsed but run_training() only reads
    # extraction_workers/aggregation_workers -- possibly obsolete.
    parser.add_option("-n", "--num-processes",
                      default=5, type="int",
                      help="Num processes to use when extracting images")
    parser.add_option("-k", "--kitti-path",
                      default=os.path.join(os.path.abspath(os.path.join(os.path.abspath(os.path.join(os.path.abspath(__file__)
                      , os.pardir)), os.pardir)), "../../dataset"),
                      help="Kitti sequences path")
    parser.add_option("-t", "--traindir",
                      default='tf_train',
                      help="Train directory")
    parser.add_option("-q", "--depth-base",
                      default=10.0, type="float",
                      help="Base depth to start plane sweep")
    parser.add_option("-u", "--depth-step",
                      default=0.25, type="float",
                      help="Extract a plane each base_depth + (depth_step * num_planes) ")
    parser.add_option("-e", "--extraction-workers",
                      default=5, type="int",
                      help="Number of workers to extract PSV")
    parser.add_option("-a", "--aggregation-workers",
                      default=1, type="int",
                      help="Number of workers to aggregate PSV batches")
    parser.add_option("-j", "--num-planes",
                      default=96, type="int",
                      help="Number of planes to use")
    parser.add_option("-g", "--mean-zero", action="store_false",
                      default=True,
                      help="Use mean zero on input images")
    (options, args) = parser.parse_args()
    # Code version from GIT
    print("GIT Hash: %s" % get_git_revision_short_hash())
    # Check OpenCV version >= 3.1.0
    print("OpenCV version -> %s" % cv2.__version__)
    opencv_version = cv2.__version__.split('.')
    assert int(opencv_version[0]) >= 3
    print("Tensorflow Version -> %s" % tf.__version__)
    # Pinned: the graph code above relies on TF 1.4 API behavior.
    assert tf.__version__ == '1.4.0'
    run_training(options)
# Run main
if __name__ == "__main__":
main()
|
aTeK7/deep-stereo1.4 | reprojection/reprojection.py | <reponame>aTeK7/deep-stereo1.4<gh_stars>0
import numpy as np
import cv2
from dataset_preparation.kitti_camera import Camera
from functools import reduce
class Reprojection(object):
    """Plane-sweep reprojection from a source camera onto a virtual camera.

    For a given depth, the four image corners of the virtual camera are
    back-projected onto the fronto-parallel depth plane, reprojected into
    the source camera, and the induced homography warps the source frame.
    """

    def __init__(self, camSrc, camVirtual, verbose=False):
        """
        :param camSrc: source Camera (the frame provider).
        :param camVirtual: virtual/target Camera.
        :param verbose: kept for API compatibility (unused here).
        """
        if not isinstance(camSrc, Camera) or not isinstance(camVirtual, Camera):
            raise ValueError("camSrc and camVirtual must be Camera objects")
        self.verbose = verbose
        self.camSRC = camSrc
        self.camVIRTUAL = camVirtual

    def reproject(self, depth, frame):
        """Warp `frame` onto the virtual camera assuming a plane at `depth`.

        :param depth: depth (world units) of the fronto-parallel plane.
        :param frame: HxWx3 source image.
        :return: HxWx4 warped image; channel 3 is a validity mask (0 where
            the warp sampled outside the source frame).
        """
        w = frame.shape[1]
        h = frame.shape[0]
        # 4 corners of the virtual image: these define the 4 rays that
        # intersect the depth plane.
        src_pts = np.reshape([
            0, 0,
            w, 0,
            w, h,
            0, h], (4, 2))
        dst_pts = np.ones((4, 2))
        # Loop-invariant camera parameters, hoisted out of the corner loop.
        c = self.camVIRTUAL.get_principal_point()
        f = self.camVIRTUAL.get_focal_length()
        for i, p in enumerate(src_pts):
            # Pixel -> camera coordinates at the given depth:
            #   Pc = ((x - cx) z / fx, (y - cy) z / fy, z)
            pH = np.asarray([
                (p[0] - c[0]) * depth / f[0],
                (p[1] - c[1]) * depth / f[1],
                depth,
            ])
            # Camera -> world.  R, T are assumed camera-to-world (the
            # "undo dancer" convention): Pw = R * Pc + T.  If they were
            # world-to-camera the formula would be Pw = R' * (Pc - T).
            pW = self.camVIRTUAL.getRotation().dot(pH) + self.camVIRTUAL.getTranslation()
            pW = np.append(pW, 1.0)  # to homogeneous coordinates
            # World -> source camera pixel: pix = K * E_inv * Pw.
            pix = reduce(np.dot, [self.camSRC.getIntrinsics(), self.camSRC.get_extrinsics_inv(), pW])
            # Homogeneous -> pixel coordinates.
            dst_pts[i, :] = np.asarray([pix[0] / pix[2], pix[1] / pix[2]])
        return Reprojection.do_homography(dst_pts, src_pts, frame, (w, h))

    @staticmethod
    def do_homography(src_pts, dst_pts, frame, size):
        """
        Warp `frame` by the homography mapping src_pts -> dst_pts.

        ENSURE OPENCV version is at least 3.1.0.

        :param src_pts: (4, 2) source points.
        :param dst_pts: (4, 2) destination points.
        :param frame: image to warp.
        :param size: (w, h) output size.
        :return: warped RGBA image (the added alpha channel acts as a mask
            for pixels with no source data).
        """
        w = size[0]
        h = size[1]
        # Ensure we passed proper points
        assert src_pts.shape[0] == 4 and src_pts.shape[1] == 2
        assert dst_pts.shape[0] == 4 and dst_pts.shape[1] == 2
        # Calculate the 2D homography once.  (A previous version computed
        # cv2.findHomography a second time just to print it, plus other
        # debug prints -- removed.)
        M, mask = cv2.findHomography(src_pts, dst_pts)
        # Add alpha channel to act as a mask for not existing pixels
        alpha_channel = np.ones((h, w), dtype=np.float32)
        img_RGBA = np.dstack((frame, alpha_channel))
        result = cv2.warpPerspective(img_RGBA, M, (w, h))
        return result
|
aTeK7/deep-stereo1.4 | reprojection/test_reprojection.py | <filename>reprojection/test_reprojection.py
import numpy as np
np.set_printoptions(suppress=True)
import unittest
from reprojection import Reprojection
from camera import Camera
from PIL import Image
class ReprojectionTest(unittest.TestCase):
    """Smoke test: reproject a frame between two calibrated cameras."""

    def test_reproject_back(self):
        srcCam = Camera(
            intrinsics=np.array([
                [2302.852541609168, 0.0, 960.0],
                [0.0, 2302.852541609168, 540.0],
                [0.0, 0.0, 1.0]
            ]),
            rotation=np.eye(3),
            translation=np.array([-80, 0, 0]),
            name="cam0"
        )
        virtualCam = Camera(
            intrinsics=np.array([
                [2302.852541609168, 0.0, 960.0],
                [0.0, 2302.852541609168, 540.0],
                [0.0, 0.0, 1.0]
            ]),
            rotation=np.eye(3),
            translation=np.array([-60, 0, 0]),
            name="cam1"
        )
        imgsrc = Image.open("../Dancer/Dancer_c1_frame.jpg")
        image = np.array(imgsrc.getdata(), np.uint8).reshape(imgsrc.size[1], imgsrc.size[0], 3)
        # Reprojection takes only the two cameras; the frame belongs with
        # the depth in reproject().  (The previous call passed the image as
        # the `verbose` flag and invoked reproject() without a frame.)
        r = Reprojection(srcCam, virtualCam)
        result = r.reproject(2000, image)
        # warpPerspective yields float data with a 0..1 alpha channel;
        # scale alpha to 255 and convert to uint8 for PIL.
        rgba = result.copy()
        rgba[:, :, 3] *= 255.0
        imgdst = Image.fromarray(rgba.astype(np.uint8), mode='RGBA')
        # Save as PNG: JPEG cannot store the alpha channel.
        imgdst.save("../Dancer/Dancer_cam0Fromcam1.png")
|
aTeK7/deep-stereo1.4 | dataset_preparation/test_cameras.py | <gh_stars>0
import numpy as np
np.set_printoptions(suppress=True)
import unittest
from kitti_camera import KittiCamera
from kitti_generator import KittiGenerator, extract_multipatch
from reprojection.reprojection import Reprojection
from scipy import misc
import timeit
import matplotlib.image as mpimg
from dataset_preparation.set_generator import SetGenerator
import os
class CameraParserTest(unittest.TestCase):
    """Exploratory tests for KITTI calibration parsing and PSV extraction.

    NOTE(review): these depend on hard-coded local dataset paths and on
    `MultiprocessorExtractor`, which is never imported in this module (see
    test_kitti_camera_depth_plane) -- they cannot run outside the original
    author's machine.
    """
    # Hard-coded local dataset locations.
    sequences_path = "/Volumes/Bahia/kitti-dataset/sequences"
    calibration_path = "/Volumes/Bahia/kitti-dataset/calibration"
    dataset_path = "/Volumes/Bahia/kitti-dataset"

    def test_parse_kitti(self):
        # Parsing sequence 00's calibration should expose all four cameras.
        sequence = "00"
        camera = 0
        kittiCams = KittiCamera(self.calibration_path, sequence)
        cam0 = kittiCams.getCamera(0)
        cam1 = kittiCams.getCamera(1)
        cam2 = kittiCams.getCamera(2)
        cam3 = kittiCams.getCamera(3)

    def test_kitti_generator(self):
        # Set and patch generation smoke test.
        kittiGen = KittiGenerator(self.sequences_path)
        # print kittiGen.sequence_names
        # print kittiGen.sq_len
        # print kittiGen.sq_dimensions
        sq_num, subset = kittiGen.generate_set()
        # print subset
        patches = kittiGen.generate_patch(sq_num)
        # print patches

    def test_kitti_camera_depth_plane(self):
        # Times the extraction of 96 depth planes for 4 cameras.
        kittiCams = KittiCamera(self.calibration_path, "00")
        cam_original = kittiCams.getCamera(0)
        cam_virtual = kittiCams.getCamera(1)
        # reprojection object
        r = Reprojection(cam_original, cam_virtual)
        # generate set
        kittiGen = KittiGenerator(self.sequences_path)
        sq_num, subset = kittiGen.generate_set()
        # print subset
        start_time_read = timeit.default_timer()
        image_set = [
            misc.imread(subset[0]),
            misc.imread(subset[1]),
            misc.imread(subset[2]),
            misc.imread(subset[3]),
            misc.imread(subset[4])
        ]
        start_time = timeit.default_timer()
        patches = kittiGen.generate_patch(sq_num)
        # NOTE(review): MultiprocessorExtractor is not imported anywhere in
        # this file -- this line raises NameError when executed.
        patch_set = MultiprocessorExtractor(image_set, patches, r).generate_planes()
        elapsed = timeit.default_timer() - start_time
        # print "Elapsed time to extract 96 depth planes X 4 cameras: %.2f seconds" % elapsed
        elapsed = timeit.default_timer() - start_time_read
        # print "Elapsed time with image read: %.2f seconds" % elapsed
        # print len(patch_set)
        #plt.imshow(result)
        #plt.show()

    def test_multipatch_generation(self):
        # Multi-scale patch definitions extracted from a single frame.
        set_gen = SetGenerator(os.path.join(self.dataset_path, 'sequences'))
        patches = set_gen.generate_patch("00")
        for key in patches:
            print("Patch key: %s" % key)
            patch = patches[key]
            print("TL:%s BR:%s" % (patch[0], patch[1]))
            center = (patch[0][0] + key / 2, patch[0][1] + key / 2)
            print("Center: (%s,%s)" % center)
        # print(patches)
        image = mpimg.imread('/Volumes/Bahia/kitti-dataset/sequences/00/image_2/000000.png')
        extracted = extract_multipatch(image, patches)
        print("Done")
|
aTeK7/deep-stereo1.4 | old/camera_plot.py | <filename>old/camera_plot.py
import matplotlib.pyplot as plt
import numpy as np
import math
def plot_camera(figure, camera, depth=1.0, rotate=True):
    """Draw a wireframe camera frustum (image plane + rays to center) in 3D.

    :param figure: matplotlib figure providing a 3D axes via gca().
    :param camera: object exposing getRotation()/getTranslation().
    :param depth: z of the image-plane corners in camera coordinates.
    :param rotate: also apply the camera rotation to the plotted points.
    """
    points = np.array([
        (-1, 1, depth),
        (1, 1, depth),
        (1, -1, depth),
        (-1, -1, depth),
    ])
    #print points
    zDepth = 1
    # Close the image-plane outline by appending the first corner again.
    p = zDepth * np.vstack([points, points[0, :]])
    #print p
    center = np.array([0, 0, 0])
    #print "Camera center: %s" % camera.translation
    # Interleave each corner with the camera center so one polyline traces
    # both the image-plane outline and the rays to the center.
    p = np.reshape(np.hstack([p, np.vstack([center, center, center, center, center]), p]), (15, 3))
    #p = camera.intrinsics.dot(p.T).T
    #print p
    #rotation = rotateX(90)
    rotation = camera.getRotation()
    print "Camera translation -> %s" % camera.getTranslation()
    # NOTE(review): translation is applied before the rotation (p.dot(R)),
    # unlike the usual R*p + T order -- confirm this matches the camera
    # convention used elsewhere in this project.
    p = p + camera.getTranslation()
    if rotate:
        p = p.dot(rotation)
    #print p
    X = p[:, 0]
    Y = p[:, 1]
    Z = p[:, 2]
    ax = figure.gca(projection='3d')
    #ax.set_aspect('equal', 'datalim')
    ax.plot(X, Y, Z)
    ax.set_xlim(-5, 5)
    ax.set_ylim(-5, 5)
    ax.set_zlim(-5, 5)
    ax.set_xlabel('X axis')
    ax.set_ylabel('Y axis')
    ax.set_zlabel('Z axis')
    ax.scatter(X, Y, Z)
def plot_depth_plane(figure, camera, depth=5.0):
points = np.array([
(-1, 1, depth),
(1, 1, depth),
(1, -1, depth),
(-1, -1, depth),
])
p = np.reshape(points, (4, 3))
p = p + camera.getTranslation()
p = p.dot(camera.getRotation())
ax = figure.gca(projection='3d')
X = p[:, 0]
Y = p[:, 1]
Z = p[:, 2]
ax.plot(X, Y, Z)
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
ax.set_zlim(-5, 5)
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
ax.scatter(X, Y, Z) |
aTeK7/deep-stereo1.4 | dataset_preparation/kitti_generator.py | import os
from skimage.transform import resize
from dataset_preparation.kitti_camera import KittiCamera
from reprojection.reprojection import Reprojection
import numpy as np
from scipy.misc import imsave
import matplotlib.image as mpimg
from dataset_preparation.set_generator import SetGenerator
class KittiGenerator(object):
    """Generates plane-sweep-volume training batches from the KITTI dataset.

    A batch holds the four side cameras reprojected onto the target camera
    at 96 depth planes, plus the target image -- optionally cut into
    multi-scale patches.
    """

    def __init__(self, dataset_basepath, base_depth, depth_step):
        """
        :param dataset_basepath: root containing sequences/, calibration/
            and poses/ subdirectories.
        :param base_depth: depth in meters of the first sweep plane.
        :param depth_step: spacing in meters between consecutive planes.
        """
        # paths for data
        self.sequences_path = os.path.join(dataset_basepath, 'sequences')
        self.calibration_path = os.path.join(dataset_basepath, 'calibration')
        self.pose_path = os.path.join(dataset_basepath, 'poses')
        assert os.path.exists(self.sequences_path) == 1
        assert os.path.exists(self.calibration_path) == 1
        assert os.path.exists(self.pose_path) == 1
        # Sweeping depth params
        # base depth and depth step
        self.base_depth = base_depth
        self.depth_step = depth_step
        d_from = self.base_depth
        d_to = d_from + (self.depth_step * 96.0)
        print("Depth will be swept from (%s meters to %s meters) (Step: %s)" % (d_from, d_to, depth_step))
        # Kitti calibration data cache
        self.kittiCams = KittiCamera(self.calibration_path, self.pose_path)
        # Set generation
        self.set_generator = SetGenerator(self.sequences_path)

    @staticmethod
    def get_reprojector(kitti_cams, kitti_set, subset_src, subset_virtual):
        # Build a Reprojection mapping frame `subset_src` of the set onto
        # the virtual/target frame `subset_virtual`.
        src_cam = kitti_cams.getNamedCamera(kitti_set.camera_name,
                                            kitti_set.sq_number,
                                            kitti_set.subset[subset_src])
        virtual_cam = kitti_cams.getNamedCamera(kitti_set.camera_name,
                                                kitti_set.sq_number,
                                                kitti_set.subset[subset_virtual])
        r = Reprojection(
            camSrc=src_cam,
            camVirtual=virtual_cam)
        return r

    def next_batch(self, multipatch=True, num_set_same_img=1):
        """Generate a batch from a randomly chosen frame set."""
        kitti_set = self.set_generator.random_set(num_patches=num_set_same_img)
        return self.generate_batch(kitti_set, multipatch=multipatch)

    def validation_batch(self, multipatch=True, num_set_same_img=1):
        """Generate a batch from the deterministic validation set."""
        kitti_set = self.set_generator.validation_set(num_patches=num_set_same_img)
        return self.generate_batch(kitti_set, multipatch=multipatch)

    def generate_batch(self, kitti_set, multipatch=True):
        """Reproject the 4 source frames at 96 depths and collect patches.

        The 5-frame subset uses index 2 as the target camera; the other
        four frames are warped onto it for every depth plane.

        :return: dict {'planes': {name: ndarray}, 'target': ndarray}.
        """
        camera_reprojectors = [
            KittiGenerator.get_reprojector(self.kittiCams, kitti_set, subset_src=0, subset_virtual=2),
            KittiGenerator.get_reprojector(self.kittiCams, kitti_set, subset_src=1, subset_virtual=2),
            KittiGenerator.get_reprojector(self.kittiCams, kitti_set, subset_src=3, subset_virtual=2),
            KittiGenerator.get_reprojector(self.kittiCams, kitti_set, subset_src=4, subset_virtual=2)
        ]
        # Get a list with image filenames
        image_names = self.set_generator.get_set_filenames(kitti_set)
        # images will be between 0 and 1
        image_set = [
            mpimg.imread(image_names[0]),
            mpimg.imread(image_names[1]),
            mpimg.imread(image_names[2]),  # TARGET CAMERA
            mpimg.imread(image_names[3]),
            mpimg.imread(image_names[4])
        ]
        # generate some patches
        def_patches = kitti_set.patches
        planes = dict()
        for plane in range(96):
            depth = self.base_depth + (self.depth_step * plane)
            #print("Extracting plane (%s) set at depth (%s meters)" % (plane, depth))
            image_cameras = {
                'cam0': camera_reprojectors[0].reproject(depth=depth, frame=image_set[0]),
                'cam1': camera_reprojectors[1].reproject(depth=depth, frame=image_set[1]),
                'cam3': camera_reprojectors[2].reproject(depth=depth, frame=image_set[3]),
                'cam4': camera_reprojectors[3].reproject(depth=depth, frame=image_set[4])
            }
            #print("Extraction done")
            if multipatch:
                # Cut each reprojected image into the four patch scales and
                # stack the patches from every patch definition.
                for cam_key in image_cameras:
                    image = image_cameras[cam_key]
                    item_name = "plane%s_%s" % (plane, cam_key)
                    extracted_patches = []
                    for patch_key in def_patches:
                        patch = def_patches[patch_key]
                        extracted_patches.append(extract_multipatch(image, patch))
                    planes["%s_10" % item_name] = np.concatenate([patch['10'] for patch in extracted_patches], axis=0)
                    planes["%s_12" % item_name] = np.concatenate([patch['12'] for patch in extracted_patches], axis=0)
                    planes["%s_18" % item_name] = np.concatenate([patch['18'] for patch in extracted_patches], axis=0)
                    planes["%s_30" % item_name] = np.concatenate([patch['30'] for patch in extracted_patches], axis=0)
            else:
                for cam_key in image_cameras:
                    item_name = "plane%s_%s" % (plane, cam_key)
                    planes[item_name] = image_cameras[cam_key]
        if multipatch:
            targets = []
            for k in def_patches:
                # target image is the 8x8 patch
                target_patch = def_patches[k]['t']
                targets.append(extract_patch(image_set[2], target_patch))
            im_target = np.concatenate(targets, axis=0)
        else:
            im_target = image_set[2]
        return {
            'planes': planes,
            'target': im_target
        }
def extract_multipatch(image, patches):
    """Extract the four multi-scale patches defined by `patches`.

    Each entry maps an output size label to the crop named by the
    corresponding 'psN' patch definition, resized to that side length.
    """
    scale_specs = (
        ('10', 'ps1', 10),
        ('12', 'ps2', 12),
        ('18', 'ps3', 18),
        ('30', 'ps4', 30),
    )
    return {label: extract_patch(image, patches[name], side)
            for label, name, side in scale_specs}
def extract_patch(image, patch, new_size=None):
    """Crop ``patch`` = ((x0, y0), (x1, y1)) from ``image``.

    Optionally resizes the crop to (new_size, new_size), then prepends a
    leading batch axis.
    """
    top_left, bottom_right = patch[0], patch[1]
    cut = image[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
    if new_size:
        cut = resize(cut, (new_size, new_size))
    return np.expand_dims(cut, axis=0)
def save_batch_images(batch, base_path):
    """Dump every plane image and the target of a batch as PNGs (debugging).

    :param batch: dict with 'planes' ({name: ndarray}) and 'target' keys,
        as produced by KittiGenerator.generate_batch.
    :param base_path: existing directory to write the PNG files into.
    """
    for key in batch['planes']:
        # Drop the leading batch axis before writing.
        image = np.squeeze(batch['planes'][key])
        imsave(os.path.join(base_path, "%s.png" % key), image)
    target_image = np.squeeze(batch['target'])
    imsave(os.path.join(base_path, "target.png"), target_image)
def reprojected_images_plot(batch):
    """Show a 5x4 grid of reprojected plane images (5 depths x 4 cameras)."""
    import matplotlib.pyplot as plt
    from mpl_toolkits.axes_grid1 import ImageGrid
    fig = plt.figure(1, (4., 4.))
    grid = ImageGrid(fig, 111,  # similar to subplot(111)
                     nrows_ncols=(5, 4),  # 5 rows (depths) x 4 columns (cameras)
                     axes_pad=0.0,  # pad between axes in inch.
                     )
    # Start at plane 15 and advance 3 planes per row.
    p = 15
    for i in range(0, 20, 4):
        im_cam0 = np.squeeze(batch['planes']['plane%s_cam0' % p])
        grid[i].imshow(im_cam0)
        im_cam1 = np.squeeze(batch['planes']['plane%s_cam1' % p])
        grid[i+1].imshow(im_cam1)
        # NOTE: the local names im_cam2/im_cam3 actually hold cameras 3
        # and 4 (camera 2 is the target and has no reprojection).
        im_cam2 = np.squeeze(batch['planes']['plane%s_cam3' % p])
        grid[i+2].imshow(im_cam2)
        im_cam3 = np.squeeze(batch['planes']['plane%s_cam4' % p])
        grid[i+3].imshow(im_cam3)
        p += 3
    #plt.savefig('~/Desktop/foo.png', bbox_inches='tight')
    plt.axis('off')
    plt.show()
|
aTeK7/deep-stereo1.4 | old/test.py | from load_camera import camera_0
import numpy as np
np.set_printoptions(suppress=True)
import unittest
class TestStringMethods(unittest.TestCase):
    """Exploratory checks of camera_0's OpenGL matrix conversions.

    Most cases are skipped and mainly print the converted matrices for
    visual inspection.  (The class name is a leftover from the unittest
    documentation example.)
    """

    @unittest.skip("demonstrating skipping")
    def test_intrinsic_opengl(self):
        # Intrinsics converted to a 4x4 OpenGL-style projection matrix.
        opengl_in = camera_0.get_intrinsic_opengl()
        self.assertEqual(opengl_in.shape, (4, 4))
        print "Intrinsic Matrix converted to opengl"
        print opengl_in

    @unittest.skip("demonstrating skipping")
    def test_extrinsic_opengl(self):
        # Extrinsics converted to a 4x4 OpenGL model-view matrix.
        opengl_ex = camera_0.get_extrinsic_opengl()
        self.assertEqual(opengl_ex.shape, (4, 4))
        print "Extrinsic matrix converted to opengl"
        print opengl_ex

    @unittest.skip("demonstrating skipping")
    def test_opengl_projection(self):
        # Full OpenGL projection matrix built from the calibrated camera.
        opengl_proj = camera_0.projection_opengl()
        self.assertEqual(opengl_proj.shape, (4, 4))
        print "Opengl projection matrix from calibrated cameras"
        print opengl_proj

    def test_principal_point(self):
        # Prints the principal point; no assertion (smoke test only).
        point = camera_0.get_principal_point()
        print point

    def test_planeCalculaton(self):
        # Smoke test: the plane computation at depth 50 should not raise.
        camera_0.camera_plane_at(50.0)
|
diogoalmeida/generic_control_toolbox | src/generic_control_toolbox/bag_parser.py | <filename>src/generic_control_toolbox/bag_parser.py
#!/usr/bin/env python
import glob
import os
import sys
import rosbag
import numpy as np
import rospkg
class BagParser():
    '''
    Provides functionality to parse a ROS bag which holds
    an actionlib feedback message.
    '''

    def __init__(self, pkg, rel_path, prefix):
        '''
        @param pkg ROS package that contains the bag directory.
        @param rel_path path of the bag directory relative to the package.
        @param prefix common filename prefix of the bags to load.
        '''
        rospack = rospkg.RosPack()
        self._dir = rospack.get_path(pkg) + "/" + rel_path
        self._prefix = prefix

    def getAllBags(self, elements):
        '''
        Get all data from bags in the directory.

        Bags are expected to be named "<prefix>_<N>.bag" with N starting
        at 1 and counting up without gaps.

        @param elements attributes to extract from each bag (list)
        @returns list of bag data dictionaries.
        '''
        bag_data = []
        for num in range(len(glob.glob(self._dir + "/" + self._prefix + "*.bag"))):
            bag = rosbag.Bag(self._dir + "/" + self._prefix + "_" + str(num + 1) + ".bag")
            bag_data.append(self.getBagContents(elements, bag))
        return bag_data

    def getBagContents(self, elements, bag):
        '''
        Get all messages in the bag in the format msg.feedback.elements[i], plus the respective time.

        @param elements elements to extract from each bag (list)
        @param bag a rosbag.Bag instance.
        @returns A dictionary with the data mapping {elements[i]: value};
            key 't' holds the message timestamps in seconds.
        '''
        data = {}
        data['t'] = []
        for element in elements:
            data[element] = []
        for topic, msg, time in bag.read_messages():
            data['t'].append(time.to_sec())
            for element in elements:
                msg_element = getattr(msg, element)
                list_data = self.msgToList(msg_element)
                if list_data is None:
                    raise RuntimeError("Got unsupported msg type " + msg_element._type)
                data[element].append(list_data)
        # Convert the accumulated per-message lists into numpy arrays.
        for key in data:
            data[key] = np.asarray(data[key])
        return data

    def msgToList(self, msg):
        '''
        Convert the given message to a list.

        Scalars pass through unchanged; wrench and pose messages (stamped
        or not) are flattened.  Returns None for unsupported types.
        '''
        if type(msg) is float or type(msg) is int:
            return msg
        if msg._type == 'geometry_msgs/WrenchStamped':
            return self.wrenchMsgToList(msg.wrench)
        elif msg._type == 'geometry_msgs/Wrench':
            return self.wrenchMsgToList(msg)
        elif msg._type == 'geometry_msgs/PoseStamped':
            return self.poseMsgToList(msg.pose)
        elif msg._type == 'geometry_msgs/Pose':
            return self.poseMsgToList(msg)
        else:
            return None

    def wrenchMsgToList(self, msg):
        '''
        Convert a wrench message to [fx, fy, fz, tx, ty, tz].
        '''
        # Fixed: the z force component was previously duplicated from
        # msg.force.y (copy-paste bug).
        w = [msg.force.x, msg.force.y, msg.force.z,
             msg.torque.x, msg.torque.y, msg.torque.z]
        return w

    def poseMsgToList(self, msg):
        '''
        Convert a pose message to [px, py, pz, qx, qy, qz, qw].
        '''
        p = [msg.position.x, msg.position.y, msg.position.z,
             msg.orientation.x, msg.orientation.y, msg.orientation.z, msg.orientation.w]
        return p
acevery/hlsvr | videos/urls.py | <gh_stars>0
from django.urls import path
from . import views
# URL namespace, enabling reversing as "videos:detail" / "videos:list".
app_name = 'videos'

urlpatterns = [
    # /videos/<id>/ -> single-video page
    path('<int:video_id>/', views.video_detail, name='detail'),
    # /videos/ -> listing page
    path('', views.video_list, name='list'),
]
|
acevery/hlsvr | videos/views.py | from django.shortcuts import render, get_object_or_404
from videos.models import Video
# Create your views here.
def video_detail(request, video_id):
    """Render the single-video page for the Video with pk *video_id* (404 if absent)."""
    the_video = get_object_or_404(Video, pk=video_id)
    return render(request, 'videos/single.html', {'the_video': the_video})
def video_list(request):
    """Render the page listing every Video."""
    return render(request, 'videos/list.html', {'videos': Video.objects.all()})
|
acevery/hlsvr | videos/admin.py | <reponame>acevery/hlsvr
from django.contrib import admin
from videos.models import Video
# Register your models here.
class VideoAdmin(admin.ModelAdmin):
    """Admin configuration for Video.

    BUG FIX: the attribute was misspelled ``list_dispay``, which Django
    silently ignores, so the change list fell back to showing only __str__.
    """
    list_display = ['title', 'source', 'slug', ]


admin.site.register(Video, VideoAdmin)
|
acevery/hlsvr | videos/models.py | from django.db import models
from os.path import basename
# Create your models here.
class Video(models.Model):
    """Video with source and name."""
    title = models.CharField(max_length=128)
    location = models.CharField(max_length=128)
    introduction = models.TextField()
    # Remote URL of the video file; its basename becomes the slug on save.
    source = models.URLField()
    # Derived from `source` on every save() -- not meant to be set manually.
    slug = models.SlugField(blank=True)
    created_at = models.DateField(auto_now_add=True)
    modified_at = models.DateField(auto_now=True)

    def save(self, *args, **kwargs):
        # Keep the slug in sync with the source URL's filename.
        self.slug = basename(self.source)
        super().save(*args, **kwargs)

    def __str__(self):
        return self.title

    def get_source(self):
        # NOTE(review): streaming host is hard-coded here; confirm whether it
        # should come from settings instead.
        return "http://vr.cncn.win/hls/" + self.slug

    def get_url(self):
        # Absolute path to this video's detail page (see videos/urls.py).
        return "/videos/{}/".format(self.pk)
|
aravindavk/gluster-file-history | main.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com/>
# This file is part of GlusterFS.
#
# This file is licensed to you under your choice of the GNU Lesser
# General Public License, version 3 or any later version (LGPLv3 or
# later), or the GNU General Public License, version 2 (GPLv2), in all
# cases as published by the Free Software Foundation.
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from datetime import datetime
import changelogparser
# Accumulated (timestamp, gfid, description) events for the tracked file.
history = []
# GFIDs currently being tracked (grows when the tracked path is created).
file_gfids = set()
# Parsed CLI arguments; populated by main().
args = None
# "pgfid/basename" paths being tracked; grows when renames are traced.
paths_to_trace = set()
# Pass counter: 0 = rename-collection pass, 1 = main pass (second pass only
# happens with --trace-rename).
turn = 0
def human_time(ts):
    """Format a unix timestamp (string or number) as 'YYYY-MM-DD HH:MM:SS'."""
    moment = datetime.fromtimestamp(float(ts))
    return moment.strftime("%Y-%m-%d %H:%M:%S")
def get_args():
    """Build the command-line parser and parse sys.argv."""
    parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
                            description=__doc__)
    # Required positionals, in order.
    positionals = (
        ("changelogs_list", "List of Changelogs to process"),
        ("pgfid", "Parent directory GFID"),
        ("basename", "Basename of File"),
    )
    for arg_name, arg_help in positionals:
        parser.add_argument(arg_name, help=arg_help)
    parser.add_argument("--trace-rename", action="store_true",
                        help="Trace Renamed Files")
    return parser.parse_args()
def process_changelog_record(record):
    """Changelog-record callback: accumulate events for the tracked file.

    Relies on module globals: args (CLI options), history (collected events),
    file_gfids (GFIDs being tracked), paths_to_trace ("pgfid/basename"
    aliases) and turn (0 = rename-collection pass, 1 = main pass).
    """
    global args, history, file_gfids, paths_to_trace, turn

    # If Trace Rename is set, do not process other fops in the first pass --
    # it only collects the alternate path names introduced by RENAMEs.
    if turn == 0 and args.trace_rename and record.fop != "RENAME":
        return

    if record.fop in ["CREATE", "MKNOD"]:
        # The file we are tracking was created: record it and start
        # following its GFID for data/metadata changes.
        if record.path in paths_to_trace:
            history.append((record.ts, record.gfid,
                            "{0} {1}".format(record.fop,
                                             record.path)))
            file_gfids.add(record.gfid)
    elif record.fop_type == "D":
        # Data modification of a tracked GFID.
        if record.gfid in file_gfids:
            history.append((record.ts, record.gfid, "DATA"))
    elif record.fop_type == "M":
        # Metadata modification of a tracked GFID.
        if record.gfid in file_gfids:
            history.append((record.ts, record.gfid, "META"))
    elif record.fop == "RENAME" and args.trace_rename:
        # First pass: when the *new* name is tracked, start tracking the old
        # name too so earlier operations on it are picked up in pass two.
        if turn == 0:
            if record.path2 in paths_to_trace:
                paths_to_trace.add(record.path1)
            return

        # Second pass: record the rename when the old name is tracked.
        if record.path1 in paths_to_trace:
            paths_to_trace.add(record.path2)
            history.append((record.ts, record.gfid,
                            "{0} {1} {2}".format(
                                record.fop,
                                record.path1,
                                record.path2)))
    elif record.fop == "UNLINK":
        # The file we are tracking was deleted.
        # FIX: removed an unused `pgfid, bn = record.path.split("/")` unpack,
        # which did nothing and could raise on unexpected path shapes.
        if record.path in paths_to_trace:
            history.append((record.ts, record.gfid,
                            "{0} {1}".format(
                                record.fop,
                                record.path)))
def main():
    """Parse args, scan the changelogs (twice with --trace-rename) and print
    the collected history of the tracked file."""
    global args, history, paths_to_trace, turn
    args = get_args()
    # Seed tracking with the "pgfid/basename" the user asked about.
    paths_to_trace.add("{0}/{1}".format(args.pgfid, args.basename))
    with open(args.changelogs_list) as f:
        for line in f:
            changelogparser.parse(line.strip(),
                                  callback=process_changelog_record)

        if args.trace_rename:
            # Second pass over the same list: turn == 1 makes the callback
            # process every fop, now that all rename aliases are known.
            f.seek(0)
            turn += 1
            for line in f:
                changelogparser.parse(line.strip(),
                                      callback=process_changelog_record)

    if history:
        # Fixed-width table: DATE (20) | GFID (36) | free-form details.
        print("{0:20s} {1:36s} {2}".format("DATE", "GFID", "DETAILS"))
        print("-"*70)
        for line in history:
            print("{0:20s} {1:36s} {2}".format(human_time(line[0]),
                                               line[1], line[2]))


if __name__ == "__main__":
    main()
|
bfontaine/Algorithms | dtw/dtw.py | #! /usr/bin/env python
# -*- coding: UTF-8 -*-
# Dynamic Time Wraping
#
# Good explanation: https://www.youtube.com/watch?v=_K1OsqCicBY
def avg(xs):
    """Arithmetic mean of *xs* as a float."""
    total = float(sum(xs))
    return total / len(xs)
def dtw(series1, series2):
    """
    Compute a distance between two numerical series using the DTW algorithm.

    Builds the accumulated-cost matrix, then averages the costs collected
    along a greedy monotonic walk from (0, 0) towards the far corner.
    """
    w = len(series1)
    h = len(series2)

    # matrix[y][x] = |series1[x] - series2[y]| + cheapest accumulated cost of
    # any predecessor cell (left, below, or diagonal).
    matrix = [[0] * w for _ in range(h)]

    for x in range(w):
        for y in range(h):
            d = abs(series1[x] - series2[y])
            # BUG FIX: candidates used to be seeded with a literal 0, which
            # made min(candidates) always 0 and reduced every cell to the
            # bare pointwise distance. Only real predecessor cells belong
            # here; the (0, 0) base case has no predecessor and keeps cost d.
            candidates = []
            if x > 0:
                candidates.append(matrix[y][x - 1])
            if y > 0:
                candidates.append(matrix[y - 1][x])
            if x > 0 and y > 0:
                candidates.append(matrix[y - 1][x - 1])
            matrix[y][x] = d + (min(candidates) if candidates else 0)

    # Greedy walk: from each cell step to the cheapest right/up/diagonal
    # neighbour, collecting accumulated costs until no move remains.
    distances = []
    x = 0
    y = 0
    while x < w or y < h:
        distances.append(matrix[y][x])

        candidates = []
        if x + 1 < w:
            candidates.append((x + 1, y))
        if y + 1 < h:
            candidates.append((x, y + 1))
        if x + 1 < w and y + 1 < h:
            candidates.append((x + 1, y + 1))

        if not candidates:
            break

        min_distance = None
        for next_x, next_y in candidates:
            d = matrix[next_y][next_x]
            if min_distance is None or d < min_distance:
                min_distance = d
                x = next_x
                y = next_y

    # Average cost along the path (avg() inlined so the function is
    # self-contained).
    return sum(distances) / float(len(distances))
def euclid(series1, series2):
    """
    Compute a distance between two numerical series using Euclidian distances
    between their points at the same index.
    """
    # zip truncates to the shorter series, matching min(len, len) indexing.
    deltas = [abs(a - b) for a, b in zip(series1, series2)]
    return sum(deltas) / float(len(deltas))
def main():
    """Demo: compare Euclidean vs DTW distance on two shifted copies of a signal."""
    signal = [1, 5, 9, -3, -2, -2, 3, 12, 8, 9, 3]

    def padded(left, right):
        # Same signal embedded at different offsets in a zero background.
        return [0] * left + signal + [0] * right

    s1 = padded(4, 10)
    s2 = padded(9, 1)
    print("Euclid: %d" % euclid(s1, s2))
    print("DTW: %d" % dtw(s1, s2))
|
JohnMcGrane/ParticleFinder | FilterView.py | <gh_stars>0
"""*******************************************************************************
*
* Author: <NAME>
* Institution: University of Minnesota Duluth
* College: Swenson College of Science and Engineering
* Department: Chemistry & Biochemistry
* Copyright: <NAME>, 2021
*
******************************************************************************"""
from utilities import *
class FilterView:
    """One microscope field of view over a filter, backed by a directory of
    per-pixel spectra (CSV files).

    Improvements over the original: ``particleSize`` used to run the
    expensive ``particleCount`` grouping three times with identical
    arguments (now once), and the mutable default argument ``particles=[]``
    was replaced with ``None`` (same observable behaviour).
    """

    def __init__(self, width, height, directory, um=50, large=False, particles=None):
        """Instantiate a FilterView object.

        Arguments:
        width -- number of spectra along the x-axis of the field of view (int)
        height -- number of spectra along the y-axis of the field of view (int)
        directory -- directory containing all CSV files in the field of view (str)

        Keyword arguments:
        um -- step size, in microns, between consecutive spectra (default 50)
        large -- set True when dealing with particles larger than ~200 microns
                 (default False)
        particles -- xy filter pixel coordinates found by manual spectral
                     analysis (default: empty list)
        """
        self.width = width
        self.height = height
        self.name = directory
        self.stepsize = um
        self.large = large
        self.filelist = createFileList(self.name)
        self.manual = [] if particles is None else particles
        self.differencelist = diffList(self.filelist, self.name)
        self.pixellist = pixelList(self.filelist, self.differencelist, self.cutoffNumber())

    def cutoffNumber(self):
        """Cutoff based on the distribution of reflectance differences for
        all spectra in the field of view."""
        return cutoffMaker(self.large, self.differencelist)

    def particleCount(self):
        """Computationally determined microplastic particle count."""
        return particleCount(self.pixellist, [self.width, self.height])[0]

    def particleSize(self):
        """Average particle diameter assuming all particles are circular.

        FIX: runs the grouping pass once instead of three identical calls.
        """
        _, group_list, size_list = particleCount(self.pixellist, [self.width, self.height])
        return sizer(group_list, size_list, self.stepsize)

    def histogram(self):
        """Histogram of reflectance differences
        (reflectance at 2750 cm-1 minus reflectance at 2850 cm-1)."""
        # Explicit ==True/==False comparisons preserved: a non-bool `large`
        # falls through and createHist then fails on the undefined list,
        # exactly as before.
        if self.large == True:
            paredlist = toGauss(self.differencelist)
        elif self.large == False:
            paredlist = noGauss(self.differencelist)
        createHist(paredlist, self.differencelist, self.cutoffNumber())

    def showView(self):
        """Display a screenshot of the filter if one is located in the
        directory with the CSV files."""
        showScreenShot(self.name)

    def showChemigram(self):
        """Visualise the plastic particles identified computationally."""
        showChemigram(self.pixellist, [self.width, self.height])

    def visualCompare(self, startx, starty):
        """Visualisation comparing manually and computationally identified
        particles.

        Arguments:
        startx -- x coordinate of the leftmost pixel in the field of view (int)
        starty -- y coordinate of the lowest pixel in the field of view (int)
        """
        humanlist = self.manualPixels()
        likely = self.candidatePixels(startx, starty)
        humanpixels, comppixels = transformer(humanlist, likely, startx, starty,
                                              self.width, self.stepsize)
        printout(humanpixels, comppixels, [self.width, self.height])

    def textCompare(self, startx, starty):
        """Text comparison of manually and computationally identified
        particles: counts for both, plus the overlap.

        Arguments:
        startx -- x coordinate of the leftmost pixel in the field of view (int)
        starty -- y coordinate of the lowest pixel in the field of view (int)
        """
        humanlist = self.manualPixels()
        likely = self.candidatePixels(startx, starty)
        summary(humanlist, likely, self.particleCount(), self.manual)

    def candidatePixels(self, startx, starty):
        """xy pixel coordinates the computer identified as plastics.

        Arguments:
        startx -- x coordinate of the leftmost pixel in the field of view (int)
        starty -- y coordinate of the lowest pixel in the field of view (int)
        """
        return candidates(self.pixellist, self.filelist, startx, starty,
                          [self.width, self.height], self.stepsize)

    def manualPixels(self):
        """xy pixel coordinates the researcher identified as plastics.

        Requires coordinates passed via the "particles" keyword argument.
        """
        return listcleaner(self.manual)
if __name__ == '__main__':
    # The following script is designed to be run from the command line.
    # User particles can be copied and pasted in to the command line to
    # enable comparison of the computer program and the manual analysis.
    file = input("Enter directory name: ")
    xdim = int(input("Enter x-dimension: "))
    ydim = int(input("Enter y-dimension: "))
    ask = input("Compare to manual analysis: (y/n)")
    if ask == 'y':
        print("Enter manually identified pixel coordinates: ")
        # Read pasted coordinate lines until a blank line terminates input.
        lines = []
        while True:
            line = input()
            if line:
                lines.append(line)
            else:
                break
        foundlist = '\n'.join(lines)
        startx = int(input("First x coordinate: "))
        starty = int(input("First y coordinate: "))
    else:
        # No manual data: empty particle list and origin at (0, 0).
        foundlist = []
        startx = 0
        starty = 0

    view1 = FilterView(xdim, ydim, file, particles=foundlist)
    view1.showView()
    view1.showChemigram()
    view1.textCompare(startx, starty)
    print(f'Particle Size : {view1.particleSize()}')
    print(f'Cutoff : {view1.cutoffNumber()}')
    print(f'Particle Count : {view1.particleCount()}')
    view1.visualCompare(startx, starty)
    view1.histogram()
    print(view1.candidatePixels(startx, starty))
    print(view1.manualPixels())
|
JohnMcGrane/ParticleFinder | utilities.py | import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
from scipy.stats import norm
import pandas as pd
import os
import re
from scipy import ndimage
from scipy.stats.mstats import mquantiles
from PIL import Image
import matplotlib.image as mpimg
def createFileList(name):
    """Return a one-column DataFrame ("filename") with the sorted basenames of
    all .CSV files found in directory *name*, resolved against the current
    working directory.

    FIX: removed an unused counter variable and replaced the manual
    append loop with a comprehension.
    """
    path = os.path.abspath(os.getcwd())
    filelist = [os.path.basename(filename)
                for filename in os.listdir(f"{path}/{name}/")
                if filename.endswith(".CSV")]
    df = pd.DataFrame(filelist)
    df.rename(columns={df.columns[0]: "filename"}, inplace=True)
    return df.sort_values("filename", ignore_index=True, ascending=True)
def cutoffMaker(booly, dlist):
    """Outlier cutoff (Q3 + 1.5*IQR) for the reflectance differences.

    For large particles the list is first pruned of outliers via toGauss;
    for small particles the cutoff comes straight from the raw list.
    """
    if booly == True:
        # toGauss repeatedly removes upper outliers before the final cutoff.
        pared = toGauss(dlist)
        return mquantiles(pared)[2] + 1.5 * stats.iqr(pared)
    elif booly == False:
        return mquantiles(dlist)[2] + 1.5 * stats.iqr(dlist)
    else:
        print("Keyword argument \"largeParticles\" must be a Boolean")
def diffList(flist, name):
    """Return, for every spectrum CSV listed in *flist*, the difference
    between the reflectance at 2750 cm-1 and at 2850 cm-1.

    Works with CSVs of any spectral range/resolution; the rows closest to
    2750/2850 cm-1 are located once on the first file (all spectra share the
    same wavenumber axis).

    FIXES: the directory was hard-coded to an absolute path on the author's
    machine -- it is now resolved against the current working directory,
    consistent with createFileList; positional `Series[int]` indexing was
    replaced with explicit `.iloc`.
    """
    path = os.path.abspath(os.getcwd())
    numlistin = []
    i2850 = i2750 = None
    for j in range(flist.size):
        df1 = pd.read_csv(f"{path}/{name}/{flist['filename'][j]}")
        df1.rename(columns={df1.columns[0]: "wavenumber",
                            df1.columns[1]: "reflectance"}, inplace=True)
        if j == 0:
            # Row positions closest to the two target wavenumbers.
            i2850 = (df1['wavenumber'] - 2850).abs().argsort().iloc[0]
            i2750 = (df1['wavenumber'] - 2750).abs().argsort().iloc[0]
        numlistin.append(df1.iloc[i2750, 1] - df1.iloc[i2850, 1])
    return numlistin
def pixelList(flist, dlist, coff):
    """Indices of chemigram pixels whose reflectance difference exceeds the
    cutoff, i.e. those that look like microplastic particles.

    flist -- the CSV file list (only its .size is used)
    dlist -- reflectance differences, one per spectrum
    coff -- cutoff value
    """
    return [j for j in range(flist.size) if dlist[j] > coff]
def candidates(plist, flist, x, y, dimz, stepsize):
    """Convert flat positive-pixel indices into stage (x, y) coordinates.

    plist -- flat indices of positive pixels
    flist -- unused; kept for call-site compatibility
    x, y -- stage coordinates of the bottom-left corner of the chemigram
    dimz -- (width, height) of the chemigram in spectra
    stepsize -- microns between consecutive spectra

    FIX: removed an unused `np.zeros((dimz[0], dimz[1]))` state matrix that
    was allocated and never read.
    """
    likely = []
    for istep in plist:
        ix1 = np.remainder(istep, dimz[0])
        iy1 = int(np.floor(istep / dimz[0]))
        likely.append((x + ix1 * stepsize, y + iy1 * stepsize))
    return likely
def listcleaner(newlist):
    """Parse a whitespace-separated "x y x y ..." string into (x, y) tuples.

    Falsy input (empty string or empty list) yields an empty list.
    """
    if not newlist:
        return []
    tokens = re.split('\s', newlist)
    pairs = []
    k = 0
    # Consume tokens two at a time: x then y.
    while k < len(tokens):
        pairs.append((int(tokens[k]), int(tokens[k + 1])))
        k += 2
    return pairs
def updateState(plist, dimz, update=False, statein=0, incrementby=2):
    """Create or update a state matrix from flat positive-pixel indices.

    With update=False a fresh zero matrix of shape (dimz[0], dimz[1]) is
    created; otherwise *statein* is modified in place. Each listed pixel's
    cell is raised by *incrementby* (default 2).
    """
    state = statein if update else np.zeros((dimz[0], dimz[1]), float)
    for idx in plist:
        col = np.remainder(idx, dimz[0])
        row = int(np.floor(idx / dimz[0]))
        state[col, row] += incrementby
    return state
def particleCount(plist, dimz):
    """Count connected groups (8-connectivity) of positive pixels.

    plist -- flat indices of positive pixels
    dimz -- [width, height] of the chemigram

    Returns (particles, grouplist, sizelist):
      particles -- number of distinct particle groups
      grouplist -- one unique marker value per group (no repeats)
      sizelist  -- marker value of every positive pixel; counting repeats of
                   a group's marker recovers its pixel count (see sizer)
    """
    state = updateState(plist, dimz)
    t = 1
    for istep in plist:  # Iterate the positive pixels and get matrix coordinates
        ix1 = np.remainder((istep), dimz[0])
        iy1 = int(np.floor((istep)/(dimz[0])))
        # Recursively visit all connected pixels and mark them with the same
        # unique value t (t grows by 2 per seed, so markers never collide
        # with the initial value 2).
        visiter(ix1, iy1, state, t, dimz)
        t += 2
    particles = 0
    grouplist = []
    sizelist = []
    negative = 2
    for istep in plist:
        ix1 = np.remainder((istep), dimz[0])
        iy1 = int(np.floor((istep)/(dimz[0])))
        sizelist.append(state[ix1, iy1])  # THIS IS FOR SIZING
        # NOTE(review): a pixel still equal to 2 appears to be an isolated
        # pixel that visiter never re-marked; each such pixel is given its
        # own group via the strictly decreasing `negative` marker -- confirm.
        if state[ix1, iy1] == 2:
            particles += 1
            grouplist.append(negative)
            negative -= 3
        elif state[ix1, iy1] != 0 and state[ix1, iy1] not in grouplist:
            particles += 1
            grouplist.append(state[ix1, iy1])
    return particles, grouplist, sizelist
    # particles is a count of the number of positive particles
    # grouplist is a list of the unique values that each group has. Its length is
    # the number of particles. Has no repeat values.
    # sizelist is a list of the values of all positive pixels. Unless all positive
    # pixels are not attached to one another, this list will be larger than
    # grouplist. For example, if a particle is made up of 10 pixels, its unique
    # value is added to sizelist ten times. grouplist and sizelist are passed to
    # the "sizer" function later on.
def visiter(x, y, current, num, dimz):
    """Recursive flood fill: mark every still-unvisited (==2) neighbour of
    (x, y) -- diagonals included -- with *num*, then recurse into it.

    All diagonal pixels are assumed attached, i.e. part of the same particle.
    The visiting order differs from a hand-unrolled neighbour list, but the
    final marked state is identical.
    """
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            if dx == 0 and dy == 0:
                continue
            nx, ny = x + dx, y + dy
            # Stay inside the matrix and only touch unvisited (==2) cells.
            if 0 <= nx < dimz[0] and 0 <= ny < dimz[1] and current[nx, ny] == 2:
                current[nx, ny] = num
                visiter(nx, ny, current, num, dimz)
def summary(hlist, likly, parts, truth):
    """Print a text summary of the computational analysis.

    When *truth* (the raw manual data) is falsy only the computer count is
    shown; otherwise the manual count, computer count and their overlap are
    printed.
    """
    numright = sum(1 for px in hlist if px in likly)
    if not truth:
        print(f'Computer Found Particles: {parts}')
    else:
        print(f'User Found Particles : {len(hlist)}')
        print(f'Computer Found Particles: {parts}')
        print(f'Intersection Number : {numright}')
def transformer(hlist, likly, startx, starty, xdim, stepsize):
    """Map researcher and computer (x, y) stage coordinates to flat pixel
    indices (column + row * xdim).

    startx/starty -- stage coordinates of the chemigram origin
    xdim -- number of spectra along the x-axis
    stepsize -- microns between consecutive spectra
    """
    def to_indices(coords):
        return [int((i - startx) / stepsize) + int(((j - starty) / stepsize) * xdim)
                for i, j in coords]

    return to_indices(hlist), to_indices(likly)
def sizer(grouplist, sizelist, stepsize):
    """Average particle diameter in microns, assuming circular particles.

    Each group's pixel count (repeats of its marker in *sizelist*) times the
    pixel area gives its area; the mean area is converted to a diameter.
    """
    pixel_counts = [sizelist.count(marker) for marker in grouplist]
    mean_area = np.mean(pixel_counts) * (stepsize * stepsize)
    return np.sqrt(mean_area / np.pi) * 2
def printout(hpixels, cpixels, dimz):
    """Render an image comparing manual (value 1) and computer (value 2,
    overlap 3) particle pixels; takes flat indices from `transformer`."""
    base = updateState(hpixels, dimz, incrementby=1)
    combined = updateState(cpixels, dimz, update=True, statein=base, incrementby=2)
    plt.figure(figsize=(7, 7))
    plt.imshow(ndimage.rotate(combined, 90))
    plt.axis('off')
    plt.show()
def showChemigram(plist, dimz):
    """Render the chemigram of computationally identified particle pixels."""
    chemigram = updateState(plist, dimz)
    plt.figure(figsize=(7, 7))
    plt.imshow(ndimage.rotate(chemigram, 90))
    plt.axis('off')
    plt.show()
def showScreenShot(name):
    """Display the first .JPG screenshot found in directory *name*; print a
    notice when none exists.

    FIXES: the directory was hard-coded to an absolute path on the author's
    machine -- it is now resolved against the current working directory,
    consistent with createFileList; removed unused width/height locals and
    dead commented-out cropping code.
    """
    path = os.path.abspath(os.getcwd())
    for filename in os.listdir(f"{path}/{name}/"):
        if filename.endswith(".JPG"):
            im = Image.open(f"{path}/{name}/" + filename)
            plt.imshow(im)
            plt.show()
            return
    print("No screenshot available in current directory")
def createHist(plist, dlist, coff):
    """Histogram of reflectance differences (2750 - 2850 cm-1) with the
    cutoff drawn as a dotted blue vertical line and a scaled normal curve
    (dashed red) overlaid so the normality assumption can be eyeballed.

    plist -- pixels positively identified as plastic (sets the normal fit)
    dlist -- all reflectance differences (the histogram data)
    coff -- cutoff value
    """
    lo, hi = min(plist), max(plist)
    bins = np.linspace(start=lo, stop=hi, num=int(abs(hi - lo)) * 4)
    plt.figure(figsize=(9, 5))
    plt.hist(dlist, np.arange(-10, 50, 1))
    plt.vlines(coff, ymin=0, ymax=210, color="blue", linestyle="dotted")
    plt.ylim(0, 210)
    plt.xlabel("Difference", size=20)
    plt.ylabel("Occurrences", size=20)
    plt.plot(bins,
             len(plist) * norm.pdf(bins, loc=np.mean(plist), scale=np.std(plist)),
             color="red", linestyle="dashed")
    plt.xticks(fontsize=16)
    plt.yticks(fontsize=16)
    plt.show()
def toGauss(ls):
    """Repeatedly strip upper outliers (values >= Q3 + 1.5*IQR) until none
    remain, then return the pruned list.

    Iterative equivalent of the original recursive formulation: each round
    recomputes the cutoff on the surviving values and stops as soon as a
    round removes nothing.
    """
    current = ls
    while True:
        cutoff = mquantiles(current)[2] + 1.5 * stats.iqr(current)
        kept = [v for v in current if v < cutoff]
        if len(kept) == len(current):
            return current
        current = kept
def noGauss(ls):
    """Single-pass variant of toGauss: strip only the first round of upper
    outliers (values >= Q3 + 1.5*IQR), so fewer values end up discarded."""
    cutoff = mquantiles(ls)[2] + 1.5 * stats.iqr(ls)
    return [v for v in ls if v < cutoff]
|
billy-horn/lightning-flash | tests/core/test_trainer.py | from typing import Any
import pytest
import torch
from torch import nn
from torch.nn import functional as F
from flash import ClassificationTask, Trainer
from flash.core.finetuning import Freeze, NoFreeze
class DummyDataset(torch.utils.data.Dataset):
    """100-sample dataset of random 1x28x28 tensors.

    In predict mode only the sample is returned; otherwise a
    (sample, label) pair with a random label in [0, 10).
    """

    def __init__(self, predict: bool = False):
        self._predict = predict

    def __getitem__(self, index: int) -> Any:
        sample = torch.rand(1, 28, 28)
        if self._predict:
            return sample
        label = torch.randint(10, size=(1, )).item()
        return sample, label

    def __len__(self) -> int:
        return 100
class DummyClassifier(nn.Module):
    """Minimal backbone+head classifier: flatten -> linear(784, 10) -> log-softmax."""

    def __init__(self):
        super().__init__()
        self.backbone = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
        self.head = nn.LogSoftmax()

    def forward(self, x):
        features = self.backbone(x)
        return self.head(features)
def test_task_fit(tmpdir: str):
    """A ClassificationTask should complete a fast_dev_run fit."""
    model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10), nn.LogSoftmax())
    train_dl = torch.utils.data.DataLoader(DummyDataset())
    val_dl = torch.utils.data.DataLoader(DummyDataset())
    task = ClassificationTask(model, F.nll_loss)
    trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir)
    assert trainer.fit(task, train_dl, val_dl)
def test_task_finetune(tmpdir: str):
    """finetune() with the NoFreeze strategy should succeed."""
    classifier = DummyClassifier()
    train_loader = torch.utils.data.DataLoader(DummyDataset())
    val_loader = torch.utils.data.DataLoader(DummyDataset())
    task = ClassificationTask(classifier, F.nll_loss)
    trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir)
    outcome = trainer.finetune(task, train_loader, val_loader, strategy=NoFreeze())
    assert outcome
|
billy-horn/lightning-flash | tests/vision/classification/test_model.py | <filename>tests/vision/classification/test_model.py
import pytest
import torch
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from flash import Trainer
from flash.vision import ImageClassifier
# ======== Mock functions ========
class DummyDataset(torch.utils.data.Dataset):
    """100 random (3x224x224 image, label in [0, 10)) pairs."""

    def __getitem__(self, index):
        image = torch.rand(3, 224, 224)
        label = torch.randint(10, size=(1, )).item()
        return image, label

    def __len__(self):
        return 100
# ==============================
@pytest.mark.parametrize(
    "backbone",
    [
        "resnet18",
        # Larger backbones disabled to keep CI fast; re-enable as needed.
        # "resnet34",
        # "resnet50",
        # "resnet101",
        # "resnet152",
    ],
)
def test_init_train(tmpdir, backbone):
    # Smoke test: construct an ImageClassifier and finetune it for one
    # fast_dev_run step with the freeze_unfreeze strategy.
    model = ImageClassifier(10, backbone=backbone)
    train_dl = torch.utils.data.DataLoader(DummyDataset())
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    trainer.finetune(model, train_dl, strategy="freeze_unfreeze")
def test_non_existent_backbone():
    # Unknown backbone names must be rejected with a MisconfigurationException.
    with pytest.raises(MisconfigurationException):
        ImageClassifier(2, "i am never going to implement this lol")
def test_freeze():
    """After freeze(), no backbone parameter requires grad."""
    model = ImageClassifier(2)
    model.freeze()
    assert all(p.requires_grad is False for p in model.backbone.parameters())
def test_unfreeze():
    """After unfreeze(), every backbone parameter requires grad."""
    model = ImageClassifier(2)
    model.unfreeze()
    assert all(p.requires_grad is True for p in model.backbone.parameters())
|
billy-horn/lightning-flash | flash/vision/embedding/model_map.py | <reponame>billy-horn/lightning-flash
from contextlib import suppress
from typing import Type
from pytorch_lightning.utilities import _BOLTS_AVAILABLE
from pytorch_lightning.utilities.exceptions import MisconfigurationException
if _BOLTS_AVAILABLE:
with suppress(TypeError):
from pl_bolts.models.self_supervised import SimCLR, SwAV
ROOT_S3_BUCKET = "https://pl-bolts-weights.s3.us-east-2.amazonaws.com"
def load_simclr_imagenet(path_or_url: str = f"{ROOT_S3_BUCKET}/simclr/bolts_simclr_imagenet/simclr_imagenet.ckpt"):
    """Load the bolts SimCLR ImageNet checkpoint and return its encoder.

    @returns dict with the backbone under 'model' and its embedding size
             under 'emb_size'.
    """
    # strict=False: presumably the checkpoint carries extra (projection-head)
    # weights not present in the bare encoder -- TODO confirm.
    simclr = SimCLR.load_from_checkpoint(path_or_url, strict=False)
    model_config = {'model': simclr.encoder, 'emb_size': 2048}
    return model_config
def load_swav_imagenet(path_or_url: str = f"{ROOT_S3_BUCKET}/swav/swav_imagenet/swav_imagenet.pth.tar"):
    """Load the bolts SwAV ImageNet checkpoint and return its model.

    @returns dict with the backbone under 'model' and its output feature
             count under 'num_features'.
    """
    swav = SwAV.load_from_checkpoint(path_or_url, strict=True)
    model_config = {'model': swav.model, 'num_features': 3000}
    return model_config
# Registry mapping embedding-backbone names to their loader functions.
_models = {'simclr-imagenet': load_simclr_imagenet, 'swav-imagenet': load_swav_imagenet}


def _load_model(name):
    """Resolve *name* via the registry and return the loaded model config.

    Raises MisconfigurationException when lightning-bolts is not installed
    or the name is not registered.
    """
    if not _BOLTS_AVAILABLE:
        raise MisconfigurationException("Bolts isn't installed. Please, use ``pip install lightning-bolts``.")
    if name in _models:
        return _models[name]()
    raise MisconfigurationException("Currently, only `simclr-imagenet` and `swav-imagenet` are supported.")
|
billy-horn/lightning-flash | tests/vision/classification/test_data.py | from pathlib import Path
import numpy as np
import torch
from PIL import Image
from torchvision import transforms as T
from flash.vision import ImageClassificationData
def _dummy_image_loader(filepath):
return torch.rand(3, 64, 64)
def _rand_image():
    """Random 64x64 RGB PIL image built from uint8 noise."""
    pixels = np.random.randint(0, 255, (64, 64, 3), dtype="uint8")
    return Image.fromarray(pixels)
def test_from_filepaths(tmpdir):
    """from_filepaths with a train-only split, then with train/valid/test
    splits, using the dummy loader (filepaths are never actually opened)."""
    img_data = ImageClassificationData.from_filepaths(
        train_filepaths=["a", "b"],
        train_labels=[0, 1],
        train_transform=lambda x: x,  # make sure transform works
        loader=_dummy_image_loader,
        batch_size=1,
        num_workers=0,
    )
    data = next(iter(img_data.train_dataloader()))
    imgs, labels = data
    assert imgs.shape == (1, 3, 64, 64)
    assert labels.shape == (1, )

    # Without valid_/test_ arguments there are no corresponding loaders.
    assert img_data.val_dataloader() is None
    assert img_data.test_dataloader() is None

    img_data = ImageClassificationData.from_filepaths(
        train_filepaths=["a", "b"],
        train_labels=[0, 1],
        train_transform=None,
        valid_filepaths=["c", "d"],
        valid_labels=[0, 1],
        valid_transform=None,
        test_filepaths=["e", "f"],
        test_labels=[0, 1],
        loader=_dummy_image_loader,
        batch_size=1,
        num_workers=0,
    )
    data = next(iter(img_data.val_dataloader()))
    imgs, labels = data
    assert imgs.shape == (1, 3, 64, 64)
    assert labels.shape == (1, )

    data = next(iter(img_data.test_dataloader()))
    imgs, labels = data
    assert imgs.shape == (1, 3, 64, 64)
    assert labels.shape == (1, )
def test_from_folders(tmpdir):
    """from_folders: build a tiny two-class image tree and exercise all loaders."""
    train_dir = Path(tmpdir / "train")
    train_dir.mkdir()
    # two class folders ("a", "b") with two images each
    for label in ("a", "b"):
        (train_dir / label).mkdir()
        _rand_image().save(train_dir / label / "1.png")
        _rand_image().save(train_dir / label / "2.png")
    dm = ImageClassificationData.from_folders(
        train_dir, train_transform=None, loader=_dummy_image_loader, batch_size=1
    )
    imgs, labels = next(iter(dm.train_dataloader()))
    assert imgs.shape == (1, 3, 64, 64)
    assert labels.shape == (1, )
    assert dm.val_dataloader() is None
    assert dm.test_dataloader() is None
    dm = ImageClassificationData.from_folders(
        train_dir,
        train_transform=T.ToTensor(),
        valid_folder=train_dir,
        valid_transform=T.ToTensor(),
        test_folder=train_dir,
        batch_size=1,
        num_workers=0,
    )
    for loader in (dm.val_dataloader(), dm.test_dataloader()):
        imgs, labels = next(iter(loader))
        assert imgs.shape == (1, 3, 64, 64)
        assert labels.shape == (1, )
|
billy-horn/lightning-flash | flash/vision/classification/dataset.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from glob import glob
import numpy as np
from flash.core.data import download_data
def hymenoptera_data_download(path: str, predict_size: int = 10):
    """Download the hymenoptera dataset and carve out a small ``predict`` split.

    Downloads and extracts the archive under ``path``, then copies
    ``predict_size`` randomly chosen validation images into
    ``<path>/hymenoptera_data/predict``. Idempotent: does nothing when the
    predict folder is already populated.

    Args:
        path: destination directory for the dataset.
        predict_size: number of validation images to copy into the predict split.

    Raises:
        ValueError: if ``predict_size`` is not smaller than the number of
            available validation images.
    """
    download_data("https://download.pytorch.org/tutorial/hymenoptera_data.zip", path)
    predict_folder = os.path.join(path, "hymenoptera_data/predict")
    # exist_ok avoids the check-then-create race of the previous version
    os.makedirs(predict_folder, exist_ok=True)
    if len(os.listdir(predict_folder)) > 0:
        return
    validation_image_paths = glob(os.path.join(path, "hymenoptera_data/val/*/*"))
    # explicit exception instead of ``assert`` (asserts are stripped under -O)
    if predict_size >= len(validation_image_paths):
        raise ValueError(
            f"predict_size={predict_size} must be smaller than the number of "
            f"validation images ({len(validation_image_paths)})"
        )
    indices = np.random.choice(range(len(validation_image_paths)), predict_size, replace=False)
    for index in indices:
        src = validation_image_paths[index]
        # os.path.basename is portable, unlike splitting on '/'
        dst = os.path.join(predict_folder, os.path.basename(src))
        shutil.copy(src, dst)
|
billy-horn/lightning-flash | tests/core/test_utils.py | import os
from flash import utils
from flash.core.data import download_data
# ======== Mock functions ========
class A:
    """Callable-class fixture: instances are invoked like functions."""
    def __call__(self, x):
        return True
def b():
    """Plain function fixture."""
    return True
c = lambda: True  # noqa: E731
# ==============================
def test_get_callable_name():
    """Names resolve from the instance's class, function __name__, or lambda."""
    cases = [(A(), "a"), (b, "b"), (c, "<lambda>")]
    for fn, expected in cases:
        assert utils.get_callable_name(fn) == expected
def test_get_callable_dict():
    """get_callable_dict maps derived names (or given keys) to the callables."""
    single = utils.get_callable_dict(A())
    assert type(single["a"]) is A
    listed = utils.get_callable_dict([A(), b])
    assert type(listed["a"]) is A
    assert listed["b"] is b
    named = utils.get_callable_dict({"one": A(), "two": b, "three": c})
    assert type(named["one"]) is A
    assert named["two"] is b
    assert named["three"] is c
def test_download_data(tmpdir):
    """download_data fetches the archive and unpacks it into the target dir."""
    target = os.path.join(tmpdir, "data")
    download_data("https://pl-flash-data.s3.amazonaws.com/titanic.zip", target)
    # both the zip itself and its extracted folder should be present
    assert set(os.listdir(target)) == {'titanic', 'titanic.zip'}
|
billy-horn/lightning-flash | flash_examples/predict/classify_tabular.py | <reponame>billy-horn/lightning-flash
from flash.core.data import download_data
from flash.tabular import TabularClassifier
# Example: predict Titanic survival with a pretrained TabularClassifier.
if __name__ == "__main__":
    # 1. Download the data
    download_data("https://pl-flash-data.s3.amazonaws.com/titanic.zip", 'data/')
    # 2. Load the model from a checkpoint (weights fetched from S3 on first run)
    model = TabularClassifier.load_from_checkpoint(
        "https://flash-weights.s3.amazonaws.com/tabular_classification_model.pt"
    )
    # 3. Generate predictions from a sheet file! Who would survive?
    predictions = model.predict("data/titanic/titanic.csv")
    print(predictions)
|
billy-horn/lightning-flash | flash_examples/finetuning/summarization.py | <filename>flash_examples/finetuning/summarization.py<gh_stars>1-10
import flash
from flash import download_data
from flash.text import SummarizationData, SummarizationTask
# Example: fine-tune a SummarizationTask on the XSUM dataset and save a checkpoint.
if __name__ == "__main__":
    # 1. Download the data
    download_data("https://pl-flash-data.s3.amazonaws.com/xsum.zip", 'data/')
    # 2. Load the data (CSVs with "input" article and "target" summary columns)
    datamodule = SummarizationData.from_files(
        train_file="data/xsum/train.csv",
        valid_file="data/xsum/valid.csv",
        test_file="data/xsum/test.csv",
        input="input",
        target="target"
    )
    # 3. Build the model
    model = SummarizationTask()
    # 4. Create the trainer. Run once on data
    trainer = flash.Trainer(max_epochs=1)
    # 5. Fine-tune the model
    trainer.finetune(model, datamodule=datamodule)
    # 6. Test model
    trainer.test()
    # 7. Save it!
    trainer.save_checkpoint("summarization_model_xsum.pt")
|
billy-horn/lightning-flash | flash_examples/predict/summarize.py | <filename>flash_examples/predict/summarize.py
from pytorch_lightning import Trainer
from flash.core.data import download_data
from flash.text import SummarizationData, SummarizationTask
# Example: summarize raw text (or a CSV of articles) with a pretrained SummarizationTask.
if __name__ == "__main__":
    # 1. Download the data
    download_data("https://pl-flash-data.s3.amazonaws.com/xsum.zip", 'data/')
    # 2. Load the model from a checkpoint
    model = SummarizationTask.load_from_checkpoint("https://flash-weights.s3.amazonaws.com/summarization_model_xsum.pt")
    # 2a. Summarize an article!
    predictions = model.predict([
        """
        Camilla bought a box of mangoes with a Brixton £10 note, introduced last year to try to keep the money of local
        people within the community.The couple were surrounded by shoppers as they walked along Electric Avenue.
        They came to Brixton to see work which has started to revitalise the borough.
        It was Charles' first visit to the area since 1996, when he was accompanied by the former
        South African president <NAME>.<NAME>, who has run a stall on Electric Avenue
        for 20 years, said Camilla had been ""nice and pleasant"" when she purchased the fruit.
        ""She asked me what was nice, what would I recommend, and I said we've got some nice mangoes.
        She asked me were they ripe and I said yes - they're from the Dominican Republic.""
        Mr Chong is one of 170 local retailers who accept the Brixton Pound.
        Customers exchange traditional pound coins for Brixton Pounds and then spend them at the market
        or in participating shops.
        During the visit, Prince Charles spent time talking to youth worker <NAME>, who works with children
        nearby on an estate off Coldharbour Lane. Mr West said:
        ""He's on the level, really down-to-earth. They were very cheery. The prince is a lovely man.""
        He added: ""I told him I was working with young kids and he said, 'Keep up all the good work.'""
        Prince Charles also visited the Railway Hotel, at the invitation of his charity The Prince's Regeneration Trust.
        The trust hopes to restore and refurbish the building,
        where once <NAME> and The Clash played, as a new community and business centre."
        """
    ])
    print(predictions)
    # 2b. Or generate summaries from a sheet file!
    datamodule = SummarizationData.from_file(
        predict_file="data/xsum/predict.csv",
        input="input",
    )
    predictions = Trainer().predict(model, datamodule=datamodule)
    print(predictions)
|
billy-horn/lightning-flash | tests/text/translation/test_model.py | <filename>tests/text/translation/test_model.py<gh_stars>1-10
import os
import torch
from flash import Trainer
from flash.text import TranslationTask
# ======== Mock functions ========
class DummyDataset(torch.utils.data.Dataset):
    """Synthetic seq2seq source: random 128-token id sequences for inputs and labels."""

    def __getitem__(self, index):
        # token ids drawn uniformly from [0, 1000); index is irrelevant
        sample = {}
        for key in ("input_ids", "labels"):
            sample[key] = torch.randint(1000, size=(128, ))
        return sample

    def __len__(self):
        # fixed-size dataset
        return 100
# ==============================
TEST_BACKBONE = "sshleifer/tiny-mbart"  # super small model for testing

def test_init_train(tmpdir):
    """Smoke-test: a TranslationTask fits a single fast-dev-run step."""
    if os.name == "nt":
        # TODO: huggingface stuff timing out on windows
        return True
    task = TranslationTask(TEST_BACKBONE)
    loader = torch.utils.data.DataLoader(DummyDataset())
    Trainer(default_root_dir=tmpdir, fast_dev_run=True).fit(task, loader)
|
billy-horn/lightning-flash | flash/core/data/datapipeline.py | from typing import Any
from torch import Tensor
from torch.utils.data._utils.collate import default_collate
class DataPipeline:
    """Hooks for converting raw samples into batches and back again.

    ``collate_fn`` runs ``before_collate -> collate -> after_collate`` and is
    meant to be handed to ``torch.utils.data.DataLoader``; ``uncollate_fn``
    runs the mirrored ``before_uncollate -> uncollate -> after_uncollate``
    chain to recover per-sample data from a batch. Override any hook for
    custom behaviour.

    Example::

    .. code-block:: python

        class MyTextDataPipeline(DataPipeline):
            def __init__(self, tokenizer, padder):
                self.tokenizer = tokenizer
                self.padder = padder

            def before_collate(self, samples):
                # encode each input sequence
                return [self.tokenizer.encode(sample) for sample in samples]

            def after_collate(self, batch):
                # pad tensor elements to the maximum length in the batch
                return self.padder(batch)

            def after_uncollate(self, samples):
                # decode each input sequence
                return [self.tokenizer.decode(sample) for sample in samples]
    """

    def before_collate(self, samples: Any) -> Any:
        """Hook: transform raw samples before batching. Identity by default."""
        return samples

    def collate(self, samples: Any) -> Any:
        """Hook: assemble a batch (``default_collate`` unless already a Tensor)."""
        return samples if isinstance(samples, Tensor) else default_collate(samples)

    def after_collate(self, batch: Any) -> Any:
        """Hook: transform the assembled batch. Identity by default."""
        return batch

    def collate_fn(self, samples: Any) -> Any:
        """Full raw-data -> batch pipeline, usable as a DataLoader ``collate_fn``.

        To skip the before/after transformations, call ``collate`` directly.
        """
        return self.after_collate(self.collate(self.before_collate(samples)))

    def before_uncollate(self, batch: Any) -> Any:
        """Hook: transform the batch before splitting it back into samples."""
        return batch

    def uncollate(self, batch: Any) -> Any:
        """Hook: split a batch into samples. Identity by default."""
        return batch

    def after_uncollate(self, samples: Any) -> Any:
        """Hook: transform the recovered samples. Identity by default."""
        return samples

    def uncollate_fn(self, batch: Any) -> Any:
        """Full batch -> raw-data pipeline (mirror of ``collate_fn``)."""
        return self.after_uncollate(self.uncollate(self.before_uncollate(batch)))
|
billy-horn/lightning-flash | tests/examples/test_examples.py | import subprocess
import sys
from pathlib import Path
from typing import List, Optional, Tuple
import pytest
root = Path(__file__).parent.parent.parent

def call_script(filepath: str,
                args: Optional[List[str]] = None,
                timeout: Optional[int] = 60 * 5) -> Tuple[int, str, str]:
    """Run ``filepath`` with the current interpreter and capture its output.

    Returns:
        ``(returncode, stdout, stderr)`` with both streams decoded as UTF-8.
        On timeout the process is killed and partial output is returned.
    """
    extra = [] if args is None else [str(a) for a in args]
    command = [sys.executable, filepath] + extra
    print(" ".join(command))
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    try:
        out, err = proc.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        proc.kill()
        out, err = proc.communicate()
    return proc.returncode, out.decode("utf-8"), err.decode("utf-8")
def run_test(filepath):
    """Run an example script and fail the calling test if it exits non-zero."""
    code, stdout, stderr = call_script(filepath)
    # print the output BEFORE asserting: the original asserted first, so a
    # failing script's logs were never shown in the test report
    print(f"{filepath} STDOUT: {stdout}")
    print(f"{filepath} STDERR: {stderr}")
    assert not code
@pytest.mark.parametrize(
    "step,file",
    [
        ("finetuning", "image_classification.py"),
        ("finetuning", "tabular_classification.py"),
        ("predict", "classify_image.py"),
        ("predict", "classify_tabular.py"),
        # "classify_text.py" TODO: takes too long
    ]
)
def test_finetune_example(tmpdir, step, file):
    """Every listed example script must run to completion from a scratch cwd."""
    script = root / "flash_examples" / step / file
    with tmpdir.as_cwd():
        run_test(str(script))
def test_generic_example(tmpdir):
    """The generic task example must also run cleanly from a scratch cwd."""
    script = root / "flash_examples" / "generic_task.py"
    with tmpdir.as_cwd():
        run_test(str(script))
|
billy-horn/lightning-flash | tests/text/test_data_model_integration.py | import os
from pathlib import Path
from pytorch_lightning import Trainer
from flash.text import TextClassificationData, TextClassifier
TEST_BACKBONE = "prajjwal1/bert-tiny"  # super small model for testing

TEST_CSV_DATA = """sentence,label
this is a sentence one,0
this is a sentence two,1
this is a sentence three,0
"""

def csv_data(tmpdir):
    """Write the sample CSV fixture into *tmpdir* and return its Path."""
    out = Path(tmpdir, "data.csv")
    out.write_text(TEST_CSV_DATA)
    return out
def test_classification(tmpdir):
    """End-to-end: TextClassificationData + TextClassifier fit one dev step."""
    if os.name == "nt":
        # TODO: huggingface stuff timing out on windows
        return True
    dm = TextClassificationData.from_files(
        backbone=TEST_BACKBONE,
        train_file=csv_data(tmpdir),
        input="sentence",
        target="label",
        num_workers=0,
        batch_size=2,
    )
    model = TextClassifier(2, TEST_BACKBONE)
    Trainer(default_root_dir=tmpdir, fast_dev_run=True).fit(model, datamodule=dm)
|
billy-horn/lightning-flash | tests/text/translation/test_data.py | <filename>tests/text/translation/test_data.py
import os
from pathlib import Path
from flash.text import TranslationData
TEST_BACKBONE = "sshleifer/tiny-mbart"  # super small model for testing

TEST_CSV_DATA = """input,target
this is a sentence one,this is a translated sentence one
this is a sentence two,this is a translated sentence two
this is a sentence three,this is a translated sentence three
"""

TEST_JSON_DATA = """
{"input": "this is a sentence one","target":"this is a translated sentence one"}
{"input": "this is a sentence two","target":"this is a translated sentence two"}
{"input": "this is a sentence three","target":"this is a translated sentence three"}
"""

def _write_fixture(tmpdir, name, payload):
    """Write *payload* under *tmpdir* and return the resulting Path."""
    out = Path(tmpdir) / name
    out.write_text(payload)
    return out

def csv_data(tmpdir):
    """Materialize the sample CSV fixture and return its path."""
    return _write_fixture(tmpdir, "data.csv", TEST_CSV_DATA)

def json_data(tmpdir):
    """Materialize the sample JSON-lines fixture and return its path."""
    return _write_fixture(tmpdir, "data.json", TEST_JSON_DATA)
def test_from_csv(tmpdir):
    """A train dataloader built from the CSV fixture yields tokenized batches."""
    if os.name == "nt":
        # TODO: huggingface stuff timing out on windows
        return True
    dm = TranslationData.from_files(
        backbone=TEST_BACKBONE, train_file=csv_data(tmpdir), input="input", target="target", batch_size=1
    )
    batch = next(iter(dm.train_dataloader()))
    assert "labels" in batch
    assert "input_ids" in batch
def test_from_files(tmpdir):
    """val/test dataloaders built from the same CSV both yield tokenized batches."""
    if os.name == "nt":
        # TODO: huggingface stuff timing out on windows
        return True
    path = csv_data(tmpdir)
    dm = TranslationData.from_files(
        backbone=TEST_BACKBONE,
        train_file=path,
        valid_file=path,
        test_file=path,
        input="input",
        target="target",
        batch_size=1
    )
    for loader in (dm.val_dataloader(), dm.test_dataloader()):
        batch = next(iter(loader))
        assert "labels" in batch
        assert "input_ids" in batch
def test_from_json(tmpdir):
    """The JSON-lines fixture loads when filetype='json' is passed."""
    if os.name == "nt":
        # TODO: huggingface stuff timing out on windows
        return True
    dm = TranslationData.from_files(
        backbone=TEST_BACKBONE, train_file=json_data(tmpdir), input="input", target="target", filetype="json", batch_size=1
    )
    batch = next(iter(dm.train_dataloader()))
    assert "labels" in batch
    assert "input_ids" in batch
|
billy-horn/lightning-flash | flash/core/data/utils.py | import os.path
import zipfile
from io import BytesIO
from typing import Any, Type
from urllib.request import urlopen, urlretrieve
from zipfile import ZipFile
import requests
import torch
from tqdm.auto import tqdm as tq
# Code taken from: https://gist.github.com/ruxi/5d6803c116ec1130d484a4ab8c00c603
# __author__ = "github.com/ruxi"
# __license__ = "MIT"
def download_file(url: str, path: str, verbose: bool = False) -> None:
    """
    Download ``url`` into directory ``path`` with a progress bar.

    Skips the download when the target file already exists (and skips the
    HTTP request entirely, which the previous version did not). Any file
    whose name ends in ``.zip`` is extracted into ``path`` afterwards.

    Usage:
        download_file('http://web4host.net/5MB.zip')
    """
    os.makedirs(path, exist_ok=True)
    local_filename = os.path.join(path, url.split('/')[-1])
    chunk_size = 1024
    if not os.path.exists(local_filename):
        # stream so large files are never held fully in memory; the context
        # manager guarantees the connection is released
        with requests.get(url, stream=True) as r:
            # Content-Length may be absent (chunked encoding) — fall back to 0
            file_size = int(r.headers.get('Content-Length', 0))
            num_bars = file_size // chunk_size
            if verbose:
                print(dict(file_size=file_size))
                print(dict(num_bars=num_bars))
            with open(local_filename, 'wb') as fp:
                for chunk in tq(
                    r.iter_content(chunk_size=chunk_size),
                    total=num_bars,
                    unit='KB',
                    desc=local_filename,
                    leave=True  # progressbar stays
                ):
                    fp.write(chunk)  # type: ignore
    # endswith avoids false matches such as "archive.zip.partial"
    if local_filename.endswith('.zip') and os.path.exists(local_filename):
        with zipfile.ZipFile(local_filename, 'r') as zip_ref:
            zip_ref.extractall(path)
def download_data(url: str, path: str = "data/") -> None:
    """
    Download the file at ``url`` into ``path`` (the directory is created if needed).

    Zip archives are extracted in place after download; other files
    (e.g. ``.csv``) are simply saved.
    Example::
        from flash import download_data
        download_data("https://pl-flash-data.s3.amazonaws.com/titanic.zip", "data/")
    Args:
        url: remote URL of the file to fetch.
        path: local directory to download (and extract) into. Defaults to ``data/``.
    """
    download_file(url, path)
def _contains_any_tensor(value: Any, dtype: Type = torch.Tensor) -> bool:
# TODO: we should refactor FlashDatasetFolder to better integrate
# with DataPipeline. That way, we wouldn't need this check.
# This is because we are running transforms in both places.
if isinstance(value, dtype):
return True
if isinstance(value, (list, tuple)):
return any(_contains_any_tensor(v, dtype=dtype) for v in value)
elif isinstance(value, dict):
return any(_contains_any_tensor(v, dtype=dtype) for v in value.values())
return False
|
billy-horn/lightning-flash | tests/tabular/data/test_dataset.py | import numpy as np
import pandas as pd
from flash.tabular.classification.data.dataset import PandasDataset
# Small fixture frames for PandasDataset tests: one categorical column,
# two numerical columns, and a binary label.
TEST_DF = pd.DataFrame(
    data={
        "category": [0, 1, 2, 1, 0, 2],
        "scalar_a": [0.0, 1.0, 2.0, 3.0, 2.0, 5.0],
        "scalar_b": [5.0, 4.0, 3.0, 2.0, 2.0, 1.0],
        "label": [0, 1, 0, 1, 0, 1],
    }
)
TEST_DF_NO_NUM = pd.DataFrame(data={
    "category": [0, 1, 2, 1, 0, 2],
    "label": [0, 1, 0, 1, 0, 1],
})
# NOTE(review): identical to TEST_DF_NO_NUM despite the name — looks like a
# copy-paste; a "no categorical" frame would hold scalar columns instead.
# Neither frame is referenced by the tests below; verify before changing.
TEST_DF_NO_CAT = pd.DataFrame(data={
    "category": [0, 1, 2, 1, 0, 2],
    "label": [0, 1, 0, 1, 0, 1],
})
def test_pandas():
    """PandasDataset yields ((categorical, numerical), target) tuples."""
    ds = PandasDataset(
        TEST_DF.copy(),
        cat_cols=["category"],
        num_cols=["scalar_a", "scalar_b"],
        target_col="label",
        regression=False,
    )
    assert len(ds) == 6
    (cat, num), target = ds[0]
    assert cat == np.array([0])
    assert np.allclose(num, np.array([0.0, 5.0]))
    assert target == 0
def test_pandas_no_cat():
    """With no categorical columns the categorical component is empty."""
    ds = PandasDataset(
        TEST_DF.copy(),
        cat_cols=[],
        num_cols=["scalar_a", "scalar_b"],
        target_col="label",
        regression=False,
    )
    assert len(ds) == 6
    (cat, num), target = ds[0]
    assert cat.size == 0
    assert np.allclose(num, np.array([0.0, 5.0]))
    assert target == 0
def test_pandas_no_num():
    """With no numerical columns the numerical component is empty."""
    ds = PandasDataset(
        TEST_DF.copy(),
        cat_cols=["category"],
        num_cols=[],
        target_col="label",
        regression=False,
    )
    assert len(ds) == 6
    (cat, num), target = ds[0]
    assert cat == np.array([0])
    assert num.size == 0
    assert target == 0
|
billy-horn/lightning-flash | tests/tabular/classification/test_model.py | <reponame>billy-horn/lightning-flash
import torch
from pytorch_lightning import Trainer
from flash.tabular import TabularClassifier
# ======== Mock functions ========
class DummyDataset(torch.utils.data.Dataset):
    """Random tabular samples: ((categorical_ids, numerical_values), target)."""

    def __init__(self, num_num=16, num_cat=16):
        super().__init__()
        # number of numerical / categorical features per sample
        self.num_num = num_num
        self.num_cat = num_cat

    def __getitem__(self, index):
        # target and categorical ids in [0, 10); numericals in [0, 1)
        label = torch.randint(0, 10, size=(1, )).item()
        categorical = torch.randint(0, 10, size=(self.num_cat, ))
        numerical = torch.rand(self.num_num)
        return (categorical, numerical), label

    def __len__(self):
        return 100
# ==============================
def test_init_train(tmpdir):
    """A classifier over 16 categorical + 16 numerical features fits one step."""
    loader = torch.utils.data.DataLoader(DummyDataset(), batch_size=16)
    model = TabularClassifier(num_classes=10, num_features=16 + 16, embedding_sizes=16 * [(10, 32)])
    Trainer(default_root_dir=tmpdir, fast_dev_run=True).fit(model, loader)
def test_init_train_no_num(tmpdir):
    """Training still works when samples carry no numerical features."""
    loader = torch.utils.data.DataLoader(DummyDataset(num_num=0), batch_size=16)
    model = TabularClassifier(num_classes=10, num_features=16, embedding_sizes=16 * [(10, 32)])
    Trainer(default_root_dir=tmpdir, fast_dev_run=True).fit(model, loader)
def test_init_train_no_cat(tmpdir):
    """Training still works when samples carry no categorical features."""
    loader = torch.utils.data.DataLoader(DummyDataset(num_cat=0), batch_size=16)
    model = TabularClassifier(num_classes=10, num_features=16, embedding_sizes=[])
    Trainer(default_root_dir=tmpdir, fast_dev_run=True).fit(model, loader)
|
billy-horn/lightning-flash | flash_examples/predict/image_embedder.py | import torch
from flash.core.data import download_data
from flash.vision import ImageEmbedder
# Example: compute image embeddings with a SwAV-pretrained ImageEmbedder.
if __name__ == "__main__":
    # 1. Download the data
    download_data("https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip", 'data/')
    # 2. Create an ImageEmbedder with swav trained on imagenet.
    # Check out SWAV: https://pytorch-lightning-bolts.readthedocs.io/en/latest/self_supervised_models.html#swav
    embedder = ImageEmbedder(backbone="swav-imagenet", embedding_dim=128)
    # 3. Generate an embedding from an image path.
    embeddings = embedder.predict('data/hymenoptera_data/predict/153783656_85f9c3ac70.jpg')
    # 4. Print embeddings shape
    print(embeddings.shape)
    # 5. Create a tensor random image
    random_image = torch.randn(1, 3, 32, 32)
    # 6. Generate an embedding from this random image (tensor input also works).
    embeddings = embedder.predict(random_image)
    # 7. Print embeddings shape
    print(embeddings.shape)
|
billy-horn/lightning-flash | flash_examples/finetuning/tabular_classification.py | <reponame>billy-horn/lightning-flash
from pytorch_lightning.metrics.classification import Accuracy, Precision, Recall
import flash
from flash.core.data import download_data
from flash.tabular import TabularClassifier, TabularData
# Example: train a TabularClassifier on the Titanic dataset and save a checkpoint.
if __name__ == "__main__":
    # 1. Download the data
    download_data("https://pl-flash-data.s3.amazonaws.com/titanic.zip", 'data/')
    # 2. Load the data (25% of the training CSV is held out for validation)
    datamodule = TabularData.from_csv(
        "./data/titanic/titanic.csv",
        test_csv="./data/titanic/test.csv",
        categorical_input=["Sex", "Age", "SibSp", "Parch", "Ticket", "Cabin", "Embarked"],
        numerical_input=["Fare"],
        target="Survived",
        val_size=0.25,
    )
    # 3. Build the model
    model = TabularClassifier.from_data(datamodule, metrics=[Accuracy(), Precision(), Recall()])
    # 4. Create the trainer. Run 10 times on data
    trainer = flash.Trainer(max_epochs=10)
    # 5. Train the model
    trainer.fit(model, datamodule=datamodule)
    # 6. Test model
    trainer.test()
    # 7. Save it!
    trainer.save_checkpoint("tabular_classification_model.pt")
|
billy-horn/lightning-flash | flash_examples/predict/classify_image.py | <gh_stars>1-10
from flash import Trainer
from flash.core.data import download_data
from flash.vision import ImageClassificationData, ImageClassifier
# Example: classify images (ants vs bees) with a pretrained ImageClassifier.
if __name__ == "__main__":
    # 1. Download the data
    download_data("https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip", 'data/')
    # 2. Load the model from a checkpoint (weights fetched from S3 on first run)
    model = ImageClassifier.load_from_checkpoint("https://flash-weights.s3.amazonaws.com/image_classification_model.pt")
    # 3a. Predict what's on a few images! ants or bees?
    predictions = model.predict([
        "data/hymenoptera_data/val/bees/65038344_52a45d090d.jpg",
        "data/hymenoptera_data/val/bees/590318879_68cf112861.jpg",
        "data/hymenoptera_data/val/ants/540543309_ddbb193ee5.jpg",
    ])
    print(predictions)
    # 3b. Or generate predictions with a whole folder!
    datamodule = ImageClassificationData.from_folder(folder="data/hymenoptera_data/predict/")
    predictions = Trainer().predict(model, datamodule=datamodule)
    print(predictions)
|
billy-horn/lightning-flash | flash/vision/embedding/__init__.py | <gh_stars>1-10
from flash.vision.embedding.image_embedder_model import ImageEmbedder
|
billy-horn/lightning-flash | flash/core/data/datamodule.py | <gh_stars>1-10
import os
import warnings
from typing import Any, Optional
import pytorch_lightning as pl
from torch.utils.data import DataLoader, Dataset
from flash.core.data.datapipeline import DataPipeline
class TaskDataPipeline(DataPipeline):
    """Default task pipeline: unwrap dict batches into ``(x, target)`` tuples."""

    def after_collate(self, batch: Any) -> Any:
        # dict batches are unpacked; any other batch type passes through untouched
        if isinstance(batch, dict):
            return (batch["x"], batch["target"])
        return batch
class DataModule(pl.LightningDataModule):
    """Basic DataModule class for all Flash tasks
    Args:
        train_ds: Dataset for training. Defaults to None.
        valid_ds: Dataset for validating model performance during training. Defaults to None.
        test_ds: Dataset to test model performance. Defaults to None.
        batch_size: the batch size to be used by the DataLoader. Defaults to 1.
        num_workers: The number of workers to use for parallelized loading.
        Defaults to None, which currently resolves to 0 workers (see the TODO
        in ``__init__`` — inferring the CPU count is not implemented yet).
    """
    def __init__(
        self,
        train_ds: Optional[Dataset] = None,
        valid_ds: Optional[Dataset] = None,
        test_ds: Optional[Dataset] = None,
        batch_size: int = 1,
        num_workers: Optional[int] = None,
    ):
        super().__init__()
        self._train_ds = train_ds
        self._valid_ds = valid_ds
        self._test_ds = test_ds
        # Bind the loader methods on the *instance* only for the datasets that
        # were provided — presumably so Lightning's hook detection only sees
        # dataloaders for splits that actually exist (TODO confirm against
        # Trainer behavior).
        if self._train_ds is not None:
            self.train_dataloader = self._train_dataloader
        if self._valid_ds is not None:
            self.val_dataloader = self._val_dataloader
        if self._test_ds is not None:
            self.test_dataloader = self._test_dataloader
        self.batch_size = batch_size
        # TODO: figure out best solution for setting num_workers
        # if num_workers is None:
        #     num_workers = os.cpu_count()
        if num_workers is None:
            # warnings.warn("Could not infer cpu count automatically, setting it to zero")
            num_workers = 0
        self.num_workers = num_workers
        self._data_pipeline = None
    def _train_dataloader(self) -> DataLoader:
        """Training loader: shuffled, drops the last incomplete batch."""
        return DataLoader(
            self._train_ds,
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=self.num_workers,
            pin_memory=True,
            collate_fn=self.data_pipeline.collate_fn,
            drop_last=True,
        )
    def _val_dataloader(self) -> DataLoader:
        """Validation loader: sequential, keeps every sample."""
        return DataLoader(
            self._valid_ds,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=True,
            collate_fn=self.data_pipeline.collate_fn,
        )
    def _test_dataloader(self) -> DataLoader:
        """Test loader: sequential, keeps every sample."""
        return DataLoader(
            self._test_ds,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=True,
            collate_fn=self.data_pipeline.collate_fn,
        )
    @property
    def data_pipeline(self) -> DataPipeline:
        """The active pipeline; lazily falls back to ``default_pipeline()``."""
        if self._data_pipeline is None:
            self._data_pipeline = self.default_pipeline()
        return self._data_pipeline
    @data_pipeline.setter
    def data_pipeline(self, data_pipeline) -> None:
        self._data_pipeline = data_pipeline
    @staticmethod
    def default_pipeline() -> DataPipeline:
        """Pipeline used when none has been set explicitly."""
        return TaskDataPipeline()
|
billy-horn/lightning-flash | flash_examples/predict/translate.py | <gh_stars>1-10
from pytorch_lightning import Trainer
from flash import download_data
from flash.text import TranslationData, TranslationTask
# Example: translate English sentences to Romanian with a pretrained TranslationTask.
if __name__ == "__main__":
    # 1. Download the data
    download_data("https://pl-flash-data.s3.amazonaws.com/wmt_en_ro.zip", 'data/')
    # 2. Load the model from a checkpoint (weights fetched from S3 on first run)
    model = TranslationTask.load_from_checkpoint("https://flash-weights.s3.amazonaws.com/translation_model_en_ro.pt")
    # 2a. Translate a few sentences!
    predictions = model.predict([
        "BBC News went to meet one of the project's first graduates.",
        "A recession has come as quickly as 11 months after the first rate hike and as long as 86 months.",
    ])
    print(predictions)
    # 2b. Or generate translations from a sheet file!
    datamodule = TranslationData.from_file(
        predict_file="data/wmt_en_ro/predict.csv",
        input="input",
    )
    predictions = Trainer().predict(model, datamodule=datamodule)
    print(predictions)
|
billy-horn/lightning-flash | flash/vision/classification/backbones.py | from typing import Tuple
import torch.nn as nn
import torchvision
from pytorch_lightning.utilities.exceptions import MisconfigurationException
def torchvision_backbone_and_num_features(model_name: str, pretrained: bool = True) -> Tuple[nn.Module, int]:
    """Return a torchvision feature-extractor backbone and its feature count.

    Args:
        model_name: name of a ``torchvision.models`` constructor from the
            mobilenet/vgg, resnet/resnext, or densenet families.
        pretrained: whether to load ImageNet-pretrained weights.

    Returns:
        ``(backbone, num_features)`` where ``backbone`` maps images to features
        and ``num_features`` is the size consumed by a task head.

    Raises:
        MisconfigurationException: if torchvision has no model of that name.
        ValueError: if the model exists but is not supported here.
    """
    model = getattr(torchvision.models, model_name, None)
    if model is None:
        raise MisconfigurationException(f"{model_name} is not supported by torchvision")
    if model_name in ["mobilenet_v2", "vgg11", "vgg13", "vgg16", "vgg19"]:
        model = model(pretrained=pretrained)
        backbone = model.features
        num_features = model.classifier[-1].in_features
        return backbone, num_features
    elif model_name in [
        "resnet18", "resnet34", "resnet50", "resnet101", "resnet152", "resnext50_32x4d", "resnext101_32x8d"
    ]:
        model = model(pretrained=pretrained)
        # remove the last two layers & turn it into a Sequential model
        backbone = nn.Sequential(*list(model.children())[:-2])
        num_features = model.fc.in_features
        return backbone, num_features
    # FIX: the original list repeated "densenet161" twice; "densenet201" is the
    # intended fourth member of the torchvision densenet family.
    elif model_name in ["densenet121", "densenet169", "densenet161", "densenet201"]:
        model = model(pretrained=pretrained)
        backbone = nn.Sequential(*model.features, nn.ReLU(inplace=True))
        num_features = model.classifier.in_features
        return backbone, num_features
    raise ValueError(f"{model_name} is not supported yet.")
|
billy-horn/lightning-flash | tests/text/classification/test_data.py | <filename>tests/text/classification/test_data.py
import os
from pathlib import Path
from flash.text import TextClassificationData
TEST_BACKBONE = "prajjwal1/bert-tiny"  # super small model for testing

TEST_CSV_DATA = """sentence,label
this is a sentence one,0
this is a sentence two,1
this is a sentence three,0
"""

TEST_JSON_DATA = """
{"sentence": "this is a sentence one","lab":0}
{"sentence": "this is a sentence two","lab":1}
{"sentence": "this is a sentence three","lab":0}
"""

def csv_data(tmpdir):
    """Write the CSV fixture into *tmpdir* and return its Path."""
    out = Path(tmpdir) / "data.csv"
    out.write_text(TEST_CSV_DATA)
    return out

def json_data(tmpdir):
    """Write the JSON-lines fixture (target key 'lab') into *tmpdir*."""
    out = Path(tmpdir) / "data.json"
    out.write_text(TEST_JSON_DATA)
    return out
def test_from_csv(tmpdir):
    """Train dataloader built from CSV yields tokenized, labelled batches."""
    if os.name == "nt":
        # TODO: huggingface stuff timing out on windows
        return True
    dm = TextClassificationData.from_files(
        backbone=TEST_BACKBONE, train_file=csv_data(tmpdir), input="sentence", target="label", batch_size=1
    )
    batch = next(iter(dm.train_dataloader()))
    assert batch["labels"].item() in [0, 1]
    assert "input_ids" in batch
def test_test_valid(tmpdir):
    """val and test dataloaders both yield tokenized, labelled batches."""
    if os.name == "nt":
        # TODO: huggingface stuff timing out on windows
        return True
    path = csv_data(tmpdir)
    dm = TextClassificationData.from_files(
        backbone=TEST_BACKBONE,
        train_file=path,
        valid_file=path,
        test_file=path,
        input="sentence",
        target="label",
        batch_size=1
    )
    for loader in (dm.val_dataloader(), dm.test_dataloader()):
        batch = next(iter(loader))
        assert batch["labels"].item() in [0, 1]
        assert "input_ids" in batch
def test_from_json(tmpdir):
    """JSON-lines input loads when filetype='json'; the target key is 'lab'."""
    if os.name == "nt":
        # TODO: huggingface stuff timing out on windows
        return True
    dm = TextClassificationData.from_files(
        backbone=TEST_BACKBONE, train_file=json_data(tmpdir), input="sentence", target="lab", filetype="json", batch_size=1
    )
    batch = next(iter(dm.train_dataloader()))
    assert batch["labels"].item() in [0, 1]
    assert "input_ids" in batch
|
billy-horn/lightning-flash | flash_examples/finetuning/translation.py | import flash
from flash import download_data
from flash.text import TranslationData, TranslationTask
# Example: fine-tune a TranslationTask on WMT English->Romanian and save a checkpoint.
if __name__ == "__main__":
    # 1. Download the data
    download_data("https://pl-flash-data.s3.amazonaws.com/wmt_en_ro.zip", 'data/')
    # 2. Load the data
    datamodule = TranslationData.from_files(
        train_file="data/wmt_en_ro/train.csv",
        valid_file="data/wmt_en_ro/valid.csv",
        test_file="data/wmt_en_ro/test.csv",
        input="input",
        target="target",
    )
    # 3. Build the model
    model = TranslationTask()
    # 4. Create the trainer. Run once on data
    # NOTE: this configuration requires a GPU (gpus=1) and uses 16-bit precision
    trainer = flash.Trainer(max_epochs=1, precision=16, gpus=1)
    # 5. Fine-tune the model
    trainer.finetune(model, datamodule=datamodule)
    # 6. Test model
    trainer.test()
    # 7. Save it!
    trainer.save_checkpoint("translation_model_en_ro.pt")
|
billy-horn/lightning-flash | flash_examples/predict/classify_text.py | <filename>flash_examples/predict/classify_text.py
from pytorch_lightning import Trainer
from flash.core.data import download_data
from flash.text import TextClassificationData, TextClassifier
# Example: classify IMDB movie reviews with a pretrained TextClassifier.
if __name__ == "__main__":
    # 1. Download the data
    download_data("https://pl-flash-data.s3.amazonaws.com/imdb.zip", 'data/')
    # 2. Load the model from a checkpoint (weights fetched from S3 on first run)
    model = TextClassifier.load_from_checkpoint("https://flash-weights.s3.amazonaws.com/text_classification_model.pt")
    # 2a. Classify a few sentences! How was the movie?
    # FIX: commas added — the original's adjacent string literals silently
    # concatenated the last three reviews into a single prediction input.
    predictions = model.predict([
        "Turgid dialogue, feeble characterization - <NAME> a judge?.",
        "The worst movie in the history of cinema.",
        "I come from Bulgaria where it 's almost impossible to have a tornado.",
        "Very, very afraid",
        "This guy has done a great job with this movie!",
    ])
    print(predictions)
    # 2b. Or generate predictions from a sheet file!
    datamodule = TextClassificationData.from_file(
        predict_file="data/imdb/predict.csv",
        input="review",
    )
    predictions = Trainer().predict(model, datamodule=datamodule)
    print(predictions)
|
billy-horn/lightning-flash | flash_examples/finetuning/image_classification.py | import flash
from flash.core.data import download_data
from flash.core.finetuning import FreezeUnfreeze
from flash.vision import ImageClassificationData, ImageClassifier
# Image-classification fine-tuning example on the hymenoptera dataset.
if __name__ == "__main__":
    # 1. Download the data
    download_data("https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip", 'data/')

    # 2. Load the data from per-class train/val/test folders
    datamodule = ImageClassificationData.from_folders(
        train_folder="data/hymenoptera_data/train/",
        valid_folder="data/hymenoptera_data/val/",
        test_folder="data/hymenoptera_data/test/",
    )

    # 3. Build the model with one output per discovered class
    model = ImageClassifier(num_classes=datamodule.num_classes)

    # 4. Create the trainer. Run twice on data
    trainer = flash.Trainer(max_epochs=2)

    # 5. Train the model: backbone frozen for epoch 0, unfrozen from epoch 1
    trainer.finetune(model, datamodule=datamodule, strategy=FreezeUnfreeze(unfreeze_epoch=1))

    # 6. Test the model
    trainer.test()

    # 7. Save it!
    trainer.save_checkpoint("image_classification_model.pt")
|
billy-horn/lightning-flash | tests/tabular/test_data_model_integration.py | import pandas as pd
import pytorch_lightning as pl
from flash.tabular import TabularClassifier, TabularData
# Tiny fixture frame: one categorical column, two numeric columns and a binary
# label. Includes a row of missing values to exercise imputation/encoding.
TEST_DF_1 = pd.DataFrame(
    data={
        "category": ["a", "b", "c", "a", None, "c"],
        "scalar_a": [0.0, 1.0, 2.0, 3.0, None, 5.0],
        "scalar_b": [5.0, 4.0, 3.0, 2.0, None, 1.0],
        "label": [0, 1, 0, 1, 0, 1],
    }
)
def test_classification(tmpdir):
    """Smoke test: build TabularData + TabularClassifier and fit for one fast-dev run."""
    # Reuse the same fixture for every split; copies keep the module-level
    # frame unmodified.
    train_df, valid_df, test_df = (TEST_DF_1.copy() for _ in range(3))

    data = TabularData.from_df(
        train_df,
        categorical_input=["category"],
        numerical_input=["scalar_a", "scalar_b"],
        target="label",
        valid_df=valid_df,
        test_df=test_df,
        num_workers=0,
        batch_size=2,
    )

    classifier = TabularClassifier(num_features=3, num_classes=2, embedding_sizes=data.emb_sizes)

    trainer = pl.Trainer(fast_dev_run=True, default_root_dir=tmpdir)
    trainer.fit(classifier, data)
|
billy-horn/lightning-flash | tests/vision/test_data_model_integration.py | import torch
from flash import Trainer
from flash.vision import ImageClassificationData, ImageClassifier
def _dummy_image_loader(_):
return torch.rand(3, 224, 224)
def test_classification(tmpdir):
    """Smoke test: finetune an ImageClassifier for a single fast-dev run."""
    # The custom loader returns random tensors and ignores its path argument,
    # so the filepaths presumably never need to exist on disk.
    datamodule = ImageClassificationData.from_filepaths(
        train_filepaths=["a", "b"],
        train_labels=[0, 1],
        train_transform=lambda x: x,  # identity transform
        loader=_dummy_image_loader,
        num_workers=0,
        batch_size=2,
    )

    classifier = ImageClassifier(2, backbone="resnet18")

    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    trainer.finetune(classifier, datamodule=datamodule, strategy="freeze")
|
billy-horn/lightning-flash | flash/text/seq2seq/summarization/model.py | <reponame>billy-horn/lightning-flash<filename>flash/text/seq2seq/summarization/model.py<gh_stars>1-10
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Mapping, Optional, Sequence, Type, Union
import pytorch_lightning as pl
import torch
from flash.text.seq2seq.core.model import Seq2SeqTask
from flash.text.seq2seq.summarization.metric import RougeMetric
class SummarizationTask(Seq2SeqTask):
    """Seq2Seq task specialised for text summarization.

    Wraps a seq2seq backbone (t5-small by default) and adds ROUGE scoring of
    the generated summaries against the reference labels.
    """

    def __init__(
        self,
        backbone: str = "t5-small",
        loss_fn: Optional[Union[Callable, Mapping, Sequence]] = None,
        optimizer: Type[torch.optim.Optimizer] = torch.optim.Adam,
        metrics: Union[pl.metrics.Metric, Mapping, Sequence, None] = None,
        learning_rate: float = 5e-5,
        val_target_max_length: Optional[int] = None,
        num_beams: Optional[int] = 4,
        use_stemmer: bool = True,
        rouge_newline_sep: bool = True
    ):
        # Capture constructor arguments before delegating to the base task.
        self.save_hyperparameters()
        super().__init__(
            backbone=backbone,
            loss_fn=loss_fn,
            optimizer=optimizer,
            metrics=metrics,
            learning_rate=learning_rate,
            val_target_max_length=val_target_max_length,
            num_beams=num_beams
        )
        # ROUGE metric applied to generated summaries in compute_metrics().
        self.rouge = RougeMetric(
            rouge_newline_sep=rouge_newline_sep,
            use_stemmer=use_stemmer,
        )

    @property
    def task(self) -> str:
        # Identifies this Seq2Seq variant to the base class.
        return "summarization"

    def compute_metrics(self, generated_tokens, batch, prefix):
        # Decode the reference summaries and log epoch-level ROUGE scores.
        tgt_lns = self.tokenize_labels(batch["labels"])
        result = self.rouge(generated_tokens, tgt_lns)
        self.log_dict(result, on_step=False, on_epoch=True)
|
billy-horn/lightning-flash | flash_examples/finetuning/text_classification.py | <gh_stars>1-10
import flash
from flash.core.data import download_data
from flash.text import TextClassificationData, TextClassifier
# Text-classification fine-tuning example on IMDB sentiment data.
if __name__ == "__main__":
    # 1. Download the data
    download_data("https://pl-flash-data.s3.amazonaws.com/imdb.zip", 'data/')

    # 2. Load the data: "review" is the text column, "sentiment" the label column
    datamodule = TextClassificationData.from_files(
        train_file="data/imdb/train.csv",
        valid_file="data/imdb/valid.csv",
        test_file="data/imdb/test.csv",
        input="review",
        target="sentiment",
        batch_size=512
    )

    # 3. Build the model sized to the number of label classes
    model = TextClassifier(num_classes=datamodule.num_classes)

    # 4. Create the trainer. Run once on data
    trainer = flash.Trainer(max_epochs=1)

    # 5. Fine-tune the model with the backbone frozen
    trainer.finetune(model, datamodule=datamodule, strategy='freeze')

    # 6. Test model
    trainer.test()

    # 7. Save it!
    trainer.save_checkpoint("text_classification_model.pt")
|
acheketa/cwavegan | tpu/backup.py | <filename>tpu/backup.py
from __future__ import print_function
from google.cloud import storage
import sys
def copy_blob(bucket_name, blob_name, new_bucket_name, new_blob_name):
    """Copies a blob from one bucket to another with a new name."""
    client = storage.Client()
    src_bucket = client.get_bucket(bucket_name)
    src_blob = src_bucket.blob(blob_name)
    dst_bucket = client.get_bucket(new_bucket_name)

    copied = src_bucket.copy_blob(src_blob, dst_bucket, new_blob_name)

    print('Blob {} in bucket {} copied to blob {} in bucket {}.'.format(
        src_blob.name, src_bucket.name, copied.name,
        dst_bucket.name))
def list_blobs(bucket_name):
    """Lists all the blobs in the bucket."""
    client = storage.Client()
    return client.get_bucket(bucket_name).list_blobs()
# Periodically mirror the latest training checkpoint into a '-backup' bucket.
# Usage: backup.py <bucket-name> <interval-minutes>
if __name__ == '__main__':
    import os
    import time
    import tensorflow as tf

    ckpt, nmin = sys.argv[1:3]
    train_dir = 'gs://' + ckpt
    nsec = int(float(nmin) * 60.)

    # Block until training has produced at least one checkpoint.
    while tf.train.latest_checkpoint(train_dir) is None:
        print('Waiting for first checkpoint')
        time.sleep(1)

    while True:
        latest_ckpt = tf.train.latest_checkpoint(train_dir)
        # Sleep for two seconds in case file flushing
        time.sleep(2)

        # Basename of the checkpoint prefix, e.g. 'model.ckpt-1234'.
        # (FIX: previously re-split -- and clobbered -- inside the loop below.)
        _, latest_name = os.path.split(latest_ckpt)

        files = list_blobs(ckpt)
        for blob in files:
            name = blob.name
            if latest_name in name:
                # Copy each file of the latest checkpoint (.index/.meta/.data-*)
                # and only report success AFTER the copy actually completed
                # (the old code printed the message before copying).
                copy_blob(ckpt, name, ckpt + '-backup', name)
                print("copied successfully\n", name)
        print('-' * 80)

        # Sleep between backup sweeps
        time.sleep(nsec)
|
acheketa/cwavegan | data/dump_tfrecord.py | import os
import sys
import numpy as np
from scipy.io.wavfile import write as wavwrite
import tensorflow as tf
# Dump WaveGAN tfrecords back into 16 kHz int16 WAV files.
# Usage: dump_tfrecord.py <out_dir> <tfrecord> [<tfrecord> ...]
out_dir, tfrecord_fps = sys.argv[1], sys.argv[2:]

if not os.path.isdir(out_dir):
    os.makedirs(out_dir)


def _mapper(example_proto):
    """Parse one serialized example into (16384-sample waveform, label string)."""
    features = {
        'samples': tf.FixedLenSequenceFeature([1], tf.float32, allow_missing=True),
        'label': tf.FixedLenSequenceFeature([], tf.string, allow_missing=True)
    }
    example = tf.parse_single_example(example_proto, features)
    wav = example['samples'][:, 0]

    # Truncate to 16384 samples, then zero-pad shorter clips to that length.
    wav = wav[:16384]
    wav_len = tf.shape(wav)[0]
    wav = tf.pad(wav, [[0, 16384 - wav_len]])

    label = tf.reduce_join(example['label'], 0)

    return wav, label


dataset = tf.data.TFRecordDataset(tfrecord_fps)
dataset = dataset.map(_mapper)
dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(1))
x, y = dataset.make_one_shot_iterator().get_next()
x, y = x[0], y[0]

with tf.Session() as sess:
    i = 0
    while True:
        try:
            _x, _y = sess.run([x, y])
        except tf.errors.OutOfRangeError:
            # End of dataset. (FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt and masked real errors.)
            break

        # float32 in [-1, 1] -> int16 PCM
        _x *= 32767.
        _x = np.clip(_x, -32767., 32767.)
        _x = _x.astype(np.int16)

        wavwrite(os.path.join(out_dir, '{}_{}.wav'.format(_y, str(i).zfill(5))), 16000, _x)

        i += 1
|
acheketa/cwavegan | tpu/tpu_input.py | # Based on TensorFlow released DCGAN codes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# Dataset sizes in examples. NOTE(review): these mirror MNIST-style counts;
# confirm they match the actual sc09 tfrecord shards.
NUM_TRAIN_AUDIO = 60000
NUM_EVAL_AUDIO = 10000
class InputFunction(object):
    """Wrapper class that is passed as callable to Estimator.

    Builds the (features, labels) input pipeline for the conditional WaveGAN:
    features are {'real_audio', 'random_noise'}, labels are the conditioning
    vector.
    """

    def __init__(self, is_training, noise_dim, bias):
        """Args:
            is_training: if True, repeat and shuffle the dataset.
            noise_dim: dimensionality of the generator noise vector.
            bias: True for 'bias' conditioning (int64 labels, window 8192);
                False for 'concat' conditioning (float one-hot labels,
                window 8182 so that audio + 10 label dims totals 8192).
        """
        self.is_training = is_training
        self.noise_dim = noise_dim
        self.bias = bias
        # (FIX: removed a dead local `mode` that was computed but never used.)

    def __call__(self, params):
        """Creates a simple Dataset pipeline."""
        # 8182 (not 8192) in concat mode: model_fn appends the 10-dim label
        # along the time axis, restoring a total length of 8192.
        window_len = 8192 if self.bias else 8182

        def parser(serialized_example):
            features = {'samples': tf.FixedLenSequenceFeature([1], tf.float32, allow_missing=True)}
            if self.bias:
                features['label'] = tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True)
            else:
                features['label'] = tf.FixedLenSequenceFeature([], tf.float32, allow_missing=True)
            example = tf.parse_single_example(serialized_example, features)
            wav = example['samples']
            label = example['label']

            # First window only, zero-padded up to window_len.
            wav = wav[:window_len]
            wav = tf.pad(wav, [[0, window_len - tf.shape(wav)[0]], [0, 0]])
            wav.set_shape([window_len, 1])
            if not self.bias:
                label.set_shape(10)

            return wav, label

        batch_size = params['batch_size']
        # BUG FIX: the bias bucket previously lacked the trailing '/', which
        # produced invalid paths like 'gs://sc09_tf_inttrain-000-of-128.tfrecord'.
        data_file = 'gs://sc09_tf_int/' if self.bias else 'gs://sc09_tf/'
        data_files = []
        for i in range(128):
            data_root = data_file + 'train-{}-of-128.tfrecord'.format(str(i).zfill(3))
            data_files.append(data_root)

        dataset = tf.data.TFRecordDataset(data_files)
        dataset = dataset.map(parser).cache()
        if self.is_training:
            dataset = dataset.repeat()
            dataset = dataset.shuffle(1024)
        dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))
        wav, labels = dataset.make_one_shot_iterator().get_next()

        random_noise = tf.random_uniform([batch_size, self.noise_dim], -1., 1., dtype=tf.float32)

        if self.bias:
            # Offset integer labels by +10 -- presumably to match the fixed
            # label range used at preview time (see preview.py); confirm.
            labels = labels + tf.constant(10, name='fixed', dtype=tf.int64)
            labels = tf.cast(labels, dtype=tf.float32)
            labels = tf.reshape(labels, [batch_size, 1])

        features = {
            'real_audio': wav,
            'random_noise': random_noise}

        return features, labels
|
acheketa/cwavegan | tpu/tpu_main.py | # Based on TensorFlow released DCGAN codes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Standard Imports
from absl import flags
import numpy as np
import tensorflow as tf
import tpu_input
import tpu_model
from tensorflow.contrib import summary
from tensorflow.python.estimator import estimator
FLAGS = flags.FLAGS

# Cloud TPU cluster resolver settings
flags.DEFINE_string(
    'tpu', default='acheketa-tpu',
    help='The Cloud TPU to use for training. This should be either the name '
    'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
flags.DEFINE_string(
    'gcp_project', default='dlcampjeju2018',
    help='Project name for the Cloud TPU-enabled project. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
    'tpu_zone', default='us-central1-f',
    help='GCE zone where the Cloud TPU is located in. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')

# Model-specific parameters
flags.DEFINE_string('condition', 'concat', 'Conditioning type bias or concat')
flags.DEFINE_string('model_dir', 'gs://acheketa3-ckpt', 'Output model directory')
flags.DEFINE_integer('batch_size', 1024,
                     'Batch size for both generator and discriminator')
flags.DEFINE_integer('num_shards', None, 'Number of TPU chips')
flags.DEFINE_integer('train_steps', 200000, 'Number of training steps')
flags.DEFINE_integer('train_steps_per_eval', 400,
                     'Steps per eval and image generation')
flags.DEFINE_integer('iterations_per_loop', 20,
                     'Steps per interior TPU loop. Should be less than'
                     ' --train_steps_per_eval')
flags.DEFINE_float('learning_rate', 0.0002, 'LR for both D and G')
flags.DEFINE_boolean('eval_loss', False,
                     'Evaluate discriminator and generator loss during eval')
flags.DEFINE_boolean('use_tpu', True, 'Use TPU for training')

_NUM_VIZ_AUDIO = 20  # number of generator samples produced in PREDICT mode
_D_Y = 10  # label dimensionality (10 spoken-digit classes)
_FS = 16000  # audio sample rate in Hz

# Global variables for data and model, bound in main()
dataset = None
model = None
def model_fn(features, labels, mode, params):
    """TPUEstimator model_fn for the conditional WaveGAN.

    Relies on module-level globals bound in main(): `model` (the network
    builder module) and `is_bias` (conditioning mode).
    """
    if mode == tf.estimator.ModeKeys.PREDICT:
        ###########
        # PREDICT #
        ###########
        # Pass only noise to PREDICT mode
        random_noise = features['random_noise']
        random_noise = tf.concat([random_noise, labels], 1)
        predictions = {
            'generated_audio': model.generator_wavegan(random_noise, labels, train=False, bias=is_bias)
        }
        return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions=predictions)

    # Use params['batch_size'] for the batch size inside model_fn
    batch_size = params['batch_size']   # pylint: disable=unused-variable
    real_audio = features['real_audio']
    random_noise = features['random_noise']

    # Concatenate ('concat' conditioning): append the label vector to the
    # noise, and along the time axis of the real audio.
    if not is_bias:
        label_fill = tf.expand_dims(labels, axis=2)
        random_noise = tf.concat([random_noise, labels], 1)
        real_audio = tf.concat([real_audio, label_fill], 1)

    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    generated_audio = model.generator_wavegan(random_noise, labels, train=is_training, bias=is_bias)

    # Get logits from discriminator
    d_on_data_logits = tf.squeeze(model.discriminator_wavegan(real_audio, labels, reuse=False, bias=is_bias))
    d_on_g_logits = tf.squeeze(model.discriminator_wavegan(generated_audio, labels, reuse=True, bias=is_bias))

    # Calculate discriminator loss
    d_loss_on_data = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(d_on_data_logits),
        logits=d_on_data_logits)
    d_loss_on_gen = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.zeros_like(d_on_g_logits),
        logits=d_on_g_logits)
    d_loss = d_loss_on_data + d_loss_on_gen

    # Calculate generator loss
    g_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(d_on_g_logits),
        logits=d_on_g_logits)

    if mode == tf.estimator.ModeKeys.TRAIN:
        #########
        # TRAIN #
        #########
        # tensorboard summary
        def host_call_fn(gs, g_loss, d_loss, real_audio, generated_audio):
            """Training host call. Creates scalar summaries for training metrics.
            This function is executed on the CPU and should not directly reference
            any Tensors in the rest of the `model_fn`. To pass Tensors from the
            model to the `metric_fn`, provide as part of the `host_call`. See
            https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
            for more information.
            Arguments should match the list of `Tensor` objects passed as the second
            element in the tuple passed to `host_call`.
            Args:
                gs: `Tensor with shape `[batch]` for the global_step
                g_loss: `Tensor` with shape `[batch]` for the generator loss.
                d_loss: `Tensor` with shape `[batch]` for the discriminator loss.
                real_audio: `Tensor` with shape `[batch, 8192, 1]`
                generated_audio: `Tensor` with shape `[batch, 8192, 1]`
            Returns:
                List of summary ops to run on the CPU host.
            """
            gs = gs[0]
            with summary.create_file_writer(FLAGS.model_dir).as_default():
                with summary.always_record_summaries():
                    summary.scalar('g_loss', g_loss, step=gs)
                    summary.scalar('d_loss', d_loss, step=gs)
                    summary.audio('real_audio', real_audio, sample_rate=_FS, max_outputs=10, step=gs)
                    summary.audio('generated_audio', generated_audio, sample_rate=_FS, max_outputs=10, step=gs)
            return summary.all_summary_ops()

        global_step = tf.reshape(tf.train.get_global_step(), [1])
        # NOTE(review): the losses are captured here *before* the reduce_mean
        # below, i.e. host_call receives per-example loss tensors -- confirm
        # summary.scalar handles that as intended.
        g_loss_t = g_loss
        d_loss_t = d_loss
        host_call = (host_call_fn, [global_step, g_loss_t, d_loss_t, real_audio, generated_audio])

        d_optimizer = tf.train.AdamOptimizer(
            learning_rate=FLAGS.learning_rate, beta1=0.5)
        g_optimizer = tf.train.AdamOptimizer(
            learning_rate=FLAGS.learning_rate, beta1=0.5)
        if FLAGS.use_tpu:
            # Aggregate gradients across TPU shards.
            d_optimizer = tf.contrib.tpu.CrossShardOptimizer(d_optimizer)
            g_optimizer = tf.contrib.tpu.CrossShardOptimizer(g_optimizer)

        d_loss = tf.reduce_mean(d_loss)
        g_loss = tf.reduce_mean(g_loss)
        # D and G are updated jointly in a single train_op.
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            d_step = d_optimizer.minimize(
                d_loss,
                var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                           scope='Discriminator'))
            g_step = g_optimizer.minimize(
                g_loss,
                var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                           scope='Generator'))
            increment_step = tf.assign_add(tf.train.get_or_create_global_step(), 1)
            joint_op = tf.group([d_step, g_step, increment_step])

        return tf.contrib.tpu.TPUEstimatorSpec(
            mode=mode,
            loss=g_loss,
            train_op=joint_op,
            host_call=host_call)

    elif mode == tf.estimator.ModeKeys.EVAL:
        ########
        # EVAL #
        ########
        def _eval_metric_fn(d_loss, g_loss):
            # When using TPUs, this function is run on a different machine than the
            # rest of the model_fn and should not capture any Tensors defined there
            return {
                'discriminator_loss': tf.metrics.mean(d_loss),
                'generator_loss': tf.metrics.mean(g_loss)}

        return tf.contrib.tpu.TPUEstimatorSpec(
            mode=mode,
            loss=tf.reduce_mean(g_loss),
            eval_metrics=(_eval_metric_fn, [d_loss, g_loss]))

    # Should never reach here
    raise ValueError('Invalid mode provided to model_fn')
def generate_input_fn(is_training):
    """Creates input_fn depending on whether the code is training or not."""
    # `dataset`, `noise_dim` and `is_bias` are module globals bound in main().
    return dataset.InputFunction(is_training, noise_dim, is_bias)
def main(argv):
    """Train the conditional WaveGAN on TPU, evaluating periodically."""
    del argv

    global is_bias
    global noise_dim
    # 'bias' conditioning keeps the full 100-d noise vector; 'concat' reserves
    # 10 dims for the one-hot label (90 + 10 == 100).
    is_bias = True if FLAGS.condition == 'bias' else False
    noise_dim = 100 if is_bias else 90

    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu,
        zone=FLAGS.tpu_zone,
        project=FLAGS.gcp_project)

    config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        model_dir=FLAGS.model_dir,
        keep_checkpoint_max=None,
        tpu_config=tf.contrib.tpu.TPUConfig(
            num_shards=FLAGS.num_shards,
            iterations_per_loop=FLAGS.iterations_per_loop))

    # Set module-level global variable so that model_fn and input_fn can be
    # identical for each different kind of dataset and model
    global dataset, model
    dataset = tpu_input
    model = tpu_model

    # TPU-based estimator used for TRAIN and EVAL
    est = tf.contrib.tpu.TPUEstimator(
        model_fn=model_fn,
        use_tpu=FLAGS.use_tpu,
        config=config,
        train_batch_size=FLAGS.batch_size,
        eval_batch_size=FLAGS.batch_size)

    # CPU-based estimator used for PREDICT (generating audio previews).
    # NOTE(review): not used in this loop; kept for parity with preview.py.
    cpu_est = tf.contrib.tpu.TPUEstimator(
        model_fn=model_fn,
        use_tpu=False,
        config=config,
        predict_batch_size=_NUM_VIZ_AUDIO)

    current_step = estimator._load_global_step_from_checkpoint_dir(FLAGS.model_dir)  # pylint: disable=protected-access,line-too-long
    tf.logging.info('Starting training for %d steps, current step: %d' %
                    (FLAGS.train_steps, current_step))
    while current_step < FLAGS.train_steps:
        next_checkpoint = min(current_step + FLAGS.train_steps_per_eval,
                              FLAGS.train_steps)
        est.train(input_fn=generate_input_fn(True),
                  max_steps=next_checkpoint)
        current_step = next_checkpoint
        tf.logging.info('Finished training step %d' % current_step)

        if FLAGS.eval_loss:
            # Evaluate loss on test set.
            # BUG FIX: tpu_input defines NUM_EVAL_AUDIO; the previous
            # NUM_EVAL_IMAGES attribute does not exist and raised
            # AttributeError whenever --eval_loss was set.
            metrics = est.evaluate(input_fn=generate_input_fn(False),
                                   steps=dataset.NUM_EVAL_AUDIO // FLAGS.batch_size)
            tf.logging.info('Finished evaluating')
            tf.logging.info(metrics)


if __name__ == '__main__':
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run(main)
|
acheketa/cwavegan | gpu/loader.py | <filename>gpu/loader.py<gh_stars>100-1000
import tensorflow as tf
"""
Data loader
fps: List of tfrecords
batch_size: Resultant batch size
window_len: Size of slice to take from each example
first_window: If true, always take the first window in the example, otherwise take a random window
repeat: If false, only iterate through dataset once
labels: If true, return (x, y), else return x
buffer_size: Number of examples to queue up (larger = more random)
"""
def get_batch(
        fps,
        batch_size,
        window_len,
        first_window=False,
        repeat=True,
        labels=True,
        buffer_size=8192):
    """Returns a (wav, label) batch tensor pair -- or just wav if labels=False.

    See the module docstring above for parameter semantics.
    """
    def _mapper(example_proto):
        # 'samples' is a variable-length float sequence; 'label' an int64 id.
        features = {'samples': tf.FixedLenSequenceFeature([1], tf.float32, allow_missing=True)}
        if labels:
            features['label'] = tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True)

        example = tf.parse_single_example(example_proto, features)

        wav = example['samples']
        if labels:
            label = example['label']

        if first_window:
            # Use first window
            wav = wav[:window_len]
        else:
            # Select random window
            wav_len = tf.shape(wav)[0]

            start_max = wav_len - window_len
            start_max = tf.maximum(start_max, 0)

            start = tf.random_uniform([], maxval=start_max + 1, dtype=tf.int32)

            wav = wav[start:start+window_len]

        # Zero-pad clips shorter than window_len up to the full window.
        wav = tf.pad(wav, [[0, window_len - tf.shape(wav)[0]], [0, 0]])

        wav.set_shape([window_len, 1])
        # label.set_shape(10)

        if labels:
            return wav, label
        else:
            return wav

    dataset = tf.data.TFRecordDataset(fps)
    dataset = dataset.map(_mapper)
    if repeat:
        # Shuffling (and repeating, below) only apply to the training pipeline.
        dataset = dataset.shuffle(buffer_size=buffer_size)
    dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))
    if repeat:
        dataset = dataset.repeat()
    iterator = dataset.make_one_shot_iterator()

    return iterator.get_next()
acheketa/cwavegan | tpu/preview.py | # Based on TensorFlow released DCGAN codes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, time
# Standard Imports
from absl import flags
from scipy.io.wavfile import write as wavwrite
import numpy as np
import tensorflow as tf
import bias_input
import bias_model
from tensorflow.contrib import summary
from tensorflow.python.estimator import estimator
FLAGS = flags.FLAGS

# Cloud TPU cluster resolver settings
flags.DEFINE_string(
    'tpu', default='acheketa2-tpu',
    help='The Cloud TPU to use for training. This should be either the name '
    'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
flags.DEFINE_string(
    'gcp_project', default='dlcampjeju2018',
    help='Project name for the Cloud TPU-enabled project. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
    'tpu_zone', default='us-central1-f',
    help='GCE zone where the Cloud TPU is located in. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')

# Model-specific parameters
flags.DEFINE_string('model_dir', 'gs://acheketa1-ckpt', 'Output model directory')
flags.DEFINE_integer('noise_dim', 100,
                     'Number of dimensions for the noise vector')
flags.DEFINE_integer('batch_size', 512,
                     'Batch size for both generator and discriminator')
flags.DEFINE_integer('num_shards', None, 'Number of TPU chips')
flags.DEFINE_integer('train_steps', 200000, 'Number of training steps')
flags.DEFINE_integer('train_steps_per_eval', 400,
                     'Steps per eval and image generation')
flags.DEFINE_integer('iterations_per_loop', 40,
                     'Steps per interior TPU loop. Should be less than'
                     ' --train_steps_per_eval')
flags.DEFINE_float('learning_rate', 0.02, 'LR for both D and G')
flags.DEFINE_boolean('eval_loss', False,
                     'Evaluate discriminator and generator loss during eval')
flags.DEFINE_boolean('use_tpu', True, 'Use TPU for training')

_NUM_VIZ_AUDIO = 20  # number of generator samples written per preview run
_D_Y = 10  # label dimensionality (10 spoken-digit classes)
_FS = 16000  # audio sample rate in Hz
_WINDOW_LEN = 8192  # samples per generated clip

# Global variables for data and model, bound in main()
dataset = None
model = None
def model_fn(features, labels, mode, params):
    """TPUEstimator model_fn used by preview.py (bias-conditioned WaveGAN)."""
    def host_call_fn(gs, g_loss, d_loss, real_audio, generated_audio):
        """Training host call. Creates scalar summaries for training metrics.
        This function is executed on the CPU and should not directly reference
        any Tensors in the rest of the `model_fn`. To pass Tensors from the
        model to the `metric_fn`, provide as part of the `host_call`. See
        https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
        for more information.
        Arguments should match the list of `Tensor` objects passed as the second
        element in the tuple passed to `host_call`.
        Args:
            gs: `Tensor with shape `[batch]` for the global_step
            g_loss: `Tensor` with shape `[batch]` for the generator loss.
            d_loss: `Tensor` with shape `[batch]` for the discriminator loss.
            real_audio: `Tensor` with shape `[batch, 8192, 1]`
            generated_audio: `Tensor` with shape `[batch, 8192, 1]`
        Returns:
            List of summary ops to run on the CPU host.
        """
        gs = gs[0]
        with summary.create_file_writer(FLAGS.model_dir).as_default():
            with summary.always_record_summaries():
                summary.scalar('g_loss', g_loss, step=gs)
                summary.scalar('d_loss', d_loss, step=gs)
                summary.audio('real_audio', real_audio, sample_rate=_FS, max_outputs=10, step=gs)
                summary.audio('generated_audio', generated_audio, sample_rate=_FS, max_outputs=10, step=gs)
        return summary.all_summary_ops()

    if mode == tf.estimator.ModeKeys.PREDICT:
        ###########
        # PREDICT #
        ###########
        # Pass only noise to PREDICT mode
        # labels: digits 0-9 offset by +10 (matching the training-time label
        # shift in the input pipeline), tiled twice to fill _NUM_VIZ_AUDIO slots.
        labels = tf.range(10)
        labels = labels + tf.constant(10, name='fixed', dtype=tf.int32)
        labels = tf.tile(labels, [2])
        labels = tf.cast(labels, dtype=tf.float32)
        labels = tf.reshape(labels, [_NUM_VIZ_AUDIO, 1])

        random_noise = features['random_noise']
        predictions = {
            'generated_audio': model.generator_wavegan(random_noise, labels, train=False)
        }
        return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions=predictions)

    # Use params['batch_size'] for the batch size inside model_fn
    batch_size = params['batch_size']   # pylint: disable=unused-variable
    real_audio = features['real_audio']
    random_noise = features['random_noise']

    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    generated_audio = model.generator_wavegan(random_noise, labels, train=is_training)

    # Get logits from discriminator
    d_on_data_logits = tf.squeeze(model.discriminator_wavegan(real_audio, labels, reuse=False))
    d_on_g_logits = tf.squeeze(model.discriminator_wavegan(generated_audio, labels, reuse=True))

    # Calculate discriminator loss
    d_loss_on_data = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(d_on_data_logits),
        logits=d_on_data_logits)
    d_loss_on_gen = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.zeros_like(d_on_g_logits),
        logits=d_on_g_logits)
    d_loss = d_loss_on_data + d_loss_on_gen

    # Calculate generator loss
    g_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(d_on_g_logits),
        logits=d_on_g_logits)

    if mode != tf.estimator.ModeKeys.PREDICT:
        global_step = tf.reshape(tf.train.get_global_step(), [1])
        # NOTE(review): per-example losses are passed to host_call without a
        # reduce_mean -- confirm summary.scalar handles that as intended.
        g_loss_t = g_loss
        d_loss_t = d_loss
        host_call = (host_call_fn, [global_step, g_loss_t, d_loss_t, real_audio, generated_audio])

    if mode == tf.estimator.ModeKeys.TRAIN:
        #########
        # TRAIN #
        #########
        # NOTE(review): G uses FLAGS.learning_rate (default 0.02) while D is
        # pinned at 2e-4 -- confirm the asymmetry is intentional.
        g_optimizer = tf.train.AdamOptimizer(
            learning_rate=FLAGS.learning_rate,
            beta1=0.5)
        d_optimizer = tf.train.AdamOptimizer(
            learning_rate=2e-4,
            beta1=0.5)
        if FLAGS.use_tpu:
            # Aggregate gradients across TPU shards.
            d_optimizer = tf.contrib.tpu.CrossShardOptimizer(d_optimizer)
            g_optimizer = tf.contrib.tpu.CrossShardOptimizer(g_optimizer)

        # D and G are updated jointly in a single train_op.
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            d_step = d_optimizer.minimize(
                d_loss,
                var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                           scope='Discriminator'))
            g_step = g_optimizer.minimize(
                g_loss,
                var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                           scope='Generator'))
            increment_step = tf.assign_add(tf.train.get_or_create_global_step(), 1)
            joint_op = tf.group([d_step, g_step, increment_step])

        return tf.contrib.tpu.TPUEstimatorSpec(
            mode=mode,
            loss=g_loss,
            train_op=joint_op,
            host_call=host_call)

    elif mode == tf.estimator.ModeKeys.EVAL:
        ########
        # EVAL #
        ########
        def _eval_metric_fn(d_loss, g_loss):
            # When using TPUs, this function is run on a different machine than the
            # rest of the model_fn and should not capture any Tensors defined there
            return {
                'discriminator_loss': tf.metrics.mean(d_loss),
                'generator_loss': tf.metrics.mean(g_loss)}

        return tf.contrib.tpu.TPUEstimatorSpec(
            mode=mode,
            loss=tf.reduce_mean(g_loss),
            eval_metrics=(_eval_metric_fn, [d_loss, g_loss]))

    # Should never reach here
    raise ValueError('Invalid mode provided to model_fn')
def generate_input_fn(is_training):
    """Creates input_fn depending on whether the code is training or not."""
    # `dataset` is the module-level input module bound in main().
    return dataset.InputFunction(is_training, FLAGS.noise_dim)
def noise_input_fn(params):
    """Input function for generating samples for PREDICT mode.
    Generates a single Tensor of fixed random noise. Use tf.data.Dataset to
    signal to the estimator when to terminate the generator returned by
    predict().
    Args:
        params: param `dict` passed by TPUEstimator.
    Returns:
        1-element `dict` containing the randomly generated noise.
    """
    # Fixed seed so previews are comparable across checkpoints.
    np.random.seed(0)
    noise_dataset = tf.data.Dataset.from_tensors(tf.constant(
        np.random.randn(params['batch_size'], FLAGS.noise_dim), dtype=tf.float32))
    noise = noise_dataset.make_one_shot_iterator().get_next()
    return {'random_noise': noise}, None
def main(argv):
    """Restore the latest checkpoint and write preview WAVs for each label."""
    del argv

    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu,
        zone=FLAGS.tpu_zone,
        project=FLAGS.gcp_project)

    config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        model_dir=FLAGS.model_dir,
        tpu_config=tf.contrib.tpu.TPUConfig(
            num_shards=FLAGS.num_shards,
            iterations_per_loop=FLAGS.iterations_per_loop))

    # Set module-level global variable so that model_fn and input_fn can be
    # identical for each different kind of dataset and model
    global dataset, model
    dataset = bias_input
    model = bias_model

    # TPU-based estimator used for TRAIN and EVAL.
    # NOTE(review): unused in this script (preview only predicts); kept for
    # parity with tpu_main.py.
    est = tf.contrib.tpu.TPUEstimator(
        model_fn=model_fn,
        use_tpu=FLAGS.use_tpu,
        config=config,
        train_batch_size=FLAGS.batch_size,
        eval_batch_size=FLAGS.batch_size)

    # CPU-based estimator used for PREDICT (generating audio previews)
    cpu_est = tf.contrib.tpu.TPUEstimator(
        model_fn=model_fn,
        use_tpu=False,
        config=config,
        predict_batch_size=_NUM_VIZ_AUDIO)

    current_step = estimator._load_global_step_from_checkpoint_dir(
        FLAGS.model_dir)  # pylint: disable=protected-access,line-too-long
    tf.logging.info('Starting training for %d steps, current step: %d' %
                    (FLAGS.train_steps, current_step))

    # Render some generated samples (audio, despite the log text below)
    G_z = cpu_est.predict(input_fn=noise_input_fn)
    G_z = [p['generated_audio'][:, :] for p in G_z]
    G_z = np.array(G_z)

    preview_dir = './preview'
    if not os.path.isdir(preview_dir):
        os.makedirs(preview_dir)

    for i in range(len(G_z)):
        # Peak-normalize each clip to int16 full scale before writing.
        audio = np.int16(G_z[i]/np.max(np.abs(G_z[i])) * 32767)
        # Filename: <label digit>_<global step>_<sample index>.wav
        preview_fp = os.path.join(preview_dir, '{}_{}_{}.wav'.format(str(i % 10), str(current_step), str(i)))
        wavwrite(preview_fp, _FS, audio)
    tf.logging.info('Finished generating images')


if __name__ == '__main__':
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run(main)
|
naschorr/clipster | code/audio_player.py | <filename>code/audio_player.py<gh_stars>1-10
import os
import sys
import asyncio
import async_timeout
import time
import inspect
import logging
import random
import math
from typing import Callable
from concurrent import futures
import utilities
import dynamo_helper
import exceptions
import discord
from discord import errors
from discord.ext import commands
from discord.member import Member
## Config & logging
# Module-level bot configuration and logger, initialized once at import time.
CONFIG_OPTIONS = utilities.load_config()
logger = utilities.initialize_logging(logging.getLogger(__name__))
class AudioPlayRequest:
    '''
    Represents a user's request for the bot to play some audio.
    Instances of this class form the 'audio_play_queue' in a ServerStateManager instance.
    '''

    def __init__(
        self,
        member: discord.Member,
        channel: discord.VoiceChannel,
        audio: discord.FFmpegPCMAudio,
        file_path: str,
        callback: Callable = None
    ):
        self.member = member        # requesting user
        self.channel = channel      # voice channel to play in
        self.audio = audio          # prepared FFmpeg audio source
        self.file_path = file_path  # path of the underlying audio file
        # Optional hook; presumably invoked when playback finishes -- confirm
        # against ServerStateManager's audio_player_loop.
        self.callback = callback
        self.skipped = False        # set True when the request gets skipped

    def __str__(self):
        return "'{}' in '{}' wants '{}'".format(self.member.name, self.channel.name, self.file_path)
class ServerStateManager:
    '''
    Manages the state of the bot in a given server.
    This class helps to manage the bot, initiate audio play requests, and move between channels.
    One instance exists per guild; the long-running audio_player_loop task drains the queue.
    '''
    def __init__(self, ctx, bot: commands.Bot, audio_player_cog, channel_timeout_handler = None):
        ## ctx is the command context this state was created from; it supplies voice_client/guild access later
        self.ctx = ctx
        self.bot = bot
        self.audio_player_cog = audio_player_cog
        self.active_play_request: AudioPlayRequest = None
        self.next = asyncio.Event() # flag for alerting the audio_player to play the next AudioPlayRequest
        self.skip_votes = set() # set of Members that voted to skip
        self.audio_play_queue = asyncio.Queue() # queue of AudioPlayRequest to play
        ## Kick off the consumer task immediately; it lives for the lifetime of this state
        self.audio_player = self.bot.loop.create_task(self.audio_player_loop())
        ## Seconds of queue inactivity before the bot leaves the voice channel (default 15 minutes)
        self.channel_timeout_seconds = int(CONFIG_OPTIONS.get('channel_timeout_seconds', 15 * 60))
        ## Optional coroutine invoked on inactivity timeout (ex. play a sign-off clip before leaving)
        self.channel_timeout_handler = channel_timeout_handler
    ## Property(s)
    @property
    def audio(self) -> discord.FFmpegPCMAudio:
        '''The audio source of the currently active play request.'''
        return self.active_play_request.audio
    @property
    def channel(self) -> discord.VoiceChannel:
        '''The voice channel of the currently active play request.'''
        return self.active_play_request.channel
    ## Methods
    async def get_members(self, include_bots = False) -> list:
        '''Returns a set of members in the current voice channel'''
        members = self.active_play_request.channel.members
        if (include_bots):
            return members
        else:
            ## Filter out bot accounts (including this bot itself)
            return [member for member in members if member.bot == False]
    def is_playing(self) -> bool:
        '''Returns a bool to determine if the bot is speaking in this state.'''
        if(self.ctx.voice_client is None):
            return False
        return self.ctx.voice_client.is_playing()
    async def add_play_request(self, play_request: AudioPlayRequest):
        '''Pushes the given play_request into the audio_play_queue'''
        await self.audio_play_queue.put(play_request)
    async def get_voice_client(self, channel: discord.VoiceChannel):
        '''Handles voice client management by connecting, and moving between voice channels'''
        ## Make sure the bot can actually connect to the requested VoiceChannel
        permissions = channel.guild.me.permissions_in(channel)
        if (not permissions.connect or not permissions.speak):
            raise exceptions.UnableToConnectToVoiceChannelException(
                "Unable to speak and/or connect to the channel",
                channel,
                can_speak=permissions.speak,
                can_connect=permissions.connect
            )
        if (self.ctx.voice_client is not None):
            ## Check to see if the bot isn't already in the correct channel
            if (self.ctx.voice_client.channel.id != channel.id):
                await self.ctx.voice_client.move_to(channel)
            return self.ctx.voice_client
        else:
            ## NOTE: There's an issue where if you reset the app, while the bot is connected to a voice channel, upon the
            ## bot reconnecting and joining the same voice channel, playing audio won't work.
            ## See: https://github.com/Rapptz/discord.py/issues/2284
            already_in_channel = next(filter(lambda member: member.id == self.bot.user.id, channel.members), None)
            if (already_in_channel):
                raise exceptions.AlreadyInVoiceChannelException(
                    "Old instance of bot already exists in the channel",
                    channel
                )
            return await channel.connect()
    def skip_audio(self):
        '''Skips the currently playing audio. If more audio is queued up, it will be played immediately.'''
        if(self.is_playing()):
            logger.debug("Skipping file at: {}, in channel: {}, in server: {}, for user: {}".format(
                self.active_play_request.file_path,
                self.ctx.voice_client.channel.name,
                self.ctx.guild.name,
                self.active_play_request.member.name if self.active_play_request.member else None
            ))
            ## Stopping the voice client also fires the player's 'after' callback
            self.ctx.voice_client.stop()
            self.active_play_request.skipped = True
            ## Wake the audio_player_loop so the next queued request can start
            self.next.set()
            self.skip_votes.clear()
    async def disconnect(self, inactive=False):
        '''Disconnects the bot's voice client, delegating to the timeout handler on inactivity (if set).'''
        ## No voice client to disconnect!
        if (not self.ctx.voice_client):
            return
        logger.debug("Attempting to leave channel: {}, in server: {}, due to inactivity for past {} seconds".format(
            self.ctx.voice_client.channel.name,
            self.ctx.guild.name,
            self.channel_timeout_seconds
        ))
        if (inactive and self.channel_timeout_handler):
            ## Let the handler perform any sign-off behavior, passing it the actual disconnect coroutine
            await self.channel_timeout_handler(self, self.ctx.voice_client.disconnect)
            return
        ## Default to a regular voice client disconnect
        await self.ctx.voice_client.disconnect()
    async def audio_player_loop(self):
        '''
        Audio player event loop task.
        This event loop handles processing the play_queue by joining the requester's channel, playing the requested
        audio, and handling successful skip requests
        '''
        while(True):
            try:
                self.next.clear()
                active_play_request = None
                try:
                    ## Wait for the next request; after channel_timeout_seconds of silence, disconnect instead
                    async with async_timeout.timeout(self.channel_timeout_seconds):
                        self.active_play_request = await self.audio_play_queue.get()
                        active_play_request = self.active_play_request
                except asyncio.TimeoutError:
                    if (self.ctx.voice_client and self.ctx.voice_client.is_connected()):
                        self.bot.loop.create_task(self.disconnect(inactive=True))
                    continue
                except asyncio.CancelledError:
                    logger.exception("CancelledError during audio_player_loop, ignoring and continuing loop.")
                    continue
                ## Join the requester's voice channel & play their requested audio (Or Handle the appropriate exception)
                voice_client = None
                try:
                    voice_client = await self.get_voice_client(self.active_play_request.channel)
                except futures.TimeoutError:
                    logger.error("Timed out trying to connect to the voice channel")
                    await self.ctx.send("Sorry <@{}>, I can't connect to that channel right now.".format(active_play_request.member.id))
                    continue
                except exceptions.UnableToConnectToVoiceChannelException as e:
                    logger.error("Unable to connect to voice channel")
                    ## Build a human readable list of the missing permissions
                    required_permission_phrases = []
                    if (not e.can_connect):
                        required_permission_phrases.append("connect to that channel")
                    if (not e.can_speak):
                        required_permission_phrases.append("speak in that channel")
                    await self.ctx.send("Sorry <@{}>, I don't have permission to {}.".format(
                        self.ctx.message.author.id,
                        " or ".join(required_permission_phrases)
                    ))
                    continue
                except exceptions.AlreadyInVoiceChannelException as e:
                    logger.error("Unable to connect to voice channel, old instance of the bot already exists")
                    await self.ctx.send(
                        "Uh oh <@{}>, looks like I'm still in the channel! Wait until I disconnect before trying again."
                        .format(self.ctx.message.author.id)
                    )
                    continue
                if (voice_client.is_playing()):
                    voice_client.stop()
                def after_play_callback_builder():
                    ## Wrap this in a closure to keep it available even when it should be out of scope
                    current_active_play_request = active_play_request
                    def after_play(_):
                        self.skip_votes.clear()
                        ## Only advance the loop if this callback belongs to the request that is still active
                        if (id(self.active_play_request) == id(current_active_play_request)):
                            self.next.set()
                        ## Perform callback after the audio has finished (assuming it's defined)
                        callback = current_active_play_request.callback
                        if(callback):
                            ## after_play runs in a worker thread, so coroutines must be scheduled on the loop
                            if(asyncio.iscoroutinefunction(callback)):
                                self.bot.loop.create_task(callback())
                            else:
                                callback()
                    return after_play
                logger.debug('Playing file at: {}, in channel: {}, in server: {}, for user: {}'.format(
                    self.active_play_request.file_path,
                    self.active_play_request.channel.name,
                    self.active_play_request.channel.guild.name,
                    self.active_play_request.member.name if self.active_play_request.member else None
                ))
                voice_client.play(self.active_play_request.audio, after=after_play_callback_builder())
                await self.next.wait()
            except Exception as e:
                ## Never let the loop die; log the failure and keep consuming the queue
                logger.exception('Exception inside audio player event loop', exc_info=e)
class AudioPlayer(commands.Cog):
    '''
    Cog that provides audio playback: builds FFmpeg audio sources for clip files, queues play
    requests on per-server ServerStateManager instances, and exposes the vote-based skip command.
    '''
    ## Keys
    SKIP_PERCENTAGE_KEY = "skip_percentage"
    FFMPEG_PARAMETERS_KEY = "ffmpeg_parameters"
    FFMPEG_POST_PARAMETERS_KEY = "ffmpeg_post_parameters"
    def __init__(self, bot: commands.Bot, channel_timeout_handler, **kwargs):
        self.bot = bot
        self.server_states = {}  # server_id -> ServerStateManager
        self.channel_timeout_handler = channel_timeout_handler
        self.dynamo_db = dynamo_helper.DynamoHelper()
        ## Clamp between 0.0 and 1.0
        self.skip_percentage = max(min(float(CONFIG_OPTIONS.get(self.SKIP_PERCENTAGE_KEY, 0.5)), 1.0), 0.0)
        self.ffmpeg_parameters = CONFIG_OPTIONS.get(self.FFMPEG_PARAMETERS_KEY, "")
        self.ffmpeg_post_parameters = CONFIG_OPTIONS.get(self.FFMPEG_POST_PARAMETERS_KEY, "")
    ## Methods
    def get_server_state(self, ctx) -> ServerStateManager:
        '''Retrieves the server state for the provided server_id, or creates a new one if no others exist'''
        server_id = ctx.message.guild.id
        server_state = self.server_states.get(server_id, None)
        if (server_state is None):
            server_state = ServerStateManager(ctx, self.bot, self, self.channel_timeout_handler)
            self.server_states[server_id] = server_state
        return server_state
    def build_player(self, file_path) -> discord.FFmpegPCMAudio:
        '''Builds an audio player for playing the file located at 'file_path'.'''
        return discord.FFmpegPCMAudio(
            file_path,
            before_options=self.ffmpeg_parameters,
            options=self.ffmpeg_post_parameters
        )
    ## Commands
    @commands.command(no_pm=True)
    async def skip(self, ctx, force = False):
        '''Vote to skip the current audio.'''
        state = self.get_server_state(ctx)
        ## Is the bot speaking?
        if(not state.is_playing()):
            await ctx.send("I'm not speaking at the moment.")
            return False
        ## Handle forced skips (should pretty much always be from an admin/bot owner)
        if (force):
            state.skip_audio()
            await ctx.send("<@{}> has skipped the audio.".format(ctx.message.author.id))
            return True
        ## Add a skip vote and tally it up!
        voter = ctx.message.author
        if(voter == state.active_play_request.member):
            ## The requester can always skip their own audio immediately
            state.skip_audio()
            await ctx.send("<@{}> skipped their own audio.".format(voter.id))
            return False
        elif(voter.id not in state.skip_votes):
            state.skip_votes.add(voter.id)
            ## Ensure all voters are still in the current channel (no drive-by skipping)
            active_members = await state.get_members()
            active_voters = [voter for voter in state.skip_votes if any(voter == member.id for member in active_members)]
            total_votes = len(active_voters)
            ## Determine if a skip should happen or not.
            ## Bug fix: guard against an empty member list (ex. only bots remain in the channel) so the
            ## percentage math can't raise a ZeroDivisionError; with nobody left to object, the skip passes.
            if (len(active_members) > 0):
                vote_percentage = total_votes / len(active_members)
            else:
                vote_percentage = 1.0
            if(vote_percentage >= self.skip_percentage):
                state.skip_audio()
                await ctx.send("Skip vote passed! Skipping the current audio right now.")
                return True
            else:
                ## The total votes needed for a successful skip
                required_votes = math.ceil(len(active_members) * self.skip_percentage)
                raw = "Skip vote added! Currently at {} of {} votes."
                await ctx.send(raw.format(total_votes, required_votes))
        else:
            await ctx.send("<@{}> has already voted!".format(voter.id))
    ## Interface for playing the audio file for the invoker's channel
    async def play_audio(self, ctx, file_path: str, target_member = None):
        '''Plays the given audio file aloud to your channel'''
        ## Verify that the target/requester is in a channel
        if (not target_member or not isinstance(target_member, Member)):
            target_member = ctx.message.author
        voice_channel = None
        if (target_member.voice): ## Handle users not in a voice channel
            voice_channel = target_member.voice.channel
        if(voice_channel is None):
            await ctx.send("<@{}> isn't in a voice channel.".format(target_member.id))
            return False
        ## Make sure file_path points to an actual file
        if (not os.path.isfile(file_path)):
            logger.error("Unable to play file at: {}, file doesn't exist or isn't a file.".format(file_path))
            await ctx.send("Sorry, <@{}>, that couldn't be played.".format(ctx.message.author.id))
            return False
        ## Get/Build a state for this audio, build the player, and add it to the state
        state = self.get_server_state(ctx)
        player = self.build_player(file_path)
        await state.add_play_request(AudioPlayRequest(ctx.message.author, voice_channel, player, file_path))
        self.dynamo_db.put(dynamo_helper.DynamoItem(
            ctx, ctx.message.content, inspect.currentframe().f_code.co_name, True))
        return True
    async def _play_audio_via_server_state(self, server_state: ServerStateManager, file_path: str, callback = None):
        '''Internal method for playing audio without a requester. Instead it'll play from the active voice_client.'''
        ## Make sure file_path points to an actual file
        if (not os.path.isfile(file_path)):
            logger.error("Unable to play file at: {}, file doesn't exist or isn't a file.".format(file_path))
            return False
        ## Create a player for the audio file
        player = self.build_player(file_path)
        ## On successful player creation, build a AudioPlayRequest and push it into the queue
        play_request = AudioPlayRequest(None, server_state.ctx.voice_client.channel, player, file_path, callback)
        await server_state.add_play_request(play_request)
        return True
|
naschorr/clipster | code/clips.py | import json
import os
import random
import asyncio
import logging
from discord.ext import commands
from discord.ext.commands.errors import MissingRequiredArgument
import utilities
import dynamo_helper
from string_similarity import StringSimilarity
## Config
## Shared configuration dict loaded once at import time
CONFIG_OPTIONS = utilities.load_config()
## Logging
## Module-level logger routed through the project's shared logging initialization
logger = utilities.initialize_logging(logging.getLogger(__name__))
class Clip:
    '''Lightweight record describing a single playable clip command.'''

    def __init__(self, name, path, **kwargs):
        ## The command's name, the location of its audio file on disk, and any extra keyword
        ## arguments (ex. help/brief/description text for the help menus)
        self.name = name
        self.path = path
        self.kwargs = kwargs

    def __str__(self):
        return f"{self.name} at {self.path}, {self.kwargs}"
class ClipGroup:
    '''A named collection of Clip objects, keyed by clip name, used to build help categories.'''

    def __init__(self, name, key, description):
        self.name = name
        self.key = key
        self.description = description
        self.clips = {}  # clip name -> Clip

    def add_clip(self, clip):
        '''Registers a Clip with this group; anything that isn't a Clip is logged and ignored.'''
        if (not isinstance(clip, Clip)):
            logger.warning("Couldn't add clip: {}, as it's not a valid Clip object".format(clip))
            return
        self.clips[clip.name] = clip
class Clips(commands.Cog):
    '''
    Cog that loads clip audio files from the clips folder (each subdirectory described by a
    manifest.json) and dynamically registers one bot command per clip, grouped into categories
    for the help interface.
    '''
    ## Keys
    MANIFEST_FILE_NAME_KEY = "manifest.json"
    CLIPS_KEY = "clips"
    NAME_KEY = "name"
    PATH_KEY = "path"
    HELP_KEY = "help"
    BRIEF_KEY = "brief"
    DESCRIPTION_KEY = "description"
    def __init__(self, clipster, bot, clips_folder_path, **command_kwargs):
        self.clipster = clipster
        self.bot = bot
        self.dynamo_db = dynamo_helper.DynamoHelper()
        self.manifest_file_name = self.MANIFEST_FILE_NAME_KEY
        self.clips_folder_path = clips_folder_path
        self.command_kwargs = command_kwargs
        self.command_names = []
        self.command_group_names = []
        self.find_command_minimum_similarity = float(CONFIG_OPTIONS.get('find_command_minimum_similarity', 0.5))
        self.channel_timeout_clip_paths = CONFIG_OPTIONS.get('channel_timeout_clip_paths', [])
        ## Make sure context is always passed to the callbacks
        self.command_kwargs["pass_context"] = True
        ## The mapping of clips into groups
        self.clip_groups = {}
        ## Load and add the clips
        self.init_clips()
    ## Properties
    @property
    def audio_player_cog(self):
        '''The bot's AudioPlayer cog, used to actually play the clip files.'''
        return self.clipster.get_audio_player_cog()
    ## Methods
    ## Removes all existing clips when the cog is unloaded
    def cog_unload(self):
        self.remove_clips()
    ## Searches the clips folder for folders containing a manifest.json file (which then describes the clips to be loaded)
    def scan_clips(self, path_to_scan):
        '''Returns a list of subdirectories of 'path_to_scan' that hold a manifest plus at least one other file.'''
        def is_clip_dir(file_path):
            ## A valid clip dir is a directory containing the manifest file and at least one other file
            if (os.path.isdir(file_path)):
                manifest_exists = os.path.isfile(os.path.sep.join([file_path, self.manifest_file_name]))
                is_populated = len([path for path in os.listdir(file_path)]) > 1
                return (manifest_exists and is_populated)
            return False
        clip_dirs = []
        for file in os.listdir(path_to_scan):
            full_file_path = os.sep.join([path_to_scan, file])
            if(is_clip_dir(full_file_path)):
                clip_dirs.append(full_file_path)
        return clip_dirs
    ## Builds a ClipGroup object from a directory containing clips and a manifest.json file
    def _build_clip_group(self, path):
        with open(os.path.sep.join([path, self.manifest_file_name])) as fd:
            group_raw = json.load(fd)
            ## Default the group's name to the directory's name when the manifest doesn't supply one
            name = group_raw.get('name', path.split(os.path.sep)[-1])
            key = group_raw.get('key', name)
            description = group_raw.get('description', None)
            return ClipGroup(name, key, description)
    ## Initialize the clips available to the bot
    def init_clips(self):
        '''Scans the clips folder, registers every clip found, and returns the number of clips loaded.'''
        clip_dir_paths = self.scan_clips(os.path.sep.join([utilities.get_root_path(), self.clips_folder_path]))
        counter = 0
        for clip_dir_path in clip_dir_paths:
            starting_count = counter
            clip_group = self._build_clip_group(clip_dir_path)
            for clip in self.load_clips(clip_dir_path):
                try:
                    self.add_clip(clip)
                    clip_group.add_clip(clip)
                except Exception as e:
                    ## Fix: Logger.warn is a deprecated alias of Logger.warning
                    logger.warning("Couldn't add clip", exc_info=True)
                else:
                    counter += 1
            ## Ensure we don't add in empty clip files into the groupings
            if(counter > starting_count):
                self.clip_groups[clip_group.key] = clip_group
                ## Set up a dummy command for the category, to assist with creating the help interface.
                ## asyncio.sleep is just a dummy command since commands.Command needs some kind of async callback
                help_command = commands.Command(self._create_noop_callback(), name=clip_group.key, hidden=True, no_pm=True)
                self.bot.add_command(help_command)
                self.command_group_names.append(clip_group.key) # Keep track of the 'parent' commands for later use
        logger.info("Loaded {} clip{}.".format(counter, "s" if counter != 1 else ""))
        return counter
    ## Unloads all clip commands, then reloads them from the clips.json file
    def reload_clips(self):
        self.remove_clips()
        return self.init_clips()
    ## Load clips from json into a list of clip objects
    def load_clips(self, clip_dir_path):
        '''Parses the manifest inside 'clip_dir_path' and returns a name-sorted list of Clip objects.'''
        ## Insert source[key] (if it exists) into target[key], otherwise fall back to the supplied default
        def insert_if_exists(target, source, key, default=None):
            if(key in source):
                target[key] = source[key]
            elif(default is not None):
                ## Bug fix: 'default' was previously accepted but never applied, so the 'brief' and
                ## 'description' fields never actually fell back to the clip's help text.
                target[key] = default
            return target
        clips = []
        manifest_path = os.path.sep.join([clip_dir_path, self.manifest_file_name])
        with open(manifest_path) as fd:
            for clip_raw in json.load(fd)[self.CLIPS_KEY]:
                try:
                    ## Todo: make this less ugly
                    kwargs = {}
                    help_value = clip_raw.get(self.HELP_KEY) # fallback for the help submenus
                    kwargs = insert_if_exists(kwargs, clip_raw, self.HELP_KEY)
                    kwargs = insert_if_exists(kwargs, clip_raw, self.BRIEF_KEY, help_value)
                    kwargs = insert_if_exists(kwargs, clip_raw, self.DESCRIPTION_KEY, help_value)
                    clip_name = clip_raw[self.NAME_KEY]
                    clip = Clip(
                        clip_name,
                        os.path.sep.join([clip_dir_path, clip_raw[self.PATH_KEY]]),
                        **kwargs
                    )
                    clips.append(clip)
                    self.command_names.append(clip_name)
                except Exception as e:
                    logger.warning("Error loading {} from {}. Skipping...".format(clip_raw, fd), exc_info=True)
        ## Todo: This doesn't actually result in the clips in the help menu being sorted?
        return sorted(clips, key=lambda clip: clip.name)
    ## Unloads the preset clips from the bot's command list
    def remove_clips(self):
        for name in self.command_names + self.command_group_names:
            self.bot.remove_command(name)
        self.command_names = []
        self.command_group_names = []
        self.clip_groups = {} # yay garbage collection
        logger.info("Removed clips")
        return True
    ## Add a clip command to the bot's command list
    def add_clip(self, clip):
        '''Registers a single Clip as a bot command; raises TypeError for non-Clip objects.'''
        if(not isinstance(clip, Clip)):
            raise TypeError("{} not instance of Clip.".format(clip))
        ## Manually build command to be added
        command = commands.Command(
            self._create_clip_callback(clip.path),
            name = clip.name,
            **clip.kwargs,
            **self.command_kwargs
        )
        ## _clip_callback doesn't have an instance linked to it,
        ## (not technically a method of Clips?) so manually insert the correct instance anyway.
        ## This also fixes the broken category label in the default help page.
        command.instance = self
        self.bot.add_command(command)
    def _create_noop_callback(self):
        '''
        Build an async noop callback. This is used as a dummy callback for the help commands that make up the command
        categories
        '''
        async def _noop_callback(ctx):
            await asyncio.sleep(0)
        return _noop_callback
    def _create_clip_callback(self, path):
        '''Build a dynamic callback to invoke the bot's play_audio method'''
        async def _clip_callback(ctx):
            ## Pass a self arg to it now that the command.instance is set to self
            audio_player_cog = self.audio_player_cog
            play_audio = audio_player_cog.play_audio
            ## Attempt to get a target channel
            try:
                target = ctx.message.mentions[0]
            except IndexError:
                ## Fix: narrowed the previous bare 'except' — an empty mentions list is the expected failure
                target = None
            await play_audio(ctx, path, target_member=target)
        return _clip_callback
    async def play_random_channel_timeout_clip(self, server_state, callback):
        '''Channel timeout logic, picks an appropriate sign-off message and plays it'''
        if (len(self.channel_timeout_clip_paths) > 0):
            await self.audio_player_cog._play_audio_via_server_state(
                server_state,
                os.path.sep.join([utilities.get_root_path(), random.choice(self.channel_timeout_clip_paths)]),
                callback
            )
    ## Says a random clip from the added clips
    @commands.command(no_pm=True)
    async def random(self, ctx):
        """Says a random clip from the list of clips."""
        random_clip = random.choice(self.command_names)
        command = self.bot.get_command(random_clip)
        await command.callback(ctx)
    def _calcSubstringScore(self, message, description):
        '''Returns the fraction of whitespace-separated words in 'message' that also appear in 'description'.'''
        ## Todo: shrink instances of repeated letters down to a single letter in both message and description
        ## (ex. yeeeee => ye or reeeeeboot => rebot)
        message_split = message.split(' ')
        word_frequency = 0
        for word in message_split:
            if (word in description.split(' ')):
                word_frequency += 1
        return word_frequency / len(message_split)
    @commands.command(no_pm=True)
    async def find(self, ctx, *, search_text = None):
        '''Find clips that are similar to the search text'''
        ## This method isn't ideal, as it breaks the command's signature. However it's the least bad option until
        ## Command.error handling doesn't always call the global on_command_error
        if (search_text is None):
            await self.find_error(ctx, MissingRequiredArgument(ctx.command.params['search_text']))
            return
        ## Strip all non alphanumeric and non whitespace characters out of the message
        message = ''.join(char for char in search_text.lower() if (char.isalnum() or char.isspace()))
        most_similar_command = (None, 0)
        for clip_group in self.clip_groups.values():
            for clip in clip_group.clips.values():
                ## Todo: Maybe look into filtering obviously bad descriptions from the calculation somehow?
                ## A distance metric might be nice, but then if I could solve that problem, why not just use that
                ## distance in the first place and skip the substring check?
                description = clip.kwargs.get(self.DESCRIPTION_KEY)
                if (not description):
                    continue
                ## Build a weighted distance using a traditional similarity metric and the previously calculated word
                ## frequency as well as the similarity of the actual string that invokes the clip
                distance = (self._calcSubstringScore(message, description) * 0.5) + \
                    (StringSimilarity.similarity(description, message) * 0.3) + \
                    (StringSimilarity.similarity(message, clip.name) * 0.2)
                if (distance > most_similar_command[1]):
                    most_similar_command = (clip, distance)
        if (most_similar_command[1] > self.find_command_minimum_similarity):
            command = self.bot.get_command(most_similar_command[0].name)
            await command.callback(ctx)
        else:
            await ctx.send("I couldn't find anything close to that, sorry <@{}>.".format(ctx.message.author.id))
    @find.error
    async def find_error(self, ctx, error):
        '''
        Find command error handler. Addresses some common error scenarios that on_command_error doesn't really help with
        '''
        if (isinstance(error, MissingRequiredArgument)):
            output_raw = "Sorry <@{}>, but I need something to search for! Why not try: **{}find {}**?"
            await ctx.send(output_raw.format(
                ctx.message.author.id,
                CONFIG_OPTIONS.get("activation_string"),
                random.choice(self.command_names)
            ))
|
naschorr/clipster | code/clipster.py | <filename>code/clipster.py<gh_stars>1-10
import os
import inspect
import logging
import discord
from discord.ext import commands
from discord.ext.commands.view import StringView
import utilities
import audio_player
import admin
import clips
import dynamo_helper
import help_command
import module_manager
from string_similarity import StringSimilarity
## Config
## Shared configuration dict loaded once at import time
CONFIG_OPTIONS = utilities.load_config()
## Logging
## Module-level logger routed through the project's shared logging initialization
logger = utilities.initialize_logging(logging.getLogger(__name__))
class Clipster:
    '''
    The main bot wrapper: loads configuration, builds the discord.py Bot, registers the core
    cogs/modules, and wires up the global bot event handlers.
    '''
    ## Keys
    VERSION_KEY = "version"
    ACTIVATION_STRING_KEY = "activation_string"
    DESCRIPTION_KEY = "description"
    CLIPS_FOLDER_PATH_KEY = "clips_folder_path"
    ## Initialize the bot, and add base cogs
    def __init__(self):
        self.version = CONFIG_OPTIONS.get(self.VERSION_KEY, 'No version information found')
        self.activation_string = CONFIG_OPTIONS.get(self.ACTIVATION_STRING_KEY)
        self.description = CONFIG_OPTIONS.get(self.DESCRIPTION_KEY, 'No bot description found')
        self.clips_folder_path = CONFIG_OPTIONS.get(self.CLIPS_FOLDER_PATH_KEY)
        self.invalid_command_minimum_similarity = float(CONFIG_OPTIONS.get("invalid_command_minimum_similarity", 0.25))
        self.dynamo_db = dynamo_helper.DynamoHelper()
        self.token = CONFIG_OPTIONS.get('discord_token')
        ## Make sure we've got the bare minimums to instantiate and run the bot
        if (not self.token):
            raise RuntimeError('Unable to get the token for Discord!')
        if (not self.activation_string):
            raise RuntimeError('Unable to run the bot without an activation string!')
        ## Init the bot and module manager
        self.bot = commands.Bot(
            command_prefix=commands.when_mentioned_or(self.activation_string),
            description=self.description
        )
        self.module_manager = module_manager.ModuleManager(self, self.bot)
        ## Apply customized HelpCommand
        self.bot.help_command = help_command.ClipsterHelpCommand()
        ## Register the modules (Order of registration is important, make sure dependancies are loaded first)
        self.module_manager.register(admin.Admin, True, self, self.bot)
        self.module_manager.register(clips.Clips, True, self, self.bot, self.clips_folder_path)
        self.module_manager.register(audio_player.AudioPlayer, True, self.bot, self.get_clips_cog().play_random_channel_timeout_clip)
        ## Load any dynamic modules inside the /modules folder
        self.module_manager.discover()
        ## Give some feedback for when the bot is ready to go, and provide some help text via the 'playing' status
        @self.bot.event
        async def on_ready():
            ## todo: Activity instead of Game? Potentially remove "Playing" text below bot
            bot_status = discord.Game(type=0, name="Use {}help".format(self.activation_string))
            await self.bot.change_presence(activity=bot_status)
            logger.info("Logged in as '{}' (version: {}), (id: {})".format(self.bot.user.name, self.version, self.bot.user.id))
        @self.bot.event
        async def on_command_error(ctx, exception):
            '''Handles command errors. Attempts to find a similar command and suggests it, otherwise directs the user to the help prompt.'''
            logger.exception("Unable to process command.", exc_info=exception)
            ## Record the failed invocation for analytics
            self.dynamo_db.put(dynamo_helper.DynamoItem(
                ctx, ctx.message.content, inspect.currentframe().f_code.co_name, False, str(exception)))
            ## Attempt to find a command that's similar to the one they wanted. Otherwise just direct them to the help page
            most_similar_command = self.find_most_similar_command(ctx.message.content)
            if (most_similar_command[0] == ctx.invoked_with):
                ## Handle issues where the command is valid, but couldn't be completed for whatever reason.
                await ctx.send("I'm sorry <@{}>, I'm afraid I can't do that.\n" \
                    "Discord is having some issues that won't let me speak right now."
                    .format(ctx.message.author.id))
            else:
                help_text_chunks = [
                    "Sorry <@{}>, **{}{}** isn't a valid command.".format(ctx.message.author.id, ctx.prefix, ctx.invoked_with)
                ]
                if (most_similar_command[1] > self.invalid_command_minimum_similarity):
                    help_text_chunks.append("Did you mean **{}{}**?".format(self.activation_string, most_similar_command[0]))
                else:
                    help_text_chunks.append("Try the **{}help** page.".format(self.activation_string))
                ## Dump output to user
                await ctx.send(" ".join(help_text_chunks))
                return
    ## Methods
    ## Add an arbitary cog to the bot
    def add_cog(self, cls):
        self.bot.add_cog(cls)
    ## Returns a cog with a given name
    def get_cog(self, cls_name):
        return self.bot.get_cog(cls_name)
    ## Returns the bot's audio player cog
    def get_audio_player_cog(self):
        return self.bot.get_cog("AudioPlayer")
    ## Returns the bot's clips cog
    def get_clips_cog(self):
        return self.bot.get_cog("Clips")
    ## Register an arbitrary module with clipster (easy wrapper for self.module_manager.register)
    def register_module(self, cls, is_cog, *init_args, **init_kwargs):
        self.module_manager.register(cls, is_cog, *init_args, **init_kwargs)
    ## Finds the most similar command to the supplied one
    def find_most_similar_command(self, command):
        '''Returns a (command_name, similarity) tuple for the visible command closest to 'command'.'''
        ## Build a message string that we can compare with.
        try:
            message = command[len(self.activation_string):]
        except TypeError:
            ## NOTE(review): presumably this guards against a None/non-sliceable 'command' — confirm
            message = command
        ## Get a list of all visible commands
        commands = [cmd.name for cmd in self.bot.commands if not cmd.hidden]
        ## Find the most similar command
        most_similar_command = (None, 0)
        for key in commands:
            distance = StringSimilarity.similarity(key, message)
            if (distance > most_similar_command[1]):
                most_similar_command = (key, distance)
        return most_similar_command
    def run(self):
        '''Starts the bot up'''
        ## So ideally there would be some flavor of atexit.register or signal.signal command to gracefully shut the bot
        ## down upon SIGTERM or SIGINT. However that doesn't seem to be possible at the moment. Discord.py's got most of
        ## the functionality built into the base close() method that fires on SIGINT and SIGTERM, but the bot never ends
        ## up getting properly disconnected from the voice channels that it's connected to. I end up having to wait for
        ## a time out. Otherwise the bot will be in a weird state upon starting back up, and attempting to speak in one
        ## of the channels that it was previously in. Fortunately this bad state will self-recover in a minute or so,
        ## but it's still unpleasant. A temporary fix is to bump up the RestartSec= property in the service config to be
        ## long enough to allow for the bot to be forcefully disconnected
        logger.info('Starting up the bot.')
        self.bot.run(self.token)
if (__name__ == "__main__"):
    ## Script entry point: build the bot (loading config and registering all modules) and start it.
    clipster = Clipster()
    # clipster.register_module(ArbitraryClass(*init_args, **init_kwargs))
    # or,
    # clipster.add_cog(ArbitaryClass(*args, **kwargs))
    clipster.run()
|
naschorr/clipster | code/admin.py | import inspect
import logging
import utilities
import dynamo_helper
from discord.ext import commands
## Config
CONFIG_OPTIONS = utilities.load_config()
## Logging
## Consistency fix: route this module's logger through utilities.initialize_logging like every
## other module (audio_player, clips, clipster, dynamo_helper) so admin log output picks up the
## shared handler/formatting configuration instead of a bare, unconfigured logger.
logger = utilities.initialize_logging(logging.getLogger(__name__))
class Admin(commands.Cog):
    '''
    Cog exposing admin-only commands: reloading clips and cogs, force-skipping audio, and
    disconnecting the bot from voice. Admin status is membership in the configured 'admins' list.
    '''
    ## Keys
    ADMINS_KEY = "admins"
    ## NOTE(review): the 'hawking' parameter/attribute name looks like a holdover from the sibling
    ## Hawking project; it holds the main Clipster instance here — confirm before renaming.
    def __init__(self, hawking, bot):
        self.hawking = hawking
        self.bot = bot
        self.admins = CONFIG_OPTIONS.get(self.ADMINS_KEY, [])
        self.dynamo_db = dynamo_helper.DynamoHelper()
    ## Properties
    @property
    def audio_player_cog(self):
        ## The main bot's AudioPlayer cog
        return self.hawking.get_audio_player_cog()
    @property
    def clips_cog(self):
        ## The main bot's Clips cog
        return self.hawking.get_clips_cog()
    ## Methods
    ## Checks if a user is a valid admin
    def is_admin(self, name):
        ## Compared by string form, so Member objects work via their str() representation
        return (str(name) in self.admins)
    ## Commands
    ## Root command for other admin-only commands
    @commands.group(no_pm=True, hidden=True)
    async def admin(self, ctx):
        """Root command for the admin-only commands"""
        if(ctx.invoked_subcommand is None):
            if(self.is_admin(ctx.message.author)):
                await ctx.send("Missing subcommand.")
                return True
            else:
                await ctx.send("<@{}> isn't allowed to do that.".format(ctx.message.author.id))
                return False
        return False
    ## Tries to reload the preset clips (admin only)
    @admin.command(no_pm=True)
    async def reload_clips(self, ctx):
        """Reloads the list of preset clips."""
        ## I don't really like having core modules intertwined with dynamic ones, maybe move the appropriate admin
        ## modules out into their dynamic module and exposing some admin auth function that they check in with before
        ## running the command?
        if(not self.clips_cog):
            await ctx.send("Sorry <@{}>, but the clips cog isn't available.".format(ctx.message.author.id))
            return False
        if(not self.is_admin(ctx.message.author)):
            await ctx.send("<@{}> isn't allowed to do that.".format(ctx.message.author.id))
            self.dynamo_db.put(dynamo_helper.DynamoItem(ctx, ctx.message.content, inspect.currentframe().f_code.co_name, False))
            return False
        count = self.clips_cog.reload_clips()
        loaded_clips_string = "Loaded {} clip{}.".format(count, "s" if count != 1 else "")
        await ctx.send(loaded_clips_string)
        self.dynamo_db.put(dynamo_helper.DynamoItem(ctx, ctx.message.content, inspect.currentframe().f_code.co_name, True))
        return (count >= 0)
    ## Tries to reload the addon cogs (admin only)
    @admin.command(no_pm=True)
    async def reload_cogs(self, ctx):
        """Reloads the bot's cogs."""
        if(not self.is_admin(ctx.message.author)):
            await ctx.send("<@{}> isn't allowed to do that.".format(ctx.message.author.id))
            self.dynamo_db.put(dynamo_helper.DynamoItem(ctx, ctx.message.content, inspect.currentframe().f_code.co_name, False))
            return False
        count = self.hawking.module_manager.reload_all()
        total = len(self.hawking.module_manager.modules)
        loaded_cogs_string = "Loaded {} of {} cogs.".format(count, total)
        await ctx.send(loaded_cogs_string)
        self.dynamo_db.put(dynamo_helper.DynamoItem(ctx, ctx.message.content, inspect.currentframe().f_code.co_name, True))
        return (count >= 0)
    ## Skips the currently playing audio (admin only)
    @admin.command(no_pm=True)
    async def skip(self, ctx):
        """Skips the current audio."""
        if(not self.is_admin(ctx.message.author)):
            logger.debug("Unable to admin skip audio, user: {} is not an admin".format(ctx.message.author.name))
            await ctx.send("<@{}> isn't allowed to do that.".format(ctx.message.author.id))
            return False
        ## Delegate to the AudioPlayer cog's skip with force enabled (bypasses vote tallying)
        await self.audio_player_cog.skip(ctx, force = True)
        return True
    ## Disconnects the bot from their current voice channel
    @admin.command(no_pm=True)
    async def disconnect(self, ctx):
        """ Disconnect from the current voice channel."""
        if(not self.is_admin(ctx.message.author)):
            await ctx.send("<@{}> isn't allowed to do that.".format(ctx.message.author.id))
            self.dynamo_db.put(dynamo_helper.DynamoItem(ctx, ctx.message.content, inspect.currentframe().f_code.co_name, False))
            return False
        state = self.audio_player_cog.get_server_state(ctx)
        await state.ctx.voice_client.disconnect()
        self.dynamo_db.put(dynamo_helper.DynamoItem(ctx, ctx.message.content, inspect.currentframe().f_code.co_name, True))
        return True
|
naschorr/clipster | code/dynamo_helper.py | import boto3
import base64
import time
import logging
import utilities
## Config
CONFIG_OPTIONS = utilities.load_config()
## Logging
logger = utilities.initialize_logging(logging.getLogger(__name__))
class DynamoItem:
    """A single command-invocation record, shaped for storage in DynamoDB."""

    def __init__(self, discord_context, query, command, is_valid, error=None):
        """
        discord_context: the discord.py Context the command ran under
        query: the raw message content that triggered the command
        command: name of the handler function that processed it
        is_valid: whether the command was accepted/succeeded
        error: optional error description; omitted from the record when None
        """
        author = discord_context.message.author
        channel = discord_context.message.channel
        guild = discord_context.message.guild

        self.user_id = int(author.id)
        self.user_name = "{}#{}".format(author.name, author.discriminator)
        ## Stored in milliseconds
        self.timestamp = int(discord_context.message.created_at.timestamp() * 1000)
        self.channel_id = channel.id
        self.channel_name = channel.name
        self.server_id = guild.id
        self.server_name = guild.name
        self.query = query
        self.command = command
        self.is_valid = is_valid
        self.error = error
        self.primary_key_name = CONFIG_OPTIONS.get("boto_primary_key", "QueryId")
        self.primary_key = self.build_primary_key()

    ## Methods

    def getDict(self):
        """Return the record as a plain dict, ready for Table.put_item."""
        output = {
            "user_id": self.user_id,
            "user_name": self.user_name,
            "timestamp": self.timestamp,
            "channel_id": self.channel_id,
            "channel_name": self.channel_name,
            "server_id": self.server_id,
            "server_name": self.server_name,
            "query": self.query,
            "command": self.command,
            "is_valid": self.is_valid
        }

        ## FIX: identity check instead of `!= None` (which invokes __ne__)
        if(self.error is not None):
            output["error"] = self.error

        output[self.primary_key_name] = self.primary_key

        return output

    def build_primary_key(self):
        """Derive an opaque, unique primary key from the user id and timestamp."""
        concatenated = "{}{}".format(self.user_id, self.timestamp)

        return base64.b64encode(bytes(concatenated, "utf-8")).decode("utf-8")
class DynamoHelper:
    """Thin wrapper around a boto3 DynamoDB table used to log command usage."""

    ## Keys
    BOTO_ENABLE_KEY = "boto_enable"
    BOTO_RESOURCE_KEY = "boto_resource"
    BOTO_REGION_NAME_KEY = "boto_region_name"
    BOTO_TABLE_NAME_KEY = "boto_table_name"

    ## Defaults (resolved from the loaded config at class-definition time)
    BOTO_ENABLE = CONFIG_OPTIONS.get(BOTO_ENABLE_KEY, False)
    BOTO_RESOURCE = CONFIG_OPTIONS.get(BOTO_RESOURCE_KEY, "dynamodb")
    BOTO_REGION_NAME = CONFIG_OPTIONS.get(BOTO_REGION_NAME_KEY, "us-east-2")
    BOTO_TABLE_NAME = CONFIG_OPTIONS.get(BOTO_TABLE_NAME_KEY, "Hawking")

    def __init__(self, **kwargs):
        ## Explicit kwargs override the config-derived class defaults
        self.enabled = kwargs.get(self.BOTO_ENABLE_KEY, self.BOTO_ENABLE)
        self.resource = kwargs.get(self.BOTO_RESOURCE_KEY, self.BOTO_RESOURCE)
        self.region_name = kwargs.get(self.BOTO_REGION_NAME_KEY, self.BOTO_REGION_NAME)
        self.table_name = kwargs.get(self.BOTO_TABLE_NAME_KEY, self.BOTO_TABLE_NAME)

        ## NOTE(review): the resource/table handles are created even when
        ## self.enabled is False — confirm that's intended (put() never touches
        ## them in that case).
        self.dynamo_db = boto3.resource(self.resource, region_name=self.region_name)
        self.table = self.dynamo_db.Table(self.table_name)

    ## Methods

    def put(self, dynamo_item):
        """Best-effort insert of a DynamoItem; returns the response, or None."""
        if(self.enabled):
            try:
                return self.table.put_item(Item=dynamo_item.getDict())
            except Exception as e:
                ## Don't let issues with dynamo tank the bot's functionality
                logger.exception("Exception while performing dynamo put")
                return None
        else:
            return None
|
naschorr/clipster | code/utilities.py | <filename>code/utilities.py<gh_stars>1-10
import os
import sys
import json
import logging
import pathlib
from logging.handlers import RotatingFileHandler
## Config
CONFIG_OPTIONS = {} # This'll be populated on import
CONFIG_NAME = "config.json" # The name of the config file
DEV_CONFIG_NAME = "config.dev.json" # The name of the dev config file (overrides properties stored in the normal and prod config files)
PROD_CONFIG_NAME = "config.prod.json" # The name of the prod config file (overrides properties stored in the normal config file)
DIRS_FROM_ROOT = 1 # How many directories away this script is from the root
PLATFORM = sys.platform
def get_root_path():
## -1 includes this script itself in the realpath
return os.sep.join(os.path.realpath(__file__).split(os.path.sep)[:(-1 - DIRS_FROM_ROOT)])
def load_json(path):
    """Parse the JSON file at *path* and return the resulting object."""
    with open(path) as config_file:
        return json.load(config_file)
def load_config():
    """Load config.json from the repo root, then overlay the optional prod and
    dev configs on top of it (dev wins over prod wins over base).

    Raises RuntimeError when the base config file is missing.
    """
    config_path = pathlib.Path(os.sep.join([get_root_path(), CONFIG_NAME]))
    if (not config_path.exists()):
        raise RuntimeError("Unable to find config.json file in root!")
    config = load_json(config_path)

    ## Overlay the optional configs in increasing priority: prod first, dev last.
    ## (Replaces the previous duplicated per-file loops with dict.update.)
    for override_name in (PROD_CONFIG_NAME, DEV_CONFIG_NAME):
        override_path = pathlib.Path(os.sep.join([get_root_path(), override_name]))
        if (override_path.exists()):
            config.update(load_json(override_path))

    return config
def is_linux():
    """True when the current platform string identifies Linux."""
    return "linux" in PLATFORM
def is_windows():
    """True when the current platform string identifies Windows."""
    return "win" in PLATFORM
def initialize_logging(logger):
    """Attach formatting, the configured level, and a rotating file handler to
    *logger*, then return it."""
    FORMAT = "%(asctime)s - %(module)s - %(funcName)s - %(levelname)s - %(message)s"
    formatter = logging.Formatter(FORMAT)
    logging.basicConfig(format=FORMAT)

    ## Map the configured name onto a logging level; unknown values fall back to
    ## DEBUG. (Replaces the previous five-branch if/elif chain.)
    log_level = str(CONFIG_OPTIONS.get("log_level", "DEBUG"))
    level = getattr(logging, log_level.upper(), None)
    if (not isinstance(level, int)):
        level = logging.DEBUG
    logger.setLevel(level)

    ## Get the directory containing the logs and make sure it exists, creating it if it doesn't
    log_path = CONFIG_OPTIONS.get("log_path")
    if (not log_path):
        log_path = os.path.sep.join([get_root_path(), "logs"])  # Default logs to a 'logs' folder inside the clipster directory
    pathlib.Path(log_path).mkdir(parents=True, exist_ok=True)  # Basically a mkdir -p $log_path
    log_file = os.path.sep.join([log_path, "clipster.log"])  # Build the true path to the log file

    ## Setup and add the rotating log handler to the logger
    max_bytes = CONFIG_OPTIONS.get("log_max_bytes", 1024 * 1024 * 10)  # 10 MB
    backup_count = CONFIG_OPTIONS.get("log_backup_count", 10)
    rotating_log_handler = RotatingFileHandler(log_file, maxBytes=max_bytes, backupCount=backup_count)
    rotating_log_handler.setFormatter(formatter)
    logger.addHandler(rotating_log_handler)

    return logger
## NOTE(review): this wipes the process environment wholesale. Presumably it's
## meant to keep secrets out of child processes, but it also breaks anything
## that reads os.environ later (e.g. AWS credential lookup) — confirm intent.
os.environ = {}
CONFIG_OPTIONS = load_config()  # Populate the module-level config on import
|
naschorr/clipster | code/exceptions.py | from discord.errors import ClientException
class UnableToConnectToVoiceChannelException(ClientException):
    """Raised when the client can't join a voice channel.

    The ``connect``/``speak`` keyword flags record which of those permissions
    were available at the time of the failure.
    """

    def __init__(self, message, channel, **kwargs):
        super().__init__(message)
        self._channel = channel
        self._can_connect = kwargs.get('connect', False)
        self._can_speak = kwargs.get('speak', False)

    @property
    def channel(self):
        """The voice channel that could not be joined."""
        return self._channel

    @property
    def can_connect(self):
        """Whether the 'connect' permission was available."""
        return self._can_connect

    @property
    def can_speak(self):
        """Whether the 'speak' permission was available."""
        return self._can_speak
class AlreadyInVoiceChannelException(ClientException):
    '''
    Exception that's thrown when the client is already in the destination voice channel. Usually happens due to
    disconnecting the bot while connected, and reconnecting before the bot can time out.
    '''

    def __init__(self, message, channel):
        super(AlreadyInVoiceChannelException, self).__init__(message)
        self._channel = channel  # The channel the client is already connected to

    @property
    def channel(self):
        ## The voice channel the client was already a member of
        return self._channel
|
naschorr/clipster | code/help_command.py | <gh_stars>1-10
import logging
import random
import utilities
import dynamo_helper
from discord.ext import commands
from discord.ext.commands import DefaultHelpCommand, Paginator
## Config
CONFIG_OPTIONS = utilities.load_config()
## Logging
logger = utilities.initialize_logging(logging.getLogger(__name__))
class ClipsterHelpCommand(commands.DefaultHelpCommand):
    """Custom help renderer: shows basic commands, the base clip group, and the
    drill-down clip categories."""

    @property
    def max_name_size(self):
        """
        int : Returns the largest name length of the bot's commands.
        """
        size = 0
        try:
            commands = self.context.bot.commands
            if commands:
                size = max(map(lambda c: len(c.name) if self.show_hidden or not c.hidden else 0, commands))
        except AttributeError as e:
            ## No usable context/bot yet; fall back to a reasonable column width
            size = 15

        ## Account for the activation prefix that's printed before every name
        return size + len(CONFIG_OPTIONS.get('activation_string', ''))

    def dump_header_boilerplate(self):
        """
        Adds the header boilerplate text (Description, Version, How to activate) to the paginator
        """
        self.paginator.add_line(CONFIG_OPTIONS.get("description"), empty=False)

        ## Append the version info into the help screen
        version_note = "Clipster version: {}".format(CONFIG_OPTIONS.get("version", "Beta"))
        self.paginator.add_line(version_note, empty=True)

        ## Append (additional) activation note
        activation_note = "Activate with the '{0}' character (ex. '{0}help')".format(self.clean_prefix)
        self.paginator.add_line(activation_note, empty=True)

    def dump_footer_boilerplate(self, categories):
        """
        Adds the footer boilerplate text (Using the help interface) to the paginator
        """
        # Ending note logic from HelpFormatter.format
        command_name = self.context.invoked_with
        ending_note = "Check out the other clip categories! Why not try '{0}{1} {2}'?".format(
            self.clean_prefix,
            command_name,
            random.choice(categories)
        )
        self.paginator.add_line(ending_note)

    def dump_commands(self):
        """
        Adds information about the bot's available commands (unrelated to the clip commands) to the paginator
        """
        self.paginator.add_line("Basic Commands:")
        for command in sorted(self.context.bot.commands, key=lambda cmd: cmd.name):
            ## Only surface non-clip commands (plus 'random'/'find') that aren't hidden
            if((command.module != "clips" or command.name == 'random' or command.name == 'find') and not command.hidden):
                entry = ' {0}{1:<{width}} {2}'.format(
                    CONFIG_OPTIONS.get('activation_string', ''),
                    command.name,
                    command.short_doc,
                    width=self.max_name_size
                )
                self.paginator.add_line(self.shorten_text(entry))
        self.paginator.add_line()

    def dump_clip_group(self, clip_group, width=None):
        """
        Adds information about the supplied clip group (Group name, tabbed list of clip commands) to the paginator
        """
        if(not width):
            width = self.max_name_size

        self.paginator.add_line(clip_group.name + ":")
        for name, clip in sorted(clip_group.clips.items(), key=lambda tup: tup[0]):
            entry = ' {0}{1:<{width}} {2}'.format(
                CONFIG_OPTIONS.get('activation_string', ''),
                name,
                clip.kwargs.get("help"),
                width=width
            )
            self.paginator.add_line(self.shorten_text(entry))
        self.paginator.add_line()

    def dump_clip_categories(self, clip_groups, width=None):
        """
        Adds information about the bot's clip categories, that the user can drill down into with the help interface,
        to the paginator
        """
        if(not width):
            width = self.max_name_size

        help_string = '{}help '.format(CONFIG_OPTIONS.get('activation_string', ''))
        ## Category entries are prefixed with the full help invocation, so widen accordingly
        width += len(help_string)

        self.paginator.add_line('Clip Category Help:')
        for name, group in sorted(clip_groups.items(), key=lambda tup: tup[0]):
            ## Don't insert empty groups
            if(len(group.clips) > 0):
                entry = ' {0}{1:<{width}} {2}'.format(
                    help_string,
                    group.key,
                    group.description,
                    width=width
                )
                self.paginator.add_line(self.shorten_text(entry))
        self.paginator.add_line()

    async def send_clip_category_help(self, command):
        '''Sends help information for a given command representing a Clip Category'''

        ## Initial setup (self.paginator is prepared by the caller, send_command_help)
        max_width = self.max_name_size
        clip_groups = self.context.bot.get_cog("Clips").clip_groups

        self.dump_header_boilerplate()
        self.dump_clip_group(clip_groups[command.name], max_width)
        self.dump_clip_categories(clip_groups, max_width)
        self.dump_footer_boilerplate(list(clip_groups.keys()))

        self.paginator.close_page()
        await self.send_pages()

    async def send_bot_help(self, mapping):
        '''The main bot help command (overridden)'''

        ## Initial setup
        self.paginator = Paginator()
        ## NOTE(review): assumes the "Clips" cog is loaded and has an "internet" group
        clip_groups = self.context.bot.get_cog("Clips").clip_groups

        self.dump_header_boilerplate()

        ## Dump the non-clip commands
        self.dump_commands()

        ## Dump the base clip commands
        clips_group = clip_groups["internet"]
        if(clips_group):
            self.dump_clip_group(clips_group)

        ## Dump the names of the additional clips. Don't print their commands because that's too much info.
        ## This is a help interface, not a CVS receipt
        self.dump_clip_categories(clip_groups)
        self.dump_footer_boilerplate(list(clip_groups.keys()))

        await self.send_pages()

    async def send_command_help(self, command):
        '''Help interface for the commands themselves (Overridden)'''

        ## Initial setup
        self.paginator = Paginator()
        clip_groups = self.context.bot.get_cog("Clips").clip_groups

        ## If the command is actually a clip category, show the category help instead
        ## (Removed an unused local: `command_str = command.__str__()`)
        if(command.name in clip_groups):
            await self.send_clip_category_help(command)
            return

        # <signature> section
        signature = self.get_command_signature(command)
        self.paginator.add_line(signature, empty=True)

        # <long doc> section
        help_section = command.help
        if help_section:
            if(len(help_section) > self.paginator.max_size):
                for line in help_section.splitlines():
                    self.paginator.add_line(line)
            else:
                self.paginator.add_line(help_section, empty=True)

        self.paginator.close_page()
        await self.send_pages()
|
naschorr/clipster | code/module_manager.py | import os
import sys
import logging
import inspect
import importlib
from collections import OrderedDict
import utilities
## Config
CONFIG_OPTIONS = utilities.load_config()
## Logging
logger = utilities.initialize_logging(logging.getLogger(__name__))
class ModuleEntry:
    """Bundles a module's entry-point class with everything needed to
    instantiate it later (its module, positional args, and keyword args)."""

    def __init__(self, cls, is_cog, *init_args, **init_kwargs):
        self.cls = cls
        self.name = cls.__name__
        self.module = sys.modules[cls.__module__]   # Module object the class lives in
        self.is_cog = is_cog
        self.args = init_args
        self.kwargs = init_kwargs

    ## Methods

    def get_class_callable(self):
        """Look the class up on its (possibly reloaded) module and return it."""
        return getattr(self.module, self.name)
class ModuleManager:
    """Discovers, registers, and hot-reloads the bot's modules and cogs."""

    ## Keys
    MODULES_FOLDER_KEY = "modules_folder"

    def __init__(self, clipster, bot):
        self.modules_folder = CONFIG_OPTIONS.get(self.MODULES_FOLDER_KEY, "")
        self.clipster = clipster
        self.bot = bot
        self.modules = OrderedDict()

    ## Methods

    ## Registers a module, class, and args necessary to instantiate the class
    def register(self, cls, is_cog=True, *init_args, **init_kwargs):
        if(not inspect.isclass(cls)):
            raise RuntimeError("Provided class parameter '{}' isn't actually a class.".format(cls))

        if(not init_args):
            init_args = [self.clipster, self.bot]

        module_entry = ModuleEntry(cls, is_cog, *init_args, **init_kwargs)
        self.modules[module_entry.name] = module_entry

        ## Add the module to the bot (if it's a cog), provided it hasn't already been added.
        if(not self.bot.get_cog(module_entry.name) and module_entry.is_cog):
            cog_cls = module_entry.get_class_callable()
            self.bot.add_cog(cog_cls(*module_entry.args, **module_entry.kwargs))
            logger.info("Registered cog: {} on bot.".format(module_entry.name))

    ## Finds and registers modules inside the modules folder
    def discover(self):
        ## Assumes that the modules folder is inside the root
        modules_folder_path = os.path.abspath(os.path.sep.join(["..", self.modules_folder]))

        ## Expose the modules folder to the interpreter, so modules can be loaded
        sys.path.append(modules_folder_path)

        ## Build a list of potential module paths and iterate through it...
        for candidate in os.listdir(modules_folder_path):
            ## Skip anything that isn't a python file
            if(not candidate.endswith(".py")):
                continue
            name = candidate[:-3]

            ## Attempt to import the module (akin to 'import [name]') and register it normally
            ## NOTE: Modules MUST have a 'main()' function that essentially returns a list containing all the args
            ##       needed by the 'register()' method of this ModuleManager class. At a minimum this list MUST
            ##       contain a reference to the class that serves as an entry point to the module. You should also
            ##       specify whether or not a given module is a cog (for discord.py) or not.
            try:
                module = importlib.import_module(name)
                declarations = module.main()

                ## Validate the shape of the main() method's data, and attempt to tolerate poor formatting
                if(not isinstance(declarations, list)):
                    declarations = [declarations]
                elif(len(declarations) == 0):
                    raise RuntimeError("Module '{}' main() returned empty list. Needs a class object at minimum.".format(module.__name__))

                self.register(*declarations)
            except Exception:
                ## BUG FIX: the old handler did `del module`, which itself raised a
                ## NameError whenever import_module failed (the name was never bound),
                ## and it silently discarded the original error. Log the failure and
                ## evict any partially-imported module so a later import starts clean.
                logger.exception("Error loading module candidate: {}".format(candidate))
                sys.modules.pop(name, None)

    ## Reimport a single module
    def _reimport_module(self, module):
        try:
            importlib.reload(module)
        except Exception as e:
            logger.error("Error: ({}) reloading module: {}".format(e, module))
            return False
        else:
            return True

    ## Reloads a module with the provided name
    def _reload_module(self, module_name):
        module_entry = self.modules.get(module_name)
        assert module_entry is not None

        self._reimport_module(module_entry.module)

    ## Reload a cog attached to the bot
    def _reload_cog(self, cog_name):
        module_entry = self.modules.get(cog_name)
        assert module_entry is not None

        self.bot.remove_cog(cog_name)
        self._reimport_module(module_entry.module)
        cog_cls = module_entry.get_class_callable()
        self.bot.add_cog(cog_cls(*module_entry.args, **module_entry.kwargs))

    ## Reload all of the registered modules; returns the number successfully reloaded
    def reload_all(self):
        counter = 0
        for module_name in self.modules:
            try:
                if(self.modules[module_name].is_cog):
                    self._reload_cog(module_name)
                else:
                    self._reload_module(module_name)
            except Exception as e:
                logger.error("Error: {} when reloading cog: {}".format(e, module_name))
            else:
                counter += 1

        logger.info("Loaded {}/{} cogs.".format(counter, len(self.modules)))
        return counter
|
lamharrison/coronavirus-machine-learning | mlp_uk.py | <reponame>lamharrison/coronavirus-machine-learning
import numpy as np
np.random.seed(1337)  # Fix the seed for reproducible weight initialisation
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt

## Small MLP regressor: day index (1 feature) -> normalised cumulative cases
model = Sequential()
model.add(Dense(units=50, input_dim=1, activation='relu'))
model.add(Dense(units=50, activation='relu'))
# NOTE(review): two stacked single-unit output layers — the sigmoid bounds the
# output to (0, 1) (matching the normalised targets) and the final linear unit
# rescales it. A single output layer is more conventional; confirm intent.
model.add(Dense(units=1, activation='sigmoid'))
model.add(Dense(units=1, activation='linear'))
model.compile(optimizer='adam', loss='mean_squared_error')
model.summary()
import csv

# italy
# Load Italy's cumulative confirmed-case history (one integer per CSV row)
with open('data/italy_history.csv', 'r') as csvfile:
    reader = csv.reader(csvfile)
    rows = [row for row in reader]
corn_y = []
for each_y in rows:
    corn_y.append(int(each_y[0]))
dates = len(corn_y)
corn_x = list(range(1, dates + 1))  # Day indices 1..N are the model input
corn_x = np.array(corn_x)
corn_y = np.array(corn_y)
italy_dates_length = len(corn_x)
# set italy absorb
# "absorb" = normalisation ceiling: 110% of the latest count keeps targets in (0, 1)
italy_absorb = corn_y[italy_dates_length-1]*1.1
corn_y_norm = corn_y / italy_absorb
model.fit(corn_x, corn_y_norm, epochs=20000, shuffle=False)
# Predict on the training range and rescale back to absolute case counts
corn_y_predict = model.predict(corn_x)
corn_y_predict = corn_y_predict * italy_absorb
fig1 = plt.figure(figsize=(7, 5))
plt.scatter(corn_x, corn_y, label='Real Confirmed')
plt.plot(corn_x, corn_y_predict, label='Predict Result')
plt.title('Italy Confirmed VS Dates')
plt.xlabel('Dates')
plt.ylabel('Amount')
plt.legend()
plt.show()
# germany
# Same pipeline as Italy: load history, normalise, fit, plot
with open('data/germany_history.csv', 'r') as csvfile:
    reader = csv.reader(csvfile)
    rows = [row for row in reader]
ger_corn_y = []
for each_y in rows:
    ger_corn_y.append(int(each_y[0]))
dates = len(ger_corn_y)
ger_corn_x = list(range(1, dates + 1))
ger_corn_x = np.array(ger_corn_x)
ger_corn_y = np.array(ger_corn_y)
ger_dates_length = len(ger_corn_x)
# Normalisation ceiling: 110% of the latest count
ger_absorb = ger_corn_y[ger_dates_length-1]*1.1
corn_y_norm = ger_corn_y / ger_absorb
# NOTE(review): this continues training the SAME model already fitted on Italy,
# not a fresh one — confirm the warm-start is intended.
model.fit(ger_corn_x, corn_y_norm, epochs=20000, shuffle=False)
corn_y_predict = model.predict(ger_corn_x)
corn_y_predict = corn_y_predict * ger_absorb
fig_italy = plt.figure(figsize=(7, 5))
plt.scatter(ger_corn_x, ger_corn_y, label='Real Confirmed')
plt.plot(ger_corn_x, corn_y_predict, label='Predict Result')
plt.title('Germany Confirmed VS Dates')
plt.xlabel('Dates')
plt.ylabel('Amount')
plt.legend()
plt.show()
# France model
# Same pipeline again; the shared model is refit on France's history
with open('data/france_history.csv', 'r') as csvfile:
    reader = csv.reader(csvfile)
    rows = [row for row in reader]
fr_corn_y = []
for each_y in rows:
    fr_corn_y.append(int(each_y[0]))
dates = len(fr_corn_y)
fr_corn_x = list(range(1, dates + 1))
fr_corn_x = np.array(fr_corn_x)
fr_corn_y = np.array(fr_corn_y)
fr_dates_length = len(fr_corn_x)
fr_absorb = fr_corn_y[fr_dates_length-1]*1.1
corn_y_norm = fr_corn_y / fr_absorb
model.fit(fr_corn_x, corn_y_norm, epochs=20000, shuffle=False)
corn_y_predict = model.predict(fr_corn_x)
corn_y_predict = corn_y_predict * fr_absorb
fig_italy = plt.figure(figsize=(7, 5))
plt.scatter(fr_corn_x, fr_corn_y, label='Real Confirmed')
plt.plot(fr_corn_x, corn_y_predict, label='Predict Result')
plt.title('France Confirmed VS Dates')
plt.xlabel('Dates')
plt.ylabel('Amount')
plt.legend()
plt.show()
# uk corona
import json
url = 'https://api.covid19uk.live/historyfigures'

def read_url_to_json(url):
    """Fetch *url* and decode the response body as JSON."""
    import urllib.request as request
    with request.urlopen(url) as webpage:
        payload = webpage.read()
    return json.loads(payload)
# Pull the UK's cumulative confirmed series from the live API
read_data = read_url_to_json(url)
each_data = read_data['data']
uk_comfirmed_data = []
for each in each_data:
    uk_comfirmed_data.append(each['confirmed'])
# add uk latest data manually
# uk_comfirmed_data.append(206715)
uk_date_length = len(uk_comfirmed_data)
uk_dates = list(range(1, uk_date_length + 1))
uk_comfirmed_data = np.array(uk_comfirmed_data)
uk_dates = np.array(uk_dates)
# increase absorb value
# A higher ceiling (130%) leaves the sigmoid headroom for future growth
uk_absorb_amount = uk_comfirmed_data[uk_date_length-1]*1.3
uk_comfirmed_data_norm = uk_comfirmed_data / uk_absorb_amount
# fit model
model.fit(uk_dates, uk_comfirmed_data_norm, epochs=30000, shuffle=False)
uk_comfirmed_data_predict = model.predict(uk_dates)
uk_comfirmed_data_predict = uk_comfirmed_data_predict * uk_absorb_amount
fig2 = plt.figure(figsize=(7, 5))
plt.scatter(uk_dates, uk_comfirmed_data, label='Real Confirmed')
plt.plot(uk_dates, uk_comfirmed_data_predict, label='Predict Result')
plt.title('UK Confirmed VS Dates')
plt.xlabel('Dates')
plt.ylabel('Amount')
plt.legend()
plt.show()
# NOTE(review): the name is reused here for the EXTENDED day range (history + 30
# days ahead), not for predictions — consider a clearer name.
uk_comfirmed_data_predict = np.array(list(range(1, uk_date_length+31)))
uk_comfirmed_predict_7_days = model.predict(uk_comfirmed_data_predict)
fig3 = plt.figure(figsize=(7, 5))
plt.scatter(uk_dates, uk_comfirmed_data, label='Real Confirmed')
plt.plot(uk_comfirmed_data_predict, uk_comfirmed_predict_7_days*uk_absorb_amount, label='Predict Result')
plt.title('UK Prediction Confirmed VS Dates')
plt.xlabel('Dates')
plt.ylabel('Amount')
plt.legend()
plt.savefig('uk_model_7_days.png')
plt.show()
# save prediction data
with open('uk_prediction_data/uk_prediction.json', 'w') as f:
    dict_uk_data = dict(zip(list(range(1, uk_date_length+31)), (uk_comfirmed_predict_7_days*uk_absorb_amount).tolist()))
    json.dump(dict_uk_data, f)
|
AdityaSrivast/senz | senz-client-samples/python/tests/test_image_utils.py | import sys
import os
import base64
# Make the package root importable so `utils.image_utils` resolves when pytest
# collects this file from the tests directory.
sys.path.insert(
    0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
from utils.image_utils import imageToString, stringToImage
def test_imagetoString():
    """Round-trip check: base64-decoding imageToString's output must yield the
    raw bytes of the sample image."""
    file_path = (os.path.dirname(__file__))+"/sample.jpg"
    with open(file_path, "rb") as imageFile:
        raw_bytes = imageFile.read()
    assert base64.b64decode(imageToString(file_path)) == raw_bytes
|
AdityaSrivast/senz | senz-client-samples/python/senz/client.py | #!/usr/bin/env python3
import socket, sys, time
# Server endpoint: the senz server listens on localhost:2552
host, port = 'localhost', 2552

# Create an ipv4 socket
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# Connect the client (module-level: the connection is opened on import)
client.connect((host, port))
# Send message to server
def sendMessage(message):
    """Send *message* to the server, then print the server's reply."""
    client.send(message.encode())
    reply = receiveMessage()
    print("[Server] {}\n".format(reply))
def receiveMessage():
    """Read up to ~5 MB from the socket and return it decoded as text."""
    return client.recv(5000000).decode()
def getTimestamp():
    """Current local time formatted as a 14-digit YYYYMMDDHHMMSS string."""
    # strftime with no time argument formats time.localtime() — identical result
    return time.strftime("%Y%m%d%H%M%S")
if __name__ == "__main__":
    # Load file if provided (each line is treated as one command)
    commands = []
    if len(sys.argv) > 1:
        filename = sys.argv[1]
        with open (filename, "r") as myfile:
            commands=myfile.read().split('\n')
        print(commands)

    # send commands of file
    for line in commands:
        sendMessage(line)

    # Interactive loop: forward stdin lines until EOF/Ctrl-C terminates the process
    while True:
        msg = input()
        sendMessage(msg)
|
makokaz/matsubo | examples/example_cog.py | <gh_stars>0
"""Example Cog file
Simple example file to help understand how to add more functionality to this bot.
This cog defines the following discord commands:
- clear
- ping
It also defines special behaviour when a member has joined the server:
- on_member_join
Refer to the Discord Docs to learn the basic functionality of a discord bot:
https://discordpy.readthedocs.io/en/stable/
"""
import discord
from discord.ext import commands
class ExampleCog(commands.Cog):
    """Example cog demonstrating a listener and two commands; use as a template."""

    def __init__(self, bot):
        self.bot = bot  # Reference to the running bot (used for latency, etc.)

    ##############
    # Add here your functions
    ##############

    # EXAMPLE: Whenever somebody joins this server, this function will be called
    @commands.Cog.listener()
    async def on_member_join(self, member):
        """
        Whenever somebody joins this server, this function will be called.
        Currently, it only prints to the console that somebody has joined this server.
        """
        print(f'{member} has joined the server.')

    # EXAMPLE: If you write `.ping` in a discord message, the bot will respond with the ping in ms
    @commands.command(aliases=['test'])
    async def ping(self, ctx):
        """Returns with a `pong!` message and the ping"""
        await ctx.send(f'pong! [{round(self.bot.latency * 1000)}ms]')

    # EXAMPLE: If you write `.clear 5`, the bot will delete the 5 last messages in this channel.
    @commands.command()
    @commands.has_permissions(manage_messages=True)
    async def clear(self, ctx, amount: int):
        """Bulk clears the last #amount messages in the channel"""
        await ctx.channel.purge(limit=amount)
# This function will register this cog as a module to the bot, so the bot receives the functionality you defined above
def setup(bot):
    """Entry point used by bot.load_extension to attach this cog."""
    bot.add_cog(ExampleCog(bot))
|
makokaz/matsubo | cogs/utils/event.py | """Event class
Simple event class to define what attributes an event has, and other helpful functions like euqlity-checking.
"""
import calendar
from . import utils
class Event(object):
    """A single scraped event, plus helpers for equality and display formatting."""

    def __init__(self,
            id='',              # Unique ID for every event
            name='',            # Event name
            description='',     # Event description
            url='',             # URL where event was found
            img='',             # Image URL
            date_start='',      # Start-date of event
            date_end='',        # End-date of event
            date_fuzzy='',      # If no hard date is given
            time_start='',      # Time of event
            time_end='',        # Time of event
            location='',        # Event-Location
            cost='',            # Entry-fee to event
            status='',          # Cancelled, Online, Postponed, ...
            other='',           # Additional information tag
            visibility='',      # Prefecture, University, ... used for visibility to channels
            source='',          # Source where event was scrapped
            date_added=None):   # ONLY SET BY DATABASE: Date of when event was added to database
        self.id = id
        self.name = name
        self.description = description
        self.url = url
        self.img = img
        self.date_start = date_start
        self.date_end = date_end
        self.date_fuzzy = date_fuzzy
        self.time_start = time_start
        self.time_end = time_end
        self.location = location
        self.cost = cost
        self.status = status
        self.other = other
        self.visibility = visibility
        self.source = source
        self.date_added = date_added

    def __eq__(self, other):
        ## Events are identified solely by their ID
        if isinstance(other, Event):
            return self.id == other.id
        return False

    def __str__(self):
        ## The continuation lines below are part of the f-string literal, so
        ## their leading whitespace is emitted verbatim in the output.
        text = f"""***{self.name}*** [{self.id}]
date: {self.getDateRange() if not self.date_fuzzy else self.date_fuzzy}
time: {self.getTimeRange()}
location: {self.location}
cost: {self.cost}
url: {self.url}
image-url: {self.img}
status: {self.status}
visibility: {self.visibility}
source: {self.source}
other: {self.other}
description: {self.description}"""
        return text

    def getDateRange(self) -> str:
        """Returns date-range of when event occurs"""
        if self.date_fuzzy:
            return self.date_fuzzy
        date_start = str(utils.custom_strftime('%b {S} ({DAY}), %Y', self.date_start))
        date_end = str(utils.custom_strftime('%b {S} ({DAY}), %Y', self.date_end)) if self.date_start != self.date_end else ''
        ## NOTE: .strip(' - ') strips the CHARACTERS ' ' and '-' from both ends
        ## (not the substring) — fine for trimming the "... - " tail here.
        return f"{date_start} - {date_end}".strip(' - ')

    def getTimeRange(self) -> str:
        """Returns time-range of when event occurs; '---' when no start time"""
        if not self.time_start:
            return '---'
        time_start = self.time_start.strftime('%H:%M')
        time_end = self.time_end.strftime('%H:%M') if self.time_end else ''
        return f"{time_start} - {time_end}".strip(' - ')
def mergeDuplicateEvents(events, check_duplicate_func=None, merge_func=None, verbose=False):
    """
    Merges duplicate events in the given list, in place, and returns it.
    Duplicate events happen when e.g. the same event is hold next week again.
    Optional arguments:
    * check_duplicate_func: Pointer to function that checks if two events are identical. [Default: same ID and same start date]
    * merge_func: Pointer to function that merges two duplicates. [Default: keep the first event, discard the second]
    * verbose: Flag, defines if merged events shall be printed
    """
    # Merging functions
    def sameIDDate(eventA, eventB):
        """Checks if two events are duplicate by their ID and start_date"""
        if not (eventA and eventB):
            utils.print_warning("One of the two events was `None`!")
            return False
        return eventA.id == eventB.id and eventA.date_start == eventB.date_start

    def mergeDate(eventA, eventB):
        """Merges two events by folding the duplicate's dates into the survivor"""
        if not (eventA and eventB):
            utils.print_warning("One of the two events was `None`!")
            return eventA if eventA else eventB
        ## BUG FIX: Event has no `date` attribute, so the previous
        ## `eventA.date += ...` raised AttributeError. Fold the duplicate's
        ## dates into `date_fuzzy`, the free-form date field, instead.
        eventA.date_fuzzy = '{} & {}'.format(
            eventA.date_fuzzy or eventA.getDateRange(),
            eventB.date_fuzzy or eventB.getDateRange()
        )
        return eventA

    def dontmerge(eventA, eventB):
        """Simply discards eventB. Does not merge metadata."""
        return eventA

    # Allow merge_func to be passed by name
    if isinstance(merge_func, str):
        merge_func = {'mergeDate': mergeDate, 'dontmerge': dontmerge}.get(merge_func, dontmerge)

    # Fallbacks: compare by ID + start date; keep the first of each duplicate pair
    if check_duplicate_func is None:
        check_duplicate_func = sameIDDate
    if merge_func is None:
        merge_func = dontmerge

    # Compare every pair once; merge duplicates into the earlier event, drop the later one
    i = 0
    while i < len(events):
        eventA = events[i]
        j = i + 1
        while j < len(events):
            eventB = events[j]
            if check_duplicate_func(eventA, eventB):
                eventA = merge_func(eventA, eventB)
                events[i] = eventA
                if verbose:
                    print("Merged events:\n\teventA: {}\n\teventB: {}".format(eventA.url,eventB.url))
                del events[j]
                j -= 1
            j += 1
        i += 1
    return events
|
makokaz/matsubo | cogs/utils/utils.py | <gh_stars>0
"""Utils
Simple utils python file for handy functions that are needed everywhere in the code.
"""
import asyncio
import calendar
import datetime
import pytz
# import builtins
from functools import wraps
def day_suffix(d:int) -> str:
    """Returns day-suffix 'st', 'nd', 'rd', 'th' for a day of a month.
    For example, the 21st of August -> returns 'st'.

    Parameters
    ----------
    d: :class:`int`
        Integer that represents the day of the month.
        Ranges from 1-31(max).
    """
    # 11th-13th are irregular; otherwise the last digit decides
    if 11 <= d <= 13:
        return 'th'
    suffixes = {1: 'st', 2: 'nd', 3: 'rd'}
    return suffixes.get(d % 10, 'th')
def day_kanji(w:str) -> str:
    """Returns the Japanese kanji for an English weekday name ('' if unknown).

    Parameters
    ----------
    w: :class:`str`
        String that represents the day of the week, for example 'Monday'.
    """
    kanji_by_weekday = {
        'Monday': '月',
        'Tuesday': '火',
        'Wednesday': '水',
        'Thursday': '木',
        'Friday': '金',
        'Saturday': '土',
        'Sunday': '日',
    }
    return kanji_by_weekday.get(w, '')
def custom_strftime(format:str, t:datetime.datetime) -> str:
    """Format *t* with strftime, then expand the custom placeholders:
    `{S}` becomes the day number plus its ordinal suffix, and
    `{DAY}` becomes the kanji for the weekday.

    Parameters
    ----------
    format: :class:`str`
        String with special keys `{S}` and `{DAY}` to be formatted.
    t: :class:`datetime`
        The date from where to fetch the information.
    """
    rendered = t.strftime(format)
    rendered = rendered.replace('{S}', str(t.day) + day_suffix(t.day))
    weekday_name = calendar.day_name[t.weekday()]
    return rendered.replace('{DAY}', day_kanji(weekday_name))
def getJSTtime():
    """Returns the current time in JST, formatted 'YYYY-MM-DD HH:MM:SS'."""
    tokyo = pytz.timezone('Asia/Tokyo')
    return datetime.datetime.now(tz=tokyo).strftime('%Y-%m-%d %H:%M:%S')
class bcolors:
    """Class that defines colors for printing in a console."""
    ## ANSI SGR escape sequences; ENDC resets back to the default style
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def print_color(text:str, color:str):
    """Print *text* wrapped in the given ``bcolors`` escape code, followed by a
    reset so subsequent output is unstyled."""
    print("{}{}{}".format(color, text, bcolors.ENDC))
def print_warning(text:str):
    """
    Print function for warnings (yellow color).
    It is a shorthand version of writing ``print_color(text, bcolors.WARNING)``.
    """
    print_color(text, bcolors.WARNING)
def log_call(func):
    """
    Decorator that prints an entry banner (with the function's name) before the
    call and a closing banner after it. Works for both async and sync callables;
    note the decorated function always becomes a coroutine function.
    """
    async def _invoke(*args, **kwargs):
        # Await coroutines; call plain functions directly
        # (keeps the decorator independent of async vs not-async targets)
        if asyncio.iscoroutinefunction(func):
            return await func(*args, **kwargs)
        return func(*args, **kwargs)

    @wraps(func)
    async def wrapper(*args, **kwargs):
        print('============================================')
        print(f'Function called: {func.__name__.upper()}()')
        result = await _invoke(*args, **kwargs)
        print('============================================')
        return result
    return wrapper
|
makokaz/matsubo | cogs/event_listener.py | <reponame>makokaz/matsubo
"""Event Listener Cog
Discord Bot Cog that scraps the web for events, and then posts them on subscribed channels.
"""
import os
import discord
import asyncio
import datetime
import pytz
import typing
from discord.ext import commands, tasks
from itertools import cycle
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger
from .utils import utils
from .utils import database as db
from .utils.event import Event
from .utils.event_scrapper import getEvents
#########################
# Global variables
#########################
# Note: The time variables below are in UNIX CRON format:
#   *    *       *           *      *
#  min  hour  dayOfMonth   month  weekday
# For example: "Every 2nd hour at minute 0 on Monday to Thursday every month"
#  -> 0 0-23/2 * * 0-3
# NOTE(review): these strings are fed to APScheduler's CronTrigger.from_crontab,
# where day_of_week 0 is Monday (unlike classic cron where 0 is Sunday) — the
# comments below follow the APScheduler convention; verify against the scheduler.
# Local timezone
LOCAL_TZ = pytz.timezone('Asia/Tokyo')
# Times when the web-scrapper should run
SCRAP_TIMES = '0 15 * * *' # Every day at 15:00
# Times when new events shall be posted to subscribed channels
POST_TIMES = '0 20 * * 5-6' # Every Saturday & Sunday at 20:00
POST_BEFORE_WEEKS = 2 # how many weeks prior to the start of the event it is posted
# Time when it shall be reminded of events happening today/tomorrow/...
REMIND_TIMES = '0 9-10 * * *' # Every day at 9:00 AND 10:00 (hour range 9-10)
REMIND_BEFORE_DAYS = 0 # how many days before the reminder should be done
# Define (logo, thumbnail, footer) for all sources that are scrapped
SCRAP_SOURCES = {
    'Web:TokyoCheapo': {
        'footer': 'TOKYO CHEAPO',
        'icon': 'https://community.tokyocheapo.com/uploads/db1536/original/1X/91a0a0ee35d00aaa338a0415496d40f3a5cb298e.png',
        'thumbnail': 'https://cdn.cheapoguides.com/wp-content/themes/cheapo_theme/assets/img/logos/tokyocheapo/logo.png'
    },
    'Web:JapanCheapo': {
        'footer': 'JAPAN CHEAPO',
        'icon': 'https://pbs.twimg.com/profile_images/1199468429553455104/GdCZbc-R_400x400.png',
        'thumbnail': 'https://cdn.cheapoguides.com/wp-content/themes/cheapo_theme/assets/img/logos/japancheapo/logo.png'
    }
}
# Sleep status messages that will be iterated through
SLEEP_STATUS = [f"Counting 🐑... {i} {'💤' if i%2 else ''}" for i in range(1, 10)]
# How many past messages are checked per channel for event searching
SEARCH_DEPTH = 100
# TODO Make this variable disappear, and instead make it depend on utils/event_scrapper.py
# All possible topics to be subscribable
TOPICS = ['Chubu', 'Chugoku', 'Hokkaido', 'Kansai', 'Kanto', 'Kyushu', 'Okinawa', 'Shikoku', 'Tohoku']
#########################
# Classes & Functions
#########################
class EventListener(commands.Cog):
    """
    This cog gives the bot the ability to scrap for events in the web and post them in subscribed channels.
    """
    def __init__(self, bot:commands.Bot):
        # Store bot reference and start the APScheduler with the three cron jobs
        # defined by the module-level SCRAP_TIMES/POST_TIMES/REMIND_TIMES constants.
        self.bot = bot
        self.scheduler = AsyncIOScheduler()
        self.scheduler.start()
        # Start scheduled tasks
        self.scheduler.add_job(self.loop_scrap, CronTrigger.from_crontab(SCRAP_TIMES, timezone=LOCAL_TZ), id='scrap')
        self.scheduler.add_job(self.loop_post, CronTrigger.from_crontab(POST_TIMES, timezone=LOCAL_TZ), id='post')
        self.scheduler.add_job(self.loop_remind, CronTrigger.from_crontab(REMIND_TIMES, timezone=LOCAL_TZ), id='remind')
        # Print next run times of scheduled tasks
        print('Next run time of scheduled tasks:')
        print(f" > {self.scheduler.get_job('scrap').func.__name__.upper()}: {self.scheduler.get_job('scrap').next_run_time}")
        print(f" > {self.scheduler.get_job('post').func.__name__.upper()}: {self.scheduler.get_job('post').next_run_time}")
        print(f" > {self.scheduler.get_job('remind').func.__name__.upper()}: {self.scheduler.get_job('remind').next_run_time}")
        # Start other loops (idle "counting sheeps" presence)
        self.countingSheeps.start()
    @tasks.loop(seconds=10)
    async def countingSheeps(self):
        """Counts sheeps. Very handy, because it shows the bot is still running.

        Cycles the bot presence through SLEEP_STATUS every 10 seconds.
        """
        await self.bot.change_presence(status=discord.Status.idle, activity=discord.Game(next(self.status_cycle)))
    def cog_unload(self):
        # Stop the presence loop when the cog is unloaded.
        self.countingSheeps.cancel()
    @countingSheeps.before_loop
    async def before_countingSheeps(self):
        # Wait for the bot to be ready, then (re)initialize the status cycle.
        await self.bot.wait_until_ready()
        self.status_cycle = cycle(SLEEP_STATUS)
    @countingSheeps.after_loop
    async def on_countingSheeps_cancel(self):
        # Intentionally a no-op; the commented code below would restore the
        # "listening to Internet" presence on cancellation.
        # if self.countingSheeps.is_being_cancelled():
        #     await self.bot.change_presence(status=discord.Status.idle, activity=discord.Activity(name='Internet', type=discord.ActivityType.listening))
        pass
    @utils.log_call
    async def loop_scrap(self):
        """[Background task] Scraps web at specified times for new events.

        Pauses the presence loop while scrapping, then restarts it.
        """
        await self.bot.wait_until_ready()
        self.countingSheeps.cancel() #TODO check if already cancelled
        await asyncio.sleep(1) #bugfix: wait before change_presence is called too fast!
        await self.scrap()
        await asyncio.sleep(1) #bugfix: wait before change_presence is called too fast!
        self.countingSheeps.start() #TODO check if already started
        print(f"Next run time of LOOP_SCRAP(): {self.scheduler.get_job('scrap').next_run_time}")
    @utils.log_call
    async def loop_post(self):
        """[Background task] Notifies all subscribed channels of new events.

        Pauses the presence loop while posting, then restarts it.
        """
        await self.bot.wait_until_ready()
        self.countingSheeps.cancel() #TODO check if already cancelled
        await asyncio.sleep(1) #bugfix: wait before change_presence is called too fast!
        await self.notify()
        await asyncio.sleep(1) #bugfix: wait before change_presence is called too fast!
        self.countingSheeps.start() #TODO check if already started
        print(f"Next run time of LOOP_POST(): {self.scheduler.get_job('post').next_run_time}")
    @utils.log_call
    async def loop_remind(self):
        """[Background task] Reminds subscribed channels of when events are happening (today, tomorrow, ...).

        The global variable ``REMIND_BEFORE_DAYS`` defines how many days prior to the start of the event the reminder will be issued.
        Pauses the presence loop while reminding, then restarts it.
        """
        await self.bot.wait_until_ready()
        self.countingSheeps.cancel() #TODO check if already cancelled
        await asyncio.sleep(1) #bugfix: wait before change_presence is called too fast!
        await self.remind()
        await asyncio.sleep(1) #bugfix: wait before change_presence is called too fast!
        self.countingSheeps.start() #TODO check if already started
        print(f"Next run time of LOOP_REMIND(): {self.scheduler.get_job('remind').next_run_time}")
async def scrap(self):
"""Searches the web for new events, and puts them into the database"""
await self.bot.change_presence(status=discord.Status.online, activity=discord.Game('Scrapping the web...'))
# Scrap events
print("Scrapping events...")
events = getEvents()
# print("Found the following events:")
# for event in events:
# print(event)
# Insert events into database
db.eventDB.insertEvents(events)
print("Finished scrapping events!")
pass
    async def notify(self, channels:list[commands.TextChannelConverter]=None):
        """Notifies given channels of new events.
        If no list of channels are given, it defaults to notifying every channel.

        Already-posted events are edited in place (only if their embed changed);
        new, non-cancelled events are posted as fresh messages.

        Parameters
        ------------
        channels: Optional[:class:`list`[:class:`commands.TextChannelConverter`]]
            The channels to be notified, ``None`` if all channels shall be notified.
        """
        await self.bot.change_presence(status=discord.Status.online, activity=discord.Game('Notifying channels...'))
        # Extract subscribed topics per channel from database
        if channels:
            chvs = [[channel.id, db.discordDB.getChannelVisibility(channel.id)] for channel in channels]
            print(f'### Notifying channels {channels} of new events')
        else:
            chvs = db.discordDB.getAllChannelVisibility()
            print('### Notifying all channels of new events')
        # Loop over every channel
        for chv in chvs:
            channel = self.bot.get_channel(chv[0])
            topics = chv[1]
            #print(f"Channel-ID: {channel.id}; Topics: {topics}")
            # Obtain all events in database from today until POST_BEFORE_WEEKS weeks, of topics this channel has subscribed to
            events = db.eventDB.getEvents(
                visibility=topics,
                from_date=datetime.datetime.now(tz=LOCAL_TZ).date(),
                until_date=datetime.datetime.now(tz=LOCAL_TZ).date()+datetime.timedelta(weeks=POST_BEFORE_WEEKS)
            )
            ################
            # Notify channel
            ################
            print(f"-> Notifying channel #{channel}:{channel.id} of new events")
            # Find messages of events that have already been posted to discord
            messages, idx = await self.findEventMessages(channel, events)
            # Loop over every event
            for i, event in enumerate(events):
                if i in idx: # If event has already been posted before, update it with new details (if any)
                    message = messages[idx.index(i)]
                    # Only edit if embed has changed
                    if self.embedsAreEqual(message.embeds[0], self.getEmbed(event)):
                        continue
                    # Edit message
                    await message.edit(embed=self.getEmbed(event))
                    print(f'Edited event in message: {event.name} [{event.id}] -> Message-ID:{message.id}')
                    pass
                else: # Post NEW event
                    if event.status.lower() in ['cancelled','canceled']:
                        continue # Only post if event has not been cancelled in the first place
                    await channel.send(content=f'***{event.name} [{event.id}]***', embed=self.getEmbed(event))
                    print(f'Posted event to channel: {event.name} [{event.id}] -> #{channel}:{channel.id}')
                await asyncio.sleep(2) #bugfix: sleep for some time before new event is posted
        print("### Notified all channels!")
async def remind(self, channels:list[commands.TextChannelConverter]=None):
"""Reminds given channels of events that are happening soon.
If no list of channels are given, it defaults to reminding every channel.
The global variable `REMIND_BEFORE_DAYS`` defines how many days prior to the start of the event the reminder will be issued.
Parameters
------------
channels: Optional[:class:`list`[:class:`commands.TextChannelConverter`]]
The channels to be notified, ``None`` if all channels shall be notified.
"""
await self.bot.change_presence(status=discord.Status.online, activity=discord.Game('Checking for reminders...'))
# Extract subscribed topics per channel from database
if channels:
chvs = [[channel.id, db.discordDB.getChannelVisibility(channel.id)] for channel in channels]
print(f'### Reminding channels {channels} of current events')
else:
chvs = db.discordDB.getAllChannelVisibility()
print('### Reminding all channels of current events')
# Loop over every channel
for chv in chvs:
channel = self.bot.get_channel(chv[0])
topics = chv[1]
#print(f"Channel-ID: {channel.id}; Topics: {topics}")
# Obtain all currently happening events in database of topics this channel has subscribed to
events = db.eventDB.getEvents(
visibility=topics,
from_date=(datetime.datetime.now(tz=LOCAL_TZ)+datetime.timedelta(days=REMIND_BEFORE_DAYS)).date(),
until_date=(datetime.datetime.now(tz=LOCAL_TZ)+datetime.timedelta(days=REMIND_BEFORE_DAYS)).date()
)
# Remove events that are cancelled anyways -> no need to remind
for event in events:
if event.status.lower() in ['cancelled','canceled']:
events.remove(event)
if not events:
print(f"-> Channel #{channel}:{channel.id} has no currently happening events")
continue
################
# Remind channel
################
print(f"-> Reminding channel #{channel}:{channel.id} of currently happening events")
# for event in events:
# print(event)
# Find all event embeds for the reminder, so the embed-URLs can be set as links in the reminder message
event_messages, idx = await self.findEventMessages(channel, events)
events_t:list[tuple[Event,str]] = [] # list of tuples (event, discord-url)
for i, event in enumerate(events):
url = None
if i in idx:
url = event_messages[idx.index(i)].jump_url
events_t.append((event,url))
# Find today's reminder that has already been posted to Discord (if it even exists)
message = await self.findReminderMessage(channel, events)
reminder = self.getReminder(events_t)
if message: # In case event information has changed, delete reminder and create a new one
if message.content != reminder: # If reminders are different, then event information must have changed last minute!
# Delete reminder, then post new one
reminder = reminder.replace('\n',' (UPDATED!) :sparkles:\nEvent information has changed last minute!\n',1)
if message.content != reminder:
try:
await channel.send(content=reminder)
print(f'Updated reminder in channel: {event.name} [{event.id}] -> #{channel}:{channel.id}')
await message.delete()
except discord.errors.HTTPException:
utils.print_warning("Message is too big, couldn't sent it.")
else: # Reminder must not be changed
print(f'Reminder does not need to be updated in channel: {event.name} [{event.id}] -> #{channel}:{channel.id}')
else: # Reminder must not be changed
print(f'Reminder does not need to be updated in channel: {event.name} [{event.id}] -> #{channel}:{channel.id}')
else: # Send reminder
await channel.send(content=reminder)
print(f'Reminded channel: {event.name} [{event.id}] -> #{channel}:{channel.id}')
print('### Reminded all channels!')
    async def findEventMessages(self, channel: commands.TextChannelConverter, events: list[Event]) -> tuple[list[discord.Message],list[int]]:
        """Finds events that have already been posted to discord.
        Returns the messages and the indices of the events in the provided list.

        A message matches an event when (a) it was sent by this bot, (b) its
        first embed has a field starting with ``:date:``, and (c) that embed's
        footer id and date-range both match the event.

        Parameters
        ------------
        channel: :class:`discord.TextChannel`
            The channel where to search for already posted events.
        events: :class:`list`[:class:`Event`]
            The events to check for if they have already been posted.
        """
        # Search results will be appended to these lists
        messages = []
        idx = []
        # Loop over every message (only the most recent SEARCH_DEPTH are scanned)
        async for message in channel.history(limit=SEARCH_DEPTH):
            if not len(message.embeds):
                continue
            if message.author != self.bot.user:
                continue
            # Find message that has an Event embedded
            embed = message.embeds[0]
            try: # Check if embed has a date-field -> then it must be the Event-embed!
                datefield = next((field for field in embed.fields if field.value.startswith(':date:')))
            except StopIteration:
                # message has not an event-embed
                continue
            # Find index in list events that matches the discord message event
            index = next((i for i,event in enumerate(events) if event.id==embed.footer.text.split()[-1] and f':date: ***{event.getDateRange()}***'==datefield.value), None)
            if index is None:
                continue
            # Append message and index to return-lists
            messages.append(message)
            idx.append(index)
        return messages, idx
    async def findReminderMessage(self, channel: commands.TextChannelConverter, events: list[Event]) -> discord.Message:
        """Finds today's reminder message of currently happening events.

        Note:
            This method does only check if a reminder has been sent TODAY already.
            It does not check for the latest reminder message.
            If no reminder message has been sent yet today, of course no message is found.

        Parameters
        ------------
        channel: :class:`discord.TextChannel`
            The channel where to search for the reminder.
        events: :class:`list`[:class:`Event`]
            The events to check if they have all been mentioned in the reminder.
        """
        today = datetime.datetime.now(tz=LOCAL_TZ).date()
        # Header must match getReminder()'s first line exactly for the date check
        header = f"***\*\*\*Reminder [{utils.custom_strftime('%b {S} ({DAY}), %Y', today)}]\*\*\****"
        async for message in channel.history(limit=SEARCH_DEPTH):
            if message.content.startswith('***\*\*\*Reminder'):
                # This must be the lastest reminder message!
                # Now check if it is from today.
                if message.content.startswith(header): # It is a reminder from today! Return the message
                    return message
                else: # The reminder is old. So there exists no reminder from today yet
                    return None
        return None # No reminder message found
    def getReminder(self, events_t:list[tuple[Event,str]]) -> str:
        """Creates reminder message and returns as string.

        The first line is a dated header that findReminderMessage() relies on.

        Parameters
        ------------
        events_t: :class:`list`[:class:`tuple`[:class:`Event`,:class:`str`]]
            List of tuples.
            First item of the tuple is the currently happening event,
            second item is the URL to the discord message.
        """
        today = datetime.datetime.now(tz=LOCAL_TZ).date()
        string = f"***\*\*\*Reminder [{utils.custom_strftime('%b {S} ({DAY}), %Y', today)}]\*\*\****"
        string += f"\nThere are {len(events_t)} events starting { {0:'today',1:'tomorrow'}.get(REMIND_BEFORE_DAYS, f'in {REMIND_BEFORE_DAYS} days') }!"
        for event, url in events_t:
            if not url:
                url = event.url # Might still be an empty string, e.g. when emails do not have an url to the event
            if url:
                # string += f"\n • [{event.name} [{event.id}]: {event.getDateRange()}]({url})"
                string += f"\n • **{event.name} [{event.id}]: {event.getDateRange()}**\n *<{url}>*"
            else:
                # string += f"\n • **{event.name} [{event.id}]: {event.getDateRange()}**"
                string += f"\n • **{event.name} [{event.id}]: {event.getDateRange()}**"
        return string
def getEmbed(self, event: Event) -> discord.Embed:
"""Returns discord.Embed object of given event
Parameters
------------
event: :class:`Event`
The event.
"""
embed = discord.Embed(
title=event.name,
colour=discord.Colour(0xd69d37),
url=event.url,
description=f"```{event.description}```\nFind out more [here]({event.url}).",
timestamp=event.date_added if event.date_added else datetime.datetime.now(tz=pytz.timezone('Asia/Tokyo'))
)
# Set event image
if event.img:
embed.set_image(url=event.img)
# Set author
embed.set_author(
name=os.getenv("BOT_NAME", 'Matsubo'),
url=os.getenv("BOT_URL", 'https://github.com/makokaz/matsubo'),
icon_url=os.getenv("BOT_ICON_URL", 'https://discord.com/assets/f9bb9c4af2b9c32a2c5ee0014661546d.png')
)
# Set footer & thumbnail
if event.source in SCRAP_SOURCES.keys():
embed.set_footer(
text=f"{SCRAP_SOURCES[event.source]['footer']} • {event.id}",
icon_url=SCRAP_SOURCES[event.source]['icon']
)
embed.set_thumbnail(url=SCRAP_SOURCES[event.source]['thumbnail'])
else:
embed.set_footer(text=[event.source])
# Add field: CANCELLED
if event.status.lower() in ['cancelled', 'canceled']:
embed.add_field(name=':x: ***This event has been CANCELLED***', value='\u200B', inline=False)
# Add field: Date
embed.add_field(name='\u200B', value=f':date: ***{event.getDateRange()}***', inline=True)
# Add field: Time
embed.add_field(name='\u200B', value=f':clock10: ***{event.getTimeRange()}***', inline=True)
# Add field: Cost
embed.add_field(name='\u200B', value=f':coin: ***{event.cost}***', inline=True)
# Add field: Location
loc_string = ''
if event.status.lower() == 'online':
loc_string += f"[ONLINE]({event.url}), "
for location in event.location.split(', '):
loc_string += f"[{location}](https://www.google.com/maps/search/?api=1&query={location.replace(' ','%')}), "
loc_string = loc_string.rstrip(', ')
if loc_string == '':
loc_string = '---'
embed.add_field(name='\u200B', value=f":round_pushpin: ***{loc_string}***", inline=True)
return embed
def embedsAreEqual(self, embed1:discord.Embed, embed2:discord.Embed) -> bool:
"""Checks if two :class:`discord.Embed` representing two :class:`Event` are equal.
Equality happens when their metadata (all fields in the embed) are equal.
Parameters
------------
embed1: :class:`discord.Embed`
The embed of the first event.
embed2: :class:`discord.Embed`
The embed of the second event.
"""
if embed1.title != embed2.title:
return False
if embed1.url != embed2.url:
return False
if embed1.description != embed2.description:
return False
if embed1.image.url != embed2.image.url:
return False
if embed1.footer.text != embed2.footer.text:
return False
if embed1.footer.icon_url != embed2.footer.icon_url:
return False
if embed1.thumbnail.url != embed2.thumbnail.url:
return False
# Check if fields are equal
if len(embed1.fields) != len(embed2.fields):
return False
fieldsAreEqual = next((False for i in range(len(embed1.fields)) if
embed1.fields[i].name != embed2.fields[i].name or
embed1.fields[i].value != embed2.fields[i].value or
embed1.fields[i].inline != embed2.fields[i].inline
), True)
if not fieldsAreEqual:
return False
return True
    @commands.command(name='scrap')
    @commands.has_permissions(administrator=True)
    @utils.log_call
    async def cmd_scrap(self, ctx):
        """Searches the web for new events, and posts updates to all subscibed channels."""
        # Pause the presence loop while scrap+notify run, restart it afterwards.
        self.countingSheeps.cancel()
        await ctx.send(f"Scanning the web... this might take a while :coffee:")
        await self.scrap()
        await self.notify()
        await ctx.send(f"That's all I could find :innocent:")
        await asyncio.sleep(2) #bugfix: wait before change_presence is called too fast!
        self.countingSheeps.start()
    @commands.command(name='subscribe')
    @commands.has_permissions(administrator=True)
    @utils.log_call
    async def cmd_subscribe(self, ctx, channel:typing.Optional[commands.TextChannelConverter]=None, *topics):
        """Subscribes channel the message was sent in. Will post events to this channel."""
        if not channel:
            channel = ctx.channel
        # Normalize topic names and drop any that are not in TOPICS
        topics = set(topic.capitalize() if topic.capitalize() in TOPICS else None for topic in topics)
        topics.discard(None)
        if len(topics):
            # Merge new topics with the ones already subscribed
            topics_all = topics | db.discordDB.getChannelVisibility(channel.id)
            db.discordDB.updateChannel(channel.id, list(topics_all))
            await ctx.send(f"Subscribed the following new topics for channel <#{channel.id}>: {topics}\nAll subscribed topics of this channel: {topics_all}")
        else:
            await ctx.send(f"Either I don't know that topic, or you already subscribed to that topic!")
    @commands.command(name='unsubscribe')
    @commands.has_permissions(administrator=True)
    @utils.log_call
    async def cmd_unsubscribe(self, ctx, channel:typing.Optional[commands.TextChannelConverter]=None, *topics):
        """Unsubscribes topics from the channel the message was sent in. If no topics are given, the entire channel will be unsubscribed."""
        if not channel:
            channel = ctx.channel
        if len(topics):
            # Normalize topic names and drop any that are not in TOPICS
            topics = set(topic.capitalize() if topic.capitalize() in TOPICS else None for topic in topics)
            topics.discard(None)
            if not topics:
                await ctx.send(f"I don't know of that topic... Did you misspell it?")
                return
            topics_new = db.discordDB.getChannelVisibility(channel.id) - topics
        else:
            topics_new = None
        if topics_new:
            db.discordDB.updateChannel(channel.id, list(topics_new))
            await ctx.send(f"Unsubscribed the following topics from channel <#{channel.id}>: {topics}\nAll subscribed topics of this channel: {topics_new}")
        else:
            # No topics left (or none given): drop the channel row entirely
            db.discordDB.removeChannel(channel.id)
            await ctx.send(f"Unsubscribed channel <#{channel.id}> from all topics")
    @commands.command(name='getsubscribedtopics')
    @utils.log_call
    async def cmd_getSubscribedTopics(self, ctx, channel:commands.TextChannelConverter=None):
        """Returns topics this channel is subscribed to."""
        if not channel:
            channel = ctx.channel
        topics_all = db.discordDB.getChannelVisibility(channel.id)
        if topics_all:
            await ctx.send(f"All subscribed topics of <#{channel.id}>: {topics_all}")
        else:
            await ctx.send(f"<#{channel.id}> has currently no subscribtions")
@commands.command(name='gettopics')
@utils.log_call
async def cmd_getTopics(self, ctx):
"""Returns topics this channel is subscribed to."""
string = '\n'.join(f"`-` {topic}" for topic in TOPICS)
await ctx.send(f"These are all topics that can be subscribed:\n{string}")
    @cmd_getSubscribedTopics.error
    async def error_getSubscribedTopics(self, ctx, error):
        """Error handler. Is invoked when channel given to cmd_getSubscribedTopics() is unknown."""
        # BadArgument = TextChannelConverter failed to resolve the argument
        if isinstance(error, commands.BadArgument):
            await ctx.send("I don't know of that channel... Did you mistype it?")
    @commands.command(name='recreatetable')
    @commands.has_permissions(administrator=True)
    @utils.log_call
    async def cmd_recreateTable(self, ctx, *tables):
        """Recreates given tables. Accepted names: 'discord', 'event'."""
        # Map textual names to DB helper objects; unknown names become None
        tables = set({'discord':db.discordDB, 'event':db.eventDB}.get(table, None) for table in tables)
        tables.discard(None)
        if not tables:
            await ctx.send(f"... either I don't know this table, or I don't know any table by that name :thinking:\nPlease specify it more.")
            return
        db.createTables(*tables, recreate=True)
        await ctx.send(f"Recreated the following tables :thumbsup:\n{[str(table) for table in tables]}")
def setup(bot):
    """Cog entry point, called by ``bot.load_extension``."""
    bot.add_cog(EventListener(bot))
# TODO:
# - Implement command that returns which events are happening {currently; in given period; this month; in this area; ...}
# - Extend sources of event scrapping
# - Add list of topics one can subscribe
|
makokaz/matsubo | bot.py | """Discord bot
Scraps events and posts them onto discord.
"""
# import asyncio
import discord
from discord.ext import commands
from cogs.utils.utils import getJSTtime
import os
# Bot setup
BOT_TOKEN = os.getenv("BOT_TOKEN")  # Discord bot token; must be set in the environment
bot = commands.Bot(command_prefix='.')  # all commands are invoked with a leading '.'
@bot.event
async def on_ready():
    """Gets called when bot is ready. Change presence and other setup of the bot."""
    await bot.change_presence(status=discord.Status.idle, activity=discord.Activity(name='Internet', type=discord.ActivityType.listening))
    print(f"[{getJSTtime()}] Hello peeps! {os.getenv('BOT_NAME','Matsubo')} is online ⚡")
@bot.command()
async def load(ctx, extension : str):
    """Loads bot cogs during execution."""
    bot.load_extension(f'cogs.{extension}')
    string = f"Successfully loaded bot-extension '{extension}'.\nType `{bot.command_prefix}help` for an explanation of my new abilities!"
    print(string)
    await ctx.send(string)
@bot.command()
async def unload(ctx, extension : str):
    """Unloads bot cogs during execution."""
    bot.unload_extension(f'cogs.{extension}')
    string = f"Successfully unloaded bot-extension '{extension}'"
    print(string)
    await ctx.send(string)
@bot.command()
async def reload(ctx, extension : str):
    """Reloads bot cogs during execution.

    Pass ``all`` to reload every cog found in the cogs/ folder.
    """
    if extension.lower() == 'all':
        for cog in getAllCogs():
            if cog:
                bot.reload_extension(f'cogs.{cog}')
        # BUGFIX: the message was bound inside the loop, so an empty cogs
        # folder raised NameError on `string` below; bind it once after the loop.
        string = f"Successfully reloaded all active bot-extensions.\nType `{bot.command_prefix}help` for an explanation of my abilities!"
    else:
        bot.reload_extension(f'cogs.{extension}')
        string = f"Successfully reloaded bot-extension '{extension}'.\nType `{bot.command_prefix}help` for an explanation of my new abilities!"
    print(string)
    await ctx.send(string)
@bot.event
async def on_guild_join(guild):
    """Gets called when this bot joins a server."""
    print(f"Joined new server: {guild.id}")
@bot.event
async def on_guild_remove(guild):
    """Gets called when this bot is removed from a server."""
    print(f"Got kicked from server: {guild.id}")
def getAllCogs():
    """Return the module names (without extension) of all ``.py`` files in ./cogs.

    Non-Python entries are filtered out here; the original returned ``''``
    placeholders for them, which every caller then had to skip with ``if cog``
    (those guards remain valid against the filtered list).
    """
    return [filename[:-3] for filename in os.listdir('./cogs') if filename.endswith('.py')]
def loadCogs():
    """Loads all bot extensions."""
    # Load all cogs in the cogs/ folder
    for cog in getAllCogs():
        if cog:
            bot.load_extension(f'cogs.{cog}')
            print(f"Successfully loaded bot-extension '{cog}'")
    # Load other cogs (external help-command package)
    bot.load_extension('dch')
    print(f"Successfully loaded bot-extension 'discord-custom-help'")
if __name__ == "__main__":
    loadCogs()
    bot.run(BOT_TOKEN)  # blocks until the bot shuts down
    print('Running.')   # NOTE(review): only reached after shutdown, despite the wording
|
makokaz/matsubo | cogs/utils/database.py | """Database wrapper
Simple interface to save and load event data of a postgres database.
"""
import os
import sys
import psycopg2
import psycopg2.extras
import datetime
from .event import Event
from . import utils
# Setup database
if os.getenv('DATABASE_URL'): # On Heroku, all fields are concatenated into one string
    # Expected shape: postgres://USER:PW@HOST:PORT/NAME — ':', '@' and '/' are
    # turned into spaces and the pieces split out positionally.
    DB_USER, DB_PW, DB_HOST, DB_PORT, DB_NAME = os.getenv('DATABASE_URL').replace(
        'postgres://','').translate(str.maketrans({':': ' ', '@': ' ', '/': ' '})).split()
else: # Default
    DB_HOST = os.getenv("DB_HOST")
    DB_PORT = os.getenv("DB_PORT",5432)
    DB_USER = os.getenv("DB_USER")
    DB_PW = os.getenv("DB_PW")
    DB_NAME = os.getenv("DB_NAME")
# NOTE(review): len(DB_PW) raises TypeError when DB_PW is unset (None) —
# confirm the environment always provides the DB variables before startup.
print(f"DATABASE-INFO: HOST={DB_HOST},PORT={DB_PORT},USER={DB_USER},PW={'*'*len(DB_PW)},NAME={DB_NAME}")
class DBConnector():
    """
    Class helper to connect with a database using psycopg2.
    Allows to connect to database with python command:
        with DBConnector() as cur:
            # do something
    Entering opens a connection and yields a DictCursor; leaving commits the
    transaction and closes cursor and connection.
    """
    def __init__(self, host=DB_HOST, port=DB_PORT, user=DB_USER, password=DB_PW, database=DB_NAME):
        # BUGFIX: the checked-in default was the literal "<PASSWORD>" redaction
        # placeholder, which is not valid Python; restored to the module-level
        # DB_PW constant, matching the other DB_* defaults.
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.database = database
    def __enter__(self):
        self.conn = psycopg2.connect(host=self.host, port=self.port, user=self.user, password=self.password, database=self.database)
        self.cur = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        return self.cur
    def __exit__(self, type, value, traceback):
        # Commit before closing so successful writes always persist
        self.conn.commit()
        self.cur.close()
        self.conn.close()
class DBEvent():
    """
    Class helper for saving events into an event-database.
    """
    TABLE = "events"

    def __init__(self, host=DB_HOST, port=DB_PORT, user=DB_USER, password=DB_PW, database=DB_NAME):
        # BUGFIX: default restored to DB_PW (the checked-in source contained a
        # redacted "<PASSWORD>" placeholder, which is not valid Python).
        self.connector = DBConnector(host=host, port=port, user=user, password=password, database=database)

    def __str__(self):
        return self.TABLE

    def createTable(self):
        """Creates database if not present."""
        with self.connector as cur:
            cur.execute(f"""CREATE TABLE events (
                id VARCHAR NOT NULL,
                name VARCHAR NOT NULL,
                description TEXT,
                url VARCHAR,
                img VARCHAR,
                date_start DATE NOT NULL,
                date_end DATE,
                date_fuzzy VARCHAR,
                time_start TIME WITH TIME ZONE,
                time_end TIME WITH TIME ZONE,
                location VARCHAR,
                cost VARCHAR,
                status VARCHAR,
                other VARCHAR,
                visibility VARCHAR,
                source VARCHAR,
                date_added TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp,
                CONSTRAINT PK_event PRIMARY KEY (id, date_start)
            );""") #BUG: current_timestamp will use timezone of PC, but it should use Japan timezone!

    def printTable(self):
        """Print all records in database."""
        with self.connector as cur:
            cur.execute(f"SELECT * FROM events;")
            print(cur.fetchall())

    def getEvents(self, visibility:list[str]=None, from_date:datetime.datetime.date=None, until_date:datetime.datetime.date=None) -> list[Event]:
        """Return events of given visibility, in the given date duration."""
        with self.connector as cur:
            # Build the WHERE clause from a list of conditions joined with AND.
            # BUGFIX: the original trimmed trailing keywords with
            # rstrip(' AND ')/rstrip(' WHERE '), which strips *characters*,
            # not suffixes, and only worked by accident on this exact query.
            conditions = []
            data = ()
            if visibility:
                conditions.append("(visibility = ANY(%s))")
                data += (visibility,)
            if from_date:
                conditions.append("date_start >= %s")
                data += (from_date,)
            if until_date:
                conditions.append("date_end <= %s")
                data += (until_date,)
            query = f"set time zone 'Asia/Tokyo'; SELECT * FROM {self.TABLE}"
            if conditions:
                query += " WHERE " + " AND ".join(conditions)
            query += ";"
            # Execute query (values are passed as parameters, never inlined)
            cur.execute(query, data)
            # Construct Event objects and return them as a list
            events = []
            for ret in cur:
                event = Event(id=ret[0], name=ret[1], description=ret[2], url=ret[3], img=ret[4], date_start=ret[5], date_end=ret[6],
                              date_fuzzy=ret[7], time_start=ret[8], time_end=ret[9], location=ret[10], cost=ret[11], status=ret[12],
                              other=ret[13], visibility=ret[14], source=ret[15], date_added=ret[16])
                events.append(event)
            return events

    def insertEvents(self, events):
        """Inserts events into database (upsert on the (id, date_start) key)."""
        # SECURITY NOTE(review): this builds SQL by string interpolation; only
        # `description` is quote-escaped, so any other field containing a
        # single quote breaks the statement (and scraped data is untrusted
        # input). This should be rewritten with parameterized execute_values.
        with self.connector as cur:
            query = f"""set time zone 'Asia/Tokyo'; INSERT INTO events (id, name, description, url, img, date_start, date_end, date_fuzzy, time_start, time_end, location, cost, status, other, visibility, source) VALUES """
            for event in events:
                id = event.id
                name = event.name
                description = event.description.replace("'","''")
                url = event.url
                img = event.img
                date_start = event.date_start if event.date_start else 'NULL'
                date_end = event.date_end if event.date_end else 'NULL'
                date_fuzzy = event.date_fuzzy if event.date_fuzzy else 'NULL'
                time_start = event.time_start if event.time_start else 'NULL'
                time_end = event.time_end if event.time_end else 'NULL'
                location = event.location
                cost = event.cost
                status = event.status
                other = event.other if event.other else 'NULL'
                visibility = event.visibility
                source = event.source
                query += f"('{id}', '{name}', '{description}', '{url}', '{img}', '{date_start}', '{date_end}', '{date_fuzzy}', '{time_start}', '{time_end}', '{location}', '{cost}', '{status}', '{other}', '{visibility}', '{source}'),"
            query = query.strip(',') + ' ON CONFLICT ON CONSTRAINT PK_event DO UPDATE SET name=EXCLUDED.name, description=EXCLUDED.description, url=EXCLUDED.url, img=EXCLUDED.img, date_end=EXCLUDED.date_end, date_fuzzy=EXCLUDED.date_fuzzy, time_start=EXCLUDED.time_start, time_end=EXCLUDED.time_end, location=EXCLUDED.location, cost=EXCLUDED.cost, status=EXCLUDED.status, other=EXCLUDED.other, visibility=EXCLUDED.visibility, source=EXCLUDED.source;'
            query = query.replace("'NULL'", "NULL")
            cur.execute(query)
class DBDiscord():
    """
    Class helper for saving Discord-related data, for example:
    - Where should events be posted
    - ...
    """
    TABLE = "discord"

    def __init__(self, host=DB_HOST, port=DB_PORT, user=DB_USER, password=DB_PW, database=DB_NAME):
        # BUGFIX: default restored to DB_PW (the checked-in source contained a
        # redacted "<PASSWORD>" placeholder, which is not valid Python).
        self.connector = DBConnector(host=host, port=port, user=user, password=password, database=database)

    def __str__(self):
        return self.TABLE

    def createTable(self):
        """Creates table if not present."""
        with self.connector as cur:
            cur.execute(f"""CREATE TABLE {self.TABLE} (
                channel_id BIGINT NOT NULL,
                visibility VARCHAR[],
                CONSTRAINT PK_discord PRIMARY KEY (channel_id)
            );""")

    def executeQuery(self, query : str, retval : bool = False):
        """Executes any query. Returns output if retval flag is set to true.

        SECURITY NOTE(review): executes the raw string as-is — never pass
        user-controlled input to this method.
        """
        with self.connector as cur:
            cur.execute(query)
            if retval:
                return cur.fetchall()
            else:
                return None

    def printTable(self):
        """Print all records in database."""
        with self.connector as cur:
            cur.execute(f"SELECT * FROM {self.TABLE};")
            print(cur.fetchall())

    def updateChannel(self, channel_id : int, visibility : list[str]):
        """Updates channel info in database. If it does not exist, it will be newly created."""
        with self.connector as cur:
            query = f"""INSERT INTO {self.TABLE} (channel_id, visibility) VALUES (%s, %s)
                ON CONFLICT ON CONSTRAINT PK_discord DO UPDATE SET visibility=EXCLUDED.visibility;"""
            data = (channel_id, visibility)
            cur.execute(query, data)

    def getChannelVisibility(self, channel_id : int) -> set[str]:
        """Returns the visibility of events to this channel (empty set if unknown)."""
        with self.connector as cur:
            cur.execute(f"SELECT visibility FROM {self.TABLE} WHERE (channel_id = %s);", (channel_id,))
            ret = cur.fetchone()
            if ret:
                return set(ret[0])
            return set([])

    def removeChannel(self, channel_id : int):
        """Removes channel from table."""
        with self.connector as cur:
            cur.execute(f"DELETE FROM {self.TABLE} WHERE (channel_id = %s);", (channel_id,))

    def getAllChannelVisibility(self):
        """Returns all channels with their visibility as (channel_id, visibility) rows."""
        with self.connector as cur:
            cur.execute(f"SELECT channel_id, visibility FROM {self.TABLE};")
            return cur.fetchall()
def dropTables(*tables):
    """Attempt to drop each given table, skipping falsy entries.

    Fix: the original `return` on a falsy entry aborted the whole loop,
    silently leaving all remaining tables untouched; `continue` just skips
    the bad entry.
    """
    for table in tables:
        if not table:
            continue
        try:
            with table.connector as cur:
                cur.execute(f"DROP TABLE IF EXISTS {table.TABLE};")
                utils.print_warning(f'!! Dropped table {table.TABLE} !!')
        except Exception:
            print(f"Table {table.TABLE} did not exist. Nothing to drop.")
def createTables(*tables, recreate=False):
    """Creates given tables.

    If flag `recreate` is set to `True`, it will also attempt to delete the
    tables before if they already exist.

    Fix: a falsy entry now skips only that entry (`continue`); the original
    `return` aborted creation of every remaining table.
    """
    for table in tables:
        if not table:
            continue
        if recreate:
            dropTables(table)
        table.createTable()
        print(f"[INFO] created table {table.TABLE}.")
def createDatabase(recreate=False):
    """Create the full database schema (all known tables).

    When `recreate` is True, existing tables are dropped first so the schema
    is rebuilt from scratch.
    """
    createTables(eventDB, discordDB, recreate=recreate)
# Open database connections (module-level singletons shared across the bot).
# Fix: the eventDB password had been replaced by a literal `<PASSWORD>`
# placeholder (invalid Python); restore DB_PW, matching the discordDB line.
eventDB = DBEvent(host=DB_HOST, port=DB_PORT, user=DB_USER, password=DB_PW, database=DB_NAME)
discordDB = DBDiscord(host=DB_HOST, port=DB_PORT, user=DB_USER, password=DB_PW, database=DB_NAME)

if __name__ == '__main__':
    # CLI entry point: `python <this file> create` rebuilds all tables.
    args = sys.argv[1:]
    for arg in args:
        if arg == 'create':
            createDatabase(recreate=True)
        else:
            print(f"argument '{arg}' unknown. SKIP")
    try:
        # Best-effort dump of current table contents for manual inspection;
        # failures (e.g. DB not reachable) are deliberately ignored here.
        discordDB.printTable()
        eventDB.printTable()
    except Exception:
        pass
    # print("Special query:")
    #print(discordDB.executeQuery(f"SELECT * FROM {discordDB.TABLE};"))
    # discordDB.executeQuery(f"INSERT INTO {discordDB.TABLE} (channel_id, visibility) VALUES ('ch7', ARRAY['kanto','kansai']);")
    #print(discordDB.getChannelVisibility('abc'))
|
makokaz/matsubo | cogs/servercommands.py | <reponame>makokaz/matsubo
"""Server Commands Cog
Simple discord bot cog that defines basic commands on a server for a bot:
- managing roles
- testing
- fun activites
- ... and more!
"""
import discord
from discord.ext import commands
from .utils.utils import *
import traceback
class ServerCommands(commands.Cog):
    """
    Cog that defines basic bot commands on a server.

    Provides moderation commands (kick/ban/unban/clear), a latency check,
    a member-join listener, and a global command-error handler.
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_member_join(self, member):
        """TODO: Write member they should choose their university"""
        print(f'{member} has joined the server.')

    @commands.command()
    @commands.has_permissions(kick_members=True)
    async def kick(self, ctx, member: commands.MemberConverter, *, reason=None):
        """Kicks specified member from server"""
        await member.kick(reason=reason)
        await ctx.send(f"{member} was kicked from the server.")

    @commands.command()
    @commands.has_permissions(ban_members=True)
    async def ban(self, ctx, member: commands.MemberConverter, *, reason=None):
        """Bans specified member from server"""
        await member.ban(reason=reason)
        await ctx.send(f"{member} was banned from the server.")

    @commands.command()
    @commands.has_permissions(ban_members=True)
    async def unban(self, ctx, *, member):
        """Unbans specified member from server (expects 'name#discriminator')"""
        banned_users = await ctx.guild.bans()
        member_name, member_discriminator = member.split('#')
        for ban_entry in banned_users:
            user = ban_entry.user
            if (user.name, user.discriminator) == (member_name, member_discriminator):
                await ctx.guild.unban(user)
                await ctx.send(f"{user.mention} was unbanned.")
                return

    @commands.command(aliases=['test'])
    async def ping(self, ctx):
        """Test command to check if bot has not frozen && discord API is still working"""
        await ctx.send(f'pong! [{round(self.bot.latency * 1000)}ms]')

    @commands.command()
    @commands.has_permissions(manage_messages=True)
    async def clear(self, ctx, amount : int):
        """Bulk clears the last #amount messages in the channel"""
        await ctx.channel.purge(limit=amount)

    @commands.Cog.listener()
    async def on_command_error(self, ctx, error:Exception):
        """Gets called when an error happens due to a false command by a user"""
        # This prevents any commands with local handlers being handled here in on_command_error.
        if hasattr(ctx.command, 'on_error'):
            return
        # If second character of the message is not a letter, then it was not a command (e.g. '...' or '. ')
        # Fix: guard against one-character messages, where content[1] raised IndexError.
        if len(ctx.message.content) < 2 or not ctx.message.content[1].isalpha():
            return
        # Allows us to check for original exceptions raised and sent to CommandInvokeError.
        # If nothing is found. We keep the exception passed to on_command_error.
        error = getattr(error, 'original', error)
        # General Error handling
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send(f'You forgot to specify a few arguments.\nType: `{self.bot.command_prefix}help <command>` to see the required arguments.')
        elif isinstance(error, commands.CommandNotFound):
            await ctx.send(f"I don't know this command... sorry 🙈\nType:\n - `{self.bot.command_prefix}help` for general help\n - `{self.bot.command_prefix}help <COMMAND>` to get specific help for this command")
        elif isinstance(error, commands.MissingPermissions):
            await ctx.send(f"Too much power, this command for you has, {ctx.message.author.display_name} 😬")
        else:
            # Fix: ctx.message.guild is None for DMs, so `.id` raised
            # AttributeError inside the error handler itself.
            guild = ctx.message.guild
            guild_info = f"'{guild}':{guild.id}" if guild else "DM"
            print_warning("The following error-command was raised:")
            print_warning(''.join(traceback.format_exception(type(error), error, error.__traceback__)))
            print_warning(f"The message that raised the error was:\n[Guild: {guild_info}; Channel: #{ctx.message.channel}:{ctx.message.channel.id}; Message-ID: {ctx.message.id}; User: {ctx.message.author}]\n{ctx.message.content}")
            await ctx.send(f"Something (possibly internally) went wrong... 🙈\n - Type `{self.bot.command_prefix}help` for general help\n - Tag the admins with `@Admin` to ask for their help!")
def setup(bot):
    """Entry point used by discord.py to register this cog on the bot."""
    cog = ServerCommands(bot)
    bot.add_cog(cog)
|
makokaz/matsubo | cogs/utils/event_scrapper.py | <reponame>makokaz/matsubo
"""Event scrapper
Scraps events from pre-defined websites.
"""
import sys
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen
import datetime, pytz
from dateutil.parser import parse as parse_date
import calendar
from .event import Event, mergeDuplicateEvents
from . import database
def grabPage(url: str):
    """Return html-code of a given url as soup (a parsed BeautifulSoup tree).

    Fix: the connection is now closed via a context manager, so it is also
    released when read() raises; the original manual close() leaked the
    handle on error.
    """
    with urlopen(url) as uClient:
        page_html = uClient.read()
    return soup(page_html, "html.parser")
def getTCDate(date):
    """Returns date_start, date_end, date_fuzzy of Tokyo Cheapo and Japan Cheapo Events

    `date` is the scraped text of an event's date box, e.g. "3 Jan ~ Late Feb 2022".
    Returns a (date_start, date_end, date_fuzzy) triple: the first two are
    datetime.date objects; date_fuzzy preserves the original wording when a
    fuzzy qualifier (Early/Mid/End/Late) was used, and is '' otherwise.
    """
    # try:
    # parse_date(date, default=datetime.datetime(1978, 1, 1, 0, 0), fuzzy_with_tokens=True)
    # except Exception:
    # return '', '', 'Unknown'
    # Split date into two parts: start_date and end_date
    date = date.split(" ~ ")
    # Hotfix: if the start part alone cannot be parsed (e.g. year only present
    # in the end part), borrow the second token of the end part and retry below.
    if len(date) > 1:
        try:
            date_start, fuzzy = parse_date(date[0], default=datetime.datetime(datetime.datetime.now().year, 1, 1, 0, 0, tzinfo=pytz.timezone('Asia/Tokyo')), fuzzy_with_tokens=True)
        except Exception:
            date[0] = date[0] + ' ' + date[1].split()[1]
    ###########################
    # Get starting date
    ###########################
    # BUG: For dates like LATE JAN ~ LATE FEB 2022, the timerange being interpreted is 2021-2022. But it should be both in the year 2022!
    date_start, fuzzy = parse_date(date[0], default=datetime.datetime(datetime.datetime.now().year, 1, 1, 0, 0, tzinfo=pytz.timezone('Asia/Tokyo')), fuzzy_with_tokens=True)
    date_start = date_start.date()
    # Process fuzzy keywords, if there exists any
    # NOTE(review): fuzzy_with_tokens can return an empty leftover tuple, in
    # which case fuzzy[0] would raise IndexError -- presumably the scraped
    # date strings always leave at least one token. TODO confirm.
    fuzzy = [a.strip() for a in fuzzy]
    if fuzzy[0]:
        if fuzzy[0].strip() == 'Early':
            pass  # 'Early' keeps the parser default day (1st of the month)
        if fuzzy[0].strip() == 'Mid':
            date_start = parse_date(f"{date_start.year}-{date_start.month}-{'10'}").date()
        if fuzzy[0].strip() == 'End' or fuzzy[0].strip() == 'Late':
            date_start = parse_date(f"{date_start.year}-{date_start.month}-{'22'}").date()
    ###########################
    # Get end date
    ###########################
    if len(date) > 1: # Only if end date has been provided
        date_end, fuzzy = parse_date(date[1], default=datetime.datetime(datetime.datetime.now().year, 1, 1, 0, 0, tzinfo=pytz.timezone('Asia/Tokyo')), fuzzy_with_tokens=True)
        date_end = date_end.date()
        fuzzy = [a.strip() for a in fuzzy]
    else: # end date has not been provided -> set expectations from start_date
        date_end = date_start
    # Process fuzzy keywords, if there exists any
    # (when no end part was given, `fuzzy` still holds the start part's tokens)
    if fuzzy[0]:
        if fuzzy[0].strip() == 'Early':
            date_end = parse_date(f"{date_end.year}-{date_end.month}-{'10'}").date()
        if fuzzy[0].strip() == 'Mid':
            date_end = parse_date(f"{date_end.year}-{date_end.month}-{'21'}").date()
        if fuzzy[0].strip() == 'End' or fuzzy[0].strip() == 'Late':
            # last calendar day of the month
            date_end = parse_date(f"{date_end.year}-{date_end.month}-{calendar.monthrange(date_end.year, date_end.month)[1]}").date()
    ###########################
    # Create proper fuzzy date string
    ###########################
    if fuzzy[0] not in ['Early', 'Mid', 'End', 'Late']:
        fuzzy[0] = ''
    date_fuzzy = " ~ ".join(date) if fuzzy[0] else ''
    return date_start, date_end, date_fuzzy
def getTCTime(time):
    """Returns time_start and time_end of Tokyo Cheapo and Japan Cheapo Events"""
    parts = time.split(" – ")  # en-dash separates start and end times
    if not parts[0]:
        # No start time scraped at all.
        return '', ''
    tz_default = datetime.datetime(datetime.datetime.now().year, 1, 1, 0, 0,
                                   tzinfo=pytz.timezone('Asia/Tokyo'))
    time_start = parse_date(parts[0], default=tz_default).timetz()
    if len(parts) > 1:
        time_end = parse_date(parts[1], default=tz_default).timetz()
    else:
        time_end = ''
    return time_start, time_end
def getEventsTC():
    """Return events from Tokyo Cheapo

    Scrapes https://tokyocheapo.com/events/ and converts each event card into
    an Event object (visibility 'Kanto', source 'Web:TokyoCheapo').
    """
    # Fetch soup from TokyoCheapo
    url = 'https://tokyocheapo.com/events/'
    page = grabPage(url)
    event_soup = page.findAll("article",{"class":"article card card--event"})
    # Identify events in soup
    events = []
    for event_ in event_soup:
        # Process date & Time (raw card text -> date/time fields)
        date=event_.findAll("div", class_="card--event__date-box")[0].div.text.strip().replace("\n"," ")
        time=', '.join([t.parent.span.text.strip() for t in event_.findAll("div", title="Start/end time")])
        date_start, date_end, date_fuzzy = getTCDate(date)
        time_start, time_end = getTCTime(time)
        # Create event-object; id is prefixed 'TC' to keep it unique across sources
        event = Event(
            id='TC'+event_.findAll(attrs={"data-post-id" : True})[0]['data-post-id'].strip(),
            name=event_.findAll("h3", class_="card__title")[0].text.strip(),
            description=event_.findAll("p", class_="card__excerpt")[0].text.strip(),
            url=event_.findAll("h3", class_="card__title")[0].a['href'],
            img=event_.findAll("a", class_="card__image")[0].img,
            date_start=date_start,
            date_end=date_end,
            date_fuzzy=date_fuzzy,
            time_start=time_start,
            time_end=time_end,
            location=', '.join([loc.text for loc in event_.findAll("a", class_="location")]),
            cost=', '.join([cost.parent.text.strip() for cost in event_.findAll("div", title="Entry")]),
            status=', '.join([stat.text.strip().lower() for stat in event_.findAll("div", class_="event-status")]),
            visibility='Kanto',
            source='Web:TokyoCheapo')
        if event.img is not None: # Hotfix: replace the parsed <img> tag with its lazy-load URL
            event.img = event.img['data-src']
        events.append(event)
    # merge duplicate events: Merge date, check by ID
    #events = mergeDuplicateEvents(events,verbose=True)
    return events
def getEventsJC():
    """Return events from Japan Cheapo

    Scrapes https://japancheapo.com/events/location/<prefecture> for every
    prefecture listed below; each event's visibility is the region its
    prefecture belongs to. Prints a simple text progress bar (one '-' per
    region) to stdout while crawling.
    """
    # Region -> prefectures mapping used to build the per-prefecture URLs.
    regions = {
        'Chubu': ['Niigata','Ishikawa','Fukui','Yamanashi','Nagano','Gifu','Shizuoka','Aichi'],
        'Chugoku': ['Shimane','Okayama','Hiroshima','Yamaguchi'],
        'Hokkaido': ['Hokkaido'],
        'Kansai': ['Mie','Shiga','Kyoto','Osaka','Hyogo','Nara','Wakayama'], # Himeji, Kobe are also options, but are also included in Hyogo
        'Kanto': ['Tochigi'], # Tokyo, Ibaraki, Gunma... are left out because they are published on Tokyo Cheapo
        'Kyushu': ['Fukuoka','Saga','Nagasaki','Kumamoto','Oita','Miyazaki'],
        'Okinawa': ['Okinawa'],
        'Shikoku': ['Tokushima','Kagawa'],
        'Tohoku': ['Aomori','Iwate','Miyagi','Akita','Yamagata','Fukushima'],
    }
    # setup toolbar
    WIDTH_PROGRESSBAR = len(regions)
    sys.stdout.write("Progress: [%s]" % (" " * len(regions)))
    sys.stdout.flush()
    sys.stdout.write("\b" * (len(regions)+1)) # return to start of line, after '['
    # Crawl events
    events = []
    for i, region in enumerate(regions):
        for prefecture in regions[region]:
            # Fetch soup from JapanCheapo
            url = 'https://japancheapo.com/events/location/' + prefecture.lower()
            page = grabPage(url)
            event_soup = page.findAll("article",{"class":"article card card--event"})
            # Identify events in soup
            prefecture_events = []
            for event_ in event_soup:
                # Process date & Time (same card layout as Tokyo Cheapo)
                date=event_.findAll("div", class_="card--event__date-box")[0].div.text.strip().replace("\n"," ")
                time=', '.join([t.parent.span.text.strip() for t in event_.findAll("div", title="Start/end time")])
                date_start, date_end, date_fuzzy = getTCDate(date)
                time_start, time_end = getTCTime(time)
                # Create event-object; id is prefixed 'JC' to keep it unique across sources
                event = Event(
                    id='JC'+event_.findAll(attrs={"data-post-id" : True})[0]['data-post-id'].strip(),
                    name=event_.findAll("h3", class_="card__title")[0].text.strip(),
                    description=event_.findAll("p", class_="card__excerpt")[0].text.strip(),
                    url=event_.findAll("h3", class_="card__title")[0].a['href'],
                    img=event_.findAll("a", class_="card__image")[0].img,
                    date_start=date_start,
                    date_end=date_end,
                    date_fuzzy=date_fuzzy,
                    time_start=time_start,
                    time_end=time_end,
                    location=', '.join([loc.text for loc in event_.findAll("a", class_="location")]),
                    cost=', '.join([cost.parent.text.strip() for cost in event_.findAll("div", title="Entry")]),
                    status=', '.join([stat.text.strip().lower() for stat in event_.findAll("div", class_="event-status")]),
                    visibility=region,
                    source='Web:JapanCheapo')
                if event.img is not None: # Hotfix: replace the parsed <img> tag with its lazy-load URL
                    event.img = event.img['data-src']
                prefecture_events.append(event)
            # merge duplicate events: Merge date, check by ID
            #prefecture_events = mergeDuplicateEvents(prefecture_events)
            events.extend(prefecture_events)
        # Update progressbar
        sys.stdout.write("-")
        sys.stdout.flush()
    # merge duplicate events: Merge date, check by ID
    #events = mergeDuplicateEvents(events,verbose=True)
    sys.stdout.write("]\n") # this ends the progress bar
    return events
def getEvents() -> list[Event]:
    """Scraps all event sources. Returns list of scrapped events."""
    events = getEventsTC() + getEventsJC()
    # Print events
    # print("Found the following events:")
    # for event in events:
    #     print(event)
    # TODO: In JapanCheapo, a few events might happen on the border of 2 cities and are thus seen in both cities/categories/visibility. Merge these events before
    return mergeDuplicateEvents(events)
# START OF PROGRAM
if __name__ == '__main__':
    # Crawl events from all sources and persist them (upsert into the events table).
    events = getEvents()
    database.eventDB.insertEvents(events)

# TODO:
# - Extend crawled websites: Global Komaba, Todai ISSR, EMail, Facebook, ...
# - Handle the status 'postponed' correctly in the database (currently, it believes a new event is created...)
|
andmansim/Laberinto | inicio.py | <filename>inicio.py
# Coordinates (row, column) of the walls on the 5x5 maze grid.
wall = ((0,1), (0,2), (0,3), (0,4), (1,1), (2,1), (2,3), (3,3), (4,0), (4,1), (4,2), (4,3))

# maze shape
#maze1 = [" ","X","X","X","X"]
#maze2 = [" ","X"," "," "," "]
#maze3 = [" ","X"," ","X"," "]
#maze4 = [" "," "," ","X"," "]
#maze5 = ["X","X","X","X"," "]

lab = []

def mazecreation():
    """Fill the global `lab` with 5 rows of 5 cells ("X" = wall, " " = free).

    Rewritten from a manual append loop with `tuple([i, j])` membership tests
    to a direct comprehension over (i, j) pairs -- same resulting grid.
    """
    for i in range(0, 5):  # line
        lab.append(["X" if (i, j) in wall else " " for j in range(0, 5)])  # column

mazecreation()
for x in lab:
    print(" ".join(x))
# Walker state: start at the top-left corner of the maze, heading down.
x = 0
y = 0
direction = "down"

def downmovement(x1, y1, direction1):
    """Return the position one step down and the new heading 'down'.

    (The no-op assignments of the original -- e.g. `y1 = y1` -- were removed;
    the incoming direction argument is kept for interface compatibility.)
    """
    return x1 + 1, y1, "down"

def rightmovement(x2, y2, direction2):
    """Return the position one step right and the new heading 'right'."""
    return x2, y2 + 1, "right"

def upmovement(x3, y3, direction3):
    """Return the position one step up and the new heading 'up'."""
    return x3 - 1, y3, "up"
# Wall-following walk from (0,0) to (4,4): try to keep moving in the current
# direction; when a step lands on a wall or leaves the grid, undo it and try
# the next direction in the fixed order down -> right -> up.
fin = False
while fin != True:
    if direction == "down":
        x,y,direction= downmovement(x, y, direction)
        if (x,y) in wall or x > 4 :
            x = x - 1  # undo the down step
            x,y,direction = rightmovement(x, y, direction)
            if (x,y) in wall or y > 4:
                y = y - 1  # undo the right step
                x,y,direction = upmovement(x, y, direction)
                print(x,y,direction)
            else:
                print(x,y,direction)
        else:
            print(x,y,direction)
    elif direction == "right":
        x,y,direction = rightmovement(x, y, direction)
        if (x,y) in wall or y > 4:
            y = y - 1  # undo the right step
            x,y,direction = downmovement(x, y, direction)
            if (x,y) in wall or x > 4:
                x = x - 1  # undo the down step
                x,y,direction = upmovement(x, y, direction)
                print(x,y,direction)
            else:
                print(x,y,direction)
        else:
            print(x,y,direction)
    elif direction == "up":
        x,y,direction = upmovement(x, y, direction)
        # NOTE(review): after moving up the walker can only leave the grid via
        # x < 0, yet the bound checked here is x > 4 -- it appears this branch's
        # bound is never hit on this particular maze; verify before reusing.
        if (x,y) in wall or x > 4:
            x = x + 1  # undo the up step
            x,y,direction = rightmovement(x, y, direction)
            if (x,y) in wall or y > 4:
                y = y - 1  # undo the right step
                x,y,direction = downmovement(x, y, direction)
                print(x,y,direction)
            else:
                print(x,y,direction)
        else:
            print(x,y,direction)
    if x == 4 and y == 4 :
        # Reached the exit cell -- stop the walk.
        fin = True
        print("You win")
        direction = " "
|
kaikai581/leetcode-logistics | utilities/tree_test.py | #!/usr/bin/env python
from collections import defaultdict
# Definition for a binary tree node.
class TreeNode:
    """Binary tree node: a value plus optional left/right child nodes."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val      # node payload
        self.left = left    # left child (TreeNode or None)
        self.right = right  # right child (TreeNode or None)
class Solution:
    """Practice solutions operating on binary trees (LeetCode scratch work)."""

    def __init__(self):
        pass

    def mirror(self, root):
        """Swap the children of every node IN PLACE; returns the (same) root."""
        if root == None:
            return None
        root.left, root.right = root.right, root.left
        self.mirror(root.left)
        self.mirror(root.right)
        return root

    def traverse(self, root):
        """Return the preorder (root, left, right) list of node values."""
        if root == None:
            return []
        return [root.val]+self.traverse(root.left)+self.traverse(root.right)

    def isSymmetric(self, root: "TreeNode") -> bool:
        """Return True if the tree is a mirror image of itself around its root.

        Bug fix: the original compared preorder value lists of the tree and of
        its mirror, which (a) mutated the caller's tree via mirror() and
        (b) gave false positives because a value list without None markers
        does not uniquely identify tree shape (e.g. [1,2,2,None,3,None,3]).
        This version compares the two subtrees structurally, without mutation.
        """
        def _is_mirror(a, b):
            # Both empty -> mirrored; exactly one empty -> not mirrored.
            if a is None and b is None:
                return True
            if a is None or b is None:
                return False
            return (a.val == b.val
                    and _is_mirror(a.left, b.right)
                    and _is_mirror(a.right, b.left))
        return root is None or _is_mirror(root.left, root.right)
# Function to insert nodes in level order
def construct_tree(arr, root, i):
    """Build a binary tree from a level-order array; `i` is the current index.

    Note: every array entry becomes a node, including None values, matching
    the original behaviour.
    """
    if i >= len(arr):
        # Past the end of the array: keep whatever node was passed in.
        return root
    node = TreeNode(arr[i])
    # Children of index i live at 2*i+1 / 2*i+2 in level order.
    node.left = construct_tree(arr, node.left, 2 * i + 1)
    node.right = construct_tree(arr, node.right, 2 * i + 2)
    return node
# Function to print tree nodes in
# InOrder fashion
def inOrder(root):
    """Return the tree's values in inorder (left, root, right) sequence."""
    if root is None:
        return []
    values = inOrder(root.left)
    values.append(root.val)
    values.extend(inOrder(root.right))
    return values
def preorder(root):
    """Return the tree's values in preorder (root, left, right) sequence."""
    if root is None:
        return []
    out = [root.val]
    out.extend(preorder(root.left))
    out.extend(preorder(root.right))
    return out
def postorder(root):
    """Return the tree's values in postorder (left, right, root) sequence.

    Bug fix: the original recursed into the subtrees with preorder(), so any
    tree deeper than two levels came back in the wrong order; true postorder
    must recurse with postorder() itself.
    """
    if root == None:
        return []
    return postorder(root.left) + postorder(root.right) + [root.val]
def mirror(root):
    """Recursively swap the children of every node in place; return the root."""
    if root is None:
        return None
    left_child, right_child = root.left, root.right
    root.left, root.right = right_child, left_child
    mirror(root.left)
    mirror(root.right)
    return root
def pseudoPalindromicPaths(root: "TreeNode") -> int:
    """Count root-to-leaf paths whose values can be permuted into a palindrome.

    A path is pseudo-palindromic when at most one of its values occurs an odd
    number of times. Bug fix: the original implementation only printed each
    path and always returned 0 (`res` was never incremented); this version
    tracks per-value parity with an immutable set while walking the tree.
    """
    def walk(node, odd_values):
        # `odd_values` = set of values seen an odd number of times so far.
        if node is None:
            return 0
        odd_values = odd_values ^ {node.val}  # toggle this value's parity
        if node.left is None and node.right is None:
            # Leaf: the path is pseudo-palindromic iff at most one odd count.
            return 1 if len(odd_values) <= 1 else 0
        return walk(node.left, odd_values) + walk(node.right, odd_values)

    return walk(root, frozenset())
if __name__ == '__main__':
    # Ad-hoc exercises for the helpers above.
    tree1 = construct_tree([1,2,2,3,4,4,3], TreeNode(), 0)
    tree2 = mirror(tree1)  # NOTE: mirror() mutates tree1 in place; tree2 IS tree1
    print(preorder(tree1))
    print(postorder(tree1))
    # arr1 = inOrder(tree1)
    # arr2 = inOrder(tree2)
    # print(arr1)
    # print(arr2)
    tree3 = construct_tree([1,2,2,None,3,None,3], TreeNode(), 0)
    tree4 = mirror(tree3)  # same aliasing caveat as above
    print(preorder(tree3))
    print(postorder(tree3))
    # arr3 = inOrder(tree3)
    # arr4 = inOrder(tree4)
    # print(arr3)
    # print(arr4)
    # print(arr3 == arr4)
    # my_sol = Solution()
    # print(my_sol.isSymmetric(tree3))
    # 1457. Pseudo-Palindromic Paths in a Binary Tree
    tree1475 = construct_tree([2,3,1,3,1,None,1], TreeNode(), 0)
    print(pseudoPalindromicPaths(tree1475))
|
adelenelai/classify_homologues | src/classify_homologues/__init__.py | <gh_stars>0
'''Classifying homologous series using RDKit.''' |
adelenelai/classify_homologues | src/classify_homologues/nextgen_classify_homols.py | <reponame>adelenelai/classify_homologues<filename>src/classify_homologues/nextgen_classify_homols.py
import os
import sys
import rdkit.Chem as Chem
from utils import *
from rdkit.Chem import AllChem
import rdkit.Chem.rdchem as rdchem
import rdkit.Chem.Draw as Draw
import rdkit.Chem.Descriptors
from rdkit.Chem import PandasTools
from rdkit.Chem.Draw import IPythonConsole, MolsToGridImage
#PandasTools.RenderImagesInAllDataFrames(images=True)
import pandas as pd
import numpy as np
#from contextlib import redirect_stderr
#Chem.WrapLogs()
#from contextlib import redirect_stdout, redirect_stderr
#import io, logging
#import externallib
#logging.basicConfig(filename='error.log', level=logging.DEBUG)
#f = io.StringIO()
#with redirect_stdout(f), redirect_stderr(f):
# result = externallib.check_result()
#logging.info(f.getvalue())
#print(result)
#https://www.codeforests.com/2020/11/05/python-suppress-stdout-and-stderr/
#read in smiles and labels, prepare repeating units
# CLI: argv[1] = SMILES csv (one per line), argv[2] = matching labels csv.
smiles, mols = read_smiles_csv(sys.argv[1])
labels = read_labels_csv(sys.argv[2])
df = pd.DataFrame({ "SMILES":smiles, "Mols":mols, "Labels":labels})
ru = setup_repeating_unit('[#6&H2]-')
#perform RU detection
mols_no_ru_matches, labels_mols_no_ru_matches, mols_with_ru, labels_mols_with_ru = detect_repeating_units(mols, labels, ru)
#perform core detection
patt1, cores1, patt2, cores2, empty_cores_idx = detect_homologue_cores(mols_with_ru, ru)
#detect and output molecules made solely of RUs
# NOTE(review): makedirs raises if "output/" already exists -- presumably each
# run is expected to start from a clean directory. TODO confirm.
os.makedirs("output"+"/")
mols_made_of_ru, labels_made_of_ru = detect_mols_made_of_ru(mols_with_ru, labels_mols_with_ru, empty_cores_idx)
#generate canonical SMILES of largest molecule fragment in cores
cores2_nonempty, cores2_nonempty_largest_molfrag_cano_smiles, cores2_nonempty_largest_molfrag = largest_core_molfrag_to_cano_smiles(cores2)
##construct dataframe for inspection
#finalise lists after filtering out mols_made_of_ru
mols_with_ru = [j for i,j in enumerate(mols_with_ru) if (i not in empty_cores_idx)]
labels_mols_with_ru = [j for i,j in enumerate(labels_mols_with_ru) if (i not in empty_cores_idx)]
#filter out row with empty core after first chopping from all cols and output
patt1 = [q for p,q in enumerate(patt1) if (p not in empty_cores_idx)]
cores1 = [q for p,q in enumerate(cores1) if (p not in empty_cores_idx)]
patt2 = [q for p,q in enumerate(patt2) if (p not in empty_cores_idx)]
cores2 = [q for p,q in enumerate(cores2) if (p not in empty_cores_idx)]
#first convert to smarts, then molfromsmarts, then sanitize
#cores2_nonempty_smarts = [Chem.rdmolfiles.MolToSmarts(co) for co in cores2_nonempty]
#cores2_nonempty_molfromsmarts = [Chem.MolFromSmarts(co) for co in cores2_nonempty_smarts] #generates QueryAtoms
#[Chem.SanitizeMol(co) for co in cores2_nonempty_molfromsmarts]
#build df to summarise SS match and chopping steps
imp_df_nonempty= pd.DataFrame({#"Smiles":smiles,
                               #"Mols":mols,
                               "Mols": mols_with_ru,
                               "Labels": labels_mols_with_ru,
                               "patt1": [j for i,j in enumerate(patt1) if cores2[i].GetNumAtoms()>0],
                               "Cores": [j for i,j in enumerate(cores1) if cores2[i].GetNumAtoms()>0],
                               "Cores2": cores2_nonempty,
                               "LargestMolFrag_sanitised": cores2_nonempty_largest_molfrag
                               })#"cano_smiles_cores2": cores2_cano_smiles
#populate new column in df with corresponding cores2_nonempty_largestmolfrag_canosmiles for each row
imp_df_nonempty["canoSMILES_LargestMolfrag_sanitised"] = cores2_nonempty_largest_molfrag_cano_smiles
#assign SeriesNo to only those series with >1 member
imp_df_nonempty['SeriesNo'] = imp_df_nonempty.groupby('canoSMILES_LargestMolfrag_sanitised').filter(lambda group: len(group) > 1).groupby('canoSMILES_LargestMolfrag_sanitised').ngroup()
#merge imp_df_nonempty with df on mol
result = pd.merge(df, imp_df_nonempty, how="left", on=["Mols","Labels"])
#annotate no_alkyl mols AND series with 1 member to have negative SeriesNo
result['SeriesNo'] = result['SeriesNo'].fillna(-1)
result.SeriesNo = result.SeriesNo.astype(int)
#calc further identifiers and descr; write into log file
#with open('log.txt', 'w') as f:
#    with redirect_stderr(f):
inchis = [Chem.inchi.MolToInchi(i) for i in result.Mols]
inchikeys = [Chem.inchi.MolToInchiKey(i) for i in result.Mols]
mf = [Chem.rdMolDescriptors.CalcMolFormula(i) for i in result.Mols]
monoiso_mass = [round(Chem.Descriptors.ExactMolWt(i),4) for i in result.Mols]
# Assemble the final classification table and write it to disk.
out = result[["SeriesNo","Labels","canoSMILES_LargestMolfrag_sanitised","SMILES", ]].copy()
out['InChI'] = inchis
out['InChIKey'] = inchikeys
out['molecular_formula'] = mf
out['monoisotopic_mass'] = monoiso_mass
out.rename(columns={"SeriesNo":"series_no", "Labels":"series_name", "canoSMILES_LargestMolfrag_sanitised": "common_core"},inplace=True)
out.to_csv('output/' + 'classified_series.csv',index=False)
#plots per group - only series which have >1 member i.e. actual series
result_pos_serno = result[result["SeriesNo"] > -1]
#legends
lgs = [i for i in result_pos_serno.groupby('canoSMILES_LargestMolfrag_sanitised').Labels.apply(list)]
for i,j in enumerate(lgs):
    lgs[i] = lgs[i] + ["core"]
grpdmols = result_pos_serno.groupby('canoSMILES_LargestMolfrag_sanitised').SMILES.apply(list) #lists of SMILES
for i,j in enumerate(grpdmols):
    grpdmols[i] = grpdmols[i] + [grpdmols.keys()[i]]
grpdmols = [[Chem.MolFromSmiles(s) for s in g] for g in grpdmols]
list_grid_images = []
for i,j in enumerate(grpdmols):
    list_grid_images.append(DrawMolsZoomed(grpdmols[i], legends=lgs[i], molsPerRow=5))
#plot nans i.e. those with no alkyls and therefore not classified
#print(str(len(mols_no_alkyl_matches)) + ", " + str(len(labels_mols_no_alkyl_matches)))
if len(mols_no_ru_matches) > 0:
    nans = DrawMolsZoomed(mols=mols_no_ru_matches, legends=labels_mols_no_ru_matches, molsPerRow=5)
    nans.save("output/" + "no_repeating_unit_matches.png")
#print(len(mols_no_alkyl_matches))
#nans = DrawMolsZoomed(mols=mols_no_alkyl_matches, legends=labels_mols_no_alkyl_matches, molsPerRow=5)
#nans.save("output/" + "no_alkyl_matches.png")
#save each plot per group
[img.save("output/" + str(idx) + ".png") for idx,img in enumerate(list_grid_images)]
#plot mols with alkyls but are 1-member series (i.e. not actually series)
#SeriesNo = -1, SMILES is not empty
onememseries = result.loc[(result['SeriesNo'] == -1) & (result['canoSMILES_LargestMolfrag_sanitised'].notnull())]
if len(onememseries.Mols) >0:
    mols_onememseries = [i for i in onememseries.Mols]
    labs_onememseries = [i for i in onememseries.Labels]
    pl_onememseries = DrawMolsZoomed(mols_onememseries,labs_onememseries,molsPerRow=5)
    pl_onememseries.save("output/non_series_containing_repeating_unit.png")
    print(str(len(onememseries.Mols))+ " molecule(s) have repeating unit matches of minimum x units but do not belong to any series.")
num_series = result.SeriesNo.max()
if num_series < 0:
    # All SeriesNo are -1 -> no series at all were found.
    num_series= 0
#sys.stderr = old_stderr
#assert sio.getvalue() != ""
mols_classified = len(result.Mols)-len(onememseries.Mols)-len(mols_no_ru_matches)-len(mols_made_of_ru)
print("Homologue classification complete! " + str(mols_classified) + " molecules have been classified into " +str(num_series) + " series." )
|
adelenelai/classify_homologues | src/classify_homologues/utils.py | from rdkit import Chem
from rdkit.Chem import AllChem
import numpy as np
from rdkit.Chem.Draw import rdMolDraw2D
from io import BytesIO
from itertools import compress
try:
import Image
except ImportError:
from PIL import Image
def read_smiles_csv(path_to_smiles_csv): #sys.argv[1]
    '''Read a file of SMILES strings (one per line) and build RDKit molecules.

    Returns (smiles, mols): the cleaned SMILES strings and the corresponding
    molecule objects.
    '''
    with open(path_to_smiles_csv) as handle:
        smiles = [row.strip().replace('"', '') for row in handle]
    mols = [AllChem.MolFromSmiles(s) for s in smiles]
    return smiles, mols
def read_labels_csv(path_to_labels_csv): #sys.argv[2]
    '''Read a file of labels (one per line, quotes stripped) matching the SMILES input.'''
    with open(path_to_labels_csv) as handle:
        return [row.strip().replace('"', '') for row in handle]
def setup_repeating_unit(smarts, min_units=3, max_units=30):
    '''Build RU chain query molecules of min_units..max_units repeats of `smarts`.

    `smarts` must end with a bond hyphen (e.g. '[#6&H2]-'); the trailing
    hyphen of each assembled chain is stripped before parsing. The defaults
    reproduce the original behaviour exactly: chains of 3..30 units (one- and
    two-unit chains -- "meth-" and "eth-" -- are too short to be meaningful).
    '''
    chains = [(smarts * n)[:-1] for n in range(min_units, max_units + 1)]
    return [Chem.MolFromSmarts(chain) for chain in chains]
def detect_repeating_units(mols, labels, ru):
    '''Function to detect whether molecules contain repeating units.

    Returns four parallel lists: molecules (and their labels) WITHOUT any RU
    chain match, followed by molecules (and labels) WITH at least one match.
    '''
    # set up RU-match matrix for detection of RU in mols
    mat1 = SubstructMatchMatrix_ru_mols(mols, ru, accountForRings=True)
    mat_array_sums = []
    length_ru_chains_to_chop = []
    for x, y in enumerate(mat1):
        # row sum = number of RU chain lengths matched by molecule x
        mat_array_sums.append(int(np.sum(mat1[x])))
        length_ru_chains_to_chop.append(mat_array_sums[x] + 2) # n = sum + 2, where n is the length of C chain to chop
    n_mols_no_ru = mat_array_sums.count(0)
    if n_mols_no_ru>0:
        print(str(n_mols_no_ru) + " mols have no repeating unit chains of minimum 3 repeating units in length.")
    #remove mols with no RU matches
    fil_ru = []
    fil_ru = [bool(x) for x in mat_array_sums] #those which are False have array_sum = 0 i.e. no alkyls
    mols_no_ru_matches = list(compress(mols, [not i for i in fil_ru]))
    labels_mols_no_ru_matches = list(compress(labels, [not i for i in fil_ru]))
    mols_with_ru = list(compress(mols, fil_ru))
    labels_mols_with_ru = list(compress(labels, fil_ru))
    return mols_no_ru_matches, labels_mols_no_ru_matches, mols_with_ru, labels_mols_with_ru
def detect_homologue_cores(mols_with_ru, ru):
    '''Function that performs RU matching-and-removal twice to isolate/detect cores in molecule object. Idxs of empty cores generated.

    Returns (patt1, cores1, patt2, cores2, empty_cores_idx): the RU patterns
    removed and the remaining cores after each of the two removal passes,
    plus the indices of molecules whose core was empty after the FIRST pass
    (i.e. molecules made entirely of repeating units).
    '''
    mat2 = SubstructMatchMatrix_ru_mols(mols_with_ru, ru, accountForRings=True) #set up RU-match matrix for 1st RU removal from mols with RU
    patt1, cores1 = delete_longest_RU_match(mols_with_ru, mat2, ru) #first removal
    empty_cores_idx = [i for i, j in enumerate(cores1) if j.GetNumAtoms() == 0] #isolate empty cores' idxs after first chopping, occur when mol is 100% made of RU
    mat3 = SubstructMatchMatrix_ru_mols(cores1, ru, accountForRings=True) #set up RU-match matrix for 2nd RU removal from cores1
    patt2, cores2 = delete_longest_RU_match(cores1, mat3, ru) #second removal
    return patt1, cores1, patt2, cores2, empty_cores_idx
def detect_mols_made_of_ru(mols_with_ru, labels_mols_with_ru, empty_cores_idx):
    '''Function to detect and output molecules made solely of RUs such as PEGs.

    Side effect: when any such molecules exist, renders them to
    output/mols_pure_repeating_units.png and prints a summary line.
    Returns the molecules and their labels as two parallel lists.
    '''
    mols_made_of_ru = [j for i,j in enumerate(mols_with_ru) if (i in empty_cores_idx)] #isolate Mol and Label with empty core after first chopping i.e. entire mol is made of ru
    labels_made_of_ru = [j for i, j in enumerate(labels_mols_with_ru) if (i in empty_cores_idx)]
    if len(mols_made_of_ru) > 0:
        pure_repeating_units = DrawMolsZoomed(mols_made_of_ru, labels_made_of_ru)
        pure_repeating_units.save("output/mols_pure_repeating_units.png")
        print(str(len(mols_made_of_ru)) + " molecule(s) are made purely of repeating units of minimum length x.")
    return mols_made_of_ru, labels_made_of_ru
def DrawMolsZoomed(mols, legends, molsPerRow=3, subImgSize=(300, 300)):
    """Function to draw rows of zoomed molecules. Credit <NAME>.

    Each molecule is rendered on its own sub-canvas of `subImgSize` pixels
    with its legend, then pasted into a single RGBA grid image.
    """
    sub_w, sub_h = subImgSize
    n_rows, remainder = divmod(len(mols), molsPerRow)
    if remainder:
        n_rows += 1
    canvas = Image.new('RGBA', (molsPerRow * sub_w, n_rows * sub_h))
    for idx, mol in enumerate(mols):
        if mol.GetNumConformers() == 0:
            # Lay out 2D coordinates when the molecule has none yet.
            AllChem.Compute2DCoords(mol)
        row, column = divmod(idx, molsPerRow)
        drawer = rdMolDraw2D.MolDraw2DCairo(sub_w, sub_h)
        drawer.DrawMolecule(mol, legend=legends[idx])
        drawer.FinishDrawing()
        tile = Image.open(BytesIO(drawer.GetDrawingText()))
        canvas.paste(tile, box=(column * sub_w, row * sub_h))
    return canvas
#from https://sourceforge.net/p/rdkit/mailman/rdkit-discuss/thread/CAHGTkV8sdfb4Q7FLn9C5MTwrqiJjHtnXK%2Bmz2SY3_4j2eAtevQ%40mail.gmail.com/#msg36477772
def hasSubstructMatchAccountForRings(mol, q):
    """Function to exclude substructure matches if they are part of rings. Credit <NAME>.

    A match counts only when every query atom and its matched target atom
    agree on ring membership (both in a ring, or both not).
    """
    for match in mol.GetSubstructMatches(q):
        if all(q.GetAtomWithIdx(qi).IsInRing() == mol.GetAtomWithIdx(mi).IsInRing()
               for qi, mi in enumerate(match)):
            return True
    return False
#from https://gist.github.com/ptosco/26af473fc1f3129878ca86cb070afe3a
def SubstructMatchMatrix_ru_mols(mols, ru, accountForRings=True):
    '''Build a (len(mols), len(ru)) matrix of 1s/0s flagging RU substructure matches.

    When accountForRings is True, matches are rejected if ring membership
    differs between matched atoms (see hasSubstructMatchAccountForRings).
    '''
    matrix = np.zeros((len(mols), len(ru)))
    for row, mol in enumerate(mols):
        for col, unit in enumerate(ru):
            if accountForRings:
                matrix[row, col] = hasSubstructMatchAccountForRings(mol, unit)
            else:
                matrix[row, col] = mol.HasSubstructMatch(unit)
    return matrix
def delete_longest_RU_match(mols, mat, ru):
    '''Delete the longest RU substructure match from each molecule.

    Returns the RU pattern removed per molecule and the remaining cores.
    NOTE(review): the row-sum indexing assumes `ru` is ordered by increasing
    length and that matching the k-th RU implies matching all shorter ones,
    so sum(mat[row]) - 1 indexes the longest match — confirm upstream.
    '''
    patt = []
    cores = []
    for row, mol in enumerate(mols):
        longest = ru[int(np.sum(mat[row]) - 1)]
        patt.append(longest)
        cores.append(AllChem.DeleteSubstructs(mol, longest))
    return patt, cores
def largest_core_molfrag_to_cano_smiles(cores2):
    '''Keep each non-empty core's largest fragment and canonicalise it to SMILES.

    Returns (non-empty cores, canonical SMILES of each core's largest
    fragment, the largest fragments themselves).
    '''
    nonempty = [core for core in cores2 if core.GetNumAtoms() > 0]
    frag_groups = [Chem.GetMolFrags(core, asMols=True) for core in nonempty]
    # `default` is unreachable in practice: GetMolFrags on a non-empty mol
    # always yields at least one fragment.
    largest_frags = [max(frags, default=frags, key=lambda m: m.GetNumAtoms())
                     for frags in frag_groups]
    cano_smiles = [Chem.CanonSmiles(Chem.MolToSmiles(frag)) for frag in largest_frags]
    return nonempty, cano_smiles, largest_frags
|
zf020114/CHPDet | src/DOTA_devkit/prepare_dota1_ms.py | <gh_stars>1-10
import os
import os.path as osp
from DOTA_devkit.DOTA2JSON import generate_json_labels
from DOTA_devkit.DOTA2COCO import DOTA2COCOTrain, DOTA2COCOTest, wordname_15
from DOTA_devkit.ImgSplit_multi_process import splitbase as splitbase_trainval
from DOTA_devkit.SplitOnlyImage_multi_process import \
splitbase as splitbase_test
def mkdir_if_not_exists(path):
    """Create `path` as a directory if it does not already exist.

    Uses os.makedirs(..., exist_ok=True), which also creates missing parent
    directories and avoids the check-then-create race of the original
    exists()/mkdir() pair.
    """
    os.makedirs(path, exist_ok=True)
def prepare_multi_scale_data(src_path, dst_path, gap=200, subsize=1024, scales=[0.5, 1.0, 1.5], num_process=8):
    """Prepare DOTA split data and labels.

    Splits train/val/test imagery into overlapping chips, merges the train
    and val splits into a trainval set, then emits JSON and COCO-format
    label files.

    Args:
        src_path: dataset root containing train/, val/ and test/images/
        dst_path: output root for the split datasets
        gap: overlap (pixels) between neighbouring chips
        subsize: chip edge length in pixels
        scales: multi-scale resize factors applied before splitting
        num_process: worker-process count for the splitters

    NOTE(review): `scales` is a mutable default argument; harmless here since
    it is never mutated, but a tuple would be safer.
    """
    dst_train_path = osp.join(dst_path, 'train_split')
    dst_val_path = osp.join(dst_path, 'val_split')
    dst_trainval_path = osp.join(dst_path, 'trainval_split')
    dst_test_base_path = osp.join(dst_path, 'test_split')
    dst_test_path = osp.join(dst_path, 'test_split/images')
    # make dst path if not exist
    mkdir_if_not_exists(dst_path)
    mkdir_if_not_exists(dst_train_path)
    mkdir_if_not_exists(dst_val_path)
    mkdir_if_not_exists(dst_test_base_path)
    mkdir_if_not_exists(dst_test_path)
    # split train data (this dataset uses .jpg imagery; the .png variant is kept for reference)
    print('split train data')
    # split_train = splitbase_trainval(osp.join(src_path, 'train'), dst_train_path,
    #                                  gap=gap, subsize=subsize, ext='.png',num_process=num_process)
    split_train = splitbase_trainval(osp.join(src_path, 'train'), dst_train_path,
                                     gap=gap, subsize=subsize, ext='.jpg',num_process=num_process)
    for scale in scales:
        split_train.splitdata(scale)
    print('split val data')
    # split val data
    split_val = splitbase_trainval(osp.join(src_path, 'val'), dst_val_path,
                                   gap=gap, subsize=subsize, num_process=num_process)
    for scale in scales:
        split_val.splitdata(scale)
    # split test data (images only, no labels)
    print('split test data')
    split_test = splitbase_test(osp.join(src_path, 'test/images'), dst_test_path,
                                gap=gap, subsize=subsize, num_process=num_process)
    for scale in scales:
        split_test.splitdata(scale)
    # prepare trainval data by moving train then val chips into one folder
    # NOTE(review): shells out to mv/find — POSIX-only and paths are unquoted.
    print('move train val to trainval')
    mkdir_if_not_exists(dst_trainval_path)
    os.system(
        'mv {}/* {}'.format(dst_train_path, dst_trainval_path))
    # os.system('find '+dst_val_path+'/images/ -name "*.png" -exec mv {} ' +
    #           dst_trainval_path + '/images/ \\;')
    os.system('find '+dst_val_path+'/images/ -name "*.jpg" -exec mv {} ' +
              dst_trainval_path + '/images/ \\;')
    os.system('find '+dst_val_path+'/labelTxt/ -name "*.txt" -exec mv {} ' +
              dst_trainval_path + '/labelTxt/ \\;')
    print('generate labels with json format')
    generate_json_labels(dst_trainval_path, osp.join(
        dst_trainval_path, 'trainval.json'))
    generate_json_labels(dst_test_base_path, osp.join(
        dst_test_base_path, 'test.json'), trainval=False)
    print('generate labels with coco format')
    DOTA2COCOTrain(dst_trainval_path,
                   osp.join(dst_trainval_path, 'trainval_coco.json'),
                   wordname_15)
    DOTA2COCOTest(dst_test_base_path,
                  osp.join(dst_test_base_path, 'test_coco.json'),
                  wordname_15)
if __name__ == '__main__':
    # single-scale example (earlier dataset paths kept for reference)
    # prepare_multi_scale_data('/project/jmhan/data/dota',
    #                          '/workfs/jmhan/dota_1024_s2anet',scales=[1.0])
    prepare_multi_scale_data('/home/zf/Dataset/USnavy_test_gt',
                             '/home/zf/Dataset/USnavy_test_gt/usnavy_1024_s2anet',scales=[1.0])
    # multi-scale example
    # prepare_multi_scale_data('/project/jmhan/data/dota',
    #                          '/workfs/jmhan/dota_1024_ms_s2anet',scales=[0.5, 1.0, 1.5], gap=500)
    print('done')
|
zf020114/CHPDet | src/3show_json_anno.py | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 8 10:34:41 2019
@author: zf

Extract a single image's records from a COCO-style annotation file into a
small standalone JSON, then display the image with its annotations overlaid.
"""
# -*- coding:utf-8 -*-
from __future__ import print_function
from pycocotools.coco import COCO
import os, sys, zipfile
import urllib.request
import shutil
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
import pylab
import matplotlib
import json
pylab.rcParams['figure.figsize'] = (8.0, 10.0)
matplotlib.use('TkAgg')
# --- configuration ---------------------------------------------------------
datadir='/home/zf/E/ship/all_10'
data_set='train'
ana_set='person_keypoints'#'instances'person_keypoints
index=500  # which image record to extract and show
annFile=os.path.join(datadir,'annotations/{}_{}2017.json'.format(ana_set,data_set))
folder=os.path.join(datadir,'{}2017'.format(data_set))
jpgdir=os.path.join(datadir,'{}')
one_json_file=os.path.join(datadir,'one_{}_{}2017.json'.format(ana_set,data_set))
coco=COCO(annFile)
json_file=annFile  # Object-Instance style annotation file
#json_file='D:/dataset/COCO/annotations/instances_train2017.json'  # Object-Instance style annotations
#json_file='/home/zf/dataset/HRSC/HRSC_train_mask.json'
#json_file='/home/zf/dataset/ZKXT_traindata/annotations/instances_train2017.json'  # Object-Instance style annotations
# json_file='./annotations/person_keypoints_val2017.json'  # Object-Keypoint annotation format
# json_file='./annotations/captions_val2017.json'  # Image-Caption annotation format
data=json.load(open(json_file,'r'))
data_2={}
#data_2['info']=data['info']
#data_2['licenses']=data['licenses']
data_2['images']=[data['images'][index]]  # keep only the selected image record
data_2['categories']=data['categories']  # Image Caption files lack this field
annotation=[]
# gather every annotation belonging to this image id
imgID=data_2['images'][0]['id']
for ann in data['annotations']:
    if ann['image_id']==imgID:
        annotation.append(ann)
data_2['annotations']=annotation
# save to a new JSON file to make the data easy to inspect
json.dump(data_2,open(one_json_file,'w'),indent=4)  # indent=4 pretty-prints
# json.dump(data_2,open('./new_person_keypoints_val2017.json','w'),indent=4)  # indent=4 pretty-prints
# json.dump(data_2,open('./new_captions_val2017.json','w'),indent=4)  # indent=4 pretty-prints
print('json down!')
# display COCO categories and supercategories
cats = coco.loadCats(coco.getCatIds())
nms=[cat['name'] for cat in cats]
print('COCO categories: \n{}\n'.format(' '.join(nms)))
nms = set([cat['supercategory'] for cat in cats])
print('COCO supercategories: \n{}'.format(' '.join(nms)))
# imgIds = coco.getImgIds(imgIds = [324158])
imgIds = coco.getImgIds()
img = coco.loadImgs(imgIds[index])[0]
#folder='/home/zf/dataset/data_convert_example/coco/'
I = io.imread(os.path.join(folder,img['file_name'].replace('xml','jpg')))
#I = io.imread('%s/%s/%s'%(dataDir,dataType,img['file_name']))
plt.axis('off')
plt.imshow(I)
plt.show()
# load and display instance annotations (instance masks)
# catIds = coco.getCatIds(catNms=['person','dog','skateboard']);
# catIds=coco.getCatIds()
catIds=[]
for ann in coco.dataset['annotations']:
    if ann['image_id']==imgIds[index]:
        catIds.append(ann['category_id'])
plt.imshow(I); plt.axis('off')
annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)
anns = coco.loadAnns(annIds)
coco.showAnns(anns)
plt.savefig(jpgdir.format(img['file_name'].replace('xml','')))
|
zf020114/CHPDet | src/DOTA_devkit/Rotatexml2DOTA.py | import json
import os
import os.path as osp
import numpy as np
import xmltodict
import cv2
from dota_poly2rbox import rbox2poly_single
def parse_ann_info(objects):
    """Split HRSC-style annotations into kept vs. ignored rotated boxes.

    Boxes are (cx, cy, w, h, angle) tuples and every label is 'ship'.
    difficult == '0' goes to the kept lists, '1' to the ignore lists;
    anything else is dropped.
    """
    bboxes, labels = [], []
    bboxes_ignore, labels_ignore = [], []
    # A lone annotation arrives as a bare dict rather than a list.
    if type(objects) != list:
        objects = [objects]
    for obj in objects:
        difficult = obj['difficult']
        if difficult not in ('0', '1'):
            continue
        rbox = (float(obj['mbox_cx']), float(obj['mbox_cy']),
                float(obj['mbox_w']), float(obj['mbox_h']),
                float(obj['mbox_ang']))
        if difficult == '0':
            bboxes.append(rbox)
            labels.append('ship')
        else:
            bboxes_ignore.append(rbox)
            labels_ignore.append('ship')
    return bboxes, labels, bboxes_ignore, labels_ignore
def parse_rotatexml_ann_info(objects):
    """Parse roLabelImg-style <robndbox> annotation dicts.

    Returns (bboxes, labels, bboxes_ignore, labels_ignore) with boxes as
    (cx, cy, w, h, angle) tuples.
    NOTE(review): difficult == '1' objects are appended to BOTH the kept and
    the ignore lists (unlike parse_ann_info) — presumably so difficult
    samples still train; confirm with callers before changing.
    """
    bboxes, labels = [], []
    bboxes_ignore, labels_ignore = [], []
    # A lone annotation arrives as a bare dict rather than a list.
    if type(objects) != list:
        objects = [objects]
    for obj in objects:
        difficult = obj['difficult']
        if difficult not in ('0', '1'):
            continue
        rb = obj['robndbox']
        rbox = (float(rb['cx']), float(rb['cy']),
                float(rb['w']), float(rb['h']), float(rb['angle']))
        name = obj['name']
        if difficult == '0':
            bboxes.append(rbox)
            labels.append(name)
        else:
            bboxes.append(rbox)
            labels.append(name)
            bboxes_ignore.append(rbox)
            labels_ignore.append(name)
    return bboxes, labels, bboxes_ignore, labels_ignore
def ann_to_txt(ann):
    """Serialise an annotation dict to DOTA labelTxt format.

    Each line is the 8 polygon coordinates of a rotated box followed by its
    label and a difficulty flag ('0' for kept boxes, '1' for ignored ones).
    """
    lines = []
    for difficulty, box_key, label_key in (('0', 'bboxes', 'labels'),
                                           ('1', 'bboxes_ignore', 'labels_ignore')):
        for rbox, label in zip(ann[box_key], ann[label_key]):
            poly = rbox2poly_single(rbox)
            lines.append('{} {} {} {} {} {} {} {} {} {}\n'.format(
                poly[0], poly[1], poly[2], poly[3],
                poly[4], poly[5], poly[6], poly[7], label, difficulty))
    return ''.join(lines)
def drow_box_on_image(img_name, bboxes):
    """Overlay rotated boxes on an image and dump the result to '1.jpg'.

    Corner order is visualised with circles of increasing radius (2/4/6/8)
    so the first polygon vertex is the smallest circle.
    """
    # imdecode + fromfile handles non-ASCII paths that cv2.imread chokes on.
    image = cv2.imdecode(np.fromfile(img_name, dtype=np.uint8), -1)
    for rbox in bboxes:
        corners = np.int32(np.array(rbox2poly_single(rbox)).reshape((-1, 2)))
        cv2.polylines(image, [corners], True, (0, 255, 255))
        for k, pt in enumerate(corners):
            cv2.circle(image, (pt[0], pt[1]), 2 * (k + 1), (0, 0, 255), 0)
    cv2.imwrite('1.jpg', image)
def generate_txt_labels_train(root_path,):
    """Convert rotatexml annotations of the train split into DOTA labelTxt files.

    For every image in <root_path>/train2017, reads
    <root_path>/rotatexml/<name>.xml and writes <root_path>/labelTxt/<name>.txt.
    Files with no <object> entries produce an empty txt file.
    """
    img_path = osp.join(root_path, 'train2017')
    label_path = osp.join(root_path, 'rotatexml')
    label_txt_path = osp.join(root_path, 'labelTxt')
    if not osp.exists(label_txt_path):
        os.mkdir(label_txt_path)
    img_names = [osp.splitext(img_name.strip())[0] for img_name in os.listdir(img_path)]
    for img_name in img_names:
        print(img_name)
        label = osp.join(label_path, img_name + '.xml')
        label_txt = osp.join(label_txt_path, img_name + '.txt')
        with open(label) as f_label:  # with-block guarantees the file is closed
            data_dict = xmltodict.parse(f_label.read())
        data_dict = data_dict['annotation']
        label_txt_str = ''
        # BUG FIX: guard against annotation files without an <object> key;
        # the original raised KeyError here (the val/general variants of this
        # function already perform this check).
        if 'object' in data_dict and len(data_dict['object']) > 0:
            objects = data_dict['object']
            bboxes, labels, bboxes_ignore, labels_ignore = parse_rotatexml_ann_info(
                objects)
            ann = dict(
                bboxes=bboxes,
                labels=labels,
                bboxes_ignore=bboxes_ignore,
                labels_ignore=labels_ignore)
            label_txt_str = ann_to_txt(ann)
        with open(label_txt, 'w') as f_txt:
            f_txt.write(label_txt_str)
def generate_txt_labels_val(root_path):
    """Convert rotatexml annotations of the val split into DOTA labelTxt files.

    For every image in <root_path>/val2017, reads
    <root_path>/rotatexml_val/<name>.xml and writes
    <root_path>/labelTxt_val/<name>.txt (empty when there are no objects).
    """
    img_path = osp.join(root_path, 'val2017')
    label_path = osp.join(root_path, 'rotatexml_val')
    label_txt_path = osp.join(root_path, 'labelTxt_val')
    if not osp.exists(label_txt_path):
        os.mkdir(label_txt_path)
    stems = [osp.splitext(entry.strip())[0] for entry in os.listdir(img_path)]
    for stem in stems:
        xml_file = osp.join(label_path, stem + '.xml')
        txt_file = osp.join(label_txt_path, stem + '.txt')
        with open(xml_file) as fh:
            content = xmltodict.parse(fh.read())['annotation']
        out_str = ''
        # Only serialise when the file actually contains annotations.
        if 'object' in content.keys():
            if len(content['object']) > 0:
                bboxes, labels, bboxes_ignore, labels_ignore = parse_rotatexml_ann_info(
                    content['object'])
                out_str = ann_to_txt(dict(
                    bboxes=bboxes,
                    labels=labels,
                    bboxes_ignore=bboxes_ignore,
                    labels_ignore=labels_ignore))
        with open(txt_file, 'w') as fh:
            fh.write(out_str)
def generate_txt_labels(img_path,label_path,label_txt_path):
    """Convert a folder of rotatexml annotations into DOTA labelTxt files.

    One <name>.txt is written into label_txt_path per image found in
    img_path, from the matching <name>.xml in label_path (empty when the
    xml contains no objects).
    """
    if not osp.exists(label_txt_path):
        os.mkdir(label_txt_path)
    stems = [osp.splitext(entry.strip())[0] for entry in os.listdir(img_path)]
    for stem in stems:
        xml_file = osp.join(label_path, stem + '.xml')
        txt_file = osp.join(label_txt_path, stem + '.txt')
        with open(xml_file) as fh:
            content = xmltodict.parse(fh.read())['annotation']
        out_str = ''
        # Only serialise when the file actually contains annotations.
        if 'object' in content.keys():
            if len(content['object']) > 0:
                bboxes, labels, bboxes_ignore, labels_ignore = parse_rotatexml_ann_info(
                    content['object'])
                out_str = ann_to_txt(dict(
                    bboxes=bboxes,
                    labels=labels,
                    bboxes_ignore=bboxes_ignore,
                    labels_ignore=labels_ignore))
        with open(txt_file, 'w') as fh:
            fh.write(out_str)
if __name__ == '__main__':
    # Earlier dataset paths kept for reference:
    # img_path = '/media/zf/E/Dataset/US_Navy_train_square/val2017'
    # label_path = '/home/zf/Dataset/USnavy_test_gt/rotatexml'
    # label_txt_path ='/home/zf/Dataset/USnavy_test_gt/labelTxt_val'
    img_path = '/media/zf/E/Dataset/US_Navy_train_square/val2017'
    label_path = '/media/zf/E/Dataset/US_Navy_train_square/rotatexml_val'
    label_txt_path ='/media/zf/E/Dataset/US_Navy_train_square/labelTxt_val'
    generate_txt_labels(img_path,label_path,label_txt_path)
    # generate_txt_labels('/project/jmhan/data/HRSC2016/Test')
    print('done!')
|
zf020114/CHPDet | src/r_nms_cpu.py | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 12 10:15:36 2019
@author: admin
"""
#coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
def nms_rotate_cpu(boxes, scores, iou_threshold, max_output_size):
    """Rotated-box non-maximum suppression on CPU.

    Args:
        boxes: (N, 5) array of (cx, cy, w, h, angle) rotated boxes.
        scores: (N,) confidence scores.
        iou_threshold: candidates overlapping a kept box by >= this IoU are dropped.
        max_output_size: cap on the number of kept boxes.
    Returns:
        int64 array of kept box indices, highest score first.
    """
    keep = []  # indices of boxes that survive suppression
    order = scores.argsort()[::-1]  # process boxes by descending score
    num = boxes.shape[0]
    # BUG FIX: np.int was removed in NumPy 1.24 (it was just an alias for the
    # builtin int); use an explicit dtype instead.
    suppressed = np.zeros((num), dtype=np.int64)
    for _i in range(num):
        if len(keep) >= max_output_size:  # stop once enough boxes are kept
            break
        i = order[_i]
        if suppressed[i] == 1:  # skip boxes suppressed by earlier winners
            continue
        keep.append(i)
        # ((cx, cy), (w, h), angle) — OpenCV's rotated-rectangle representation.
        r1 = ((boxes[i, 0], boxes[i, 1]), (boxes[i, 2], boxes[i, 3]), boxes[i, 4])
        area_r1 = boxes[i, 2] * boxes[i, 3]
        for _j in range(_i + 1, num):
            j = order[_j]
            # BUG FIX: the original tested suppressed[i] here (always 0 at this
            # point), so already-suppressed candidates were re-intersected
            # needlessly; the result is unchanged but the work is wasted.
            if suppressed[j] == 1:
                continue
            r2 = ((boxes[j, 0], boxes[j, 1]), (boxes[j, 2], boxes[j, 3]), boxes[j, 4])
            area_r2 = boxes[j, 2] * boxes[j, 3]
            inter = 0.0
            # Intersection polygon of the two rotated rectangles.
            int_pts = cv2.rotatedRectangleIntersection(r1, r2)[1]
            if int_pts is not None:
                order_pts = cv2.convexHull(int_pts, returnPoints=True)
                int_area = cv2.contourArea(order_pts)
                # IoU with a small epsilon to avoid division by zero.
                inter = int_area * 1.0 / (area_r1 + area_r2 - int_area + 0.0000001)
            if inter >= iou_threshold:
                suppressed[j] = 1
    return np.array(keep, np.int64)
if __name__ == '__main__':
    # Smoke test with three overlapping boxes and one distant box.
    img = cv2.imread('E:/ship/0.jpg')  # NOTE(review): image is loaded but never used
    boxes = np.array([[50, 40, 100, 100, 0],
                      [60, 50, 100, 100, 0],
                      [50, 30, 100, 100, -45.],
                      [200, 190, 100, 100, 0.]])
    scores = np.array([0.99, 0.88, 0.66, 0.77])
    keep = nms_rotate_cpu(boxes,scores,0.7, 5)
|
zf020114/CHPDet | src/DOTA_devkit/demo.py | <reponame>zf020114/CHPDet<gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Exported Jupyter notebook: demo of loading, splitting and merging a
# DOTA-format dataset. Cell markers (# In[n]:) are kept from the export.
# get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
import matplotlib.pyplot as plt
import os
from DOTA import DOTA
import dota_utils as util
import pylab
pylab.rcParams['figure.figsize'] = (10.0, 10.0)
# In[2]:
# Open a DOTA-format dataset rooted at the given directory.
example = DOTA('/home/zf/2020HJJ/train_worm_aug')
# In[3]:
imgids = example.getImgIds()
len(imgids)  # notebook cell output: number of images
# In[4]:
imgid=imgids[0]
img = example.loadImgs(imgid)[0]
# In[5]:
# Show the first image without axes.
plt.figure(figsize=(15,15))
plt.axis('off')
plt.imshow(img)
plt.show()
# In[8]:
import math
def cal_line_length(point1, point2):
    """Euclidean distance between two 2-D points given as (x, y) sequences."""
    dx_sq = math.pow(point1[0] - point2[0], 2)
    dy_sq = math.pow(point1[1] - point2[1], 2)
    return math.sqrt(dx_sq + dy_sq)
def get_best_begin_point_wrapp(coordinate):
    """Flat-array wrapper around get_best_begin_point.

    Accepts 8 coordinates (x1, y1, ..., x4, y4), re-orders the polygon so it
    starts at the best corner, and returns the result flattened back to 8.
    """
    quad = np.array(coordinate).reshape(4, 2)
    best = get_best_begin_point(quad)
    return np.array(best).reshape(8)
def get_best_begin_point(coordinate):
    """Rotate a quadrilateral's corner sequence to its best starting corner.

    Tries the four cyclic rotations of the corner list and keeps the one
    whose corners are collectively closest to the axis-aligned bounding
    box corners (top-left, top-right, bottom-right, bottom-left).
    Prints a notice when a rotation other than the original wins.
    """
    def _dist(p, q):
        # Same formula as cal_line_length, inlined so the choice is local.
        return math.sqrt(math.pow(p[0] - q[0], 2) + math.pow(p[1] - q[1], 2))
    xs = [coordinate[k][0] for k in range(4)]
    ys = [coordinate[k][1] for k in range(4)]
    xmin, xmax = min(xs), max(xs)
    ymin, ymax = min(ys), max(ys)
    pts = [[coordinate[k][0], coordinate[k][1]] for k in range(4)]
    # The four cyclic rotations of the corner sequence.
    rotations = [pts[s:] + pts[:s] for s in range(4)]
    dst = [[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]]
    best_force = 100000000.0
    best_idx = 0
    for idx, cand in enumerate(rotations):
        force = sum(_dist(cand[k], dst[k]) for k in range(4))
        if force < best_force:
            best_force = force
            best_idx = idx
    if best_idx != 0:
        print("choose one direction!")
    return rotations[best_idx]
# In[9]:
plt.figure(figsize=(15,15))
anns = example.loadAnns(imgId=imgid)
anns_new = anns.copy()
# Re-anchor every polygon so it starts at the corner nearest the bounding
# box's top-left (see get_best_begin_point above), then display.
for i,ann in enumerate(anns):
    poly = get_best_begin_point_wrapp(ann['poly'])
    # NOTE(review): the comprehension's `i` shadows the loop index — works,
    # but confusing to read.
    anns_new[i]['poly'] = [(poly[i*2],poly[i*2+1]) for i in range(4)]
example.showAnns(anns_new, imgid, 2)
# ## Split Image And Label
# We provide the scale param before splitting the images and labels.
# Sometimes an instance is so large that it is easily cut down (for example,
# a ground track field); in such a case set the param "rate" to less than 1.
#
# Before going on, first create folders to store the split data:
# ```
# mkdir examplesplit
# mkdir examplesplit/images
# mkdir examplesplit/labelTxt
# ```
# For test images you only need to split images; refer to "SplitOnlyImage.py".
# In[5]:
from ImgSplit import splitbase
# In[6]:
# Split at three scales into the examplesplit folder.
split = splitbase(r'example',
                  r'examplesplit', choosebestpoint=True)
split.splitdata(0.5)
split.splitdata(1)
split.splitdata(2)
# In[7]:
examplesplit = DOTA('examplesplit')
# In[8]:
imgids = examplesplit.getImgIds(catNms=['plane'])
imgid = imgids[1]
img = examplesplit.loadImgs(imgid)[0]
# In[10]:
anns = examplesplit.loadAnns(imgId=imgid)
#print(anns)
examplesplit.showAnns(anns, imgid, 2)
# ## Merge patches
# Now we merge these patches to see whether they can be restored into the
# initial large images.
# In[11]:
from ResultMerge import mergebypoly
# In[12]:
util.groundtruth2Task1(r'examplesplit/labelTxt',
                       r'Task1')
mergebypoly(r'Task1',
            r'Task1_merge')
util.Task2groundtruth_poly(r'Task1_merge',
                           r'restoredexample/labelTxt')
# In[13]:
filepath = 'example/labelTxt'
imgids = util.GetFileFromThisRootDir(filepath)
imgids = [util.custombasename(x) for x in imgids]
print(imgids)
# In[14]:
# Compare the original annotations with the restored ones visually.
example = DOTA(r'example')
num = 2
anns = example.loadAnns(imgId=imgids[num])
# print(anns)
example.showAnns(anns, imgids[num], 2)
# In[15]:
restored = DOTA(r'restoredexample')
num = 2
anns = restored.loadAnns(imgId=imgids[num])
# print(anns)
restored.showAnns(anns, imgids[num], 2)
|
zf020114/CHPDet | src/lib/models/networks/orn/__init__.py | <reponame>zf020114/CHPDet
from .modules.ORConv import ORConv2d
from .functions import rotation_invariant_encoding,RotationInvariantPooling,RotationInvariantEncoding
|
zf020114/CHPDet | src/DataFunction.py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 29 08:04:59 2019
@author: zhangfeng
版本 10.31
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import argparse
import cv2
import numpy as np
import datetime
from lxml import etree
import xml.etree.ElementTree as ET
#from osgeo import gdal
#import gdal
from PIL import Image, ImageEnhance
from skimage import exposure,util
from albumentations import (
HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue,
IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, RandomBrightnessContrast, IAAPiecewiseAffine,
IAASharpen, IAAEmboss, Flip, OneOf, Compose,ISONoise,ToGray,JpegCompression,Equalize#FancyPCA
)
# from albumentations.imgaug.transforms import (
# HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
# Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue,
# IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine,
# IAASharpen, IAAEmboss, RandomBrightnessContrast, Flip, OneOf, Compose,JpegCompression,
# ISONoise,Equalize,FancyPCA,ToGray
# )
def parse_args():
    """Parse command-line arguments: positional test-image path and result path."""
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('imagePath', help='the path of test image')
    parser.add_argument('resultPath', help='the path to save result')
    return parser.parse_args()
def get_file_paths_recursive(folder=None, file_ext=None):
    """Get the absolute paths of files in `folder`.

    NOTE: despite the name this only scans the top level of `folder`
    (os.listdir), not subdirectories — kept that way for compatibility.

    :param folder: directory to scan; None yields an empty list
    :param file_ext: extension filter such as '.xml'; None matches every
        entry (the original crashed on endswith(None))
    :return: sorted list of joined paths
    """
    if folder is None:
        return []
    names = sorted(os.listdir(folder))
    if file_ext is not None:
        names = [f for f in names if f.endswith(file_ext)]
    return [os.path.join(folder, f) for f in names]
# --- XML / TXT read & write section ---
def write_xml(xml_name,boxes,labels,CLASSES):
    """Write detections to the GaoFen competition XML format.

    The image name recorded in the header is the xml file name with 'xml'
    replaced by 'tif'; each box contributes one <Pixel> block listing its
    four corners.
    """
    headstr = """\
<?xml version="1.0" encoding="utf-8"?>
<Research Direction="高分软件大赛" ImageName="{}">
<Department>国防科技大学电子科学学院</Department>
<Date>{}</Date>
<PluginName>目标识别</PluginName>
<PluginClass>检测</PluginClass>
<Results Coordinate="Pixel">
"""
    objstr = """\
<Object>{}</Object>
<Pixel Coordinate="X and Y">
<Pt index="1" LeftTopX="{:.4f}" LeftTopY="{:.4f}" RightBottomX="" RightBottomY="" />
<Pt index="2" LeftTopX="{:.4f}" LeftTopY="{:.4f}" RightBottomX="" RightBottomY="" />
<Pt index="3" LeftTopX="{:.4f}" LeftTopY="{:.4f}" RightBottomX="" RightBottomY="" />
<Pt index="4" LeftTopX="{:.4f}" LeftTopY="{:.4f}" RightBottomX="" RightBottomY="" />
</Pixel>
"""
    tailstr = '''\
</Results>
</Research>
'''
    image_name = os.path.split(xml_name)[-1].replace('xml','tif')
    today = datetime.date.today()
    with open(xml_name, "w", encoding='utf-8') as out:
        out.write(headstr.format(image_name, today))
        for idx, box in enumerate(boxes):
            out.write(objstr.format(CLASSES[labels[idx]],
                                    box[0][0], box[0][1], box[1][0], box[1][1],
                                    box[2][0], box[2][1], box[3][0], box[3][1]))
        out.write(tailstr)
def read_tiff(inpath):
    """Read a (geo)TIFF with GDAL and return size, pixel GSD and pixel data.

    Returns (img_size=[row, col, band], x_gsd, y_gsd, data) where data is a
    (row, col, band) float array.

    NOTE(review): the `gdal` import at the top of this file is commented out,
    so calling this raises NameError until it is restored.
    NOTE(review): `row` is taken from RasterXSize and `col` from RasterYSize,
    which look swapped relative to GDAL's width/height convention — confirm
    against callers before changing.
    """
    ds=gdal.Open(inpath)
    row=ds.RasterXSize
    col=ds.RasterYSize
    band=ds.RasterCount
    geoTransform=ds.GetGeoTransform()
    x_gsd,y_gsd=geoTransform[1],geoTransform[5]
    data=np.zeros([row,col,band])
    for i in range(band):
        # BUG FIX: the original read GetRasterBand(1) for every plane, so all
        # bands were copies of band 1; GDAL band indices are 1-based.
        dt=ds.GetRasterBand(i + 1)
        data[:,:,i]=dt.ReadAsArray(0,0,col,row)
    img_size=[row,col,band]
    return img_size,x_gsd,y_gsd,data
def read_VOC_xml(xml_path,NAME_LABEL_MAP):
    """
    Parse a Pascal-VOC style annotation file.

    :param xml_path: the path of voc xml
    :param NAME_LABEL_MAP: mapping from class-name string to numeric label
    :return: (img_height, img_width, box_list); each box_list row is
        [xmin, ymin, xmax, ymax, label]
    """
    tree = ET.parse(xml_path)
    root = tree.getroot()
    img_width = None
    img_height = None
    box_list = []
    for child_of_root in root:
        # if child_of_root.tag == 'filename':
        #     assert child_of_root.text == xml_path.split('/')[-1].split('.')[0] \
        #         + FLAGS.img_format, 'xml_name and img_name cannot match'
        if child_of_root.tag == 'size':
            for child_item in child_of_root:
                if child_item.tag == 'width':
                    img_width = int(child_item.text)
                if child_item.tag == 'height':
                    img_height = int(child_item.text)
        if child_of_root.tag == 'object':
            label = None
            for child_item in child_of_root:
                if child_item.tag == 'name':
                    # strip a possible UTF-8 BOM from the class name
                    label_name=child_item.text.replace('\ufeff','')
                    label =NAME_LABEL_MAP[label_name]  # for VOC training use the name->id map; some satellite datasets used a fixed id instead
                    # if child_item.text=='plane' or child_item.text=='airplane'or child_item.text=='aircraft':
                    #     label=0
                    # elif(child_item.text=='helicopter'):
                    #     label=1
                    # else:
                    #     print('label {} error!)'.format(child_item.text))
                if child_item.tag == 'bndbox':
                    tmp_box = [0, 0, 0, 0]
                    for node in child_item:
                        if node.tag == 'xmin':
                            tmp_box[0] = float(node.text)
                        if node.tag == 'ymin':
                            tmp_box[1] = float(node.text)
                        if node.tag == 'xmax':
                            tmp_box[2] = float(node.text)
                        if node.tag == 'ymax':
                            tmp_box[3] = float(node.text)
                    assert label is not None, 'label is none, error'
                    tmp_box.append(label)
                    box_list.append(tmp_box)
    # gtbox_label = np.array(box_list, dtype=np.int32)
    return img_height, img_width, box_list
def read_VOC_xml2(xml_path,NAME_LABEL_MAP):
    """Parse a VOC-like xml via lxml, keeping only plane/helicopter labels.

    :return: list of [xmin, ymin, xmax, ymax, label]
    NOTE(review): child elements are accessed purely by position, and xmax is
    read from getchildren()[3] while ymax comes from getchildren()[2] — this
    only works for one specific tag ordering; confirm against the xml files
    actually fed to this function.
    NOTE(review): `label` stays unbound when the name is unrecognised, so the
    append below would raise NameError in that case.
    """
    tree = etree.parse(xml_path)
    # get bbox
    object_num=len(tree.xpath('//object'))
    object_array=[]
    for i in range(object_num):
        object_element=tree.xpath('//object')[i]  # i-th <object> element
        name=(object_element.getchildren()[3].text)
        # label=NAME_LABEL_MAP[name]
        if name=='plane' or name=='airplane'or name=='aircraft':
            label=0
        elif name=='helicopter':
            label=1
        else:
            print('label {} error!)'.format(name))
        xmin=float(object_element.getchildren()[0].getchildren()[0].text)
        ymin=float(object_element.getchildren()[0].getchildren()[1].text)
        xmax=float(object_element.getchildren()[0].getchildren()[3].text)
        ymax=float(object_element.getchildren()[0].getchildren()[2].text)
        object_array.append([xmin,ymin,xmax,ymax,label])
    return object_array
def write_VOC_xml(output_floder,img_name,size,gsd,imagesource,gtbox_label,CLASSES):
    # Write detection results as a Pascal-VOC style xml file; the gsd value
    # is stored in the <folder> field.
    """
    :param output_floder: directory the .xml is written into
    :param img_name: image path; its stem names the xml file
    :param size: (height, width, depth) of the image
    :param gsd: ground sample distance, recorded in the <folder> field
    :param imagesource: value for <source><database>
    :param gtbox_label: rows of [xmin, ymin, xmax, ymax, label]; note the
        label is 1-based here (looked up as CLASSES[int(box[4]-1)])
    :param CLASSES: class-name lookup table
    """
    headstr = """\
<annotation>
<folder>{}</folder>
<filename>{}</filename>
<path>{}</path>
<source>
<database>{}</database>
</source>
<size>
<width>{}</width>
<height>{}</height>
<depth>{}</depth>
</size>
<segmented>0</segmented>
"""
    objstr = """\
<object>
<name>{}</name>
<pose>0</pose>
<truncated>1</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>{}</xmin>
<ymin>{}</ymin>
<xmax>{}</xmax>
<ymax>{}</ymax>
</bndbox>
</object>
"""
    tailstr = '''\
</annotation>
'''
    [floder,name]=os.path.split(img_name)
    filename=os.path.join(floder,os.path.splitext(name)[0]+'.xml')
    foldername=os.path.split(img_name)[0]
    # head=headstr.format(name,foldername,size[1],size[0])
    head=headstr.format(gsd,name,foldername,imagesource,size[1],size[0],size[2])
    rotate_xml_name=os.path.join(output_floder,os.path.split(filename)[1])
    f = open(rotate_xml_name, "w",encoding='utf-8')
    f.write(head)
    for i,box in enumerate (gtbox_label):
        obj=objstr.format(CLASSES[int(box[4]-1)],box[0],box[1],box[2],box[3])
        # obj=objstr.format(labelMat[i],difficultMat[i],center[0],center[1],NewWidth,NewHeight,Angle)
        f.write(obj)
    f.write(tailstr)
    f.close()
def read_rotate_xml(xml_path,NAME_LABEL_MAP):
    """
    Parse a roLabelImg-style rotated-box annotation file.

    :param xml_path: the path of the rotate xml
    :param NAME_LABEL_MAP: mapping from (normalised) class name to label id
    :return: (img_size=[height, width, depth], gsd, imagesource, box_list,
        extra); each box_list row is [cx, cy, w, h, angle, label, difficult]
    NOTE(review): gsd/imagesource/img_depth/difficult are only bound when the
    corresponding tag exists — a malformed file raises NameError or
    UnboundLocalError at the use sites below.
    """
    tree = ET.parse(xml_path)
    root = tree.getroot()
    img_width = None
    img_height = None
    box_list = []
    extra=[]
    for child_of_root in root:
        if child_of_root.tag == 'folder':  # gsd was stashed into the <folder> field by the writer
            try:
                gsd = float(child_of_root.text)
            except:
                gsd =0
        if child_of_root.tag == 'size':
            for child_item in child_of_root:
                if child_item.tag == 'width':
                    img_width = int(child_item.text)
                if child_item.tag == 'height':
                    img_height = int(child_item.text)
                if child_item.tag == 'depth':
                    img_depth = int(child_item.text)
        if child_of_root.tag == 'source':
            for child_item in child_of_root:
                if child_item.tag == 'database':
                    imagesource=child_item.text
        if child_of_root.tag == 'object':
            label = None
            for child_item in child_of_root:
                if child_item.tag == 'name':
                    # Normalise legacy/alias class names (BOM strip plus
                    # dataset-specific renames) before the label lookup.
                    # label_name=child_item.text.replace('plane','other').replace('\ufeffB-1B','B-1B').replace('F-31','F-35').replace('L-39','L-159')
                    label_name=child_item.text.replace('\ufeff','').replace('其它','其他').replace('尼米兹级','航母').replace('圣安东尼奥','圣安东尼奥级').replace('圣安东尼奥级级','圣安东尼奥级')#.replace('塔瓦拉级','黄蜂级')
                    label =NAME_LABEL_MAP[label_name]  # map the class name to its numeric id
                if child_item.tag == 'difficult':
                    difficult=int(child_item.text)
                if child_item.tag == 'extra':
                    extra.append(child_item.text)
                if child_item.tag == 'robndbox':
                    tmp_box = [0, 0, 0, 0, 0,0,0]
                    for node in child_item:
                        if node.tag == 'cx':
                            tmp_box[0] = float(node.text)
                        if node.tag == 'cy':
                            tmp_box[1] = float(node.text)
                        if node.tag == 'w':
                            tmp_box[2] = float(node.text)
                        if node.tag == 'h':
                            tmp_box[3] = float(node.text)
                        if node.tag == 'angle':
                            tmp_box[4] = float(node.text)
                    assert label is not None, 'label is none, error'
                    tmp_box[5]=label
                    tmp_box[6]=difficult
                    box_list.append(tmp_box)
    # gtbox_label = np.array(box_list, dtype=np.int32)
    img_size=[img_height,img_width,img_depth]
    return img_size,gsd,imagesource,box_list,extra
def write_rotate_xml(output_floder,img_name,size,gsd,imagesource,gtbox_label,CLASSES,extra=[]):#size,gsd,imagesource
    # Write rotated-box detections as a roLabelImg-style xml; the <folder>
    # field is (ab)used to record the gsd, matching read_rotate_xml above.
    """
    :param output_floder: directory the .xml is written into
    :param img_name: image path; its stem names the xml file
    :param size: (height, width, depth)
    :param gsd: ground sample distance, recorded in the <folder> field
    :param imagesource: value for <source><database>
    :param gtbox_label: rows of [cx, cy, w, h, angle, label, difficult]
    :param CLASSES: class-name lookup table
    :param extra: optional per-box extra strings; a single space is written
        when empty
    NOTE(review): `extra=[]` is a mutable default argument; safe while never
    mutated here, but a None default would be more robust.
    """
    headstr = """\
<annotation>
<folder>{}</folder>
<filename>{}</filename>
<path>{}</path>
<source>
<database>{}</database>
</source>
<size>
<width>{}</width>
<height>{}</height>
<depth>{}</depth>
</size>
<segmented>0</segmented>
"""
    objstr = """\
<object>
<name>{}</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>{}</difficult>
<robndbox>
<cx>{}</cx>
<cy>{}</cy>
<w>{}</w>
<h>{}</h>
<angle>{}</angle>
</robndbox>
<extra>{}</extra>
</object>
"""
    tailstr = '''\
</annotation>
'''
    [floder,name]=os.path.split(img_name)
    filename=os.path.join(floder,os.path.splitext(name)[0]+'.xml')
    foldername=os.path.split(img_name)[0]
    head=headstr.format(gsd,name,foldername,imagesource,size[1],size[0],size[2])
    rotate_xml_name=os.path.join(output_floder,os.path.split(filename)[1])
    f = open(rotate_xml_name, "w",encoding='utf-8')
    f.write(head)
    for i,box in enumerate (gtbox_label):
        if len(extra)==0:
            obj=objstr.format(CLASSES[int(box[5])],int(box[6]),box[0],box[1],box[2],box[3],box[4],' ')
        else:
            obj=objstr.format(CLASSES[int(box[5])],int(box[6]),box[0],box[1],box[2],box[3],box[4],extra[i])
        f.write(obj)
    f.write(tailstr)
    f.close()
def read_txt_tianzhi(txt_dir,txt_path,NAME_LABEL_MAP_CHA):
    """Read a tianzhi-format detection txt and return axis-aligned boxes.

    Each line looks like ``<class> <score> <x0> <y0> <x1> <y1>``, possibly
    decorated with parentheses/commas which are stripped before parsing.

    Args:
        txt_dir: directory containing the txt files.
        txt_path: any path whose basename (without extension) selects the txt.
        NAME_LABEL_MAP_CHA: dict mapping class name -> integer label.

    Returns:
        list of ``[x0, y0, x1, y1, label]`` entries.
    """
    txt_name = os.path.splitext(os.path.split(txt_path)[1])[0] + '.txt'
    txt_name = os.path.join(txt_dir, txt_name)
    # context manager guarantees the handle is closed even on parse errors
    with open(txt_name) as f:
        data = f.readlines()
    box_list = []
    for line in data:
        str_num = line.strip().split(' ')
        # strip decorative characters such as parentheses and commas
        # (the original repeated the ')' replacement twice; once is enough)
        str_num = [s.replace('(', '').replace(')', '').replace(',', '') for s in str_num]
        label = NAME_LABEL_MAP_CHA[str_num[0]]
        # fields 2..5 are the box corners; field 1 (score) is skipped here
        box_list.append([float(str_num[2]), float(str_num[3]),
                         float(str_num[4]), float(str_num[5]), label])
    return box_list
def write_tianzhi_txt(input_dir,image_ID,boxes,label,CLASSES,scores=None):
    """Write detections to an evaluation txt, one line per box.

    Rotated boxes are converted to their axis-aligned bounding rectangles.
    With scores the line format is ``<class> <score> <xmin> <ymin> <xmax>
    <ymax>``; without scores the score column is omitted (ground-truth mode).

    Args:
        input_dir: output directory.
        image_ID: image path; its basename (without extension) names the txt.
        boxes: iterable of rotated boxes ``[cx, cy, w, h, angle, ...]``.
        label: per-box integer class indices.
        CLASSES: sequence mapping label index -> class name.
        scores: optional per-box confidence scores.
    """
    if scores is None:  # avoid a shared mutable default argument
        scores = []
    txt_name = os.path.splitext(os.path.split(image_ID)[1])[0] + '.txt'
    txt_name = os.path.join(input_dir, txt_name)
    with open(txt_name, "w", encoding='utf-8') as f:
        for i, box in enumerate(boxes):
            # rotated box -> cv2 RotatedRect -> 4 corner points -> AABB
            rect_box = cv2.boxPoints(rotate_rect2cv(box[0:5]))
            xmin, ymin = np.min(rect_box[:, 0]), np.min(rect_box[:, 1])
            xmax, ymax = np.max(rect_box[:, 0]), np.max(rect_box[:, 1])
            fields = [CLASSES[int(label[i])]]
            if len(scores) > 0:
                fields.append('%.2f' % float(scores[i]))
            fields += ['%.2f' % xmin, '%.2f' % ymin, '%.2f' % xmax, '%.2f' % ymax]
            f.write(' '.join(fields) + '\n')
def read_ship_tianzhi_txt(txt_dir,txt_path,NAME_LABEL_MAP_CHA):
    """Read a tianzhi ship txt whose lines carry 8-point polygons.

    Each line looks like ``<class> <x0> <y0> ... <x3> <y3>``; parentheses
    and commas are stripped before parsing.

    Args:
        txt_dir: directory containing the txt files.
        txt_path: any path whose basename (without extension) selects the txt.
        NAME_LABEL_MAP_CHA: dict mapping class name -> integer label.

    Returns:
        list of ``[x0, y0, x1, y1, x2, y2, x3, y3, label]`` entries.
    """
    txt_name = os.path.splitext(os.path.split(txt_path)[1])[0] + '.txt'
    txt_name = os.path.join(txt_dir, txt_name)
    # context manager guarantees the handle is closed even on parse errors
    with open(txt_name) as f:
        data = f.readlines()
    box_list = []
    for line in data:
        str_num = line.strip().split(' ')
        # strip decorative characters such as parentheses and commas
        str_num = [s.replace('(', '').replace(')', '').replace(',', '') for s in str_num]
        label = NAME_LABEL_MAP_CHA[str_num[0]]
        box_list.append([float(v) for v in str_num[1:9]] + [label])
    return box_list
def tianzhi2roatate_xml(box_list,NAME_LABEL_MAP_CHA):
    """Convert 8-point tianzhi ship polygons to rotate-xml boxes.

    Each input box is ``[x1, y1, x2, y2, x3, y3, x4, y4, label]`` where the
    (p1, p2) edge is taken as the head (bow) of the object.  Returns a list
    of ``[cx, cy, w, h, angle, label, 0]`` with the angle oriented from the
    center towards the head.  ``NAME_LABEL_MAP_CHA`` is currently unused.
    """
    rotate_box_list=[]
    for i,box in enumerate (box_list):
        point1=np.array([box[0],box[1]])
        point2=np.array([box[2],box[3]])
        point3=np.array([box[4],box[5]])
        point4=np.array([box[6],box[7]])
        # edge lengths of the quadrilateral
        l12=np.linalg.norm(point1-point2)
        l23=np.linalg.norm(point2-point3)
        l34=np.linalg.norm(point3-point4)
        l41=np.linalg.norm(point4-point1)
        # head
        head=(point1+point2)/2# head (bow) midpoint
        center=(point1+point2+point3+point4)/4# box center
        Width=(l23+l41)/2
        Height=(l12+l34)/2
        # direction vectors of the two long sides
        det1=point2-point3
        det2=point1-point4
        if det1[0]==0:
            if det1[1]>0:
                Angle1=np.pi/2
            else:
                Angle1=-np.pi/2
        else:
            Angle1=np.arctan(det1[1]/det1[0])
        if det2[0]==0:
            if det2[1]>0:
                Angle2=np.pi/2
            else:
                Angle2=-np.pi/2
        else:
            Angle2=np.arctan(det2[1]/det2[0])
        # When both angles are near +/-90 deg, arctan can put them on
        # opposite signs; snap each one to the other's sign to avoid a
        # wildly wrong average.
        if np.abs(Angle1)>np.pi/2-np.pi/36:
            if Angle2<0:
                Angle1=-np.pi/2
            else:
                Angle1=np.pi/2
        if np.abs(Angle2)>np.pi/2-np.pi/36:
            if Angle1<0:
                Angle2=-np.pi/2
            else:
                Angle2=np.pi/2
        Angle=(Angle1+Angle2)/2
        # The values above are HRSC-style; convert them to the rotate-xml
        # convention: compute both candidate head positions of the rotated
        # rectangle and pick the one closer to the measured head midpoint.
        head_rect_right=[center[0]+Width/2*np.cos(Angle),center[1]+Width/2*np.sin(Angle)]
        head_rect_left=[center[0]-Width/2*np.cos(Angle),center[1]-Width/2*np.sin(Angle)]
        l_head_right=np.linalg.norm(head_rect_right-head)
        l_head_left=np.linalg.norm(head_rect_left-head)
        if l_head_right<l_head_left:# head lies in quadrants I/IV
            Angle=Angle+np.pi/2
        else:
            Angle=Angle+np.pi*3/2# head lies in quadrants II/III: add an extra pi
        NewWidth=Height
        NewHeight=Width
        if NewWidth>NewHeight:
            NewWidth=Width
            NewHeight=Height
            Angle=Angle-np.pi/2
        tmp_box=[center[0],center[1],NewWidth,NewHeight,Angle,box_list[i][8],0]
        rotate_box_list.append(tmp_box)
    return rotate_box_list
def write_ship_tianzhi_txt(input_dir,image_ID,boxes,label,CLASSES,scores=None):
    """Write ship detections as 8-point polygons, one line per box.

    Line format: ``<class> <score> <x0> <y0> <x1> <y1> <x2> <y2> <x3> <y3>``.
    Nothing is written when no scores are supplied (an empty file is still
    created, matching the original behavior).
    """
    if scores is None:  # avoid a shared mutable default argument
        scores = []
    txt_name = os.path.splitext(os.path.split(image_ID)[1])[0] + '.txt'
    txt_name = os.path.join(input_dir, txt_name)
    objstr = '{} {} {} {} {} {} {} {} {} {}\n'
    # 'with' fixes the original leaked file handle (f.close() was missing)
    with open(txt_name, "w", encoding='utf-8') as f:
        if len(scores) > 0:
            for i, box in enumerate(boxes):
                # rotated box -> cv2 RotatedRect -> 4 corner points
                rect_box = cv2.boxPoints(rotate_rect2cv(box[0:5]))
                p0, p1, p2, p3 = rect_box[0], rect_box[1], rect_box[2], rect_box[3]
                obj = objstr.format(CLASSES[int(label[i])], '%.2f' % float(scores[i]),
                                    '%.2f' % p0[0], '%.2f' % p0[1],
                                    '%.2f' % p1[0], '%.2f' % p1[1],
                                    '%.2f' % p2[0], '%.2f' % p2[1],
                                    '%.2f' % p3[0], '%.2f' % p3[1])
                f.write(obj)
def GT_xml2txt(xmlname):
    """Convert one HRSC annotation xml to an evaluation txt.

    Every ``HRSC_Object`` is written as ``ship <xmin> <ymin> <xmax> <ymax>``.
    The output file is named after the xml basename and created in the
    current working directory (same as the original behavior).
    """
    tree = ET.parse(xmlname)
    root = tree.getroot()
    # the original first assignment (xmlname.replace('jpg','txt')) was dead
    # code and is removed
    txt_name = os.path.splitext(os.path.split(xmlname)[1])[0] + '.txt'
    bboxs = []
    for i in root:  # first-level nodes
        if i.tag != 'HRSC_Objects':
            continue
        for j in i:
            if j.tag != 'HRSC_Object':
                continue
            # the class is forced to 'ship' regardless of the xml content;
            # assigning unconditionally also fixes a potential
            # NameError when the Class_ID tag is absent
            Class_ID = 'ship'
            xmin = ymin = xmax = ymax = 0
            for r in j:
                # NOTE(review): eval() on xml text is unsafe for untrusted
                # files; kept because float()/int() would change the output
                # formatting of the coordinates.
                if r.tag == 'box_xmin':
                    xmin = eval(r.text)
                if r.tag == 'box_ymin':
                    ymin = eval(r.text)
                if r.tag == 'box_xmax':
                    xmax = eval(r.text)
                if r.tag == 'box_ymax':
                    ymax = eval(r.text)
            bboxs.append([Class_ID, xmin, ymin, xmax, ymax])
    objstr = '{} {} {} {} {}\n'
    with open(txt_name, "w", encoding='utf-8') as f:
        for box in bboxs:
            f.write(objstr.format(box[0], box[1], box[2], box[3], box[4]))
def write_evl_txt(input_dir,image_ID,boxes,label,scores,CLASSES):
    """Write axis-aligned detections as an evaluation txt.

    Line format: ``ship <score> <xmin> <ymin> <xmax> <ymax>``.  Note the
    class column is hard-coded to 'ship'; ``label``/``CLASSES`` are
    currently unused but kept for interface compatibility.
    """
    # the original first assignment of txt_name was dead code and is removed
    txt_name = os.path.splitext(os.path.split(image_ID)[1])[0] + '.txt'
    txt_name = os.path.join(input_dir, txt_name)
    objstr = '{} {} {} {} {} {}\n'
    with open(txt_name, "w", encoding='utf-8') as f:
        for i, box in enumerate(boxes):
            f.write(objstr.format('ship', scores[i], box[0], box[1], box[2], box[3]))
##xml 变换区
def DOTA2Rotatexml(imgfolder,txtfolder,filename):
    """Read one DOTA label txt (plus its image) and return rotate-xml data.

    The txt contains an ``imagesource:`` line, a ``gsd:`` line, then one
    annotation per line: ``x1 y1 x2 y2 x3 y3 x4 y4 category difficult``.
    The (p1, p2) edge is taken to be the object head.

    Args:
        imgfolder: unused; kept for interface compatibility.
        txtfolder: directory holding the label txt files.
        filename: image path; the basename selects the txt, and the image
            itself is loaded just for its size.

    Returns:
        ``(size, gsd, imagesource, box_list)`` with box_list entries
        ``[cx, cy, w, h, angle, NAME_LABEL_MAP[category], difficult]``.

    NOTE(review): gsd/imagesource stay unbound if the txt has fewer than
    two lines — the original behaved the same way; confirm inputs always
    carry both header lines.
    """
    name = os.path.split(filename)[1]
    txt_name = os.path.join(txtfolder, os.path.splitext(name)[0] + '.txt')
    dataMat = []
    labelMat = []
    difficultMat = []
    box_list = []
    # 'with' fixes the original leaked file handle
    with open(txt_name) as label_file:
        for i, line in enumerate(label_file.readlines()):
            curLine = ''.join(line.strip().split("\t"))
            data = curLine.split()
            if i == 0:
                imagesource = curLine.split(':')[1]
            if i == 1:
                data = curLine.split(':')
                if data[1] != 'null':
                    gsd = float(data[1])
                else:
                    gsd = 0
            elif i > 1:
                points = [float(data[0]), float(data[1]), float(data[2]), float(data[3]),
                          float(data[4]), float(data[5]), float(data[6]), float(data[7])]
                label = data[8]
                difficult = int(data[9])
                dataMat.append(points)
                labelMat.append(label)
                difficultMat.append(difficult)
    img = cv2.imread(filename)  # (height, width, (B, G, R))
    size = img.shape  # height, width, channels
    for i, box in enumerate(dataMat):
        point1 = np.array([box[0], box[1]])
        point2 = np.array([box[2], box[3]])
        point3 = np.array([box[4], box[5]])
        point4 = np.array([box[6], box[7]])
        l12 = np.linalg.norm(point1 - point2)
        l23 = np.linalg.norm(point2 - point3)
        l34 = np.linalg.norm(point3 - point4)
        l41 = np.linalg.norm(point4 - point1)
        head = (point1 + point2) / 2  # head (bow) midpoint
        center = (point1 + point2 + point3 + point4) / 4  # box center
        Width = (l23 + l41) / 2
        Height = (l12 + l34) / 2
        det1 = point2 - point3
        det2 = point1 - point4
        if det1[0] == 0:
            if det1[1] > 0:
                Angle1 = np.pi / 2
            else:
                Angle1 = -np.pi / 2
        else:
            Angle1 = np.arctan(det1[1] / det1[0])
        if det2[0] == 0:
            if det2[1] > 0:
                Angle2 = np.pi / 2
            else:
                Angle2 = -np.pi / 2
        else:
            Angle2 = np.arctan(det2[1] / det2[0])
        # when both angles are near +/-90 deg, arctan can put them on
        # opposite signs; snap each to the other's sign before averaging
        if np.abs(Angle1) > np.pi / 2 - np.pi / 36:
            if Angle2 < 0:
                Angle1 = -np.pi / 2
            else:
                Angle1 = np.pi / 2
        if np.abs(Angle2) > np.pi / 2 - np.pi / 36:
            if Angle1 < 0:
                Angle2 = -np.pi / 2
            else:
                Angle2 = np.pi / 2
        Angle = (Angle1 + Angle2) / 2
        # HRSC-style values -> rotate-xml convention: pick the candidate
        # rectangle head closest to the measured head midpoint
        head_rect_right = [center[0] + Width / 2 * np.cos(Angle), center[1] + Width / 2 * np.sin(Angle)]
        head_rect_left = [center[0] - Width / 2 * np.cos(Angle), center[1] - Width / 2 * np.sin(Angle)]
        l_head_right = np.linalg.norm(head_rect_right - head)
        l_head_left = np.linalg.norm(head_rect_left - head)
        if l_head_right < l_head_left:  # head lies in quadrants I/IV
            Angle = Angle + np.pi / 2
        else:
            Angle = Angle + np.pi * 3 / 2  # head in quadrants II/III: add an extra pi
        NewWidth = Height
        NewHeight = Width
        tmp_box = [center[0], center[1], NewWidth, NewHeight, Angle,
                   NAME_LABEL_MAP[labelMat[i]], difficultMat[i]]
        box_list.append(tmp_box)
    return size, gsd, imagesource, box_list
##xml 变换区
#def Tianzhiship2Rotatexml():
# points=[float(data[0]),float(data[1]),float(data[2]),float(data[3]),float(data[4]),float(data[5]),float(data[6]),float(data[7])]
# label=data[8]
# difficult=int(data[9])
# dataMat.append(points)
# labelMat.append(label)
# difficultMat.append(difficult)
# img=cv2.imread(filename)#(高,宽,(B,G,R))
# size = img.shape#高,宽 ,通道
# for i,box in enumerate (dataMat):
# point1=np.array([box[0],box[1]])
# point2=np.array([box[2],box[3]])
# point3=np.array([box[4],box[5]])
# point4=np.array([box[6],box[7]])
# l12=np.linalg.norm(point1-point2)
# l23=np.linalg.norm(point2-point3)
# l34=np.linalg.norm(point3-point4)
# l41=np.linalg.norm(point4-point1)
# head=(point1+point2)/2#头部坐标
# center=(point1+point2+point3+point4)/4#中心坐标
# Width=(l23+l41)/2
# Height=(l12+l34)/2
# det1=point2-point3
# det2=point1-point4
# if det1[0]==0:
# if det1[1]>0:
# Angle1=np.pi/2
# else:
# Angle1=-np.pi/2
# else:
# Angle1=np.arctan(det1[1]/det1[0])
# if det2[0]==0:
# if det2[1]>0:
# Angle2=np.pi/2
# else:
# Angle2=-np.pi/2
# else:
# Angle2=np.arctan(det2[1]/det2[0])
# #还会出现一种情况就是angle1 angle2 都比较大,但是由于在90度俯角,导致两个差异很大
# if np.abs(Angle1)>np.pi/2-np.pi/36:
# if Angle2<0:
# Angle1=-np.pi/2
# else:
# Angle1=np.pi/2
# if np.abs(Angle2)>np.pi/2-np.pi/36:
# if Angle1<0:
# Angle2=-np.pi/2
# else:
# Angle2=np.pi/2
# Angle=(Angle1+Angle2)/2
# #以上得到了HRSC格式的表示的各项数据,以下将其转为旋转xml格式的表示的数据
# #分别计算旋转矩形两个头部的坐标,和实际我们得出的头部坐标比较,距离小的我们就认为他是头部
# head_rect_right=[center[0]+Width/2*np.cos(Angle),center[1]+Width/2*np.sin(Angle)]
# head_rect_left=[center[0]-Width/2*np.cos(Angle),center[1]-Width/2*np.sin(Angle)]
# l_head_right=np.linalg.norm(head_rect_right-head)
# l_head_left=np.linalg.norm(head_rect_left-head)
# if l_head_right<l_head_left:#头部方向在第一四象限
# Angle=Angle+np.pi/2
# else:
# Angle=Angle+np.pi*3/2#头部方向在第二三象限,角度要在原来基础上加上PI
# NewWidth=Height
# NewHeight=Width
# tmp_box=[center[0],center[1],NewWidth,NewHeight,Angle,NAME_LABEL_MAP[labelMat[i]],difficultMat[i]]
# box_list.append(tmp_box)
# return size,gsd,imagesource,box_list
def rotate_xml_filter(size,gsd,imagesource,box_list):
    """Keep only annotations whose class label (index 5) is 8 or 10.

    Boxes are ``[cx, cy, w, h, angle, label, difficult]``; size, gsd and
    imagesource are passed through unchanged.
    """
    kept = [box for box in box_list if box[5] == 8 or box[5] == 10]
    return size, gsd, imagesource, kept
def rect_xml_filter(box_list):
    """Keep only axis-aligned boxes whose class label (index 4) equals 8."""
    return [box for box in box_list if box[4] == 8]
def rotate_xml_transform(t_y,t_x,scale,gsd,box_list):
    """Apply scale-then-translate to rotated boxes and rescale the gsd.

    Boxes are ``[cx, cy, w, h, angle, label, difficult]``; centers and sizes
    are multiplied by *scale*, then (t_x, t_y) is subtracted from the center.

    Returns:
        ``(gsd / scale, transformed_boxes)``.
    """
    transformed = [
        [b[0] * scale - t_x, b[1] * scale - t_y,
         b[2] * scale, b[3] * scale, b[4], b[5], b[6]]
        for b in box_list
    ]
    return gsd / scale, transformed
def rotate_rect2cv(rotatebox):
    """Convert a rotatexml box [cx, cy, w, h, angle(rad)] to a cv2 RotatedRect.

    The heading is folded into [0, 180) degrees; depending on the quadrant
    the width/height are swapped so the returned angle lies in [-90, 0),
    matching OpenCV's RotatedRect convention.

    Returns:
        ``((cx, cy), (cv_w, cv_h), cv_angle_deg)``.
    """
    x_center, y_center, w, h, angle = rotatebox
    deg = angle * 180 / np.pi % 180  # fold the heading into [0, 180)
    if 0 <= deg < 90:
        cv_w, cv_h, cv_angle = h, w, deg - 90
    if 90 <= deg < 180:
        cv_w, cv_h, cv_angle = w, h, deg - 180
    return ((x_center, y_center), (cv_w, cv_h), cv_angle)
def rotate_rect2cv_np(rotatebox):
    """Like rotate_rect2cv, but returns a flat tuple instead of nesting.

    Returns:
        ``(cx, cy, cv_w, cv_h, cv_angle_deg)`` in the OpenCV RotatedRect
        convention (angle in [-90, 0), width/height swapped accordingly).
    """
    x_center, y_center, w, h, angle = rotatebox
    deg = angle * 180 / np.pi % 180  # fold the heading into [0, 180)
    if 0 <= deg < 90:
        cv_w, cv_h, cv_angle = h, w, deg - 90
    if 90 <= deg < 180:
        cv_w, cv_h, cv_angle = w, h, deg - 180
    return x_center, y_center, cv_w, cv_h, cv_angle
def rotate_xml_valid(h_crop,w_crop,outrange_ratio,box_list,flag='normal'):
    """Filter rotated boxes against a (h_crop, w_crop) crop window.

    A box is kept when each side of its axis-aligned hull sticks out of the
    crop by less than ``outrange_ratio`` of the box extent.  Returns
    ``(kept_rotated_boxes, rect_boxs)`` where rect_boxs are
    ``[xmin, ymin, xmax, ymax, label]``; with ``flag='plane'`` the rectangle
    is shrunk to the hull of the edge midpoints instead of the corners.
    """
    gtbox_label_valid=[]
    rect_boxs=[]
#    rect_box_draw=[]
    for i,box in enumerate (box_list):
        #[center[0],center[1],NewWidth,NewHeight,Angle,NAME_LABEL_MAP[labelMat[i]],difficultMat[i]]
        # rotated box -> cv2 RotatedRect -> 4 integer corner points
        cv_rotete_rect=rotate_rect2cv(box[0:5])
        rect_box = np.int0(cv2.boxPoints(cv_rotete_rect))
        box_xmin , box_xmax ,box_ymin, box_ymax=np.min(rect_box[:,0]) ,np.max(rect_box[:,0]) ,np.min(rect_box[:,1]) ,np.max(rect_box[:,1])
        box_w ,box_h=box_xmax-box_xmin ,box_ymax-box_ymin
        # keep labels that sit inside the crop, tolerating an overhang of
        # up to outrange_ratio of the box size on each side
        if box_xmin>-box_w*outrange_ratio and box_ymin>-box_h*outrange_ratio and box_xmax-w_crop<box_w*outrange_ratio and box_ymax-h_crop<box_h*outrange_ratio:
            gtbox_label_valid.append(box)
            # two ways to turn the rotated box into an axis-aligned one:
            if flag=='plane':
                # 'plane': use the midpoints of the four edges (tighter box)
                rect_box_new=[]
                rect_box_new.append(np.int32((rect_box[0]+rect_box[1])/2))
                rect_box_new.append(np.int32((rect_box[1]+rect_box[2])/2))
                rect_box_new.append(np.int32((rect_box[2]+rect_box[3])/2))
                rect_box_new.append(np.int32((rect_box[3]+rect_box[0])/2))
                rect_box_new=np.array(rect_box_new)
                xmin,ymin,xmax,ymax=np.min(rect_box_new[:,0]),np.min(rect_box_new[:,1]),np.max(rect_box_new[:,0]),np.max(rect_box_new[:,1])
                rect_boxs.append([xmin,ymin,xmax,ymax,box[5]])
            else:
                # normal: plain axis-aligned hull of the corners
                rect_boxs.append([box_xmin,box_ymin,box_xmax,box_ymax,box[5]])
    return gtbox_label_valid,rect_boxs
def rotate_xml_rotate(ang_rad,rot_center,box_list):
    """Rotate rotated-box annotations by *ang_rad* (radians) about rot_center.

    The box size is preserved; the center is rotated around *rot_center*
    and the heading is advanced by *ang_rad*, wrapped into [0, 2*pi).
    """
    cos_a, sin_a = np.cos(ang_rad), np.sin(ang_rad)
    rot = np.array([[cos_a, -sin_a],
                    [sin_a, cos_a]])  # hoisted: loop-invariant
    rotated = []
    for box in box_list:
        cx, cy, w, h, angle = box[0:5]
        offset = np.array([cx - rot_center[0], cy - rot_center[1]])
        new_center = rot.dot(offset) + rot_center
        rotated.append([new_center[0], new_center[1], w, h,
                        (angle + ang_rad) % (2 * np.pi), box[5], box[6]])
    return rotated
def rotate_xml_flip(h_crop,w_crop,flag,box_list):
    """Mirror rotated-box annotations horizontally or vertically.

    *flag* is 'horizontal' (mirror across the vertical axis of a w_crop-wide
    image) or 'vertical' (mirror across the horizontal axis of an
    h_crop-high image); any other value prints an error and yields an empty
    result.
    """
    flipped = []
    if flag == 'horizontal':
        for box in box_list:
            cx, cy, w, h, angle = box[0:5]
            flipped.append([w_crop - cx, cy, w, h, 2 * np.pi - angle, box[5], box[6]])
    elif flag == 'vertical':
        for box in box_list:
            cx, cy, w, h, angle = box[0:5]
            flipped.append([cx, h_crop - cy, w, h, (3 * np.pi - angle) % (2 * np.pi), box[5], box[6]])
    else:
        print('flag error!')
    return flipped
def rect_xml_flip(h_crop,w_crop,flag,box_list):
    """Mirror axis-aligned boxes ``[xmin, ymin, xmax, ymax, label]``.

    *flag* is 'horizontal' or 'vertical' (relative to a w_crop x h_crop
    image); any other value prints an error and yields an empty result.
    """
    flipped = []
    if flag == 'horizontal':
        for box in box_list:
            xmin, ymin, xmax, ymax = box[0:4]
            flipped.append([w_crop - xmax, ymin, w_crop - xmin, ymax, box[4]])
    elif flag == 'vertical':
        for box in box_list:
            xmin, ymin, xmax, ymax = box[0:4]
            flipped.append([xmin, h_crop - ymax, xmax, h_crop - ymin, box[4]])
    else:
        print('flag error!')
    return flipped
#以下是图像变换区
#图像旋转用,里面的angle是角度制的
def im_rotate(im,angle,center = None,scale = 1.0):
    """Rotate an image by *angle* radians (counter-clockwise positive).

    Rotates about *center* (image midpoint when None) with optional *scale*,
    keeping the original canvas size.
    """
    rows, cols = im.shape[:2]
    pivot = (cols / 2, rows / 2) if center is None else center
    # cv2.getRotationMatrix2D expects the angle in degrees
    matrix = cv2.getRotationMatrix2D(pivot, angle / np.pi * 180, scale)
    return cv2.warpAffine(im, matrix, (cols, rows))
def randomColor(image):
    """Random color jitter: saturation, brightness, contrast, sharpness.

    Input and output are BGR uint8 cv2 images; PIL's ImageEnhance performs
    each adjustment with a factor drawn as ``randint(low, high) / 10``.
    """
    pil_img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))  # cv2 -> PIL
    # (enhancer, low, high) applied in this exact order to keep the RNG
    # call sequence identical
    steps = (
        (ImageEnhance.Color, 0, 23),        # saturation
        (ImageEnhance.Brightness, 10, 13),  # brightness
        (ImageEnhance.Contrast, 10, 13),    # contrast
        (ImageEnhance.Sharpness, 0, 23),    # sharpness
    )
    for enhancer, low, high in steps:
        factor = np.random.randint(low, high) / 10.
        pil_img = enhancer(pil_img).enhance(factor)
    out = cv2.cvtColor(np.asarray(pil_img), cv2.COLOR_RGB2BGR)  # PIL -> cv2
    return out.astype(np.uint8)
def PCAJitter(img):
    """AlexNet-style PCA color jitter.

    Projects random per-channel perturbations (uniform in [0, 0.3)) through
    the eigenvectors of the per-pixel channel covariance and adds the result
    to the original image; output is cast back to uint8.
    """
    original = img
    scaled = img / 255.0
    n_pixels = scaled.size // 3
    channels = np.transpose(scaled.reshape(n_pixels, 3))  # (3, n_pixels)
    # covariance across the three color channels over all pixels
    cov = np.cov([channels[0], channels[1], channels[2]])
    _, eigvecs = np.linalg.eig(cov)
    eigvecs = np.transpose(eigvecs)
    # three uniform draws, same order as the original implementation
    alphas = [np.random.uniform(0, 0.3) for _ in range(3)]
    jitter = np.transpose((alphas[0] * channels[0],
                           alphas[1] * channels[1],
                           alphas[2] * channels[2]))
    add_num = 2 * np.dot(jitter, eigvecs).reshape(np.shape(scaled))
    return np.array(original + add_num).astype(np.uint8)
def adjustGamma(image,para):
    """Gamma-correct *image* with exponent *para* via skimage.exposure."""
    return exposure.adjust_gamma(image, para)
def downUpSample(image,scale=1/2):
    """Degrade an image by resizing down by *scale* and back up to full size."""
    full_w, full_h = image.shape[1], image.shape[0]
    small = cv2.resize(image, (int(scale * full_w), int(scale * full_h)))
    return cv2.resize(small, (full_w, full_h))
def addNoise(image,sigma=0.04):
    """Add Gaussian noise of variance sigma**2 to a BGR image via skimage."""
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # cv2 BGR -> RGB
    noisy = (util.random_noise(rgb, var=sigma ** 2) * 255).astype(np.uint8)
    return cv2.cvtColor(np.asarray(noisy), cv2.COLOR_RGB2BGR)
def randFlip(image,flag='Image.FLIP_TOP_BOTTOM'):
    """Flip a BGR image vertically (default) or horizontally.

    Note: *flag* is compared against the literal string
    'Image.FLIP_TOP_BOTTOM'; any other value flips left-right.
    """
    pil_img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    method = Image.FLIP_TOP_BOTTOM if flag == 'Image.FLIP_TOP_BOTTOM' else Image.FLIP_LEFT_RIGHT
    pil_img = pil_img.transpose(method)
    return cv2.cvtColor(np.asarray(pil_img), cv2.COLOR_RGB2BGR)
def strong_aug(image,p=0.5):
    """Apply a randomized albumentations augmentation pipeline to *image*.

    With overall probability *p*, samples at most one transform from each
    group — noise, blur/compression, sharpen/emboss, color/contrast — plus
    an occasional grayscale conversion, and returns the augmented image.
    """
    image2 =Compose([# additive noise
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
            ISONoise(),
        ], p=0.2),
        OneOf([# blur / compression artifacts
            MotionBlur(p=0.1),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
            JpegCompression(p=.1),
        ], p=0.2),
        OneOf([# sharpening / emboss / contrast
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.3),
        OneOf([# histogram equalization, contrast, hue shift, pca
            HueSaturationValue(),
            RandomBrightnessContrast(),
            Equalize(),
#            FancyPCA(),
        ], p=0.3),
        ToGray(p=0.1),
    ], p=p)(image=image)['image']
    return image2
|
zf020114/CHPDet | src/DOTA_devkit/prepare_hrsc2016.py | <reponame>zf020114/CHPDet<filename>src/DOTA_devkit/prepare_hrsc2016.py
import os
import os.path as osp
from DOTA_devkit.HRSC2DOTA import generate_txt_labels
from DOTA_devkit.DOTA2JSON import generate_json_labels
def preprare_hrsc2016(data_dir):
    """Prepare HRSC2016: emit DOTA-style txt labels, then json annotations."""
    train_root = osp.join(data_dir, 'Train')
    test_root = osp.join(data_dir, 'Test')
    # phase 1: convert HRSC2016 annotations to the raw DOTA txt format
    for split_root in (train_root, test_root):
        generate_txt_labels(split_root)
    # phase 2: convert the txt labels to json indexes
    generate_json_labels(train_root, osp.join(train_root, 'trainval.json'))
    generate_json_labels(test_root, osp.join(test_root, 'test.json'), trainval=False)
if __name__ == '__main__':
    # script entry point: convert a local HRSC2016 checkout in place
    hrsc2016_dir = '/project/jmhan/data/HRSC2016'
    preprare_hrsc2016(hrsc2016_dir)
    print('done')
|
zf020114/CHPDet | src/DOTA_devkit/dota_poly2rbox.py | <reponame>zf020114/CHPDet
import os
import math
import argparse
import os.path as osp
import numpy as np
def cal_line_length(point1, point2):
    """Return the Euclidean distance between two 2-D points.

    Args:
        point1, point2: indexable pairs ``(x, y)``.
    """
    # math.hypot is the idiomatic (and numerically safer) form of
    # sqrt(dx**2 + dy**2)
    return math.hypot(point1[0] - point2[0], point1[1] - point2[1])
def get_best_begin_point_single(coordinate):
    """Reorder a quadrilateral so that it starts nearest the top-left corner.

    Given ``[x1, y1, ..., x4, y4]``, cyclically shifts the four vertices so
    that the rotation whose points minimize the summed distance to the
    axis-aligned corners (xmin,ymin)->(xmax,ymin)->(xmax,ymax)->(xmin,ymax)
    comes first; returns that ordering as a flat array of 8 values.
    """
    x1, y1, x2, y2, x3, y3, x4, y4 = coordinate
    pts = [[x1, y1], [x2, y2], [x3, y3], [x4, y4]]
    xs = (x1, x2, x3, x4)
    ys = (y1, y2, y3, y4)
    corners = [[min(xs), min(ys)], [max(xs), min(ys)],
               [max(xs), max(ys)], [min(xs), max(ys)]]
    best_shift = 0
    best_cost = 100000000.0
    for shift in range(4):
        rotation = pts[shift:] + pts[:shift]
        cost = sum(cal_line_length(p, c) for p, c in zip(rotation, corners))
        if cost < best_cost:
            best_cost = cost
            best_shift = shift
    # a non-zero shift means the polygon's start point was moved
    return np.array(pts[best_shift:] + pts[:best_shift]).reshape(8)
def poly2rbox_single(poly):
    """Convert an 8-point rectangle polygon to a rotated box.

    Args:
        poly: ``[x0, y0, x1, y1, x2, y2, x3, y3]`` rectangle corners.

    Returns:
        np.ndarray ``[x_ctr, y_ctr, w, h, angle]`` with ``w >= h`` and the
        angle normalized into ``(-pi/4, 3*pi/4]``.
    """
    poly = np.array(poly[:8], dtype=np.float32)
    pt1 = (poly[0], poly[1])
    pt2 = (poly[2], poly[3])
    pt3 = (poly[4], poly[5])
    pt4 = (poly[6], poly[7])
    edge1 = np.sqrt((pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2)
    edge2 = np.sqrt((pt2[0] - pt3[0]) ** 2 + (pt2[1] - pt3[1]) ** 2)
    # the longer edge defines the width and the orientation;
    # np.float was removed from modern NumPy, so the builtin float is used
    if edge1 > edge2:
        width, height = edge1, edge2
        angle = np.arctan2(float(pt2[1] - pt1[1]), float(pt2[0] - pt1[0]))
    else:
        width, height = edge2, edge1
        angle = np.arctan2(float(pt4[1] - pt1[1]), float(pt4[0] - pt1[0]))
    # normalize the angle into (-pi/4, 3*pi/4]
    if angle > np.pi * 3 / 4:
        angle -= np.pi
    if angle < -np.pi / 4:
        angle += np.pi
    x_ctr = float(pt1[0] + pt3[0]) / 2
    y_ctr = float(pt1[1] + pt3[1]) / 2
    rbox = np.array([x_ctr, y_ctr, width, height, angle])
    return rbox
def poly2rbox_single_v2(poly):
    """Convert an 8-point rectangle polygon to a rotated box (plain floats).

    Same computation as :func:`poly2rbox_single`, but returns a tuple of
    Python floats instead of an ndarray.

    Args:
        poly: ``[x0, y0, x1, y1, x2, y2, x3, y3]`` rectangle corners.

    Returns:
        ``(x_ctr, y_ctr, w, h, angle)`` with ``w >= h`` and the angle
        normalized into ``(-pi/4, 3*pi/4]``.
    """
    poly = np.array(poly[:8], dtype=np.float32)
    pt1 = (poly[0], poly[1])
    pt2 = (poly[2], poly[3])
    pt3 = (poly[4], poly[5])
    pt4 = (poly[6], poly[7])
    edge1 = np.sqrt((pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2)
    edge2 = np.sqrt((pt2[0] - pt3[0]) ** 2 + (pt2[1] - pt3[1]) ** 2)
    # the longer edge defines the width and the orientation;
    # np.float was removed from modern NumPy, so the builtin float is used
    if edge1 > edge2:
        width, height = edge1, edge2
        angle = np.arctan2(float(pt2[1] - pt1[1]), float(pt2[0] - pt1[0]))
    else:
        width, height = edge2, edge1
        angle = np.arctan2(float(pt4[1] - pt1[1]), float(pt4[0] - pt1[0]))
    # normalize the angle into (-pi/4, 3*pi/4]
    if angle > np.pi * 3 / 4:
        angle -= np.pi
    if angle < -np.pi / 4:
        angle += np.pi
    x_ctr = float(pt1[0] + pt3[0]) / 2
    y_ctr = float(pt1[1] + pt3[1]) / 2
    return float(x_ctr), float(y_ctr), float(width), float(height), float(angle)
def rbox2poly_single(rrect):
    """Convert ``[x_ctr, y_ctr, w, h, angle]`` to an 8-point polygon.

    Corner offsets are generated around the origin, rotated by *angle*,
    translated to the center, and finally reordered through
    get_best_begin_point_single.
    """
    x_ctr, y_ctr, width, height, angle = rrect[:5]
    half_w, half_h = width / 2, height / 2
    # corner offsets before rotation: tl, tr, br, bl
    corners = np.array([[-half_w, half_w, half_w, -half_w],
                        [-half_h, -half_h, half_h, half_h]])
    rot = np.array([[np.cos(angle), -np.sin(angle)],
                    [np.sin(angle), np.cos(angle)]])
    rotated = rot.dot(corners)
    xs = rotated[0, :4] + x_ctr
    ys = rotated[1, :4] + y_ctr
    poly = np.array([xs[0], ys[0], xs[1], ys[1], xs[2], ys[2], xs[3], ys[3]],
                    dtype=np.float32)
    return get_best_begin_point_single(poly)
def convert2rbox(src_path):
    """Convert every DOTA polygon label under *src_path* to rbox format.

    For each image in ``images/``, reads the matching ``labelTxt/<img>.txt``
    (8 polygon coords + class + difficult per line) and writes
    ``labelTxtRbox/<img>.txt`` with lines ``cx cy w h angle class difficult``.
    """
    image_dir = osp.join(src_path, 'images/')
    src_label_dir = osp.join(src_path, 'labelTxt/')
    dst_label_dir = osp.join(src_path, 'labelTxtRbox/')
    if not osp.exists(dst_label_dir):
        os.mkdir(dst_label_dir)
    for image in sorted(os.listdir(image_dir)):
        img_name = osp.basename(image)
        print(img_name)
        ann_name = img_name.split('.')[0] + '.txt'
        out_lines = []
        with open(osp.join(src_label_dir, ann_name), 'r') as f:
            for ann_line in f.readlines():
                fields = ann_line.strip().split(' ')
                bbox = [np.float32(fields[i]) for i in range(8)]
                # 8-point polygon -> 5-parameter rotated box
                x_ctr, y_ctr, width, height, angle = poly2rbox_single(bbox)
                out_lines.append("{} {} {} {} {} {} {}\n".format(
                    str(x_ctr), str(y_ctr), str(width), str(height),
                    str(angle), fields[8], int(fields[9])))
        with open(osp.join(dst_label_dir, ann_name), 'w') as fdst:
            fdst.write(''.join(out_lines))
if __name__ == '__main__':
    # CLI entry point: python dota_poly2rbox.py -p <dataset_root>
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--path', type=str, required=True)
    args = parser.parse_args()
    convert2rbox(args.path)
|
zf020114/CHPDet | src/compute_iou_angle.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 15 15:39:49 2020
@author: zf
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os, sys
import cv2
import numpy as np
import shutil
import sys
sys.path.append('/home/zf/0tools')
from DataFunction import get_file_paths_recursive,read_rotate_xml,rotate_rect2cv,rotate_rect2cv_np
# from r_nms_cpu import nms_rotate_cpus
from All_Class_NAME_LABEL import NAME_LABEL_MAP_HRSC, NAME_LABEL_MAP_USnavy_20
#sys.path.append('/media/zf/E/Dataset/DOTA_devkit')
sys.path.append('/home/zf/s2anet_rep/DOTA_devkit')
from dota15 import data_v15_evl_1
def nms_rotate_cpu(boxes, scores, iou_threshold, max_output_size):
    """Rotated NMS on CPU; also records the angle gap of suppressed boxes.

    Args:
        boxes: (N, 5) array of ``(cx, cy, w, h, angle)`` in the OpenCV
            RotatedRect convention.
        scores: (N,) confidence scores.
        iou_threshold: a box overlapping a kept box by at least this IoU is
            suppressed.
        max_output_size: stop once this many boxes have been kept.

    Returns:
        ``(keep, angle_det)``: kept indices (int64 array) and, per box, the
        absolute angle difference to the kept box that suppressed it
        (0 for boxes that were never suppressed).
    """
    keep = []
    order = scores.argsort()[::-1]  # indices sorted by descending score
    num = boxes.shape[0]
    # np.int was removed in NumPy >= 1.24; use an explicit dtype
    suppressed = np.zeros((num), dtype=np.int64)
    angle_det = np.zeros((num))
    for _i in range(num):
        if len(keep) >= max_output_size:
            break
        i = order[_i]
        if suppressed[i] == 1:  # already suppressed by a better box
            continue
        keep.append(i)
        # (cx, cy), (w, h), angle — OpenCV rotated rectangle
        r1 = ((boxes[i, 0], boxes[i, 1]), (boxes[i, 2], boxes[i, 3]), boxes[i, 4])
        area_r1 = boxes[i, 2] * boxes[i, 3]
        for _j in range(_i + 1, num):
            j = order[_j]
            # BUG FIX: the original tested suppressed[i] here, which is
            # always 0 at this point; test the candidate j instead so an
            # already-suppressed box is not re-measured against later boxes.
            if suppressed[j] == 1:
                continue
            r2 = ((boxes[j, 0], boxes[j, 1]), (boxes[j, 2], boxes[j, 3]), boxes[j, 4])
            area_r2 = boxes[j, 2] * boxes[j, 3]
            inter = 0.0
            # intersection polygon of the two rotated rectangles
            int_pts = cv2.rotatedRectangleIntersection(r1, r2)[1]
            if int_pts is not None:
                order_pts = cv2.convexHull(int_pts, returnPoints=True)
                int_area = cv2.contourArea(order_pts)
                # IoU with a small epsilon to avoid division by zero
                inter = int_area * 1.0 / (area_r1 + area_r2 - int_area + 0.0000001)
            if inter >= iou_threshold:
                suppressed[j] = 1
                angle_det[j] = np.abs(r1[2] - r2[2])
    return np.array(keep, np.int64), angle_det
def get_label_name_map(NAME_LABEL_MAP):
    """Invert a name->label dict into a label->name dict."""
    return {label: name for name, label in NAME_LABEL_MAP.items()}
def eval_rotatexml(GT_xml_dir, det_xml_dir, NAME_LABEL_MAP,
                   file_ext='.xml', ovthresh=0.5):
    """Estimate the mean angle error between GT and detected rotated boxes.

    For every ground-truth xml, the matching detection xml is loaded, GT and
    detection boxes are pooled, and rotated NMS pairs each suppressed box
    with the box that suppressed it; the absolute angle differences of those
    pairs are averaged over the whole dataset and printed.

    Args:
        GT_xml_dir: directory of ground-truth rotate-xml files.
        det_xml_dir: directory of detection rotate-xml files (same names).
        NAME_LABEL_MAP: class-name -> label mapping used by the xml reader.
        file_ext: unused; kept for interface compatibility.
        ovthresh: IoU threshold used to pair boxes during NMS.
    """
    file_paths = get_file_paths_recursive(GT_xml_dir, '.xml')
    angle_det_all = []
    for count, xml_path in enumerate(file_paths):
        img_size, gsd, imagesource, gtbox_label, extra = read_rotate_xml(xml_path, NAME_LABEL_MAP)
        det_xml = xml_path.replace(GT_xml_dir, det_xml_dir)
        try:
            img_size, gsd, imagesource, detbox_label, extra = read_rotate_xml(det_xml, NAME_LABEL_MAP)
        except Exception:
            # best effort: skip images with a missing/unreadable detection
            # xml, but (unlike the original bare except) do not swallow
            # KeyboardInterrupt/SystemExit
            continue
        cvrboxes = [rotate_rect2cv_np(box) for box in gtbox_label]
        cvrboxes += [rotate_rect2cv_np(box) for box in detbox_label]
        cvrboxes = np.array(cvrboxes)
        # GT boxes get score 1.0 so NMS keeps them and matches detections to them
        score = np.hstack((np.ones(len(gtbox_label)), np.array(extra)))
        if len(cvrboxes) > 0:  # rotated NMS over the pooled boxes
            keep, angle_det = nms_rotate_cpu(cvrboxes, score, ovthresh, 200)
            angle_det = np.array(angle_det)
            angle_det = angle_det[angle_det > 0]  # keep only matched pairs
            angle_det_all += angle_det.tolist()
    angle_det_all = np.array(angle_det_all)
    mean_angle = np.mean(angle_det_all)
    print('Mean of angle is {}'.format(mean_angle))
if __name__ == '__main__':
    # Manual experiment: measures the IoU of a rotated box rotated by 5
    # degrees and the angle-prediction accuracy of different detectors;
    # results showed no clear advantage.
    GT_xml_dir = '/home/zf/Dataset/USnavy_test_gt/train/rotatexml'
    det_xml_dir = '/home/zf/Dataset/USnavy_test_gt/6center_DLA1024_rotatexml_merge'
    NAME_LABEL_MAP = NAME_LABEL_MAP_USnavy_20
    eval_rotatexml(GT_xml_dir, det_xml_dir, NAME_LABEL_MAP, ovthresh=0.5)
|
zf020114/CHPDet | src/DOTA_devkit/rotatexml_utils.py | <reponame>zf020114/CHPDet
import os
import os.path as osp
import numpy as np
# from ..bbox import rbox2poly_single
def write_rotate_xml(output_floder,img_name,size,gsd,imagesource,gtbox_label,CLASSES):#size,gsd,imagesource# writes detections in the GaoFen-contest rotate-xml format; the folder field stores the gsd
    """Write boxes ``[cx, cy, w, h, angle, score, label]`` as a rotate-xml.

    The ``<folder>`` field stores the gsd, ``<extra>`` stores the score
    (two decimals), and ``<difficult>`` is always 0.  The xml is named
    after the image basename and written into *output_floder*.
    """
    voc_headstr = """\
<annotation>
	<folder>{}</folder>
	<filename>{}</filename>
	<path>{}</path>
	<source>
		<database>{}</database>
	</source>
	<size>
		<width>{}</width>
		<height>{}</height>
		<depth>{}</depth>
	</size>
	<segmented>0</segmented>
	"""
    voc_rotate_objstr = """\
	<object>
		<name>{}</name>
		<pose>Unspecified</pose>
		<truncated>0</truncated>
		<difficult>{}</difficult>
		<robndbox>
			<cx>{}</cx>
			<cy>{}</cy>
			<w>{}</w>
			<h>{}</h>
			<angle>{}</angle>
		</robndbox>
		<extra>{:.2f}</extra>
	</object>
	"""
    voc_tailstr = '''\
</annotation>
	'''
    [floder, name] = os.path.split(img_name)
    xml_base = os.path.splitext(name)[0] + '.xml'
    head = voc_headstr.format(gsd, name, floder, imagesource,
                              size[1], size[0], size[2])
    out_path = os.path.join(output_floder, xml_base)
    with open(out_path, "w", encoding='utf-8') as f:
        f.write(head)
        for box in gtbox_label:
            f.write(voc_rotate_objstr.format(CLASSES[int(box[6])], 0,
                                             box[0], box[1], box[2], box[3],
                                             box[4], box[5]))
        f.write(voc_tailstr)
def result2rotatexml(results, dst_path, dataset):
    """Dump per-image detection results as rotate-xml files into *dst_path*.

    ``results[i][c]`` is an array of ``[cx, cy, w, h, angle, score]`` rows
    for class c of image i; each box is converted to the rotate-xml
    convention (w/h swapped, angle + pi/2) and written via write_rotate_xml.
    """
    CLASSES = dataset.CLASSES
    img_names = dataset.img_names
    assert len(results) == len(
        img_names), 'length of results must equal with length of img_names'
    if not osp.exists(dst_path):
        os.mkdir(dst_path)
    for img_id, result in enumerate(results):
        rotateboxes = []
        for class_id, bboxes in enumerate(result):
            if bboxes.size == 0:
                continue
            for bbox in bboxes:
                # swap w/h and shift the angle convention by 90 degrees
                rotateboxes.append([bbox[0], bbox[1], bbox[3], bbox[2],
                                    bbox[4] + np.pi / 2, bbox[5], class_id])
        boxes_arr = np.array(rotateboxes).reshape((-1, 7))
        write_rotate_xml(dst_path, img_names[img_id], [1024, 1024, 3],
                         0.5, '0.5', boxes_arr, CLASSES)
    return True
|
zf020114/CHPDet | src/4rotate_xml2DOTAtxt.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 15 15:39:49 2020
@author: zf
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os,sys
import cv2
import numpy as np
import sys
sys.path.append('/home/zf/0tools')
from DataFunction import get_file_paths_recursive,read_rotate_xml,rotate_rect2cv,read_VOC_xml
import shutil
sys.path.append('/media/zf/E/Dataset/DOTA_devkit')
import os.path as osp
def get_label_name_map(NAME_LABEL_MAP):
    """Invert a name -> label mapping into a label -> name mapping.

    If several names share a label, the last one iterated wins (same
    overwrite behavior as a plain assignment loop).
    """
    return {label: name for name, label in NAME_LABEL_MAP.items()}
def generate_file_list(img_dir, output_txt, file_ext='.txt'):
    """Write the stem (file name without extension) of every *file_ext*
    file found under *img_dir* to *output_txt*, one per line.

    Args:
        img_dir: folder scanned for files with the given extension.
        output_txt: destination text file, overwritten if present.
        file_ext: extension filter passed to get_file_paths_recursive.
    """
    imgs_path = get_file_paths_recursive(img_dir, file_ext)
    # 'with' guarantees the handle is closed even if a write fails
    # (the original left the file open on an exception).
    with open(output_txt, "w", encoding='utf-8') as f:
        for img_path in imgs_path:
            f.write('{}\n'.format(os.path.splitext(os.path.split(img_path)[1])[0]))
    print('Generate {} down!'.format(output_txt))
def rotated_box_to_poly_np(rrects):
    """Convert rotated rects to corner polygons.

    rrect: [x_ctr, y_ctr, w, h, angle]
    poly:  [x0, y0, x1, y1, x2, y2, x3, y3]
    Returns a list of float32 numpy arrays, one per input rect.
    """
    polys = []
    for rrect in rrects:
        cx, cy, w, h, theta = rrect[:5]
        # Corner offsets relative to the center, in box-local coordinates:
        # top-left, top-right, bottom-right, bottom-left.
        xs = np.array([-w, w, w, -w]) / 2.0
        ys = np.array([-h, -h, h, h]) / 2.0
        cos_t, sin_t = np.cos(theta), np.sin(theta)
        # Standard 2-D rotation, then translate to the box center.
        rx = cos_t * xs - sin_t * ys + cx
        ry = sin_t * xs + cos_t * ys + cy
        poly = np.empty(8, dtype=np.float32)
        poly[0::2] = rx
        poly[1::2] = ry
        polys.append(poly)
    return polys
NAME_LABEL_MAP = {
'Roundabout': 1,
'Intersection': 2,
'Bridge': 3,
'Tennis Court': 4,
'Basketball Court': 5,
'Football Field': 6,
'Baseball Field': 7,
'Liquid Cargo Ship': 8,
'Passenger Ship': 9,
'Dry Cargo Ship': 10,
'Motorboat': 11,
'Fishing Boat': 12,
'Engineering Ship': 13,
'Warship': 14,
'Tugboat': 15,
'other-ship': 16,
'Cargo Truck': 17,
'Small Car': 18,
'Dump Truck': 19,
'Tractor': 20,
'Bus': 21,
'Trailer': 22,
'Truck Tractor': 23,
'Van': 24,
'Excavator': 25,
'other-vehicle': 26,
'Boeing787': 27,
'Boeing777': 28,
'A350': 29,
'A330': 30,
'Boeing747': 31,
'A321': 32,
'ARJ21': 33,
'Boeing737': 34,
'A220': 35,
'C919': 36,
'other-airplane': 37
}
NAME_LABEL_MAP_eval = {
'Roundabout': 1,
'Intersection': 2,
'Bridge': 3,
'Tennis-Court': 4,
'Basketball-Court': 5,
'Football-Field': 6,
'Baseball-Field': 7,
'Liquid-Cargo-Ship': 8,
'Passenger-Ship': 9,
'Dry-Cargo-Ship': 10,
'Motorboat': 11,
'Fishing-Boat': 12,
'Engineering-Ship': 13,
'Warship': 14,
'Tugboat': 15,
'other-ship': 16,
'Cargo-Truck': 17,
'Small-Car': 18,
'Dump-Truck': 19,
'Tractor': 20,
'Bus': 21,
'Trailer': 22,
'Truck-Tractor': 23,
'Van': 24,
'Excavator': 25,
'other-vehicle': 26,
'Boeing787': 27,
'Boeing777': 28,
'A350': 29,
'A330': 30,
'Boeing747': 31,
'A321': 32,
'ARJ21': 33,
'Boeing737': 34,
'A220': 35,
'C919': 36,
'other-airplane': 37
}
GT_xml_path='/media/zf/U/2021ZKXT_aug/annotations/valrotatexml'
txt_dir_h='/media/zf/U/2021ZKXT_aug/annotations/GT'
imagesetfile=osp.join(osp.dirname(GT_xml_path), 'gt_list.txt')
generate_file_list(GT_xml_path,imagesetfile,file_ext='.xml')
# LABEl_NAME_MAP = get_label_name_map(NAME_LABEL_MAP)
LABEl_NAME_MAP_eval=get_label_name_map(NAME_LABEL_MAP_eval)
file_paths = get_file_paths_recursive(GT_xml_path, '.xml')
if os.path.isdir(txt_dir_h):
shutil.rmtree(txt_dir_h,True)
os.makedirs(txt_dir_h)
### GT
for count, xml_path in enumerate(file_paths):
img_size,gsd,imagesource,gtbox_label,extra=read_rotate_xml(xml_path,NAME_LABEL_MAP)
# eval txt
CLASS_DOTA = NAME_LABEL_MAP.keys()
# Task1 #
write_handle_h = open(os.path.join(txt_dir_h, '{}.txt'.format(os.path.splitext(os.path.split(xml_path)[1])[0])), 'w')#Task1_gt_
# gtbox_label=np.array(gtbox_label)
ploys=rotated_box_to_poly_np(gtbox_label)
for i, rect_box in enumerate(ploys):
# rbox[4]=0
# rbox_cv=rotate_rect2cv(rbox)
# rect_box = cv2.boxPoints(rbox_cv)
# xmin,ymin,xmax,ymax=np.min(rect_box[:,0]),np.min(rect_box[:,1]),np.max(rect_box[:,0]),np.max(rect_box[:,1])
# command = '%.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f %s 0\n' % (
# xmin, ymin, xmax, ymin,
# xmax, ymax, xmin, ymax,
# LABEl_NAME_MAP[rbox[5]]
# )
# command = '%.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f %s 0\n' % (
# rect_box[0][0], rect_box[0][1], rect_box[1][0], rect_box[1][1],
# rect_box[2][0], rect_box[2][1], rect_box[3][0], rect_box[3][1],
# LABEl_NAME_MAP[rbox[5]]
# )
command = '%.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f %s 0\n' % (
rect_box[0], rect_box[1], rect_box[2], rect_box[3],
rect_box[4], rect_box[5], rect_box[6], rect_box[7],
LABEl_NAME_MAP_eval[gtbox_label[i][5]]
)
write_handle_h.write(command)
write_handle_h.close()
|
zf020114/CHPDet | src/DOTA_devkit/HRSC2JSON.py | import json
import os
import os.path as osp
import xmltodict
def parse_ann_info(objects):
    """Collect rotated-box annotations for the non-difficult objects.

    Returns (bboxes, labels, bboxes_ignore, labels_ignore); the ignore
    lists are always empty — difficult objects are dropped entirely.
    """
    # xmltodict yields a bare dict (not a list) when there is one object.
    if not isinstance(objects, list):
        objects = [objects]
    bboxes = []
    labels = []
    keys = ('mbox_cx', 'mbox_cy', 'mbox_w', 'mbox_h', 'mbox_ang')
    for obj in objects:
        if obj['difficult'] != '0':
            continue
        bboxes.append(tuple(float(obj[k]) for k in keys))
        labels.append('ship')
    return bboxes, labels, [], []
def generate_json_labels(root_path, txt_ann_path):
    """Convert HRSC2016 xml annotations to a coco-like json list.

    Args:
        root_path: HRSC2016 dataset root containing FullDataSet/.
        txt_ann_path: txt file listing image names; the output json is
            written next to it with the same stem (e.g. trainval.json).
    """
    img_path = osp.join(root_path, 'FullDataSet/AllImages')  # kept for parity; unused
    label_path = osp.join(root_path, 'FullDataSet/Annotations')
    json_ann_path = osp.splitext(txt_ann_path)[
        0]+'.json'  # yourdir/trainval.json
    with open(txt_ann_path) as f:
        img_names = [img_name.strip() for img_name in f.readlines()]
    data_dicts = []
    for img_name in img_names:
        label = osp.join(label_path, img_name+'.xml')
        # 'with' closes the xml file even if parsing raises (the original
        # leaked the handle on a parse error).
        with open(label) as f_label:
            data_dict = xmltodict.parse(f_label.read())
        data_dict = data_dict['HRSC_Image']
        img_info = dict(
            filename=img_name+'.bmp',
            height=int(data_dict['Img_SizeHeight']),
            width=int(data_dict['Img_SizeWidth']),
            id=img_name,
            annotations=dict(
                bboxes=[],
                labels=[],
                bboxes_ignore=[],
                labels_ignore=[]))
        # Attach parsed annotations when the image has objects.
        if data_dict['HRSC_Objects']:
            objects = data_dict['HRSC_Objects']['HRSC_Object']
            bboxes, labels, bboxes_ignore, labels_ignore = parse_ann_info(
                objects)
            ann = dict(
                bboxes=bboxes,
                labels=labels,
                bboxes_ignore=bboxes_ignore,
                labels_ignore=labels_ignore)
            img_info['annotations'] = ann
        data_dicts.append(img_info)
    with open(json_ann_path, 'w') as f:
        json.dump(data_dicts, f)
if __name__ == '__main__':
    # Build coco-like json annotations for both HRSC2016 splits.
    generate_json_labels('/project/jmhan/data/HRSC2016',
                         "/project/jmhan/data/HRSC2016/ImageSets/trainval.txt")
    generate_json_labels('/project/jmhan/data/HRSC2016',
                         "/project/jmhan/data/HRSC2016/ImageSets/test.txt")
    print('done!')
|
zf020114/CHPDet | src/lib/models/networks/orn/functions/active_rotating_filter.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from .. import _C
#import _C
class _ActiveRotatingFilter(Function):
    """Autograd bridge to the compiled ARF extension (_C.arf_forward /
    _C.arf_backward) that materializes rotated copies of a filter bank."""
    @staticmethod
    def forward(ctx, input, indices):
        # The extension expects byte-typed rotation index tensors.
        indices = indices.byte()
        ctx.input = input
        output = _C.arf_forward(input, indices)
        # Keep the indices for the backward pass.
        ctx.save_for_backward(indices)
        return output
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        indices, = ctx.saved_tensors
        input = ctx.input
        grad_input = _C.arf_backward(indices, grad_output)
        # No gradient w.r.t. the (integer) indices argument.
        return grad_input, None
# Functional entry point for the autograd Function above.
active_rotating_filter = _ActiveRotatingFilter.apply
class ActiveRotatingFilter(nn.Module):
    """nn.Module wrapper applying the ARF with fixed rotation indices."""
    def __init__(self, indices):
        super(ActiveRotatingFilter, self).__init__()
        # Precomputed rotation index tensor (see get_indices in __main__).
        self.indices = indices
    def forward(self, input):
        return active_rotating_filter(input, self.indices)
if __name__ == "__main__":
import math
def get_indices(nOrientation, nRotation, kernel_size, mode='fast'):
kernel_indices = {
1: {
0: (1,),
45: (1,),
90: (1,),
135: (1,),
180: (1,),
225: (1,),
270: (1,),
315: (1,)
},
3: {
0: (1,2,3,4,5,6,7,8,9),
45: (2,3,6,1,5,9,4,7,8),
90: (3,6,9,2,5,8,1,4,7),
135: (6,9,8,3,5,7,2,1,4),
180: (9,8,7,6,5,4,3,2,1),
225: (8,7,4,9,5,1,6,3,2),
270: (7,4,1,8,5,2,9,6,3),
315: (4,1,2,7,5,3,8,9,6)
}
}
delta_orientation = 360 / nOrientation
delta_rotation = 360 / nRotation
kH, kW = kernel_size
indices = torch.ByteTensor(nOrientation * kH * kW, nRotation)
for i in range(0, nOrientation):
for j in range(0, kH * kW):
for k in range(0, nRotation):
angle = delta_rotation * k
layer = (i + math.floor(angle / delta_orientation)) % nOrientation
kernel = kernel_indices[kW][angle][j]
indices[i * kH * kW + j, k] = int(layer * kH * kW + kernel)
return indices.view(nOrientation, kH, kW, nRotation)
out_channels = 4
in_channels = 2
nOrientation = 8
nRotation = 8
kernel_size = 3
input = torch.randn(out_channels, in_channels, nOrientation, kernel_size, kernel_size)
input.requires_grad = True
input = input.double()
indices = get_indices(nOrientation, nRotation, (kernel_size, kernel_size))
input = input.cuda()
indices = indices.cuda()
output = active_rotating_filter(input, indices)
print(output.size())
res = torch.autograd.gradcheck(active_rotating_filter, (input, indices), raise_exception=True)
print(res)
|
zf020114/CHPDet | src/DOTA_devkit/DOTA2JSON.py | import json
import os
import os.path as osp
import random
from PIL import Image
from dota_poly2rbox import poly2rbox_single_v2
def parse_ann_info(img_base_path, label_base_path, img_name):
    """Parse one DOTA label file.

    Returns (bboxes, labels, bboxes_ignore, labels_ignore), where each
    bbox is a 5-point xywha rect converted from the 8-point polygon.
    """
    bboxes, labels = [], []
    bboxes_ignore, labels_ignore = [], []
    lab_path = osp.join(label_base_path, img_name + '.txt')
    with open(lab_path, 'r') as f:
        for ann_line in f.readlines():
            fields = ann_line.strip().split(' ')
            # First 8 fields: polygon corners -> 5-point xywha rect.
            rbox = poly2rbox_single_v2([float(fields[i]) for i in range(8)])
            class_name = fields[8]
            difficult = int(fields[9])
            if difficult == 0:
                bboxes.append(rbox)
                labels.append(class_name)
            elif difficult == 1:
                bboxes_ignore.append(rbox)
                labels_ignore.append(class_name)
            # difficult == 2 entries are dropped entirely.
    return bboxes, labels, bboxes_ignore, labels_ignore
def generate_txt_labels(src_path, out_path, trainval=True):
    """Generate a .txt file recording image name stems, one per line.

    Args:
        src_path: dataset path containing images and labelTxt folders.
        out_path: output txt file path.
        trainval: if True, only list images whose label file exists and
            report the missing ones; if False, list every image.
    """
    img_path = os.path.join(src_path, 'images')
    label_path = os.path.join(src_path, 'labelTxt')
    img_lists = os.listdir(img_path)
    with open(out_path, 'w') as f:
        for img in img_lists:
            img_name = osp.splitext(img)[0]
            label = os.path.join(label_path, img_name + '.txt')
            # Flattened guard replaces the original `== True`/`== False`
            # comparisons; behavior is unchanged.
            if trainval and not os.path.exists(label):
                print('Label:' + img_name + '.txt' + ' Not Exist')
            else:
                f.write(img_name + '\n')
def generate_json_labels(src_path, out_path, trainval=True):
    """Generate .json labels in a coco-like format.

    Args:
        src_path: dataset path containing images and labelTxt folders.
        out_path: output json file path.
        trainval: if True, require a label file per image and attach
            parsed annotations; if False, store image metadata only.
    """
    img_path = os.path.join(src_path, 'images')
    label_path = os.path.join(src_path, 'labelTxt')
    img_lists = os.listdir(img_path)
    data_dict = []
    with open(out_path, 'w') as f:
        for id, img in enumerate(img_lists):
            img_info = {}
            img_name = osp.splitext(img)[0]
            label = os.path.join(label_path, img_name + '.txt')
            # Context manager closes the image handle — the original
            # leaked one open PIL file per image in the dataset.
            with Image.open(osp.join(img_path, img)) as im:
                img_info['height'] = im.height
                img_info['width'] = im.width
            img_info['filename'] = img_name + '.png'
            img_info['id'] = id
            if trainval:
                if not os.path.exists(label):
                    print('Label:' + img_name + '.txt' + ' Not Exist')
                else:
                    bboxes, labels, bboxes_ignore, labels_ignore = parse_ann_info(
                        img_path, label_path, img_name)
                    img_info['annotations'] = {
                        'bboxes': bboxes,
                        'labels': labels,
                        'bboxes_ignore': bboxes_ignore,
                        'labels_ignore': labels_ignore,
                    }
            data_dict.append(img_info)
        json.dump(data_dict, f)
if __name__ == '__main__':
    # Export coco-like json for the DOTA trainval and test splits.
    generate_json_labels('/project/jmhan/data/dota/trainval',
                         '/project/jmhan/data/dota/trainval/trainval.json')
    generate_json_labels('/project/jmhan/data/dota/test',
                         '/project/jmhan/data/dota/test/test.json', trainval=False)
    print('done!')
|
zf020114/CHPDet | src/0data_crop.py | <reponame>zf020114/CHPDet
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 10 18:22:36 2019
版本10.31
@author: admin
"""
import os
import cv2
import numpy as np
import DataFunction
from timeit import default_timer as timer
#voc路径
img_dir = 'E:/Dataset/HRSC2016/Train/AllImages'#r'E:\Dataset\HRSC2016FullDataSet\AllImages'
xml_dir = img_dir
outputfolder='E:/Dataset/HRSC2016_Train_1024/'
file_ext='.bmp'
output_VOC_folder='E:/Dataset/2019tianzhi/YSplane_VOC/'#存储新的VOCanno位置
[w_crop,h_crop]=[1024,1024] #这是要切割的图像尺寸 第一个是宽,第二个是高
overlap_ratio=1/5
outrange_ratio=1/6
ratios=[0.7,0.8,0.9,1,1.1]#[1]#设置在切割过程中缩放的倍数
skip_center=50 #表示如果机场有30以下的飞机,则正常都遍历,如果超过了30个,每增加一倍,取中心的步距增加一倍
angle_range = [np.pi/20,np.pi*19/20]#角度im_rotate用到的是角度制 旋转角度的范围
NAME_LABEL_MAP = {
'船': 1,
'航母': 2,
'军舰': 3,
'商船': 4,
'尼米兹级航母': 5,
'企业级航母': 6,
'阿利伯克级驱逐舰': 7,
'惠德贝岛级船坞登陆舰': 8,
'佩里级护卫舰': 9,
'圣安东尼奥级两栖船坞运输舰': 10,
'提康德罗加级巡洋舰': 11,
'小鹰级航母': 12,
'俄罗斯库兹涅佐夫号航母': 13,
'阿武隈级护卫舰': 14,
'奥斯汀级两栖船坞运输舰': 15,
'塔拉瓦级通用两栖攻击舰': 16,
'蓝岭级指挥舰': 17,
'集装箱货船': 18,
'尾部OX头部圆指挥舰': 19,
'运输汽车船': 20,
'舰':21,
'气垫船': 22,
'航':23,
'游艇': 24,
'货船(_|.--.--|_]=':25,
'游轮':26,
'潜艇':27,
'琵琶形军舰':28,
'医疗船':29,
'运输汽车船(======|':30,
'福特级航空母舰':31,
'中途号航母':32,
'无敌级航空母舰':33
}
def get_label_name_map():
    """Invert the module-level NAME_LABEL_MAP into a label -> name dict."""
    # When several names share a label, the last one iterated wins,
    # matching the original assignment loop's overwrite behavior.
    return {label: name for name, label in NAME_LABEL_MAP.items()}
LABEl_NAME_MAP = get_label_name_map()
##每一类别的飞机旋转的次数
angle_number_dict= {
'船': 1,
'航母': 20,
'军舰': 5,
'商船': 40,
'尼米兹级航母': 5,
'企业级航母': 10,
'阿利伯克级驱逐舰': 0,
'惠德贝岛级船坞登陆舰': 3,
'佩里级护卫舰': 0,
'圣安东尼奥级两栖船坞运输舰': 6,
'提康德罗加级巡洋舰': 0,
'小鹰级航母': 12,
'俄罗斯库兹涅佐夫号航母': 20,
'阿武隈级护卫舰': 14,
'奥斯汀级两栖船坞运输舰': 6,
'塔拉瓦级通用两栖攻击舰': 3,
'蓝岭级指挥舰': 17,
'集装箱货船': 10,
'尾部OX头部圆指挥舰': 6,
'运输汽车船': 20,
'舰':21,
'气垫船': 8,
'航':23,
'游艇': 24,
'货船(_|.--.--|_]=':1,
'游轮':50,
'潜艇':4,
'琵琶形军舰':20,
'医疗船':24,
'运输汽车船(======|':16,
'福特级航空母舰':31,
'中途号航母':10,
'无敌级航空母舰':33
}
angle_num_multiple=1
for name, aug_num in angle_number_dict.items():
angle_number_dict[name] = aug_num*angle_num_multiple
#新建输出文件夹
if not os.path.isdir(outputfolder):
os.makedirs(outputfolder)
#读取原图全路径
imgs_path = DataFunction.get_file_paths_recursive(img_dir, file_ext)
#旋转角的大小,整数表示逆时针旋转
imgs_total_num=len(imgs_path)
for num,img_path in enumerate(imgs_path,0):
start = timer()
#一、读取图像并获取基本参数
img_path=imgs_path[num]
img = cv2.imread(img_path)
#16 to 8
# min_pix=img.min()
# max_pix=img.max()
# img = (img /max_pix * 255).astype(np.uint8)
img_size=img.shape#高,宽 ,通道
img_num=1
#二、读取标注文件
[floder,name]=os.path.split(img_path)
xml_path=os.path.join(xml_dir,os.path.splitext(name)[0]+'.xml')
img_size,gsd,imagesource,gtbox,extra=DataFunction.read_rotate_xml(xml_path,NAME_LABEL_MAP)
gsd=1
#三、把单通道图像转为三通道
if img_size[2]==1:
img_new=np.zeros((img_size[0],img_size[1],3))
img_new[:,:,0]=img
img_new[:,:,1]=img
img_new[:,:,2]=img
img=img_new
img_size[2]=3
if len(gtbox)>0:
#
# # #四、将图片变为横向的图片
# if img_size[0]>img_size[1]:#说明图像是高度大于宽度,是竖直的图像
# r_center=(img_size[1]/2,img_size[0]/2)
# M = cv2.getRotationMatrix2D(r_center, -90, 1) #
# # compute the new bounding dimensions of the image
# nW = img_size[0]
# nH = img_size[1]
# # adjust the rotation matrix to take into account translation
# t_x=(nW / 2) - r_center[0]
# t_y=-t_x
# M[0, 2] += (nW / 2) - r_center[0]
# M[1, 2] += (nH / 2) - r_center[1]
# img = cv2.warpAffine(img, M, (img_size[0], img_size[1])) #6
# gtbox=DataFunction.rotate_xml_rotate(np.pi/2,(img_size[0]/2,img_size[1]/2),gtbox)
# gsd_scale,gtbox=DataFunction.rotate_xml_transform(-t_x,t_y,1,gsd,gtbox)
# img_size=[img_size[1],img_size[0],img_size[2]]
# DataFunction.write_rotate_xml(outputfolder,img_path,img_size,gsd,imagesource,gtbox_label,LABEl_NAME_MAP)
# img_filename=os.path.join(outputfolder,os.path.splitext(name)[0]+'.jpg')
# cv2.imwrite(img_filename, img)
# #五、对于标签框占比较大的图像进行缩小 以下程序是找出图像中所有标签的最大和最小尺寸
# gtbox_label_array = np.array(gtbox_label)
# objects_Width=gtbox_label_array[:,2]
# objects_Height=gtbox_label_array[:,3]
# max_len=np.max([np.max(objects_Width),np.max(objects_Height)])
# min_len=np.min([np.max(objects_Width),np.max(objects_Height)])
#六、将需要处理图像切小,切掉多余的区域,思路,因为在旋转的时候,最多转的半径为最大的的切割尺寸,所有在那个范围之外的图像都切掉
# #得到最大的旋转半径
# r_max=w_crop/2+100
# #得到中心,最大值,最小值点,
# gtbox_label_array = np.array(gtbox_label)
# c_x,c_y = gtbox_label_array[:,0], gtbox_label_array[:,1]
# c_xmin ,c_ymin, c_xmax, c_ymax=int(np.min(c_x)), int(np.min(c_y)), int(np.max(c_x)), int(np.max(c_y))
# #确定切割位置
# c_xmin ,c_ymin, c_xmax, c_ymax= int(max([c_xmin-r_max,0])),int(max([ c_ymin-r_max,0])) ,int(min([c_xmax+r_max,img_size[1]])), int(min([c_ymax+r_max,img_size[0]]))
# if (c_xmin !=0 or c_ymin !=0 or c_xmax !=img_size[1] or c_ymax !=img_size[0]):
# img=img[int(c_ymin):int(c_ymax),int(c_xmin):int(c_xmax),:]
# img_size=[c_ymax-c_ymin,c_xmax-c_xmin ,3]
# gsd_scale,gtbox_label=DataFunction.rotate_xml_transform(c_ymin,c_xmin,1,gsd,gtbox_label)
# DataFunction.write_rotate_xml(outputfolder,img_path,img_size,gsd_scale,imagesource,gtbox_label,LABEl_NAME_MAP)
# img_filename=os.path.join(outputfolder,os.path.splitext(name)[0]+'.jpg')
# cv2.imwrite(img_filename, img)
#第1种方法将gsd缩放到0.5
# if gsd>0:#说明读取到了gsd数据
# zoom_scale=gsd/1
# if zoom_scale!=1:
# img_size=[int(img_size[0]*zoom_scale),int(img_size[1]*zoom_scale),img_size[2]]
# img=cv2.resize(img,(img_size[1],img_size[0]))
# gsd_scale,gtbox=DataFunction.rotate_xml_transform(0,0,zoom_scale,gsd,gtbox)
# DataFunction.write_rotate_xml(outputfolder,img_path,img_size,gsd_scale,imagesource,gtbox_label,LABEl_NAME_MAP)
# img_filename=os.path.join(outputfolder,os.path.splitext(name)[0]+'.jpg')
# cv2.imwrite(img_filename, img)
# img_num+=1
# else:
# #第2种方法标签筛选如果标签尺寸超过所需分辨率1/4,则将图片和标签缩小两倍
## if max_len>h_crop/3:
## img_size=[int(img_size[0]/2),int(img_size[1]/2),img_size[2]]
## img=cv2.resize(img,(img_size[1],img_size[0]))
## gsd,gtbox_label=DataFunction.rotate_xml_transform(0,0,1/2,gsd,gtbox_label)
#
# #如果没有GSD数据,则将所有的标签大小归一化到 max_plane_len,min_plane_len=142,18之间
# shrink_ratio=max_plane_len/max_len
# enlargement_ratio=min_plane_len/min_len
# if shrink_ratio<1:#表示最大的飞机尺寸大于140,要缩小
# zoom_scale=shrink_ratio*0.9
# elif enlargement_ratio>1:#表示最小的飞机尺寸小于18,要放大
# zoom_scale=enlargement_ratio
# else:
# zoom_scale=1
# img_size=[int(img_size[0]*zoom_scale),int(img_size[1]*zoom_scale),img_size[2]]
# img=cv2.resize(img,(img_size[1],img_size[0]))
# gsd_scale,gtbox_label=DataFunction.rotate_xml_transform(0,0,zoom_scale,gsd,gtbox_label)
#
##图像切割
img_num=DataFunction.crop_img_rotatexml(ratios,overlap_ratio,outrange_ratio,h_crop,w_crop,img_path,outputfolder,output_VOC_folder,img_num,img_size,img,gsd,imagesource,gtbox,LABEl_NAME_MAP)
# #图像旋转切割
gtbox_label_array=np.array(gtbox)
label_num=gtbox_label_array[:,5]
# if 29 in label_num: #or 35 in label_num or 37 in label_num:
centers=gtbox_label_array[:,0:2].tolist()
labels=gtbox_label_array[:,5].tolist()
#这里做一个判断,如果一幅图像目标数量过多,则减少围绕旋转的中心数量
multiple=int(len(centers)/skip_center)+1
centers_new=centers[0:len(centers)+1:multiple]
labels_new=labels[0:len(centers)+1:multiple]
for i, center in enumerate (centers_new):
angle_number=angle_number_dict[LABEl_NAME_MAP[labels_new[i]]]
# angle_number=angle_number_class[int(labels_new[i])-1]
angles=np.linspace(angle_range[0],angle_range[1],num=angle_number)+i*2*np.pi/180#转为弧度
for ang in angles:
im_rot=DataFunction.im_rotate(img,-ang,center =(center[0],center[1]),scale=1.0)
gtbox_label_rotate=DataFunction.rotate_xml_rotate(ang,center,gtbox)
gtbox_label_rotate,rect_box=DataFunction.rotate_xml_valid(img_size[0],img_size[1],outrange_ratio,gtbox_label_rotate)
img_num=DataFunction.crop_img_rotatexml(ratios,overlap_ratio,outrange_ratio,h_crop,w_crop,img_path,outputfolder,output_VOC_folder,img_num,img_size,im_rot,gsd,imagesource,gtbox_label_rotate,LABEl_NAME_MAP,center,flag='plane')
## (ratios,overlap_ratio,outrange_ratio,h_crop,w_crop,img_path,outputfolder,output_VOC_folder,img_num,img_size,img,gsd,imagesource,gtbox_label,LABEl_NAME_MAP,center_select = None)
# DataFunction.write_rotate_xml(outputfolder,img_path,img_size,gsd_scale,imagesource,gtbox_label_rotate)
# img_filename=os.path.join(outputfolder,os.path.splitext(name)[0]+'.jpg')
# cv2.imwrite(img_filename, im_rot)
else :
print('{} annotation is empty!'.format(img_path))
time_elapsed = timer() - start
print('{}/{},time{}: augnum:{}'.format(num,imgs_total_num,time_elapsed,img_num)) |
zf020114/CHPDet | src/lib/detectors/multi_pose.py | <gh_stars>1-10
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
import os
try:
from external.nms import soft_nms_39
except:
print('NMS not imported! If you need it,'
' do \n cd $CenterNet_ROOT/src/lib/external \n make')
from models.decode import multi_pose_decode
from models.utils import flip_tensor, flip_lr_off, flip_lr
from utils.image import get_affine_transform
from utils.post_process import multi_pose_post_process
from utils.debugger import Debugger
from .base_detector import BaseDetector
# NAME_LABEL_MAP ={
# 'plane': 1,
# 'baseball-diamond': 2,
# 'bridge': 3,
# 'ground-track-field': 4,
# 'small-vehicle': 5,
# 'large-vehicle': 6,
# 'ship': 7,
# 'tennis-court': 8,
# 'basketball-court': 9,
# 'storage-tank': 10,
# 'soccer-ball-field': 11,
# 'roundabout': 12,
# 'harbor': 13,
# 'swimming-pool': 14,
# 'helicopter': 15,
# 'container-crane': 16
# }
NAME_LABEL_MAP = {
'Aircraft carriers': 1,
'Wasp ': 2,
'Tarawa ': 3,
'Austin ': 4,
'Whidbey Island ': 5,
'San Antonio ': 6,
'Newport ': 7,
'Ticonderoga ': 8,
' Burke ': 9,
'Perry ': 10,
'Lewis and Clark ': 11,
'Supply ': 12,
'<NAME> ': 13,
' Hope ': 14,
'Mercy ': 15,
'Freedom ': 16,
'Independence ': 17,
'Avenger ': 18,
'Submarine':19,
'Other':20
}
def get_label_name_map(NAME_LABEL_MAP):
    """Return the inverse (label -> name) of a name -> label mapping."""
    return {label: name for name, label in NAME_LABEL_MAP.items()}
LABEL_NAME_MAP=get_label_name_map(NAME_LABEL_MAP)
class MultiPoseDetector(BaseDetector):
    """CenterNet-style detector whose head predicts boxes plus one
    keypoint per object (used here as the ship-head point)."""
    def __init__(self, opt):
        super(MultiPoseDetector, self).__init__(opt)
        # Keypoint index pairs to swap when horizontally flip-testing.
        self.flip_idx = opt.flip_idx
    def process(self, images, return_time=False):
        # Run the network and decode its raw heads into detections.
        with torch.no_grad():
            torch.cuda.synchronize()
            output = self.model(images)[-1]
            output['hm'] = output['hm'].sigmoid_()
            if self.opt.hm_hp and not self.opt.mse_loss:
                output['hm_hp'] = output['hm_hp'].sigmoid_()
            # Optional heads are None when the corresponding flag is off.
            reg = output['reg'] if self.opt.reg_offset else None
            hm_hp = output['hm_hp'] if self.opt.hm_hp else None
            hp_offset = output['hp_offset'] if self.opt.reg_hp_offset else None
            torch.cuda.synchronize()
            forward_time = time.time()
            if self.opt.flip_test:
                # Batch holds [image, flipped image]; average both views.
                output['hm'] = (output['hm'][0:1] + flip_tensor(output['hm'][1:2])) / 2
                output['wh'] = (output['wh'][0:1] + flip_tensor(output['wh'][1:2])) / 2
                output['hps'] = (output['hps'][0:1] +
                                 flip_lr_off(output['hps'][1:2], self.flip_idx)) / 2
                hm_hp = (hm_hp[0:1] + flip_lr(hm_hp[1:2], self.flip_idx)) / 2 \
                    if hm_hp is not None else None
                reg = reg[0:1] if reg is not None else None
                hp_offset = hp_offset[0:1] if hp_offset is not None else None
            dets = multi_pose_decode(
                output['hm'], output['wh'], output['hps'],
                reg=reg, hm_hp=hm_hp, hp_offset=hp_offset, K=self.opt.K)
            if return_time:
                return output, dets, forward_time
            else:
                return output, dets
    def post_process(self, dets, meta, scale=1):
        # Map decoded detections back into original-image coordinates.
        dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
        dets = multi_pose_post_process(
            dets.copy(), [meta['c']], [meta['s']],
            meta['out_height'], meta['out_width'])
        for j in range(1, self.num_classes + 1):
            # Row layout: 4 bbox coords, score, then keypoint coords
            # (indices 5:), hence the reshape to width 7.
            dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 7)
            dets[0][j][:, :4] /= scale
            dets[0][j][:, 5:] /= scale
        # for j in range(1, self.num_classes+1):# +
        #     if len(dets[0][j])>1:
        #         dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 39)
        #         # import pdb; pdb.set_trace()
        #         dets[0][j][:, :4] /= scale
        #         dets[0][j][:, 5:] /= scale
        return dets[0]
    def merge_outputs(self, detections):
        # Concatenate per-scale detections per class; soft-NMS when
        # multi-scale testing or NMS is requested.
        results = {}
        for i in range(self.num_classes ):
            results[i+1] = np.concatenate(
                [detection[i+1] for detection in detections], axis=0).astype(np.float32)
            if self.opt.nms or len(self.opt.test_scales) > 1:
                soft_nms_39(results[i+1], Nt=0.8, method=2)
            results[i+1] = results[i+1].tolist()
        return results
    def debug(self, debugger, images, dets, output, scale=1):
        # Visualize predicted heatmaps blended over the input image.
        dets = dets.detach().cpu().numpy().copy()
        dets[:, :, :4] *= self.opt.down_ratio
        dets[:, :, 5:7] *= self.opt.down_ratio#39
        img = images[0].detach().cpu().numpy().transpose(1, 2, 0)
        # Undo the normalization applied at preprocessing time.
        img = np.clip(((
            img * self.std + self.mean) * 255.), 0, 255).astype(np.uint8)
        pred = debugger.gen_colormap(output['hm'][0].detach().cpu().numpy())
        debugger.add_blend_img(img, pred, 'pred_hm')
        if self.opt.hm_hp:
            pred = debugger.gen_colormap_hp(
                output['hm_hp'][0].detach().cpu().numpy())
            debugger.add_blend_img(img, pred, 'pred_hmhp')
    def show_results(self, debugger, image, results,image_path=None):
        # Draw rotated boxes and head keypoints above the score threshold.
        debugger.add_img(image, img_id='multi_pose')
        for i in range(self.opt.num_classes):#change24
            for bbox in results[i+1]:
                if bbox[4] > self.opt.vis_thresh:
                    # debugger.add_coco_bbox(bbox[:4], 0, bbox[4], img_id='multi_pose')
                    debugger.add_coco_rbox(bbox,bbox[4], img_id='multi_pose',label_name=LABEL_NAME_MAP[i+1])
                    debugger.add_coco_hp(bbox[5:7], img_id='multi_pose')#
        debugger.show_all_imgs(pause=self.pause)
        # prefix=os.path.splitext(os.path.split(image_path)[1])[0]
        debugger.save_all_imgs(prefix='')
zf020114/CHPDet | src/1make_json_anno.py | # coding=utf-8
import xml.etree.ElementTree as ET
import os
import json
import numpy as np
import DataFunction
import cv2
from timeit import default_timer as timer
from All_Class_NAME_LABEL import NAME_LABEL_MAP_HRSC,NAME_LABEL_MAP_USnavy_20,NAME_LABEL_MAP_ICDAR,NAME_LABEL_MAP_HRSC
NAME_LABEL_MAP=NAME_LABEL_MAP_USnavy_20#NAME_LABEL_MAP_ICDAR
data_dir='/home/zf/CenterNet/data/coco_usnavy_val/'#'/media/zf/E/Dataset/HRSC2016_aug1024/'#r'E:\Dataset\US_Navy_train_aug'
Rotatexmls =os.path.join( data_dir,'rotatexml_val')
json_name =os.path.join( data_dir,'annotations/person_keypoints_val2017.json')
# Rotatexmls =os.path.join( data_dir,'rotatexml')
# json_name =os.path.join( data_dir,'annotations/person_keypoints_train2017.json')
def get_label_name_map():
    """Invert the module-level NAME_LABEL_MAP into a label -> name dict."""
    # Names sharing a label collapse to the last one iterated, exactly
    # as the original overwrite-in-a-loop version behaved.
    return {label: name for name, label in NAME_LABEL_MAP.items()}
LABEl_NAME_MAP = get_label_name_map()
voc_clses=[]
for name, label in NAME_LABEL_MAP.items():
voc_clses.append(name)
categories = []
for iind, cat in enumerate(voc_clses):
cate = {}
cate['supercategory'] = cat
cate['name'] = cat
cate['id'] = iind + 1
cate['keypoints'] = [
"ship head"
]
cate['skeleton'] = [
[
1,
1
]
]
categories.append(cate)
def get_file_paths_recursive(folder=None, file_ext=None):
    """Return sorted paths of files in *folder* ending with *file_ext*.

    NOTE(review): despite the name, only the top level of *folder* is
    scanned — os.listdir does not descend into subdirectories.

    :param folder: directory to scan; None yields an empty list.
    :param file_ext: required filename suffix (e.g. '.xml').
    :return: sorted list of joined paths.
    """
    if folder is None:
        return []
    return [os.path.join(folder, name)
            for name in sorted(os.listdir(folder))
            if name.endswith(file_ext)]
def txt2list(txtfile):
    """Read *txtfile* and return its lines without trailing newlines.

    Fixes two defects of the original: the file handle is now closed
    (via ``with``), and a final line lacking a trailing newline no
    longer loses its last character (``rstrip('\n')`` instead of the
    unconditional ``line[:-1]`` slice).
    """
    with open(txtfile) as f:
        return [line.rstrip('\n') for line in f]
def xml2bbox_seg(xmlname, rotate_box_list,i_index):
    # Convert rotated rectangles into coco-readable records: each rotated
    # box becomes a segmentation polygon that also carries the ship-head
    # position as a keypoint.
    bbox=[]
    for ind, rectbox in enumerate(rotate_box_list):
        rotatebox=rotate_box_list[ind]
        #rect_box_list:xmin ymin xmax ymax label
        #rotate_box_list:cx cy w h angle label difficult
        img_id=i_index#int(os.path.split(xmlname)[1].replace('.xml','').split('/')[-1])
        [x_center,y_center,w,h,angle,label]=rotatebox[0:6]
        # cv_rotete_rect=DataFunction.rotate_rect2cv(rotatebox[0:5])
        # NOTE: an earlier attempt tightened the bbox using head/tail/wing
        # points, but the detection box then constrained the mask and the
        # min-rect fit of the segmentation degenerated to squares, losing
        # orientation — hence the plain axis-aligned bbox below.
        xmin,ymin,xmax,ymax = x_center-w/2, y_center-h/2 , x_center+w/2, y_center+h/2
        RotateMatrix=np.array([
            [np.cos(angle),-np.sin(angle)],
            [np.sin(angle),np.cos(angle)]])
        # Six-point ship mask (box-local corner offsets; the bow is tapered
        # toward the head point).
        r1,rhead1,rhead2,rhead, r2,r3,r4=np.transpose([-w/2,-h/2+w]),np.transpose([-w/12,-h/2]),np.transpose([w/12,-h/2]),np.transpose([0,-h/2]),np.transpose([w/2,-h/2+w]),np.transpose([w/2,h/2]),np.transpose([-w/2,h/2])
        # Rectangle-mask variant:
        # r1,rhead,r2,r3,r4=np.transpose([-w/2,-h/2]),np.transpose([0,-h/2]),np.transpose([w/2,-h/2]),np.transpose([w/2,h/2]),np.transpose([-w/2,h/2])
        # Plane-mask variant:
        # r1,rhead,r2,r3,r4=np.transpose([-w/2,0]),np.transpose([0,-h/2]),np.transpose([w/2,0]),np.transpose([w/2,h/2]),np.transpose([-w/2,h/2])
        # Rotate each offset into image coordinates and translate to center.
        p1=np.transpose(np.dot(RotateMatrix, r1))+[x_center,y_center]
        head1=np.transpose(np.dot(RotateMatrix, rhead1))+[x_center,y_center]
        head2=np.transpose(np.dot(RotateMatrix, rhead2))+[x_center,y_center]
        head=np.transpose(np.dot(RotateMatrix, rhead))+[x_center,y_center]
        p2=np.transpose(np.dot(RotateMatrix, r2))+[x_center,y_center]
        p3=np.transpose(np.dot(RotateMatrix, r3))+[x_center,y_center]
        p4=np.transpose(np.dot(RotateMatrix, r4))+[x_center,y_center]
        area=h*w*3/4
        # Record layout: 12 polygon coords, head keypoint (x, y, 2),
        # bbox (xmin, ymin, w, h), img_id, label, area.
        bbox.append([p1[0],p1[1],head1[0],head1[1],head2[0],head2[1],p2[0],p2[1],p3[0],p3[1],p4[0],p4[1],head[0],head[1],2,xmin,ymin,w,h,img_id,label,area])
        # bbox.append[p1,head,head,p2,p3,p4,xmin,ymin,xmax - xmin,ymax - ymin,img_id,label,h*w*3/4]
    return bbox
if not os.path.isdir(os.path.split(json_name)[0]):
os.makedirs(os.path.split(json_name)[0])
#voc2007xmls = '/home/zf/dataset/HRSC/Annotitions/'
#json_name = '/home/zf/dataset/HRSC/annotations/instances_train2017.json'
xmls = get_file_paths_recursive(Rotatexmls,'xml')
bboxes = []
ann_js = {}
images = []
total_num=len(xmls)
start = timer()
for i_index, xml_file in enumerate(xmls):
# img_height, img_width, rect_box_list=TestFunction.read_VOC_xml(xml_file)
# rotate_xml_path=xml_file.replace(voc2007xmls,Rotatexmls)
img_size,gsd,imagesource,rotate_box_list,extra=DataFunction.read_rotate_xml(xml_file,NAME_LABEL_MAP)
# assert img_size[0] == img_height and img_size[1] == img_width, 'ann %s size dont match!' % (xml_file)
# assert len(rect_box_list) == len(rotate_box_list), 'ann %s size dont match!' % (xml_file)
image = {}
image['file_name'] = os.path.split(xml_file)[-1].replace('xml','jpg')
image['width'] = 1024#img_size[1]#width#
image['height'] =1024#img_size[0]#600#
image['id'] =i_index#int(os.path.split(xml_file)[1].replace('.xml','').split('/')[-1])
#p1[0],p1[1],head[0],head[1],p2[0],p2[1],p3[0],p3[1],p4[0],p4[1],xmin,ymin,xmax-xmin,ymax-ymin,img_id,label,area
# sig_xml_bbox_old=xml2bbox_seg_old(xml_file,rect_box_list, rotate_box_list)
sig_xml_bbox=xml2bbox_seg(xml_file,rotate_box_list,i_index)
# image, sig_xml_bbox = getimages(xml_file, i_index)
images.append(image)
bboxes.extend(sig_xml_bbox)
if i_index%1000==0:
print('{}/{}'.format(i_index,total_num))
time_elapsed = timer() - start
print('time:{}s'.format(time_elapsed))
ann_js['images'] = images
ann_js['categories'] = categories
annotations = []
total_box=len(bboxes)
for box_ind, box in enumerate(bboxes):
anno = {}
anno['image_id'] = box[-3]
anno['category_id'] = box[-2] #1 HRSC
anno['bbox'] = box[-7:-3]
anno['id'] = box_ind
anno['area'] = box[-1]
anno['iscrowd'] = 0
anno['segmentation']=[box[0:12]]
anno['num_keypoints'] = 1
z1=np.array(box[12:15]).reshape((1,-1))#np.ones([1,3])
zeros_mat=np.zeros([1,48])
# z2=np.squeeze(np.column_stack((z1,zeros_mat))).astype(np.int32)
anno['keypoints']=z1.tolist()
annotations.append(anno)
if box_ind%5000==0:
print('{}/{}'.format(box_ind,total_box))
ann_js['annotations'] = annotations
json.dump(ann_js, open(json_name, 'w'), indent=4) # indent=4 更加美观显示
time_elapsed = timer() - start
print('time:{}s'.format(time_elapsed))
print('down!')
|
zf020114/CHPDet | src/All_Class_NAME_LABEL.py | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 4 11:06:37 2019
@author: admin
"""
def get_label_name_map(NAME_LABEL_MAP):
    """Invert a name->label mapping into a label->name mapping.

    If several names share the same label (e.g. two spellings of "other"
    both mapped to 24), the name encountered last in iteration order wins,
    exactly as with repeated assignment into a plain dict.
    """
    return {label: name for name, label in NAME_LABEL_MAP.items()}
# ICDAR text detection: a single generic category named '1'.
NAME_LABEL_MAP_ICDAR = {'1': 1}
# US Navy ship classes (Chinese names) -> category ids, 24 classes.
# English names sharing the same ids are in NAME_LABEL_MAP_USnavy_en.
# '其它' and '其他' are two spellings of "other"; both map to id 24 so
# either form found in an annotation resolves to the same class.
NAME_LABEL_MAP_USnavy = {
    '航母': 1,
    '黄蜂级': 2,
    '塔瓦拉级': 3,
    '蓝岭级': 4,
    '奥斯汀级': 5,
    '惠特贝岛级': 6,
    '圣安东尼奥级': 7,
    '新港级': 8,
    '提康德罗加级': 9,
    '阿利·伯克级': 10,
    '朱姆沃尔特级': 11,
    '佩里级': 12,
    '刘易斯和克拉克级': 13,
    '供应级': 14,
    '凯泽级': 15,
    '霍普级': 16,
    '仁慈级': 17,
    '先锋级': 18,
    '自由级': 19,
    '独立级': 20,
    '复仇者级': 21,
    '胜利级': 22,
    '潜艇':23,
    '其它':24,
    '其他':24
    }
# Single-class bridge detector.
NAME_LABEL_MAP_bridge = {
    'bridge': 1,
    }
# Single-class plane detector.
NAME_LABEL_MAP_plane = {
    'plane': 1,
    }
# 20-class variant of the US Navy map: the rare classes 蓝岭级 (4),
# 朱姆沃尔特级 (11), 先锋级 (18) and 胜利级 (22) are folded into id 24
# ("other"), so ids 4, 11, 18 and 22 are intentionally unused here.
NAME_LABEL_MAP_USnavy_20 = {
    '航母': 1,
    '黄蜂级': 2,
    '塔瓦拉级': 3,
    '蓝岭级': 24,
    '奥斯汀级': 5,
    '惠特贝岛级': 6,
    '圣安东尼奥级': 7,
    '新港级': 8,
    '提康德罗加级': 9,
    '阿利·伯克级': 10,
    '朱姆沃尔特级': 24,
    '佩里级': 12,
    '刘易斯和克拉克级': 13,
    '供应级': 14,
    '凯泽级': 15,
    '霍普级': 16,
    '仁慈级': 17,
    '先锋级': 24,
    '自由级': 19,
    '独立级': 20,
    '复仇者级': 21,
    '胜利级': 24,
    '潜艇':23,
    '其他':24
    }
# Reverse lookup: id -> Chinese name. Several names share id 24, so the
# name inserted last ('其他') is the one returned for 24.
LABEL_NAME_MAP_USnavy_20 = get_label_name_map(NAME_LABEL_MAP_USnavy_20)
# English names for the 24 US Navy classes (ids match NAME_LABEL_MAP_USnavy).
# NOTE(review): '<NAME> class' (id 15) is an anonymization placeholder —
# id 15 is 凯泽级 (Kaiser class) in the Chinese map; restore before use.
# NOTE(review): 'Ticonderoga class ' has a trailing space in the key;
# lookups must reproduce it exactly.
NAME_LABEL_MAP_USnavy_en = {
    'Aircraft carriers': 1,
    'Wasp class': 2,
    'Tarawa class': 3,
    'Blue Ridge class': 4,
    'Austin class': 5,
    'Whidbey Island class': 6,
    'San Antonio class': 7,
    'Newport class': 8,
    'Ticonderoga class ': 9,
    'Arleigh Burke class': 10,
    'Zumwalt class': 11,
    'Perry class': 12,
    'Lewis and Clark class': 13,
    'Supply class': 14,
    '<NAME> class': 15,
    'Bob Hope Class': 16,
    'Mercy class': 17,
    'Spearhead class': 18,
    'Freedom class': 19,
    'Independence class': 20,
    'Avenger class': 21,
    'Victorious-class': 22,
    'Submarine':23,
    'Other':24
    }
# Typical overall length per US Navy class — presumably metres (values are
# consistent with published ship lengths, e.g. Nimitz-class ~330 m) —
# TODO confirm units and how the caller consumes this (likely a size prior).
NAME_LONG_MAP_USnavy = {
    '航母': 330,
    '黄蜂级': 253,
    '塔瓦拉级': 253,
    '蓝岭级': 193,
    '奥斯汀级': 173,
    '惠特贝岛级': 185,
    '圣安东尼奥级': 208,
    '新港级': 168,
    '提康德罗加级': 172,
    '阿利·伯克级': 154,
    '朱姆沃尔特级': 182,
    '佩里级': 135,
    '刘易斯和克拉克级': 210,
    '供应级': 229,
    '凯泽级': 206,
    '霍普级': 290,
    '仁慈级': 272,
    '先锋级': 103,
    '自由级': 120,
    '独立级': 127,
    '复仇者级': 68,
    '胜利级': 71.5,
    '潜艇':140,
    '其他':200
    }
# Baseline deviation value shared by most entries of NAME_STD_MAP_USnavy.
normal=0.10
# Per-class deviation weights — presumably a relative tolerance paired with
# NAME_LONG_MAP_USnavy (small/ambiguous classes get 2x, submarines/other
# much larger); verify the exact semantics against the caller.
NAME_STD_MAP_USnavy = {
    '航母': 0.1,
    '黄蜂级': normal,
    '塔瓦拉级': normal,
    '蓝岭级': normal,
    '奥斯汀级': normal,
    '惠特贝岛级': normal,
    '圣安东尼奥级': normal,
    '新港级': normal,
    '提康德罗加级': normal,
    '阿利·伯克级': normal,
    '朱姆沃尔特级': normal,
    '佩里级': normal+0.01,
    '刘易斯和克拉克级': normal,
    '供应级': normal-0.01,
    '凯泽级': normal-0.01,
    '霍普级': normal,
    '仁慈级': normal,
    '先锋级': normal*2,
    '自由级': normal*2,
    '独立级': normal*2,
    '复仇者级': normal*2,
    '胜利级': normal*2,
    '潜艇':0.75,
    '其他':1
    }
# TZ ship dataset: 13 US Navy classes (Chinese names) -> ids.
# A letter/number-coded variant of the same 13 classes is
# NAME_LABEL_MAP_TZship_num below.
NAME_LABEL_MAP_TZship = {
    '航母': 1,
    '黄蜂级': 2,
    '圣安东尼奥级': 3,
    '惠特贝岛级': 4,
    '奥斯汀级': 5,
    '提康德罗加级': 6,
    '阿利·伯克级': 7,
    '佩里级': 8,
    '刘易斯和克拉克级': 9,
    '凯泽级': 10,
    '供应级': 11,
    '霍普级': 12,
    '仁慈级': 13
    }
# HRSC2016 ship classes (Chinese names) -> ids, 33 classes.
# English equivalents with the same ids: NAME_LABEL_MAP_HRSC_en below.
# Some keys embed ASCII-art shape hints (e.g. '(_|.--.--|_]=') that are part
# of the dataset's class names and must be matched verbatim.
NAME_LABEL_MAP_HRSC = {
    '船': 1,
    '航母': 2,
    '军舰': 3,
    '商船': 4,
    '尼米兹级航母': 5,
    '企业级航母': 6,
    '阿利伯克级驱逐舰': 7,
    '惠德贝岛级船坞登陆舰': 8,
    '佩里级护卫舰': 9,
    '圣安东尼奥级两栖船坞运输舰': 10,
    '提康德罗加级巡洋舰': 11,
    '小鹰级航母': 12,
    '俄罗斯库兹涅佐夫号航母': 13,
    '阿武隈级护卫舰': 14,
    '奥斯汀级两栖船坞运输舰': 15,
    '塔拉瓦级通用两栖攻击舰': 16,
    '蓝岭级指挥舰': 17,
    '集装箱货船': 18,
    '尾部OX头部圆指挥舰': 19,
    '运输汽车船': 20,
    '舰':21,
    '气垫船': 22,
    '航':23,
    '游艇': 24,
    '货船(_|.--.--|_]=':25,
    '游轮':26,
    '潜艇':27,
    '琵琶形军舰':28,
    '医疗船':29,
    '运输汽车船(======|':30,
    '福特级航空母舰':31,
    '中途号航母':32,
    '无敌级航空母舰':33
    }
# English names for the 33 HRSC2016 classes (ids match NAME_LABEL_MAP_HRSC).
# NOTE(review): '<NAME>' (id 7) is an anonymization placeholder — id 7 is
# 阿利伯克级驱逐舰 (Arleigh Burke destroyer) in the Chinese map; restore it.
NAME_LABEL_MAP_HRSC_en = {
    'ship': 1,
    'aircraft carrier': 2,
    'warcraft': 3,
    'merchant ship': 4,
    'Nimitz': 5,
    'Enterprise': 6,
    '<NAME>': 7,
    'WhidbeyIsland': 8,
    'Perry': 9,
    'Sanantonio': 10,
    'Ticonderoga': 11,
    'Kitty Hawk': 12,
    'Kuznetsov': 13,
    'Abukuma': 14,
    'Austen': 15,
    'Tarawa': 16,
    'Blue Ridge': 17,
    'Container ship': 18,
    'OXo|--)': 19,
    'Car carrier([]==[])': 20,
    'jian':21,
    'Hovercraft': 22,
    'hang':23,
    'yacht': 24,
    'CntShip(_|.--.--|_]=':25,
    'Cruise':26,
    'submarine':27,
    'lute':28,
    'Medical':29,
    'Car carrier(======|':30,
    'Ford-class':31,
    'Midway-class':32,
    'Invincible-class':33
    }
# DOTA v1.5 categories (16 classes) plus two extras used here
# ('airport', 'helipad'), giving 18 ids in total.
NAME_LABEL_MAP_DOTA15 ={
    'plane': 1,
    'baseball-diamond': 2,
    'bridge': 3,
    'ground-track-field': 4,
    'small-vehicle': 5,
    'large-vehicle': 6,
    'ship': 7,
    'tennis-court': 8,
    'basketball-court': 9,
    'storage-tank': 10,
    'soccer-ball-field': 11,
    'roundabout': 12,
    'harbor': 13,
    'swimming-pool': 14,
    'helicopter': 15,
    'container-crane': 16,
    'airport':17,
    'helipad':18
    }
# DOTA v1.0 categories: the 15 original classes (no container-crane,
# airport or helipad); ids match the shared classes of NAME_LABEL_MAP_DOTA15.
NAME_LABEL_MAP_DOTA10 ={
    'plane': 1,
    'baseball-diamond': 2,
    'bridge': 3,
    'ground-track-field': 4,
    'small-vehicle': 5,
    'large-vehicle': 6,
    'ship': 7,
    'tennis-court': 8,
    'basketball-court': 9,
    'storage-tank': 10,
    'soccer-ball-field': 11,
    'roundabout': 12,
    'harbor': 13,
    'swimming-pool': 14,
    'helicopter': 15
    }
# Per-class integer counts for DOTA v1.5 — presumably the number of rotated
# copies generated per class (0 = none); note the symmetric/heading-ambiguous
# classes (diamonds, fields, roundabouts) get the largest counts and an
# alternative value set is kept commented out below. TODO confirm against
# the augmentation code that consumes this dict.
angle_number_dict_DOTA15={
    'plane': 0,
    'baseball-diamond': 10,
    'bridge': 2,
    'ground-track-field': 20,
    'small-vehicle':0,
    'large-vehicle': 0,
    'ship': 0,
    'tennis-court': 0,
    'basketball-court': 8,
    'storage-tank': 0,
    'soccer-ball-field': 18,
    'roundabout': 20,
    'harbor': 0,
    'swimming-pool': 2,
    'helicopter': 9,
    'container-crane': 12}
# rotate num (alternative per-class values, kept commented for reference):
# 'plane': 1,
# 'baseball-diamond': 30,
# 'bridge': 4,
# 'ground-track-field': 35,
# 'small-vehicle':0,
# 'large-vehicle': 0,
# 'ship': 0,
# 'tennis-court': 2,
# 'basketball-court': 20,
# 'storage-tank': 0,
# 'soccer-ball-field': 35,
# 'roundabout': 60,
# 'harbor': 1,
# 'swimming-pool': 4,
# 'helicopter': 9,
# 'container-crane': 15}
#plane 35404.0
#baseball-diamond 22932.0
#bridge 18288.0
#ground-track-field 16214.0
#small-vehicle 468075.0
#large-vehicle 59873.0
#ship 140994.0
#tennis-court 52116.0
#basketball-court 33070.0
#storage-tank 16147.0
#soccer-ball-field 18656.0
#roundabout 27237.0
#harbor 30551.0
#swimming-pool 17331.0
#helicopter 7764.0
#container-crane 11433.0
#total_num 976085.0
# Letter/number-coded variant of the 13 TZ ship classes -> ids
# (same ids as NAME_LABEL_MAP_TZship, which uses Chinese names).
NAME_LABEL_MAP_TZship_num = {
    'A1': 1,
    'B3': 2,
    'B4': 3,
    'B6': 4,
    'B7': 5,
    'C1': 6,
    'D1': 7,
    'E1': 8,
    'F1': 9,
    'F2': 10,
    'F3': 11,
    'F4': 12,
    'F5': 13
    }
# TZ aircraft dataset: airframe type -> id, 51 effective classes.
# 'other' and 'U-2' both map to id 51, i.e. U-2 is deliberately treated as
# "other". NOTE(review): 'F-31' is likely a misspelling of 'F-35' (USA_class
# below lists 'F-35') — confirm which spelling the annotation files use.
NAME_LABEL_MAP_TZplane = {
    'E-3': 1,
    'E-8': 2,
    'RC-135V/W': 3,
    'RC-135S': 4,
    'E-2': 5,
    'EP-3': 6,
    'P-3C': 7,
    'A-50': 8,
    'P-8A': 9,
    'F-22': 10,
    'F-31': 11,
    'F-16': 12,
    'F-15': 13,
    'F/A-18': 14,
    'F/A-18E/F': 15,
    'L-39': 16,
    'MiG-29': 17,
    'MiG-31': 18,
    'Su-35': 19,
    'Su-30': 20,
    'Su-27': 21,
    'Typhoon': 22,
    'Su-24': 23,
    'Su-34': 24,
    'A-10': 25,
    'Su-25': 26,
    'B-52': 27,
    'B-1B': 28,
    'B-2': 29,
    'Tu-95': 30,
    'Tu-160': 31,
    'KC-135': 32,
    'KC-10': 33,
    'C-130': 34,
    'C-5': 35,
    'C-2': 36,
    'C-17': 37,
    'Il-76': 38,
    'V-22': 39,
    'Tu-22M': 40,
    'An-12': 41,
    'An-24': 42,
    'Yak-130': 43,
    'KC-46A': 44,
    'C-40': 45,
    'C-21': 46,
    'RQ-4': 47,
    'F-5E': 48,
    'AV-8': 49,
    'helicopter': 50,
    'other': 51,
    'U-2': 51
    }
# Fuselage length per airframe — presumably metres (values match published
# aircraft lengths, e.g. B-52 48.5 m, F-22 18.9 m); bare '#' marks entries
# the original author appears to have flagged as uncertain.
# NOTE(review): no 'U-2' key although NAME_LABEL_MAP_TZplane has one —
# a lookup by that name would raise KeyError; confirm the caller maps
# U-2 to 'other' first.
NAME_long_MAP_TZplane = {
    'E-3': 46.6,
    'E-8': 46.6,
    'RC-135V/W': 46.6,
    'RC-135S': 46.6,
    'E-2': 13.0,
    'EP-3': 35.6,
    'P-3C': 35.7,
    'A-50': 46.6,#
    'P-8A': 39.5,
    'F-22': 18.9,
    'F-31': 15.7,
    'F-16': 12,#
    'F-15': 15.1,
    'F/A-18': 17.1,
    'F/A-18E/F': 18.3,
    'L-39': 12.1,#
    'MiG-29': 17.4,
    'MiG-31': 22.7,
    'Su-35': 21.9,
    'Su-30': 21.9,
    'Su-27': 21.9,
    'Typhoon': 16.0,#
    'Su-24': 23.5,
    'Su-34': 23.3,
    'A-10': 16.3,#
    'Su-25': 15.5,
    'B-52': 48.5,
    'B-1B': 44.5,
    'B-2': 21,
    'Tu-95': 49.5,
    'Tu-160': 54.1,
    'KC-135': 41.5,
    'KC-10': 55.4,
    'C-130': 29.8,
    'C-5': 75.5,
    'C-2': 13,
    'C-17': 53,
    'Il-76': 46.6,
    'V-22': 17.5,
    'Tu-22M': 42.5,
    'An-12': 33.1,
    'An-24': 23.5,
    'Yak-130': 11.3,
    'KC-46A': 50.5,
    'C-40': 33.6,
    'C-21': 14.8,
    'RQ-4': 13.5,
    'F-5E': 14.4,
    'AV-8': 14.1,
    'helicopter': 10,
    'other': 30
    }
# Wingspan ("width") per airframe — presumably metres (values match
# published wingspans, e.g. B-52 56.4 m); trailing '#NN' comments record
# alternative values the original author considered.
# NOTE(review): like NAME_long_MAP_TZplane, there is no 'U-2' key even
# though the label map defines one — confirm the caller handles this.
NAME_width_MAP_TZplane = {
    'E-3': 44.4,
    'E-8': 44.4,
    'RC-135V/W': 44.4,
    'RC-135S': 44.4,
    'E-2': 13.6,
    'EP-3': 30.3,
    'P-3C': 30.3,
    'A-50': 50.5,#
    'P-8A': 37.6,
    'F-22': 13.6,
    'F-31': 10.7,
    'F-16': 9.4,
    'F-15': 13.1,
    'F/A-18': 11.4,
    'F/A-18E/F': 13.6,
    'L-39': 9.5,
    'MiG-29': 11.4,
    'MiG-31': 13.5,
    'Su-35': 14.4,
    'Su-30': 14.7,
    'Su-27': 14.7,
    'Typhoon': 11,#
    'Su-24': 17.6,#10.4
    'Su-34': 14.7,
    'A-10': 17.5,#
    'Su-25': 14.4,
    'B-52': 56.4,
    'B-1B': 41.8,
    'B-2': 52.4,
    'Tu-95': 51.1,
    'Tu-160': 55.7,
    'KC-135': 39.9,
    'KC-10': 50.4,
    'C-130': 40.4,
    'C-5': 67.9,
    'C-2': 13.6,
    'C-17': 51.8,
    'Il-76': 50.5,
    'V-22': 14,
    'Tu-22M': 34.3,#23.3
    'An-12': 38,
    'An-24': 29.2,
    'Yak-130': 10.4,
    'KC-46A': 48.1,
    'C-40': 34.3,#35.7
    'C-21': 12.0,
    'RQ-4': 35.4,
    'F-5E': 8.1,
    'AV-8': 9.42,#
    'helicopter': 8,
    'other':30
    }
# Russian aircraft subset: 19 classes -> ids. Unlike the TZplane map,
# 'other' is id 1 here, and the ids are independent of NAME_LABEL_MAP_TZplane.
NAME_LABEL_MAP_RuPlane = {
    'other': 1,
    'helicopter': 2,
    'Tu-160': 3,
    'Tu-95': 4,
    'Tu-22M': 5,
    'Il-76': 6,
    'An-12': 7,
    'An-24': 8,
    'A-50': 9,
    'Su-27': 10,
    'Su-35': 11,
    'Su-30': 12,
    'Su-34': 13,
    'MiG-29': 14,
    'MiG-31': 15,
    'Su-24': 16,
    'Su-25': 17,
    'Yak-130': 18,
    'L-39': 19
    }
# Airframes of US/Western origin; used to partition aircraft classes by
# country. (Fix: removed a stray trailing '|' artifact after the closing
# bracket that made the statement a syntax error.)
# NOTE(review): this list spells the type 'F-35' while NAME_LABEL_MAP_TZplane
# uses 'F-31', and 'P-8A' (a US type) also appears in Russia_class — confirm
# both against the annotation files before relying on the split.
USA_class = ['E-3', 'E-8', 'RC-135V/W', 'RC-135S', 'E-2', 'EP-3', 'P-3C', 'P-8A',
             'F-22', 'F-35', 'F-16', 'F-15', 'F/A-18', 'F/A-18E/F', 'Typhoon', 'A-10',
             'B-52', 'B-1B', 'B-2', 'KC-135', 'KC-10', 'C-130', 'C-5', 'C-2', 'V-22',
             'E-4', 'KC-46A', 'C-40', 'C-21', 'U-2', 'RQ-4', 'F-5E', 'AV-8']
# Airframes of Russian/Soviet origin.
Russia_class = ['A-50', 'P-8A', 'MiG-29', 'MiG-31', 'Su-35', 'Su-30', 'Su-27',
                'Su-24', 'Su-34', 'Su-25', 'Tu-95', 'Tu-160', 'Il-76', 'Tu-22M',
                'An-12', 'An-24', 'Yak-130']
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.